author | Yunhong Jiang <yunhong.jiang@intel.com> | 2015-08-04 12:17:53 -0700
---|---|---
committer | Yunhong Jiang <yunhong.jiang@intel.com> | 2015-08-04 15:44:42 -0700
commit | 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch) |
tree | 1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/arch/cris/include/asm |
parent | 98260f3884f4a202f9ca5eabed40b1354c489b29 (diff) |
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base.
It's from the linux-4.1.y-rt branch of git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git, and
the base is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat Jul 25 12:13:34 2015 +0200
Prepare v4.1.3-rt3
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
We lose all the git history this way, which is not good. We
should apply another OPNFV project repo in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
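For reference, a minimal sketch of how the imported base could be checked out from the remote and branch named above. This is not part of the original commit, and the local clone directory name is an arbitrary choice:

```
# Clone only the linux-4.1.y-rt branch of the rt-devel tree
# (the target directory name "linux-rt-devel" is arbitrary).
git clone -b linux-4.1.y-rt \
    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git linux-rt-devel
cd linux-rt-devel

# Check out the commit the import is based on ("Prepare v4.1.3-rt3").
git checkout 0917f823c59692d751951bf5ea699a2d1e2f26a2

# Confirm the base commit.
git log -1 --oneline
```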
Diffstat (limited to 'kernel/arch/cris/include/asm')
50 files changed, 2509 insertions, 0 deletions
diff --git a/kernel/arch/cris/include/asm/Kbuild b/kernel/arch/cris/include/asm/Kbuild new file mode 100644 index 000000000..057e51859 --- /dev/null +++ b/kernel/arch/cris/include/asm/Kbuild @@ -0,0 +1,29 @@ +generic-y += atomic.h +generic-y += barrier.h +generic-y += clkdev.h +generic-y += cmpxchg.h +generic-y += cputime.h +generic-y += device.h +generic-y += div64.h +generic-y += exec.h +generic-y += emergency-restart.h +generic-y += futex.h +generic-y += hardirq.h +generic-y += irq_regs.h +generic-y += irq_work.h +generic-y += kdebug.h +generic-y += kmap_types.h +generic-y += kvm_para.h +generic-y += linkage.h +generic-y += local.h +generic-y += local64.h +generic-y += mcs_spinlock.h +generic-y += module.h +generic-y += percpu.h +generic-y += preempt.h +generic-y += scatterlist.h +generic-y += sections.h +generic-y += topology.h +generic-y += trace_clock.h +generic-y += vga.h +generic-y += xor.h diff --git a/kernel/arch/cris/include/asm/asm-offsets.h b/kernel/arch/cris/include/asm/asm-offsets.h new file mode 100644 index 000000000..d370ee36a --- /dev/null +++ b/kernel/arch/cris/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/kernel/arch/cris/include/asm/axisflashmap.h b/kernel/arch/cris/include/asm/axisflashmap.h new file mode 100644 index 000000000..015ca5445 --- /dev/null +++ b/kernel/arch/cris/include/asm/axisflashmap.h @@ -0,0 +1,61 @@ +#ifndef __ASM_AXISFLASHMAP_H +#define __ASM_AXISFLASHMAP_H + +/* Bootblock parameters are stored at 0xc000 and has the FLASH_BOOT_MAGIC + * as start, it ends with 0xFFFFFFFF */ +#define FLASH_BOOT_MAGIC 0xbeefcace +#define BOOTPARAM_OFFSET 0xc000 +/* apps/bootblocktool is used to read and write the parameters, + * and it has nothing to do with the partition table. + */ + +#define PARTITION_TABLE_OFFSET 10 +#define PARTITION_TABLE_MAGIC 0xbeef /* Not a good magic */ + +/* The partitiontable_head is located at offset +10: */ +struct partitiontable_head { + __u16 magic; /* PARTITION_TABLE_MAGIC */ + __u16 size; /* Length of ptable block (entries + end marker) */ + __u32 checksum; /* simple longword sum, over entries + end marker */ +}; + +/* And followed by partition table entries */ +struct partitiontable_entry { + __u32 offset; /* relative to the sector the ptable is in */ + __u32 size; /* in bytes */ + __u32 checksum; /* simple longword sum */ + __u16 type; /* see type codes below */ + __u16 flags; /* bit 0: ro/rw = 1/0 */ + __u32 future0; /* 16 bytes reserved for future use */ + __u32 future1; + __u32 future2; + __u32 future3; +}; +/* ended by an end marker: */ +#define PARTITIONTABLE_END_MARKER 0xFFFFFFFF +#define PARTITIONTABLE_END_MARKER_SIZE 4 + +#define PARTITIONTABLE_END_PAD 10 + +/* Complete structure for whole partition table */ +/* note that table may end before CONFIG_ETRAX_PTABLE_ENTRIES by setting + * offset of the last entry + 1 to PARTITIONTABLE_END_MARKER. + */ +struct partitiontable { + __u8 skip[PARTITION_TABLE_OFFSET]; + struct partitiontable_head head; + struct partitiontable_entry entries[]; +}; + +#define PARTITION_TYPE_PARAM 0x0001 +#define PARTITION_TYPE_KERNEL 0x0002 +#define PARTITION_TYPE_JFFS 0x0003 +#define PARTITION_TYPE_JFFS2 0x0000 + +#define PARTITION_FLAGS_READONLY_MASK 0x0001 +#define PARTITION_FLAGS_READONLY 0x0001 + +/* The master mtd for the entire flash. 
*/ +extern struct mtd_info *axisflash_mtd; + +#endif diff --git a/kernel/arch/cris/include/asm/bitops.h b/kernel/arch/cris/include/asm/bitops.h new file mode 100644 index 000000000..8062cb52d --- /dev/null +++ b/kernel/arch/cris/include/asm/bitops.h @@ -0,0 +1,50 @@ +/* asm/bitops.h for Linux/CRIS + * + * TODO: asm versions if speed is needed + * + * All bit operations return 0 if the bit was cleared before the + * operation and != 0 if it was not. + * + * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). + */ + +#ifndef _CRIS_BITOPS_H +#define _CRIS_BITOPS_H + +/* Currently this is unsuitable for consumption outside the kernel. */ +#ifdef __KERNEL__ + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <arch/bitops.h> +#include <linux/compiler.h> +#include <asm/barrier.h> + +#include <asm-generic/bitops/atomic.h> +#include <asm-generic/bitops/non-atomic.h> + +/* + * Since we define it "external", it collides with the built-in + * definition, which doesn't have the same semantics. We don't want to + * use -fno-builtin, so just hide the name ffs. + */ +#define ffs(x) kernel_ffs(x) + +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/find.h> +#include <asm-generic/bitops/lock.h> + +#include <asm-generic/bitops/le.h> + +#include <asm-generic/bitops/ext2-atomic-setbit.h> + +#include <asm-generic/bitops/sched.h> + +#endif /* __KERNEL__ */ + +#endif /* _CRIS_BITOPS_H */ diff --git a/kernel/arch/cris/include/asm/bug.h b/kernel/arch/cris/include/asm/bug.h new file mode 100644 index 000000000..3b3958963 --- /dev/null +++ b/kernel/arch/cris/include/asm/bug.h @@ -0,0 +1,4 @@ +#ifndef _CRIS_BUG_H +#define _CRIS_BUG_H +#include <arch/bug.h> +#endif diff --git a/kernel/arch/cris/include/asm/bugs.h b/kernel/arch/cris/include/asm/bugs.h new file mode 100644 index 000000000..c5907aac1 --- /dev/null +++ b/kernel/arch/cris/include/asm/bugs.h @@ -0,0 +1,21 @@ +/* $Id: bugs.h,v 1.2 2001/01/17 17:03:18 bjornw Exp $ + * + * include/asm-cris/bugs.h + * + * Copyright (C) 2001 Axis Communications AB + */ + +/* + * This is included by init/main.c to check for architecture-dependent bugs. + * + * Needs: + * void check_bugs(void); + */ + +static void check_bugs(void) +{ +} + + + + diff --git a/kernel/arch/cris/include/asm/cache.h b/kernel/arch/cris/include/asm/cache.h new file mode 100644 index 000000000..a692b9fba --- /dev/null +++ b/kernel/arch/cris/include/asm/cache.h @@ -0,0 +1,6 @@ +#ifndef _ASM_CACHE_H +#define _ASM_CACHE_H + +#include <arch/cache.h> + +#endif /* _ASM_CACHE_H */ diff --git a/kernel/arch/cris/include/asm/cacheflush.h b/kernel/arch/cris/include/asm/cacheflush.h new file mode 100644 index 000000000..36795bca6 --- /dev/null +++ b/kernel/arch/cris/include/asm/cacheflush.h @@ -0,0 +1,32 @@ +#ifndef _CRIS_CACHEFLUSH_H +#define _CRIS_CACHEFLUSH_H + +/* Keep includes the same across arches. 
*/ +#include <linux/mm.h> + +/* The cache doesn't need to be flushed when TLB entries change because + * the cache is mapped to physical memory, not virtual memory + */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_icache_range(start, end) do { } while (0) +#define flush_icache_page(vma,pg) do { } while (0) +#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +int change_page_attr(struct page *page, int numpages, pgprot_t prot); + +#endif /* _CRIS_CACHEFLUSH_H */ diff --git a/kernel/arch/cris/include/asm/checksum.h b/kernel/arch/cris/include/asm/checksum.h new file mode 100644 index 000000000..75dcb77d6 --- /dev/null +++ b/kernel/arch/cris/include/asm/checksum.h @@ -0,0 +1,83 @@ +/* TODO: csum_tcpudp_magic could be speeded up, and csum_fold as well */ + +#ifndef _CRIS_CHECKSUM_H +#define _CRIS_CHECKSUM_H + +#include <arch/checksum.h> + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +__wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum); + +/* + * Fold a partial checksum into a word + */ + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */ + sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */ + return (__force __sum16)~sum; +} + +extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, + int len, __wsum sum, + int *errptr); + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. 
+ * + */ + +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + return csum_fold(csum_partial(iph, ihl * 4, 0)); +} + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ + +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +static inline __sum16 ip_compute_csum(const void *buff, int len) +{ + return csum_fold (csum_partial(buff, len, 0)); +} + +#endif diff --git a/kernel/arch/cris/include/asm/current.h b/kernel/arch/cris/include/asm/current.h new file mode 100644 index 000000000..5f5c0efd0 --- /dev/null +++ b/kernel/arch/cris/include/asm/current.h @@ -0,0 +1,15 @@ +#ifndef _CRIS_CURRENT_H +#define _CRIS_CURRENT_H + +#include <linux/thread_info.h> + +struct task_struct; + +static inline struct task_struct * get_current(void) +{ + return current_thread_info()->task; +} + +#define current get_current() + +#endif /* !(_CRIS_CURRENT_H) */ diff --git a/kernel/arch/cris/include/asm/delay.h b/kernel/arch/cris/include/asm/delay.h new file mode 100644 index 000000000..75ec581bf --- /dev/null +++ b/kernel/arch/cris/include/asm/delay.h @@ -0,0 +1,27 @@ +#ifndef _CRIS_DELAY_H +#define _CRIS_DELAY_H + +/* + * Copyright (C) 1998-2002 Axis Communications AB + * + * Delay routines, using a pre-computed "loops_per_second" value. + */ + +#include <arch/delay.h> + +/* Use only for very small delays ( < 1 msec). */ + +extern unsigned long loops_per_usec; /* arch/cris/mm/init.c */ + +/* May be defined by arch/delay.h. */ +#ifndef udelay +static inline void udelay(unsigned long usecs) +{ + __delay(usecs * loops_per_usec); +} +#endif + +#endif /* defined(_CRIS_DELAY_H) */ + + + diff --git a/kernel/arch/cris/include/asm/dma-mapping.h b/kernel/arch/cris/include/asm/dma-mapping.h new file mode 100644 index 000000000..2f0f654f1 --- /dev/null +++ b/kernel/arch/cris/include/asm/dma-mapping.h @@ -0,0 +1,172 @@ +/* DMA mapping. 
Nothing tricky here, just virt_to_phys */ + +#ifndef _ASM_CRIS_DMA_MAPPING_H +#define _ASM_CRIS_DMA_MAPPING_H + +#include <linux/mm.h> +#include <linux/kernel.h> + +#include <asm/cache.h> +#include <asm/io.h> +#include <asm/scatterlist.h> + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) + +#ifdef CONFIG_PCI +#include <asm-generic/dma-coherent.h> + +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); + +void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); +#else +static inline void * +dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flag) +{ + BUG(); + return NULL; +} + +static inline void +dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle) +{ + BUG(); +} +#endif +static inline dma_addr_t +dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + return virt_to_phys(ptr); +} + +static inline void +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); +} + +static inline int +dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + printk("Map sg\n"); + return nents; +} + +static inline dma_addr_t +dma_map_page(struct device *dev, struct page *page, unsigned long offset, + size_t size, enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + return page_to_phys(page) + offset; +} + +static inline void +dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); +} + + +static inline void +dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); +} + +static inline void +dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ +} + +static inline void +dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ +} + +static inline void +dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ +} + +static inline void +dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ +} + +static inline void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ +} + +static inline void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ +} + +static inline int +dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 0; +} + +static inline int +dma_supported(struct device *dev, u64 mask) +{ + /* + * we fall back to GFP_DMA when the mask isn't all 1s, + * so we can't guarantee allocations that must be + * within a tighter range than GFP_DMA.. 
+ */ + if(mask < 0x00ffffff) + return 0; + + return 1; +} + +static inline int +dma_set_mask(struct device *dev, u64 mask) +{ + if(!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + + *dev->dma_mask = mask; + + return 0; +} + +static inline void +dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction) +{ +} + +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) + + +#endif diff --git a/kernel/arch/cris/include/asm/dma.h b/kernel/arch/cris/include/asm/dma.h new file mode 100644 index 000000000..30fd715fa --- /dev/null +++ b/kernel/arch/cris/include/asm/dma.h @@ -0,0 +1,21 @@ +/* $Id: dma.h,v 1.2 2001/05/09 12:17:42 johana Exp $ */ + +#ifndef _ASM_DMA_H +#define _ASM_DMA_H + +#include <arch/dma.h> + +/* it's useless on the Etrax, but unfortunately needed by the new + bootmem allocator (but this should do it for this) */ + +#define MAX_DMA_ADDRESS PAGE_OFFSET + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* _ASM_DMA_H */ diff --git a/kernel/arch/cris/include/asm/elf.h b/kernel/arch/cris/include/asm/elf.h new file mode 100644 index 000000000..c2a394ff5 --- /dev/null +++ b/kernel/arch/cris/include/asm/elf.h @@ -0,0 +1,89 @@ +#ifndef __ASMCRIS_ELF_H +#define __ASMCRIS_ELF_H + +/* + * ELF register definitions.. + */ + +#include <asm/user.h> + +#define R_CRIS_NONE 0 +#define R_CRIS_8 1 +#define R_CRIS_16 2 +#define R_CRIS_32 3 +#define R_CRIS_8_PCREL 4 +#define R_CRIS_16_PCREL 5 +#define R_CRIS_32_PCREL 6 +#define R_CRIS_GNU_VTINHERIT 7 +#define R_CRIS_GNU_VTENTRY 8 +#define R_CRIS_COPY 9 +#define R_CRIS_GLOB_DAT 10 +#define R_CRIS_JUMP_SLOT 11 +#define R_CRIS_RELATIVE 12 +#define R_CRIS_16_GOT 13 +#define R_CRIS_32_GOT 14 +#define R_CRIS_16_GOTPLT 15 +#define R_CRIS_32_GOTPLT 16 +#define R_CRIS_32_GOTREL 17 +#define R_CRIS_32_PLT_GOTREL 18 +#define R_CRIS_32_PLT_PCREL 19 + +typedef unsigned long elf_greg_t; + +/* Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is + thus exposed to user-space. */ +#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* A placeholder; CRIS does not have any fp regs. */ +typedef unsigned long elf_fpregset_t; + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_CRIS + +#include <arch/elf.h> + +/* The master for these definitions is {binutils}/include/elf/cris.h: */ +/* User symbols in this file have a leading underscore. */ +#define EF_CRIS_UNDERSCORE 0x00000001 + +/* This is a mask for different incompatible machine variants. */ +#define EF_CRIS_VARIANT_MASK 0x0000000e + +/* Variant 0; may contain v0..10 object. */ +#define EF_CRIS_VARIANT_ANY_V0_V10 0x00000000 + +/* Variant 1; contains v32 object. */ +#define EF_CRIS_VARIANT_V32 0x00000002 + +/* Variant 2; contains object compatible with v32 and v10. */ +#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004 +/* End of excerpt from {binutils}/include/elf/cris.h. 
*/ + +#define ELF_EXEC_PAGESIZE 8192 + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + +/* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space, + but it's not easy, and we've already done it here. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. +*/ + +#define ELF_PLATFORM (NULL) + +#endif diff --git a/kernel/arch/cris/include/asm/eshlibld.h b/kernel/arch/cris/include/asm/eshlibld.h new file mode 100644 index 000000000..10ce36cf7 --- /dev/null +++ b/kernel/arch/cris/include/asm/eshlibld.h @@ -0,0 +1,113 @@ +/*!************************************************************************** +*! +*! FILE NAME : eshlibld.h +*! +*! DESCRIPTION: Prototypes for exported shared library functions +*! +*! FUNCTIONS : perform_cris_aout_relocations, shlibmod_fork, shlibmod_exit +*! (EXPORTED) +*! +*!--------------------------------------------------------------------------- +*! +*! (C) Copyright 1998, 1999 Axis Communications AB, LUND, SWEDEN +*! +*!**************************************************************************/ +/* $Id: eshlibld.h,v 1.2 2001/02/23 13:47:33 bjornw Exp $ */ + +#ifndef _cris_relocate_h +#define _cris_relocate_h + +/* Please note that this file is also compiled into the xsim simulator. + Try to avoid breaking its double use (only works on a little-endian + 32-bit machine such as the i386 anyway). + + Use __KERNEL__ when you're about to use kernel functions, + (which you should not do here anyway, since this file is + used by glibc). + Use defined(__KERNEL__) || defined(__elinux__) when doing + things that only makes sense on an elinux system. + Use __CRIS__ when you're about to do (really) CRIS-specific code. +*/ + +/* We have dependencies all over the place for the host system + for xsim being a linux system, so let's not pretend anything + else with #ifdef:s here until fixed. */ +#include <linux/limits.h> + +/* Maybe do sanity checking if file input. */ +#undef SANITYCHECK_RELOC + +/* Maybe output debug messages. */ +#undef RELOC_DEBUG + +/* Maybe we want to share core as well as disk space. + Mainly depends on the config macro CONFIG_SHARE_SHLIB_CORE, but it is + assumed that we want to share code when debugging (exposes more + trouble). */ +#ifndef SHARE_LIB_CORE +# if (defined(__KERNEL__) || !defined(RELOC_DEBUG)) \ + && !defined(CONFIG_SHARE_SHLIB_CORE) +# define SHARE_LIB_CORE 0 +# else +# define SHARE_LIB_CORE 1 +# endif /* __KERNEL__ etc */ +#endif /* SHARE_LIB_CORE */ + + +/* Main exported function; supposed to be called when the program a.out + has been read in. */ +extern int +perform_cris_aout_relocations(unsigned long text, unsigned long tlength, + unsigned long data, unsigned long dlength, + unsigned long baddr, unsigned long blength, + + /* These may be zero when there's "perfect" + position-independent code. */ + unsigned char *trel, unsigned long tsrel, + unsigned long dsrel, + + /* These will be zero at a first try, to see + if code is statically linked. 
Else a + second try, with the symbol table and + string table nonzero should be done. */ + unsigned char *symbols, unsigned long symlength, + unsigned char *strings, unsigned long stringlength, + + /* These will only be used when symbol table + information is present. */ + char **env, int envc, + int euid, int is_suid); + + +#ifdef RELOC_DEBUG +/* Task-specific debug stuff. */ +struct task_reloc_debug { + struct memdebug *alloclast; + unsigned long alloc_total; + unsigned long export_total; +}; +#endif /* RELOC_DEBUG */ + +#if SHARE_LIB_CORE + +/* When code (and some very specific data) is shared and not just + dynamically linked, we need to export hooks for exec beginning and + end. */ + +struct shlibdep; + +extern void +shlibmod_exit(struct shlibdep **deps); + +/* Returns 0 if failure, nonzero for ok. */ +extern int +shlibmod_fork(struct shlibdep **deps); + +#else /* ! SHARE_LIB_CORE */ +# define shlibmod_exit(x) +# define shlibmod_fork(x) 1 +#endif /* ! SHARE_LIB_CORE */ + +#endif _cris_relocate_h +/********************** END OF FILE eshlibld.h *****************************/ + diff --git a/kernel/arch/cris/include/asm/etraxi2c.h b/kernel/arch/cris/include/asm/etraxi2c.h new file mode 100644 index 000000000..e369a7620 --- /dev/null +++ b/kernel/arch/cris/include/asm/etraxi2c.h @@ -0,0 +1,36 @@ +/* $Id: etraxi2c.h,v 1.1 2001/01/18 15:49:57 bjornw Exp $ */ + +#ifndef _LINUX_ETRAXI2C_H +#define _LINUX_ETRAXI2C_H + +/* etraxi2c _IOC_TYPE, bits 8 to 15 in ioctl cmd */ + +#define ETRAXI2C_IOCTYPE 44 + +/* supported ioctl _IOC_NR's */ + +/* in write operations, the argument contains both i2c + * slave, register and value. + */ + +#define I2C_WRITEARG(slave, reg, value) (((slave) << 16) | ((reg) << 8) | (value)) +#define I2C_READARG(slave, reg) (((slave) << 16) | ((reg) << 8)) + +#define I2C_ARGSLAVE(arg) ((arg) >> 16) +#define I2C_ARGREG(arg) (((arg) >> 8) & 0xff) +#define I2C_ARGVALUE(arg) ((arg) & 0xff) + +#define I2C_WRITEREG 0x1 /* write to an i2c register */ +#define I2C_READREG 0x2 /* read from an i2c register */ + +/* +EXAMPLE usage: + + i2c_arg = I2C_WRITEARG(STA013_WRITE_ADDR, reg, val); + ioctl(fd, _IO(ETRAXI2C_IOCTYPE, I2C_WRITEREG), i2c_arg); + + i2c_arg = I2C_READARG(STA013_READ_ADDR, reg); + val = ioctl(fd, _IO(ETRAXI2C_IOCTYPE, I2C_READREG), i2c_arg); + +*/ +#endif diff --git a/kernel/arch/cris/include/asm/fasttimer.h b/kernel/arch/cris/include/asm/fasttimer.h new file mode 100644 index 000000000..8f8a8d6c9 --- /dev/null +++ b/kernel/arch/cris/include/asm/fasttimer.h @@ -0,0 +1,47 @@ +/* + * linux/include/asm-cris/fasttimer.h + * + * Fast timers for ETRAX100LX + * Copyright (C) 2000-2007 Axis Communications AB + */ +#include <linux/time.h> /* struct timeval */ +#include <linux/timex.h> + +#ifdef CONFIG_ETRAX_FAST_TIMER + +typedef void fast_timer_function_type(unsigned long); + +struct fasttime_t { + unsigned long tv_jiff; /* jiffies */ + unsigned long tv_usec; /* microseconds */ +}; + +struct fast_timer{ /* Close to timer_list */ + struct fast_timer *next; + struct fast_timer *prev; + struct fasttime_t tv_set; + struct fasttime_t tv_expires; + unsigned long delay_us; + fast_timer_function_type *function; + unsigned long data; + const char *name; +}; + +extern struct fast_timer *fast_timer_list; + +void start_one_shot_timer(struct fast_timer *t, + fast_timer_function_type *function, + unsigned long data, + unsigned long delay_us, + const char *name); + +int del_fast_timer(struct fast_timer * t); +/* return 1 if deleted */ + + +void schedule_usleep(unsigned long us); + + +int 
fast_timer_init(void); + +#endif diff --git a/kernel/arch/cris/include/asm/fb.h b/kernel/arch/cris/include/asm/fb.h new file mode 100644 index 000000000..c7df38030 --- /dev/null +++ b/kernel/arch/cris/include/asm/fb.h @@ -0,0 +1,12 @@ +#ifndef _ASM_FB_H_ +#define _ASM_FB_H_ +#include <linux/fb.h> + +#define fb_pgprotect(...) do {} while (0) + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* _ASM_FB_H_ */ diff --git a/kernel/arch/cris/include/asm/ftrace.h b/kernel/arch/cris/include/asm/ftrace.h new file mode 100644 index 000000000..40a8c178f --- /dev/null +++ b/kernel/arch/cris/include/asm/ftrace.h @@ -0,0 +1 @@ +/* empty */ diff --git a/kernel/arch/cris/include/asm/hw_irq.h b/kernel/arch/cris/include/asm/hw_irq.h new file mode 100644 index 000000000..298066020 --- /dev/null +++ b/kernel/arch/cris/include/asm/hw_irq.h @@ -0,0 +1,5 @@ +#ifndef _ASM_HW_IRQ_H +#define _ASM_HW_IRQ_H + +#endif + diff --git a/kernel/arch/cris/include/asm/io.h b/kernel/arch/cris/include/asm/io.h new file mode 100644 index 000000000..752a3f45d --- /dev/null +++ b/kernel/arch/cris/include/asm/io.h @@ -0,0 +1,192 @@ +#ifndef _ASM_CRIS_IO_H +#define _ASM_CRIS_IO_H + +#include <asm/page.h> /* for __va, __pa */ +#include <arch/io.h> +#include <asm-generic/iomap.h> +#include <linux/kernel.h> + +struct cris_io_operations +{ + u32 (*read_mem)(void *addr, int size); + void (*write_mem)(u32 val, int size, void *addr); + u32 (*read_io)(u32 port, void *addr, int size, int count); + void (*write_io)(u32 port, void *addr, int size, int count); +}; + +#ifdef CONFIG_PCI +extern struct cris_io_operations *cris_iops; +#else +#define cris_iops ((struct cris_io_operations*)NULL) +#endif + +/* + * Change virtual addresses to physical addresses and vv. + */ + +static inline unsigned long virt_to_phys(volatile void * address) +{ + return __pa(address); +} + +static inline void * phys_to_virt(unsigned long address) +{ + return __va(address); +} + +extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); +extern void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot); + +static inline void __iomem * ioremap (unsigned long offset, unsigned long size) +{ + return __ioremap(offset, size, 0); +} + +extern void iounmap(volatile void * __iomem addr); + +extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size); + +/* + * IO bus memory addresses are also 1:1 with the physical address + */ +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + +/* + * readX/writeX() are used to access memory mapped devices. On some + * architectures the memory mapped IO stuff needs to be accessed + * differently. On the CRIS architecture, we just read/write the + * memory location directly. 
+ */ +#ifdef CONFIG_PCI +#define PCI_SPACE(x) ((((unsigned)(x)) & 0x10000000) == 0x10000000) +#else +#define PCI_SPACE(x) 0 +#endif +static inline unsigned char readb(const volatile void __iomem *addr) +{ + if (PCI_SPACE(addr) && cris_iops) + return cris_iops->read_mem((void*)addr, 1); + else + return *(volatile unsigned char __force *) addr; +} +static inline unsigned short readw(const volatile void __iomem *addr) +{ + if (PCI_SPACE(addr) && cris_iops) + return cris_iops->read_mem((void*)addr, 2); + else + return *(volatile unsigned short __force *) addr; +} +static inline unsigned int readl(const volatile void __iomem *addr) +{ + if (PCI_SPACE(addr) && cris_iops) + return cris_iops->read_mem((void*)addr, 4); + else + return *(volatile unsigned int __force *) addr; +} +#define readb_relaxed(addr) readb(addr) +#define readw_relaxed(addr) readw(addr) +#define readl_relaxed(addr) readl(addr) +#define __raw_readb readb +#define __raw_readw readw +#define __raw_readl readl + +static inline void writeb(unsigned char b, volatile void __iomem *addr) +{ + if (PCI_SPACE(addr) && cris_iops) + cris_iops->write_mem(b, 1, (void*)addr); + else + *(volatile unsigned char __force *) addr = b; +} +static inline void writew(unsigned short b, volatile void __iomem *addr) +{ + if (PCI_SPACE(addr) && cris_iops) + cris_iops->write_mem(b, 2, (void*)addr); + else + *(volatile unsigned short __force *) addr = b; +} +static inline void writel(unsigned int b, volatile void __iomem *addr) +{ + if (PCI_SPACE(addr) && cris_iops) + cris_iops->write_mem(b, 4, (void*)addr); + else + *(volatile unsigned int __force *) addr = b; +} +#define writeb_relaxed(b, addr) writeb(b, addr) +#define writew_relaxed(b, addr) writew(b, addr) +#define writel_relaxed(b, addr) writel(b, addr) +#define __raw_writeb writeb +#define __raw_writew writew +#define __raw_writel writel + +#define mmiowb() + +#define memset_io(a,b,c) memset((void *)(a),(b),(c)) +#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) +#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) + + +/* I/O port access. Normally there is no I/O space on CRIS but when + * Cardbus/PCI is enabled the request is passed through the bridge. + */ + +#define IO_SPACE_LIMIT 0xffff +#define inb(port) (cris_iops ? cris_iops->read_io(port,NULL,1,1) : 0) +#define inw(port) (cris_iops ? cris_iops->read_io(port,NULL,2,1) : 0) +#define inl(port) (cris_iops ? cris_iops->read_io(port,NULL,4,1) : 0) +#define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0) +#define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0) +#define insl(port,addr,count) (cris_iops ? 
cris_iops->read_io(port,addr,4,count) : 0) +static inline void outb(unsigned char data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 1, 1); +} +static inline void outw(unsigned short data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 2, 1); +} +static inline void outl(unsigned int data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 4, 1); +} +static inline void outsb(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 1, count); +} +static inline void outsw(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 2, count); +} +static inline void outsl(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 4, count); +} + +#define inb_p(port) inb(port) +#define inw_p(port) inw(port) +#define inl_p(port) inl(port) +#define outb_p(val, port) outb((val), (port)) +#define outw_p(val, port) outw((val), (port)) +#define outl_p(val, port) outl((val), (port)) + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +#endif diff --git a/kernel/arch/cris/include/asm/irq.h b/kernel/arch/cris/include/asm/irq.h new file mode 100644 index 000000000..ce0fcf540 --- /dev/null +++ b/kernel/arch/cris/include/asm/irq.h @@ -0,0 +1,13 @@ +#ifndef _ASM_IRQ_H +#define _ASM_IRQ_H + +#include <arch/irq.h> + +static inline int irq_canonicalize(int irq) +{ + return irq; +} + +#endif /* _ASM_IRQ_H */ + + diff --git a/kernel/arch/cris/include/asm/irqflags.h b/kernel/arch/cris/include/asm/irqflags.h new file mode 100644 index 000000000..943ba5ca6 --- /dev/null +++ b/kernel/arch/cris/include/asm/irqflags.h @@ -0,0 +1 @@ +#include <arch/irqflags.h> diff --git a/kernel/arch/cris/include/asm/mmu.h b/kernel/arch/cris/include/asm/mmu.h new file mode 100644 index 000000000..e06ea94ec --- /dev/null +++ b/kernel/arch/cris/include/asm/mmu.h @@ -0,0 +1,10 @@ +/* + * CRIS MMU constants and PTE layout + */ + +#ifndef _CRIS_MMU_H +#define _CRIS_MMU_H + +#include <arch/mmu.h> + +#endif diff --git a/kernel/arch/cris/include/asm/mmu_context.h b/kernel/arch/cris/include/asm/mmu_context.h new file mode 100644 index 000000000..1d45fd636 --- /dev/null +++ b/kernel/arch/cris/include/asm/mmu_context.h @@ -0,0 +1,27 @@ +#ifndef __CRIS_MMU_CONTEXT_H +#define __CRIS_MMU_CONTEXT_H + +#include <asm-generic/mm_hooks.h> + +extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +extern void get_mmu_context(struct mm_struct *mm); +extern void destroy_context(struct mm_struct *mm); +extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk); + +#define deactivate_mm(tsk,mm) do { } while (0) + +#define activate_mm(prev,next) switch_mm((prev),(next),NULL) + +/* current active pgd - this is similar to other processors pgd + * registers like cr3 on the i386 + */ + +/* defined in arch/cris/mm/fault.c */ +DECLARE_PER_CPU(pgd_t *, current_pgd); + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +#endif diff --git a/kernel/arch/cris/include/asm/mutex.h b/kernel/arch/cris/include/asm/mutex.h new file mode 100644 index 000000000..458c1f7fb --- /dev/null +++ 
b/kernel/arch/cris/include/asm/mutex.h @@ -0,0 +1,9 @@ +/* + * Pull in the generic implementation for the mutex fastpath. + * + * TODO: implement optimized primitives instead, or leave the generic + * implementation in place, or pick the atomic_xchg() based generic + * implementation. (see asm-generic/mutex-xchg.h for details) + */ + +#include <asm-generic/mutex-dec.h> diff --git a/kernel/arch/cris/include/asm/page.h b/kernel/arch/cris/include/asm/page.h new file mode 100644 index 000000000..dfc53f9b8 --- /dev/null +++ b/kernel/arch/cris/include/asm/page.h @@ -0,0 +1,73 @@ +#ifndef _CRIS_PAGE_H +#define _CRIS_PAGE_H + +#include <arch/page.h> +#include <linux/const.h> + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 13 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + +/* + * These are used to make use of C type-checking.. + */ +#ifndef __ASSEMBLY__ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; +#endif + +#define pte_val(x) ((x).pte) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +/* On CRIS the PFN numbers doesn't start at 0 so we have to compensate */ +/* for that before indexing into the page table starting at mem_map */ +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) +#define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr) + +/* to index into the page map. our pages all start at physical addr PAGE_OFFSET so + * we can let the map start there. notice that we subtract PAGE_OFFSET because + * we start our mem_map there - in other ports they map mem_map physically and + * use __pa instead. 
in our system both the physical and virtual address of DRAM + * is too high to let mem_map start at 0, so we do it this way instead (similar + * to arm and m68k I think) + */ + +#define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT)) +#define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT) + +/* convert a page (based on mem_map and forward) to a physical address + * do this by figuring out the virtual address and then use __pa + */ + +#define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) + +#ifndef __ASSEMBLY__ + +#endif /* __ASSEMBLY__ */ + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* _CRIS_PAGE_H */ + diff --git a/kernel/arch/cris/include/asm/pci.h b/kernel/arch/cris/include/asm/pci.h new file mode 100644 index 000000000..cc2399c17 --- /dev/null +++ b/kernel/arch/cris/include/asm/pci.h @@ -0,0 +1,57 @@ +#ifndef __ASM_CRIS_PCI_H +#define __ASM_CRIS_PCI_H + + +#ifdef __KERNEL__ +#include <linux/mm.h> /* for struct page */ + +/* Can be used to override the logic in pci_scan_bus for skipping + already-configured bus numbers - to be used for buggy BIOSes + or architectures with incomplete PCI setup by the loader */ + +#define pcibios_assign_all_busses(void) 1 + +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM 0x10000000 + +#define PCIBIOS_MIN_CARDBUS_IO 0x4000 + +void pcibios_config_init(void); +struct pci_bus * pcibios_scan_root(int bus); + +void pcibios_set_master(struct pci_dev *dev); +struct irq_routing_table *pcibios_get_irq_routing_table(void); +int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); + +/* Dynamic DMA mapping stuff. + * i386 has everything mapped statically. + */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <asm/scatterlist.h> +#include <linux/string.h> +#include <asm/io.h> + +struct pci_dev; + +/* The PCI address space does equal the physical memory + * address space. The networking and block device layers use + * this boolean for bounce buffer decisions. + */ +#define PCI_DMA_BUS_IS_PHYS (1) + +#define HAVE_PCI_MMAP +extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine); + + +#endif /* __KERNEL__ */ + +/* implement the pci_ DMA API in terms of the generic device dma_ one */ +#include <asm-generic/pci-dma-compat.h> + +/* generic pci stuff */ +#include <asm-generic/pci.h> + +#endif /* __ASM_CRIS_PCI_H */ diff --git a/kernel/arch/cris/include/asm/pgalloc.h b/kernel/arch/cris/include/asm/pgalloc.h new file mode 100644 index 000000000..235ece437 --- /dev/null +++ b/kernel/arch/cris/include/asm/pgalloc.h @@ -0,0 +1,63 @@ +#ifndef _CRIS_PGALLOC_H +#define _CRIS_PGALLOC_H + +#include <linux/threads.h> +#include <linux/mm.h> + +#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, pte) +#define pmd_populate(mm, pmd, pte) pmd_set(pmd, page_address(pte)) +#define pmd_pgtable(pmd) pmd_page(pmd) + +/* + * Allocate and free page tables. 
+ */ + +static inline pgd_t *pgd_alloc (struct mm_struct *mm) +{ + return (pgd_t *)get_zeroed_page(GFP_KERNEL); +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_page((unsigned long)pgd); +} + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) +{ + pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); + return pte; +} + +static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) +{ + struct page *pte; + pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); + if (!pte) + return NULL; + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } + return pte; +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + pgtable_page_dtor(pte); + __free_page(pte); +} + +#define __pte_free_tlb(tlb,pte,address) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page((tlb), pte); \ +} while (0) + +#define check_pgt_cache() do { } while (0) + +#endif diff --git a/kernel/arch/cris/include/asm/pgtable.h b/kernel/arch/cris/include/asm/pgtable.h new file mode 100644 index 000000000..ceefc314d --- /dev/null +++ b/kernel/arch/cris/include/asm/pgtable.h @@ -0,0 +1,295 @@ +/* + * CRIS pgtable.h - macros and functions to manipulate page tables. + */ + +#ifndef _CRIS_PGTABLE_H +#define _CRIS_PGTABLE_H + +#include <asm/page.h> +#include <asm-generic/pgtable-nopmd.h> + +#ifndef __ASSEMBLY__ +#include <linux/sched.h> +#include <asm/mmu.h> +#endif +#include <arch/pgtable.h> + +/* + * The Linux memory management assumes a three-level page table setup. On + * CRIS, we use that, but "fold" the mid level into the top-level page + * table. Since the MMU TLB is software loaded through an interrupt, it + * supports any page table structure, so we could have used a three-level + * setup, but for the amounts of memory we normally use, a two-level is + * probably more efficient. + * + * This file contains the functions and defines necessary to modify and use + * the CRIS page table tree. + */ +#ifndef __ASSEMBLY__ +extern void paging_init(void); +#endif + +/* Certain architectures need to do special things when pte's + * within a page table are directly modified. Thus, the following + * hook is made available. + */ +#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + +/* + * (pmds are folded into pgds so this doesn't get actually called, + * but the define is needed for a generic inline function.) + */ +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) +#define set_pgu(pudptr, pudval) (*(pudptr) = pudval) + +/* PGDIR_SHIFT determines the size of the area a second-level page table can + * map. It is equal to the page size times the number of PTE's that fit in + * a PMD page. A PTE is 4-bytes in CRIS. Hence the following number. + */ + +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * entries per page directory level: we use a two-level, so + * we don't really have any PMD directory physically. + * pointers are 4 bytes so we can use the page size and + * divide it by 4 (shift by 2). 
+ */ +#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2)) +#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2)) + +/* calculate how many PGD entries a user-level program can use + * the first mappable virtual address is 0 + * (TASK_SIZE is the maximum virtual address space) + */ + +#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0UL + +/* zero page used for uninitialized stuff */ +#ifndef __ASSEMBLY__ +extern unsigned long empty_zero_page; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) +#endif + +/* number of bits that fit into a memory pointer */ +#define BITS_PER_PTR (8*sizeof(unsigned long)) + +/* to align the pointer to a pointer address */ +#define PTR_MASK (~(sizeof(void*)-1)) + +/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ +/* 64-bit machines, beware! SRB. */ +#define SIZEOF_PTR_LOG2 2 + +/* to find an entry in a page-table */ +#define PAGE_PTR(address) \ +((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) + +/* to set the page-dir */ +#define SET_PAGE_DIR(tsk,pgdir) + +#define pte_none(x) (!pte_val(x)) +#define pte_present(x) (pte_val(x) & _PAGE_PRESENT) +#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) + +#define pmd_none(x) (!pmd_val(x)) +/* by removing the _PAGE_KERNEL bit from the comparison, the same pmd_bad + * works for both _PAGE_TABLE and _KERNPG_TABLE pmd entries. + */ +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_KERNEL)) != _PAGE_TABLE) +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) +#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) + +#ifndef __ASSEMBLY__ + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ + +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_special(pte_t pte) { return 0; } + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); + return pte; +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + if (pte_val(pte) & _PAGE_MODIFIED) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_MODIFIED; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + if (pte_val(pte) & _PAGE_READ) + { + pte_val(pte) |= _PAGE_SILENT_READ; + if ((pte_val(pte) & (_PAGE_WRITE | _PAGE_MODIFIED)) == + (_PAGE_WRITE | _PAGE_MODIFIED)) + pte_val(pte) |= _PAGE_SILENT_WRITE; + } + return pte; +} +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ + +/* What actually goes as arguments to the various functions is less than + * obvious, but a rule of thumb is that struct page's goes as struct page *, + * really physical DRAM addresses are unsigned long's, and DRAM "virtual" + * addresses (the 0xc0xxxxxx's) goes as void *'s. 
+ */ + +static inline pte_t __mk_pte(void * page, pgprot_t pgprot) +{ + pte_t pte; + /* the PTE needs a physical address */ + pte_val(pte) = __pa(page) | pgprot_val(pgprot); + return pte; +} + +#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot)) + +#define mk_pte_phys(physpage, pgprot) \ +({ \ + pte_t __pte; \ + \ + pte_val(__pte) = (physpage) + pgprot_val(pgprot); \ + __pte; \ +}) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } + +#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE)) + + +/* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval + * __pte_page(pte_val) refers to the "virtual" DRAM interval + * pte_pagenr refers to the page-number counted starting from the virtual DRAM start + */ + +static inline unsigned long __pte_page(pte_t pte) +{ + /* the PTE contains a physical address */ + return (unsigned long)__va(pte_val(pte) & PAGE_MASK); +} + +#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) + +/* permanent address of a page */ + +#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) +#define pte_page(pte) (mem_map+pte_pagenr(pte)) + +/* only the pte's themselves need to point to physical DRAM (see above) + * the pagetable links are purely handled within the kernel SW and thus + * don't need the __pa and __va transformations. + */ + +static inline void pmd_set(pmd_t * pmdp, pte_t * ptep) +{ pmd_val(*pmdp) = _PAGE_TABLE | (unsigned long) ptep; } + +#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) + +/* to find an entry in a page-table-directory. */ +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) + +/* to find an entry in a page-table-directory */ +static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address) +{ + return mm->pgd + pgd_index(address); +} + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* Find an entry in the third-level page table.. */ +#define __pte_offset(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) +#define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) + +#define pte_unmap(pte) do { } while (0) +#define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT) +#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) + + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */ + +/* + * CRIS doesn't have any external MMU info: the kernel page + * tables contain all the necessary information. + * + * Actually I am not sure on what this could be used for. 
+ */ +static inline void update_mmu_cache(struct vm_area_struct * vma, + unsigned long address, pte_t *ptep) +{ +} + +/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ +/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */ + +#define __swp_type(x) (((x).val >> 5) & 0x7f) +#define __swp_offset(x) ((x).val >> 12) +#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 5) | ((offset) << 12) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#define kern_addr_valid(addr) (1) + +#include <asm-generic/pgtable.h> + +/* + * No page table caches to initialise + */ +#define pgtable_cache_init() do { } while (0) + +typedef pte_t *pte_addr_t; + +#endif /* __ASSEMBLY__ */ +#endif /* _CRIS_PGTABLE_H */ diff --git a/kernel/arch/cris/include/asm/processor.h b/kernel/arch/cris/include/asm/processor.h new file mode 100644 index 000000000..862126b58 --- /dev/null +++ b/kernel/arch/cris/include/asm/processor.h @@ -0,0 +1,70 @@ +/* + * include/asm-cris/processor.h + * + * Copyright (C) 2000, 2001 Axis Communications AB + * + * Authors: Bjorn Wesen Initial version + * + */ + +#ifndef __ASM_CRIS_PROCESSOR_H +#define __ASM_CRIS_PROCESSOR_H + +#include <asm/page.h> +#include <asm/ptrace.h> +#include <arch/processor.h> +#include <arch/system.h> + +struct task_struct; + +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) + +/* THREAD_SIZE is the size of the thread_info/kernel_stack combo. + * normally, the stack is found by doing something like p + THREAD_SIZE + * in CRIS, a page is 8192 bytes, which seems like a sane size + */ +#define THREAD_SIZE PAGE_SIZE +#define THREAD_SIZE_ORDER (0) + +/* + * At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack. + * This macro allows us to find those regs for a task. + * Notice that subsequent pt_regs stackings, like recursive interrupts occurring while + * we're in the kernel, won't affect this - only the first user->kernel transition + * registers are reached by this. + */ + +#define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE)) - 1) + +/* + * Dito but for the currently running task + */ + +#define task_pt_regs(task) user_regs(task_thread_info(task)) + +unsigned long get_wchan(struct task_struct *p); + +#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) + +extern unsigned long thread_saved_pc(struct task_struct *tsk); + +/* Free all resources held by a thread. */ +static inline void release_thread(struct task_struct *dead_task) +{ + /* Nothing needs to be done. */ +} + +#define init_stack (init_thread_union.stack) + +#define cpu_relax() barrier() +#define cpu_relax_lowlatency() cpu_relax() + +void default_idle(void); + +#endif /* __ASM_CRIS_PROCESSOR_H */ diff --git a/kernel/arch/cris/include/asm/ptrace.h b/kernel/arch/cris/include/asm/ptrace.h new file mode 100644 index 000000000..9e788d04a --- /dev/null +++ b/kernel/arch/cris/include/asm/ptrace.h @@ -0,0 +1,14 @@ +#ifndef _CRIS_PTRACE_H +#define _CRIS_PTRACE_H + +#include <uapi/asm/ptrace.h> + + +/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. 
*/ +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 + +#define profile_pc(regs) instruction_pointer(regs) +#define current_user_stack_pointer() rdusp() + +#endif /* _CRIS_PTRACE_H */ diff --git a/kernel/arch/cris/include/asm/segment.h b/kernel/arch/cris/include/asm/segment.h new file mode 100644 index 000000000..c067513be --- /dev/null +++ b/kernel/arch/cris/include/asm/segment.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SEGMENT_H +#define _ASM_SEGMENT_H + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#endif diff --git a/kernel/arch/cris/include/asm/serial.h b/kernel/arch/cris/include/asm/serial.h new file mode 100644 index 000000000..af7535a95 --- /dev/null +++ b/kernel/arch/cris/include/asm/serial.h @@ -0,0 +1,9 @@ +#ifndef _ASM_SERIAL_H +#define _ASM_SERIAL_H + +/* + * This assumes you have a 1.8432 MHz clock for your UART. + */ +#define BASE_BAUD (1843200 / 16) + +#endif /* _ASM_SERIAL_H */ diff --git a/kernel/arch/cris/include/asm/shmparam.h b/kernel/arch/cris/include/asm/shmparam.h new file mode 100644 index 000000000..d29d12270 --- /dev/null +++ b/kernel/arch/cris/include/asm/shmparam.h @@ -0,0 +1,8 @@ +#ifndef _ASM_CRIS_SHMPARAM_H +#define _ASM_CRIS_SHMPARAM_H + +/* same as asm-i386/ version.. */ + +#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ + +#endif /* _ASM_CRIS_SHMPARAM_H */ diff --git a/kernel/arch/cris/include/asm/signal.h b/kernel/arch/cris/include/asm/signal.h new file mode 100644 index 000000000..c11b8745c --- /dev/null +++ b/kernel/arch/cris/include/asm/signal.h @@ -0,0 +1,23 @@ +#ifndef _ASM_CRIS_SIGNAL_H +#define _ASM_CRIS_SIGNAL_H + +#include <uapi/asm/signal.h> + +/* Most things should be clean enough to redefine this at will, if care + is taken to make libc match. */ + +#define _NSIG 64 +#define _NSIG_BPW 32 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +#define __ARCH_HAS_SA_RESTORER + +#include <asm/sigcontext.h> + +#endif diff --git a/kernel/arch/cris/include/asm/string.h b/kernel/arch/cris/include/asm/string.h new file mode 100644 index 000000000..d5db39f9e --- /dev/null +++ b/kernel/arch/cris/include/asm/string.h @@ -0,0 +1,20 @@ +#ifndef _ASM_CRIS_STRING_H +#define _ASM_CRIS_STRING_H + +/* the optimized memcpy is in arch/cris/lib/string.c */ + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, size_t); + +/* New and improved. In arch/cris/lib/memset.c */ + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, size_t); + +#ifdef CONFIG_ETRAX_ARCH_V32 +/* For v32 we provide strcmp. */ +#define __HAVE_ARCH_STRCMP +extern int strcmp(const char *s1, const char *s2); +#endif + +#endif diff --git a/kernel/arch/cris/include/asm/swab.h b/kernel/arch/cris/include/asm/swab.h new file mode 100644 index 000000000..991b6ace1 --- /dev/null +++ b/kernel/arch/cris/include/asm/swab.h @@ -0,0 +1,7 @@ +#ifndef _CRIS_SWAB_H +#define _CRIS_SWAB_H + +#include <arch/swab.h> +#include <uapi/asm/swab.h> + +#endif /* _CRIS_SWAB_H */ diff --git a/kernel/arch/cris/include/asm/switch_to.h b/kernel/arch/cris/include/asm/switch_to.h new file mode 100644 index 000000000..d842e1163 --- /dev/null +++ b/kernel/arch/cris/include/asm/switch_to.h @@ -0,0 +1,12 @@ +#ifndef __ASM_CRIS_SWITCH_TO_H +#define __ASM_CRIS_SWITCH_TO_H + +/* the switch_to macro calls resume, an asm function in entry.S which does the actual + * task switching. 
+ */ + +extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int); +#define switch_to(prev,next,last) last = resume(prev,next, \ + (int)&((struct task_struct *)0)->thread) + +#endif /* __ASM_CRIS_SWITCH_TO_H */ diff --git a/kernel/arch/cris/include/asm/termios.h b/kernel/arch/cris/include/asm/termios.h new file mode 100644 index 000000000..1991cd9e4 --- /dev/null +++ b/kernel/arch/cris/include/asm/termios.h @@ -0,0 +1,51 @@ +#ifndef _CRIS_TERMIOS_H +#define _CRIS_TERMIOS_H + +#include <uapi/asm/termios.h> + + +/* intr=^C quit=^\ erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ + unsigned short __tmp; \ + get_user(__tmp,&(termio)->x); \ + *(unsigned short *) &(termios)->x = __tmp; \ +} + +#define user_termio_to_kernel_termios(termios, termio) \ +({ \ + SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ + copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ +}) + +/* + * Translate a "termios" structure into a "termio". Ugh. + */ +#define kernel_termios_to_user_termio(termio, termios) \ +({ \ + put_user((termios)->c_iflag, &(termio)->c_iflag); \ + put_user((termios)->c_oflag, &(termio)->c_oflag); \ + put_user((termios)->c_cflag, &(termio)->c_cflag); \ + put_user((termios)->c_lflag, &(termio)->c_lflag); \ + put_user((termios)->c_line, &(termio)->c_line); \ + copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ +}) + +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) +#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* _CRIS_TERMIOS_H */ diff --git a/kernel/arch/cris/include/asm/thread_info.h b/kernel/arch/cris/include/asm/thread_info.h new file mode 100644 index 000000000..4ead1b40d --- /dev/null +++ b/kernel/arch/cris/include/asm/thread_info.h @@ -0,0 +1,85 @@ +/* thread_info.h: CRIS low-level thread information + * + * Copyright (C) 2002 David Howells (dhowells@redhat.com) + * - Incorporating suggestions made by Linus Torvalds and Dave Miller + * + * CRIS port by Axis Communications + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include <asm/types.h> +#include <asm/processor.h> +#include <arch/thread_info.h> +#include <asm/segment.h> +#endif + + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants must also be changed + */ +#ifndef __ASSEMBLY__ +struct thread_info { + struct task_struct *task; /* main task structure */ + unsigned long flags; /* low level flags */ + __u32 cpu; /* current CPU */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ + __u32 tls; /* TLS for this thread */ + + mm_segment_t addr_limit; /* thread address space: + 0-0xBFFFFFFF for user-thead + 
0-0xFFFFFFFF for kernel-thread + */ + __u8 supervisor_stack[0]; +}; + +#endif + +/* + * macros/functions for gaining access to the thread information structure + */ +#ifndef __ASSEMBLY__ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .addr_limit = KERNEL_DS, \ +} + +#define init_thread_info (init_thread_union.thread_info) + +#endif /* !__ASSEMBLY__ */ + +/* + * thread information flags + * - these are process state flags that various assembly files may need to access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ + +#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) +#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) + +#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ +#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_THREAD_INFO_H */ diff --git a/kernel/arch/cris/include/asm/timex.h b/kernel/arch/cris/include/asm/timex.h new file mode 100644 index 000000000..980924ae7 --- /dev/null +++ b/kernel/arch/cris/include/asm/timex.h @@ -0,0 +1,24 @@ +/* + * linux/include/asm-cris/timex.h + * + * CRIS architecture timex specifications + */ + +#ifndef _ASM_CRIS_TIMEX_H +#define _ASM_CRIS_TIMEX_H + +#include <arch/timex.h> + +/* + * We don't have a cycle-counter.. but we do not support SMP anyway where this is + * used so it does not matter. + */ + +typedef unsigned long long cycles_t; + +static inline cycles_t get_cycles(void) +{ + return 0; +} + +#endif diff --git a/kernel/arch/cris/include/asm/tlb.h b/kernel/arch/cris/include/asm/tlb.h new file mode 100644 index 000000000..77384ea2f --- /dev/null +++ b/kernel/arch/cris/include/asm/tlb.h @@ -0,0 +1,19 @@ +#ifndef _CRIS_TLB_H +#define _CRIS_TLB_H + +#include <linux/pagemap.h> + +#include <arch/tlb.h> + +/* + * cris doesn't need any special per-pte or + * per-vma handling.. 
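 *
 * With every per-entry hook below defined as a no-op, the generic
 * mmu_gather code from <asm-generic/tlb.h> just batches the pages and the
 * final flush collapses into a single flush_tlb_mm(). Roughly (an
 * editorial sketch of the generic call sequence, not code from this
 * header; mm, vma, start and end are the obvious operands):
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_vmas(&tlb, vma, start, end);	// __tlb_remove_tlb_entry() is a no-op
 *	tlb_finish_mmu(&tlb, start, end);	// ends up in tlb_flush() -> flush_tlb_mm(mm)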
+ */ +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) +#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) +#include <asm-generic/tlb.h> + +#endif diff --git a/kernel/arch/cris/include/asm/tlbflush.h b/kernel/arch/cris/include/asm/tlbflush.h new file mode 100644 index 000000000..b424f43a9 --- /dev/null +++ b/kernel/arch/cris/include/asm/tlbflush.h @@ -0,0 +1,41 @@ +#ifndef _CRIS_TLBFLUSH_H +#define _CRIS_TLBFLUSH_H + +#include <linux/mm.h> +#include <asm/processor.h> +#include <asm/pgtable.h> +#include <asm/pgalloc.h> + +/* + * TLB flushing (implemented in arch/cris/mm/tlb.c): + * + * - flush_tlb() flushes the current mm struct TLBs + * - flush_tlb_all() flushes all processes TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(mm, start, end) flushes a range of pages + * + */ + +extern void __flush_tlb_all(void); +extern void __flush_tlb_mm(struct mm_struct *mm); +extern void __flush_tlb_page(struct vm_area_struct *vma, + unsigned long addr); + +#define flush_tlb_all __flush_tlb_all +#define flush_tlb_mm __flush_tlb_mm +#define flush_tlb_page __flush_tlb_page + +static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) +{ + flush_tlb_mm(vma->vm_mm); +} + +static inline void flush_tlb(void) +{ + flush_tlb_mm(current->mm); +} + +#define flush_tlb_kernel_range(start, end) flush_tlb_all() + +#endif /* _CRIS_TLBFLUSH_H */ diff --git a/kernel/arch/cris/include/asm/types.h b/kernel/arch/cris/include/asm/types.h new file mode 100644 index 000000000..a3cac7757 --- /dev/null +++ b/kernel/arch/cris/include/asm/types.h @@ -0,0 +1,12 @@ +#ifndef _ETRAX_TYPES_H +#define _ETRAX_TYPES_H + +#include <uapi/asm/types.h> + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ + +#define BITS_PER_LONG 32 + +#endif diff --git a/kernel/arch/cris/include/asm/uaccess.h b/kernel/arch/cris/include/asm/uaccess.h new file mode 100644 index 000000000..e3530d0f1 --- /dev/null +++ b/kernel/arch/cris/include/asm/uaccess.h @@ -0,0 +1,428 @@ +/* + * Authors: Bjorn Wesen (bjornw@axis.com) + * Hans-Peter Nilsson (hp@axis.com) + */ + +/* Asm:s have been tweaked (within the domain of correctness) to give + satisfactory results for "gcc version 2.96 20000427 (experimental)". + + Check regularly... + + Register $r9 is chosen for temporaries, being a call-clobbered register + first in line to be used (notably for local blocks), not colliding with + parameter registers. */ + +#ifndef _CRIS_UACCESS_H +#define _CRIS_UACCESS_H + +#ifndef __ASSEMBLY__ +#include <linux/sched.h> +#include <linux/errno.h> +#include <asm/processor.h> +#include <asm/page.h> + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons, these macros are grossly misnamed. + */ + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +/* addr_limit is the maximum accessible address for the task. we misuse + * the KERNEL_DS and USER_DS values to both assign and compare the + * addr_limit values through the equally misnamed get/set_fs macros. 
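 *
 * For illustration only (an editorial sketch, not part of the imported
 * header): kernel code of this era that wants to hand a kernel buffer to
 * an interface expecting a __user pointer temporarily widens the limit
 * and restores it afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	// ... user accessors now accept kernel addresses ...
 *	set_fs(old_fs);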
+ * (see above) + */ + +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) +#define USER_DS MAKE_MM_SEG(TASK_SIZE) + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define segment_eq(a, b) ((a).seg == (b).seg) + +#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __user_ok(addr, size) \ + (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size))) +#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) +#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) + +#include <arch/uaccess.h> + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry { + unsigned long insn, fixup; +}; + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * This gets kind of ugly. We want to return _two_ values in "get_user()" + * and yet we don't want to do any pointers, because that is too much + * of a performance impact. Thus we have a few rather ugly macros here, + * and hide all the ugliness from the user. + * + * The "__xxx" versions of the user access functions are versions that + * do not verify the address space, that must have been done previously + * with a separate "access_ok()" call (this is used when we do multiple + * accesses to the same area of user memory). + * + * As we use the same address space for kernel and user data on + * CRIS, we can just do these as direct assignments. (Of course, the + * exception handling means that it's no longer "just"...) 
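 *
 * A typical use in driver code looks like this (editorial sketch, not part
 * of the imported header; "arg" is a hypothetical ioctl argument):
 *
 *	int __user *uptr = (int __user *)arg;
 *	int val;
 *
 *	if (get_user(val, uptr))	// nonzero (-EFAULT) on a bad pointer
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 *
 * The sizeof(*(ptr)) dispatch below then selects the matching move.b,
 * move.w or move.d accessor, so the "int" case above takes the 4-byte path.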
+ */ +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) + +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) + +extern long __put_user_bad(void); + +#define __put_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: \ + __put_user_asm(x, ptr, retval, "move.b"); \ + break; \ + case 2: \ + __put_user_asm(x, ptr, retval, "move.w"); \ + break; \ + case 4: \ + __put_user_asm(x, ptr, retval, "move.d"); \ + break; \ + case 8: \ + __put_user_asm_64(x, ptr, retval); \ + break; \ + default: \ + __put_user_bad(); \ + } \ +} while (0) + +#define __get_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: \ + __get_user_asm(x, ptr, retval, "move.b"); \ + break; \ + case 2: \ + __get_user_asm(x, ptr, retval, "move.w"); \ + break; \ + case 4: \ + __get_user_asm(x, ptr, retval, "move.d"); \ + break; \ + case 8: \ + __get_user_asm_64(x, ptr, retval); \ + break; \ + default: \ + (x) = __get_user_bad(); \ + } \ +} while (0) + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + long __pu_err; \ + __put_user_size((x), (ptr), (size), __pu_err); \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) *__pu_addr = (ptr); \ + if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ + __pu_err; \ +}) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct *)(x)) + + + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err, __gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT, __gu_val = 0; \ + const __typeof__(*(ptr)) *__gu_addr = (ptr); \ + if (access_ok(VERIFY_READ, __gu_addr, size)) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + +extern long __get_user_bad(void); + +/* More complex functions. 
Most are inline, but some call functions that + live in lib/usercopy.c */ + +extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n); +extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); +extern unsigned long __do_clear_user(void __user *to, unsigned long n); + +static inline unsigned long +__generic_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + return __copy_user(to, from, n); + return n; +} + +static inline unsigned long +__generic_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n)) + return __copy_user_zeroing(to, from, n); + return n; +} + +static inline unsigned long +__generic_clear_user(void __user *to, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + return __do_clear_user(to, n); + return n; +} + +static inline long +__strncpy_from_user(char *dst, const char __user *src, long count) +{ + return __do_strncpy_from_user(dst, src, count); +} + +static inline long +strncpy_from_user(char *dst, const char __user *src, long count) +{ + long res = -EFAULT; + + if (access_ok(VERIFY_READ, src, 1)) + res = __do_strncpy_from_user(dst, src, count); + return res; +} + + +/* Note that these expand awfully if made into switch constructs, so + don't do that. */ + +static inline unsigned long +__constant_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + unsigned long ret = 0; + + if (n == 0) + ; + else if (n == 1) + __asm_copy_from_user_1(to, from, ret); + else if (n == 2) + __asm_copy_from_user_2(to, from, ret); + else if (n == 3) + __asm_copy_from_user_3(to, from, ret); + else if (n == 4) + __asm_copy_from_user_4(to, from, ret); + else if (n == 5) + __asm_copy_from_user_5(to, from, ret); + else if (n == 6) + __asm_copy_from_user_6(to, from, ret); + else if (n == 7) + __asm_copy_from_user_7(to, from, ret); + else if (n == 8) + __asm_copy_from_user_8(to, from, ret); + else if (n == 9) + __asm_copy_from_user_9(to, from, ret); + else if (n == 10) + __asm_copy_from_user_10(to, from, ret); + else if (n == 11) + __asm_copy_from_user_11(to, from, ret); + else if (n == 12) + __asm_copy_from_user_12(to, from, ret); + else if (n == 13) + __asm_copy_from_user_13(to, from, ret); + else if (n == 14) + __asm_copy_from_user_14(to, from, ret); + else if (n == 15) + __asm_copy_from_user_15(to, from, ret); + else if (n == 16) + __asm_copy_from_user_16(to, from, ret); + else if (n == 20) + __asm_copy_from_user_20(to, from, ret); + else if (n == 24) + __asm_copy_from_user_24(to, from, ret); + else + ret = __generic_copy_from_user(to, from, n); + + return ret; +} + +/* Ditto, don't make a switch out of this. 
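 *
 * (Editorial sketch, not part of the imported header.) The net effect of
 * these constant-size variants is that a copy whose length is known at
 * compile time gets the unrolled __asm_copy_*_N path, while a runtime
 * length falls back to the generic routine; "struct foo", "k", "buf",
 * "ubuf" and "len" are hypothetical:
 *
 *	struct foo k;
 *
 *	if (copy_from_user(&k, ubuf, sizeof(k)))	// constant size: unrolled
 *		return -EFAULT;
 *	if (copy_from_user(buf, ubuf, len))		// variable size: __generic_copy_from_user()
 *		return -EFAULT;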
*/ + +static inline unsigned long +__constant_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + unsigned long ret = 0; + + if (n == 0) + ; + else if (n == 1) + __asm_copy_to_user_1(to, from, ret); + else if (n == 2) + __asm_copy_to_user_2(to, from, ret); + else if (n == 3) + __asm_copy_to_user_3(to, from, ret); + else if (n == 4) + __asm_copy_to_user_4(to, from, ret); + else if (n == 5) + __asm_copy_to_user_5(to, from, ret); + else if (n == 6) + __asm_copy_to_user_6(to, from, ret); + else if (n == 7) + __asm_copy_to_user_7(to, from, ret); + else if (n == 8) + __asm_copy_to_user_8(to, from, ret); + else if (n == 9) + __asm_copy_to_user_9(to, from, ret); + else if (n == 10) + __asm_copy_to_user_10(to, from, ret); + else if (n == 11) + __asm_copy_to_user_11(to, from, ret); + else if (n == 12) + __asm_copy_to_user_12(to, from, ret); + else if (n == 13) + __asm_copy_to_user_13(to, from, ret); + else if (n == 14) + __asm_copy_to_user_14(to, from, ret); + else if (n == 15) + __asm_copy_to_user_15(to, from, ret); + else if (n == 16) + __asm_copy_to_user_16(to, from, ret); + else if (n == 20) + __asm_copy_to_user_20(to, from, ret); + else if (n == 24) + __asm_copy_to_user_24(to, from, ret); + else + ret = __generic_copy_to_user(to, from, n); + + return ret; +} + +/* No switch, please. */ + +static inline unsigned long +__constant_clear_user(void __user *to, unsigned long n) +{ + unsigned long ret = 0; + + if (n == 0) + ; + else if (n == 1) + __asm_clear_1(to, ret); + else if (n == 2) + __asm_clear_2(to, ret); + else if (n == 3) + __asm_clear_3(to, ret); + else if (n == 4) + __asm_clear_4(to, ret); + else if (n == 8) + __asm_clear_8(to, ret); + else if (n == 12) + __asm_clear_12(to, ret); + else if (n == 16) + __asm_clear_16(to, ret); + else if (n == 20) + __asm_clear_20(to, ret); + else if (n == 24) + __asm_clear_24(to, ret); + else + ret = __generic_clear_user(to, n); + + return ret; +} + + +#define clear_user(to, n) \ + (__builtin_constant_p(n) ? \ + __constant_clear_user(to, n) : \ + __generic_clear_user(to, n)) + +#define copy_from_user(to, from, n) \ + (__builtin_constant_p(n) ? \ + __constant_copy_from_user(to, from, n) : \ + __generic_copy_from_user(to, from, n)) + +#define copy_to_user(to, from, n) \ + (__builtin_constant_p(n) ? \ + __constant_copy_to_user(to, from, n) : \ + __generic_copy_to_user(to, from, n)) + +/* We let the __ versions of copy_from/to_user inline, because they're often + * used in fast paths and have only a small space overhead. 
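 *
 * Typical fast-path use (editorial sketch, not part of the imported
 * header; "uptr", "a" and "b" are hypothetical): verify the whole user
 * area once, then use the unchecked variants for the individual accesses:
 *
 *	u32 a, b;
 *	u32 __user *uptr = ...;
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	__copy_to_user(uptr, &a, sizeof(u32));		// no per-call access_ok()
 *	__copy_to_user(uptr + 1, &b, sizeof(u32));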
+ */ + +static inline unsigned long +__generic_copy_from_user_nocheck(void *to, const void __user *from, + unsigned long n) +{ + return __copy_user_zeroing(to, from, n); +} + +static inline unsigned long +__generic_copy_to_user_nocheck(void __user *to, const void *from, + unsigned long n) +{ + return __copy_user(to, from, n); +} + +static inline unsigned long +__generic_clear_user_nocheck(void __user *to, unsigned long n) +{ + return __do_clear_user(to, n); +} + +/* without checking */ + +#define __copy_to_user(to, from, n) \ + __generic_copy_to_user_nocheck((to), (from), (n)) +#define __copy_from_user(to, from, n) \ + __generic_copy_from_user_nocheck((to), (from), (n)) +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user +#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n)) + +#define strlen_user(str) strnlen_user((str), 0x7ffffffe) + +#endif /* __ASSEMBLY__ */ + +#endif /* _CRIS_UACCESS_H */ diff --git a/kernel/arch/cris/include/asm/ucontext.h b/kernel/arch/cris/include/asm/ucontext.h new file mode 100644 index 000000000..eed6ad5eb --- /dev/null +++ b/kernel/arch/cris/include/asm/ucontext.h @@ -0,0 +1,12 @@ +#ifndef _ASM_CRIS_UCONTEXT_H +#define _ASM_CRIS_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* !_ASM_CRIS_UCONTEXT_H */ diff --git a/kernel/arch/cris/include/asm/unaligned.h b/kernel/arch/cris/include/asm/unaligned.h new file mode 100644 index 000000000..7b3f3fec5 --- /dev/null +++ b/kernel/arch/cris/include/asm/unaligned.h @@ -0,0 +1,13 @@ +#ifndef _ASM_CRIS_UNALIGNED_H +#define _ASM_CRIS_UNALIGNED_H + +/* + * CRIS can do unaligned accesses itself. 
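 *
 * In other words get_unaligned()/put_unaligned() compile down to plain
 * little-endian loads and stores here. For illustration (editorial
 * sketch, not part of the imported header; "buf" is a hypothetical byte
 * buffer):
 *
 *	u8 *p = buf + 1;			// deliberately misaligned
 *	u32 v = get_unaligned((u32 *)p);	// ordinary 32-bit load on CRIS
 *
 *	put_unaligned(v, (u32 *)p);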
+ */ +#include <linux/unaligned/access_ok.h> +#include <linux/unaligned/generic.h> + +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le + +#endif /* _ASM_CRIS_UNALIGNED_H */ diff --git a/kernel/arch/cris/include/asm/unistd.h b/kernel/arch/cris/include/asm/unistd.h new file mode 100644 index 000000000..0f40fed1b --- /dev/null +++ b/kernel/arch/cris/include/asm/unistd.h @@ -0,0 +1,36 @@ +#ifndef _ASM_CRIS_UNISTD_H_ +#define _ASM_CRIS_UNISTD_H_ + +#include <uapi/asm/unistd.h> + + +#define NR_syscalls 360 + +#include <arch/unistd.h> + +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_OLD_STAT +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_IPC +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLD_MMAP +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE + +#endif /* _ASM_CRIS_UNISTD_H_ */ diff --git a/kernel/arch/cris/include/asm/user.h b/kernel/arch/cris/include/asm/user.h new file mode 100644 index 000000000..59147cf43 --- /dev/null +++ b/kernel/arch/cris/include/asm/user.h @@ -0,0 +1,52 @@ +#ifndef __ASM_CRIS_USER_H +#define __ASM_CRIS_USER_H + +#include <linux/types.h> +#include <asm/ptrace.h> +#include <asm/page.h> +#include <arch/user.h> + +/* + * Core file format: The core file is written in such a way that gdb + * can understand it and provide useful information to the user (under + * linux we use the `trad-core' bfd). The file contents are as follows: + * + * upage: 1 page consisting of a user struct that tells gdb + * what is present in the file. Directly after this is a + * copy of the task_struct, which is currently not used by gdb, + * but it may come in handy at some point. All of the registers + * are stored as part of the upage. The upage should always be + * only one page long. + * data: The data segment follows next. We use current->end_text to + * current->brk to pick up all of the user variables, plus any memory + * that may have been sbrk'ed. No attempt is made to determine if a + * page is demand-zero or if a page is totally unused, we just cover + * the entire range. All of the addresses are rounded in such a way + * that an integral number of pages is written. + * stack: We need the stack information in order to get a meaningful + * backtrace. We need to write the data from usp to + * current->start_stack, so we round each of these in order to be able + * to write an integer number of pages. 
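 *
 * (Editorial sketch, not part of the imported header.) The resulting
 * trad-core file is therefore laid out back to back as
 *
 *	[ UPAGES upage ][ u_dsize data pages ][ u_ssize stack pages ]
 *
 * i.e. roughly (UPAGES + u.u_dsize + u.u_ssize) * PAGE_SIZE bytes in
 * total; the HOST_*_ADDR constants below supply the matching start and
 * end addresses.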
+ */
+
+struct user {
+ struct user_regs_struct regs; /* entire machine state */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ unsigned long u_ar0; /* help gdb find registers */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_DATA_START_ADDR (u.start_data)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* __ASM_CRIS_USER_H */