Diffstat (limited to 'kernel/arch/score/include'): 93 files changed, 2952 insertions, 0 deletions
diff --git a/kernel/arch/score/include/asm/Kbuild b/kernel/arch/score/include/asm/Kbuild new file mode 100644 index 000000000..83ed116d4 --- /dev/null +++ b/kernel/arch/score/include/asm/Kbuild @@ -0,0 +1,15 @@ + +header-y += + + +generic-y += barrier.h +generic-y += clkdev.h +generic-y += cputime.h +generic-y += irq_work.h +generic-y += mcs_spinlock.h +generic-y += preempt.h +generic-y += scatterlist.h +generic-y += sections.h +generic-y += trace_clock.h +generic-y += xor.h +generic-y += serial.h diff --git a/kernel/arch/score/include/asm/asm-offsets.h b/kernel/arch/score/include/asm/asm-offsets.h new file mode 100644 index 000000000..d370ee36a --- /dev/null +++ b/kernel/arch/score/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/kernel/arch/score/include/asm/asmmacro.h b/kernel/arch/score/include/asm/asmmacro.h new file mode 100644 index 000000000..a04a54cea --- /dev/null +++ b/kernel/arch/score/include/asm/asmmacro.h @@ -0,0 +1,161 @@ +#ifndef _ASM_SCORE_ASMMACRO_H +#define _ASM_SCORE_ASMMACRO_H + +#include <asm/asm-offsets.h> + +#ifdef __ASSEMBLY__ + +.macro SAVE_ALL + mfcr r30, cr0 + mv r31, r0 + nop + /* if UMs == 1, change stack. */ + slli.c r30, r30, 28 + bpl 1f + la r31, kernelsp + lw r31, [r31] +1: + mv r30, r0 + addri r0, r31, -PT_SIZE + + sw r30, [r0, PT_R0] + .set r1 + sw r1, [r0, PT_R1] + .set nor1 + sw r2, [r0, PT_R2] + sw r3, [r0, PT_R3] + sw r4, [r0, PT_R4] + sw r5, [r0, PT_R5] + sw r6, [r0, PT_R6] + sw r7, [r0, PT_R7] + + sw r8, [r0, PT_R8] + sw r9, [r0, PT_R9] + sw r10, [r0, PT_R10] + sw r11, [r0, PT_R11] + sw r12, [r0, PT_R12] + sw r13, [r0, PT_R13] + sw r14, [r0, PT_R14] + sw r15, [r0, PT_R15] + + sw r16, [r0, PT_R16] + sw r17, [r0, PT_R17] + sw r18, [r0, PT_R18] + sw r19, [r0, PT_R19] + sw r20, [r0, PT_R20] + sw r21, [r0, PT_R21] + sw r22, [r0, PT_R22] + sw r23, [r0, PT_R23] + + sw r24, [r0, PT_R24] + sw r25, [r0, PT_R25] + sw r25, [r0, PT_R25] + sw r26, [r0, PT_R26] + sw r27, [r0, PT_R27] + + sw r28, [r0, PT_R28] + sw r29, [r0, PT_R29] + orri r28, r0, 0x1fff + li r31, 0x00001fff + xor r28, r28, r31 + + mfcehl r30, r31 + sw r30, [r0, PT_CEH] + sw r31, [r0, PT_CEL] + + mfcr r31, cr0 + sw r31, [r0, PT_PSR] + + mfcr r31, cr1 + sw r31, [r0, PT_CONDITION] + + mfcr r31, cr2 + sw r31, [r0, PT_ECR] + + mfcr r31, cr5 + srli r31, r31, 1 + slli r31, r31, 1 + sw r31, [r0, PT_EPC] +.endm + +.macro RESTORE_ALL_AND_RET + mfcr r30, cr0 + srli r30, r30, 1 + slli r30, r30, 1 + mtcr r30, cr0 + nop + nop + nop + nop + nop + + .set r1 + ldis r1, 0x00ff + and r30, r30, r1 + not r1, r1 + lw r31, [r0, PT_PSR] + and r31, r31, r1 + .set nor1 + or r31, r31, r30 + mtcr r31, cr0 + nop + nop + nop + nop + nop + + lw r30, [r0, PT_CONDITION] + mtcr r30, cr1 + nop + nop + nop + nop + nop + + lw r30, [r0, PT_CEH] + lw r31, [r0, PT_CEL] + mtcehl r30, r31 + + .set r1 + lw r1, [r0, PT_R1] + .set nor1 + lw r2, [r0, PT_R2] + lw r3, [r0, PT_R3] + lw r4, [r0, PT_R4] + lw r5, [r0, PT_R5] + lw r6, [r0, PT_R6] + lw r7, [r0, PT_R7] + + lw r8, [r0, PT_R8] + lw r9, [r0, PT_R9] + lw r10, [r0, PT_R10] + lw r11, [r0, PT_R11] + lw r12, [r0, PT_R12] + lw r13, [r0, PT_R13] + lw r14, [r0, PT_R14] + lw r15, [r0, PT_R15] + + lw r16, [r0, PT_R16] + lw r17, [r0, PT_R17] + lw r18, [r0, PT_R18] + lw r19, [r0, PT_R19] + lw r20, [r0, PT_R20] + lw r21, [r0, PT_R21] + lw r22, [r0, PT_R22] + lw r23, [r0, PT_R23] + + lw r24, [r0, PT_R24] + lw r25, [r0, PT_R25] + lw r26, [r0, PT_R26] + lw r27, [r0, PT_R27] + lw r28, [r0, PT_R28] + lw r29, [r0, PT_R29] + + lw r30, [r0, PT_EPC] + lw r0, [r0, 
PT_R0] + mtcr r30, cr5 + rte +.endm + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SCORE_ASMMACRO_H */ diff --git a/kernel/arch/score/include/asm/atomic.h b/kernel/arch/score/include/asm/atomic.h new file mode 100644 index 000000000..edf33dbde --- /dev/null +++ b/kernel/arch/score/include/asm/atomic.h @@ -0,0 +1,7 @@ +#ifndef _ASM_SCORE_ATOMIC_H +#define _ASM_SCORE_ATOMIC_H + +#include <asm/cmpxchg.h> +#include <asm-generic/atomic.h> + +#endif /* _ASM_SCORE_ATOMIC_H */ diff --git a/kernel/arch/score/include/asm/bitops.h b/kernel/arch/score/include/asm/bitops.h new file mode 100644 index 000000000..c1bf8d6d0 --- /dev/null +++ b/kernel/arch/score/include/asm/bitops.h @@ -0,0 +1,10 @@ +#ifndef _ASM_SCORE_BITOPS_H +#define _ASM_SCORE_BITOPS_H + +#include <asm/byteorder.h> /* swab32 */ +#include <asm/barrier.h> + +#include <asm-generic/bitops.h> +#include <asm-generic/bitops/__fls.h> + +#endif /* _ASM_SCORE_BITOPS_H */ diff --git a/kernel/arch/score/include/asm/bug.h b/kernel/arch/score/include/asm/bug.h new file mode 100644 index 000000000..fd7164af1 --- /dev/null +++ b/kernel/arch/score/include/asm/bug.h @@ -0,0 +1,17 @@ +#ifndef _ASM_SCORE_BUG_H +#define _ASM_SCORE_BUG_H + +#include <asm-generic/bug.h> + +struct pt_regs; +extern void __die(const char *, struct pt_regs *, const char *, + const char *, unsigned long) __attribute__((noreturn)); +extern void __die_if_kernel(const char *, struct pt_regs *, const char *, + const char *, unsigned long); + +#define die(msg, regs) \ + __die(msg, regs, __FILE__ ":", __func__, __LINE__) +#define die_if_kernel(msg, regs) \ + __die_if_kernel(msg, regs, __FILE__ ":", __func__, __LINE__) + +#endif /* _ASM_SCORE_BUG_H */ diff --git a/kernel/arch/score/include/asm/bugs.h b/kernel/arch/score/include/asm/bugs.h new file mode 100644 index 000000000..a062e1056 --- /dev/null +++ b/kernel/arch/score/include/asm/bugs.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BUGS_H +#define _ASM_SCORE_BUGS_H + +#include <asm-generic/bugs.h> + +#endif /* _ASM_SCORE_BUGS_H */ diff --git a/kernel/arch/score/include/asm/cache.h b/kernel/arch/score/include/asm/cache.h new file mode 100644 index 000000000..ae3d59f2d --- /dev/null +++ b/kernel/arch/score/include/asm/cache.h @@ -0,0 +1,7 @@ +#ifndef _ASM_SCORE_CACHE_H +#define _ASM_SCORE_CACHE_H + +#define L1_CACHE_SHIFT 4 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#endif /* _ASM_SCORE_CACHE_H */ diff --git a/kernel/arch/score/include/asm/cacheflush.h b/kernel/arch/score/include/asm/cacheflush.h new file mode 100644 index 000000000..1d545d0ce --- /dev/null +++ b/kernel/arch/score/include/asm/cacheflush.h @@ -0,0 +1,48 @@ +#ifndef _ASM_SCORE_CACHEFLUSH_H +#define _ASM_SCORE_CACHEFLUSH_H + +/* Keep includes the same across arches. 
*/ +#include <linux/mm.h> + +extern void flush_cache_all(void); +extern void flush_cache_mm(struct mm_struct *mm); +extern void flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void flush_cache_page(struct vm_area_struct *vma, + unsigned long page, unsigned long pfn); +extern void flush_cache_sigtramp(unsigned long addr); +extern void flush_icache_all(void); +extern void flush_icache_range(unsigned long start, unsigned long end); +extern void flush_dcache_range(unsigned long start, unsigned long end); +extern void flush_dcache_page(struct page *page); + +#define PG_dcache_dirty PG_arch_1 + +#define flush_cache_dup_mm(mm) do {} while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +#define flush_dcache_mmap_lock(mapping) do {} while (0) +#define flush_dcache_mmap_unlock(mapping) do {} while (0) +#define flush_cache_vmap(start, end) do {} while (0) +#define flush_cache_vunmap(start, end) do {} while (0) + +static inline void flush_icache_page(struct vm_area_struct *vma, + struct page *page) +{ + if (vma->vm_flags & VM_EXEC) { + void *v = page_address(page); + flush_icache_range((unsigned long) v, + (unsigned long) v + PAGE_SIZE); + } +} + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + if ((vma->vm_flags & VM_EXEC)) \ + flush_cache_page(vma, vaddr, page_to_pfn(page));\ + } while (0) + +#endif /* _ASM_SCORE_CACHEFLUSH_H */ diff --git a/kernel/arch/score/include/asm/checksum.h b/kernel/arch/score/include/asm/checksum.h new file mode 100644 index 000000000..961bd6401 --- /dev/null +++ b/kernel/arch/score/include/asm/checksum.h @@ -0,0 +1,244 @@ +#ifndef _ASM_SCORE_CHECKSUM_H +#define _ASM_SCORE_CHECKSUM_H + +#include <linux/in6.h> +#include <asm/uaccess.h> + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +unsigned int csum_partial(const void *buff, int len, __wsum sum); +unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, + unsigned int sum, int *csum_err); +unsigned int csum_partial_copy(const char *src, char *dst, + int len, unsigned int sum); + +/* + * this is a new version of the above that records errors it finds in *errp, + * but continues and zeros the rest of the buffer. 
+ */ + +/* + * Copy and checksum to user + */ +#define HAVE_CSUM_COPY_USER +static inline +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, + __wsum sum, int *err_ptr) +{ + sum = csum_partial(src, len, sum); + if (copy_to_user(dst, src, len)) { + *err_ptr = -EFAULT; + return (__force __wsum) -1; /* invalid checksum */ + } + return sum; +} + + +#define csum_partial_copy_nocheck csum_partial_copy +/* + * Fold a partial checksum without adding pseudo headers + */ + +static inline __sum16 csum_fold(__wsum sum) +{ + /* the while loop is unnecessary really, it's always enough with two + iterations */ + __asm__ __volatile__( + ".set volatile\n\t" + ".set\tr1\n\t" + "slli\tr1,%0, 16\n\t" + "add\t%0,%0, r1\n\t" + "cmp.c\tr1, %0\n\t" + "srli\t%0, %0, 16\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:ldi\tr30, 0xffff\n\t" + "xor\t%0, %0, r30\n\t" + "slli\t%0, %0, 16\n\t" + "srli\t%0, %0, 16\n\t" + ".set\tnor1\n\t" + ".set optimize\n\t" + : "=r" (sum) + : "0" (sum)); + return sum; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + * + * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by + * Arnt Gulbrandsen. + */ +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + unsigned int sum; + unsigned long dummy; + + __asm__ __volatile__( + ".set volatile\n\t" + ".set\tnor1\n\t" + "lw\t%0, [%1]\n\t" + "subri\t%2, %2, 4\n\t" + "slli\t%2, %2, 2\n\t" + "lw\t%3, [%1, 4]\n\t" + "add\t%2, %2, %1\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "lw\t%3, [%1, 8]\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "lw\t%3, [%1, 12]\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n" + + "1:\tlw\t%3, [%1, 16]\n\t" + "addi\t%1, 4\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "bleu\t2f\n\t" + "addi\t%0, 0x1\n" + "2:cmp.c\t%2, %1\n\t" + "bne\t1b\n\t" + + ".set\tr1\n\t" + ".set optimize\n\t" + : "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy) + : "1" (iph), "2" (ihl)); + + return csum_fold(sum); +} + +static inline __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + unsigned long tmp = (ntohs(len) << 16) + proto * 256; + __asm__ __volatile__( + ".set volatile\n\t" + "add\t%0, %0, %2\n\t" + "cmp.c\t%2, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + "add\t%0, %0, %4\n\t" + "cmp.c\t%4, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + ".set optimize\n\t" + : "=r" (sum) + : "0" (daddr), "r"(saddr), + "r" (tmp), + "r" (sum)); + return sum; +} + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static inline __sum16 +csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +static inline unsigned short ip_compute_csum(const void *buff, int len) +{ + return csum_fold(csum_partial(buff, len, 0)); +} + +#define _HAVE_ARCH_IPV6_CSUM +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) +{ + __asm__ 
__volatile__( + ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t" + "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t" + "cmp.c\t%5, %0\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %6\t\t\t# csum\n\t" + "cmp.c\t%6, %0\n\t" + "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "1:lw\t%1, [%2, 4]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%2,8]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%2, 12]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0,%1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%3, 0]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%3, 4]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%3, 8]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%3, 12]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + ".set\toptimize" + : "=r" (sum), "=r" (proto) + : "r" (saddr), "r" (daddr), + "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); + + return csum_fold(sum); +} +#endif /* _ASM_SCORE_CHECKSUM_H */ diff --git a/kernel/arch/score/include/asm/cmpxchg.h b/kernel/arch/score/include/asm/cmpxchg.h new file mode 100644 index 000000000..f384839c3 --- /dev/null +++ b/kernel/arch/score/include/asm/cmpxchg.h @@ -0,0 +1,49 @@ +#ifndef _ASM_SCORE_CMPXCHG_H +#define _ASM_SCORE_CMPXCHG_H + +#include <linux/irqflags.h> + +struct __xchg_dummy { unsigned long a[100]; }; +#define __xg(x) ((struct __xchg_dummy *)(x)) + +static inline +unsigned long __xchg(volatile unsigned long *m, unsigned long val) +{ + unsigned long retval; + unsigned long flags; + + local_irq_save(flags); + retval = *m; + *m = val; + local_irq_restore(flags); + return retval; +} + +#define xchg(ptr, v) \ + ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \ + (unsigned long)(v))) + +static inline unsigned long __cmpxchg(volatile unsigned long *m, + unsigned long old, unsigned long new) +{ + unsigned long retval; + unsigned long flags; + + local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + local_irq_restore(flags); + return retval; +} + +#define cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ + (unsigned long)(o), \ + (unsigned long)(n))) + +#define __HAVE_ARCH_CMPXCHG 1 + +#include <asm-generic/cmpxchg-local.h> + +#endif /* _ASM_SCORE_CMPXCHG_H */ diff --git a/kernel/arch/score/include/asm/current.h b/kernel/arch/score/include/asm/current.h new file mode 100644 index 000000000..16eae9cba --- /dev/null +++ b/kernel/arch/score/include/asm/current.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_CURRENT_H +#define _ASM_SCORE_CURRENT_H + +#include <asm-generic/current.h> + +#endif /* _ASM_SCORE_CURRENT_H */ diff --git a/kernel/arch/score/include/asm/delay.h b/kernel/arch/score/include/asm/delay.h new file mode 100644 index 000000000..529e49471 --- /dev/null +++ b/kernel/arch/score/include/asm/delay.h @@ -0,0 +1,28 @@ +#ifndef _ASM_SCORE_DELAY_H +#define _ASM_SCORE_DELAY_H + +#include <asm-generic/param.h> + +static inline void __delay(unsigned long loops) +{ + /* 3 cycles per loop. 
*/ + __asm__ __volatile__ ( + "1:\tsubi\t%0, 3\n\t" + "cmpz.c\t%0\n\t" + "ble\t1b\n\t" + : "=r" (loops) + : "0" (loops)); +} + +static inline void __udelay(unsigned long usecs) +{ + unsigned long loops_per_usec; + + loops_per_usec = (loops_per_jiffy * HZ) / 1000000; + + __delay(usecs * loops_per_usec); +} + +#define udelay(usecs) __udelay(usecs) + +#endif /* _ASM_SCORE_DELAY_H */ diff --git a/kernel/arch/score/include/asm/device.h b/kernel/arch/score/include/asm/device.h new file mode 100644 index 000000000..2dc7cc5d5 --- /dev/null +++ b/kernel/arch/score/include/asm/device.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_DEVICE_H +#define _ASM_SCORE_DEVICE_H + +#include <asm-generic/device.h> + +#endif /* _ASM_SCORE_DEVICE_H */ diff --git a/kernel/arch/score/include/asm/div64.h b/kernel/arch/score/include/asm/div64.h new file mode 100644 index 000000000..75fae1982 --- /dev/null +++ b/kernel/arch/score/include/asm/div64.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_DIV64_H +#define _ASM_SCORE_DIV64_H + +#include <asm-generic/div64.h> + +#endif /* _ASM_SCORE_DIV64_H */ diff --git a/kernel/arch/score/include/asm/dma.h b/kernel/arch/score/include/asm/dma.h new file mode 100644 index 000000000..9f4418529 --- /dev/null +++ b/kernel/arch/score/include/asm/dma.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_DMA_H +#define _ASM_SCORE_DMA_H + +#include <asm/io.h> + +#define MAX_DMA_ADDRESS (0) + +#endif /* _ASM_SCORE_DMA_H */ diff --git a/kernel/arch/score/include/asm/elf.h b/kernel/arch/score/include/asm/elf.h new file mode 100644 index 000000000..6a9421c69 --- /dev/null +++ b/kernel/arch/score/include/asm/elf.h @@ -0,0 +1,97 @@ +#ifndef _ASM_SCORE_ELF_H +#define _ASM_SCORE_ELF_H + +#include <linux/ptrace.h> + +#define EM_SCORE7 135 + +/* Relocation types. */ +#define R_SCORE_NONE 0 +#define R_SCORE_HI16 1 +#define R_SCORE_LO16 2 +#define R_SCORE_BCMP 3 +#define R_SCORE_24 4 +#define R_SCORE_PC19 5 +#define R_SCORE16_11 6 +#define R_SCORE16_PC8 7 +#define R_SCORE_ABS32 8 +#define R_SCORE_ABS16 9 +#define R_SCORE_DUMMY2 10 +#define R_SCORE_GP15 11 +#define R_SCORE_GNU_VTINHERIT 12 +#define R_SCORE_GNU_VTENTRY 13 +#define R_SCORE_GOT15 14 +#define R_SCORE_GOT_LO16 15 +#define R_SCORE_CALL15 16 +#define R_SCORE_GPREL32 17 +#define R_SCORE_REL32 18 +#define R_SCORE_DUMMY_HI16 19 +#define R_SCORE_IMM30 20 +#define R_SCORE_IMM32 21 + +/* ELF register definitions */ +typedef unsigned long elf_greg_t; + +#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* Score does not have fp regs. */ +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t; + +#define elf_check_arch(x) ((x)->e_machine == EM_SCORE7) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_SCORE7 + +struct task_struct; +struct pt_regs; + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This could be done in userspace, + but it's not easy, and we've already done it here. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... 
*/ + +#define ELF_PLATFORM (NULL) + +#define ELF_PLAT_INIT(_r, load_addr) \ +do { \ + _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \ + _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \ + _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \ + _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \ + _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \ + _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \ + _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \ + _r->regs[30] = _r->regs[31] = 0; \ +} while (0) + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#ifndef ELF_ET_DYN_BASE +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#endif + +#endif /* _ASM_SCORE_ELF_H */ diff --git a/kernel/arch/score/include/asm/emergency-restart.h b/kernel/arch/score/include/asm/emergency-restart.h new file mode 100644 index 000000000..ca31e9803 --- /dev/null +++ b/kernel/arch/score/include/asm/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_EMERGENCY_RESTART_H +#define _ASM_SCORE_EMERGENCY_RESTART_H + +#include <asm-generic/emergency-restart.h> + +#endif /* _ASM_SCORE_EMERGENCY_RESTART_H */ diff --git a/kernel/arch/score/include/asm/exec.h b/kernel/arch/score/include/asm/exec.h new file mode 100644 index 000000000..f9f3cd59c --- /dev/null +++ b/kernel/arch/score/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_EXEC_H +#define _ASM_SCORE_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* _ASM_SCORE_EXEC_H */ diff --git a/kernel/arch/score/include/asm/fixmap.h b/kernel/arch/score/include/asm/fixmap.h new file mode 100644 index 000000000..ee1676694 --- /dev/null +++ b/kernel/arch/score/include/asm/fixmap.h @@ -0,0 +1,82 @@ +#ifndef _ASM_SCORE_FIXMAP_H +#define _ASM_SCORE_FIXMAP_H + +#include <asm/page.h> + +#define PHY_RAM_BASE 0x00000000 +#define PHY_IO_BASE 0x10000000 + +#define VIRTUAL_RAM_BASE 0xa0000000 +#define VIRTUAL_IO_BASE 0xb0000000 + +#define RAM_SPACE_SIZE 0x10000000 +#define IO_SPACE_SIZE 0x10000000 + +/* Kernel unmapped, cached 512MB */ +#define KSEG1 0xa0000000 + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * these 'compile-time allocated' memory buffers are + * fixed-size 4k pages. (or larger if used with an increment + * highger than 1) use fixmap_set(idx,phys) to associate + * physical memory with fixmap indices. + * + * TLB entries of such buffers will not be flushed across + * task switches. + */ + +/* + * on UP currently we will have no trace of the fixmap mechanizm, + * no page table allocations, etc. This might change in the + * future, say framebuffers for the console driver(s) could be + * fix-mapped? + */ +enum fixed_addresses { +#define FIX_N_COLOURS 8 + FIX_CMAP_BEGIN, + FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, + __end_of_fixed_addresses +}; + +/* + * used by vmalloc.c. 
+ * + * Leave one empty page between vmalloc'ed areas and + * the start of the fixmap, and leave one page empty + * at the top of mem.. + */ +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000) +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) \ + ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT) + +extern void __this_fixmap_does_not_exist(void); + +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without tranlation, we catch the bug with a NULL-deference + * kernel oops. Illegal ranges of incoming indices are caught too. + */ +static inline unsigned long fix_to_virt(const unsigned int idx) +{ + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + return __virt_to_fix(vaddr); +} + +#endif /* _ASM_SCORE_FIXMAP_H */ diff --git a/kernel/arch/score/include/asm/ftrace.h b/kernel/arch/score/include/asm/ftrace.h new file mode 100644 index 000000000..79d6f10e1 --- /dev/null +++ b/kernel/arch/score/include/asm/ftrace.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_FTRACE_H +#define _ASM_SCORE_FTRACE_H + +#endif /* _ASM_SCORE_FTRACE_H */ diff --git a/kernel/arch/score/include/asm/futex.h b/kernel/arch/score/include/asm/futex.h new file mode 100644 index 000000000..1dca2420f --- /dev/null +++ b/kernel/arch/score/include/asm/futex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_FUTEX_H +#define _ASM_SCORE_FUTEX_H + +#include <asm-generic/futex.h> + +#endif /* _ASM_SCORE_FUTEX_H */ diff --git a/kernel/arch/score/include/asm/hardirq.h b/kernel/arch/score/include/asm/hardirq.h new file mode 100644 index 000000000..dc932c50d --- /dev/null +++ b/kernel/arch/score/include/asm/hardirq.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_HARDIRQ_H +#define _ASM_SCORE_HARDIRQ_H + +#include <asm-generic/hardirq.h> + +#endif /* _ASM_SCORE_HARDIRQ_H */ diff --git a/kernel/arch/score/include/asm/hw_irq.h b/kernel/arch/score/include/asm/hw_irq.h new file mode 100644 index 000000000..4caafb2b5 --- /dev/null +++ b/kernel/arch/score/include/asm/hw_irq.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_HW_IRQ_H +#define _ASM_SCORE_HW_IRQ_H + +#endif /* _ASM_SCORE_HW_IRQ_H */ diff --git a/kernel/arch/score/include/asm/io.h b/kernel/arch/score/include/asm/io.h new file mode 100644 index 000000000..574c8827a --- /dev/null +++ b/kernel/arch/score/include/asm/io.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_IO_H +#define _ASM_SCORE_IO_H + +#include <asm-generic/io.h> + +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt +#endif /* _ASM_SCORE_IO_H */ diff --git a/kernel/arch/score/include/asm/irq.h b/kernel/arch/score/include/asm/irq.h new file mode 100644 index 000000000..c883f3df3 --- /dev/null +++ b/kernel/arch/score/include/asm/irq.h @@ -0,0 +1,25 @@ +#ifndef _ASM_SCORE_IRQ_H +#define _ASM_SCORE_IRQ_H + +#define EXCEPTION_VECTOR_BASE_ADDR 0xa0000000 +#define VECTOR_ADDRESS_OFFSET_MODE4 0 +#define VECTOR_ADDRESS_OFFSET_MODE16 1 + +#define DEBUG_VECTOR_SIZE (0x4) +#define DEBUG_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x1fc) + +#define GENERAL_VECTOR_SIZE (0x10) +#define GENERAL_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x200) + +#define NR_IRQS 64 +#define IRQ_VECTOR_SIZE (0x10) +#define IRQ_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x210) +#define IRQ_VECTOR_END_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x5f0) + +#define irq_canonicalize(irq) (irq) + +#define IRQ_TIMER (7) /* Timer IRQ number of 
SPCT6600 */ + +extern void interrupt_exception_vector(void); + +#endif /* _ASM_SCORE_IRQ_H */ diff --git a/kernel/arch/score/include/asm/irq_regs.h b/kernel/arch/score/include/asm/irq_regs.h new file mode 100644 index 000000000..b8e881c9a --- /dev/null +++ b/kernel/arch/score/include/asm/irq_regs.h @@ -0,0 +1,11 @@ +#ifndef _ASM_SCORE_IRQ_REGS_H +#define _ASM_SCORE_IRQ_REGS_H + +#include <linux/thread_info.h> + +static inline struct pt_regs *get_irq_regs(void) +{ + return current_thread_info()->regs; +} + +#endif /* _ASM_SCORE_IRQ_REGS_H */ diff --git a/kernel/arch/score/include/asm/irqflags.h b/kernel/arch/score/include/asm/irqflags.h new file mode 100644 index 000000000..37c6ac9dd --- /dev/null +++ b/kernel/arch/score/include/asm/irqflags.h @@ -0,0 +1,120 @@ +#ifndef _ASM_SCORE_IRQFLAGS_H +#define _ASM_SCORE_IRQFLAGS_H + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + + asm volatile( + " mfcr r8, cr0 \n" + " nop \n" + " nop \n" + " mv %0, r8 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " ldi r9, 0x1 \n" + " and %0, %0, r9 \n" + : "=r" (flags) + : + : "r8", "r9"); + return flags; +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + asm volatile( + " mfcr r8, cr0 \n" + " li r9, 0xfffffffe \n" + " nop \n" + " mv %0, r8 \n" + " and r8, r8, r9 \n" + " mtcr r8, cr0 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + : "=r" (flags) + : + : "r8", "r9", "memory"); + + return flags; +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile( + " mfcr r8, cr0 \n" + " ldi r9, 0x1 \n" + " and %0, %0, r9 \n" + " or r8, r8, %0 \n" + " mtcr r8, cr0 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + : + : "r"(flags) + : "r8", "r9", "memory"); +} + +static inline void arch_local_irq_enable(void) +{ + asm volatile( + " mfcr r8,cr0 \n" + " nop \n" + " nop \n" + " ori r8,0x1 \n" + " mtcr r8,cr0 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + : + : + : "r8", "memory"); +} + +static inline void arch_local_irq_disable(void) +{ + asm volatile( + " mfcr r8,cr0 \n" + " nop \n" + " nop \n" + " srli r8,r8,1 \n" + " slli r8,r8,1 \n" + " mtcr r8,cr0 \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + : + : + : "r8", "memory"); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & 1); +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SCORE_IRQFLAGS_H */ diff --git a/kernel/arch/score/include/asm/kdebug.h b/kernel/arch/score/include/asm/kdebug.h new file mode 100644 index 000000000..a666e513f --- /dev/null +++ b/kernel/arch/score/include/asm/kdebug.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_KDEBUG_H +#define _ASM_SCORE_KDEBUG_H + +#include <asm-generic/kdebug.h> + +#endif /* _ASM_SCORE_KDEBUG_H */ diff --git a/kernel/arch/score/include/asm/kmap_types.h b/kernel/arch/score/include/asm/kmap_types.h new file mode 100644 index 000000000..6c46eb507 --- /dev/null +++ b/kernel/arch/score/include/asm/kmap_types.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_KMAP_TYPES_H +#define _ASM_SCORE_KMAP_TYPES_H + +#include <asm-generic/kmap_types.h> + +#endif /* _ASM_SCORE_KMAP_TYPES_H */ diff --git a/kernel/arch/score/include/asm/linkage.h b/kernel/arch/score/include/asm/linkage.h new file mode 100644 index 000000000..2323a8ecf --- /dev/null +++ 
b/kernel/arch/score/include/asm/linkage.h @@ -0,0 +1,7 @@ +#ifndef _ASM_SCORE_LINKAGE_H +#define _ASM_SCORE_LINKAGE_H + +#define __ALIGN .align 2 +#define __ALIGN_STR ".align 2" + +#endif /* _ASM_SCORE_LINKAGE_H */ diff --git a/kernel/arch/score/include/asm/local.h b/kernel/arch/score/include/asm/local.h new file mode 100644 index 000000000..7e02f13db --- /dev/null +++ b/kernel/arch/score/include/asm/local.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_LOCAL_H +#define _ASM_SCORE_LOCAL_H + +#include <asm-generic/local.h> + +#endif /* _ASM_SCORE_LOCAL_H */ diff --git a/kernel/arch/score/include/asm/local64.h b/kernel/arch/score/include/asm/local64.h new file mode 100644 index 000000000..36c93b5cc --- /dev/null +++ b/kernel/arch/score/include/asm/local64.h @@ -0,0 +1 @@ +#include <asm-generic/local64.h> diff --git a/kernel/arch/score/include/asm/mmu.h b/kernel/arch/score/include/asm/mmu.h new file mode 100644 index 000000000..676828e4c --- /dev/null +++ b/kernel/arch/score/include/asm/mmu.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MMU_H +#define _ASM_SCORE_MMU_H + +typedef unsigned long mm_context_t; + +#endif /* _ASM_SCORE_MMU_H */ diff --git a/kernel/arch/score/include/asm/mmu_context.h b/kernel/arch/score/include/asm/mmu_context.h new file mode 100644 index 000000000..2644577c9 --- /dev/null +++ b/kernel/arch/score/include/asm/mmu_context.h @@ -0,0 +1,113 @@ +#ifndef _ASM_SCORE_MMU_CONTEXT_H +#define _ASM_SCORE_MMU_CONTEXT_H + +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <asm-generic/mm_hooks.h> + +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> +#include <asm/scoreregs.h> + +/* + * For the fast tlb miss handlers, we keep a per cpu array of pointers + * to the current pgd for each processor. Also, the proc. id is stuffed + * into the context register. + */ +extern unsigned long asid_cache; +extern unsigned long pgd_current; + +#define TLBMISS_HANDLER_SETUP_PGD(pgd) (pgd_current = (unsigned long)(pgd)) + +#define TLBMISS_HANDLER_SETUP() \ +do { \ + write_c0_context(0); \ + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) \ +} while (0) + +/* + * All unused by hardware upper bits will be considered + * as a software asid extension. + */ +#define ASID_VERSION_MASK 0xfffff000 +#define ASID_FIRST_VERSION 0x1000 + +/* PEVN --------- VPN ---------- --ASID--- -NA- */ +/* binary: 0000 0000 0000 0000 0000 0000 0001 0000 */ +/* binary: 0000 0000 0000 0000 0000 1111 1111 0000 */ +#define ASID_INC 0x10 +#define ASID_MASK 0xff0 + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{} + +static inline void +get_new_mmu_context(struct mm_struct *mm) +{ + unsigned long asid = asid_cache + ASID_INC; + + if (!(asid & ASID_MASK)) { + local_flush_tlb_all(); /* start new asid cycle */ + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION; + } + + mm->context = asid; + asid_cache = asid; +} + +/* + * Initialize the context related info for a new mm_struct + * instance. + */ +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + mm->context = 0; + return 0; +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + if ((next->context ^ asid_cache) & ASID_VERSION_MASK) + get_new_mmu_context(next); + + pevn_set(next->context); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + local_irq_restore(flags); +} + +/* + * Destroy context related info for an mm_struct that is about + * to be put to rest. 
+ */ +static inline void destroy_context(struct mm_struct *mm) +{} + +static inline void +deactivate_mm(struct task_struct *task, struct mm_struct *mm) +{} + +/* + * After we have set current->mm to a new value, this activates + * the context for the new mm so we see the new mappings. + */ +static inline void +activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ + unsigned long flags; + + local_irq_save(flags); + get_new_mmu_context(next); + pevn_set(next->context); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + local_irq_restore(flags); +} + +#endif /* _ASM_SCORE_MMU_CONTEXT_H */ diff --git a/kernel/arch/score/include/asm/module.h b/kernel/arch/score/include/asm/module.h new file mode 100644 index 000000000..abf395bbf --- /dev/null +++ b/kernel/arch/score/include/asm/module.h @@ -0,0 +1,35 @@ +#ifndef _ASM_SCORE_MODULE_H +#define _ASM_SCORE_MODULE_H + +#include <linux/list.h> +#include <asm/uaccess.h> +#include <asm-generic/module.h> + +struct mod_arch_specific { + /* Data Bus Error exception tables */ + struct list_head dbe_list; + const struct exception_table_entry *dbe_start; + const struct exception_table_entry *dbe_end; +}; + +typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ + +/* Given an address, look for it in the exception tables. */ +#ifdef CONFIG_MODULES +const struct exception_table_entry *search_module_dbetables(unsigned long addr); +#else +static inline const struct exception_table_entry +*search_module_dbetables(unsigned long addr) +{ + return NULL; +} +#endif + +#define MODULE_PROC_FAMILY "SCORE7" +#define MODULE_KERNEL_TYPE "32BIT " +#define MODULE_KERNEL_SMTC "" + +#define MODULE_ARCH_VERMAGIC \ + MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC + +#endif /* _ASM_SCORE_MODULE_H */ diff --git a/kernel/arch/score/include/asm/mutex.h b/kernel/arch/score/include/asm/mutex.h new file mode 100644 index 000000000..10d48fe4d --- /dev/null +++ b/kernel/arch/score/include/asm/mutex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MUTEX_H +#define _ASM_SCORE_MUTEX_H + +#include <asm-generic/mutex-dec.h> + +#endif /* _ASM_SCORE_MUTEX_H */ diff --git a/kernel/arch/score/include/asm/page.h b/kernel/arch/score/include/asm/page.h new file mode 100644 index 000000000..1e9ade8e7 --- /dev/null +++ b/kernel/arch/score/include/asm/page.h @@ -0,0 +1,93 @@ +#ifndef _ASM_SCORE_PAGE_H +#define _ASM_SCORE_PAGE_H + +#include <linux/pfn.h> +#include <linux/const.h> + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT (12) +#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) +#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) + +/* align addr on a size boundary - adjust address up/down if needed */ +#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1))) +#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1))) + +/* align addr on a size boundary - adjust address up if needed */ +#define _ALIGN(addr, size) _ALIGN_UP(addr, size) + +/* + * PAGE_OFFSET -- the first address of the first page of memory. When not + * using MMU this corresponds to the first free page in physical memory (aligned + * on a page boundary). 
+ */ +#define PAGE_OFFSET (0xA0000000UL) + +#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) +#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) +#define copy_user_page(vto, vfrom, vaddr, topg) \ + memcpy((vto), (vfrom), PAGE_SIZE) + +/* + * These are used to make use of C type-checking.. + */ + +typedef struct { unsigned long pte; } pte_t; /* page table entry */ +typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */ +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) + +extern unsigned long max_low_pfn; +extern unsigned long min_low_pfn; +extern unsigned long max_pfn; + +#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) +#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) + +#define phys_to_pfn(phys) (PFN_DOWN(phys)) +#define pfn_to_phys(pfn) (PFN_PHYS(pfn)) + +#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) +#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) + +#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) +#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) + +#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) +#define page_to_bus(page) (page_to_phys(page)) +#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) + +#define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn)) + +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) + +#endif /* __ASSEMBLY__ */ + +#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) + +#endif /* __KERNEL__ */ + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* _ASM_SCORE_PAGE_H */ diff --git a/kernel/arch/score/include/asm/pci.h b/kernel/arch/score/include/asm/pci.h new file mode 100644 index 000000000..3f3cfd825 --- /dev/null +++ b/kernel/arch/score/include/asm/pci.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_PCI_H +#define _ASM_SCORE_PCI_H + +#endif /* _ASM_SCORE_PCI_H */ diff --git a/kernel/arch/score/include/asm/percpu.h b/kernel/arch/score/include/asm/percpu.h new file mode 100644 index 000000000..e7bd4e05b --- /dev/null +++ b/kernel/arch/score/include/asm/percpu.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_PERCPU_H +#define _ASM_SCORE_PERCPU_H + +#include <asm-generic/percpu.h> + +#endif /* _ASM_SCORE_PERCPU_H */ diff --git a/kernel/arch/score/include/asm/pgalloc.h b/kernel/arch/score/include/asm/pgalloc.h new file mode 100644 index 000000000..2e067657d --- /dev/null +++ b/kernel/arch/score/include/asm/pgalloc.h @@ -0,0 +1,86 @@ +#ifndef _ASM_SCORE_PGALLOC_H +#define _ASM_SCORE_PGALLOC_H + +#include <linux/mm.h> +#include <linux/highmem.h> +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + set_pmd(pmd, __pmd((unsigned long)pte)); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte) +{ + set_pmd(pmd, __pmd((unsigned long)page_address(pte))); +} + +#define pmd_pgtable(pmd) pmd_page(pmd) + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret, *init; + + ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + if (ret) { + init = pgd_offset(&init_mm, 0UL); + pgd_init((unsigned long)ret); + memcpy(ret + 
USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + } + + return ret; +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_pages((unsigned long)pgd, PGD_ORDER); +} + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + pte_t *pte; + + pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, + PTE_ORDER); + + return pte; +} + +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *pte; + + pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); + if (!pte) + return NULL; + clear_highpage(pte); + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } + return pte; +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_pages((unsigned long)pte, PTE_ORDER); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + pgtable_page_dtor(pte); + __free_pages(pte, PTE_ORDER); +} + +#define __pte_free_tlb(tlb, pte, buf) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page((tlb), pte); \ +} while (0) + +#define check_pgt_cache() do {} while (0) + +#endif /* _ASM_SCORE_PGALLOC_H */ diff --git a/kernel/arch/score/include/asm/pgtable-bits.h b/kernel/arch/score/include/asm/pgtable-bits.h new file mode 100644 index 000000000..0e5c6f466 --- /dev/null +++ b/kernel/arch/score/include/asm/pgtable-bits.h @@ -0,0 +1,24 @@ +#ifndef _ASM_SCORE_PGTABLE_BITS_H +#define _ASM_SCORE_PGTABLE_BITS_H + +#define _PAGE_ACCESSED (1<<5) /* implemented in software */ +#define _PAGE_READ (1<<6) /* implemented in software */ +#define _PAGE_WRITE (1<<7) /* implemented in software */ +#define _PAGE_PRESENT (1<<9) /* implemented in software */ +#define _PAGE_MODIFIED (1<<10) /* implemented in software */ + +#define _PAGE_GLOBAL (1<<0) +#define _PAGE_VALID (1<<1) +#define _PAGE_SILENT_READ (1<<1) /* synonym */ +#define _PAGE_DIRTY (1<<2) /* Write bit */ +#define _PAGE_SILENT_WRITE (1<<2) +#define _PAGE_CACHE (1<<3) /* cache */ +#define _CACHE_MASK (1<<3) +#define _PAGE_BUFFERABLE (1<<4) /*Fallow Spec. */ + +#define __READABLE (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED) +#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) +#define _PAGE_CHG_MASK \ + (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE) + +#endif /* _ASM_SCORE_PGTABLE_BITS_H */ diff --git a/kernel/arch/score/include/asm/pgtable.h b/kernel/arch/score/include/asm/pgtable.h new file mode 100644 index 000000000..0553e5cd5 --- /dev/null +++ b/kernel/arch/score/include/asm/pgtable.h @@ -0,0 +1,268 @@ +#ifndef _ASM_SCORE_PGTABLE_H +#define _ASM_SCORE_PGTABLE_H + +#include <linux/const.h> +#include <asm-generic/pgtable-nopmd.h> + +#include <asm/fixmap.h> +#include <asm/setup.h> +#include <asm/pgtable-bits.h> + +extern void load_pgd(unsigned long pg_dir); +extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)]; + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ +#define PGDIR_SHIFT 22 +#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) + +/* + * Entries per page directory level: we use two-level, so + * we don't really have any PUD/PMD directory physically. 
+ */ +#define PGD_ORDER 0 +#define PTE_ORDER 0 + +#define PTRS_PER_PGD 1024 +#define PTRS_PER_PTE 1024 + +#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0UL + +#define VMALLOC_START (0xc0000000UL) + +#define PKMAP_BASE (0xfd000000UL) + +#define VMALLOC_END (FIXADDR_START - 2*PAGE_SIZE) + +#define pte_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \ + __FILE__, __LINE__, pte_val(e)) +#define pgd_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \ + __FILE__, __LINE__, pgd_val(e)) + +/* + * Empty pgd/pmd entries point to the invalid_pte_table. + */ +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == (unsigned long) invalid_pte_table; +} + +#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) + +static inline int pmd_present(pmd_t pmd) +{ + return pmd_val(pmd) != (unsigned long) invalid_pte_table; +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); +} + +#define pte_page(x) pfn_to_page(pte_pfn(x)) +#define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) +#define pfn_pte(pfn, prot) \ + __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) + +#define __pgd_offset(address) pgd_index(address) +#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) + +/* to find an entry in a page-table-directory */ +#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) + +/* Find an entry in the third-level page table.. */ +#define __pte_offset(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + +#define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) +#define pte_unmap(pte) ((void)(pte)) + +#define __pte_to_swp_entry(pte) \ + ((swp_entry_t) { pte_val(pte)}) +#define __swp_entry_to_pte(x) ((pte_t) {(x).val}) + +#define pmd_phys(pmd) __pa((void *)pmd_val(pmd)) +#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +#define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) +#define pte_clear(mm, addr, xp) \ + do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) + +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pgd is never bad, and a pmd always exists (as it's folded + * into the pgd entry) + */ +#define pgd_present(pgd) (1) +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_clear(pgdp) do { } while (0) + +#define kern_addr_valid(addr) (1) +#define pmd_page_vaddr(pmd) pmd_val(pmd) + +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) + +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CACHE) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_CACHE) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE) 
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _PAGE_CACHE) +#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ + __WRITEABLE | _PAGE_GLOBAL & ~_PAGE_CACHE) + +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY +#define __P110 PAGE_COPY +#define __P111 PAGE_COPY + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED + +#define pgprot_noncached pgprot_noncached + +static inline pgprot_t pgprot_noncached(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot = (prot & ~_CACHE_MASK); + + return __pgprot(prot); +} + +#define __swp_type(x) ((x).val & 0x1f) +#define __swp_offset(x) ((x).val >> 10) +#define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 10)}) + +extern unsigned long empty_zero_page; +extern unsigned long zero_page_mask; + +#define ZERO_PAGE(vaddr) \ + (virt_to_page((void *)(empty_zero_page + \ + (((unsigned long)(vaddr)) & zero_page_mask)))) + +#define pgtable_cache_init() do {} while (0) + +#define arch_enter_lazy_cpu_mode() do {} while (0) + +static inline int pte_write(pte_t pte) +{ + return pte_val(pte) & _PAGE_WRITE; +} + +static inline int pte_dirty(pte_t pte) +{ + return pte_val(pte) & _PAGE_MODIFIED; +} + +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + +#define pte_special(pte) (0) + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); + return pte; +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + if (pte_val(pte) & _PAGE_MODIFIED) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_MODIFIED; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + if (pte_val(pte) & _PAGE_READ) + pte_val(pte) |= _PAGE_SILENT_READ; + return pte; +} + +#define set_pmd(pmdptr, pmdval) \ + do { *(pmdptr) = (pmdval); } while (0) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) + +extern unsigned long pgd_current; +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern void paging_init(void); + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +extern void __update_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t pte); +extern void __update_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte); + +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + pte_t pte = *ptep; + __update_tlb(vma, address, pte); + __update_cache(vma, address, pte); +} + +#ifndef __ASSEMBLY__ +#include <asm-generic/pgtable.h> + +void setup_memory(void); +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SCORE_PGTABLE_H */ diff --git a/kernel/arch/score/include/asm/processor.h b/kernel/arch/score/include/asm/processor.h new 
file mode 100644 index 000000000..851f44199 --- /dev/null +++ b/kernel/arch/score/include/asm/processor.h @@ -0,0 +1,105 @@ +#ifndef _ASM_SCORE_PROCESSOR_H +#define _ASM_SCORE_PROCESSOR_H + +#include <linux/cpumask.h> +#include <linux/threads.h> + +#include <asm/segment.h> + +struct task_struct; + +/* + * System setup and hardware flags.. + */ +extern void (*cpu_wait)(void); + +extern unsigned long thread_saved_pc(struct task_struct *tsk); +extern void start_thread(struct pt_regs *regs, + unsigned long pc, unsigned long sp); +extern unsigned long get_wchan(struct task_struct *p); + +/* + * Return current * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ __label__ _l; _l: &&_l; }) + +#define cpu_relax() barrier() +#define cpu_relax_lowlatency() cpu_relax() +#define release_thread(thread) do {} while (0) + +/* + * User space process size: 2GB. This is hardcoded into a few places, + * so don't change it unless you know what you are doing. + */ +#define TASK_SIZE 0x7fff8000UL + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE)) + +#ifdef __KERNEL__ +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE +#endif + +/* + * If you change thread_struct remember to change the #defines below too! + */ +struct thread_struct { + unsigned long reg0, reg2, reg3; + unsigned long reg12, reg13, reg14, reg15, reg16; + unsigned long reg17, reg18, reg19, reg20, reg21; + + unsigned long cp0_psr; + unsigned long cp0_ema; /* Last user fault */ + unsigned long cp0_badvaddr; /* Last user fault */ + unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */ + unsigned long error_code; + unsigned long trap_no; + + unsigned long mflags; + unsigned long reg29; + + unsigned long single_step; + unsigned long ss_nextcnt; + + unsigned long insn1_type; + unsigned long addr1; + unsigned long insn1; + + unsigned long insn2_type; + unsigned long addr2; + unsigned long insn2; + + mm_segment_t current_ds; +}; + +#define INIT_THREAD { \ + .reg0 = 0, \ + .reg2 = 0, \ + .reg3 = 0, \ + .reg12 = 0, \ + .reg13 = 0, \ + .reg14 = 0, \ + .reg15 = 0, \ + .reg16 = 0, \ + .reg17 = 0, \ + .reg18 = 0, \ + .reg19 = 0, \ + .reg20 = 0, \ + .reg21 = 0, \ + .cp0_psr = 0, \ + .error_code = 0, \ + .trap_no = 0, \ +} + +#define kstk_tos(tsk) \ + ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32) +#define task_pt_regs(tsk) ((struct pt_regs *)kstk_tos(tsk) - 1) + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) + +#endif /* _ASM_SCORE_PROCESSOR_H */ diff --git a/kernel/arch/score/include/asm/ptrace.h b/kernel/arch/score/include/asm/ptrace.h new file mode 100644 index 000000000..abc279d96 --- /dev/null +++ b/kernel/arch/score/include/asm/ptrace.h @@ -0,0 +1,25 @@ +#ifndef _ASM_SCORE_PTRACE_H +#define _ASM_SCORE_PTRACE_H + +#include <uapi/asm/ptrace.h> + + +struct task_struct; + +/* + * Does the process account for user or for system time? 
+ */ +#define user_mode(regs) ((regs->cp0_psr & 8) == 8) + +#define instruction_pointer(regs) ((unsigned long)(regs)->cp0_epc) +#define profile_pc(regs) instruction_pointer(regs) +#define user_stack_pointer(r) ((unsigned long)(r)->regs[0]) + +extern void do_syscall_trace(struct pt_regs *regs, int entryexit); +extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *); +extern int read_tsk_short(struct task_struct *, unsigned long, + unsigned short *); + +#define arch_has_single_step() (1) + +#endif /* _ASM_SCORE_PTRACE_H */ diff --git a/kernel/arch/score/include/asm/scoreregs.h b/kernel/arch/score/include/asm/scoreregs.h new file mode 100644 index 000000000..d0ad29204 --- /dev/null +++ b/kernel/arch/score/include/asm/scoreregs.h @@ -0,0 +1,51 @@ +#ifndef _ASM_SCORE_SCOREREGS_H +#define _ASM_SCORE_SCOREREGS_H + +#include <linux/linkage.h> + +/* TIMER register */ +#define TIME0BASE 0x96080000 +#define P_TIMER0_CTRL (TIME0BASE + 0x00) +#define P_TIMER0_CPP_CTRL (TIME0BASE + 0x04) +#define P_TIMER0_PRELOAD (TIME0BASE + 0x08) +#define P_TIMER0_CPP_REG (TIME0BASE + 0x0C) +#define P_TIMER0_UPCNT (TIME0BASE + 0x10) + +/* Timer Controller Register */ +/* bit 0 Timer enable */ +#define TMR_DISABLE 0x0000 +#define TMR_ENABLE 0x0001 + +/* bit 1 Interrupt enable */ +#define TMR_IE_DISABLE 0x0000 +#define TMR_IE_ENABLE 0x0002 + +/* bit 2 Output enable */ +#define TMR_OE_DISABLE 0x0004 +#define TMR_OE_ENABLE 0x0000 + +/* bit4 Up/Down counting selection */ +#define TMR_UD_DOWN 0x0000 +#define TMR_UD_UP 0x0010 + +/* bit5 Up/Down counting control selection */ +#define TMR_UDS_UD 0x0000 +#define TMR_UDS_EXTUD 0x0020 + +/* bit6 Time output mode */ +#define TMR_OM_TOGGLE 0x0000 +#define TMR_OM_PILSE 0x0040 + +/* bit 8..9 External input active edge selection */ +#define TMR_ES_PE 0x0000 +#define TMR_ES_NE 0x0100 +#define TMR_ES_BOTH 0x0200 + +/* bit 10..11 Operating mode */ +#define TMR_M_FREE 0x0000 /* free running timer mode */ +#define TMR_M_PERIODIC 0x0400 /* periodic timer mode */ +#define TMR_M_FC 0x0800 /* free running counter mode */ +#define TMR_M_PC 0x0c00 /* periodic counter mode */ + +#define SYSTEM_CLOCK (27*1000000/4) /* 27 MHz */ +#endif /* _ASM_SCORE_SCOREREGS_H */ diff --git a/kernel/arch/score/include/asm/segment.h b/kernel/arch/score/include/asm/segment.h new file mode 100644 index 000000000..e16cf6afb --- /dev/null +++ b/kernel/arch/score/include/asm/segment.h @@ -0,0 +1,21 @@ +#ifndef _ASM_SCORE_SEGMENT_H +#define _ASM_SCORE_SEGMENT_H + +#ifndef __ASSEMBLY__ + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#define KERNEL_DS ((mm_segment_t){0}) +#define USER_DS KERNEL_DS + +# define get_ds() (KERNEL_DS) +# define get_fs() (current_thread_info()->addr_limit) +# define set_fs(x) \ + do { current_thread_info()->addr_limit = (x); } while (0) + +# define segment_eq(a, b) ((a).seg == (b).seg) + +# endif /* __ASSEMBLY__ */ +#endif /* _ASM_SCORE_SEGMENT_H */ diff --git a/kernel/arch/score/include/asm/setup.h b/kernel/arch/score/include/asm/setup.h new file mode 100644 index 000000000..1f3aa7262 --- /dev/null +++ b/kernel/arch/score/include/asm/setup.h @@ -0,0 +1,36 @@ +#ifndef _ASM_SCORE_SETUP_H +#define _ASM_SCORE_SETUP_H + +#include <uapi/asm/setup.h> + + +extern void pagetable_init(void); +extern void pgd_init(unsigned long page); + +extern void setup_early_printk(void); +extern void cpu_cache_init(void); +extern void tlb_init(void); + +extern void handle_nmi(void); +extern void handle_adelinsn(void); +extern void handle_adedata(void); +extern void 
handle_ibe(void); +extern void handle_pel(void); +extern void handle_sys(void); +extern void handle_ccu(void); +extern void handle_ri(void); +extern void handle_tr(void); +extern void handle_ades(void); +extern void handle_cee(void); +extern void handle_cpe(void); +extern void handle_dve(void); +extern void handle_dbe(void); +extern void handle_reserved(void); +extern void handle_tlb_refill(void); +extern void handle_tlb_invaild(void); +extern void handle_mod(void); +extern void debug_exception_vector(void); +extern void general_exception_vector(void); +extern void interrupt_exception_vector(void); + +#endif /* _ASM_SCORE_SETUP_H */ diff --git a/kernel/arch/score/include/asm/shmparam.h b/kernel/arch/score/include/asm/shmparam.h new file mode 100644 index 000000000..1d6081314 --- /dev/null +++ b/kernel/arch/score/include/asm/shmparam.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SHMPARAM_H +#define _ASM_SCORE_SHMPARAM_H + +#include <asm-generic/shmparam.h> + +#endif /* _ASM_SCORE_SHMPARAM_H */ diff --git a/kernel/arch/score/include/asm/string.h b/kernel/arch/score/include/asm/string.h new file mode 100644 index 000000000..8a6bf5063 --- /dev/null +++ b/kernel/arch/score/include/asm/string.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_STRING_H +#define _ASM_SCORE_STRING_H + +extern void *memset(void *__s, int __c, size_t __count); +extern void *memcpy(void *__to, __const__ void *__from, size_t __n); +extern void *memmove(void *__dest, __const__ void *__src, size_t __n); + +#endif /* _ASM_SCORE_STRING_H */ diff --git a/kernel/arch/score/include/asm/switch_to.h b/kernel/arch/score/include/asm/switch_to.h new file mode 100644 index 000000000..031756b59 --- /dev/null +++ b/kernel/arch/score/include/asm/switch_to.h @@ -0,0 +1,13 @@ +#ifndef _ASM_SCORE_SWITCH_TO_H +#define _ASM_SCORE_SWITCH_TO_H + +extern void *resume(void *last, void *next, void *next_ti); + +#define switch_to(prev, next, last) \ +do { \ + (last) = resume(prev, next, task_thread_info(next)); \ +} while (0) + +#define finish_arch_switch(prev) do {} while (0) + +#endif /* _ASM_SCORE_SWITCH_TO_H */ diff --git a/kernel/arch/score/include/asm/syscalls.h b/kernel/arch/score/include/asm/syscalls.h new file mode 100644 index 000000000..98d1df92f --- /dev/null +++ b/kernel/arch/score/include/asm/syscalls.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_SYSCALLS_H +#define _ASM_SCORE_SYSCALLS_H + +asmlinkage long score_rt_sigreturn(struct pt_regs *regs); + +#include <asm-generic/syscalls.h> + +#endif /* _ASM_SCORE_SYSCALLS_H */ diff --git a/kernel/arch/score/include/asm/thread_info.h b/kernel/arch/score/include/asm/thread_info.h new file mode 100644 index 000000000..7d9ffb15c --- /dev/null +++ b/kernel/arch/score/include/asm/thread_info.h @@ -0,0 +1,92 @@ +#ifndef _ASM_SCORE_THREAD_INFO_H +#define _ASM_SCORE_THREAD_INFO_H + +#ifdef __KERNEL__ + +#define KU_MASK 0x08 +#define KU_USER 0x08 +#define KU_KERN 0x00 + +#include <asm/page.h> +#include <linux/const.h> + +/* thread information allocation */ +#define THREAD_SIZE_ORDER (1) +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) +#define THREAD_MASK (THREAD_SIZE - _AC(1,UL)) + +#ifndef __ASSEMBLY__ + +#include <asm/processor.h> + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants + * must also be changed + */ +struct thread_info { + struct task_struct *task; /* main task structure */ + unsigned long flags; /* 
low level flags */ + unsigned long tp_value; /* thread pointer */ + __u32 cpu; /* current CPU */ + + /* 0 => preemptable, < 0 => BUG */ + int preempt_count; + + /* + * thread address space: + * 0-0xBFFFFFFF for user-thread + * 0-0xFFFFFFFF for kernel-thread + */ + mm_segment_t addr_limit; + struct pt_regs *regs; +}; + +/* + * macros/functions for gaining access to the thread information structure + * + * preempt_count needs to be 1 initially, until the scheduler is functional. + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .cpu = 0, \ + .preempt_count = 1, \ + .addr_limit = KERNEL_DS, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* How to get the thread information struct from C. */ +register struct thread_info *__current_thread_info __asm__("r28"); +#define current_thread_info() __current_thread_info + +#endif /* !__ASSEMBLY__ */ + +/* + * thread information flags + * - these are process state flags that various assembly files may need to + * access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ +#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ +#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ + +#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) +#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) + +#define _TIF_WORK_MASK (0x0000ffff) + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SCORE_THREAD_INFO_H */ diff --git a/kernel/arch/score/include/asm/timex.h b/kernel/arch/score/include/asm/timex.h new file mode 100644 index 000000000..a524ae0c5 --- /dev/null +++ b/kernel/arch/score/include/asm/timex.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_TIMEX_H +#define _ASM_SCORE_TIMEX_H + +#define CLOCK_TICK_RATE 27000000 /* Timer input freq. */ + +#include <asm-generic/timex.h> + +#endif /* _ASM_SCORE_TIMEX_H */ diff --git a/kernel/arch/score/include/asm/tlb.h b/kernel/arch/score/include/asm/tlb.h new file mode 100644 index 000000000..46882ed52 --- /dev/null +++ b/kernel/arch/score/include/asm/tlb.h @@ -0,0 +1,17 @@ +#ifndef _ASM_SCORE_TLB_H +#define _ASM_SCORE_TLB_H + +/* + * SCORE doesn't need any special per-pte or per-vma handling, except + * we need to flush the cache for the area to be unmapped. 
+ */ +#define tlb_start_vma(tlb, vma) do {} while (0) +#define tlb_end_vma(tlb, vma) do {} while (0) +#define __tlb_remove_tlb_entry(tlb, ptep, address) do {} while (0) +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +extern void score7_FTLB_refill_Handler(void); + +#include <asm-generic/tlb.h> + +#endif /* _ASM_SCORE_TLB_H */ diff --git a/kernel/arch/score/include/asm/tlbflush.h b/kernel/arch/score/include/asm/tlbflush.h new file mode 100644 index 000000000..9cce97836 --- /dev/null +++ b/kernel/arch/score/include/asm/tlbflush.h @@ -0,0 +1,142 @@ +#ifndef _ASM_SCORE_TLBFLUSH_H +#define _ASM_SCORE_TLBFLUSH_H + +#include <linux/mm.h> + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + */ +extern void local_flush_tlb_all(void); +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void local_flush_tlb_kernel_range(unsigned long start, + unsigned long end); +extern void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long page); +extern void local_flush_tlb_one(unsigned long vaddr); + +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_range(vma, vmaddr, end) \ + local_flush_tlb_range(vma, vmaddr, end) +#define flush_tlb_kernel_range(vmaddr, end) \ + local_flush_tlb_kernel_range(vmaddr, end) +#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) +#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr) + +#ifndef __ASSEMBLY__ + +static inline unsigned long pevn_get(void) +{ + unsigned long val; + + __asm__ __volatile__( + "mfcr %0, cr11\n" + "nop\nnop\n" + : "=r" (val)); + + return val; +} + +static inline void pevn_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr11\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline void pectx_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr12\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline unsigned long pectx_get(void) +{ + unsigned long val; + __asm__ __volatile__( + "mfcr %0, cr12\n" + "nop\nnop\n" + : "=r" (val)); + return val; +} +static inline unsigned long tlblock_get(void) +{ + unsigned long val; + + __asm__ __volatile__( + "mfcr %0, cr7\n" + "nop\nnop\n" + : "=r" (val)); + return val; +} +static inline void tlblock_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr7\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline void tlbpt_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr8\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline long tlbpt_get(void) +{ + long val; + + __asm__ __volatile__( + "mfcr %0, cr8\n" + "nop\nnop\n" + : "=r" (val)); + + return val; +} + +static inline void peaddr_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr9\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +/* TLB operations. 
*/ +static inline void tlb_probe(void) +{ + __asm__ __volatile__("stlb;nop;nop;nop;nop;nop"); +} + +static inline void tlb_read(void) +{ + __asm__ __volatile__("mftlb;nop;nop;nop;nop;nop"); +} + +static inline void tlb_write_indexed(void) +{ + __asm__ __volatile__("mtptlb;nop;nop;nop;nop;nop"); +} + +static inline void tlb_write_random(void) +{ + __asm__ __volatile__("mtrtlb;nop;nop;nop;nop;nop"); +} + +#endif /* Not __ASSEMBLY__ */ + +#endif /* _ASM_SCORE_TLBFLUSH_H */ diff --git a/kernel/arch/score/include/asm/topology.h b/kernel/arch/score/include/asm/topology.h new file mode 100644 index 000000000..425fba381 --- /dev/null +++ b/kernel/arch/score/include/asm/topology.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TOPOLOGY_H +#define _ASM_SCORE_TOPOLOGY_H + +#include <asm-generic/topology.h> + +#endif /* _ASM_SCORE_TOPOLOGY_H */ diff --git a/kernel/arch/score/include/asm/uaccess.h b/kernel/arch/score/include/asm/uaccess.h new file mode 100644 index 000000000..20a359122 --- /dev/null +++ b/kernel/arch/score/include/asm/uaccess.h @@ -0,0 +1,429 @@ +#ifndef __SCORE_UACCESS_H +#define __SCORE_UACCESS_H + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/thread_info.h> + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define segment_eq(a, b) ((a).seg == (b).seg) + +/* + * Is an address valid? This does a straightforward calculation rather + * than tests. + * + * Address valid if: + * - "addr" doesn't have any high-bits set + * - AND "size" doesn't have any high-bits set + * - AND "addr+size" doesn't have any high-bits set + * - OR we are in kernel mode. + * + * __ua_size() is a trick to avoid runtime checking of positive constant + * sizes; for those we already know at compile time that the size is ok. + */ +#define __ua_size(size) \ + ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size)) + +/* + * access_ok: - Checks if a user space pointer is valid + * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that + * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe + * to write to a block, it is always safe to read from it. + * @addr: User space pointer to start of block to check + * @size: Size of block to check + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * Checks if a pointer to a block of memory in user space is valid. + * + * Returns true (nonzero) if the memory block may be valid, false (zero) + * if it is definitely invalid. + * + * Note that, depending on architecture, this function probably just + * checks that the pointer is in the user space range - after calling + * this function, memory access functions may still return -EFAULT. + */ + +#define __access_ok(addr, size) \ + (((long)((get_fs().seg) & \ + ((addr) | ((addr) + (size)) | \ + __ua_size(size)))) == 0) + +#define access_ok(type, addr, size) \ + likely(__access_ok((unsigned long)(addr), (size))) + +/* + * put_user: - Write a simple value into user space. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. 
+ * + * Returns zero on success, or -EFAULT on error. + */ +#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr))) + +/* + * get_user: - Get a simple variable from user space. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) + +/* + * __put_user: - Write a simple value into user space, with less checking. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + */ +#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr))) + +/* + * __get_user: - Get a simple variable from user space, with less checking. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr))) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +/* + * Yuck. We need two variants, one for 64bit operation and one + * for 32 bit mode and old iron. 
+ */ +extern void __get_user_unknown(void); + +#define __get_user_common(val, size, ptr) \ +do { \ + switch (size) { \ + case 1: \ + __get_user_asm(val, "lb", ptr); \ + break; \ + case 2: \ + __get_user_asm(val, "lh", ptr); \ + break; \ + case 4: \ + __get_user_asm(val, "lw", ptr); \ + break; \ + case 8: \ + if ((copy_from_user((void *)&val, ptr, 8)) == 0) \ + __gu_err = 0; \ + else \ + __gu_err = -EFAULT; \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ +} while (0) + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err = 0; \ + __get_user_common((x), size, ptr); \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT; \ + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + \ + if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ + __get_user_common((x), size, __gu_ptr); \ + \ + __gu_err; \ +}) + +#define __get_user_asm(val, insn, addr) \ +{ \ + long __gu_tmp; \ + \ + __asm__ __volatile__( \ + "1:" insn " %1, %3\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:li %0, %4\n" \ + "j 2b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".word 1b, 3b\n" \ + ".previous\n" \ + : "=r" (__gu_err), "=r" (__gu_tmp) \ + : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ + \ + (val) = (__typeof__(*(addr))) __gu_tmp; \ +} + +/* + * Yuck. We need two variants, one for 64bit operation and one + * for 32 bit mode and old iron. + */ +#define __put_user_nocheck(val, ptr, size) \ +({ \ + __typeof__(*(ptr)) __pu_val; \ + long __pu_err = 0; \ + \ + __pu_val = (val); \ + switch (size) { \ + case 1: \ + __put_user_asm("sb", ptr); \ + break; \ + case 2: \ + __put_user_asm("sh", ptr); \ + break; \ + case 4: \ + __put_user_asm("sw", ptr); \ + break; \ + case 8: \ + if ((__copy_to_user((void *)ptr, &__pu_val, 8)) == 0) \ + __pu_err = 0; \ + else \ + __pu_err = -EFAULT; \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + __pu_err; \ +}) + + +#define __put_user_check(val, ptr, size) \ +({ \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + __typeof__(*(ptr)) __pu_val = (val); \ + long __pu_err = -EFAULT; \ + \ + if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ + switch (size) { \ + case 1: \ + __put_user_asm("sb", __pu_addr); \ + break; \ + case 2: \ + __put_user_asm("sh", __pu_addr); \ + break; \ + case 4: \ + __put_user_asm("sw", __pu_addr); \ + break; \ + case 8: \ + if ((__copy_to_user((void *)__pu_addr, &__pu_val, 8)) == 0)\ + __pu_err = 0; \ + else \ + __pu_err = -EFAULT; \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + } \ + __pu_err; \ +}) + +#define __put_user_asm(insn, ptr) \ + __asm__ __volatile__( \ + "1:" insn " %2, %3\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3:li %0, %4\n" \ + "j 2b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".word 1b, 3b\n" \ + ".previous\n" \ + : "=r" (__pu_err) \ + : "0" (0), "r" (__pu_val), "o" (__m(ptr)), \ + "i" (-EFAULT)); + +extern void __put_user_unknown(void); +extern int __copy_tofrom_user(void *to, const void *from, unsigned long len); + +static inline unsigned long +copy_from_user(void *to, const void *from, unsigned long len) +{ + unsigned long over; + + if (access_ok(VERIFY_READ, from, len)) + return __copy_tofrom_user(to, from, len); + + if ((unsigned long)from < TASK_SIZE) { + over = (unsigned long)from + len - TASK_SIZE; + return __copy_tofrom_user(to, from, len - over) + over; + } + return len; +} + +static inline unsigned long +copy_to_user(void *to, const void *from, unsigned long len) +{ + unsigned long 
over; + + if (access_ok(VERIFY_WRITE, to, len)) + return __copy_tofrom_user(to, from, len); + + if ((unsigned long)to < TASK_SIZE) { + over = (unsigned long)to + len - TASK_SIZE; + return __copy_tofrom_user(to, from, len - over) + over; + } + return len; +} + +#define __copy_from_user(to, from, len) \ + __copy_tofrom_user((to), (from), (len)) + +#define __copy_to_user(to, from, len) \ + __copy_tofrom_user((to), (from), (len)) + +static inline unsigned long +__copy_to_user_inatomic(void *to, const void *from, unsigned long len) +{ + return __copy_to_user(to, from, len); +} + +static inline unsigned long +__copy_from_user_inatomic(void *to, const void *from, unsigned long len) +{ + return __copy_from_user(to, from, len); +} + +#define __copy_in_user(to, from, len) __copy_from_user(to, from, len) + +static inline unsigned long +copy_in_user(void *to, const void *from, unsigned long len) +{ + if (access_ok(VERIFY_READ, from, len) && + access_ok(VERIFY_WRITE, to, len)) + return copy_from_user(to, from, len); + return len; +} + +/* + * __clear_user: - Zero a block of memory in user space, with less checking. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. + * + * Zero a block of memory in user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be cleared. + * On success, this will be zero. + */ +extern unsigned long __clear_user(void __user *src, unsigned long size); + +static inline unsigned long clear_user(char *src, unsigned long size) +{ + if (access_ok(VERIFY_WRITE, src, size)) + return __clear_user(src, size); + + return -EFAULT; +} +/* + * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @src: Source address, in user space. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from userspace to kernel space. + * Caller must check the specified block with access_ok() before calling + * this function. + * + * On success, returns the length of the string (not including the trailing + * NUL). + * + * If access to userspace fails, returns -EFAULT (some data may have been + * copied). + * + * If @count is smaller than the length of the string, copies @count bytes + * and returns @count. 
+ */ +extern int __strncpy_from_user(char *dst, const char *src, long len); + +static inline int strncpy_from_user(char *dst, const char *src, long len) +{ + if (access_ok(VERIFY_READ, src, 1)) + return __strncpy_from_user(dst, src, len); + + return -EFAULT; +} + +extern int __strlen_user(const char *src); +static inline long strlen_user(const char __user *src) +{ + return __strlen_user(src); +} + +extern int __strnlen_user(const char *str, long len); +static inline long strnlen_user(const char __user *str, long len) +{ + if (!access_ok(VERIFY_READ, str, 0)) + return 0; + else + return __strnlen_user(str, len); +} + +struct exception_table_entry { + unsigned long insn; + unsigned long fixup; +}; + +extern int fixup_exception(struct pt_regs *regs); + +#endif /* __SCORE_UACCESS_H */ + diff --git a/kernel/arch/score/include/asm/ucontext.h b/kernel/arch/score/include/asm/ucontext.h new file mode 100644 index 000000000..9bc07b9f3 --- /dev/null +++ b/kernel/arch/score/include/asm/ucontext.h @@ -0,0 +1 @@ +#include <asm-generic/ucontext.h> diff --git a/kernel/arch/score/include/asm/unaligned.h b/kernel/arch/score/include/asm/unaligned.h new file mode 100644 index 000000000..2fc06de51 --- /dev/null +++ b/kernel/arch/score/include/asm/unaligned.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_UNALIGNED_H +#define _ASM_SCORE_UNALIGNED_H + +#include <asm-generic/unaligned.h> + +#endif /* _ASM_SCORE_UNALIGNED_H */ diff --git a/kernel/arch/score/include/asm/user.h b/kernel/arch/score/include/asm/user.h new file mode 100644 index 000000000..7bfb8e2c8 --- /dev/null +++ b/kernel/arch/score/include/asm/user.h @@ -0,0 +1,21 @@ +#ifndef _ASM_SCORE_USER_H +#define _ASM_SCORE_USER_H + +struct user_regs_struct { + unsigned long regs[32]; + + unsigned long cel; + unsigned long ceh; + + unsigned long sr0; /* cnt */ + unsigned long sr1; /* lcr */ + unsigned long sr2; /* scr */ + + unsigned long cp0_epc; + unsigned long cp0_ema; + unsigned long cp0_psr; + unsigned long cp0_ecr; + unsigned long cp0_condition; +}; + +#endif /* _ASM_SCORE_USER_H */ diff --git a/kernel/arch/score/include/uapi/asm/Kbuild b/kernel/arch/score/include/uapi/asm/Kbuild new file mode 100644 index 000000000..040178cdb --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/Kbuild @@ -0,0 +1,34 @@ +# UAPI Header export list +include include/uapi/asm-generic/Kbuild.asm + +header-y += auxvec.h +header-y += bitsperlong.h +header-y += byteorder.h +header-y += errno.h +header-y += fcntl.h +header-y += ioctl.h +header-y += ioctls.h +header-y += ipcbuf.h +header-y += kvm_para.h +header-y += mman.h +header-y += msgbuf.h +header-y += param.h +header-y += poll.h +header-y += posix_types.h +header-y += ptrace.h +header-y += resource.h +header-y += sembuf.h +header-y += setup.h +header-y += shmbuf.h +header-y += sigcontext.h +header-y += siginfo.h +header-y += signal.h +header-y += socket.h +header-y += sockios.h +header-y += stat.h +header-y += statfs.h +header-y += swab.h +header-y += termbits.h +header-y += termios.h +header-y += types.h +header-y += unistd.h diff --git a/kernel/arch/score/include/uapi/asm/auxvec.h b/kernel/arch/score/include/uapi/asm/auxvec.h new file mode 100644 index 000000000..f69151565 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/auxvec.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_AUXVEC_H +#define _ASM_SCORE_AUXVEC_H + +#endif /* _ASM_SCORE_AUXVEC_H */ diff --git a/kernel/arch/score/include/uapi/asm/bitsperlong.h b/kernel/arch/score/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000..86ff337aa --- /dev/null +++ 
b/kernel/arch/score/include/uapi/asm/bitsperlong.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BITSPERLONG_H +#define _ASM_SCORE_BITSPERLONG_H + +#include <asm-generic/bitsperlong.h> + +#endif /* _ASM_SCORE_BITSPERLONG_H */ diff --git a/kernel/arch/score/include/uapi/asm/byteorder.h b/kernel/arch/score/include/uapi/asm/byteorder.h new file mode 100644 index 000000000..88cbebc79 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/byteorder.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BYTEORDER_H +#define _ASM_SCORE_BYTEORDER_H + +#include <linux/byteorder/little_endian.h> + +#endif /* _ASM_SCORE_BYTEORDER_H */ diff --git a/kernel/arch/score/include/uapi/asm/errno.h b/kernel/arch/score/include/uapi/asm/errno.h new file mode 100644 index 000000000..29ff39d5a --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/errno.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_ERRNO_H +#define _ASM_SCORE_ERRNO_H + +#include <asm-generic/errno.h> + +#endif /* _ASM_SCORE_ERRNO_H */ diff --git a/kernel/arch/score/include/uapi/asm/fcntl.h b/kernel/arch/score/include/uapi/asm/fcntl.h new file mode 100644 index 000000000..03968a310 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/fcntl.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_FCNTL_H +#define _ASM_SCORE_FCNTL_H + +#include <asm-generic/fcntl.h> + +#endif /* _ASM_SCORE_FCNTL_H */ diff --git a/kernel/arch/score/include/uapi/asm/ioctl.h b/kernel/arch/score/include/uapi/asm/ioctl.h new file mode 100644 index 000000000..a351d2194 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/ioctl.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IOCTL_H +#define _ASM_SCORE_IOCTL_H + +#include <asm-generic/ioctl.h> + +#endif /* _ASM_SCORE_IOCTL_H */ diff --git a/kernel/arch/score/include/uapi/asm/ioctls.h b/kernel/arch/score/include/uapi/asm/ioctls.h new file mode 100644 index 000000000..ed01d2b9a --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/ioctls.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IOCTLS_H +#define _ASM_SCORE_IOCTLS_H + +#include <asm-generic/ioctls.h> + +#endif /* _ASM_SCORE_IOCTLS_H */ diff --git a/kernel/arch/score/include/uapi/asm/ipcbuf.h b/kernel/arch/score/include/uapi/asm/ipcbuf.h new file mode 100644 index 000000000..e082ceff1 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/ipcbuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IPCBUF_H +#define _ASM_SCORE_IPCBUF_H + +#include <asm-generic/ipcbuf.h> + +#endif /* _ASM_SCORE_IPCBUF_H */ diff --git a/kernel/arch/score/include/uapi/asm/kvm_para.h b/kernel/arch/score/include/uapi/asm/kvm_para.h new file mode 100644 index 000000000..14fab8f0b --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/kvm_para.h @@ -0,0 +1 @@ +#include <asm-generic/kvm_para.h> diff --git a/kernel/arch/score/include/uapi/asm/mman.h b/kernel/arch/score/include/uapi/asm/mman.h new file mode 100644 index 000000000..84d85ddfe --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/mman.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MMAN_H +#define _ASM_SCORE_MMAN_H + +#include <asm-generic/mman.h> + +#endif /* _ASM_SCORE_MMAN_H */ diff --git a/kernel/arch/score/include/uapi/asm/msgbuf.h b/kernel/arch/score/include/uapi/asm/msgbuf.h new file mode 100644 index 000000000..7506721e2 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/msgbuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MSGBUF_H +#define _ASM_SCORE_MSGBUF_H + +#include <asm-generic/msgbuf.h> + +#endif /* _ASM_SCORE_MSGBUF_H */ diff --git a/kernel/arch/score/include/uapi/asm/param.h b/kernel/arch/score/include/uapi/asm/param.h new file mode 100644 index 000000000..916b8690b --- /dev/null +++ 
b/kernel/arch/score/include/uapi/asm/param.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_PARAM_H +#define _ASM_SCORE_PARAM_H + +#include <asm-generic/param.h> + +#endif /* _ASM_SCORE_PARAM_H */ diff --git a/kernel/arch/score/include/uapi/asm/poll.h b/kernel/arch/score/include/uapi/asm/poll.h new file mode 100644 index 000000000..18532db02 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/poll.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_POLL_H +#define _ASM_SCORE_POLL_H + +#include <asm-generic/poll.h> + +#endif /* _ASM_SCORE_POLL_H */ diff --git a/kernel/arch/score/include/uapi/asm/posix_types.h b/kernel/arch/score/include/uapi/asm/posix_types.h new file mode 100644 index 000000000..b88acf800 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/posix_types.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_POSIX_TYPES_H +#define _ASM_SCORE_POSIX_TYPES_H + +#include <asm-generic/posix_types.h> + +#endif /* _ASM_SCORE_POSIX_TYPES_H */ diff --git a/kernel/arch/score/include/uapi/asm/ptrace.h b/kernel/arch/score/include/uapi/asm/ptrace.h new file mode 100644 index 000000000..5c5e79405 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/ptrace.h @@ -0,0 +1,65 @@ +#ifndef _UAPI_ASM_SCORE_PTRACE_H +#define _UAPI_ASM_SCORE_PTRACE_H + +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 + +#define SINGLESTEP16_INSN 0x7006 +#define SINGLESTEP32_INSN 0x840C8000 +#define BREAKPOINT16_INSN 0x7002 /* work on SPG300 */ +#define BREAKPOINT32_INSN 0x84048000 /* work on SPG300 */ + +/* Define instruction mask */ +#define INSN32_MASK 0x80008000 + +#define J32 0x88008000 /* 1_00010_0000000000_1_000000000000000 */ +#define J32M 0xFC008000 /* 1_11111_0000000000_1_000000000000000 */ + +#define B32 0x90008000 /* 1_00100_0000000000_1_000000000000000 */ +#define B32M 0xFC008000 +#define BL32 0x90008001 /* 1_00100_0000000000_1_000000000000001 */ +#define BL32M B32 +#define BR32 0x80008008 /* 1_00000_0000000000_1_00000000_000100_0 */ +#define BR32M 0xFFE0807E +#define BRL32 0x80008009 /* 1_00000_0000000000_1_00000000_000100_1 */ +#define BRL32M BR32M + +#define B32_SET (J32 | B32 | BL32 | BR32 | BRL32) + +#define J16 0x3000 /* 0_011_....... */ +#define J16M 0xF000 +#define B16 0x4000 /* 0_100_....... */ +#define B16M 0xF000 +#define BR16 0x0004 /* 0_000.......0100 */ +#define BR16M 0xF00F +#define B16_SET (J16 | B16 | BR16) + + +/* + * This struct defines the way the registers are stored on the stack during a + * system call/exception. As usual the registers k0/k1 aren't being saved. 
+ */ +struct pt_regs { + unsigned long pad0[6]; /* stack arguments */ + unsigned long orig_r4; + unsigned long orig_r7; + long is_syscall; + + unsigned long regs[32]; + + unsigned long cel; + unsigned long ceh; + + unsigned long sr0; /* cnt */ + unsigned long sr1; /* lcr */ + unsigned long sr2; /* scr */ + + unsigned long cp0_epc; + unsigned long cp0_ema; + unsigned long cp0_psr; + unsigned long cp0_ecr; + unsigned long cp0_condition; +}; + + +#endif /* _UAPI_ASM_SCORE_PTRACE_H */ diff --git a/kernel/arch/score/include/uapi/asm/resource.h b/kernel/arch/score/include/uapi/asm/resource.h new file mode 100644 index 000000000..9ce22bc7b --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/resource.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_RESOURCE_H +#define _ASM_SCORE_RESOURCE_H + +#include <asm-generic/resource.h> + +#endif /* _ASM_SCORE_RESOURCE_H */ diff --git a/kernel/arch/score/include/uapi/asm/sembuf.h b/kernel/arch/score/include/uapi/asm/sembuf.h new file mode 100644 index 000000000..dae5e835c --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/sembuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SEMBUF_H +#define _ASM_SCORE_SEMBUF_H + +#include <asm-generic/sembuf.h> + +#endif /* _ASM_SCORE_SEMBUF_H */ diff --git a/kernel/arch/score/include/uapi/asm/setup.h b/kernel/arch/score/include/uapi/asm/setup.h new file mode 100644 index 000000000..ab9dbdb59 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/setup.h @@ -0,0 +1,9 @@ +#ifndef _UAPI_ASM_SCORE_SETUP_H +#define _UAPI_ASM_SCORE_SETUP_H + +#define COMMAND_LINE_SIZE 256 +#define MEMORY_START 0 +#define MEMORY_SIZE 0x2000000 + + +#endif /* _UAPI_ASM_SCORE_SETUP_H */ diff --git a/kernel/arch/score/include/uapi/asm/shmbuf.h b/kernel/arch/score/include/uapi/asm/shmbuf.h new file mode 100644 index 000000000..c85b2429b --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/shmbuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SHMBUF_H +#define _ASM_SCORE_SHMBUF_H + +#include <asm-generic/shmbuf.h> + +#endif /* _ASM_SCORE_SHMBUF_H */ diff --git a/kernel/arch/score/include/uapi/asm/sigcontext.h b/kernel/arch/score/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..5ffda39dd --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/sigcontext.h @@ -0,0 +1,22 @@ +#ifndef _ASM_SCORE_SIGCONTEXT_H +#define _ASM_SCORE_SIGCONTEXT_H + +/* + * Keep this struct definition in sync with the sigcontext fragment + * in arch/score/tools/offset.c + */ +struct sigcontext { + unsigned int sc_regmask; + unsigned int sc_psr; + unsigned int sc_condition; + unsigned long sc_pc; + unsigned long sc_regs[32]; + unsigned int sc_ssflags; + unsigned int sc_mdceh; + unsigned int sc_mdcel; + unsigned int sc_ecr; + unsigned long sc_ema; + unsigned long sc_sigset[4]; +}; + +#endif /* _ASM_SCORE_SIGCONTEXT_H */ diff --git a/kernel/arch/score/include/uapi/asm/siginfo.h b/kernel/arch/score/include/uapi/asm/siginfo.h new file mode 100644 index 000000000..87ca35607 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/siginfo.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SIGINFO_H +#define _ASM_SCORE_SIGINFO_H + +#include <asm-generic/siginfo.h> + +#endif /* _ASM_SCORE_SIGINFO_H */ diff --git a/kernel/arch/score/include/uapi/asm/signal.h b/kernel/arch/score/include/uapi/asm/signal.h new file mode 100644 index 000000000..2605bc06b --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/signal.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SIGNAL_H +#define _ASM_SCORE_SIGNAL_H + +#include <asm-generic/signal.h> + +#endif /* _ASM_SCORE_SIGNAL_H */ diff --git 
a/kernel/arch/score/include/uapi/asm/socket.h b/kernel/arch/score/include/uapi/asm/socket.h new file mode 100644 index 000000000..612a70e38 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/socket.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SOCKET_H +#define _ASM_SCORE_SOCKET_H + +#include <asm-generic/socket.h> + +#endif /* _ASM_SCORE_SOCKET_H */ diff --git a/kernel/arch/score/include/uapi/asm/sockios.h b/kernel/arch/score/include/uapi/asm/sockios.h new file mode 100644 index 000000000..ba8256480 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/sockios.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SOCKIOS_H +#define _ASM_SCORE_SOCKIOS_H + +#include <asm-generic/sockios.h> + +#endif /* _ASM_SCORE_SOCKIOS_H */ diff --git a/kernel/arch/score/include/uapi/asm/stat.h b/kernel/arch/score/include/uapi/asm/stat.h new file mode 100644 index 000000000..503705550 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/stat.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_STAT_H +#define _ASM_SCORE_STAT_H + +#include <asm-generic/stat.h> + +#endif /* _ASM_SCORE_STAT_H */ diff --git a/kernel/arch/score/include/uapi/asm/statfs.h b/kernel/arch/score/include/uapi/asm/statfs.h new file mode 100644 index 000000000..36e41004e --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_STATFS_H +#define _ASM_SCORE_STATFS_H + +#include <asm-generic/statfs.h> + +#endif /* _ASM_SCORE_STATFS_H */ diff --git a/kernel/arch/score/include/uapi/asm/swab.h b/kernel/arch/score/include/uapi/asm/swab.h new file mode 100644 index 000000000..fadc3cc6d --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/swab.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SWAB_H +#define _ASM_SCORE_SWAB_H + +#include <asm-generic/swab.h> + +#endif /* _ASM_SCORE_SWAB_H */ diff --git a/kernel/arch/score/include/uapi/asm/termbits.h b/kernel/arch/score/include/uapi/asm/termbits.h new file mode 100644 index 000000000..9a95c1412 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/termbits.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TERMBITS_H +#define _ASM_SCORE_TERMBITS_H + +#include <asm-generic/termbits.h> + +#endif /* _ASM_SCORE_TERMBITS_H */ diff --git a/kernel/arch/score/include/uapi/asm/termios.h b/kernel/arch/score/include/uapi/asm/termios.h new file mode 100644 index 000000000..40984e811 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/termios.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TERMIOS_H +#define _ASM_SCORE_TERMIOS_H + +#include <asm-generic/termios.h> + +#endif /* _ASM_SCORE_TERMIOS_H */ diff --git a/kernel/arch/score/include/uapi/asm/types.h b/kernel/arch/score/include/uapi/asm/types.h new file mode 100644 index 000000000..214003277 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/types.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TYPES_H +#define _ASM_SCORE_TYPES_H + +#include <asm-generic/types.h> + +#endif /* _ASM_SCORE_TYPES_H */ diff --git a/kernel/arch/score/include/uapi/asm/unistd.h b/kernel/arch/score/include/uapi/asm/unistd.h new file mode 100644 index 000000000..9cb4260a5 --- /dev/null +++ b/kernel/arch/score/include/uapi/asm/unistd.h @@ -0,0 +1,11 @@ +#define __ARCH_HAVE_MMU + +#define __ARCH_WANT_SYSCALL_NO_AT +#define __ARCH_WANT_SYSCALL_NO_FLAGS +#define __ARCH_WANT_SYSCALL_OFF_T +#define __ARCH_WANT_SYSCALL_DEPRECATED +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK + +#include <asm-generic/unistd.h> |
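
processor.h and thread_info.h above fix the kernel-stack geometry: THREAD_SIZE is two pages, kstk_tos() points 32 bytes below the top of the task's stack area, and task_pt_regs() sits one struct pt_regs below that, which is where KSTK_EIP()/KSTK_ESP() read the saved cp0_epc and r29 from. The sketch below is a user-space model of that arithmetic only, assuming 4 KiB pages and using a malloc'd buffer in place of task_stack_page(); it is not kernel code from this diff.

/* Userspace model of the kstk_tos()/task_pt_regs() arithmetic from
 * processor.h.  Assumptions (not taken from the diff): PAGE_SIZE is
 * 4096 and a plain malloc'd buffer stands in for task_stack_page(). */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE         4096UL
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)   /* 8 KiB */

/* Register frame layout mirroring the uapi ptrace.h above. */
struct pt_regs_model {
	unsigned long pad0[6];
	unsigned long orig_r4, orig_r7;
	long is_syscall;
	unsigned long regs[32];
	unsigned long cel, ceh;
	unsigned long sr0, sr1, sr2;
	unsigned long cp0_epc, cp0_ema, cp0_psr, cp0_ecr, cp0_condition;
};

int main(void)
{
	void *stack = malloc(THREAD_SIZE);  /* stands in for task_stack_page() */
	unsigned long tos = (unsigned long)stack + THREAD_SIZE - 32;    /* kstk_tos() */
	struct pt_regs_model *regs = (struct pt_regs_model *)tos - 1;   /* task_pt_regs() */

	printf("stack base : %p\n", stack);
	printf("top of stk : %#lx (32 bytes below the true top)\n", tos);
	printf("pt_regs at : %p (%zu bytes)\n", (void *)regs, sizeof(*regs));
	/* KSTK_EIP/KSTK_ESP would then read regs->cp0_epc and regs->regs[29]. */
	free(stack);
	return 0;
}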
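scoreregs.h defines the TIMER0 register block and the bit layout of its control register. A periodic tick would presumably be programmed by loading a preload value and then writing a control word with the enable, interrupt-enable and periodic-mode bits set; the sketch below only assembles those constants and prints them. The HZ value, the SYSTEM_CLOCK/HZ preload formula and the programming order are assumptions for illustration (the real setup lives in the port's time.c, which is not part of this diff).

/* Compose a plausible TIMER0 configuration from the scoreregs.h
 * constants above.  HZ = 100 and the order "preload, then control"
 * are assumptions for illustration, not taken from the diff. */
#include <stdio.h>

#define TIME0BASE        0x96080000
#define P_TIMER0_CTRL    (TIME0BASE + 0x00)
#define P_TIMER0_PRELOAD (TIME0BASE + 0x08)

#define TMR_ENABLE       0x0001
#define TMR_IE_ENABLE    0x0002
#define TMR_M_PERIODIC   0x0400

#define SYSTEM_CLOCK     (27 * 1000000 / 4)   /* 27 MHz / 4 = 6,750,000 */
#define HZ               100                  /* assumed tick rate */

int main(void)
{
	unsigned int ctrl = TMR_ENABLE | TMR_IE_ENABLE | TMR_M_PERIODIC;
	unsigned int preload = SYSTEM_CLOCK / HZ;  /* counts per tick */

	printf("write 0x%08x to PRELOAD @ 0x%08x\n", preload, P_TIMER0_PRELOAD);
	printf("write 0x%08x to CTRL    @ 0x%08x\n", ctrl, P_TIMER0_CTRL);
	return 0;
}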
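The __access_ok() check in uaccess.h is branch-free: it ORs the start address, the end address and (for non-constant sizes) the size, masks the result with the current segment limit, and requires zero. Because segment.h defines KERNEL_DS and USER_DS as the same all-zero segment, that mask is effectively a no-op on this port, and the real clamping happens against TASK_SIZE inside copy_{from,to}_user(). The user-space model below mirrors the two checks and uses a hypothetical non-zero segment mask (0x80000000) purely so the rejection case is visible; that mask value does not appear anywhere in this diff.

/* Userspace model of __ua_size()/__access_ok() from uaccess.h.
 * The segment mask 0x80000000UL is hypothetical (this port defines
 * KERNEL_DS == USER_DS == {0}); it is only used here to make the
 * "high bits set => reject" behaviour observable. */
#include <stdio.h>

#define __ua_size(size) \
	((__builtin_constant_p(size) && (signed long)(size) > 0) ? 0 : (size))

static unsigned long seg_mask = 0x80000000UL;   /* hypothetical addr_limit */

static int model_access_ok(unsigned long addr, unsigned long size)
{
	return ((long)(seg_mask & (addr | (addr + size) | __ua_size(size)))) == 0;
}

int main(void)
{
	/* A low range passes, anything touching the top 2 GiB fails. */
	printf("0x00400000 + 4096 -> %d\n", model_access_ok(0x00400000UL, 4096));
	printf("0x7ffff000 + 8192 -> %d\n", model_access_ok(0x7ffff000UL, 8192));
	printf("0x80000000 + 16   -> %d\n", model_access_ok(0x80000000UL, 16));
	return 0;
}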
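copy_from_user()/copy_to_user() above return the number of bytes left uncopied. Their fallback path clamps a range that starts below TASK_SIZE but runs past it: over = addr + len - TASK_SIZE, copy len - over bytes, and report over as the remainder. The sketch below reproduces only that clamping arithmetic in user space; fake_copy() stands in for the assembly routine __copy_tofrom_user(), which is not part of this diff, and the in-bounds test is a simplified stand-in for access_ok().

/* Userspace model of the TASK_SIZE clamping in copy_from_user()
 * from uaccess.h.  fake_copy() stands in for __copy_tofrom_user()
 * and simply reports how many bytes it was asked to move. */
#include <stdio.h>

#define TASK_SIZE 0x7fff8000UL   /* from processor.h above */

static unsigned long fake_copy(unsigned long len)
{
	/* Pretend the copy itself succeeds; return 0 bytes "not copied". */
	printf("  __copy_tofrom_user moves %lu bytes\n", len);
	return 0;
}

/* Mirrors the fallback path of copy_from_user() when the range
 * straddles TASK_SIZE: copy the in-bounds prefix, report the rest. */
static unsigned long model_copy(unsigned long from, unsigned long len)
{
	if (from + len <= TASK_SIZE)          /* simplified stand-in for access_ok() */
		return fake_copy(len);

	if (from < TASK_SIZE) {
		unsigned long over = from + len - TASK_SIZE;
		return fake_copy(len - over) + over;
	}
	return len;                           /* nothing usable: all "not copied" */
}

int main(void)
{
	printf("in bounds   -> %lu left\n", model_copy(0x10000000UL, 4096));
	printf("straddling  -> %lu left\n", model_copy(TASK_SIZE - 100, 4096));
	printf("fully above -> %lu left\n", model_copy(TASK_SIZE + 4096, 4096));
	return 0;
}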