author    | Yunhong Jiang <yunhong.jiang@intel.com> | 2015-08-04 12:17:53 -0700
committer | Yunhong Jiang <yunhong.jiang@intel.com> | 2015-08-04 15:44:42 -0700
commit    | 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00
tree      | 1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/arch/alpha/lib/copy_user.S
parent    | 98260f3884f4a202f9ca5eabed40b1354c489b29
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base.
It's from the linux-4.1.y-rt branch of git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git,
and the base commit is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date:   Sat Jul 25 12:13:34 2015 +0200

    Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
We lose all the git history this way, which is not good. We
should move to another OPNFV project repo in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/arch/alpha/lib/copy_user.S')
-rw-r--r-- | kernel/arch/alpha/lib/copy_user.S | 145
1 file changed, 145 insertions, 0 deletions
diff --git a/kernel/arch/alpha/lib/copy_user.S b/kernel/arch/alpha/lib/copy_user.S
new file mode 100644
index 000000000..6f3fab9eb
--- /dev/null
+++ b/kernel/arch/alpha/lib/copy_user.S
@@ -0,0 +1,145 @@
+/*
+ * arch/alpha/lib/copy_user.S
+ *
+ * Copy to/from user space, handling exceptions as we go..  This
+ * isn't exactly pretty.
+ *
+ * This is essentially the same as "memcpy()", but with a few twists.
+ * Notably, we have to make sure that $0 is always up-to-date and
+ * contains the right "bytes left to copy" value (and that it is updated
+ * only _after_ a successful copy). There is also some rather minor
+ * exception setup stuff..
+ *
+ * NOTE! This is not directly C-callable, because the calling semantics are
+ * different:
+ *
+ * Inputs:
+ *      length in $0
+ *      destination address in $6
+ *      source address in $7
+ *      return address in $28
+ *
+ * Outputs:
+ *      bytes left to copy in $0
+ *
+ * Clobbers:
+ *      $1,$2,$3,$4,$5,$6,$7
+ */
+
+/* Allow an exception for an insn; exit if we get one.  */
+#define EXI(x,y...)                     \
+        99: x,##y;                      \
+        .section __ex_table,"a";        \
+        .long 99b - .;                  \
+        lda $31, $exitin-99b($31);      \
+        .previous
+
+#define EXO(x,y...)                     \
+        99: x,##y;                      \
+        .section __ex_table,"a";        \
+        .long 99b - .;                  \
+        lda $31, $exitout-99b($31);     \
+        .previous
+
+        .set noat
+        .align 4
+        .globl __copy_user
+        .ent __copy_user
+__copy_user:
+        .prologue 0
+        and $6,7,$3
+        beq $0,$35
+        beq $3,$36
+        subq $3,8,$3
+        .align 4
+$37:
+        EXI( ldq_u $1,0($7) )
+        EXO( ldq_u $2,0($6) )
+        extbl $1,$7,$1
+        mskbl $2,$6,$2
+        insbl $1,$6,$1
+        addq $3,1,$3
+        bis $1,$2,$1
+        EXO( stq_u $1,0($6) )
+        subq $0,1,$0
+        addq $6,1,$6
+        addq $7,1,$7
+        beq $0,$41
+        bne $3,$37
+$36:
+        and $7,7,$1
+        bic $0,7,$4
+        beq $1,$43
+        beq $4,$48
+        EXI( ldq_u $3,0($7) )
+        .align 4
+$50:
+        EXI( ldq_u $2,8($7) )
+        subq $4,8,$4
+        extql $3,$7,$3
+        extqh $2,$7,$1
+        bis $3,$1,$1
+        EXO( stq $1,0($6) )
+        addq $7,8,$7
+        subq $0,8,$0
+        addq $6,8,$6
+        bis $2,$2,$3
+        bne $4,$50
+$48:
+        beq $0,$41
+        .align 4
+$57:
+        EXI( ldq_u $1,0($7) )
+        EXO( ldq_u $2,0($6) )
+        extbl $1,$7,$1
+        mskbl $2,$6,$2
+        insbl $1,$6,$1
+        bis $1,$2,$1
+        EXO( stq_u $1,0($6) )
+        subq $0,1,$0
+        addq $6,1,$6
+        addq $7,1,$7
+        bne $0,$57
+        br $31,$41
+        .align 4
+$43:
+        beq $4,$65
+        .align 4
+$66:
+        EXI( ldq $1,0($7) )
+        subq $4,8,$4
+        EXO( stq $1,0($6) )
+        addq $7,8,$7
+        subq $0,8,$0
+        addq $6,8,$6
+        bne $4,$66
+$65:
+        beq $0,$41
+        EXI( ldq $2,0($7) )
+        EXO( ldq $1,0($6) )
+        mskql $2,$0,$2
+        mskqh $1,$0,$1
+        bis $2,$1,$2
+        EXO( stq $2,0($6) )
+        bis $31,$31,$0
+$41:
+$35:
+$exitout:
+        ret $31,($28),1
+
+$exitin:
+        /* A stupid byte-by-byte zeroing of the rest of the output
+           buffer.  This cures security holes by never leaving
+           random kernel data around to be copied elsewhere.  */
+
+        mov $0,$1
+$101:
+        EXO ( ldq_u $2,0($6) )
+        subq $1,1,$1
+        mskbl $2,$6,$2
+        EXO ( stq_u $2,0($6) )
+        addq $6,1,$6
+        bgt $1,$101
+        ret $31,($28),1
+
+        .end __copy_user
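The header comment documents a register-based calling convention (length in $0, destination in $6, source in $7, return address in $28, bytes left to copy returned in $0), which is why the routine is not directly C-callable. For reference, here is a minimal sketch of how a C-level helper could invoke a routine with this convention using GCC's Alpha register variables and extended asm. The wrapper name example_copy_user and the exact operand constraints are illustrative assumptions, not the kernel's actual uaccess wrappers, which live in the Alpha uaccess headers.

/*
 * Illustrative sketch only: assumes GCC on Alpha. example_copy_user is a
 * hypothetical wrapper and is not part of this commit.
 */
extern void __copy_user(void);

static inline long example_copy_user(void *to, const void *from, long len)
{
        /* Bind the arguments to the registers the routine expects. */
        register long count       __asm__("$0") = len;  /* in: bytes to copy, out: bytes left */
        register void *dst        __asm__("$6") = to;   /* destination address */
        register const void *src  __asm__("$7") = from; /* source address */

        __asm__ __volatile__(
                "jsr $28,(%3),__copy_user"               /* return address goes in $28 */
                : "=r"(count), "=r"(dst), "=r"(src)
                : "r"(__copy_user), "0"(count), "1"(dst), "2"(src)
                : "$1", "$2", "$3", "$4", "$5", "$28", "memory");

        return count;  /* non-zero means some bytes could not be copied */
}

A caller can treat the returned value as the number of bytes that were not copied. On a faulting access the $exitin path above zeroes the remainder of the destination buffer, so an uncopied tail never exposes stale kernel data.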