author		José Pekkarinen <jose.pekkarinen@nokia.com>	2016-04-11 10:41:07 +0300
committer	José Pekkarinen <jose.pekkarinen@nokia.com>	2016-04-13 08:17:18 +0300
commit		e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree		d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/crypto/nx/nx-aes-gcm.c
parent		f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic(I70131fb85).
The colliding patch was dropped, since its logic was already present in
the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/crypto/nx/nx-aes-gcm.c')
-rw-r--r--	kernel/drivers/crypto/nx/nx-aes-gcm.c	175
1 file changed, 86 insertions(+), 89 deletions(-)
diff --git a/kernel/drivers/crypto/nx/nx-aes-gcm.c b/kernel/drivers/crypto/nx/nx-aes-gcm.c
index c6ebeb644..abd465f47 100644
--- a/kernel/drivers/crypto/nx/nx-aes-gcm.c
+++ b/kernel/drivers/crypto/nx/nx-aes-gcm.c
@@ -25,7 +25,6 @@
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <asm/vio.h>
 
 #include "nx_csbcpb.h"
@@ -36,7 +35,7 @@ static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
                               const u8 *in_key,
                               unsigned int key_len)
 {
-        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+        struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
         struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
         struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
 
@@ -75,7 +74,7 @@ static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
                                   const u8 *in_key,
                                   unsigned int key_len)
 {
-        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+        struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
         char *nonce = nx_ctx->priv.gcm.nonce;
         int rc;
 
@@ -93,17 +92,6 @@ out:
         return rc;
 }
 
-static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
-                                  unsigned int authsize)
-{
-        if (authsize > crypto_aead_alg(tfm)->maxauthsize)
-                return -EINVAL;
-
-        crypto_aead_crt(tfm)->authsize = authsize;
-
-        return 0;
-}
-
 static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
                                       unsigned int authsize)
 {
@@ -116,25 +104,24 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
                 return -EINVAL;
         }
 
-        crypto_aead_crt(tfm)->authsize = authsize;
-
         return 0;
 }
 
 static int nx_gca(struct nx_crypto_ctx *nx_ctx,
                   struct aead_request *req,
-                  u8 *out)
+                  u8 *out,
+                  unsigned int assoclen)
 {
         int rc;
         struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
         struct scatter_walk walk;
         struct nx_sg *nx_sg = nx_ctx->in_sg;
-        unsigned int nbytes = req->assoclen;
+        unsigned int nbytes = assoclen;
         unsigned int processed = 0, to_process;
         unsigned int max_sg_len;
 
         if (nbytes <= AES_BLOCK_SIZE) {
-                scatterwalk_start(&walk, req->assoc);
+                scatterwalk_start(&walk, req->src);
                 scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
                 scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
                 return 0;
@@ -159,7 +146,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
                                   NX_PAGE_SIZE * (max_sg_len - 1));
 
                 nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
-                                          req->assoc, processed, &to_process);
+                                          req->src, processed, &to_process);
 
                 if ((to_process + processed) < nbytes)
                         NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
@@ -180,7 +167,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
                 NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
 
                 atomic_inc(&(nx_ctx->stats->aes_ops));
-                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+                atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
                 processed += to_process;
         } while (processed < nbytes);
@@ -190,13 +177,15 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
         return rc;
 }
 
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
+                unsigned int assoclen)
 {
         int rc;
-        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+        struct nx_crypto_ctx *nx_ctx =
+                crypto_aead_ctx(crypto_aead_reqtfm(req));
         struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
         struct nx_sg *nx_sg;
-        unsigned int nbytes = req->assoclen;
+        unsigned int nbytes = assoclen;
         unsigned int processed = 0, to_process;
         unsigned int max_sg_len;
 
@@ -225,7 +214,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
                            NX_PAGE_SIZE * (max_sg_len - 1));
 
                 nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
-                                          req->assoc, processed, &to_process);
+                                          req->src, processed, &to_process);
 
                 if ((to_process + processed) < nbytes)
                         NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
@@ -251,7 +240,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
                 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
                 atomic_inc(&(nx_ctx->stats->aes_ops));
-                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+                atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
                 processed += to_process;
         } while (processed < nbytes);
@@ -266,7 +255,8 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
                      int enc)
 {
         int rc;
-        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+        struct nx_crypto_ctx *nx_ctx =
+                crypto_aead_ctx(crypto_aead_reqtfm(req));
         struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
         char out[AES_BLOCK_SIZE];
         struct nx_sg *in_sg, *out_sg;
@@ -327,9 +317,11 @@ out:
         return rc;
 }
 
-static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
+                            unsigned int assoclen)
 {
-        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+        struct nx_crypto_ctx *nx_ctx =
+                crypto_aead_ctx(crypto_aead_reqtfm(req));
         struct nx_gcm_rctx *rctx = aead_request_ctx(req);
         struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
         struct blkcipher_desc desc;
@@ -345,10 +337,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
         *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
         if (nbytes == 0) {
-                if (req->assoclen == 0)
+                if (assoclen == 0)
                         rc = gcm_empty(req, &desc, enc);
                 else
-                        rc = gmac(req, &desc);
+                        rc = gmac(req, &desc, assoclen);
                 if (rc)
                         goto out;
                 else
@@ -356,9 +348,10 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
         }
 
         /* Process associated data */
-        csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
-        if (req->assoclen) {
-                rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
+        csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
+        if (assoclen) {
+                rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
+                            assoclen);
                 if (rc)
                         goto out;
         }
@@ -376,9 +369,9 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
                 to_process = nbytes - processed;
 
                 csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
-                desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
                 rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
-                                       req->src, &to_process, processed,
+                                       req->src, &to_process,
+                                       processed + req->assoclen,
                                        csbcpb->cpb.aes_gcm.iv_or_cnt);
 
                 if (rc)
@@ -413,18 +406,20 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 mac:
         if (enc) {
                 /* copy out the auth tag */
-                scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
-                                 req->dst, nbytes,
-                                 crypto_aead_authsize(crypto_aead_reqtfm(req)),
-                                 SCATTERWALK_TO_SG);
+                scatterwalk_map_and_copy(
+                        csbcpb->cpb.aes_gcm.out_pat_or_mac,
+                        req->dst, req->assoclen + nbytes,
+                        crypto_aead_authsize(crypto_aead_reqtfm(req)),
+                        SCATTERWALK_TO_SG);
         } else {
                 u8 *itag = nx_ctx->priv.gcm.iauth_tag;
                 u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
 
-                scatterwalk_map_and_copy(itag, req->src, nbytes,
-                                 crypto_aead_authsize(crypto_aead_reqtfm(req)),
-                                 SCATTERWALK_FROM_SG);
-                rc = memcmp(itag, otag,
+                scatterwalk_map_and_copy(
+                        itag, req->src, req->assoclen + nbytes,
+                        crypto_aead_authsize(crypto_aead_reqtfm(req)),
+                        SCATTERWALK_FROM_SG);
+                rc = crypto_memneq(itag, otag,
                             crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
                      -EBADMSG : 0;
         }
@@ -440,7 +435,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
 
         memcpy(iv, req->iv, 12);
 
-        return gcm_aes_nx_crypt(req, 1);
+        return gcm_aes_nx_crypt(req, 1, req->assoclen);
 }
 
 static int gcm_aes_nx_decrypt(struct aead_request *req)
@@ -450,12 +445,13 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
 
         memcpy(iv, req->iv, 12);
 
-        return gcm_aes_nx_crypt(req, 0);
+        return gcm_aes_nx_crypt(req, 0, req->assoclen);
 }
 
 static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 {
-        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+        struct nx_crypto_ctx *nx_ctx =
+                crypto_aead_ctx(crypto_aead_reqtfm(req));
         struct nx_gcm_rctx *rctx = aead_request_ctx(req);
         char *iv = rctx->iv;
         char *nonce = nx_ctx->priv.gcm.nonce;
@@ -463,12 +459,16 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
         memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
         memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 
-        return gcm_aes_nx_crypt(req, 1);
+        if (req->assoclen < 8)
+                return -EINVAL;
+
+        return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
 }
 
 static int gcm4106_aes_nx_decrypt(struct aead_request *req)
 {
-        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+        struct nx_crypto_ctx *nx_ctx =
+                crypto_aead_ctx(crypto_aead_reqtfm(req));
         struct nx_gcm_rctx *rctx = aead_request_ctx(req);
         char *iv = rctx->iv;
         char *nonce = nx_ctx->priv.gcm.nonce;
@@ -476,7 +476,10 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
         memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
         memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 
-        return gcm_aes_nx_crypt(req, 0);
+        if (req->assoclen < 8)
+                return -EINVAL;
+
+        return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
 }
 
 /* tell the block cipher walk routines that this is a stream cipher by
@@ -484,45 +487,39 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
  * during encrypt/decrypt doesn't solve this problem, because it calls
  * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
  * but instead uses this tfm->blocksize.
  */
-struct crypto_alg nx_gcm_aes_alg = {
-        .cra_name        = "gcm(aes)",
-        .cra_driver_name = "gcm-aes-nx",
-        .cra_priority    = 300,
-        .cra_flags       = CRYPTO_ALG_TYPE_AEAD,
-        .cra_blocksize   = 1,
-        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-        .cra_type        = &crypto_aead_type,
-        .cra_module      = THIS_MODULE,
-        .cra_init        = nx_crypto_ctx_aes_gcm_init,
-        .cra_exit        = nx_crypto_ctx_exit,
-        .cra_aead = {
-                .ivsize      = AES_BLOCK_SIZE,
-                .maxauthsize = AES_BLOCK_SIZE,
-                .setkey      = gcm_aes_nx_set_key,
-                .setauthsize = gcm_aes_nx_setauthsize,
-                .encrypt     = gcm_aes_nx_encrypt,
-                .decrypt     = gcm_aes_nx_decrypt,
-        }
+struct aead_alg nx_gcm_aes_alg = {
+        .base = {
+                .cra_name        = "gcm(aes)",
+                .cra_driver_name = "gcm-aes-nx",
+                .cra_priority    = 300,
+                .cra_blocksize   = 1,
+                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+                .cra_module      = THIS_MODULE,
+        },
+        .init        = nx_crypto_ctx_aes_gcm_init,
+        .exit        = nx_crypto_ctx_aead_exit,
+        .ivsize      = 12,
+        .maxauthsize = AES_BLOCK_SIZE,
+        .setkey      = gcm_aes_nx_set_key,
+        .encrypt     = gcm_aes_nx_encrypt,
+        .decrypt     = gcm_aes_nx_decrypt,
 };
 
-struct crypto_alg nx_gcm4106_aes_alg = {
-        .cra_name        = "rfc4106(gcm(aes))",
-        .cra_driver_name = "rfc4106-gcm-aes-nx",
-        .cra_priority    = 300,
-        .cra_flags       = CRYPTO_ALG_TYPE_AEAD,
-        .cra_blocksize   = 1,
-        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-        .cra_type        = &crypto_nivaead_type,
-        .cra_module      = THIS_MODULE,
-        .cra_init        = nx_crypto_ctx_aes_gcm_init,
-        .cra_exit        = nx_crypto_ctx_exit,
-        .cra_aead = {
-                .ivsize      = 8,
-                .maxauthsize = AES_BLOCK_SIZE,
-                .geniv       = "seqiv",
-                .setkey      = gcm4106_aes_nx_set_key,
-                .setauthsize = gcm4106_aes_nx_setauthsize,
-                .encrypt     = gcm4106_aes_nx_encrypt,
-                .decrypt     = gcm4106_aes_nx_decrypt,
-        }
+struct aead_alg nx_gcm4106_aes_alg = {
+        .base = {
+                .cra_name        = "rfc4106(gcm(aes))",
+                .cra_driver_name = "rfc4106-gcm-aes-nx",
+                .cra_priority    = 300,
+                .cra_blocksize   = 1,
+                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+                .cra_module      = THIS_MODULE,
+        },
+        .init        = nx_crypto_ctx_aes_gcm_init,
+        .exit        = nx_crypto_ctx_aead_exit,
+        .ivsize      = 8,
+        .maxauthsize = AES_BLOCK_SIZE,
+        .setkey      = gcm4106_aes_nx_set_key,
+        .setauthsize = gcm4106_aes_nx_setauthsize,
+        .encrypt     = gcm4106_aes_nx_encrypt,
+        .decrypt     = gcm4106_aes_nx_decrypt,
};
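For context on the assoclen changes above: under the new AEAD interface the
associated data is carried as a prefix of req->src (and req->dst) rather than
in a separate req->assoc scatterlist, which is why the driver now offsets its
scatterlist walks by req->assoclen and reads or writes the tag at
req->assoclen + nbytes. Note also that the plain gcm(aes) alg now advertises
.ivsize = 12, the GCM nonce size. A minimal caller sketch under this layout
follows; the function name, the hard-coded AES-128 key length, and the error
paths are illustrative assumptions, not part of this patch:

/* Sketch: request layout under the new AEAD API. The scatterlist holds
 * [AD | payload | tag] back to back, matching the offsets added by the
 * converted driver. Names here are illustrative assumptions. */
#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int example_gcm_encrypt(struct scatterlist *sg, unsigned int assoclen,
                               unsigned int ptlen, const u8 *key, u8 *iv)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        int rc;

        /* Mask out async implementations so the encrypt call is synchronous. */
        tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = crypto_aead_setkey(tfm, key, 16);  /* AES-128, an assumption */
        if (rc)
                goto out_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                rc = -ENOMEM;
                goto out_tfm;
        }

        aead_request_set_ad(req, assoclen);             /* AD = prefix of sg */
        aead_request_set_crypt(req, sg, sg, ptlen, iv); /* in-place crypt */
        rc = crypto_aead_encrypt(req);  /* tag written at assoclen + ptlen */

        aead_request_free(req);
out_tfm:
        crypto_free_aead(tfm);
        return rc;
}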
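The decrypt path also replaces memcmp() with crypto_memneq() for the tag
check. memcmp() may return as soon as the first differing byte is found, so
its timing can leak how much of a forged tag matched; crypto_memneq() touches
every byte regardless of where the buffers differ. A userspace sketch of the
same idea follows (the kernel's own implementation lives in crypto/memneq.c
and is more carefully hardened):

/* Userspace sketch of a constant-time comparison in the spirit of the
 * kernel's crypto_memneq(). Returns 0 iff the buffers are equal. */
#include <stddef.h>
#include <stdio.h>

static unsigned long ct_memneq(const void *a, const void *b, size_t size)
{
        const unsigned char *pa = a, *pb = b;
        unsigned long neq = 0;

        while (size--)
                neq |= *pa++ ^ *pb++;   /* accumulate, never exit early */

        return neq;
}

int main(void)
{
        unsigned char itag[16] = { 0x01 };      /* tag read from req->src */
        unsigned char otag[16] = { 0x01 };      /* tag computed by the engine */

        /* Mirrors the patched driver: mismatch maps to -EBADMSG. */
        if (ct_memneq(itag, otag, sizeof(itag)))
                puts("tag mismatch (-EBADMSG in the driver)");
        else
                puts("tag ok");
        return 0;
}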
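Finally, aead_alg instances like the two above are registered through the
AEAD-specific crypto_register_aead()/crypto_unregister_aead() API rather than
crypto_register_alg(). A minimal sketch, assuming a standalone module wrapper
for illustration; the NX driver itself registers these from its own setup
code in nx.c:

/* Sketch: registering the converted AEAD algorithms. The module
 * boilerplate is an illustrative assumption, not part of this patch. */
#include <linux/module.h>
#include <crypto/internal/aead.h>

extern struct aead_alg nx_gcm_aes_alg;
extern struct aead_alg nx_gcm4106_aes_alg;

static int __init nx_gcm_sketch_init(void)
{
        int rc = crypto_register_aead(&nx_gcm_aes_alg);

        if (rc)
                return rc;

        rc = crypto_register_aead(&nx_gcm4106_aes_alg);
        if (rc)
                crypto_unregister_aead(&nx_gcm_aes_alg);

        return rc;
}

static void __exit nx_gcm_sketch_exit(void)
{
        crypto_unregister_aead(&nx_gcm4106_aes_alg);
        crypto_unregister_aead(&nx_gcm_aes_alg);
}

module_init(nx_gcm_sketch_init);
module_exit(nx_gcm_sketch_exit);
MODULE_LICENSE("GPL");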