path: root/kernel/drivers/crypto/ccp
author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/crypto/ccp
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources are
taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, one patch collided: "Force tick interrupt and get rid
of softirq magic" (I70131fb85). The colliding hunks were dropped because
their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/crypto/ccp')
-rw-r--r--  kernel/drivers/crypto/ccp/Kconfig                |  14
-rw-r--r--  kernel/drivers/crypto/ccp/ccp-crypto-aes-cmac.c  |  20
-rw-r--r--  kernel/drivers/crypto/ccp/ccp-crypto-main.c      |   6
-rw-r--r--  kernel/drivers/crypto/ccp/ccp-crypto-sha.c       |  13
-rw-r--r--  kernel/drivers/crypto/ccp/ccp-ops.c              | 117
-rw-r--r--  kernel/drivers/crypto/ccp/ccp-pci.c              |   2
-rw-r--r--  kernel/drivers/crypto/ccp/ccp-platform.c         |  83
7 files changed, 131 insertions(+), 124 deletions(-)
diff --git a/kernel/drivers/crypto/ccp/Kconfig b/kernel/drivers/crypto/ccp/Kconfig
index 7639ffc36..3cd848106 100644
--- a/kernel/drivers/crypto/ccp/Kconfig
+++ b/kernel/drivers/crypto/ccp/Kconfig
@@ -5,20 +5,18 @@ config CRYPTO_DEV_CCP_DD
select HW_RANDOM
help
Provides the interface to use the AMD Cryptographic Coprocessor
- which can be used to accelerate or offload encryption operations
- such as SHA, AES and more. If you choose 'M' here, this module
- will be called ccp.
+ which can be used to offload encryption operations such as SHA,
+ AES and more. If you choose 'M' here, this module will be called
+ ccp.
config CRYPTO_DEV_CCP_CRYPTO
- tristate "Encryption and hashing acceleration support"
+ tristate "Encryption and hashing offload support"
depends on CRYPTO_DEV_CCP_DD
default m
- select CRYPTO_ALGAPI
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
select CRYPTO_AUTHENC
help
Support for using the cryptographic API with the AMD Cryptographic
- Coprocessor. This module supports acceleration and offload of SHA
- and AES algorithms. If you choose 'M' here, this module will be
- called ccp_crypto.
+ Coprocessor. This module supports offload of SHA and AES algorithms.
+ If you choose 'M' here, this module will be called ccp_crypto.
diff --git a/kernel/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/kernel/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index ea7e84469..d89f20c04 100644
--- a/kernel/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/kernel/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -118,10 +118,19 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
if (rctx->buf_count) {
sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+ if (!sg) {
+ ret = -EINVAL;
+ goto e_free;
+ }
}
- if (nbytes)
+ if (nbytes) {
sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+ if (!sg) {
+ ret = -EINVAL;
+ goto e_free;
+ }
+ }
if (need_pad) {
int pad_length = block_size - (len & (block_size - 1));
@@ -132,6 +141,10 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
rctx->pad[0] = 0x80;
sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
+ if (!sg) {
+ ret = -EINVAL;
+ goto e_free;
+ }
}
if (sg) {
sg_mark_end(sg);
@@ -163,6 +176,11 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
return ret;
+
+e_free:
+ sg_free_table(&rctx->data_sg);
+
+ return ret;
}
static int ccp_aes_cmac_init(struct ahash_request *req)
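The hunks above turn previously unchecked ccp_crypto_sg_table_add() calls into guarded ones: each result is tested for NULL, and a shared e_free label releases the partially built table. The same guard-and-unwind shape is applied to ccp-crypto-sha.c further down. A minimal, self-contained sketch of the pattern (build_hash_sg() is a hypothetical helper; ccp_crypto_sg_table_add(), sg_mark_end() and sg_free_table() are the real calls used by the patch):

/*
 * Sketch: append to an sg_table, bail out with -EINVAL on failure,
 * and release the partially built table on the shared error path.
 */
static int build_hash_sg(struct sg_table *tbl, struct scatterlist *src)
{
	struct scatterlist *sg;
	int ret;

	sg = ccp_crypto_sg_table_add(tbl, src);
	if (!sg) {
		ret = -EINVAL;
		goto e_free;
	}
	sg_mark_end(sg);	/* terminate the assembled list */

	return 0;

e_free:
	sg_free_table(tbl);	/* undo the sg_alloc_table() work */
	return ret;
}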
diff --git a/kernel/drivers/crypto/ccp/ccp-crypto-main.c b/kernel/drivers/crypto/ccp/ccp-crypto-main.c
index bdec01ec6..e0380e59c 100644
--- a/kernel/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/kernel/drivers/crypto/ccp/ccp-crypto-main.c
@@ -305,14 +305,16 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
for (sg = table->sgl; sg; sg = sg_next(sg))
if (!sg_page(sg))
break;
- BUG_ON(!sg);
+ if (WARN_ON(!sg))
+ return NULL;
for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
sg_set_page(sg, sg_page(sg_add), sg_add->length,
sg_add->offset);
sg_last = sg;
}
- BUG_ON(sg_add);
+ if (WARN_ON(sg_add))
+ return NULL;
return sg_last;
}
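BUG_ON() would panic the whole machine on what is only a driver-level inconsistency; WARN_ON() logs a stack trace and evaluates to the tested condition, so the function can instead report failure by returning NULL for the callers above to turn into -EINVAL. A minimal sketch of the idiom (first_free_slot() is a hypothetical reduction of the function being patched):

/* Sketch: WARN_ON() returns its condition, so it can gate a bail-out. */
static struct scatterlist *first_free_slot(struct sg_table *table)
{
	struct scatterlist *sg;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;

	if (WARN_ON(!sg))	/* table full: warn once and recover */
		return NULL;	/* callers map NULL to -EINVAL */

	return sg;
}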
diff --git a/kernel/drivers/crypto/ccp/ccp-crypto-sha.c b/kernel/drivers/crypto/ccp/ccp-crypto-sha.c
index 507b34e0c..d14b3f28e 100644
--- a/kernel/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/kernel/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -107,7 +107,15 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
+ if (!sg) {
+ ret = -EINVAL;
+ goto e_free;
+ }
sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
+ if (!sg) {
+ ret = -EINVAL;
+ goto e_free;
+ }
sg_mark_end(sg);
sg = rctx->data_sg.sgl;
@@ -142,6 +150,11 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
return ret;
+
+e_free:
+ sg_free_table(&rctx->data_sg);
+
+ return ret;
}
static int ccp_sha_init(struct ahash_request *req)
diff --git a/kernel/drivers/crypto/ccp/ccp-ops.c b/kernel/drivers/crypto/ccp/ccp-ops.c
index 71f2e3c89..c6e883b29 100644
--- a/kernel/drivers/crypto/ccp/ccp-ops.c
+++ b/kernel/drivers/crypto/ccp/ccp-ops.c
@@ -52,8 +52,7 @@ struct ccp_dm_workarea {
struct ccp_sg_workarea {
struct scatterlist *sg;
- unsigned int nents;
- unsigned int length;
+ int nents;
struct scatterlist *dma_sg;
struct device *dma_dev;
@@ -496,8 +495,10 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
if (!sg)
return 0;
- wa->nents = sg_nents(sg);
- wa->length = sg->length;
+ wa->nents = sg_nents_for_len(sg, len);
+ if (wa->nents < 0)
+ return wa->nents;
+
wa->bytes_left = len;
wa->sg_used = 0;
@@ -610,15 +611,16 @@ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
1);
}
-static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
- struct scatterlist *sg,
- unsigned int len, unsigned int se_len,
- bool sign_extend)
+static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
+ struct scatterlist *sg,
+ unsigned int len, unsigned int se_len,
+ bool sign_extend)
{
unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
u8 buffer[CCP_REVERSE_BUF_SIZE];
- BUG_ON(se_len > sizeof(buffer));
+ if (WARN_ON(se_len > sizeof(buffer)))
+ return -EINVAL;
sg_offset = len;
dm_offset = 0;
@@ -641,6 +643,8 @@ static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
se_len - ksb_len);
}
}
+
+ return 0;
}
static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
@@ -1605,8 +1609,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ret)
goto e_ksb;
- ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
- false);
+ ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
+ CCP_KSB_BYTES, false);
+ if (ret)
+ goto e_exp;
ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
@@ -1622,11 +1628,15 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ret)
goto e_exp;
- ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
- false);
+ ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
+ CCP_KSB_BYTES, false);
+ if (ret)
+ goto e_src;
src.address += o_len; /* Adjust the address for the copy operation */
- ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
- false);
+ ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
+ CCP_KSB_BYTES, false);
+ if (ret)
+ goto e_src;
src.address -= o_len; /* Reset the address to original value */
/* Prepare the output area for the operation */
@@ -1840,21 +1850,27 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
save = src.address;
/* Copy the ECC modulus */
- ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
+ CCP_ECC_OPERAND_SIZE, false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
/* Copy the first operand */
- ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
- ecc->u.mm.operand_1_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
+ ecc->u.mm.operand_1_len,
+ CCP_ECC_OPERAND_SIZE, false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
/* Copy the second operand */
- ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
- ecc->u.mm.operand_2_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
+ ecc->u.mm.operand_2_len,
+ CCP_ECC_OPERAND_SIZE, false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
}
@@ -1959,18 +1975,24 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
save = src.address;
/* Copy the ECC modulus */
- ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
+ CCP_ECC_OPERAND_SIZE, false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
/* Copy the first point X and Y coordinate */
- ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
- ecc->u.pm.point_1.x_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
+ ecc->u.pm.point_1.x_len,
+ CCP_ECC_OPERAND_SIZE, false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
- ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
- ecc->u.pm.point_1.y_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
+ ecc->u.pm.point_1.y_len,
+ CCP_ECC_OPERAND_SIZE, false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
/* Set the first point Z coordinate to 1 */
@@ -1979,13 +2001,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
/* Copy the second point X and Y coordinate */
- ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
- ecc->u.pm.point_2.x_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
+ ecc->u.pm.point_2.x_len,
+ CCP_ECC_OPERAND_SIZE, false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
- ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
- ecc->u.pm.point_2.y_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
+ ecc->u.pm.point_2.y_len,
+ CCP_ECC_OPERAND_SIZE, false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
/* Set the second point Z coordinate to 1 */
@@ -1993,16 +2019,21 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
src.address += CCP_ECC_OPERAND_SIZE;
} else {
/* Copy the Domain "a" parameter */
- ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
- ecc->u.pm.domain_a_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
+ ecc->u.pm.domain_a_len,
+ CCP_ECC_OPERAND_SIZE, false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
/* Copy the scalar value */
- ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
- ecc->u.pm.scalar_len,
- CCP_ECC_OPERAND_SIZE, false);
+ ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
+ ecc->u.pm.scalar_len,
+ CCP_ECC_OPERAND_SIZE,
+ false);
+ if (ret)
+ goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
}
}
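Two related hardening changes run through ccp-ops.c: sg_nents_for_len() replaces sg_nents() so a scatterlist that cannot cover the requested length is rejected up front (it returns a negative errno in that case, which is why nents becomes a signed int), and ccp_reverse_set_dm_area() now returns an int that every call site checks before advancing src.address. A minimal sketch of the validation step (sg_workarea_init() is a hypothetical reduction of ccp_init_sg_workarea(); sg_nents_for_len() is the real API):

/* Sketch: reject scatterlists that are shorter than the request. */
static int sg_workarea_init(struct ccp_sg_workarea *wa,
			    struct scatterlist *sg, u64 len)
{
	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);	/* < 0 if sg is too short */
	if (wa->nents < 0)
		return wa->nents;		/* propagate the errno */

	wa->bytes_left = len;
	wa->sg_used = 0;

	return 0;
}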
diff --git a/kernel/drivers/crypto/ccp/ccp-pci.c b/kernel/drivers/crypto/ccp/ccp-pci.c
index af190d479..6ade02f04 100644
--- a/kernel/drivers/crypto/ccp/ccp-pci.c
+++ b/kernel/drivers/crypto/ccp/ccp-pci.c
@@ -319,7 +319,7 @@ static const struct pci_device_id ccp_pci_table[] = {
MODULE_DEVICE_TABLE(pci, ccp_pci_table);
static struct pci_driver ccp_pci_driver = {
- .name = "AMD Cryptographic Coprocessor",
+ .name = "ccp",
.id_table = ccp_pci_table,
.probe = ccp_pci_probe,
.remove = ccp_pci_remove,
diff --git a/kernel/drivers/crypto/ccp/ccp-platform.c b/kernel/drivers/crypto/ccp/ccp-platform.c
index b1c20b2b5..01b50cb4c 100644
--- a/kernel/drivers/crypto/ccp/ccp-platform.c
+++ b/kernel/drivers/crypto/ccp/ccp-platform.c
@@ -29,7 +29,6 @@
#include "ccp-dev.h"
struct ccp_platform {
- int use_acpi;
int coherent;
};
@@ -90,64 +89,12 @@ static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
return NULL;
}
-#ifdef CONFIG_ACPI
-static int ccp_acpi_support(struct ccp_device *ccp)
-{
- struct ccp_platform *ccp_platform = ccp->dev_specific;
- struct acpi_device *adev = ACPI_COMPANION(ccp->dev);
- acpi_handle handle;
- acpi_status status;
- unsigned long long data;
- int cca;
-
- /* Retrieve the device cache coherency value */
- handle = adev->handle;
- do {
- status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
- if (!ACPI_FAILURE(status)) {
- cca = data;
- break;
- }
- } while (!ACPI_FAILURE(status));
-
- if (ACPI_FAILURE(status)) {
- dev_err(ccp->dev, "error obtaining acpi coherency value\n");
- return -EINVAL;
- }
-
- ccp_platform->coherent = !!cca;
-
- return 0;
-}
-#else /* CONFIG_ACPI */
-static int ccp_acpi_support(struct ccp_device *ccp)
-{
- return -EINVAL;
-}
-#endif
-
-#ifdef CONFIG_OF
-static int ccp_of_support(struct ccp_device *ccp)
-{
- struct ccp_platform *ccp_platform = ccp->dev_specific;
-
- ccp_platform->coherent = of_dma_is_coherent(ccp->dev->of_node);
-
- return 0;
-}
-#else
-static int ccp_of_support(struct ccp_device *ccp)
-{
- return -EINVAL;
-}
-#endif
-
static int ccp_platform_probe(struct platform_device *pdev)
{
struct ccp_device *ccp;
struct ccp_platform *ccp_platform;
struct device *dev = &pdev->dev;
- struct acpi_device *adev = ACPI_COMPANION(dev);
+ enum dev_dma_attr attr;
struct resource *ior;
int ret;
@@ -164,8 +111,6 @@ static int ccp_platform_probe(struct platform_device *pdev)
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
- ccp_platform->use_acpi = (!adev || acpi_disabled) ? 0 : 1;
-
ior = ccp_find_mmio_area(ccp);
ccp->io_map = devm_ioremap_resource(dev, ior);
if (IS_ERR(ccp->io_map)) {
@@ -174,26 +119,24 @@ static int ccp_platform_probe(struct platform_device *pdev)
}
ccp->io_regs = ccp->io_map;
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+ attr = device_get_dma_attr(dev);
+ if (attr == DEV_DMA_NOT_SUPPORTED) {
+ dev_err(dev, "DMA is not supported");
goto e_err;
}
- if (ccp_platform->use_acpi)
- ret = ccp_acpi_support(ccp);
- else
- ret = ccp_of_support(ccp);
- if (ret)
- goto e_err;
-
+ ccp_platform->coherent = (attr == DEV_DMA_COHERENT);
if (ccp_platform->coherent)
ccp->axcache = CACHE_WB_NO_ALLOC;
else
ccp->axcache = CACHE_NONE;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+ goto e_err;
+ }
+
dev_set_drvdata(dev, ccp);
ret = ccp_init(ccp);
@@ -276,6 +219,7 @@ static const struct acpi_device_id ccp_acpi_match[] = {
{ "AMDI0C00", 0 },
{ },
};
+MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
#endif
#ifdef CONFIG_OF
@@ -283,11 +227,12 @@ static const struct of_device_id ccp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a" },
{ },
};
+MODULE_DEVICE_TABLE(of, ccp_of_match);
#endif
static struct platform_driver ccp_platform_driver = {
.driver = {
- .name = "AMD Cryptographic Coprocessor",
+ .name = "ccp",
#ifdef CONFIG_ACPI
.acpi_match_table = ccp_acpi_match,
#endif
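The platform-probe rework replaces the separate ACPI (_CCA evaluation) and OF (of_dma_is_coherent()) coherency probes with a single call to the bus-agnostic device_get_dma_attr(), and only sets the 48-bit DMA mask once the device is known to support DMA at all. A minimal sketch of the resulting flow (probe_dma_coherency() is a hypothetical wrapper; device_get_dma_attr(), the DEV_DMA_* values and dma_set_mask_and_coherent() are the real APIs):

#include <linux/property.h>
#include <linux/dma-mapping.h>

/* Sketch: one firmware-agnostic coherency query (ACPI _CCA or the OF
 * "dma-coherent" property), then the DMA mask setup. */
static int probe_dma_coherency(struct device *dev, bool *coherent)
{
	enum dev_dma_attr attr = device_get_dma_attr(dev);

	if (attr == DEV_DMA_NOT_SUPPORTED)
		return -ENODEV;	/* firmware reports no DMA support */

	*coherent = (attr == DEV_DMA_COHERENT);

	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
}

The MODULE_DEVICE_TABLE(acpi, ...) and MODULE_DEVICE_TABLE(of, ...) additions export the match tables into the module image so userspace can autoload the driver when firmware enumerates a matching device, and renaming both drivers to "ccp" keeps the driver-core name short and consistent across the PCI and platform front ends.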