Diffstat (limited to 'kernel/drivers/crypto/qat')
-rw-r--r--  kernel/drivers/crypto/qat/Kconfig | 21
-rw-r--r--  kernel/drivers/crypto/qat/Makefile | 1
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/.gitignore | 1
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/Makefile | 14
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_accel_devices.h | 47
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_accel_engine.c | 47
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_admin.c | 290
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_aer.c | 8
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_cfg.c | 9
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_cfg_common.h | 3
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_cfg_user.h | 12
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_common_drv.h | 60
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_ctl_drv.c | 15
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_dev_mgr.c | 286
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_hw_arbiter.c (renamed from kernel/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c) | 37
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_init.c | 106
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 438
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h | 146
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_sriov.c | 306
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_transport.c | 15
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_transport_access_macros.h | 5
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/adf_transport_debug.c | 16
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/icp_qat_fw.h | 2
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h | 112
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/qat_algs.c | 471
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/qat_asym_algs.c | 755
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/qat_crypto.c | 101
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/qat_crypto.h | 2
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/qat_hal.c | 19
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 | 11
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1 | 4
-rw-r--r--  kernel/drivers/crypto/qat/qat_common/qat_uclo.c | 27
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xcc/Makefile | 5
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xcc/adf_admin.c | 145
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 38
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 12
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 102
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.h | 9
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 139
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xccvf/Makefile | 5
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 172
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | 68
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 393
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h (renamed from kernel/drivers/crypto/qat/qat_dh895xcc/qat_admin.c) | 70
-rw-r--r--  kernel/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c | 258
45 files changed, 3984 insertions(+), 819 deletions(-)
diff --git a/kernel/drivers/crypto/qat/Kconfig b/kernel/drivers/crypto/qat/Kconfig
index 49bede2a9..eefccf7b8 100644
--- a/kernel/drivers/crypto/qat/Kconfig
+++ b/kernel/drivers/crypto/qat/Kconfig
@@ -2,18 +2,18 @@ config CRYPTO_DEV_QAT
tristate
select CRYPTO_AEAD
select CRYPTO_AUTHENC
- select CRYPTO_ALGAPI
- select CRYPTO_AES
- select CRYPTO_CBC
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AKCIPHER
+ select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
select FW_LOADER
+ select ASN1
config CRYPTO_DEV_QAT_DH895xCC
tristate "Support for Intel(R) DH895xCC"
depends on X86 && PCI
- default n
select CRYPTO_DEV_QAT
help
Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
@@ -21,3 +21,16 @@ config CRYPTO_DEV_QAT_DH895xCC
To compile this as a module, choose M here: the module
will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+ tristate "Support for Intel(R) DH895xCC Virtual Function"
+ depends on X86 && PCI
+ select PCI_IOV
+ select CRYPTO_DEV_QAT
+
+ help
+ Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+ Virtual Function for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_dh895xccvf.
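
In practice, a guest kernel needs only CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m (it pulls in CRYPTO_DEV_QAT via select); the resulting qat_dh895xccvf module binds to the 0x443 virtual functions (ADF_DH895XCCIOV_PCI_DEVICE_ID, added below in adf_accel_devices.h) that appear once the host-side PF driver enables SR-IOV.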
diff --git a/kernel/drivers/crypto/qat/Makefile b/kernel/drivers/crypto/qat/Makefile
index d11481be2..a3ce0b70e 100644
--- a/kernel/drivers/crypto/qat/Makefile
+++ b/kernel/drivers/crypto/qat/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
diff --git a/kernel/drivers/crypto/qat/qat_common/.gitignore b/kernel/drivers/crypto/qat/qat_common/.gitignore
new file mode 100644
index 000000000..ee328374d
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_common/.gitignore
@@ -0,0 +1 @@
+*-asn1.[ch]
diff --git a/kernel/drivers/crypto/qat/qat_common/Makefile b/kernel/drivers/crypto/qat/qat_common/Makefile
index e0424dc38..9e9e196c6 100644
--- a/kernel/drivers/crypto/qat/qat_common/Makefile
+++ b/kernel/drivers/crypto/qat/qat_common/Makefile
@@ -1,3 +1,11 @@
+$(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
+ $(obj)/qat_rsapubkey-asn1.h
+$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
+ $(obj)/qat_rsaprivkey-asn1.h
+
+clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
+clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
+
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_ctl_drv.o \
@@ -6,9 +14,15 @@ intel_qat-objs := adf_cfg.o \
adf_accel_engine.o \
adf_aer.o \
adf_transport.o \
+ adf_admin.o \
+ adf_hw_arbiter.o \
qat_crypto.o \
qat_algs.o \
+ qat_rsapubkey-asn1.o \
+ qat_rsaprivkey-asn1.o \
+ qat_asym_algs.o \
qat_uclo.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
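
The two *-asn1.o objects are generated rather than checked in: the kernel build's generic ASN.1 rule (scripts/Makefile.lib runs scripts/asn1_compiler over each %.asn1 grammar to emit %-asn1.c and %-asn1.h) builds them from the qat_rsapubkey.asn1 and qat_rsaprivkey.asn1 files added later in this patch, which is also why the Kconfig change above selects ASN1. The explicit dependency lines here only ensure the generated headers exist before the objects are compiled, and clean-files removes the generated sources on make clean.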
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_accel_devices.h b/kernel/drivers/crypto/qat/qat_common/adf_accel_devices.h
index f22ce7169..ca853d50b 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/kernel/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -46,14 +46,17 @@
*/
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
-#include <linux/proc_fs.h>
#include <linux/io.h>
+#include <linux/ratelimit.h>
#include "adf_cfg_common.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
@@ -80,6 +83,7 @@ struct adf_bar {
struct adf_accel_msix {
struct msix_entry *entries;
char **names;
+ u32 num_entries;
} __packed;
struct adf_accel_pci {
@@ -100,6 +104,7 @@ enum dev_sku_info {
DEV_SKU_2,
DEV_SKU_3,
DEV_SKU_4,
+ DEV_SKU_VF,
DEV_SKU_UNKNOWN,
};
@@ -114,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info)
return "SKU3";
case DEV_SKU_4:
return "SKU4";
+ case DEV_SKU_VF:
+ return "SKUVF";
case DEV_SKU_UNKNOWN:
default:
break;
@@ -136,23 +143,29 @@ struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
uint32_t (*get_accel_mask)(uint32_t fuse);
uint32_t (*get_ae_mask)(uint32_t fuse);
+ uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+ uint32_t (*get_pf2vf_offset)(uint32_t i);
+ uint32_t (*get_vintmsk_offset)(uint32_t i);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
- void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
- void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
void (*free_irq)(struct adf_accel_dev *accel_dev);
void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+ int (*send_admin_init)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
+ void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
+ const uint32_t **cfg);
+ void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
+ int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
const char *fw_name;
- uint32_t pci_dev_id;
+ const char *fw_mmp_name;
uint32_t fuses;
uint32_t accel_capabilities_mask;
uint16_t accel_mask;
@@ -164,6 +177,7 @@ struct adf_hw_device_data {
uint8_t num_accel;
uint8_t num_logical_accel;
uint8_t num_engines;
+ uint8_t min_iov_compat_ver;
} __packed;
/* CSR write macro */
@@ -185,6 +199,16 @@ struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
struct icp_qat_fw_loader_handle *fw_loader;
const struct firmware *uof_fw;
+ const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+ struct adf_accel_dev *accel_dev;
+ struct tasklet_struct vf2pf_bh_tasklet;
+ struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+ struct ratelimit_state vf2pf_ratelimit;
+ u32 vf_nr;
+ bool init;
};
struct adf_accel_dev {
@@ -200,6 +224,21 @@ struct adf_accel_dev {
struct list_head list;
struct module *owner;
struct adf_accel_pci accel_pci_dev;
+ union {
+ struct {
+ /* vf_info is non-zero when SR-IOV is init'ed */
+ struct adf_accel_vf_info *vf_info;
+ } pf;
+ struct {
+ char *irq_name;
+ struct tasklet_struct pf2vf_bh_tasklet;
+ struct mutex vf2pf_lock; /* protect CSR access */
+ struct completion iov_msg_completion;
+ uint8_t compatible;
+ uint8_t pf_version;
+ } vf;
+ };
+ bool is_vf;
uint8_t accel_id;
} __packed;
#endif
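
The anonymous pf/vf union added to struct adf_accel_dev keeps the two ends of the PF<->VF channel from both occupying the common structure: exactly one arm is meaningful, selected by the new is_vf flag. A minimal sketch of the intended access pattern (the helper below is hypothetical, not part of the patch):

	/* Hypothetical helper: is_vf gates which arm of the union is valid. */
	static bool adf_dev_iov_active(struct adf_accel_dev *accel_dev)
	{
		if (accel_dev->is_vf)
			/* set from the PF's reply to the compat request */
			return accel_dev->vf.pf_version != 0;

		/* non-NULL once SR-IOV has been enabled on the PF */
		return accel_dev->pf.vf_info != NULL;
	}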
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_accel_engine.c b/kernel/drivers/crypto/qat/qat_common/adf_accel_engine.c
index 7f8b66c91..20b08bdcb 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/kernel/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -55,24 +55,36 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- void *uof_addr;
- uint32_t uof_size;
+ void *uof_addr, *mmp_addr;
+ u32 uof_size, mmp_size;
+ if (!hw_device->fw_name)
+ return 0;
+
+ if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+ &accel_dev->accel_pci_dev.pci_dev->dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+ hw_device->fw_mmp_name);
+ return -EFAULT;
+ }
if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
&accel_dev->accel_pci_dev.pci_dev->dev)) {
- dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n",
+ dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
hw_device->fw_name);
- return -EFAULT;
+ goto out_err;
}
uof_size = loader_data->uof_fw->size;
uof_addr = (void *)loader_data->uof_fw->data;
+ mmp_size = loader_data->mmp_fw->size;
+ mmp_addr = (void *)loader_data->mmp_fw->data;
+ qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size);
if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
goto out_err;
}
if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
- dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
goto out_err;
}
return 0;
@@ -85,14 +97,17 @@ out_err:
void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return;
qat_uclo_del_uof_obj(loader_data->fw_loader);
qat_hal_deinit(loader_data->fw_loader);
-
- if (loader_data->uof_fw)
- release_firmware(loader_data->uof_fw);
-
+ release_firmware(loader_data->uof_fw);
+ release_firmware(loader_data->mmp_fw);
loader_data->uof_fw = NULL;
+ loader_data->mmp_fw = NULL;
loader_data->fw_loader = NULL;
}
@@ -102,6 +117,9 @@ int adf_ae_start(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ if (!hw_data->fw_name)
+ return 0;
+
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
if (hw_data->ae_mask & (1 << ae)) {
qat_hal_start(loader_data->fw_loader, ae, 0xFF);
@@ -120,6 +138,9 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ if (!hw_data->fw_name)
+ return 0;
+
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
if (hw_data->ae_mask & (1 << ae)) {
qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
@@ -146,6 +167,10 @@ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
int adf_ae_init(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return 0;
loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
if (!loader_data)
@@ -169,6 +194,10 @@ int adf_ae_init(struct adf_accel_dev *accel_dev)
int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (!hw_device->fw_name)
+ return 0;
qat_hal_deinit(loader_data->fw_loader);
kfree(accel_dev->fw_loader);
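
Every entry point in this file now returns early when hw_device->fw_name is unset. That is the hook by which a device with no loadable firmware (the VF, whose requests are serviced by the PF-owned engines) skips the whole MMP/UOF path:

	/* Sketch: a VF's hw_data init can simply leave the firmware names
	 * NULL (the real VF setup is in
	 * qat_dh895xccvf/adf_dh895xccvf_hw_data.c, not shown in this
	 * excerpt), and then adf_ae_init(), adf_ae_fw_load(),
	 * adf_ae_start() and adf_ae_stop() above all become no-ops
	 * that return 0. */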
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_admin.c b/kernel/drivers/crypto/qat/qat_common/adf_admin.c
new file mode 100644
index 000000000..147d755fe
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_common/adf_admin.c
@@ -0,0 +1,290 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_init_admin.h"
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+
+static const u8 const_tab[1024] = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+ dma_addr_t phy_addr;
+ dma_addr_t const_tbl_addr;
+ void *virt_addr;
+ void __iomem *mailbox_addr;
+ struct mutex lock; /* protects adf_admin_comms struct */
+};
+
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+ void *in, void *out)
+{
+ struct adf_admin_comms *admin = accel_dev->admin;
+ int offset = ae * ADF_ADMINMSG_LEN * 2;
+ void __iomem *mailbox = admin->mailbox_addr;
+ int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+ int times, received;
+
+ mutex_lock(&admin->lock);
+
+ if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+ mutex_unlock(&admin->lock);
+ return -EAGAIN;
+ }
+
+ memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+ ADF_CSR_WR(mailbox, mb_offset, 1);
+ received = 0;
+ for (times = 0; times < 50; times++) {
+ msleep(20);
+ if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+ received = 1;
+ break;
+ }
+ }
+ if (received)
+ memcpy(out, admin->virt_addr + offset +
+ ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+ else
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send admin msg to accelerator\n");
+
+ mutex_unlock(&admin->lock);
+ return received ? 0 : -EFAULT;
+}
+
+static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_req req;
+ struct icp_qat_fw_init_admin_resp resp;
+ int i;
+
+ memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+ req.init_admin_cmd_id = cmd;
+
+ if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
+ req.init_cfg_sz = 1024;
+ req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+ }
+ for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+ memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+ if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+ resp.init_resp_hdr.status)
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends admin init message to the FW
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+ int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+
+ if (ret)
+ return ret;
+ return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *csr = pmisc->virt_addr;
+ void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+ u64 reg_val;
+
+ admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!admin)
+ return -ENOMEM;
+ admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &admin->phy_addr, GFP_KERNEL);
+ if (!admin->virt_addr) {
+ dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+ kfree(admin);
+ return -ENOMEM;
+ }
+
+ admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
+ (void *) const_tab, 1024,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+ admin->const_tbl_addr))) {
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_addr, admin->phy_addr);
+ kfree(admin);
+ return -ENOMEM;
+ }
+ reg_val = (u64)admin->phy_addr;
+ ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+ ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+ mutex_init(&admin->lock);
+ admin->mailbox_addr = mailbox;
+ accel_dev->admin = admin;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+ struct adf_admin_comms *admin = accel_dev->admin;
+
+ if (!admin)
+ return;
+
+ if (admin->virt_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_addr, admin->phy_addr);
+
+ dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
+ DMA_TO_DEVICE);
+ mutex_destroy(&admin->lock);
+ kfree(admin);
+ accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
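
The mailbox protocol above is a one-slot handshake per accel engine: the host posts a 32-byte request into the shared DMA page, rings the per-AE doorbell CSR, and polls for the firmware to clear it, with the 32-byte response landing just past the request. The 1 KB const_tab mapped DMA_TO_DEVICE appears to carry, between runs of padding, the standard MD5 and SHA-1/224/256/384/512 initial hash values that the firmware fetches via ICP_QAT_FW_CONSTANTS_CFG. In outline (a timing sketch of the code above, not a drop-in):

	/* One admin exchange with accel engine 'ae':
	 *
	 *	memcpy(virt + ae * 64, req, 32);       host posts request
	 *	ADF_CSR_WR(mailbox, ae * 0x1000, 1);   doorbell: slot busy
	 *	    firmware consumes the request, writes the response to
	 *	    virt + ae * 64 + 32, then clears the CSR back to 0
	 *	poll the CSR every 20 ms, up to 50 tries (~1 s);
	 *	on seeing 0: memcpy(resp, virt + ae * 64 + 32, 32);
	 */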
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_aer.c b/kernel/drivers/crypto/qat/qat_common/adf_aer.c
index 2dbc733b8..0a5ca0ba5 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/kernel/drivers/crypto/qat/qat_common/adf_aer.c
@@ -88,9 +88,15 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
struct pci_dev *parent = pdev->bus->self;
uint16_t bridge_ctl = 0;
+ if (accel_dev->is_vf)
+ return;
+
dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
accel_dev->accel_id);
+ if (!parent)
+ parent = pdev;
+
if (!pci_wait_for_pending_transaction(pdev))
dev_info(&GET_DEV(accel_dev),
"Transaction still in progress. Proceeding\n");
@@ -206,7 +212,7 @@ static struct pci_error_handlers adf_err_handler = {
* QAT acceleration device accel_dev.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
{
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_cfg.c b/kernel/drivers/crypto/qat/qat_common/adf_cfg.c
index ab65bc274..d08797905 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/kernel/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -123,7 +123,7 @@ static const struct file_operations qat_dev_cfg_fops = {
* The table stores device specific config values.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
{
@@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
{
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+ if (!dev_cfg_data)
+ return;
+
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
@@ -276,7 +279,7 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
* in the given acceleration device
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
const char *section_name,
@@ -327,7 +330,7 @@ EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
* will be stored.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
{
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_cfg_common.h b/kernel/drivers/crypto/qat/qat_common/adf_cfg_common.h
index 88b82187a..c697fb1cd 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/kernel/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -60,7 +60,7 @@
#define ADF_CFG_NO_DEVICE 0xFF
#define ADF_CFG_AFFINITY_WHATEVER 0xFF
#define MAX_DEVICE_NAME_SIZE 32
-#define ADF_MAX_DEVICES 32
+#define ADF_MAX_DEVICES (32 * 32)
enum adf_cfg_val_type {
ADF_DEC,
@@ -71,6 +71,7 @@ enum adf_cfg_val_type {
enum adf_device_type {
DEV_UNKNOWN = 0,
DEV_DH895XCC,
+ DEV_DH895XCCVF,
};
struct adf_dev_status_info {
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_cfg_user.h b/kernel/drivers/crypto/qat/qat_common/adf_cfg_user.h
index 0c38a155a..ef5988afd 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_cfg_user.h
+++ b/kernel/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -54,14 +54,6 @@ struct adf_user_cfg_key_val {
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
union {
- char *user_val_ptr;
- uint64_t padding1;
- };
- union {
- struct adf_user_cfg_key_val *prev;
- uint64_t padding2;
- };
- union {
struct adf_user_cfg_key_val *next;
uint64_t padding3;
};
@@ -75,10 +67,6 @@ struct adf_user_cfg_section {
uint64_t padding1;
};
union {
- struct adf_user_cfg_section *prev;
- uint64_t padding2;
- };
- union {
struct adf_user_cfg_section *next;
uint64_t padding3;
};
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_common_drv.h b/kernel/drivers/crypto/qat/qat_common/adf_common_drv.h
index 0666ee6a3..3f76bd495 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/kernel/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -53,6 +53,13 @@
#include "icp_qat_fw_loader_handle.h"
#include "icp_qat_hal.h"
+#define ADF_MAJOR_VERSION 0
+#define ADF_MINOR_VERSION 2
+#define ADF_BUILD_VERSION 0
+#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \
+ __stringify(ADF_MINOR_VERSION) "." \
+ __stringify(ADF_BUILD_VERSION)
+
#define ADF_STATUS_RESTARTING 0
#define ADF_STATUS_STARTING 1
#define ADF_STATUS_CONFIGURED 2
@@ -84,9 +91,13 @@ struct service_hndl {
unsigned long start_status;
char *name;
struct list_head list;
- int admin;
};
+static inline int get_current_node(void)
+{
+ return topology_physical_package_id(smp_processor_id());
+}
+
int adf_service_register(struct service_hndl *service);
int adf_service_unregister(struct service_hndl *service);
@@ -95,13 +106,24 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
int adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+
int adf_ctl_dev_register(void);
void adf_ctl_dev_unregister(void);
int adf_processes_dev_register(void);
void adf_processes_dev_unregister(void);
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
struct adf_accel_dev *adf_devmgr_get_first(void);
@@ -123,6 +145,12 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
int adf_dev_get(struct adf_accel_dev *accel_dev);
void adf_dev_put(struct adf_accel_dev *accel_dev);
@@ -134,10 +162,11 @@ int qat_crypto_unregister(void);
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
void qat_alg_callback(void *resp);
-int qat_algs_init(void);
-void qat_algs_exit(void);
+void qat_alg_asym_callback(void *resp);
int qat_algs_register(void);
-int qat_algs_unregister(void);
+void qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
int qat_hal_init(struct adf_accel_dev *accel_dev);
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
@@ -189,4 +218,23 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ uint32_t vf_mask);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ uint32_t vf_mask);
+#else
+static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ return 0;
+}
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
#endif
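
The CONFIG_PCI_IOV stubs keep callers unconditional: common code can always invoke adf_disable_sriov(), and a PF driver can always hand adf_sriov_configure to the PCI core, which only calls it when user space writes to the device's sriov_numvfs attribute. Assumed wiring on the PF side (everything here except adf_sriov_configure and ADF_DH895XCC_DEVICE_NAME is an illustrative name):

	static struct pci_driver adf_driver = {
		.id_table = adf_pci_tbl,	/* illustrative */
		.name = ADF_DH895XCC_DEVICE_NAME,
		.probe = adf_probe,		/* illustrative */
		.remove = adf_remove,		/* illustrative */
		.sriov_configure = adf_sriov_configure,
	};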
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/kernel/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index cb5f066e9..473d36d91 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/kernel/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
goto out_err;
}
- params_head = section_head->params;
+ params_head = section.params;
while (params_head) {
if (copy_from_user(&key_val, (void __user *)params_head,
@@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
}
accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
- if (!accel_dev) {
- pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+ if (!accel_dev)
return -ENODEV;
- }
+
hw_data = accel_dev->hw_device;
dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
dev_info.num_ae = hw_data->get_num_aes(hw_data);
@@ -464,9 +463,6 @@ static int __init adf_register_ctl_device_driver(void)
{
mutex_init(&adf_ctl_lock);
- if (qat_algs_init())
- goto err_algs_init;
-
if (adf_chr_drv_create())
goto err_chr_dev;
@@ -483,8 +479,6 @@ err_crypto_register:
err_aer:
adf_chr_drv_destroy();
err_chr_dev:
- qat_algs_exit();
-err_algs_init:
mutex_destroy(&adf_ctl_lock);
return -EFAULT;
}
@@ -494,7 +488,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
adf_chr_drv_destroy();
adf_exit_aer();
qat_crypto_unregister();
- qat_algs_exit();
+ adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock);
}
@@ -504,3 +498,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/kernel/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 3f0ff9e7d..8dfdb8f90 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/kernel/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -50,21 +50,125 @@
#include "adf_common_drv.h"
static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static uint32_t num_devices;
+struct vf_id_map {
+ u32 bdf;
+ u32 id;
+ u32 fake_id;
+ bool attached;
+ struct list_head list;
+};
+
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
+ return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+ PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+ (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+ return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+ struct list_head *itr;
+
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+
+ if (ptr->bdf == bdf)
+ return ptr;
+ }
+ return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+ struct list_head *itr;
+
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+ if (ptr->fake_id == fake)
+ return ptr->id;
+ }
+ return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Cleans VF id mappings
+ *
+ * Function cleans internal ids for virtual functions.
+ * @vf: flag indicating whether mappings are cleaned
+ * for vfs only or for vfs and pfs
+ */
+void adf_clean_vf_map(bool vf)
+{
+ struct vf_id_map *map;
+ struct list_head *ptr, *tmp;
+
+ mutex_lock(&table_lock);
+ list_for_each_safe(ptr, tmp, &vfs_table) {
+ map = list_entry(ptr, struct vf_id_map, list);
+ if (map->bdf != -1)
+ num_devices--;
+
+ if (vf && map->bdf == -1)
+ continue;
+
+ list_del(ptr);
+ kfree(map);
+ }
+ mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data: Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+ struct adf_hw_device_class *class = hw_data->dev_class;
+ struct list_head *itr;
+ int i = 0;
+
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
+ list_entry(itr, struct adf_accel_dev, list);
+
+ if (ptr->hw_device->dev_class == class)
+ ptr->hw_device->instance_id = i++;
+
+ if (i == class->instances)
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
/**
* adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
* @accel_dev: Pointer to acceleration device.
+ * @pf: Corresponding PF if the accel_dev is a VF
*
* Function adds acceleration device to the acceleration framework.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf)
{
struct list_head *itr;
+ int ret = 0;
if (num_devices == ADF_MAX_DEVICES) {
dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
@@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
}
mutex_lock(&table_lock);
- list_for_each(itr, &accel_table) {
- struct adf_accel_dev *ptr =
+ atomic_set(&accel_dev->ref_count, 0);
+
+ /* PF on host or VF on guest */
+ if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+ struct vf_id_map *map;
+
+ list_for_each(itr, &accel_table) {
+ struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
- if (ptr == accel_dev) {
- mutex_unlock(&table_lock);
- return -EEXIST;
+ if (ptr == accel_dev) {
+ ret = -EEXIST;
+ goto unlock;
+ }
}
+
+ list_add_tail(&accel_dev->list, &accel_table);
+ accel_dev->accel_id = num_devices++;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ map->bdf = ~0;
+ map->id = accel_dev->accel_id;
+ map->fake_id = map->id;
+ map->attached = true;
+ list_add_tail(&map->list, &vfs_table);
+ } else if (accel_dev->is_vf && pf) {
+ /* VF on host */
+ struct adf_accel_vf_info *vf_info;
+ struct vf_id_map *map;
+
+ vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
+
+ map = adf_find_vf(adf_get_vf_num(accel_dev));
+ if (map) {
+ struct vf_id_map *next;
+
+ accel_dev->accel_id = map->id;
+ list_add_tail(&accel_dev->list, &accel_table);
+ map->fake_id++;
+ map->attached = true;
+ next = list_next_entry(map, list);
+ while (next && &next->list != &vfs_table) {
+ next->fake_id++;
+ next = list_next_entry(next, list);
+ }
+
+ ret = 0;
+ goto unlock;
+ }
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ accel_dev->accel_id = num_devices++;
+ list_add_tail(&accel_dev->list, &accel_table);
+ map->bdf = adf_get_vf_num(accel_dev);
+ map->id = accel_dev->accel_id;
+ map->fake_id = map->id;
+ map->attached = true;
+ list_add_tail(&map->list, &vfs_table);
}
- atomic_set(&accel_dev->ref_count, 0);
- list_add_tail(&accel_dev->list, &accel_table);
- accel_dev->accel_id = num_devices++;
+unlock:
mutex_unlock(&table_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
@@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void)
/**
* adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
* @accel_dev: Pointer to acceleration device.
+ * @pf: Corresponding PF if the accel_dev is a VF
*
* Function removes acceleration device from the acceleration framework.
* To be used by QAT device specific drivers.
*
* Return: void
*/
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ struct adf_accel_dev *pf)
{
mutex_lock(&table_lock);
+ if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+ num_devices--;
+ } else if (accel_dev->is_vf && pf) {
+ struct vf_id_map *map, *next;
+
+ map = adf_find_vf(adf_get_vf_num(accel_dev));
+ if (!map) {
+ dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+ goto unlock;
+ }
+ map->fake_id--;
+ map->attached = false;
+ next = list_next_entry(map, list);
+ while (next && &next->list != &vfs_table) {
+ next->fake_id--;
+ next = list_next_entry(next, list);
+ }
+ }
+unlock:
list_del(&accel_dev->list);
- num_devices--;
mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
@@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
{
struct list_head *itr;
+ int real_id;
mutex_lock(&table_lock);
+ real_id = adf_get_vf_real_id(id);
+ if (real_id < 0)
+ goto unlock;
+
+ id = real_id;
+
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
-
if (ptr->accel_id == id) {
mutex_unlock(&table_lock);
return ptr;
}
}
+unlock:
mutex_unlock(&table_lock);
return NULL;
}
@@ -180,21 +368,52 @@ int adf_devmgr_verify_id(uint32_t id)
return -ENODEV;
}
-void adf_devmgr_get_num_dev(uint32_t *num)
+static int adf_get_num_detached_vfs(void)
{
struct list_head *itr;
+ int vfs = 0;
- *num = 0;
- list_for_each(itr, &accel_table) {
- (*num)++;
+ mutex_lock(&table_lock);
+ list_for_each(itr, &vfs_table) {
+ struct vf_id_map *ptr =
+ list_entry(itr, struct vf_id_map, list);
+ if (ptr->bdf != ~0 && !ptr->attached)
+ vfs++;
}
+ mutex_unlock(&table_lock);
+ return vfs;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+ *num = num_devices - adf_get_num_detached_vfs();
}
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
return atomic_read(&accel_dev->ref_count) != 0;
}
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount and if this is the first time
+ * incrementing it during this period the accel_dev is in use,
+ * increment the module refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when it fails to bump the module refcount
+ */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
if (atomic_add_return(1, &accel_dev->ref_count) == 1)
@@ -202,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev)
return -EFAULT;
return 0;
}
+EXPORT_SYMBOL_GPL(adf_dev_get);
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount and if this is the last time
+ * decrementing it during this period the accel_dev is in use,
+ * decrement the module refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
module_put(accel_dev->owner);
}
+EXPORT_SYMBOL_GPL(adf_dev_put);
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
+EXPORT_SYMBOL_GPL(adf_dev_started);
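
On the host, every VF accel_dev is tracked in vfs_table under a synthetic bus/function number — adf_get_vf_id() above reduces algebraically to 8 * (slot - 1) + func, the VF's index within its PF's range — while PF entries carry bdf == ~0. The id/fake_id pair then decouples the stable device id from the dense index user space enumerates. A worked example (hypothetical BDFs, one PF and two VFs):

	/*
	 *	PF   : bdf = ~0, id = 0, fake_id = 0, attached
	 *	VF a : bdf = A,  id = 1, fake_id = 1, attached
	 *	VF b : bdf = B,  id = 2, fake_id = 2, attached
	 *
	 * Unbinding VF a (adf_devmgr_rm_dev) marks it detached and
	 * decrements every later fake_id, so user space still sees the
	 * dense range 0..1 and adf_devmgr_get_dev_by_id(1) translates
	 * fake id 1 back to VF b's stable real id 2.  Re-binding VF a
	 * reverses the shift and restores its original accel_id of 1.
	 */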
diff --git a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/kernel/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index 1864bdb36..6849422e0 100644
--- a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
+++ b/kernel/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -44,9 +44,8 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <adf_accel_devices.h>
-#include <adf_transport_internal.h>
-#include "adf_drv.h"
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
#define ADF_ARB_NUM 4
#define ADF_ARB_REQ_RING_NUM 8
@@ -58,7 +57,6 @@
#define ADF_ARB_RO_EN_OFFSET 0x090
#define ADF_ARB_WQCFG_OFFSET 0x100
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
-#define ADF_ARB_WRK_2_SER_MAP 10
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
@@ -89,10 +87,11 @@
int adf_init_arb(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
- uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
- uint32_t arb, i;
- const uint32_t *thd_2_arb_cfg;
+ u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+ u32 arb, i;
+ const u32 *thd_2_arb_cfg;
/* Service arb configured for 32 bytes responses and
* ring flow control check enabled. */
@@ -109,30 +108,39 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
/* Setup worker queue registers */
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WQCFG(csr, i, i);
/* Map worker threads to service arbiters */
- adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
+ hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
if (!thd_2_arb_cfg)
return -EFAULT;
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
return 0;
}
-
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+/**
+ * adf_update_ring_arb() - update ring arbitration register
+ * @ring: Pointer to ring data.
+ *
+ * Function enables or disables rings for arbitration.
+ */
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
ring->bank->bank_number,
ring->bank->ring_mask & 0xFF);
}
+EXPORT_SYMBOL_GPL(adf_update_ring_arb);
void adf_exit_arb(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr;
unsigned int i;
@@ -146,14 +154,15 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
/* Shutdown work queue */
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WQCFG(csr, i, 0);
/* Unmap worker threads to service arbiters */
- for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+ for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
/* Disable arbitration on all rings */
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_init.c b/kernel/drivers/crypto/qat/qat_common/adf_init.c
index 245f43237..d873eeecc 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_init.c
+++ b/kernel/drivers/crypto/qat/qat_common/adf_init.c
@@ -69,7 +69,7 @@ static void adf_service_add(struct service_hndl *service)
* Function adds the acceleration service to the acceleration framework.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_service_register(struct service_hndl *service)
{
@@ -94,7 +94,7 @@ static void adf_service_remove(struct service_hndl *service)
* Function remove the acceleration service from the acceleration framework.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_service_unregister(struct service_hndl *service)
{
@@ -114,7 +114,7 @@ EXPORT_SYMBOL_GPL(adf_service_unregister);
* Initialize the ring data structures and the admin comms and arbitration
* services.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_dev_init(struct adf_accel_dev *accel_dev)
{
@@ -177,20 +177,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
*/
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to initialise service %s\n",
- service->name);
- return -EFAULT;
- }
- set_bit(accel_dev->accel_id, &service->init_status);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
dev_err(&GET_DEV(accel_dev),
"Failed to initialise service %s\n",
@@ -201,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
}
hw_data->enable_error_correction(accel_dev);
+ hw_data->enable_vf2pf_comms(accel_dev);
return 0;
}
@@ -214,10 +201,11 @@ EXPORT_SYMBOL_GPL(adf_dev_init);
* is ready to be used.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_dev_start(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
@@ -229,22 +217,13 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
}
set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_START)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to start service %s\n",
- service->name);
- return -EFAULT;
- }
- set_bit(accel_dev->accel_id, &service->start_status);
+ if (hw_data->send_admin_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+ return -EFAULT;
}
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
dev_err(&GET_DEV(accel_dev),
"Failed to start service %s\n",
@@ -257,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
set_bit(ADF_STATUS_STARTED, &accel_dev->status);
- if (qat_algs_register()) {
+ if (!list_empty(&accel_dev->crypto_list) &&
+ (qat_algs_register() || qat_asym_algs_register())) {
dev_err(&GET_DEV(accel_dev),
"Failed to register crypto algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -276,7 +256,7 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
* is shuting down.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_dev_stop(struct adf_accel_dev *accel_dev)
{
@@ -292,14 +272,13 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
- if (qat_algs_unregister())
- dev_err(&GET_DEV(accel_dev),
- "Failed to unregister crypto algs\n");
+ if (!list_empty(&accel_dev->crypto_list)) {
+ qat_algs_unregister();
+ qat_asym_algs_unregister();
+ }
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
if (!test_bit(accel_dev->accel_id, &service->start_status))
continue;
ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
@@ -310,19 +289,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, &service->start_status);
}
}
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
- if (!test_bit(accel_dev->accel_id, &service->start_status))
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_STOP))
- dev_err(&GET_DEV(accel_dev),
- "Failed to shutdown service %s\n",
- service->name);
- else
- clear_bit(accel_dev->accel_id, &service->start_status);
- }
if (wait)
msleep(100);
@@ -373,21 +339,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
- if (!test_bit(accel_dev->accel_id, &service->init_status))
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
- dev_err(&GET_DEV(accel_dev),
- "Failed to shutdown service %s\n",
- service->name);
- else
- clear_bit(accel_dev->accel_id, &service->init_status);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
if (!test_bit(accel_dev->accel_id, &service->init_status))
continue;
if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
@@ -413,6 +364,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (hw_data->exit_admin_comms)
hw_data->exit_admin_comms(accel_dev);
+ hw_data->disable_iov(accel_dev);
adf_cleanup_etr_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_dev_shutdown);
@@ -424,17 +376,6 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
- dev_err(&GET_DEV(accel_dev),
- "Failed to restart service %s.\n",
- service->name);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
@@ -450,17 +391,6 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
- if (service->admin)
- continue;
- if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
- dev_err(&GET_DEV(accel_dev),
- "Failed to restart service %s.\n",
- service->name);
- }
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
- if (!service->admin)
- continue;
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/kernel/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
new file mode 100644
index 000000000..5fdbad809
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -0,0 +1,438 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_DH895XCC_EP_OFFSET 0x3A000
+#define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C)
+#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
+#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
+
+/**
+ * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function enables PF to VF interrupts
+ */
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+ ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
+
+/**
+ * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables PF to VF interrupts
+ */
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+ ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ u32 vf_mask)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 reg;
+
+ /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
+ reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+ }
+
+ /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
+ reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+ }
+}
+
+/**
+ * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
+ * @accel_dev: Pointer to acceleration device.
+ * @vf_mask: 32-bit mask of VFs whose interrupts will be disabled.
+ *
+ * Function disables VF to PF interrupts
+ */
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 reg;
+
+ /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
+ ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+ }
+
+ /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
+ ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
+
+static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+ struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_bar_addr =
+ pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+ u32 val, pf2vf_offset, count = 0;
+ u32 local_in_use_mask, local_in_use_pattern;
+ u32 remote_in_use_mask, remote_in_use_pattern;
+ struct mutex *lock; /* lock preventing concurrent access of the CSR */
+ u32 int_bit;
+ int ret = 0;
+
+ if (accel_dev->is_vf) {
+ pf2vf_offset = hw_data->get_pf2vf_offset(0);
+ lock = &accel_dev->vf.vf2pf_lock;
+ local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+ local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+ remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+ remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+ int_bit = ADF_VF2PF_INT;
+ } else {
+ pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
+ lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
+ local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+ local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+ remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+ remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+ int_bit = ADF_PF2VF_INT;
+ }
+
+ mutex_lock(lock);
+
+ /* Check if PF2VF CSR is in use by remote function */
+ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+ if ((val & remote_in_use_mask) == remote_in_use_pattern) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "PF2VF CSR in use by remote function\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Attempt to get ownership of PF2VF CSR */
+ msg &= ~local_in_use_mask;
+ msg |= local_in_use_pattern;
+ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
+
+ /* Wait in case remote func also attempting to get ownership */
+ msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
+
+ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+ if ((val & local_in_use_mask) != local_in_use_pattern) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "PF2VF CSR in use by remote - collision detected\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * This function now owns the PF2VF CSR. The IN_USE_BY pattern must
+ * remain in the PF2VF CSR for all writes including ACK from remote
+ * until this local function relinquishes the CSR. Send the message
+ * by interrupting the remote.
+ */
+ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
+
+ /* Wait for confirmation from remote func it received the message */
+ do {
+ msleep(ADF_IOV_MSG_ACK_DELAY);
+ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+ } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+
+ if (val & int_bit) {
+ dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+ val &= ~int_bit;
+ ret = -EIO;
+ }
+
+ /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
+out:
+ mutex_unlock(lock);
+ return ret;
+}
+
+/**
+ * adf_iov_putmsg() - send PF2VF message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg: Message to send
+ * @vf_nr: VF number to which the message will be sent
+ *
+ * Function sends a message from the PF to a VF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+ u32 count = 0;
+ int ret;
+
+ do {
+ ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
+ if (ret)
+ msleep(ADF_IOV_MSG_RETRY_DELAY);
+ } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_iov_putmsg);
+
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
+{
+ struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ int bar_id = hw_data->get_misc_bar_id(hw_data);
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
+
+ /* Read message from the VF */
+ msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+
+ /* To ACK, clear the VF2PFINT bit */
+ msg &= ~ADF_VF2PF_INT;
+ ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
+
+ if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
+ /* Ignore legacy non-system (non-kernel) VF2PF messages */
+ goto err;
+
+ switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
+ case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+ {
+ u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+
+ resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+ (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+ ADF_PF2VF_MSGTYPE_SHIFT) |
+ (ADF_PFVF_COMPATIBILITY_VERSION <<
+ ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Compatibility Version Request from VF%d vers=%u\n",
+ vf_nr + 1, vf_compat_ver);
+
+ if (vf_compat_ver < hw_data->min_iov_compat_ver) {
+ dev_err(&GET_DEV(accel_dev),
+ "VF (vers %d) incompatible with PF (vers %d)\n",
+ vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+ dev_err(&GET_DEV(accel_dev),
+ "VF (vers %d) compat with PF (vers %d) unkn.\n",
+ vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ } else {
+ dev_dbg(&GET_DEV(accel_dev),
+ "VF (vers %d) compatible with PF (vers %d)\n",
+ vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+ resp |= ADF_PF2VF_VF_COMPATIBLE <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ }
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Legacy VersionRequest received from VF%d 0x%x\n",
+ vf_nr + 1, msg);
+ resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+ (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+ ADF_PF2VF_MSGTYPE_SHIFT) |
+ (ADF_PFVF_COMPATIBILITY_VERSION <<
+ ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+ resp |= ADF_PF2VF_VF_COMPATIBLE <<
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ /* Set legacy major and minor version num */
+ resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
+ 1 << ADF_PF2VF_MINORVERSION_SHIFT;
+ break;
+ case ADF_VF2PF_MSGTYPE_INIT:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Init message received from VF%d 0x%x\n",
+ vf_nr + 1, msg);
+ vf_info->init = true;
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Shutdown message received from VF%d 0x%x\n",
+ vf_nr + 1, msg);
+ vf_info->init = false;
+ }
+ break;
+ default:
+ goto err;
+ }
+
+ if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+ dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+
+ /* re-enable interrupt on PF from this VF */
+ adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+ return;
+err:
+ dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
+ vf_nr + 1, msg);
+}
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_vf_info *vf;
+ u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+ (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send restarting msg to VF%d\n", i);
+ }
+}
+
+static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+ unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 msg = 0;
+ int ret;
+
+ msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
+ msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
+ msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+ BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+
+ /* Send request from VF to PF */
+ ret = adf_iov_putmsg(accel_dev, msg, 0);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Compatibility Version Request.\n");
+ return ret;
+ }
+
+ /* Wait for response */
+ if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+ timeout)) {
+ dev_err(&GET_DEV(accel_dev),
+ "IOV request/response message timeout expired\n");
+ return -EIO;
+ }
+
+ /* Response from PF received, check compatibility */
+ switch (accel_dev->vf.compatible) {
+ case ADF_PF2VF_VF_COMPATIBLE:
+ break;
+ case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+ /* VF is newer than PF and decides whether it is compatible */
+ if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+ break;
+ /* fall through */
+ case ADF_PF2VF_VF_INCOMPATIBLE:
+ dev_err(&GET_DEV(accel_dev),
+ "PF (vers %d) and VF (vers %d) are not compatible\n",
+ accel_dev->vf.pf_version,
+ ADF_PFVF_COMPATIBILITY_VERSION);
+ return -EINVAL;
+ default:
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid response from PF; assume not compatible\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Enable communication from VF to PF
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+ adf_enable_pf2vf_interrupts(accel_dev);
+ return adf_vf2pf_request_version(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
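
Taken together, __adf_iov_putmsg() implements the CSR ownership handshake and adf_iov_putmsg() wraps it with retries, so a caller only composes a message word and picks a recipient. A minimal sketch of a VF-side notification, built from nothing beyond the defines in adf_pf2vf_msg.h (the helper name example_vf2pf_notify_init is hypothetical; it mirrors the ADF_VF2PF_MSGTYPE_INIT case handled in adf_vf2pf_req_hndl() above):

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"

/* Hypothetical VF-side helper: tell the PF this VF has started.
 * adf_iov_putmsg() handles CSR collisions and ACK polling internally,
 * retrying up to ADF_IOV_MSG_MAX_RETRIES times. */
static int example_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
		   (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));

	/* vf_nr is unused on the VF side; a VF sees only its own CSR */
	return adf_iov_putmsg(accel_dev, msg, 0);
}
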
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/kernel/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
new file mode 100644
index 000000000..5acd531a1
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -0,0 +1,146 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_PF2VF_MSG_H
+#define ADF_PF2VF_MSG_H
+
+/*
+ * PF<->VF Messaging
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register is functionally split into two parts:
+ * The bottom half is for PF->VF messages. In particular, when the first
+ * bit of this register (bit 0) gets set, an interrupt is triggered in
+ * the respective VF.
+ * The top half is for VF->PF messages. In particular, when the first bit
+ * of this half of the register (bit 16) gets set, an interrupt is
+ * triggered in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and to implement a collision control mechanism to prevent concurrent use of
+ * the PF2VF register by both the PF and VF.
+ *
+ * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | VF2PF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | PF2VF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
+ */
+
+#define ADF_PFVF_COMPATIBILITY_VERSION 0x1 /* PF<->VF compat */
+
+/* PF->VF messages */
+#define ADF_PF2VF_INT BIT(0)
+#define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1)
+#define ADF_PF2VF_MSGTYPE_MASK 0x0000003C
+#define ADF_PF2VF_MSGTYPE_SHIFT 2
+#define ADF_PF2VF_MSGTYPE_RESTARTING 0x01
+#define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02
+#define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000
+#define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0
+#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000
+#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14
+#define ADF_PF2VF_MINORVERSION_SHIFT 6
+#define ADF_PF2VF_MAJORVERSION_SHIFT 10
+#define ADF_PF2VF_VF_COMPATIBLE 1
+#define ADF_PF2VF_VF_INCOMPATIBLE 2
+#define ADF_PF2VF_VF_COMPAT_UNKNOWN 3
+
+/* VF->PF messages */
+#define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2
+#define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE
+#define ADF_VF2PF_INT BIT(16)
+#define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17)
+#define ADF_VF2PF_MSGTYPE_MASK 0x003C0000
+#define ADF_VF2PF_MSGTYPE_SHIFT 18
+#define ADF_VF2PF_MSGTYPE_INIT 0x3
+#define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4
+#define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5
+#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6
+
+/* VF->PF Compatible Version Request */
+#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
+
+/* Collision detection */
+#define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10
+#define ADF_IOV_MSG_ACK_DELAY 2
+#define ADF_IOV_MSG_ACK_MAX_RETRY 100
+#define ADF_IOV_MSG_RETRY_DELAY 5
+#define ADF_IOV_MSG_MAX_RETRIES 3
+#define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \
+ ADF_IOV_MSG_ACK_MAX_RETRY + \
+ ADF_IOV_MSG_COLLISION_DETECT_DELAY)
+#endif /* ADF_PF2VF_MSG_H */
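
As a worked example of the layout above, a PF->VF version response packs the compatibility version into bits 13:6 and the result code into bits 15:14; a VF can decode it with the masks just defined (sketch with hypothetical helper names):

/* Decode a PF->VF VERSION_RESP word; masks and shifts come from this
 * header, the helper names are illustrative only. */
static inline u8 example_resp_version(u32 resp)
{
	return (resp & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
	       ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
}

static inline u8 example_resp_result(u32 resp)
{
	return (resp & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
	       ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
}
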
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_sriov.c b/kernel/drivers/crypto/qat/qat_common/adf_sriov.c
new file mode 100644
index 000000000..1117a8b58
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -0,0 +1,306 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pf2vf_msg.h"
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+#define ME2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190)
+#define ME2FUNCTION_MAP_A_NUM_REGS 96
+
+#define ME2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310)
+#define ME2FUNCTION_MAP_B_NUM_REGS 12
+
+#define ME2FUNCTION_MAP_REG_SIZE 4
+#define ME2FUNCTION_MAP_VALID BIT(7)
+
+#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index) \
+ ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
+ ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index) \
+ ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
+ ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
+ ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+struct adf_pf2vf_resp {
+ struct work_struct pf2vf_resp_work;
+ struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+ struct adf_pf2vf_resp *pf2vf_resp =
+ container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+
+ adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
+ kfree(pf2vf_resp);
+}
+
+static void adf_vf2pf_bh_handler(void *data)
+{
+ struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
+ struct adf_pf2vf_resp *pf2vf_resp;
+
+ pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+ if (!pf2vf_resp)
+ return;
+
+ pf2vf_resp->vf_info = vf_info;
+ INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+ queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ struct adf_accel_vf_info *vf_info;
+ int i;
+ u32 reg;
+
+ /* Workqueue for PF2VF responses */
+ pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+ if (!pf2vf_resp_wq)
+ return -ENOMEM;
+
+ for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+ i++, vf_info++) {
+ /* This ptr will be populated when VFs are created */
+ vf_info->accel_dev = accel_dev;
+ vf_info->vf_nr = i;
+
+ tasklet_init(&vf_info->vf2pf_bh_tasklet,
+ (void *)adf_vf2pf_bh_handler,
+ (unsigned long)vf_info);
+ mutex_init(&vf_info->pf2vf_lock);
+ ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ }
+
+ /* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
+ for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+ reg |= ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+ }
+
+ /* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
+ for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+ reg |= ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+ }
+
+ /* Enable VF to PF interrupts for all VFs */
+ adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
+
+ /*
+ * Due to the hardware design, when SR-IOV and the ring arbiter
+ * are enabled all the VFs supported in hardware must be enabled in
+ * order for all the hardware resources (i.e. bundles) to be usable.
+ * When SR-IOV is enabled, each of the VFs will own one bundle.
+ */
+ return pci_enable_sriov(pdev, totalvfs);
+}
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function disables SRIOV for the acceleration device.
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc =
+ &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+ int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+ struct adf_accel_vf_info *vf;
+ u32 reg;
+ int i;
+
+ if (!accel_dev->pf.vf_info)
+ return;
+
+ adf_pf2vf_notify_restarting(accel_dev);
+
+ pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+ /* Disable VF to PF interrupts */
+ adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);
+
+ /* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
+ for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+ reg &= ~ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+ }
+
+ /* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
+ for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+ reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+ reg &= ~ME2FUNCTION_MAP_VALID;
+ WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+ }
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+ tasklet_disable(&vf->vf2pf_bh_tasklet);
+ tasklet_kill(&vf->vf2pf_bh_tasklet);
+ mutex_destroy(&vf->pf2vf_lock);
+ }
+
+ kfree(accel_dev->pf.vf_info);
+ accel_dev->pf.vf_info = NULL;
+
+ if (pf2vf_resp_wq) {
+ destroy_workqueue(pf2vf_resp_wq);
+ pf2vf_resp_wq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev: Pointer to pci device.
+ * @numvfs: Number of virtual functions requested.
+ *
+ * Function enables SRIOV for the pci device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ int totalvfs = pci_sriov_get_totalvfs(pdev);
+ unsigned long val;
+ int ret;
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return -EFAULT;
+ }
+
+ if (!iommu_present(&pci_bus_type))
+ dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
+
+ if (accel_dev->pf.vf_info) {
+ dev_info(&pdev->dev, "Already enabled for this device\n");
+ return -EINVAL;
+ }
+
+ if (adf_dev_started(accel_dev)) {
+ if (adf_devmgr_in_reset(accel_dev) ||
+ adf_dev_in_use(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Device busy\n");
+ return -EBUSY;
+ }
+
+ if (adf_dev_stop(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to stop qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ adf_dev_shutdown(accel_dev);
+ }
+
+ if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ return -EFAULT;
+ val = 0;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ return -EFAULT;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ /* Allocate memory for VF info structs */
+ accel_dev->pf.vf_info = kcalloc(totalvfs,
+ sizeof(struct adf_accel_vf_info),
+ GFP_KERNEL);
+ if (!accel_dev->pf.vf_info)
+ return -ENOMEM;
+
+ if (adf_dev_init(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ if (adf_dev_start(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+ accel_dev->accel_id);
+ return -EFAULT;
+ }
+
+ ret = adf_enable_sriov(accel_dev);
+ if (ret)
+ return ret;
+
+ return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
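
adf_sriov_configure() is meant to be wired into a device driver's struct pci_driver so the PCI core calls it when user space writes to sriov_numvfs. A minimal sketch of that wiring (example_adf_driver, adf_pci_tbl, adf_probe and adf_remove are placeholder names; the dh895xcc driver elsewhere in this diff provides the real equivalents):

static struct pci_driver example_adf_driver = {
	.id_table = adf_pci_tbl,
	.name = "example_qat",
	.probe = adf_probe,
	.remove = adf_remove,
	.sriov_configure = adf_sriov_configure,
};

VFs are then enabled from user space, e.g. echo 32 > /sys/bus/pci/devices/0000:3d:00.0/sriov_numvfs; the PCI core routes that write to adf_sriov_configure(), which restarts the device with the ADF_NUM_CY key forced to 0 (no PF crypto instances) before pci_enable_sriov() hands one bundle to each VF.
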
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_transport.c b/kernel/drivers/crypto/qat/qat_common/adf_transport.c
index ccec32748..3865ae8d9 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/kernel/drivers/crypto/qat/qat_common/adf_transport.c
@@ -264,6 +264,10 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
return -EFAULT;
}
+ if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
+ dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+ return -EFAULT;
+ }
bank = &transport_data->banks[bank_num];
if (adf_reserve_ring(bank, ring_num)) {
@@ -285,7 +289,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
goto err;
/* Enable HW arbitration for the given ring */
- accel_dev->hw_device->hw_arb_ring_enable(ring);
+ adf_update_ring_arb(ring);
if (adf_ring_debugfs_add(ring, ring_name)) {
dev_err(&GET_DEV(accel_dev),
@@ -302,14 +306,13 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
err:
adf_cleanup_ring(ring);
adf_unreserve_ring(bank, ring_num);
- accel_dev->hw_device->hw_arb_ring_disable(ring);
+ adf_update_ring_arb(ring);
return ret;
}
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
struct adf_etr_bank_data *bank = ring->bank;
- struct adf_accel_dev *accel_dev = bank->accel_dev;
/* Disable interrupts for the given ring */
adf_disable_ring_irq(bank, ring->ring_number);
@@ -322,7 +325,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring)
adf_ring_debugfs_rm(ring);
adf_unreserve_ring(bank, ring->ring_number);
/* Disable HW arbitration for the given ring */
- accel_dev->hw_device->hw_arb_ring_disable(ring);
+ adf_update_ring_arb(ring);
adf_cleanup_ring(ring);
}
@@ -449,7 +452,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
err:
for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
ring = &bank->rings[i];
- if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
+ if (hw_data->tx_rings_mask & (1 << i))
kfree(ring->inflights);
}
return -ENOMEM;
@@ -463,7 +466,7 @@ err:
* acceleration device accel_dev.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
*/
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/kernel/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 160c9a36c..6ad7e4e1e 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/kernel/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -97,8 +97,9 @@
#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
/* Minimum ring buffer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
- ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+ ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
SIZE) & ~0x4)
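
The old minimum-size clamp compared a byte count against ADF_RING_SIZE_4K, which is a size encoding rather than a byte count, so the 4K floor was effectively never applied. A worked check, assuming the encodings defined earlier in this header (ADF_RING_SIZE_4K == 0x06 and ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x06) == 4096):

/* Old: ADF_RING_SIZE_BYTES_MIN(512) -> (512 < 0x06) ? 0x06 : 512 -> 512,
 *      so an undersized 512-byte allocation slipped through.
 * New: ADF_RING_SIZE_BYTES_MIN(512) -> (512 < 4096) ? 4096 : 512 -> 4096,
 *      enforcing the intended 4 KB minimum. */
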
diff --git a/kernel/drivers/crypto/qat/qat_common/adf_transport_debug.c b/kernel/drivers/crypto/qat/qat_common/adf_transport_debug.c
index e41986967..52340b9bb 100644
--- a/kernel/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/kernel/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -86,9 +86,7 @@ static int adf_ring_show(struct seq_file *sfile, void *v)
{
struct adf_etr_ring_data *ring = sfile->private;
struct adf_etr_bank_data *bank = ring->bank;
- uint32_t *msg = v;
void __iomem *csr = ring->bank->csr_addr;
- int i, x;
if (v == SEQ_START_TOKEN) {
int head, tail, empty;
@@ -113,18 +111,8 @@ static int adf_ring_show(struct seq_file *sfile, void *v)
seq_puts(sfile, "----------- Ring data ------------\n");
return 0;
}
- seq_printf(sfile, "%p:", msg);
- x = 0;
- i = 0;
- for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) {
- seq_printf(sfile, " %08X", *(msg + i));
- if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 &&
- (++x == 8)) {
- seq_printf(sfile, "\n%p:", msg + i + 1);
- x = 0;
- }
- }
- seq_puts(sfile, "\n");
+ seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
+ v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
return 0;
}
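
seq_hex_dump() is the generic seq_file helper (declared in <linux/seq_file.h>) that replaces the hand-rolled loop above: DUMP_PREFIX_ADDRESS prefixes each line with the buffer address, rowsize 32 with groupsize 4 yields eight 32-bit words per line, and the final false drops the trailing ASCII column. Its signature, for reference:

void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
		  int rowsize, int groupsize, const void *buf, size_t len,
		  bool ascii);
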
diff --git a/kernel/drivers/crypto/qat/qat_common/icp_qat_fw.h b/kernel/drivers/crypto/qat/qat_common/icp_qat_fw.h
index f1e30e24a..46747f01b 100644
--- a/kernel/drivers/crypto/qat/qat_common/icp_qat_fw.h
+++ b/kernel/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -249,6 +249,8 @@ struct icp_qat_fw_comn_resp {
#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
diff --git a/kernel/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/kernel/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644
index 000000000..0d7a9b51c
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
@@ -0,0 +1,112 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+ u64 content_desc_addr;
+ u32 content_desc_resrvd;
+ u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+ u64 opaque;
+ u64 src_data_addr;
+ u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+ u8 resrvd1;
+ u8 resrvd2;
+ u8 service_type;
+ u8 hdr_flags;
+ u16 comn_req_flags;
+ u16 resrvd4;
+ struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+ struct icp_qat_fw_req_pke_hdr pke_hdr;
+ struct icp_qat_fw_req_pke_mid pke_mid;
+ u8 output_param_count;
+ u8 input_param_count;
+ u16 resrvd1;
+ u32 resrvd2;
+ u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+ u8 resrvd1;
+ u8 resrvd2;
+ u8 response_type;
+ u8 hdr_flags;
+ u16 comn_resp_flags;
+ u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+ struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+ u64 opaque;
+ u64 src_data_addr;
+ u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+ QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+ ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+ QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+ QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+#endif
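
A short sketch of how the accessors above are meant to be used: mark a request header valid before submission and test the PKE status flag on completion. The helper names are hypothetical, and the check assumes, as in icp_qat_fw.h, that a status flag value of 0 means OK:

#include "icp_qat_fw_pke.h"

/* Hypothetical helpers illustrating the VALID flag and status macros. */
static inline void example_pke_mark_valid(struct icp_qat_fw_pke_request *req)
{
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(req->pke_hdr, 1);
}

static inline bool example_pke_resp_ok(struct icp_qat_fw_pke_resp *resp)
{
	/* 0 == status flag OK (assumption noted above) */
	return ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp->pke_resp_hdr.comn_resp_flags) == 0;
}
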
diff --git a/kernel/drivers/crypto/qat/qat_common/qat_algs.c b/kernel/drivers/crypto/qat/qat_common/qat_algs.c
index 34139a889..59e4c3af1 100644
--- a/kernel/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/kernel/drivers/crypto/qat/qat_common/qat_algs.c
@@ -47,13 +47,12 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
-#include <crypto/aead.h>
+#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
-#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
@@ -63,13 +62,13 @@
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
-#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
ICP_QAT_HW_CIPHER_NO_CONVERT, \
ICP_QAT_HW_CIPHER_ENCRYPT)
-#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
ICP_QAT_HW_CIPHER_KEY_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
@@ -113,9 +112,6 @@ struct qat_alg_aead_ctx {
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
struct qat_crypto_instance *inst;
- struct crypto_tfm *tfm;
- uint8_t salt[AES_BLOCK_SIZE];
- spinlock_t lock; /* protects qat_alg_aead_ctx struct */
};
struct qat_alg_ablkcipher_ctx {
@@ -130,11 +126,6 @@ struct qat_alg_ablkcipher_ctx {
spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
};
-static int get_current_node(void)
-{
- return cpu_data(current_thread_info()->cpu).phys_proc_id;
-}
-
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
switch (qat_hash_alg) {
@@ -278,12 +269,13 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
-static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
int alg,
- struct crypto_authenc_keys *keys)
+ struct crypto_authenc_keys *keys,
+ int mode)
{
- struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
- unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+ unsigned int digestsize = crypto_aead_authsize(aead_tfm);
struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
struct icp_qat_hw_auth_algo_blk *hash =
@@ -297,7 +289,7 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
/* CD setup */
- cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
hash->sha.inner_setup.auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -358,12 +350,13 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
return 0;
}
-static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
int alg,
- struct crypto_authenc_keys *keys)
+ struct crypto_authenc_keys *keys,
+ int mode)
{
- struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
- unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+ unsigned int digestsize = crypto_aead_authsize(aead_tfm);
struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
struct icp_qat_hw_cipher_algo_blk *cipher =
@@ -382,7 +375,7 @@ static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
sizeof(struct icp_qat_fw_la_cipher_req_params));
/* CD setup */
- cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
hash->sha.inner_setup.auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -473,7 +466,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
int alg, const uint8_t *key,
- unsigned int keylen)
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
@@ -481,12 +474,12 @@ static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
- enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
+ enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
int alg, const uint8_t *key,
- unsigned int keylen)
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
@@ -494,51 +487,67 @@ static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
- dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
+
+ if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
+ dec_cd->aes.cipher_config.val =
+ QAT_AES_HW_CONFIG_DEC(alg, mode);
+ else
+ dec_cd->aes.cipher_config.val =
+ QAT_AES_HW_CONFIG_ENC(alg, mode);
}
-static int qat_alg_validate_key(int key_len, int *alg)
+static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
- switch (key_len) {
- case AES_KEYSIZE_128:
- *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
- break;
- case AES_KEYSIZE_192:
- *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
- break;
- case AES_KEYSIZE_256:
- *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
- break;
- default:
- return -EINVAL;
+ if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (key_len) {
+ case AES_KEYSIZE_128 << 1:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case AES_KEYSIZE_256 << 1:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
}
return 0;
}
-static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
- const uint8_t *key, unsigned int keylen)
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen, int mode)
{
struct crypto_authenc_keys keys;
int alg;
- if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
- return -EFAULT;
-
if (crypto_authenc_extractkeys(&keys, key, keylen))
goto bad_key;
- if (qat_alg_validate_key(keys.enckeylen, &alg))
+ if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
goto bad_key;
- if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
+ if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
goto error;
- if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
+ if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
goto error;
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
error:
return -EFAULT;
@@ -546,15 +555,16 @@ error:
static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
const uint8_t *key,
- unsigned int keylen)
+ unsigned int keylen,
+ int mode)
{
int alg;
- if (qat_alg_validate_key(keylen, &alg))
+ if (qat_alg_validate_key(keylen, &alg, mode))
goto bad_key;
- qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
- qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
+ qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
+ qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
bad_key:
crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -567,7 +577,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev;
- spin_lock(&ctx->lock);
if (ctx->enc_cd) {
/* rekeying */
dev = &GET_DEV(ctx->inst->accel_dev);
@@ -581,7 +590,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
struct qat_crypto_instance *inst =
qat_crypto_get_instance_node(node);
if (!inst) {
- spin_unlock(&ctx->lock);
return -EINVAL;
}
@@ -591,19 +599,17 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
&ctx->enc_cd_paddr,
GFP_ATOMIC);
if (!ctx->enc_cd) {
- spin_unlock(&ctx->lock);
return -ENOMEM;
}
ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
&ctx->dec_cd_paddr,
GFP_ATOMIC);
if (!ctx->dec_cd) {
- spin_unlock(&ctx->lock);
goto out_free_enc;
}
}
- spin_unlock(&ctx->lock);
- if (qat_alg_aead_init_sessions(ctx, key, keylen))
+ if (qat_alg_aead_init_sessions(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE))
goto out_free_all;
return 0;
@@ -654,22 +660,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
}
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
- struct scatterlist *assoc,
struct scatterlist *sgl,
- struct scatterlist *sglout, uint8_t *iv,
- uint8_t ivlen,
+ struct scatterlist *sglout,
struct qat_crypto_request *qat_req)
{
struct device *dev = &GET_DEV(inst->accel_dev);
- int i, bufs = 0, sg_nctr = 0;
- int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+ int i, sg_nctr = 0;
+ int n = sg_nents(sgl);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
dma_addr_t blp;
dma_addr_t bloutp = 0;
struct scatterlist *sg;
size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
- ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+ ((1 + n) * sizeof(struct qat_alg_buf));
if (unlikely(!n))
return -EINVAL;
@@ -683,29 +687,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(dma_mapping_error(dev, blp)))
goto err;
- for_each_sg(assoc, sg, assoc_n, i) {
- if (!sg->length)
- continue;
- bufl->bufers[bufs].addr = dma_map_single(dev,
- sg_virt(sg),
- sg->length,
- DMA_BIDIRECTIONAL);
- bufl->bufers[bufs].len = sg->length;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
- goto err;
- bufs++;
- }
- if (ivlen) {
- bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
- DMA_BIDIRECTIONAL);
- bufl->bufers[bufs].len = ivlen;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
- goto err;
- bufs++;
- }
-
for_each_sg(sgl, sg, n, i) {
- int y = sg_nctr + bufs;
+ int y = sg_nctr;
if (!sg->length)
continue;
@@ -718,7 +701,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
goto err;
sg_nctr++;
}
- bufl->num_bufs = sg_nctr + bufs;
+ bufl->num_bufs = sg_nctr;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -728,7 +711,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
n = sg_nents(sglout);
sz_out = sizeof(struct qat_alg_buf_list) +
- ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+ ((1 + n) * sizeof(struct qat_alg_buf));
sg_nctr = 0;
buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
@@ -738,14 +721,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(dma_mapping_error(dev, bloutp)))
goto err;
bufers = buflout->bufers;
- /* For out of place operation dma map only data and
- * reuse assoc mapping and iv */
- for (i = 0; i < bufs; i++) {
- bufers[i].len = bufl->bufers[i].len;
- bufers[i].addr = bufl->bufers[i].addr;
- }
for_each_sg(sglout, sg, n, i) {
- int y = sg_nctr + bufs;
+ int y = sg_nctr;
if (!sg->length)
continue;
@@ -758,7 +735,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[y].len = sg->length;
sg_nctr++;
}
- buflout->num_bufs = sg_nctr + bufs;
+ buflout->num_bufs = sg_nctr;
buflout->num_mapped_bufs = sg_nctr;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
@@ -772,7 +749,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
err:
dev_err(dev, "Failed to map buf for dma\n");
sg_nctr = 0;
- for (i = 0; i < n + bufs; i++)
+ for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
@@ -783,7 +760,7 @@ err:
kfree(bufl);
if (sgl != sglout && buflout) {
n = sg_nents(sglout);
- for (i = bufs; i < n + bufs; i++)
+ for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
@@ -843,11 +820,10 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
- int digst_size = crypto_aead_crt(aead_tfm)->authsize;
+ int digst_size = crypto_aead_authsize(aead_tfm);
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
- areq->iv, AES_BLOCK_SIZE, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -861,12 +837,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = areq->cryptlen - digst_size;
- cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+ cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
- auth_param->auth_len = areq->assoclen +
- cipher_param->cipher_length + AES_BLOCK_SIZE;
+ auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
@@ -878,8 +853,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
- int enc_iv)
+static int qat_alg_aead_enc(struct aead_request *areq)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
@@ -888,10 +862,10 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
+ uint8_t *iv = areq->iv;
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
- iv, AES_BLOCK_SIZE, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -906,16 +880,12 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- if (enc_iv) {
- cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
- cipher_param->cipher_offset = areq->assoclen;
- } else {
- memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
- cipher_param->cipher_length = areq->cryptlen;
- cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
- }
+ memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+ cipher_param->cipher_length = areq->cryptlen;
+ cipher_param->cipher_offset = areq->assoclen;
+
auth_param->auth_off = 0;
- auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+ auth_param->auth_len = areq->assoclen + areq->cryptlen;
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
@@ -928,28 +898,9 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
return -EINPROGRESS;
}
-static int qat_alg_aead_enc(struct aead_request *areq)
-{
- return qat_alg_aead_enc_internal(areq, areq->iv, 0);
-}
-
-static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
-{
- struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
- struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
- struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- __be64 seq;
-
- memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
- seq = cpu_to_be64(req->seq);
- memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
- &seq, sizeof(uint64_t));
- return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
-}
-
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const uint8_t *key,
- unsigned int keylen)
+ const u8 *key, unsigned int keylen,
+ int mode)
{
struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct device *dev;
@@ -990,7 +941,7 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
}
}
spin_unlock(&ctx->lock);
- if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
+ if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
goto out_free_all;
return 0;
@@ -1008,6 +959,27 @@ out_free_enc:
return -ENOMEM;
}
+static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CTR_MODE);
+}
+
+static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_XTS_MODE);
+}
+
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
@@ -1018,8 +990,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
struct icp_qat_fw_la_bulk_req *msg;
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
- NULL, 0, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -1056,8 +1027,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
struct icp_qat_fw_la_bulk_req *msg;
int ret, ctr = 0;
- ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
- NULL, 0, qat_req);
+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
if (unlikely(ret))
return ret;
@@ -1084,46 +1054,43 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
return -EINPROGRESS;
}
-static int qat_alg_aead_init(struct crypto_tfm *tfm,
+static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
const char *hash_name)
{
- struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(ctx->hash_tfm))
- return -EFAULT;
- spin_lock_init(&ctx->lock);
+ return PTR_ERR(ctx->hash_tfm);
ctx->qat_hash_alg = hash;
- tfm->crt_aead.reqsize = sizeof(struct aead_request) +
- sizeof(struct qat_crypto_request);
- ctx->tfm = tfm;
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ sizeof(struct qat_crypto_request));
return 0;
}
-static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}
-static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}
-static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
-static void qat_alg_aead_exit(struct crypto_tfm *tfm)
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
- struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
- if (!IS_ERR(ctx->hash_tfm))
- crypto_free_shash(ctx->hash_tfm);
+ crypto_free_shash(ctx->hash_tfm);
if (!inst)
return;
@@ -1180,75 +1147,107 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
qat_crypto_put_instance(inst);
}
-static struct crypto_alg qat_algs[] = { {
- .cra_name = "authenc(hmac(sha1),cbc(aes))",
- .cra_driver_name = "qat_aes_cbc_hmac_sha1",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_aead_sha1_init,
- .cra_exit = qat_alg_aead_exit,
- .cra_u = {
- .aead = {
- .setkey = qat_alg_aead_setkey,
- .decrypt = qat_alg_aead_dec,
- .encrypt = qat_alg_aead_enc,
- .givencrypt = qat_alg_aead_genivenc,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA1_DIGEST_SIZE,
- },
+
+static struct aead_alg qat_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_alg_aead_sha1_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
},
+ .init = qat_alg_aead_sha256_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
}, {
- .cra_name = "authenc(hmac(sha256),cbc(aes))",
- .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_alg_aead_sha512_init,
+ .exit = qat_alg_aead_exit,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
+static struct crypto_alg qat_algs[] = { {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "qat_aes_cbc",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
+ .cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
- .cra_init = qat_alg_aead_sha256_init,
- .cra_exit = qat_alg_aead_exit,
+ .cra_init = qat_alg_ablkcipher_init,
+ .cra_exit = qat_alg_ablkcipher_exit,
.cra_u = {
- .aead = {
- .setkey = qat_alg_aead_setkey,
- .decrypt = qat_alg_aead_dec,
- .encrypt = qat_alg_aead_enc,
- .givencrypt = qat_alg_aead_genivenc,
+ .ablkcipher = {
+ .setkey = qat_alg_ablkcipher_cbc_setkey,
+ .decrypt = qat_alg_ablkcipher_decrypt,
+ .encrypt = qat_alg_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA256_DIGEST_SIZE,
},
},
}, {
- .cra_name = "authenc(hmac(sha512),cbc(aes))",
- .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "qat_aes_ctr",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+ .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
.cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
+ .cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
- .cra_init = qat_alg_aead_sha512_init,
- .cra_exit = qat_alg_aead_exit,
+ .cra_init = qat_alg_ablkcipher_init,
+ .cra_exit = qat_alg_ablkcipher_exit,
.cra_u = {
- .aead = {
- .setkey = qat_alg_aead_setkey,
- .decrypt = qat_alg_aead_dec,
- .encrypt = qat_alg_aead_enc,
- .givencrypt = qat_alg_aead_genivenc,
+ .ablkcipher = {
+ .setkey = qat_alg_ablkcipher_ctr_setkey,
+ .decrypt = qat_alg_ablkcipher_decrypt,
+ .encrypt = qat_alg_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .maxauthsize = SHA512_DIGEST_SIZE,
},
},
}, {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "qat_aes_cbc",
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "qat_aes_xts",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1260,7 +1259,7 @@ static struct crypto_alg qat_algs[] = { {
.cra_exit = qat_alg_ablkcipher_exit,
.cra_u = {
.ablkcipher = {
- .setkey = qat_alg_ablkcipher_setkey,
+ .setkey = qat_alg_ablkcipher_xts_setkey,
.decrypt = qat_alg_ablkcipher_decrypt,
.encrypt = qat_alg_ablkcipher_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1272,42 +1271,44 @@ static struct crypto_alg qat_algs[] = { {
int qat_algs_register(void)
{
- int ret = 0;
+ int ret = 0, i;
mutex_lock(&algs_lock);
- if (++active_devs == 1) {
- int i;
+ if (++active_devs != 1)
+ goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
- qat_algs[i].cra_flags =
- (qat_algs[i].cra_type == &crypto_aead_type) ?
- CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
- CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+ qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
- }
- mutex_unlock(&algs_lock);
- return ret;
-}
+ ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ if (ret)
+ goto unlock;
-int qat_algs_unregister(void)
-{
- int ret = 0;
+ for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+ qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
- mutex_lock(&algs_lock);
- if (--active_devs == 0)
- ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+ if (ret)
+ goto unreg_algs;
+
+unlock:
mutex_unlock(&algs_lock);
return ret;
-}
-int qat_algs_init(void)
-{
- crypto_get_default_rng();
- return 0;
+unreg_algs:
+ crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ goto unlock;
}
-void qat_algs_exit(void)
+void qat_algs_unregister(void)
{
- crypto_put_default_rng();
+ mutex_lock(&algs_lock);
+ if (--active_devs != 0)
+ goto unlock;
+
+ crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+ crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+
+unlock:
+ mutex_unlock(&algs_lock);
}
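
The reworked registration above takes the usual unwind-on-failure shape: the
first active device registers the ablkcipher table, then the AEAD table, and
rolls the first step back if the second fails; the last device to go away
unregisters both. A minimal standalone sketch of that pattern (the
register_ciphers()/register_aeads()/unregister_ciphers() helpers are
hypothetical stand-ins for the crypto_register_algs()/crypto_register_aeads()
calls):

#include <linux/mutex.h>

static DEFINE_MUTEX(lock);
static unsigned int users;

int register_ciphers(void);		/* hypothetical helpers */
int register_aeads(void);
void unregister_ciphers(void);

int example_register(void)
{
	int ret = 0;

	mutex_lock(&lock);
	if (++users != 1)		/* only the first user registers */
		goto unlock;

	ret = register_ciphers();
	if (ret)
		goto unlock;

	ret = register_aeads();
	if (ret)
		unregister_ciphers();	/* unwind the step that succeeded */
unlock:
	mutex_unlock(&lock);
	return ret;
}
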
diff --git a/kernel/drivers/crypto/qat/qat_common/qat_asym_algs.c b/kernel/drivers/crypto/qat/qat_common/qat_asym_algs.c
new file mode 100644
index 000000000..51c594fda
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -0,0 +1,755 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include <crypto/scatterwalk.h>
+#include "qat_rsapubkey-asn1.h"
+#include "qat_rsaprivkey-asn1.h"
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+ union {
+ struct {
+ dma_addr_t m;
+ dma_addr_t e;
+ dma_addr_t n;
+ } enc;
+ struct {
+ dma_addr_t c;
+ dma_addr_t d;
+ dma_addr_t n;
+ } dec;
+ u64 in_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+ union {
+ struct {
+ dma_addr_t c;
+ } enc;
+ struct {
+ dma_addr_t m;
+ } dec;
+ u64 out_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+ char *n;
+ char *e;
+ char *d;
+ dma_addr_t dma_n;
+ dma_addr_t dma_e;
+ dma_addr_t dma_d;
+ unsigned int key_sz;
+ struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_rsa_request {
+ struct qat_rsa_input_params in;
+ struct qat_rsa_output_params out;
+ dma_addr_t phy_in;
+ dma_addr_t phy_out;
+ char *src_align;
+ char *dst_align;
+ struct icp_qat_fw_pke_request req;
+ struct qat_rsa_ctx *ctx;
+ int err;
+} __aligned(64);
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+ struct akcipher_request *areq = (void *)(__force long)resp->opaque;
+ struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
+ struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+ int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp->pke_resp_hdr.comn_resp_flags);
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+ if (req->src_align)
+ dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
+ req->in.enc.m);
+ else
+ dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
+ DMA_TO_DEVICE);
+
+ areq->dst_len = req->ctx->key_sz;
+ if (req->dst_align) {
+ char *ptr = req->dst_align;
+
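+ /* the result is a fixed key_sz-byte big-endian number; drop leading zero bytes so dst_len reports only the significant part */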
+ while (!(*ptr) && areq->dst_len) {
+ areq->dst_len--;
+ ptr++;
+ }
+
+ if (areq->dst_len != req->ctx->key_sz)
+ memmove(req->dst_align, ptr, areq->dst_len);
+
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+
+ dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
+ req->out.enc.c);
+ } else {
+ char *ptr = sg_virt(areq->dst);
+
+ while (!(*ptr) && areq->dst_len) {
+ areq->dst_len--;
+ ptr++;
+ }
+
+ if (sg_virt(areq->dst) != ptr && areq->dst_len)
+ memmove(sg_virt(areq->dst), ptr, areq->dst_len);
+
+ dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+ DMA_FROM_DEVICE);
+ }
+
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+
+ akcipher_request_complete(areq, err);
+}
+
+void qat_alg_asym_callback(void *_resp)
+{
+ struct icp_qat_fw_pke_resp *resp = _resp;
+
+ qat_rsa_cb(resp);
+}
+
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_EP_512;
+ case 1024:
+ return PKE_RSA_EP_1024;
+ case 1536:
+ return PKE_RSA_EP_1536;
+ case 2048:
+ return PKE_RSA_EP_2048;
+ case 3072:
+ return PKE_RSA_EP_3072;
+ case 4096:
+ return PKE_RSA_EP_4096;
+ default:
+ return 0;
+ }
+}
+
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_DP1_512;
+ case 1024:
+ return PKE_RSA_DP1_1024;
+ case 1536:
+ return PKE_RSA_DP1_1536;
+ case 2048:
+ return PKE_RSA_DP1_2048;
+ case 3072:
+ return PKE_RSA_DP1_3072;
+ case 4096:
+ return PKE_RSA_DP1_4096;
+ default:
+ return 0;
+ }
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_rsa_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret, ctr = 0;
+
+ if (unlikely(!ctx->n || !ctx->e))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->key_sz) {
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->ctx = ctx;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ qat_req->in.enc.e = ctx->dma_e;
+ qat_req->in.enc.n = ctx->dma_n;
+ ret = -ENOMEM;
+
+ /*
+ * src can be of any size in the valid range, but the HW expects it to
+ * be the same size as the modulus n, so if it differs we allocate a
+ * new buffer and copy the src data into it.
+ * Otherwise we just map the user-provided buffer, which must also be
+ * contiguous.
+ */
+ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+ qat_req->src_align = NULL;
+ qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
+ req->src_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
+ return ret;
+
+ } else {
+ int shift = ctx->key_sz - req->src_len;
+
+ qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.enc.m,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+ 0, req->src_len, 0);
+ }
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
+ req->dst_len,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
+ goto unmap_src;
+
+ } else {
+ qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+ &qat_req->out.enc.c,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+
+ }
+ qat_req->in.in_tab[3] = 0;
+ qat_req->out.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+ goto unmap_dst;
+
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap_in_params;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (uint64_t)(__force long)req;
+ msg->input_param_count = 3;
+ msg->output_param_count = 1;
+ do {
+ ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ } while (ret == -EBUSY && ctr++ < 100);
+
+ if (!ret)
+ return -EINPROGRESS;
+unmap_src:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.enc.m);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.enc.m))
+ dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
+unmap_dst:
+ if (qat_req->dst_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+ qat_req->out.enc.c);
+ else
+ if (!dma_mapping_error(dev, qat_req->out.enc.c))
+ dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
+ DMA_FROM_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_rsa_request *qat_req =
+ PTR_ALIGN(akcipher_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret, ctr = 0;
+
+ if (unlikely(!ctx->n || !ctx->d))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->key_sz) {
+ req->dst_len = ctx->key_sz;
+ return -EOVERFLOW;
+ }
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->ctx = ctx;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ qat_req->in.dec.d = ctx->dma_d;
+ qat_req->in.dec.n = ctx->dma_n;
+ ret = -ENOMEM;
+
+ /*
+ * src can be of any size in the valid range, but the HW expects it to
+ * be the same size as the modulus n, so if it differs we allocate a
+ * new buffer and copy the src data into it.
+ * Otherwise we just map the user-provided buffer, which must also be
+ * contiguous.
+ */
+ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+ qat_req->src_align = NULL;
+ qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
+ req->src_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
+ return ret;
+
+ } else {
+ int shift = ctx->key_sz - req->src_len;
+
+ qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+ &qat_req->in.dec.c,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+ 0, req->src_len, 0);
+ }
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
+ req->dst_len,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
+ goto unmap_src;
+
+ } else {
+ qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+ &qat_req->out.dec.m,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+
+ }
+
+ qat_req->in.in_tab[3] = 0;
+ qat_req->out.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+ goto unmap_dst;
+
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap_in_params;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (uint64_t)(__force long)req;
+ msg->input_param_count = 3;
+ msg->output_param_count = 1;
+ do {
+ ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ } while (ret == -EBUSY && ctr++ < 100);
+
+ if (!ret)
+ return -EINPROGRESS;
+unmap_src:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.dec.c);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.dec.c))
+ dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
+unmap_dst:
+ if (qat_req->dst_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+ qat_req->out.dec.m);
+ else
+ if (!dma_mapping_error(dev, qat_req->out.dec.m))
+ dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
+ DMA_FROM_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct qat_rsa_ctx *ctx = context;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+ int ret;
+
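+ /* skip leading zero bytes (BER sign padding) so key_sz reflects the real width of the modulus */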
+ while (!*ptr && vlen) {
+ ptr++;
+ vlen--;
+ }
+
+ ctx->key_sz = vlen;
+ ret = -EINVAL;
+ /* In FIPS mode only allow 2K- and 3K-bit keys (256 and 384 bytes) */
+ if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
+ pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+ goto err;
+ }
+ /* invalid key size provided */
+ if (!qat_rsa_enc_fn_id(ctx->key_sz))
+ goto err;
+
+ ret = -ENOMEM;
+ ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+ if (!ctx->n)
+ goto err;
+
+ memcpy(ctx->n, ptr, ctx->key_sz);
+ return 0;
+err:
+ ctx->key_sz = 0;
+ ctx->n = NULL;
+ return ret;
+}
+
+int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct qat_rsa_ctx *ctx = context;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+
+ while (!*ptr && vlen) {
+ ptr++;
+ vlen--;
+ }
+
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+ ctx->e = NULL;
+ return -EINVAL;
+ }
+
+ ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+ if (!ctx->e)
+ return -ENOMEM;
+ memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+ return 0;
+}
+
+int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct qat_rsa_ctx *ctx = context;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr = value;
+ int ret;
+
+ while (!*ptr && vlen) {
+ ptr++;
+ vlen--;
+ }
+
+ ret = -EINVAL;
+ if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+ goto err;
+
+ /* In FIPS mode only allow 2K- and 3K-bit keys (256 and 384 bytes) */
+ if (fips_enabled && (vlen != 256 && vlen != 384)) {
+ pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+ goto err;
+ }
+
+ ret = -ENOMEM;
+ ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+ if (!ctx->d)
+ goto err;
+
+ memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+ return 0;
+err:
+ ctx->d = NULL;
+ return ret;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ int ret;
+
+ /* Free the old key if any */
+ if (ctx->n)
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ if (ctx->e)
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ }
+
+ ctx->n = NULL;
+ ctx->e = NULL;
+ ctx->d = NULL;
+
+ if (private)
+ ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
+ keylen);
+ else
+ ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
+ keylen);
+ if (ret < 0)
+ goto free;
+
+ if (!ctx->n || !ctx->e) {
+ /* invalid key provided */
+ ret = -EINVAL;
+ goto free;
+ }
+ if (private && !ctx->d) {
+ /* invalid private key provided */
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return 0;
+free:
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ ctx->d = NULL;
+ }
+ if (ctx->e) {
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ ctx->e = NULL;
+ }
+ if (ctx->n) {
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ ctx->n = NULL;
+ ctx->key_sz = 0;
+ }
+ return ret;
+}
+
+static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return qat_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return qat_rsa_setkey(tfm, key, keylen, true);
+}
+
+static int qat_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ return (ctx->n) ? ctx->key_sz : -EINVAL;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(get_current_node());
+
+ if (!inst)
+ return -EINVAL;
+
+ ctx->key_sz = 0;
+ ctx->inst = inst;
+ return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+ if (ctx->n)
+ dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+ if (ctx->e)
+ dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+ if (ctx->d) {
+ memset(ctx->d, '\0', ctx->key_sz);
+ dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+ }
+ qat_crypto_put_instance(ctx->inst);
+ ctx->n = NULL;
+ ctx->e = NULL;
+ ctx->d = NULL;
+}
+
+static struct akcipher_alg rsa = {
+ .encrypt = qat_rsa_enc,
+ .decrypt = qat_rsa_dec,
+ .sign = qat_rsa_dec,
+ .verify = qat_rsa_enc,
+ .set_pub_key = qat_rsa_setpubkey,
+ .set_priv_key = qat_rsa_setprivkey,
+ .max_size = qat_rsa_max_size,
+ .init = qat_rsa_init_tfm,
+ .exit = qat_rsa_exit_tfm,
+ .reqsize = sizeof(struct qat_rsa_request) + 64,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "qat-rsa",
+ .cra_priority = 1000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct qat_rsa_ctx),
+ },
+};
+
+int qat_asym_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs == 1) {
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
+ }
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs == 0)
+ crypto_unregister_akcipher(&rsa);
+ mutex_unlock(&algs_lock);
+}
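
A recurring detail in the RSA path above is that the PKE firmware operates on
fixed key_sz-byte big-endian operands: qat_rsa_enc()/qat_rsa_dec() left-pad
shorter inputs into a fresh buffer (the shift copy into src_align), and
qat_rsa_cb() strips leading zero bytes from the result before reporting
dst_len. A self-contained userspace sketch of both conversions (plain memcpy
stands in for the DMA-coherent and scatterlist plumbing, which this sketch
deliberately omits):

#include <stddef.h>
#include <string.h>

/* Left-pad a src_len-byte big-endian value into key_sz bytes, mirroring
 * the shift = key_sz - src_len copy in qat_rsa_enc(). */
static void pad_to_key_sz(unsigned char *dst, size_t key_sz,
			  const unsigned char *src, size_t src_len)
{
	size_t shift = key_sz - src_len;	/* caller ensures src_len <= key_sz */

	memset(dst, 0, shift);
	memcpy(dst + shift, src, src_len);
}

/* Drop leading zero bytes from a key_sz-byte result, mirroring the
 * dst_len trimming in qat_rsa_cb(); returns the significant length. */
static size_t strip_leading_zeros(unsigned char *buf, size_t key_sz)
{
	unsigned char *p = buf;
	size_t n = key_sz;

	while (n && !*p) {	/* test n first so an all-zero buffer is safe */
		p++;
		n--;
	}
	if (n && p != buf)
		memmove(buf, p, n);
	return n;
}
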
diff --git a/kernel/drivers/crypto/qat/qat_common/qat_crypto.c b/kernel/drivers/crypto/qat/qat_common/qat_crypto.c
index 3bd705ca5..9cab15497 100644
--- a/kernel/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/kernel/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -60,8 +60,8 @@ static struct service_hndl qat_crypto;
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
- if (atomic_sub_return(1, &inst->refctr) == 0)
- adf_dev_put(inst->accel_dev);
+ atomic_dec(&inst->refctr);
+ adf_dev_put(inst->accel_dev);
}
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
@@ -88,12 +88,6 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
if (inst->pke_rx)
adf_remove_ring(inst->pke_rx);
- if (inst->rnd_tx)
- adf_remove_ring(inst->rnd_tx);
-
- if (inst->rnd_rx)
- adf_remove_ring(inst->rnd_rx);
-
list_del(list_ptr);
kfree(inst);
}
@@ -103,47 +97,66 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
struct adf_accel_dev *accel_dev = NULL;
- struct qat_crypto_instance *inst_best = NULL;
+ struct qat_crypto_instance *inst = NULL;
struct list_head *itr;
unsigned long best = ~0;
list_for_each(itr, adf_devmgr_get_head()) {
- accel_dev = list_entry(itr, struct adf_accel_dev, list);
- if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
- dev_to_node(&GET_DEV(accel_dev)) < 0) &&
- adf_dev_started(accel_dev))
- break;
- accel_dev = NULL;
+ struct adf_accel_dev *tmp_dev;
+ unsigned long ctr;
+
+ tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+
+ if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
+ dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
+ adf_dev_started(tmp_dev) &&
+ !list_empty(&tmp_dev->crypto_list)) {
+ ctr = atomic_read(&tmp_dev->ref_count);
+ if (best > ctr) {
+ accel_dev = tmp_dev;
+ best = ctr;
+ }
+ }
}
- if (!accel_dev) {
- pr_err("QAT: Could not find a device on node %d\n", node);
- accel_dev = adf_devmgr_get_first();
+ if (!accel_dev)
+ pr_info("QAT: Could not find a device on node %d\n", node);
+
+ /* Get any started device */
+ list_for_each(itr, adf_devmgr_get_head()) {
+ struct adf_accel_dev *tmp_dev;
+
+ tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+
+ if (adf_dev_started(tmp_dev) &&
+ !list_empty(&tmp_dev->crypto_list)) {
+ accel_dev = tmp_dev;
+ break;
+ }
}
- if (!accel_dev || !adf_dev_started(accel_dev))
+
+ if (!accel_dev)
return NULL;
+ best = ~0;
list_for_each(itr, &accel_dev->crypto_list) {
- struct qat_crypto_instance *inst;
- unsigned long cur;
-
- inst = list_entry(itr, struct qat_crypto_instance, list);
- cur = atomic_read(&inst->refctr);
- if (best > cur) {
- inst_best = inst;
- best = cur;
+ struct qat_crypto_instance *tmp_inst;
+ unsigned long ctr;
+
+ tmp_inst = list_entry(itr, struct qat_crypto_instance, list);
+ ctr = atomic_read(&tmp_inst->refctr);
+ if (best > ctr) {
+ inst = tmp_inst;
+ best = ctr;
}
}
- if (inst_best) {
- if (atomic_add_return(1, &inst_best->refctr) == 1) {
- if (adf_dev_get(accel_dev)) {
- atomic_dec(&inst_best->refctr);
- dev_err(&GET_DEV(accel_dev),
- "Could not increment dev refctr\n");
- return NULL;
- }
+ if (inst) {
+ if (adf_dev_get(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+ return NULL;
}
+ atomic_inc(&inst->refctr);
}
- return inst_best;
+ return inst;
}
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
@@ -158,7 +171,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
INIT_LIST_HEAD(&accel_dev->crypto_list);
strlcpy(key, ADF_NUM_CY, sizeof(key));
-
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
return -EFAULT;
@@ -187,7 +199,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
if (kstrtoul(val, 10, &num_msg_sym))
goto err;
+
num_msg_sym = num_msg_sym >> 1;
+
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
goto err;
@@ -202,11 +216,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
msg_size, key, NULL, 0, &inst->sym_tx))
goto err;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, NULL, 0, &inst->rnd_tx))
- goto err;
-
msg_size = msg_size >> 1;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
@@ -220,15 +229,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
&inst->sym_rx))
goto err;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
- if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, qat_alg_callback, 0,
- &inst->rnd_rx))
- goto err;
-
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
- msg_size, key, qat_alg_callback, 0,
+ msg_size, key, qat_alg_asym_callback, 0,
&inst->pke_rx))
goto err;
}
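
qat_crypto_get_instance_node() is now a two-pass least-loaded search: prefer a
started device on the requested NUMA node with the lowest ref_count, fall back
to any started device with a non-empty crypto_list, then pick the instance
with the fewest outstanding references. Each pass is a plain minimum scan over
a counter; a simplified sketch of that core (the singly linked instance struct
is an illustration, not the driver's list_head layout):

struct instance {
	unsigned long refctr;	/* stand-in for atomic_read(&inst->refctr) */
	struct instance *next;
};

/* Minimum scan, as the final pass in qat_crypto_get_instance_node()
 * performs over accel_dev->crypto_list. */
static struct instance *pick_least_loaded(struct instance *head)
{
	struct instance *best_inst = NULL;
	unsigned long best = ~0UL;
	struct instance *i;

	for (i = head; i; i = i->next) {
		if (i->refctr < best) {
			best = i->refctr;
			best_inst = i;
		}
	}
	return best_inst;
}
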
diff --git a/kernel/drivers/crypto/qat/qat_common/qat_crypto.h b/kernel/drivers/crypto/qat/qat_common/qat_crypto.h
index d503007b4..dc0273fe3 100644
--- a/kernel/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/kernel/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -57,8 +57,6 @@ struct qat_crypto_instance {
struct adf_etr_ring_data *sym_rx;
struct adf_etr_ring_data *pke_tx;
struct adf_etr_ring_data *pke_rx;
- struct adf_etr_ring_data *rnd_tx;
- struct adf_etr_ring_data *rnd_rx;
struct adf_accel_dev *accel_dev;
struct list_head list;
unsigned long state;
diff --git a/kernel/drivers/crypto/qat/qat_common/qat_hal.c b/kernel/drivers/crypto/qat/qat_common/qat_hal.c
index 274ff7e9d..380e76180 100644
--- a/kernel/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/kernel/drivers/crypto/qat/qat_common/qat_hal.c
@@ -671,7 +671,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000)
#define LOCAL_TO_XFER_REG_OFFSET 0x800
#define ICP_DH895XCC_EP_OFFSET 0x3a000
-#define ICP_DH895XCC_PMISC_BAR 1
int qat_hal_init(struct adf_accel_dev *accel_dev)
{
unsigned char ae;
@@ -679,21 +678,24 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
struct icp_qat_fw_loader_handle *handle;
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *bar =
+ struct adf_bar *misc_bar =
&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
+ struct adf_bar *sram_bar =
+ &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
- handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
+ handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr +
ICP_DH895XCC_CAP_OFFSET;
- handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
+ handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr +
ICP_DH895XCC_AE_OFFSET;
- handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
+ handle->hal_ep_csr_addr_v = misc_bar->virt_addr +
+ ICP_DH895XCC_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
-
+ handle->hal_sram_addr_v = sram_bar->virt_addr;
handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
if (!handle->hal_handle)
goto out_hal_handle;
@@ -1032,7 +1034,7 @@ static int qat_hal_concat_micro_code(uint64_t *micro_inst,
unsigned int inst_num, unsigned int size,
unsigned int addr, unsigned int *value)
{
- int i, val_indx;
+ int i;
unsigned int cur_value;
const uint64_t *inst_arr;
int fixup_offset;
@@ -1040,8 +1042,7 @@ static int qat_hal_concat_micro_code(uint64_t *micro_inst,
int orig_num;
orig_num = inst_num;
- val_indx = 0;
- cur_value = value[val_indx++];
+ cur_value = value[0];
inst_arr = inst_4b;
usize = ARRAY_SIZE(inst_4b);
fixup_offset = inst_num;
diff --git a/kernel/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 b/kernel/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
new file mode 100644
index 000000000..f0066adb7
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
@@ -0,0 +1,11 @@
+RsaPrivKey ::= SEQUENCE {
+ version INTEGER,
+ n INTEGER ({ qat_rsa_get_n }),
+ e INTEGER ({ qat_rsa_get_e }),
+ d INTEGER ({ qat_rsa_get_d }),
+ prime1 INTEGER,
+ prime2 INTEGER,
+ exponent1 INTEGER,
+ exponent2 INTEGER,
+ coefficient INTEGER
+}
diff --git a/kernel/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1 b/kernel/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
new file mode 100644
index 000000000..bd667b31a
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
@@ -0,0 +1,4 @@
+RsaPubKey ::= SEQUENCE {
+ n INTEGER ({ qat_rsa_get_n }),
+ e INTEGER ({ qat_rsa_get_e })
+}
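
At build time these grammars are fed to the kernel's ASN.1 compiler, which
emits the generated decoder tables included above as qat_rsapubkey-asn1.h and
qat_rsaprivkey-asn1.h (presumably what the new qat_common/.gitignore entry in
the diffstat covers). Each ({ qat_rsa_get_* }) annotation names an action
callback invoked with the raw payload of the matched INTEGER, so key parsing
reduces to one decoder call per key type, as qat_rsa_setkey() shows:

/* Every action named in the grammar has the common decoder signature;
 * value/vlen is the content of the matched INTEGER. */
int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen);

/* Walking a BER-encoded public key with the generated table: */
ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key, keylen);
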
diff --git a/kernel/drivers/crypto/qat/qat_common/qat_uclo.c b/kernel/drivers/crypto/qat/qat_common/qat_uclo.c
index 1e27f9f7f..c48f181e8 100644
--- a/kernel/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/kernel/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -359,28 +359,7 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem)
{
- unsigned int i;
- struct icp_qat_uof_memvar_attr *mem_val_attr;
-
- mem_val_attr =
- (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
- sizeof(struct icp_qat_uof_initmem));
-
switch (init_mem->region) {
- case ICP_QAT_UOF_SRAM_REGION:
- if ((init_mem->addr + init_mem->num_in_bytes) >
- ICP_DH895XCC_PESRAM_BAR_SIZE) {
- pr_err("QAT: initmem on SRAM is out of range");
- return -EINVAL;
- }
- for (i = 0; i < init_mem->val_attr_num; i++) {
- qat_uclo_wr_sram_by_words(handle,
- init_mem->addr +
- mem_val_attr->offset_in_byte,
- &mem_val_attr->value, 4);
- mem_val_attr++;
- }
- break;
case ICP_QAT_UOF_LMEM_REGION:
if (qat_uclo_init_lmem_seg(handle, init_mem))
return -EINVAL;
@@ -990,6 +969,12 @@ out_err:
return -EFAULT;
}
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+ void *addr_ptr, int mem_size)
+{
+ qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
+}
+
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size)
{
diff --git a/kernel/drivers/crypto/qat/qat_dh895xcc/Makefile b/kernel/drivers/crypto/qat/qat_dh895xcc/Makefile
index 25171c557..8c79c5437 100644
--- a/kernel/drivers/crypto/qat/qat_dh895xcc/Makefile
+++ b/kernel/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -2,7 +2,4 @@ ccflags-y := -I$(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-objs := adf_drv.o \
adf_isr.o \
- adf_dh895xcc_hw_data.o \
- adf_hw_arbiter.o \
- qat_admin.o \
- adf_admin.o
+ adf_dh895xcc_hw_data.o
diff --git a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
deleted file mode 100644
index e4666065c..000000000
--- a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <adf_accel_devices.h>
-#include "adf_drv.h"
-#include "adf_dh895xcc_hw_data.h"
-
-#define ADF_ADMINMSG_LEN 32
-
-struct adf_admin_comms {
- dma_addr_t phy_addr;
- void *virt_addr;
- void __iomem *mailbox_addr;
- struct mutex lock; /* protects adf_admin_comms struct */
-};
-
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
- uint32_t ae, void *in, void *out)
-{
- struct adf_admin_comms *admin = accel_dev->admin;
- int offset = ae * ADF_ADMINMSG_LEN * 2;
- void __iomem *mailbox = admin->mailbox_addr;
- int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
- int times, received;
-
- mutex_lock(&admin->lock);
-
- if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
- mutex_unlock(&admin->lock);
- return -EAGAIN;
- }
-
- memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
- ADF_CSR_WR(mailbox, mb_offset, 1);
- received = 0;
- for (times = 0; times < 50; times++) {
- msleep(20);
- if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
- received = 1;
- break;
- }
- }
- if (received)
- memcpy(out, admin->virt_addr + offset +
- ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
- else
- dev_err(&GET_DEV(accel_dev),
- "Failed to send admin msg to accelerator\n");
-
- mutex_unlock(&admin->lock);
- return received ? 0 : -EFAULT;
-}
-
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
-{
- struct adf_admin_comms *admin;
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
- void __iomem *csr = pmisc->virt_addr;
- void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
- uint64_t reg_val;
-
- admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
- dev_to_node(&GET_DEV(accel_dev)));
- if (!admin)
- return -ENOMEM;
- admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
- &admin->phy_addr, GFP_KERNEL);
- if (!admin->virt_addr) {
- dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
- kfree(admin);
- return -ENOMEM;
- }
- reg_val = (uint64_t)admin->phy_addr;
- ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
- ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
- mutex_init(&admin->lock);
- admin->mailbox_addr = mailbox;
- accel_dev->admin = admin;
- return 0;
-}
-
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
-{
- struct adf_admin_comms *admin = accel_dev->admin;
-
- if (!admin)
- return;
-
- if (admin->virt_addr)
- dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
- admin->virt_addr, admin->phy_addr);
-
- mutex_destroy(&admin->lock);
- kfree(admin);
- accel_dev->admin = NULL;
-}
diff --git a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index b1386922d..ff54257ec 100644
--- a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -45,8 +45,9 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h"
-#include "adf_common_drv.h"
#include "adf_drv.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
@@ -117,6 +118,11 @@ static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
return ADF_DH895XCC_ETR_BAR;
}
+static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCC_SRAM_BAR;
+}
+
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
@@ -156,6 +162,16 @@ void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
}
}
+static uint32_t get_pf2vf_offset(uint32_t i)
+{
+ return ADF_DH895XCC_PF2VF_OFFSET(i);
+}
+
+static uint32_t get_vintmsk_offset(uint32_t i)
+{
+ return ADF_DH895XCC_VINTMSK_OFFSET(i);
+}
+
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -192,18 +208,23 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
/* Enable bundle and misc interrupts */
ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
- ADF_DH895XCC_SMIA0_MASK);
+ accel_dev->pf.vf_info ? 0 :
+ GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
ADF_DH895XCC_SMIA1_MASK);
}
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &dh895xcc_class;
hw_data->instance_id = dh895xcc_class.instances++;
hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
- hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
@@ -211,21 +232,28 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_enable_error_correction;
- hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
- hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_vintmsk_offset = get_vintmsk_offset;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_sku = get_sku;
hw_data->fw_name = ADF_DH895XCC_FW;
+ hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
+ hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
diff --git a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 25269a9f2..88dffb297 100644
--- a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -48,6 +48,7 @@
#define ADF_DH895x_HW_DATA_H_
/* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
#define ADF_DH895XCC_PMISC_BAR 1
#define ADF_DH895XCC_ETR_BAR 2
#define ADF_DH895XCC_RX_RINGS_OFFSET 8
@@ -79,10 +80,11 @@
#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
-/* Admin Messages Registers */
-#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
-#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
-#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
-#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C)
+#define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8)
+#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
+/* FW names */
#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_mmp.bin"
#endif
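
The new PF-to-VF registers are laid out as fixed-stride arrays in the same
0x3A000 CSR block that holds ERRSOU3/ERRSOU5, so get_pf2vf_offset() and
get_vintmsk_offset() in adf_dh895xcc_hw_data.c reduce to index arithmetic. A
quick userspace check of that arithmetic:

#include <stdio.h>

#define ADF_DH895XCC_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_DH895XCC_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))

int main(void)
{
	int i;

	/* VF0 doorbell at 0x3a280, VF1 at 0x3a284, ...; the interrupt
	 * mask registers sit 0x80 below at the same 4-byte stride. */
	for (i = 0; i < 3; i++)
		printf("vf%d: pf2vf=0x%x vintmsk=0x%x\n", i,
		       ADF_DH895XCC_PF2VF_OFFSET(i),
		       ADF_DH895XCC_VINTMSK_OFFSET(i));
	return 0;
}
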
diff --git a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 9decea277..f8dd14f23 100644
--- a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -82,16 +82,21 @@ static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = adf_driver_name,
.probe = adf_probe,
- .remove = adf_remove
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
};
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i;
- adf_dev_shutdown(accel_dev);
-
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
@@ -100,7 +105,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
}
if (accel_dev->hw_device) {
- switch (accel_dev->hw_device->pci_dev_id) {
+ switch (accel_pci_dev->pci_dev->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
break;
@@ -108,13 +113,11 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
break;
}
kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
}
adf_cfg_dev_remove(accel_dev);
debugfs_remove(accel_dev->debugfs_dir);
- adf_devmgr_rm_dev(accel_dev);
- pci_release_regions(accel_pci_dev->pci_dev);
- pci_disable_device(accel_pci_dev->pci_dev);
- kfree(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
}
static int adf_dev_configure(struct adf_accel_dev *accel_dev)
@@ -167,12 +170,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev)
key, (void *)&val, ADF_DEC))
goto err;
- val = 4;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
val = 8;
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
@@ -185,12 +182,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev)
key, (void *)&val, ADF_DEC))
goto err;
- val = 12;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
- if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, (void *)&val, ADF_DEC))
- goto err;
-
val = ADF_COALESCING_DEF_TIME;
snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
@@ -217,7 +208,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret;
+ int ret, bar_mask;
switch (ent->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -241,10 +232,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */
- if (adf_devmgr_add_dev(accel_dev)) {
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
@@ -267,7 +260,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
default:
return -ENODEV;
}
- accel_pci_dev = &accel_dev->accel_pci_dev;
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
&hw_data->fuses);
@@ -276,7 +268,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
- accel_pci_dev->pci_dev = pdev;
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||
((~hw_data->ae_mask) & 0x01)) {
@@ -286,11 +277,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
- hw_data->dev_class->name, hw_data->instance_id);
+ snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+ ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
if (!accel_dev->debugfs_dir) {
- dev_err(&pdev->dev, "Could not create debugfs dir\n");
+ dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
ret = -EINVAL;
goto out_err;
}
@@ -300,6 +294,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err;
+ pcie_set_readrq(pdev, 1024);
+
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
@@ -311,7 +307,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
ret = -EFAULT;
- goto out_err;
+ goto out_err_disable;
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
@@ -322,7 +318,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_request_regions(pdev, adf_driver_name)) {
ret = -EFAULT;
- goto out_err;
+ goto out_err_disable;
}
/* Read accelerator capabilities mask */
@@ -330,19 +326,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
&hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */
- for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
- struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+ ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
- bar_nr = i * 2;
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
- dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
- goto out_err;
+ goto out_err_free_reg;
}
}
pci_set_master(pdev);
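The BAR walk above no longer assumes every even-numbered BAR is populated: it asks pci_select_bars(pdev, IORESOURCE_MEM) for a bitmask of the memory BARs and packs whatever it finds into consecutive pci_bars[] slots. A minimal standalone sketch of that bitmask walk, with a made-up mask value standing in for the PCI core's answer:

#include <stdio.h>

int main(void)
{
	unsigned int bar_mask = 0x15;	/* made-up: BARs 0, 2 and 4 implemented */
	unsigned int bar_nr;
	int i = 0;

	for (bar_nr = 0; bar_nr < 6; bar_nr++) {
		if (!(bar_mask & (1u << bar_nr)))
			continue;	/* skip unimplemented or I/O BARs */
		printf("pci_bars[%d] <- BAR %u\n", i++, bar_nr);
	}
	return 0;
}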
@@ -350,32 +348,40 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adf_enable_aer(accel_dev, &adf_driver)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
- goto out_err;
+ goto out_err_free_reg;
}
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
- goto out_err;
+ goto out_err_free_reg;
}
ret = adf_dev_configure(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_free_reg;
ret = adf_dev_init(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_dev_shutdown;
ret = adf_dev_start(accel_dev);
- if (ret) {
- adf_dev_stop(accel_dev);
- goto out_err;
- }
+ if (ret)
+ goto out_err_dev_stop;
- return 0;
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+ adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
return ret;
}
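The probe path now unwinds with one goto label per acquired resource instead of funnelling every failure into adf_cleanup_accel(): each label releases exactly what had already been taken, in reverse order, and adf_remove() gains the matching adf_dev_shutdown()/adf_cleanup_pci_dev() steps. A generic, compilable sketch of the ladder, with stub functions standing in for the driver's enable/request/init/start stages:

#include <stdio.h>

/* Stubs standing in for pci_enable_device(), pci_request_regions(),
 * adf_dev_init(), and so on; 0 means success. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }	/* force the unwind */
static void release_a(void) { printf("release a\n"); }
static void release_b(void) { printf("release b\n"); }

static int probe_sketch(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;
	ret = acquire_b();
	if (ret)
		goto out_release_a;
	ret = acquire_c();
	if (ret)
		goto out_release_b;
	return 0;

out_release_b:
	release_b();
out_release_a:
	release_a();
out:
	return ret;
}

int main(void)
{
	return probe_sketch() ? 1 : 0;
}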
@@ -389,15 +395,17 @@ static void adf_remove(struct pci_dev *pdev)
}
if (adf_dev_stop(accel_dev))
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+ adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
- if (qat_admin_register())
- return -EFAULT;
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
@@ -409,7 +417,6 @@ static int __init adfdrv_init(void)
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
- qat_admin_unregister();
}
module_init(adfdrv_init);
@@ -417,5 +424,6 @@ module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
-MODULE_FIRMWARE("qat_895xcc.bin");
+MODULE_FIRMWARE(ADF_DH895XCC_FW);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
index a2fbb6ce7..85ff245bd 100644
--- a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
+++ b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
@@ -53,15 +53,6 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
uint32_t const **arb_map_config);
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
- uint32_t ae, void *in, void *out);
-int qat_admin_register(void);
-int qat_admin_unregister(void);
-int adf_init_arb(struct adf_accel_dev *accel_dev);
-void adf_exit_arb(struct adf_accel_dev *accel_dev);
#endif
diff --git a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index 0d03c109c..5570f7879 100644
--- a/kernel/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/kernel/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -59,21 +59,30 @@
#include <adf_transport_access_macros.h>
#include <adf_transport_internal.h>
#include "adf_drv.h"
+#include "adf_dh895xcc_hw_data.h"
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t msix_num_entries = hw_data->num_banks + 1;
- int i;
-
- for (i = 0; i < msix_num_entries; i++)
- pci_dev_info->msix_entries.entries[i].entry = i;
+ u32 msix_num_entries = 1;
+
+ /* If SR-IOV is disabled, add entries for each bank */
+ if (!accel_dev->pf.vf_info) {
+ int i;
+
+ msix_num_entries += hw_data->num_banks;
+ for (i = 0; i < msix_num_entries; i++)
+ pci_dev_info->msix_entries.entries[i].entry = i;
+ } else {
+ pci_dev_info->msix_entries.entries[0].entry =
+ hw_data->num_banks;
+ }
if (pci_enable_msix_exact(pci_dev_info->pci_dev,
pci_dev_info->msix_entries.entries,
msix_num_entries)) {
- dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n");
+ dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
return -EFAULT;
}
return 0;
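With SR-IOV enabled the ring banks are handed to the VFs, so the PF requests a single MSI-X vector and points it at hardware entry num_banks (the AE/VF2PF slot); without SR-IOV it requests one vector per bank plus that final AE entry. A small sketch of the count rule, with an illustrative bank count:

#include <stdio.h>

int main(void)
{
	unsigned int num_banks = 32;		/* illustrative bank count */
	int sriov_enabled = 1;			/* flip to compare both cases */
	unsigned int msix_num_entries = 1;	/* always one AE/VF2PF vector */

	if (!sriov_enabled)
		msix_num_entries += num_banks;	/* one vector per ring bank */

	printf("vectors requested: %u (AE at hardware entry %u)\n",
	       msix_num_entries, num_banks);
	return 0;
}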
@@ -97,9 +106,58 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
struct adf_accel_dev *accel_dev = dev_ptr;
- dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
- accel_dev->accel_id);
- return IRQ_HANDLED;
+#ifdef CONFIG_PCI_IOV
+ /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+ if (accel_dev->pf.vf_info) {
+ void __iomem *pmisc_bar_addr =
+ (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+ u32 vf_mask;
+
+ /* Get the interrupt sources triggered by VFs */
+ vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) &
+ 0x0000FFFF) << 16) |
+ ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) &
+ 0x01FFFE00) >> 9);
+
+ if (vf_mask) {
+ struct adf_accel_vf_info *vf_info;
+ bool irq_handled = false;
+ int i;
+
+ /* Disable VF2PF interrupts for VFs with pending ints */
+ adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+
+ /*
+ * Schedule tasklets to handle VF2PF interrupt BHs
+ * unless the VF is malicious and is attempting to
+ * flood the host OS with VF2PF interrupts.
+ */
+ for_each_set_bit(i, (const unsigned long *)&vf_mask,
+ (sizeof(vf_mask) * BITS_PER_BYTE)) {
+ vf_info = accel_dev->pf.vf_info + i;
+
+ if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Too many ints from VF%d\n",
+ vf_info->vf_nr + 1);
+ continue;
+ }
+
+ /* Tasklet will re-enable ints from this VF */
+ tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+ irq_handled = true;
+ }
+
+ if (irq_handled)
+ return IRQ_HANDLED;
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+ accel_dev->accel_id);
+
+ return IRQ_NONE;
}
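The AE vector now doubles as the VF2PF doorbell: bits 9-24 of ERRSOU3 report VF1-VF16 and bits 0-15 of ERRSOU5 report VF17-VF32, and the handler folds both reads into a single 32-bit vf_mask before rate-limiting and scheduling the per-VF tasklets. A standalone sketch of the mask assembly, with made-up CSR values in place of the ADF_CSR_RD() reads:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t errsou5 = 0x00000003;	/* made-up: VF17 and VF18 pending */
	uint32_t errsou3 = 0x00000600;	/* made-up: VF1 and VF2 pending */
	uint32_t vf_mask = ((errsou5 & 0x0000FFFF) << 16) |
			   ((errsou3 & 0x01FFFE00) >> 9);
	int i;

	for (i = 0; i < 32; i++)
		if (vf_mask & (1u << i))
			printf("VF%d raised a VF2PF interrupt\n", i + 1);
	return 0;
}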
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
@@ -108,28 +166,32 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
struct adf_etr_data *etr_data = accel_dev->transport;
- int ret, i;
+ int ret, i = 0;
char *name;
- /* Request msix irq for all banks */
- for (i = 0; i < hw_data->num_banks; i++) {
- struct adf_etr_bank_data *bank = &etr_data->banks[i];
- unsigned int cpu, cpus = num_online_cpus();
-
- name = *(pci_dev_info->msix_entries.names + i);
- snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
- "qat%d-bundle%d", accel_dev->accel_id, i);
- ret = request_irq(msixe[i].vector,
- adf_msix_isr_bundle, 0, name, bank);
- if (ret) {
- dev_err(&GET_DEV(accel_dev),
- "failed to enable irq %d for %s\n",
- msixe[i].vector, name);
- return ret;
+ /* Request msix irq for all banks unless SR-IOV enabled */
+ if (!accel_dev->pf.vf_info) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ struct adf_etr_bank_data *bank = &etr_data->banks[i];
+ unsigned int cpu, cpus = num_online_cpus();
+
+ name = *(pci_dev_info->msix_entries.names + i);
+ snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat%d-bundle%d", accel_dev->accel_id, i);
+ ret = request_irq(msixe[i].vector,
+ adf_msix_isr_bundle, 0, name, bank);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "failed to enable irq %d for %s\n",
+ msixe[i].vector, name);
+ return ret;
+ }
+
+ cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+ i) % cpus;
+ irq_set_affinity_hint(msixe[i].vector,
+ get_cpu_mask(cpu));
}
-
- cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
- irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
}
/* Request msix irq for AE */
@@ -152,11 +214,13 @@ static void adf_free_irqs(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
struct adf_etr_data *etr_data = accel_dev->transport;
- int i;
+ int i = 0;
- for (i = 0; i < hw_data->num_banks; i++) {
- irq_set_affinity_hint(msixe[i].vector, NULL);
- free_irq(msixe[i].vector, &etr_data->banks[i]);
+ if (pci_dev_info->msix_entries.num_entries > 1) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ irq_set_affinity_hint(msixe[i].vector, NULL);
+ free_irq(msixe[i].vector, &etr_data->banks[i]);
+ }
}
irq_set_affinity_hint(msixe[i].vector, NULL);
free_irq(msixe[i].vector, accel_dev);
@@ -168,7 +232,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
char **names;
struct msix_entry *entries;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t msix_num_entries = hw_data->num_banks + 1;
+ u32 msix_num_entries = 1;
+
+ /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+ if (!accel_dev->pf.vf_info)
+ msix_num_entries += hw_data->num_banks;
entries = kzalloc_node(msix_num_entries * sizeof(*entries),
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
@@ -185,6 +253,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
if (!(*(names + i)))
goto err;
}
+ accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
accel_dev->accel_pci_dev.msix_entries.entries = entries;
accel_dev->accel_pci_dev.msix_entries.names = names;
return 0;
@@ -198,13 +267,11 @@ err:
static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t msix_num_entries = hw_data->num_banks + 1;
char **names = accel_dev->accel_pci_dev.msix_entries.names;
int i;
kfree(accel_dev->accel_pci_dev.msix_entries.entries);
- for (i = 0; i < msix_num_entries; i++)
+ for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
kfree(*(names + i));
kfree(names);
}
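The alloc/free pair above also closes a bookkeeping gap: the free path used to recompute the entry count as hw_data->num_banks + 1, which no longer matches once the count depends on SR-IOV state, so num_entries is now recorded in the table at allocation time. A generic sketch of keeping the size beside the allocation:

#include <stdlib.h>

struct entry_table {
	struct entry { int v; } *entries;
	unsigned int num_entries;	/* recorded at alloc time */
};

static int table_alloc(struct entry_table *t, unsigned int n)
{
	t->entries = calloc(n, sizeof(*t->entries));
	if (!t->entries)
		return -1;
	t->num_entries = n;
	return 0;
}

static void table_free(struct entry_table *t)
{
	/* any teardown loop bounds come from the table itself,
	 * never re-derived from device state that may have changed */
	free(t->entries);
	t->num_entries = 0;
}

int main(void)
{
	struct entry_table t;

	if (!table_alloc(&t, 33))
		table_free(&t);
	return 0;
}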
diff --git a/kernel/drivers/crypto/qat/qat_dh895xccvf/Makefile b/kernel/drivers/crypto/qat/qat_dh895xccvf/Makefile
new file mode 100644
index 000000000..85399fcbb
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_dh895xccvf/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o \
+ adf_isr.o \
+ adf_dh895xccvf_hw_data.o
diff --git a/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644
index 000000000..a9a27eff4
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -0,0 +1,172 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+ .name = ADF_DH895XCCVF_DEVICE_NAME,
+ .type = DEV_DH895XCCVF,
+ .instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+ return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+ return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+ return ADF_DH895XCCIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+ return ADF_DH895XCCIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (adf_iov_putmsg(accel_dev, msg, 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Init event to PF\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (adf_iov_putmsg(accel_dev, msg, 0))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Shutdown event to PF\n");
+}
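Both events above are a single 32-bit doorbell write: an origin flag marking the message as generated by the in-kernel driver, plus a message-type field. A sketch of the composition; the bit positions below are illustrative placeholders for the real definitions in adf_pf2vf_msg.h:

#include <stdio.h>
#include <stdint.h>

#define MSGORIGIN_SYSTEM	(1u << 1)	/* placeholder value */
#define MSGTYPE_SHIFT		2		/* placeholder value */
#define MSGTYPE_INIT		3		/* placeholder value */

int main(void)
{
	uint32_t msg = MSGORIGIN_SYSTEM | (MSGTYPE_INIT << MSGTYPE_SHIFT);

	printf("VF2PF doorbell value: 0x%08x\n", msg);
	return 0;
}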
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &dh895xcciov_class;
+ hw_data->instance_id = dh895xcciov_class.instances++;
+ hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+ hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+ hw_data->num_logical_accel = 1;
+ hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+ hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+ hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+ hw_data->free_irq = adf_vf_isr_resource_free;
+ hw_data->enable_error_correction = adf_vf_void_noop;
+ hw_data->init_admin_comms = adf_vf_int_noop;
+ hw_data->exit_admin_comms = adf_vf_void_noop;
+ hw_data->send_admin_init = adf_vf2pf_init;
+ hw_data->init_arb = adf_vf_int_noop;
+ hw_data->exit_arb = adf_vf_void_noop;
+ hw_data->disable_iov = adf_vf2pf_shutdown;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_vintmsk_offset = get_vintmsk_offset;
+ hw_data->get_sku = get_sku;
+ hw_data->enable_ints = adf_vf_void_noop;
+ hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+ hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
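The VF rides the common init/start machinery by plugging no-ops into the PF-only hooks (admin comms, arbiter, error correction) and supplying real callbacks only where a VF actually differs (IRQ setup, VF2PF events). A generic, compilable sketch of that ops-table pattern, with illustrative names rather than the driver's:

#include <stdio.h>

struct dev_ops {
	int  (*init_admin)(void);
	void (*exit_admin)(void);
};

static int vf_int_noop(void) { return 0; }
static void vf_void_noop(void) { }

/* Shared core path: calls through the table, oblivious to PF vs VF. */
static int common_init(const struct dev_ops *ops)
{
	return ops->init_admin();
}

int main(void)
{
	struct dev_ops vf_ops = {
		.init_admin = vf_int_noop,	/* VF: nothing to do */
		.exit_admin = vf_void_noop,
	};

	printf("init -> %d\n", common_init(&vf_ops));
	return 0;
}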
diff --git a/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644
index 000000000..8f6babfef
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -0,0 +1,68 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+
+#define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200
+#define ADF_DH895XCC_PF2VF_PF2VFINT BIT(0)
+
+#define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204
+#define ADF_DH895XCC_VINTSOU_BUN BIT(0)
+#define ADF_DH895XCC_VINTSOU_PF2VF BIT(1)
+
+#define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208
+#endif
diff --git a/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644
index 000000000..789426f21
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -0,0 +1,393 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_transport_access_macros.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME;
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = adf_driver_name,
+ .probe = adf_probe,
+ .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+ pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+ pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+ struct adf_accel_dev *pf;
+ int i;
+
+ for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+ if (bar->virt_addr)
+ pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+ }
+
+ if (accel_dev->hw_device) {
+ switch (accel_pci_dev->pci_dev->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+ break;
+ default:
+ break;
+ }
+ kfree(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_cfg_dev_remove(accel_dev);
+ debugfs_remove(accel_dev->debugfs_dir);
+ pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+ adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_dev_configure(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ unsigned long val, bank = 0;
+
+ if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ goto err;
+ if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+ (void *)&bank, ADF_DEC))
+ goto err;
+
+ val = bank;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+ (void *)&val, ADF_DEC))
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0);
+
+ val = 128;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+ (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 8;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 10;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0);
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+ (int)bank);
+ if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, (void *)&val, ADF_DEC))
+ goto err;
+
+ val = 1;
+ if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ ADF_NUM_CY, (void *)&val, ADF_DEC))
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n");
+ return -EINVAL;
+}
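The VF owns exactly one ring bank, so the configuration pins everything to bank 0 and splits its rings: asym TX on ring 0, sym TX on ring 2, and the matching RX rings at 8 and 10 (the tx_rx_gap of 8 set in the hw_data above). A sketch of the resulting key/value table, assuming the ADF_* macros expand to the upstream config strings:

#include <stdio.h>

int main(void)
{
	static const struct { const char *key; unsigned long val; } cfg[] = {
		{ "Cy0BankNumber",			0 },
		{ "Cy0CoreAffinity",			0 },
		{ "Cy0NumConcurrentAsymRequests",	128 },
		{ "Cy0NumConcurrentSymRequests",	512 },
		{ "Cy0RingAsymTx",			0 },
		{ "Cy0RingSymTx",			2 },
		{ "Cy0RingAsymRx",			8 },
		{ "Cy0RingSymRx",			10 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++)
		printf("%s = %lu\n", cfg[i].key, cfg[i].val);
	return 0;
}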
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_dev *pf;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+ int ret, bar_mask;
+
+ switch (ent->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+ return -ENODEV;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!accel_dev)
+ return -ENOMEM;
+
+ accel_dev->is_vf = true;
+ pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /* Add accel device to accel table */
+ if (adf_devmgr_add_dev(accel_dev, pf)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ kfree(accel_dev);
+ return -EFAULT;
+ }
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and configure device configuration structure */
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ accel_dev->hw_device = hw_data;
+ switch (ent->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+ break;
+ default:
+ ret = -ENODEV;
+ goto out_err;
+ }
+
+ /* Get Accelerators and Accelerators Engines masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* Create dev top level debugfs entry */
+ snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+ ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+ if (!accel_dev->debugfs_dir) {
+ dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* enable PCI device */
+ if (pci_enable_device(pdev)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* set dma identifier */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ ret = -EFAULT;
+ goto out_err_disable;
+ } else {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ } else {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ }
+
+ if (pci_request_regions(pdev, adf_driver_name)) {
+ ret = -EFAULT;
+ goto out_err_disable;
+ }
+
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+ ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+ if (!bar->base_addr)
+ break;
+ bar->size = pci_resource_len(pdev, bar_nr);
+ bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+ ret = -EFAULT;
+ goto out_err_free_reg;
+ }
+ }
+ pci_set_master(pdev);
+ /* Completion for VF2PF request/response message exchange */
+ init_completion(&accel_dev->vf.iov_msg_completion);
+
+ ret = adf_dev_configure(accel_dev);
+ if (ret)
+ goto out_err_free_reg;
+
+ ret = adf_dev_init(accel_dev);
+ if (ret)
+ goto out_err_dev_shutdown;
+
+ ret = adf_dev_start(accel_dev);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+ adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+ pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+ pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ kfree(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ if (adf_dev_stop(accel_dev))
+ dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+ adf_dev_shutdown(accel_dev);
+ adf_cleanup_accel(accel_dev);
+ adf_cleanup_pci_dev(accel_dev);
+ kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+ request_module("intel_qat");
+
+ if (pci_register_driver(&adf_driver)) {
+ pr_err("QAT: Driver initialization failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+ pci_unregister_driver(&adf_driver);
+ adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/kernel/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
index 55b7a8e48..e270e4a63 100644
--- a/kernel/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
+++ b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
@@ -44,64 +44,14 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <icp_qat_fw_init_admin.h>
+#ifndef ADF_DH895xVF_DRV_H_
+#define ADF_DH895xVF_DRV_H_
#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include "adf_drv.h"
-
-static struct service_hndl qat_admin;
-
-static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
-{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- struct icp_qat_fw_init_admin_req req;
- struct icp_qat_fw_init_admin_resp resp;
- int i;
-
- memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
- req.init_admin_cmd_id = cmd;
- for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
- memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
- if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
- resp.init_resp_hdr.status)
- return -EFAULT;
- }
- return 0;
-}
-
-static int qat_admin_start(struct adf_accel_dev *accel_dev)
-{
- return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
-}
-
-static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
- enum adf_event event)
-{
- int ret;
-
- switch (event) {
- case ADF_EVENT_START:
- ret = qat_admin_start(accel_dev);
- break;
- case ADF_EVENT_STOP:
- case ADF_EVENT_INIT:
- case ADF_EVENT_SHUTDOWN:
- default:
- ret = 0;
- }
- return ret;
-}
-
-int qat_admin_register(void)
-{
- memset(&qat_admin, 0, sizeof(struct service_hndl));
- qat_admin.event_hld = qat_admin_event_handler;
- qat_admin.name = "qat_admin";
- qat_admin.admin = 1;
- return adf_service_register(&qat_admin);
-}
-
-int qat_admin_unregister(void)
-{
- return adf_service_unregister(&qat_admin);
-}
+#include <adf_transport.h>
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+#endif
diff --git a/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
new file mode 100644
index 000000000..87c5d8adb
--- /dev/null
+++ b/kernel/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
@@ -0,0 +1,258 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2014 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2014 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_cfg_strings.h>
+#include <adf_cfg_common.h>
+#include <adf_transport_access_macros.h>
+#include <adf_transport_internal.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_drv.h"
+#include "adf_dh895xccvf_hw_data.h"
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ int stat = pci_enable_msi(pci_dev_info->pci_dev);
+
+ if (stat) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to enable MSI interrupts\n");
+ return stat;
+ }
+
+ accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+ if (!accel_dev->vf.irq_name)
+ return -ENOMEM;
+
+ return stat;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ kfree(accel_dev->vf.irq_name);
+ pci_disable_msi(pdev);
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+ struct adf_accel_dev *accel_dev = data;
+ void __iomem *pmisc_bar_addr =
+ (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+ u32 msg;
+
+ /* Read the message from PF */
+ msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET);
+
+ if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
+ /* Ignore legacy non-system (non-kernel) PF2VF messages */
+ goto err;
+
+ switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
+ case ADF_PF2VF_MSGTYPE_RESTARTING:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Restarting msg received from PF 0x%x\n", msg);
+ adf_dev_stop(accel_dev);
+ break;
+ case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Version resp received from PF 0x%x\n", msg);
+ accel_dev->vf.pf_version =
+ (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
+ ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
+ accel_dev->vf.compatible =
+ (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
+ ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+ complete(&accel_dev->vf.iov_msg_completion);
+ break;
+ default:
+ goto err;
+ }
+
+ /* To ack, clear the PF2VFINT bit */
+ msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT;
+ ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg);
+
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
+ return;
+err:
+ dev_err(&GET_DEV(accel_dev),
+ "Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
+ msg);
+}
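A PF2VF message arrives through the same shared CSR: the handler decodes it, acknowledges by clearing the doorbell bit and writing the value back, and only then re-enables PF2VF interrupts; a version response additionally completes iov_msg_completion so the VF2PF request path blocked on it can proceed. A standalone decode-and-ack sketch; the masks and shifts are illustrative placeholders for the adf_pf2vf_msg.h definitions:

#include <stdio.h>
#include <stdint.h>

#define MSGORIGIN_SYSTEM	(1u << 1)	/* placeholder value */
#define MSGTYPE_MASK		0x3Cu		/* placeholder value */
#define MSGTYPE_SHIFT		2		/* placeholder value */
#define MSGTYPE_RESTARTING	1		/* placeholder value */
#define PF2VFINT		(1u << 0)	/* doorbell bit */

int main(void)
{
	uint32_t msg = MSGORIGIN_SYSTEM |
		       (MSGTYPE_RESTARTING << MSGTYPE_SHIFT) | PF2VFINT;

	if (msg & MSGORIGIN_SYSTEM)
		printf("message type = %u\n",
		       (msg & MSGTYPE_MASK) >> MSGTYPE_SHIFT);

	msg &= ~PF2VFINT;	/* ack: clear the doorbell, write back */
	printf("ack value written back: 0x%08x\n", msg);
	return 0;
}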
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+ tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+ (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+ mutex_init(&accel_dev->vf.vf2pf_lock);
+ return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+ tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+ tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+ mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+ struct adf_accel_dev *accel_dev = privdata;
+ void __iomem *pmisc_bar_addr =
+ (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+ u32 v_int;
+
+ /* Read VF INT source CSR to determine the source of VF interrupt */
+ v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET);
+
+ /* Check for PF2VF interrupt */
+ if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) {
+ /* Disable PF to VF interrupt */
+ adf_disable_pf2vf_interrupts(accel_dev);
+
+ /* Schedule tasklet to handle interrupt BH */
+ tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+ return IRQ_HANDLED;
+ }
+
+ /* Check bundle interrupt */
+ if (v_int & ADF_DH895XCC_VINTSOU_BUN) {
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+ /* Disable Flag and Coalesce Ring Interrupts */
+ WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+ 0);
+ tasklet_hi_schedule(&bank->resp_handler);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
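The VF's single MSI line is demultiplexed in the top half by reading VINTSOU: BIT(1) means a PF message (disable PF2VF interrupts and punt to the tasklet), BIT(0) means ring-bank responses (mask the bank and punt to its response handler), and anything else returns IRQ_NONE. A standalone sketch of that dispatch, using the bit layout from adf_dh895xccvf_hw_data.h:

#include <stdio.h>
#include <stdint.h>

#define VINTSOU_BUN	(1u << 0)	/* bundle (ring bank) interrupt */
#define VINTSOU_PF2VF	(1u << 1)	/* PF-to-VF message interrupt */

int main(void)
{
	uint32_t v_int = VINTSOU_PF2VF;	/* example CSR read */

	if (v_int & VINTSOU_PF2VF)
		printf("schedule the pf2vf tasklet\n");
	else if (v_int & VINTSOU_BUN)
		printf("schedule bank 0's response handler\n");
	else
		printf("IRQ_NONE: interrupt was not ours\n");
	return 0;
}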
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ unsigned int cpu;
+ int ret;
+
+ snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+ "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+ (void *)accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+ accel_dev->vf.irq_name);
+ return ret;
+ }
+ cpu = accel_dev->accel_id % num_online_cpus();
+ irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+
+ return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+
+ tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+ (unsigned long)priv_data->banks);
+ return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+ struct adf_etr_data *priv_data = accel_dev->transport;
+
+ tasklet_disable(&priv_data->banks[0].resp_handler);
+ tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ irq_set_affinity_hint(pdev->irq, NULL);
+ free_irq(pdev->irq, (void *)accel_dev);
+ adf_cleanup_bh(accel_dev);
+ adf_cleanup_pf2vf_bh(accel_dev);
+ adf_disable_msi(accel_dev);
+}
+
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+ if (adf_enable_msi(accel_dev))
+ goto err_out;
+
+ if (adf_setup_pf2vf_bh(accel_dev))
+ goto err_out;
+
+ if (adf_setup_bh(accel_dev))
+ goto err_out;
+
+ if (adf_request_msi_irq(accel_dev))
+ goto err_out;
+
+ return 0;
+err_out:
+ adf_vf_isr_resource_free(accel_dev);
+ return -EFAULT;
+}