path: root/kernel/sound/soc/intel/common
author    Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/sound/soc/intel/common
parent    98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/sound/soc/intel/common')
-rw-r--r--  kernel/sound/soc/intel/common/Makefile        |    7
-rw-r--r--  kernel/sound/soc/intel/common/sst-acpi.c      |  286
-rw-r--r--  kernel/sound/soc/intel/common/sst-dsp-priv.h  |  373
-rw-r--r--  kernel/sound/soc/intel/common/sst-dsp.c       |  420
-rw-r--r--  kernel/sound/soc/intel/common/sst-dsp.h       |  285
-rw-r--r--  kernel/sound/soc/intel/common/sst-firmware.c  | 1205
-rw-r--r--  kernel/sound/soc/intel/common/sst-ipc.c       |  294
-rw-r--r--  kernel/sound/soc/intel/common/sst-ipc.h       |   91
8 files changed, 2961 insertions, 0 deletions
diff --git a/kernel/sound/soc/intel/common/Makefile b/kernel/sound/soc/intel/common/Makefile
new file mode 100644
index 000000000..f24154ca4
--- /dev/null
+++ b/kernel/sound/soc/intel/common/Makefile
@@ -0,0 +1,7 @@
+snd-soc-sst-dsp-objs := sst-dsp.o sst-firmware.o
+snd-soc-sst-acpi-objs := sst-acpi.o
+snd-soc-sst-ipc-objs := sst-ipc.o
+
+obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
+obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
+
diff --git a/kernel/sound/soc/intel/common/sst-acpi.c b/kernel/sound/soc/intel/common/sst-acpi.c
new file mode 100644
index 000000000..42f293f9c
--- /dev/null
+++ b/kernel/sound/soc/intel/common/sst-acpi.c
@@ -0,0 +1,286 @@
+/*
+ * Intel SST loader on ACPI systems
+ *
+ * Copyright (C) 2013, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "sst-dsp.h"
+
+#define SST_LPT_DSP_DMA_ADDR_OFFSET 0x0F0000
+#define SST_WPT_DSP_DMA_ADDR_OFFSET 0x0FE000
+#define SST_LPT_DSP_DMA_SIZE (1024 - 1)
+
+/* Descriptor for SST ASoC machine driver */
+struct sst_acpi_mach {
+ /* ACPI ID for the matching machine driver. Audio codec for instance */
+ const u8 id[ACPI_ID_LEN];
+ /* machine driver name */
+ const char *drv_name;
+ /* firmware file name */
+ const char *fw_filename;
+};
+
+/* Descriptor for setting up SST platform data */
+struct sst_acpi_desc {
+ const char *drv_name;
+ struct sst_acpi_mach *machines;
+ /* Platform resource indexes. Must be set to -1 if not used */
+ int resindex_lpe_base;
+ int resindex_pcicfg_base;
+ int resindex_fw_base;
+ int irqindex_host_ipc;
+ int resindex_dma_base;
+ /* Unique number identifying the SST core on platform */
+ int sst_id;
+ /* DMA only valid when resindex_dma_base != -1 */
+ int dma_engine;
+ int dma_size;
+};
+
+struct sst_acpi_priv {
+ struct platform_device *pdev_mach;
+ struct platform_device *pdev_pcm;
+ struct sst_pdata sst_pdata;
+ struct sst_acpi_desc *desc;
+ struct sst_acpi_mach *mach;
+};
+
+static void sst_acpi_fw_cb(const struct firmware *fw, void *context)
+{
+ struct platform_device *pdev = context;
+ struct device *dev = &pdev->dev;
+ struct sst_acpi_priv *sst_acpi = platform_get_drvdata(pdev);
+ struct sst_pdata *sst_pdata = &sst_acpi->sst_pdata;
+ struct sst_acpi_desc *desc = sst_acpi->desc;
+ struct sst_acpi_mach *mach = sst_acpi->mach;
+
+ sst_pdata->fw = fw;
+ if (!fw) {
+ dev_err(dev, "Cannot load firmware %s\n", mach->fw_filename);
+ return;
+ }
+
+ /* register PCM and DAI driver */
+ sst_acpi->pdev_pcm =
+ platform_device_register_data(dev, desc->drv_name, -1,
+ sst_pdata, sizeof(*sst_pdata));
+ if (IS_ERR(sst_acpi->pdev_pcm)) {
+ dev_err(dev, "Cannot register device %s. Error %d\n",
+ desc->drv_name, (int)PTR_ERR(sst_acpi->pdev_pcm));
+ }
+
+ return;
+}
+
+static acpi_status sst_acpi_mach_match(acpi_handle handle, u32 level,
+ void *context, void **ret)
+{
+ *(bool *)context = true;
+ return AE_OK;
+}
+
+static struct sst_acpi_mach *sst_acpi_find_machine(
+ struct sst_acpi_mach *machines)
+{
+ struct sst_acpi_mach *mach;
+ bool found = false;
+
+ for (mach = machines; mach->id[0]; mach++)
+ if (ACPI_SUCCESS(acpi_get_devices(mach->id,
+ sst_acpi_mach_match,
+ &found, NULL)) && found)
+ return mach;
+
+ return NULL;
+}
+
+static int sst_acpi_probe(struct platform_device *pdev)
+{
+ const struct acpi_device_id *id;
+ struct device *dev = &pdev->dev;
+ struct sst_acpi_priv *sst_acpi;
+ struct sst_pdata *sst_pdata;
+ struct sst_acpi_mach *mach;
+ struct sst_acpi_desc *desc;
+ struct resource *mmio;
+ int ret = 0;
+
+ sst_acpi = devm_kzalloc(dev, sizeof(*sst_acpi), GFP_KERNEL);
+ if (sst_acpi == NULL)
+ return -ENOMEM;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ desc = (struct sst_acpi_desc *)id->driver_data;
+ mach = sst_acpi_find_machine(desc->machines);
+ if (mach == NULL) {
+ dev_err(dev, "No matching ASoC machine driver found\n");
+ return -ENODEV;
+ }
+
+ sst_pdata = &sst_acpi->sst_pdata;
+ sst_pdata->id = desc->sst_id;
+ sst_pdata->dma_dev = dev;
+ sst_acpi->desc = desc;
+ sst_acpi->mach = mach;
+
+ sst_pdata->resindex_dma_base = desc->resindex_dma_base;
+ if (desc->resindex_dma_base >= 0) {
+ sst_pdata->dma_engine = desc->dma_engine;
+ sst_pdata->dma_base = desc->resindex_dma_base;
+ sst_pdata->dma_size = desc->dma_size;
+ }
+
+ if (desc->irqindex_host_ipc >= 0)
+ sst_pdata->irq = platform_get_irq(pdev, desc->irqindex_host_ipc);
+
+ if (desc->resindex_lpe_base >= 0) {
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_lpe_base);
+ if (mmio) {
+ sst_pdata->lpe_base = mmio->start;
+ sst_pdata->lpe_size = resource_size(mmio);
+ }
+ }
+
+ if (desc->resindex_pcicfg_base >= 0) {
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_pcicfg_base);
+ if (mmio) {
+ sst_pdata->pcicfg_base = mmio->start;
+ sst_pdata->pcicfg_size = resource_size(mmio);
+ }
+ }
+
+ if (desc->resindex_fw_base >= 0) {
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM,
+ desc->resindex_fw_base);
+ if (mmio) {
+ sst_pdata->fw_base = mmio->start;
+ sst_pdata->fw_size = resource_size(mmio);
+ }
+ }
+
+ platform_set_drvdata(pdev, sst_acpi);
+
+ /* register machine driver */
+ sst_acpi->pdev_mach =
+ platform_device_register_data(dev, mach->drv_name, -1,
+ sst_pdata, sizeof(*sst_pdata));
+ if (IS_ERR(sst_acpi->pdev_mach))
+ return PTR_ERR(sst_acpi->pdev_mach);
+
+ /* continue SST probing after firmware is loaded */
+ ret = request_firmware_nowait(THIS_MODULE, true, mach->fw_filename,
+ dev, GFP_KERNEL, pdev, sst_acpi_fw_cb);
+ if (ret)
+ platform_device_unregister(sst_acpi->pdev_mach);
+
+ return ret;
+}
+
+static int sst_acpi_remove(struct platform_device *pdev)
+{
+ struct sst_acpi_priv *sst_acpi = platform_get_drvdata(pdev);
+ struct sst_pdata *sst_pdata = &sst_acpi->sst_pdata;
+
+ platform_device_unregister(sst_acpi->pdev_mach);
+ if (!IS_ERR_OR_NULL(sst_acpi->pdev_pcm))
+ platform_device_unregister(sst_acpi->pdev_pcm);
+ release_firmware(sst_pdata->fw);
+
+ return 0;
+}
+
+static struct sst_acpi_mach haswell_machines[] = {
+ { "INT33CA", "haswell-audio", "intel/IntcSST1.bin" },
+ {}
+};
+
+static struct sst_acpi_desc sst_acpi_haswell_desc = {
+ .drv_name = "haswell-pcm-audio",
+ .machines = haswell_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_fw_base = -1,
+ .irqindex_host_ipc = 0,
+ .sst_id = SST_DEV_ID_LYNX_POINT,
+ .dma_engine = SST_DMA_TYPE_DW,
+ .resindex_dma_base = SST_LPT_DSP_DMA_ADDR_OFFSET,
+ .dma_size = SST_LPT_DSP_DMA_SIZE,
+};
+
+static struct sst_acpi_mach broadwell_machines[] = {
+ { "INT343A", "broadwell-audio", "intel/IntcSST2.bin" },
+ {}
+};
+
+static struct sst_acpi_desc sst_acpi_broadwell_desc = {
+ .drv_name = "haswell-pcm-audio",
+ .machines = broadwell_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_fw_base = -1,
+ .irqindex_host_ipc = 0,
+ .sst_id = SST_DEV_ID_WILDCAT_POINT,
+ .dma_engine = SST_DMA_TYPE_DW,
+ .resindex_dma_base = SST_WPT_DSP_DMA_ADDR_OFFSET,
+ .dma_size = SST_LPT_DSP_DMA_SIZE,
+};
+
+static struct sst_acpi_mach baytrail_machines[] = {
+ { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master" },
+ { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master" },
+ {}
+};
+
+static struct sst_acpi_desc sst_acpi_baytrail_desc = {
+ .drv_name = "baytrail-pcm-audio",
+ .machines = baytrail_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = 1,
+ .resindex_fw_base = 2,
+ .irqindex_host_ipc = 5,
+ .sst_id = SST_DEV_ID_BYT,
+ .resindex_dma_base = -1,
+};
+
+static struct acpi_device_id sst_acpi_match[] = {
+ { "INT33C8", (unsigned long)&sst_acpi_haswell_desc },
+ { "INT3438", (unsigned long)&sst_acpi_broadwell_desc },
+ { "80860F28", (unsigned long)&sst_acpi_baytrail_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sst_acpi_match);
+
+static struct platform_driver sst_acpi_driver = {
+ .probe = sst_acpi_probe,
+ .remove = sst_acpi_remove,
+ .driver = {
+ .name = "sst-acpi",
+ .acpi_match_table = ACPI_PTR(sst_acpi_match),
+ },
+};
+module_platform_driver(sst_acpi_driver);
+
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_DESCRIPTION("Intel SST loader on ACPI systems");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/sound/soc/intel/common/sst-dsp-priv.h b/kernel/sound/soc/intel/common/sst-dsp-priv.h
new file mode 100644
index 000000000..396d54510
--- /dev/null
+++ b/kernel/sound/soc/intel/common/sst-dsp-priv.h
@@ -0,0 +1,373 @@
+/*
+ * Intel Smart Sound Technology
+ *
+ * Copyright (C) 2013, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SOUND_SOC_SST_DSP_PRIV_H
+#define __SOUND_SOC_SST_DSP_PRIV_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+
+struct sst_mem_block;
+struct sst_module;
+struct sst_fw;
+
+/* do we need to remove or keep */
+#define DSP_DRAM_ADDR_OFFSET 0x400000
+
+/*
+ * DSP Operations exported by platform Audio DSP driver.
+ */
+struct sst_ops {
+ /* DSP core boot / reset */
+ void (*boot)(struct sst_dsp *);
+ void (*reset)(struct sst_dsp *);
+ int (*wake)(struct sst_dsp *);
+ void (*sleep)(struct sst_dsp *);
+ void (*stall)(struct sst_dsp *);
+
+ /* Shim IO */
+ void (*write)(void __iomem *addr, u32 offset, u32 value);
+ u32 (*read)(void __iomem *addr, u32 offset);
+ void (*write64)(void __iomem *addr, u32 offset, u64 value);
+ u64 (*read64)(void __iomem *addr, u32 offset);
+
+ /* DSP I/DRAM IO */
+ void (*ram_read)(struct sst_dsp *sst, void *dest, void __iomem *src,
+ size_t bytes);
+ void (*ram_write)(struct sst_dsp *sst, void __iomem *dest, void *src,
+ size_t bytes);
+
+ void (*dump)(struct sst_dsp *);
+
+ /* IRQ handlers */
+ irqreturn_t (*irq_handler)(int irq, void *context);
+
+ /* SST init and free */
+ int (*init)(struct sst_dsp *sst, struct sst_pdata *pdata);
+ void (*free)(struct sst_dsp *sst);
+
+ /* FW module parser/loader */
+ int (*parse_fw)(struct sst_fw *sst_fw);
+};
+
+/*
+ * Audio DSP memory offsets and addresses.
+ */
+struct sst_addr {
+ u32 lpe_base;
+ u32 shim_offset;
+ u32 iram_offset;
+ u32 dram_offset;
+ u32 dsp_iram_offset;
+ u32 dsp_dram_offset;
+ void __iomem *lpe;
+ void __iomem *shim;
+ void __iomem *pci_cfg;
+ void __iomem *fw_ext;
+};
+
+/*
+ * Audio DSP Mailbox configuration.
+ */
+struct sst_mailbox {
+ void __iomem *in_base;
+ void __iomem *out_base;
+ size_t in_size;
+ size_t out_size;
+};
+
+/*
+ * Audio DSP memory block types.
+ */
+enum sst_mem_type {
+ SST_MEM_IRAM = 0,
+ SST_MEM_DRAM = 1,
+ SST_MEM_ANY = 2,
+ SST_MEM_CACHE = 3,
+};
+
+/*
+ * Audio DSP Generic Firmware File.
+ *
+ * SST Firmware files can consist of 1..N modules. This generic structure is
+ * used to manage each firmware file and its modules regardless of SST firmware
+ * type. An SST driver may load multiple FW files.
+ */
+struct sst_fw {
+ struct sst_dsp *dsp;
+
+ /* base addresses of FW file data */
+ dma_addr_t dmable_fw_paddr; /* physical address of fw data */
+ void *dma_buf; /* virtual address of fw data */
+ u32 size; /* size of fw data */
+
+ /* lists */
+ struct list_head list; /* DSP list of FW */
+ struct list_head module_list; /* FW list of modules */
+
+ void *private; /* core doesn't touch this */
+};
+
+/*
+ * Audio DSP Generic Module Template.
+ *
+ * Used to define and register a new FW module. This data is extracted from
+ * FW module header information.
+ */
+struct sst_module_template {
+ u32 id;
+ u32 entry; /* entry point */
+ u32 scratch_size;
+ u32 persistent_size;
+};
+
+/*
+ * Block Allocator - Used to allocate blocks of DSP memory.
+ */
+struct sst_block_allocator {
+ u32 id;
+ u32 offset;
+ int size;
+ enum sst_mem_type type;
+};
+
+/*
+ * Runtime Module Instance - A module object can be instantiated multiple
+ * times within the DSP FW.
+ */
+struct sst_module_runtime {
+ struct sst_dsp *dsp;
+ int id;
+ struct sst_module *module; /* parent module we belong to */
+
+ u32 persistent_offset; /* private memory offset */
+ void *private;
+
+ struct list_head list;
+ struct list_head block_list; /* list of blocks used */
+};
+
+/*
+ * Runtime Module Context - The runtime context must be manually stored by the
+ * driver prior to entering S3 and restored after leaving S3. This should really
+ * be part of the memory context saved by the enter-D3 IPC message ???
+ */
+struct sst_module_runtime_context {
+ dma_addr_t dma_buffer;
+ u32 *buffer;
+};
+
+/*
+ * Audio DSP Module State
+ */
+enum sst_module_state {
+ SST_MODULE_STATE_UNLOADED = 0, /* default state */
+ SST_MODULE_STATE_LOADED,
+ SST_MODULE_STATE_INITIALIZED, /* and inactive */
+ SST_MODULE_STATE_ACTIVE,
+};
+
+/*
+ * Audio DSP Generic Module.
+ *
+ * Each Firmware file can consist of 1..N modules. A module can span multiple
+ * ADSP memory blocks. The simplest FW will be a file with 1 module. A module
+ * can be instantiated multiple times in the DSP.
+ */
+struct sst_module {
+ struct sst_dsp *dsp;
+ struct sst_fw *sst_fw; /* parent FW we belong to */
+
+ /* module configuration */
+ u32 id;
+ u32 entry; /* module entry point */
+ s32 offset; /* module offset in firmware file */
+ u32 size; /* module size */
+ u32 scratch_size; /* global scratch memory required */
+ u32 persistent_size; /* private memory required */
+ enum sst_mem_type type; /* destination memory type */
+ u32 data_offset; /* offset in ADSP memory space */
+ void *data; /* module data */
+
+ /* runtime */
+ u32 usage_count; /* can be unloaded if count == 0 */
+ void *private; /* core doesn't touch this */
+
+ /* lists */
+ struct list_head block_list; /* Module list of blocks in use */
+ struct list_head list; /* DSP list of modules */
+ struct list_head list_fw; /* FW list of modules */
+ struct list_head runtime_list; /* list of runtime module objects*/
+
+ /* state */
+ enum sst_module_state state;
+};
+
+/*
+ * SST Memory Block operations.
+ */
+struct sst_block_ops {
+ int (*enable)(struct sst_mem_block *block);
+ int (*disable)(struct sst_mem_block *block);
+};
+
+/*
+ * SST Generic Memory Block.
+ *
+ * SST ADSP memory has multiple IRAM and DRAM blocks. Some ADSP blocks can be
+ * power gated.
+ */
+struct sst_mem_block {
+ struct sst_dsp *dsp;
+ struct sst_module *module; /* module that uses this block */
+
+ /* block config */
+ u32 offset; /* offset from base */
+ u32 size; /* block size */
+ u32 index; /* block index 0..N */
+ enum sst_mem_type type; /* block memory type IRAM/DRAM */
+ struct sst_block_ops *ops; /* block operations, if any */
+
+ /* block status */
+ u32 bytes_used; /* bytes in use by modules */
+ void *private; /* generic core does not touch this */
+ int users; /* number of modules using this block */
+
+ /* block lists */
+ struct list_head module_list; /* Module list of blocks */
+ struct list_head list; /* Map list of free/used blocks */
+};
+
+/*
+ * Generic SST Shim Interface.
+ */
+struct sst_dsp {
+
+ /* runtime */
+ struct sst_dsp_device *sst_dev;
+ spinlock_t spinlock; /* IPC locking */
+ struct mutex mutex; /* DSP FW lock */
+ struct device *dev;
+ struct device *dma_dev;
+ void *thread_context;
+ int irq;
+ u32 id;
+
+ /* list of free and used ADSP memory blocks */
+ struct list_head used_block_list;
+ struct list_head free_block_list;
+
+ /* operations */
+ struct sst_ops *ops;
+
+ /* debug FS */
+ struct dentry *debugfs_root;
+
+ /* base addresses */
+ struct sst_addr addr;
+
+ /* mailbox */
+ struct sst_mailbox mailbox;
+
+ /* SST FW files loaded and their modules */
+ struct list_head module_list;
+ struct list_head fw_list;
+
+ /* scratch buffer */
+ struct list_head scratch_block_list;
+ u32 scratch_offset;
+ u32 scratch_size;
+
+ /* platform data */
+ struct sst_pdata *pdata;
+
+ /* DMA FW loading */
+ struct sst_dma *dma;
+ bool fw_use_dma;
+};
+
+/* Size optimised DRAM/IRAM memcpy */
+static inline void sst_dsp_write(struct sst_dsp *sst, void *src,
+ u32 dest_offset, size_t bytes)
+{
+ sst->ops->ram_write(sst, sst->addr.lpe + dest_offset, src, bytes);
+}
+
+static inline void sst_dsp_read(struct sst_dsp *sst, void *dest,
+ u32 src_offset, size_t bytes)
+{
+ sst->ops->ram_read(sst, dest, sst->addr.lpe + src_offset, bytes);
+}
+
+static inline void *sst_dsp_get_thread_context(struct sst_dsp *sst)
+{
+ return sst->thread_context;
+}
+
+/* Create/Free FW files - can contain multiple modules */
+struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
+ const struct firmware *fw, void *private);
+void sst_fw_free(struct sst_fw *sst_fw);
+void sst_fw_free_all(struct sst_dsp *dsp);
+int sst_fw_reload(struct sst_fw *sst_fw);
+void sst_fw_unload(struct sst_fw *sst_fw);
+
+/* Create/Free firmware modules */
+struct sst_module *sst_module_new(struct sst_fw *sst_fw,
+ struct sst_module_template *template, void *private);
+void sst_module_free(struct sst_module *module);
+struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id);
+int sst_module_alloc_blocks(struct sst_module *module);
+int sst_module_free_blocks(struct sst_module *module);
+
+/* Create/Free firmware module runtime instances */
+struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
+ int id, void *private);
+void sst_module_runtime_free(struct sst_module_runtime *runtime);
+struct sst_module_runtime *sst_module_runtime_get_from_id(
+ struct sst_module *module, u32 id);
+int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
+ int offset);
+int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime);
+int sst_module_runtime_save(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context);
+int sst_module_runtime_restore(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context);
+
+/* generic block allocation */
+int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list);
+int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list);
+
+/* scratch allocation */
+int sst_block_alloc_scratch(struct sst_dsp *dsp);
+void sst_block_free_scratch(struct sst_dsp *dsp);
+
+/* Register the DSP's memory blocks - would be nice to read from ACPI */
+struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
+ u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
+ void *private);
+void sst_mem_block_unregister_all(struct sst_dsp *dsp);
+
+/* Create/Free DMA resources */
+int sst_dma_new(struct sst_dsp *sst);
+void sst_dma_free(struct sst_dma *dma);
+
+u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
+ enum sst_mem_type type);
+#endif
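
The firmware/module API declared above is consumed by the core-specific drivers: ops->parse_fw() walks the image that sst_fw_new() has already copied into sst_fw->dma_buf and registers each module it finds via sst_module_new(). A sketch of such a callback is shown below, assuming a hypothetical single-module image layout; struct example_fw_hdr and example_parse_fw are invented for illustration, not part of this patch.

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

/* Hypothetical image header; real SST cores define their own FW format. */
struct example_fw_hdr {
	u32 module_id;
	u32 entry_point;
	u32 scratch_size;
	u32 persistent_size;
};

static int example_parse_fw(struct sst_fw *sst_fw)
{
	struct example_fw_hdr *hdr = sst_fw->dma_buf; /* copied here by sst_fw_new() */
	struct sst_module_template tmpl = {
		.id = hdr->module_id,
		.entry = hdr->entry_point,
		.scratch_size = hdr->scratch_size,
		.persistent_size = hdr->persistent_size,
	};
	struct sst_module *module;

	module = sst_module_new(sst_fw, &tmpl, NULL);
	if (!module)
		return -ENOMEM;

	/* module->offset/size/type/data would be filled in from the image
	 * sections before calling sst_module_alloc_blocks(module).
	 */
	return 0;
}
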
diff --git a/kernel/sound/soc/intel/common/sst-dsp.c b/kernel/sound/soc/intel/common/sst-dsp.c
new file mode 100644
index 000000000..64e94212d
--- /dev/null
+++ b/kernel/sound/soc/intel/common/sst-dsp.c
@@ -0,0 +1,420 @@
+/*
+ * Intel Smart Sound Technology (SST) DSP Core Driver
+ *
+ * Copyright (C) 2013, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include "sst-dsp.h"
+#include "sst-dsp-priv.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/intel-sst.h>
+
+/* Internal generic low-level SST IO functions - can be overridden */
+void sst_shim32_write(void __iomem *addr, u32 offset, u32 value)
+{
+ writel(value, addr + offset);
+}
+EXPORT_SYMBOL_GPL(sst_shim32_write);
+
+u32 sst_shim32_read(void __iomem *addr, u32 offset)
+{
+ return readl(addr + offset);
+}
+EXPORT_SYMBOL_GPL(sst_shim32_read);
+
+void sst_shim32_write64(void __iomem *addr, u32 offset, u64 value)
+{
+ memcpy_toio(addr + offset, &value, sizeof(value));
+}
+EXPORT_SYMBOL_GPL(sst_shim32_write64);
+
+u64 sst_shim32_read64(void __iomem *addr, u32 offset)
+{
+ u64 val;
+
+ memcpy_fromio(&val, addr + offset, sizeof(val));
+ return val;
+}
+EXPORT_SYMBOL_GPL(sst_shim32_read64);
+
+static inline void _sst_memcpy_toio_32(volatile u32 __iomem *dest,
+ u32 *src, size_t bytes)
+{
+ int i, words = bytes >> 2;
+
+ for (i = 0; i < words; i++)
+ writel(src[i], dest + i);
+}
+
+static inline void _sst_memcpy_fromio_32(u32 *dest,
+ const volatile __iomem u32 *src, size_t bytes)
+{
+ int i, words = bytes >> 2;
+
+ for (i = 0; i < words; i++)
+ dest[i] = readl(src + i);
+}
+
+void sst_memcpy_toio_32(struct sst_dsp *sst,
+ void __iomem *dest, void *src, size_t bytes)
+{
+ _sst_memcpy_toio_32(dest, src, bytes);
+}
+EXPORT_SYMBOL_GPL(sst_memcpy_toio_32);
+
+void sst_memcpy_fromio_32(struct sst_dsp *sst, void *dest,
+ void __iomem *src, size_t bytes)
+{
+ _sst_memcpy_fromio_32(dest, src, bytes);
+}
+EXPORT_SYMBOL_GPL(sst_memcpy_fromio_32);
+
+/* Public API */
+void sst_dsp_shim_write(struct sst_dsp *sst, u32 offset, u32 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sst->spinlock, flags);
+ sst->ops->write(sst->addr.shim, offset, value);
+ spin_unlock_irqrestore(&sst->spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_write);
+
+u32 sst_dsp_shim_read(struct sst_dsp *sst, u32 offset)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&sst->spinlock, flags);
+ val = sst->ops->read(sst->addr.shim, offset);
+ spin_unlock_irqrestore(&sst->spinlock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_read);
+
+void sst_dsp_shim_write64(struct sst_dsp *sst, u32 offset, u64 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sst->spinlock, flags);
+ sst->ops->write64(sst->addr.shim, offset, value);
+ spin_unlock_irqrestore(&sst->spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_write64);
+
+u64 sst_dsp_shim_read64(struct sst_dsp *sst, u32 offset)
+{
+ unsigned long flags;
+ u64 val;
+
+ spin_lock_irqsave(&sst->spinlock, flags);
+ val = sst->ops->read64(sst->addr.shim, offset);
+ spin_unlock_irqrestore(&sst->spinlock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_read64);
+
+void sst_dsp_shim_write_unlocked(struct sst_dsp *sst, u32 offset, u32 value)
+{
+ sst->ops->write(sst->addr.shim, offset, value);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_write_unlocked);
+
+u32 sst_dsp_shim_read_unlocked(struct sst_dsp *sst, u32 offset)
+{
+ return sst->ops->read(sst->addr.shim, offset);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_read_unlocked);
+
+void sst_dsp_shim_write64_unlocked(struct sst_dsp *sst, u32 offset, u64 value)
+{
+ sst->ops->write64(sst->addr.shim, offset, value);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_write64_unlocked);
+
+u64 sst_dsp_shim_read64_unlocked(struct sst_dsp *sst, u32 offset)
+{
+ return sst->ops->read64(sst->addr.shim, offset);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_read64_unlocked);
+
+int sst_dsp_shim_update_bits_unlocked(struct sst_dsp *sst, u32 offset,
+ u32 mask, u32 value)
+{
+ bool change;
+ unsigned int old, new;
+ u32 ret;
+
+ ret = sst_dsp_shim_read_unlocked(sst, offset);
+
+ old = ret;
+ new = (old & (~mask)) | (value & mask);
+
+ change = (old != new);
+ if (change)
+ sst_dsp_shim_write_unlocked(sst, offset, new);
+
+ return change;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_unlocked);
+
+int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
+ u64 mask, u64 value)
+{
+ bool change;
+ u64 old, new;
+
+ old = sst_dsp_shim_read64_unlocked(sst, offset);
+
+ new = (old & (~mask)) | (value & mask);
+
+ change = (old != new);
+ if (change)
+ sst_dsp_shim_write64_unlocked(sst, offset, new);
+
+ return change;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64_unlocked);
+
+int sst_dsp_shim_update_bits(struct sst_dsp *sst, u32 offset,
+ u32 mask, u32 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sst->spinlock, flags);
+ change = sst_dsp_shim_update_bits_unlocked(sst, offset, mask, value);
+ spin_unlock_irqrestore(&sst->spinlock, flags);
+ return change;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits);
+
+int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
+ u64 mask, u64 value)
+{
+ unsigned long flags;
+ bool change;
+
+ spin_lock_irqsave(&sst->spinlock, flags);
+ change = sst_dsp_shim_update_bits64_unlocked(sst, offset, mask, value);
+ spin_unlock_irqrestore(&sst->spinlock, flags);
+ return change;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64);
+
+void sst_dsp_dump(struct sst_dsp *sst)
+{
+ if (sst->ops->dump)
+ sst->ops->dump(sst);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dump);
+
+void sst_dsp_reset(struct sst_dsp *sst)
+{
+ if (sst->ops->reset)
+ sst->ops->reset(sst);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_reset);
+
+int sst_dsp_boot(struct sst_dsp *sst)
+{
+ if (sst->ops->boot)
+ sst->ops->boot(sst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_boot);
+
+int sst_dsp_wake(struct sst_dsp *sst)
+{
+ if (sst->ops->wake)
+ return sst->ops->wake(sst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_wake);
+
+void sst_dsp_sleep(struct sst_dsp *sst)
+{
+ if (sst->ops->sleep)
+ sst->ops->sleep(sst);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_sleep);
+
+void sst_dsp_stall(struct sst_dsp *sst)
+{
+ if (sst->ops->stall)
+ sst->ops->stall(sst);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_stall);
+
+void sst_dsp_ipc_msg_tx(struct sst_dsp *dsp, u32 msg)
+{
+ sst_dsp_shim_write_unlocked(dsp, SST_IPCX, msg | SST_IPCX_BUSY);
+ trace_sst_ipc_msg_tx(msg);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_ipc_msg_tx);
+
+u32 sst_dsp_ipc_msg_rx(struct sst_dsp *dsp)
+{
+ u32 msg;
+
+ msg = sst_dsp_shim_read_unlocked(dsp, SST_IPCX);
+ trace_sst_ipc_msg_rx(msg);
+
+ return msg;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_ipc_msg_rx);
+
+int sst_dsp_mailbox_init(struct sst_dsp *sst, u32 inbox_offset, size_t inbox_size,
+ u32 outbox_offset, size_t outbox_size)
+{
+ sst->mailbox.in_base = sst->addr.lpe + inbox_offset;
+ sst->mailbox.out_base = sst->addr.lpe + outbox_offset;
+ sst->mailbox.in_size = inbox_size;
+ sst->mailbox.out_size = outbox_size;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_mailbox_init);
+
+void sst_dsp_outbox_write(struct sst_dsp *sst, void *message, size_t bytes)
+{
+ u32 i;
+
+ trace_sst_ipc_outbox_write(bytes);
+
+ memcpy_toio(sst->mailbox.out_base, message, bytes);
+
+ for (i = 0; i < bytes; i += 4)
+ trace_sst_ipc_outbox_wdata(i, *(u32 *)(message + i));
+}
+EXPORT_SYMBOL_GPL(sst_dsp_outbox_write);
+
+void sst_dsp_outbox_read(struct sst_dsp *sst, void *message, size_t bytes)
+{
+ u32 i;
+
+ trace_sst_ipc_outbox_read(bytes);
+
+ memcpy_fromio(message, sst->mailbox.out_base, bytes);
+
+ for (i = 0; i < bytes; i += 4)
+ trace_sst_ipc_outbox_rdata(i, *(u32 *)(message + i));
+}
+EXPORT_SYMBOL_GPL(sst_dsp_outbox_read);
+
+void sst_dsp_inbox_write(struct sst_dsp *sst, void *message, size_t bytes)
+{
+ u32 i;
+
+ trace_sst_ipc_inbox_write(bytes);
+
+ memcpy_toio(sst->mailbox.in_base, message, bytes);
+
+ for (i = 0; i < bytes; i += 4)
+ trace_sst_ipc_inbox_wdata(i, *(u32 *)(message + i));
+}
+EXPORT_SYMBOL_GPL(sst_dsp_inbox_write);
+
+void sst_dsp_inbox_read(struct sst_dsp *sst, void *message, size_t bytes)
+{
+ u32 i;
+
+ trace_sst_ipc_inbox_read(bytes);
+
+ memcpy_fromio(message, sst->mailbox.in_base, bytes);
+
+ for (i = 0; i < bytes; i += 4)
+ trace_sst_ipc_inbox_rdata(i, *(u32 *)(message + i));
+}
+EXPORT_SYMBOL_GPL(sst_dsp_inbox_read);
+
+struct sst_dsp *sst_dsp_new(struct device *dev,
+ struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
+{
+ struct sst_dsp *sst;
+ int err;
+
+ dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);
+
+ sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
+ if (sst == NULL)
+ return NULL;
+
+ spin_lock_init(&sst->spinlock);
+ mutex_init(&sst->mutex);
+ sst->dev = dev;
+ sst->dma_dev = pdata->dma_dev;
+ sst->thread_context = sst_dev->thread_context;
+ sst->sst_dev = sst_dev;
+ sst->id = pdata->id;
+ sst->irq = pdata->irq;
+ sst->ops = sst_dev->ops;
+ sst->pdata = pdata;
+ INIT_LIST_HEAD(&sst->used_block_list);
+ INIT_LIST_HEAD(&sst->free_block_list);
+ INIT_LIST_HEAD(&sst->module_list);
+ INIT_LIST_HEAD(&sst->fw_list);
+ INIT_LIST_HEAD(&sst->scratch_block_list);
+
+ /* Initialise SST Audio DSP */
+ if (sst->ops->init) {
+ err = sst->ops->init(sst, pdata);
+ if (err < 0)
+ return NULL;
+ }
+
+ /* Register the ISR */
+ err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
+ sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
+ if (err)
+ goto irq_err;
+
+ err = sst_dma_new(sst);
+ if (err)
+ dev_warn(dev, "sst_dma_new failed %d\n", err);
+
+ return sst;
+
+irq_err:
+ if (sst->ops->free)
+ sst->ops->free(sst);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_new);
+
+void sst_dsp_free(struct sst_dsp *sst)
+{
+ free_irq(sst->irq, sst);
+ if (sst->ops->free)
+ sst->ops->free(sst);
+
+ sst_dma_free(sst->dma);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_free);
+
+/* Module information */
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_DESCRIPTION("Intel SST Core");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/sound/soc/intel/common/sst-dsp.h b/kernel/sound/soc/intel/common/sst-dsp.h
new file mode 100644
index 000000000..96aeb2556
--- /dev/null
+++ b/kernel/sound/soc/intel/common/sst-dsp.h
@@ -0,0 +1,285 @@
+/*
+ * Intel Smart Sound Technology (SST) Core
+ *
+ * Copyright (C) 2013, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SOUND_SOC_SST_DSP_H
+#define __SOUND_SOC_SST_DSP_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+/* SST Device IDs */
+#define SST_DEV_ID_LYNX_POINT 0x33C8
+#define SST_DEV_ID_WILDCAT_POINT 0x3438
+#define SST_DEV_ID_BYT 0x0F28
+
+/* Supported SST DMA Devices */
+#define SST_DMA_TYPE_DW 1
+
+/* autosuspend delay 5s */
+#define SST_RUNTIME_SUSPEND_DELAY (5 * 1000)
+
+/* SST Shim register map
+ * The register naming can differ between products. Some products also
+ * contain extra functionality.
+ */
+#define SST_CSR 0x00
+#define SST_PISR 0x08
+#define SST_PIMR 0x10
+#define SST_ISRX 0x18
+#define SST_ISRD 0x20
+#define SST_IMRX 0x28
+#define SST_IMRD 0x30
+#define SST_IPCX 0x38 /* IPC IA -> SST */
+#define SST_IPCD 0x40 /* IPC SST -> IA */
+#define SST_ISRSC 0x48
+#define SST_ISRLPESC 0x50
+#define SST_IMRSC 0x58
+#define SST_IMRLPESC 0x60
+#define SST_IPCSC 0x68
+#define SST_IPCLPESC 0x70
+#define SST_CLKCTL 0x78
+#define SST_CSR2 0x80
+#define SST_LTRC 0xE0
+#define SST_HMDC 0xE8
+
+#define SST_SHIM_BEGIN SST_CSR
+#define SST_SHIM_END SST_HMDC
+
+#define SST_DBGO 0xF0
+
+#define SST_SHIM_SIZE 0x100
+#define SST_PWMCTRL 0x1000
+
+/* SST Shim Register bits
+ * The register bit naming can differ between products. Some products also
+ * contain extra functionality.
+ */
+
+/* CSR / CS */
+#define SST_CSR_RST (0x1 << 1)
+#define SST_CSR_SBCS0 (0x1 << 2)
+#define SST_CSR_SBCS1 (0x1 << 3)
+#define SST_CSR_DCS(x) (x << 4)
+#define SST_CSR_DCS_MASK (0x7 << 4)
+#define SST_CSR_STALL (0x1 << 10)
+#define SST_CSR_S0IOCS (0x1 << 21)
+#define SST_CSR_S1IOCS (0x1 << 23)
+#define SST_CSR_LPCS (0x1 << 31)
+#define SST_CSR_24MHZ_LPCS (SST_CSR_SBCS0 | SST_CSR_SBCS1 | SST_CSR_LPCS)
+#define SST_CSR_24MHZ_NO_LPCS (SST_CSR_SBCS0 | SST_CSR_SBCS1)
+#define SST_BYT_CSR_RST (0x1 << 0)
+#define SST_BYT_CSR_VECTOR_SEL (0x1 << 1)
+#define SST_BYT_CSR_STALL (0x1 << 2)
+#define SST_BYT_CSR_PWAITMODE (0x1 << 3)
+
+/* ISRX / ISC */
+#define SST_ISRX_BUSY (0x1 << 1)
+#define SST_ISRX_DONE (0x1 << 0)
+#define SST_BYT_ISRX_REQUEST (0x1 << 1)
+
+/* ISRD / ISD */
+#define SST_ISRD_BUSY (0x1 << 1)
+#define SST_ISRD_DONE (0x1 << 0)
+
+/* IMRX / IMC */
+#define SST_IMRX_BUSY (0x1 << 1)
+#define SST_IMRX_DONE (0x1 << 0)
+#define SST_BYT_IMRX_REQUEST (0x1 << 1)
+
+/* IMRD / IMD */
+#define SST_IMRD_DONE (0x1 << 0)
+#define SST_IMRD_BUSY (0x1 << 1)
+#define SST_IMRD_SSP0 (0x1 << 16)
+#define SST_IMRD_DMAC0 (0x1 << 21)
+#define SST_IMRD_DMAC1 (0x1 << 22)
+#define SST_IMRD_DMAC (SST_IMRD_DMAC0 | SST_IMRD_DMAC1)
+
+/* IPCX / IPCC */
+#define SST_IPCX_DONE (0x1 << 30)
+#define SST_IPCX_BUSY (0x1 << 31)
+#define SST_BYT_IPCX_DONE ((u64)0x1 << 62)
+#define SST_BYT_IPCX_BUSY ((u64)0x1 << 63)
+
+/* IPCD */
+#define SST_IPCD_DONE (0x1 << 30)
+#define SST_IPCD_BUSY (0x1 << 31)
+#define SST_BYT_IPCD_DONE ((u64)0x1 << 62)
+#define SST_BYT_IPCD_BUSY ((u64)0x1 << 63)
+
+/* CLKCTL */
+#define SST_CLKCTL_SMOS(x) (x << 24)
+#define SST_CLKCTL_MASK (3 << 24)
+#define SST_CLKCTL_DCPLCG (1 << 18)
+#define SST_CLKCTL_SCOE1 (1 << 17)
+#define SST_CLKCTL_SCOE0 (1 << 16)
+
+/* CSR2 / CS2 */
+#define SST_CSR2_SDFD_SSP0 (1 << 1)
+#define SST_CSR2_SDFD_SSP1 (1 << 2)
+
+/* LTRC */
+#define SST_LTRC_VAL(x) (x << 0)
+
+/* HMDC */
+#define SST_HMDC_HDDA0(x) (x << 0)
+#define SST_HMDC_HDDA1(x) (x << 7)
+#define SST_HMDC_HDDA_E0_CH0 1
+#define SST_HMDC_HDDA_E0_CH1 2
+#define SST_HMDC_HDDA_E0_CH2 4
+#define SST_HMDC_HDDA_E0_CH3 8
+#define SST_HMDC_HDDA_E1_CH0 SST_HMDC_HDDA1(SST_HMDC_HDDA_E0_CH0)
+#define SST_HMDC_HDDA_E1_CH1 SST_HMDC_HDDA1(SST_HMDC_HDDA_E0_CH1)
+#define SST_HMDC_HDDA_E1_CH2 SST_HMDC_HDDA1(SST_HMDC_HDDA_E0_CH2)
+#define SST_HMDC_HDDA_E1_CH3 SST_HMDC_HDDA1(SST_HMDC_HDDA_E0_CH3)
+#define SST_HMDC_HDDA_E0_ALLCH (SST_HMDC_HDDA_E0_CH0 | SST_HMDC_HDDA_E0_CH1 | \
+ SST_HMDC_HDDA_E0_CH2 | SST_HMDC_HDDA_E0_CH3)
+#define SST_HMDC_HDDA_E1_ALLCH (SST_HMDC_HDDA_E1_CH0 | SST_HMDC_HDDA_E1_CH1 | \
+ SST_HMDC_HDDA_E1_CH2 | SST_HMDC_HDDA_E1_CH3)
+
+
+/* SST Vendor Defined Registers and bits */
+#define SST_VDRTCTL0 0xa0
+#define SST_VDRTCTL1 0xa4
+#define SST_VDRTCTL2 0xa8
+#define SST_VDRTCTL3 0xaC
+
+/* VDRTCTL0 */
+#define SST_VDRTCL0_D3PGD (1 << 0)
+#define SST_VDRTCL0_D3SRAMPGD (1 << 1)
+#define SST_VDRTCL0_DSRAMPGE_SHIFT 12
+#define SST_VDRTCL0_DSRAMPGE_MASK (0xfffff << SST_VDRTCL0_DSRAMPGE_SHIFT)
+#define SST_VDRTCL0_ISRAMPGE_SHIFT 2
+#define SST_VDRTCL0_ISRAMPGE_MASK (0x3ff << SST_VDRTCL0_ISRAMPGE_SHIFT)
+
+/* VDRTCTL2 */
+#define SST_VDRTCL2_DCLCGE (1 << 1)
+#define SST_VDRTCL2_DTCGE (1 << 10)
+#define SST_VDRTCL2_APLLSE_MASK (1 << 31)
+
+/* PMCS */
+#define SST_PMCS 0x84
+#define SST_PMCS_PS_MASK 0x3
+
+struct sst_dsp;
+
+/*
+ * SST Device.
+ *
+ * This structure is populated by the SST core driver.
+ */
+struct sst_dsp_device {
+ /* Mandatory fields */
+ struct sst_ops *ops;
+ irqreturn_t (*thread)(int irq, void *context);
+ void *thread_context;
+};
+
+/*
+ * SST Platform Data.
+ */
+struct sst_pdata {
+ /* ACPI data */
+ u32 lpe_base;
+ u32 lpe_size;
+ u32 pcicfg_base;
+ u32 pcicfg_size;
+ u32 fw_base;
+ u32 fw_size;
+ int irq;
+
+ /* Firmware */
+ const struct firmware *fw;
+
+ /* DMA */
+ int resindex_dma_base; /* other fields invalid if equals to -1 */
+ u32 dma_base;
+ u32 dma_size;
+ int dma_engine;
+ struct device *dma_dev;
+
+ /* DSP */
+ u32 id;
+ void *dsp;
+};
+
+/* Initialization */
+struct sst_dsp *sst_dsp_new(struct device *dev,
+ struct sst_dsp_device *sst_dev, struct sst_pdata *pdata);
+void sst_dsp_free(struct sst_dsp *sst);
+
+/* SHIM Read / Write */
+void sst_dsp_shim_write(struct sst_dsp *sst, u32 offset, u32 value);
+u32 sst_dsp_shim_read(struct sst_dsp *sst, u32 offset);
+int sst_dsp_shim_update_bits(struct sst_dsp *sst, u32 offset,
+ u32 mask, u32 value);
+void sst_dsp_shim_write64(struct sst_dsp *sst, u32 offset, u64 value);
+u64 sst_dsp_shim_read64(struct sst_dsp *sst, u32 offset);
+int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
+ u64 mask, u64 value);
+
+/* SHIM Read / Write Unlocked for callers already holding sst lock */
+void sst_dsp_shim_write_unlocked(struct sst_dsp *sst, u32 offset, u32 value);
+u32 sst_dsp_shim_read_unlocked(struct sst_dsp *sst, u32 offset);
+int sst_dsp_shim_update_bits_unlocked(struct sst_dsp *sst, u32 offset,
+ u32 mask, u32 value);
+void sst_dsp_shim_write64_unlocked(struct sst_dsp *sst, u32 offset, u64 value);
+u64 sst_dsp_shim_read64_unlocked(struct sst_dsp *sst, u32 offset);
+int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
+ u64 mask, u64 value);
+
+/* Internal generic low-level SST IO functions - can be overridden */
+void sst_shim32_write(void __iomem *addr, u32 offset, u32 value);
+u32 sst_shim32_read(void __iomem *addr, u32 offset);
+void sst_shim32_write64(void __iomem *addr, u32 offset, u64 value);
+u64 sst_shim32_read64(void __iomem *addr, u32 offset);
+void sst_memcpy_toio_32(struct sst_dsp *sst,
+ void __iomem *dest, void *src, size_t bytes);
+void sst_memcpy_fromio_32(struct sst_dsp *sst,
+ void *dest, void __iomem *src, size_t bytes);
+
+/* DSP reset & boot */
+void sst_dsp_reset(struct sst_dsp *sst);
+int sst_dsp_boot(struct sst_dsp *sst);
+int sst_dsp_wake(struct sst_dsp *sst);
+void sst_dsp_sleep(struct sst_dsp *sst);
+void sst_dsp_stall(struct sst_dsp *sst);
+
+/* DMA */
+int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id);
+void sst_dsp_dma_put_channel(struct sst_dsp *dsp);
+int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size);
+int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size);
+
+/* Msg IO */
+void sst_dsp_ipc_msg_tx(struct sst_dsp *dsp, u32 msg);
+u32 sst_dsp_ipc_msg_rx(struct sst_dsp *dsp);
+
+/* Mailbox management */
+int sst_dsp_mailbox_init(struct sst_dsp *dsp, u32 inbox_offset,
+ size_t inbox_size, u32 outbox_offset, size_t outbox_size);
+void sst_dsp_inbox_write(struct sst_dsp *dsp, void *message, size_t bytes);
+void sst_dsp_inbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
+void sst_dsp_outbox_write(struct sst_dsp *dsp, void *message, size_t bytes);
+void sst_dsp_outbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
+void sst_dsp_mailbox_dump(struct sst_dsp *dsp, size_t bytes);
+
+/* Debug */
+void sst_dsp_dump(struct sst_dsp *sst);
+
+#endif
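
The mailbox and message-register helpers above are meant to be used together when sending an IPC message to the DSP: the payload is written to a mailbox and the doorbell register is then rung via sst_dsp_ipc_msg_tx(). One plausible sequence is sketched below; the offsets, sizes and function names prefixed "example_" are placeholders, and the real mailbox layout is defined by the DSP firmware ABI.

#include "sst-dsp.h"

/* Sketch with invented offsets/sizes; not taken from this patch. */
static int example_ipc_setup(struct sst_dsp *dsp)
{
	/* inbox at +0x144000, outbox at +0x144400, 1 KiB each (placeholders) */
	return sst_dsp_mailbox_init(dsp, 0x144000, 0x400, 0x144400, 0x400);
}

static void example_ipc_send(struct sst_dsp *dsp, u32 header,
			     void *payload, size_t bytes)
{
	if (bytes)
		sst_dsp_outbox_write(dsp, payload, bytes);	/* payload first */
	sst_dsp_ipc_msg_tx(dsp, header);			/* then ring the doorbell */
}
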
diff --git a/kernel/sound/soc/intel/common/sst-firmware.c b/kernel/sound/soc/intel/common/sst-firmware.c
new file mode 100644
index 000000000..ebcca6dc4
--- /dev/null
+++ b/kernel/sound/soc/intel/common/sst-firmware.c
@@ -0,0 +1,1205 @@
+/*
+ * Intel SST Firmware Loader
+ *
+ * Copyright (C) 2013, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+
+/* supported DMA engine drivers */
+#include <linux/platform_data/dma-dw.h>
+#include <linux/dma/dw.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include "sst-dsp.h"
+#include "sst-dsp-priv.h"
+
+#define SST_DMA_RESOURCES 2
+#define SST_DSP_DMA_MAX_BURST 0x3
+#define SST_HSW_BLOCK_ANY 0xffffffff
+
+#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000
+
+struct sst_dma {
+ struct sst_dsp *sst;
+
+ struct dw_dma_chip *chip;
+
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *ch;
+};
+
+static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
+{
+ /* __iowrite32_copy uses a 32-bit word count, so divide bytes by 4 */
+ __iowrite32_copy((void *)dest, src, bytes/4);
+}
+
+static void sst_dma_transfer_complete(void *arg)
+{
+ struct sst_dsp *sst = (struct sst_dsp *)arg;
+
+ dev_dbg(sst->dev, "DMA: callback\n");
+}
+
+static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct sst_dma *dma = sst->dma;
+
+ if (dma->ch == NULL) {
+ dev_err(sst->dev, "error: no DMA channel\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
+ (unsigned long)src_addr, (unsigned long)dest_addr, size);
+
+ desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
+ src_addr, size, DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(sst->dev, "error: dma prep memcpy failed\n");
+ return -EINVAL;
+ }
+
+ desc->callback = sst_dma_transfer_complete;
+ desc->callback_param = sst;
+
+ desc->tx_submit(desc);
+ dma_wait_for_async_tx(desc);
+
+ return 0;
+}
+
+/* copy to DSP */
+int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size)
+{
+ return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
+ src_addr, size);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);
+
+/* copy from DSP */
+int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size)
+{
+ return sst_dsp_dma_copy(sst, dest_addr,
+ src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
+
+/* remove module from memory - callers hold locks */
+static void block_list_remove(struct sst_dsp *dsp,
+ struct list_head *block_list)
+{
+ struct sst_mem_block *block, *tmp;
+ int err;
+
+ /* disable each block */
+ list_for_each_entry(block, block_list, module_list) {
+
+ if (block->ops && block->ops->disable) {
+ err = block->ops->disable(block);
+ if (err < 0)
+ dev_err(dsp->dev,
+ "error: cant disable block %d:%d\n",
+ block->type, block->index);
+ }
+ }
+
+ /* mark each block as free */
+ list_for_each_entry_safe(block, tmp, block_list, module_list) {
+ list_del(&block->module_list);
+ list_move(&block->list, &dsp->free_block_list);
+ dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
+ }
+}
+
+/* prepare the memory block to receive data from host - callers hold locks */
+static int block_list_prepare(struct sst_dsp *dsp,
+ struct list_head *block_list)
+{
+ struct sst_mem_block *block;
+ int ret = 0;
+
+ /* enable each block so that it's ready for data */
+ list_for_each_entry(block, block_list, module_list) {
+
+ if (block->ops && block->ops->enable && !block->users) {
+ ret = block->ops->enable(block);
+ if (ret < 0) {
+ dev_err(dsp->dev,
+ "error: cant disable block %d:%d\n",
+ block->type, block->index);
+ goto err;
+ }
+ }
+ }
+ return ret;
+
+err:
+ list_for_each_entry(block, block_list, module_list) {
+ if (block->ops && block->ops->disable)
+ block->ops->disable(block);
+ }
+ return ret;
+}
+
+static struct dw_dma_platform_data dw_pdata = {
+ .is_private = 1,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+};
+
+static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
+ int irq)
+{
+ struct dw_dma_chip *chip;
+ int err;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return ERR_PTR(-ENOMEM);
+
+ chip->irq = irq;
+ chip->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(chip->regs))
+ return ERR_CAST(chip->regs);
+
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
+ if (err)
+ return ERR_PTR(err);
+
+ chip->dev = dev;
+ err = dw_dma_probe(chip, &dw_pdata);
+ if (err)
+ return ERR_PTR(err);
+
+ return chip;
+}
+
+static void dw_remove(struct dw_dma_chip *chip)
+{
+ dw_dma_remove(chip);
+}
+
+static bool dma_chan_filter(struct dma_chan *chan, void *param)
+{
+ struct sst_dsp *dsp = (struct sst_dsp *)param;
+
+ return chan->device->dev == dsp->dma_dev;
+}
+
+int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
+{
+ struct sst_dma *dma = dsp->dma;
+ struct dma_slave_config slave;
+ dma_cap_mask_t mask;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
+ if (dma->ch == NULL) {
+ dev_err(dsp->dev, "error: DMA request channel failed\n");
+ return -EIO;
+ }
+
+ memset(&slave, 0, sizeof(slave));
+ slave.direction = DMA_MEM_TO_DEV;
+ slave.src_addr_width =
+ slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;
+
+ ret = dmaengine_slave_config(dma->ch, &slave);
+ if (ret) {
+ dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
+ ret);
+ dma_release_channel(dma->ch);
+ dma->ch = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);
+
+void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
+{
+ struct sst_dma *dma = dsp->dma;
+
+ if (!dma->ch)
+ return;
+
+ dma_release_channel(dma->ch);
+ dma->ch = NULL;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
+
+int sst_dma_new(struct sst_dsp *sst)
+{
+ struct sst_pdata *sst_pdata = sst->pdata;
+ struct sst_dma *dma;
+ struct resource mem;
+ const char *dma_dev_name;
+ int ret = 0;
+
+ if (sst->pdata->resindex_dma_base == -1)
+ /* DMA is not used, return and squelch error messages */
+ return 0;
+
+ /* configure the correct platform data for whatever DMA engine
+ * is attached to the ADSP IP. */
+ switch (sst->pdata->dma_engine) {
+ case SST_DMA_TYPE_DW:
+ dma_dev_name = "dw_dmac";
+ break;
+ default:
+ dev_err(sst->dev, "error: invalid DMA engine %d\n",
+ sst->pdata->dma_engine);
+ return -EINVAL;
+ }
+
+ dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->sst = sst;
+
+ memset(&mem, 0, sizeof(mem));
+
+ mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
+ mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
+ mem.flags = IORESOURCE_MEM;
+
+ /* now register DMA engine device */
+ dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
+ if (IS_ERR(dma->chip)) {
+ dev_err(sst->dev, "error: DMA device register failed\n");
+ ret = PTR_ERR(dma->chip);
+ goto err_dma_dev;
+ }
+
+ sst->dma = dma;
+ sst->fw_use_dma = true;
+ return 0;
+
+err_dma_dev:
+ devm_kfree(sst->dev, dma);
+ return ret;
+}
+EXPORT_SYMBOL(sst_dma_new);
+
+void sst_dma_free(struct sst_dma *dma)
+{
+
+ if (dma == NULL)
+ return;
+
+ if (dma->ch)
+ dma_release_channel(dma->ch);
+
+ if (dma->chip)
+ dw_remove(dma->chip);
+
+}
+EXPORT_SYMBOL(sst_dma_free);
+
+/* create new generic firmware object */
+struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
+ const struct firmware *fw, void *private)
+{
+ struct sst_fw *sst_fw;
+ int err;
+
+ if (!dsp->ops->parse_fw)
+ return NULL;
+
+ sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
+ if (sst_fw == NULL)
+ return NULL;
+
+ sst_fw->dsp = dsp;
+ sst_fw->private = private;
+ sst_fw->size = fw->size;
+
+ /* allocate DMA buffer to store FW data */
+ sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
+ &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
+ if (!sst_fw->dma_buf) {
+ dev_err(dsp->dev, "error: DMA alloc failed\n");
+ kfree(sst_fw);
+ return NULL;
+ }
+
+ /* copy FW data to DMA-able memory */
+ memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);
+
+ if (dsp->fw_use_dma) {
+ err = sst_dsp_dma_get_channel(dsp, 0);
+ if (err < 0)
+ goto chan_err;
+ }
+
+ /* call core-specific FW parser to load FW data into DSP */
+ err = dsp->ops->parse_fw(sst_fw);
+ if (err < 0) {
+ dev_err(dsp->dev, "error: parse fw failed %d\n", err);
+ goto parse_err;
+ }
+
+ if (dsp->fw_use_dma)
+ sst_dsp_dma_put_channel(dsp);
+
+ mutex_lock(&dsp->mutex);
+ list_add(&sst_fw->list, &dsp->fw_list);
+ mutex_unlock(&dsp->mutex);
+
+ return sst_fw;
+
+parse_err:
+ if (dsp->fw_use_dma)
+ sst_dsp_dma_put_channel(dsp);
+chan_err:
+ dma_free_coherent(dsp->dma_dev, sst_fw->size,
+ sst_fw->dma_buf,
+ sst_fw->dmable_fw_paddr);
+ sst_fw->dma_buf = NULL;
+ kfree(sst_fw);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_fw_new);
+
+int sst_fw_reload(struct sst_fw *sst_fw)
+{
+ struct sst_dsp *dsp = sst_fw->dsp;
+ int ret;
+
+ dev_dbg(dsp->dev, "reloading firmware\n");
+
+ /* call core-specific FW parser to load FW data into DSP */
+ ret = dsp->ops->parse_fw(sst_fw);
+ if (ret < 0)
+ dev_err(dsp->dev, "error: parse fw failed %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_fw_reload);
+
+void sst_fw_unload(struct sst_fw *sst_fw)
+{
+ struct sst_dsp *dsp = sst_fw->dsp;
+ struct sst_module *module, *mtmp;
+ struct sst_module_runtime *runtime, *rtmp;
+
+ dev_dbg(dsp->dev, "unloading firmware\n");
+
+ mutex_lock(&dsp->mutex);
+
+ /* check module by module */
+ list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
+ if (module->sst_fw == sst_fw) {
+
+ /* remove runtime modules */
+ list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {
+
+ block_list_remove(dsp, &runtime->block_list);
+ list_del(&runtime->list);
+ kfree(runtime);
+ }
+
+ /* now remove the module */
+ block_list_remove(dsp, &module->block_list);
+ list_del(&module->list);
+ kfree(module);
+ }
+ }
+
+ /* remove all scratch blocks */
+ block_list_remove(dsp, &dsp->scratch_block_list);
+
+ mutex_unlock(&dsp->mutex);
+}
+EXPORT_SYMBOL_GPL(sst_fw_unload);
+
+/* free single firmware object */
+void sst_fw_free(struct sst_fw *sst_fw)
+{
+ struct sst_dsp *dsp = sst_fw->dsp;
+
+ mutex_lock(&dsp->mutex);
+ list_del(&sst_fw->list);
+ mutex_unlock(&dsp->mutex);
+
+ if (sst_fw->dma_buf)
+ dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
+ sst_fw->dmable_fw_paddr);
+ kfree(sst_fw);
+}
+EXPORT_SYMBOL_GPL(sst_fw_free);
+
+/* free all firmware objects */
+void sst_fw_free_all(struct sst_dsp *dsp)
+{
+ struct sst_fw *sst_fw, *t;
+
+ mutex_lock(&dsp->mutex);
+ list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {
+
+ list_del(&sst_fw->list);
+ dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
+ sst_fw->dmable_fw_paddr);
+ kfree(sst_fw);
+ }
+ mutex_unlock(&dsp->mutex);
+}
+EXPORT_SYMBOL_GPL(sst_fw_free_all);
+
+/* create a new SST generic module from FW template */
+struct sst_module *sst_module_new(struct sst_fw *sst_fw,
+ struct sst_module_template *template, void *private)
+{
+ struct sst_dsp *dsp = sst_fw->dsp;
+ struct sst_module *sst_module;
+
+ sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
+ if (sst_module == NULL)
+ return NULL;
+
+ sst_module->id = template->id;
+ sst_module->dsp = dsp;
+ sst_module->sst_fw = sst_fw;
+ sst_module->scratch_size = template->scratch_size;
+ sst_module->persistent_size = template->persistent_size;
+ sst_module->entry = template->entry;
+ sst_module->state = SST_MODULE_STATE_UNLOADED;
+
+ INIT_LIST_HEAD(&sst_module->block_list);
+ INIT_LIST_HEAD(&sst_module->runtime_list);
+
+ mutex_lock(&dsp->mutex);
+ list_add(&sst_module->list, &dsp->module_list);
+ mutex_unlock(&dsp->mutex);
+
+ return sst_module;
+}
+EXPORT_SYMBOL_GPL(sst_module_new);
+
+/* free firmware module and remove from available list */
+void sst_module_free(struct sst_module *sst_module)
+{
+ struct sst_dsp *dsp = sst_module->dsp;
+
+ mutex_lock(&dsp->mutex);
+ list_del(&sst_module->list);
+ mutex_unlock(&dsp->mutex);
+
+ kfree(sst_module);
+}
+EXPORT_SYMBOL_GPL(sst_module_free);
+
+struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
+ int id, void *private)
+{
+ struct sst_dsp *dsp = module->dsp;
+ struct sst_module_runtime *runtime;
+
+ runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
+ if (runtime == NULL)
+ return NULL;
+
+ runtime->id = id;
+ runtime->dsp = dsp;
+ runtime->module = module;
+ INIT_LIST_HEAD(&runtime->block_list);
+
+ mutex_lock(&dsp->mutex);
+ list_add(&runtime->list, &module->runtime_list);
+ mutex_unlock(&dsp->mutex);
+
+ return runtime;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_new);
+
+void sst_module_runtime_free(struct sst_module_runtime *runtime)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+
+ mutex_lock(&dsp->mutex);
+ list_del(&runtime->list);
+ mutex_unlock(&dsp->mutex);
+
+ kfree(runtime);
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_free);
+
+static struct sst_mem_block *find_block(struct sst_dsp *dsp,
+ struct sst_block_allocator *ba)
+{
+ struct sst_mem_block *block;
+
+ list_for_each_entry(block, &dsp->free_block_list, list) {
+ if (block->type == ba->type && block->offset == ba->offset)
+ return block;
+ }
+
+ return NULL;
+}
+
+/* Block allocations must start on a block boundary */
+static int block_alloc_contiguous(struct sst_dsp *dsp,
+ struct sst_block_allocator *ba, struct list_head *block_list)
+{
+ struct list_head tmp = LIST_HEAD_INIT(tmp);
+ struct sst_mem_block *block;
+ u32 block_start = SST_HSW_BLOCK_ANY;
+ int size = ba->size, offset = ba->offset;
+
+ while (ba->size > 0) {
+
+ block = find_block(dsp, ba);
+ if (!block) {
+ list_splice(&tmp, &dsp->free_block_list);
+
+ ba->size = size;
+ ba->offset = offset;
+ return -ENOMEM;
+ }
+
+ list_move_tail(&block->list, &tmp);
+ ba->offset += block->size;
+ ba->size -= block->size;
+ }
+ ba->size = size;
+ ba->offset = offset;
+
+ list_for_each_entry(block, &tmp, list) {
+
+ if (block->offset < block_start)
+ block_start = block->offset;
+
+ list_add(&block->module_list, block_list);
+
+ dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
+ }
+
+ list_splice(&tmp, &dsp->used_block_list);
+ return 0;
+}
+
+/* allocate first free DSP blocks for data - callers hold locks */
+static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list)
+{
+ struct sst_mem_block *block, *tmp;
+ int ret = 0;
+
+ if (ba->size == 0)
+ return 0;
+
+ /* find the first free whole block that can hold the module */
+ list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
+
+ /* ignore blocks with wrong type */
+ if (block->type != ba->type)
+ continue;
+
+ if (ba->size > block->size)
+ continue;
+
+ ba->offset = block->offset;
+ block->bytes_used = ba->size % block->size;
+ list_add(&block->module_list, block_list);
+ list_move(&block->list, &dsp->used_block_list);
+ dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
+ return 0;
+ }
+
+ /* then find free multiple blocks that can hold module */
+ list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
+
+ /* ignore blocks with wrong type */
+ if (block->type != ba->type)
+ continue;
+
+ /* does the module span more than one block ? */
+ if (ba->size > block->size) {
+
+ /* align ba to block boundary */
+ ba->offset = block->offset;
+
+ ret = block_alloc_contiguous(dsp, ba, block_list);
+ if (ret == 0)
+ return ret;
+
+ }
+ }
+
+ /* not enough free block space */
+ return -ENOMEM;
+}
+
+int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list)
+{
+ int ret;
+
+ dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
+ ba->size, ba->offset, ba->type);
+
+ mutex_lock(&dsp->mutex);
+
+ ret = block_alloc(dsp, ba, block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
+ goto out;
+ }
+
+ /* prepare DSP blocks for module usage */
+ ret = block_list_prepare(dsp, block_list);
+ if (ret < 0)
+ dev_err(dsp->dev, "error: prepare failed\n");
+
+out:
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_alloc_blocks);
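+
+/*
+ * Illustrative sketch (not part of the imported kernel sources): a typical
+ * caller pairs sst_alloc_blocks() with sst_free_blocks().  "buffer_bytes"
+ * is a hypothetical size supplied by the caller.
+ *
+ *	struct sst_block_allocator ba = {
+ *		.type	= SST_MEM_DRAM,
+ *		.offset	= 0,
+ *		.size	= buffer_bytes,
+ *	};
+ *	LIST_HEAD(blocks);
+ *
+ *	if (sst_alloc_blocks(dsp, &ba, &blocks) < 0)
+ *		return -ENOMEM;
+ *	...
+ *	sst_free_blocks(dsp, &blocks);
+ */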
+
+int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
+{
+ mutex_lock(&dsp->mutex);
+ block_list_remove(dsp, block_list);
+ mutex_unlock(&dsp->mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_free_blocks);
+
+/* allocate memory blocks for static module addresses - callers hold locks */
+static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list)
+{
+ struct sst_mem_block *block, *tmp;
+ struct sst_block_allocator ba_tmp = *ba;
+ u32 end = ba->offset + ba->size, block_end;
+ int err;
+
+ /* only IRAM/DRAM blocks are managed */
+ if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
+ return 0;
+
+ /* are blocks already attached to this module */
+ list_for_each_entry_safe(block, tmp, block_list, module_list) {
+
+ /* ignore blocks with wrong type */
+ if (block->type != ba->type)
+ continue;
+
+ block_end = block->offset + block->size;
+
+ /* find block that holds section */
+ if (ba->offset >= block->offset && end <= block_end)
+ return 0;
+
+ /* does the section extend past the end of this block ? */
+ if (ba->offset >= block->offset && ba->offset < block_end) {
+
+ /* align ba to block boundary */
+ ba_tmp.size -= block_end - ba->offset;
+ ba_tmp.offset = block_end;
+ err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
+ if (err < 0)
+ return -ENOMEM;
+
+ /* module already owns blocks */
+ return 0;
+ }
+ }
+
+ /* find first free blocks that can hold section in free list */
+ list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
+ block_end = block->offset + block->size;
+
+ /* ignore blocks with wrong type */
+ if (block->type != ba->type)
+ continue;
+
+ /* find block that holds section */
+ if (ba->offset >= block->offset && end <= block_end) {
+
+ /* add block */
+ list_move(&block->list, &dsp->used_block_list);
+ list_add(&block->module_list, block_list);
+ dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
+ return 0;
+ }
+
+ /* does the section extend past the end of this block ? */
+ if (ba->offset >= block->offset && ba->offset < block_end) {
+
+ /* add block */
+ list_move(&block->list, &dsp->used_block_list);
+ list_add(&block->module_list, block_list);
+ /* align ba to block boundary */
+ ba_tmp.size -= block_end - ba->offset;
+ ba_tmp.offset = block_end;
+
+ err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
+ if (err < 0)
+ return -ENOMEM;
+
+ return 0;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+/* Load fixed module data into DSP memory blocks */
+int sst_module_alloc_blocks(struct sst_module *module)
+{
+ struct sst_dsp *dsp = module->dsp;
+ struct sst_fw *sst_fw = module->sst_fw;
+ struct sst_block_allocator ba;
+ int ret;
+
+ memset(&ba, 0, sizeof(ba));
+ ba.size = module->size;
+ ba.type = module->type;
+ ba.offset = module->offset;
+
+ dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
+ ba.size, ba.offset, ba.type);
+
+ mutex_lock(&dsp->mutex);
+
+ /* alloc blocks that include this section */
+ ret = block_alloc_fixed(dsp, &ba, &module->block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev,
+ "error: no free blocks for section at offset 0x%x size 0x%x\n",
+ module->offset, module->size);
+ mutex_unlock(&dsp->mutex);
+ return -ENOMEM;
+ }
+
+ /* prepare DSP blocks for module copy */
+ ret = block_list_prepare(dsp, &module->block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: fw module prepare failed\n");
+ goto err;
+ }
+
+ /* copy partial module data to blocks */
+ if (dsp->fw_use_dma) {
+ ret = sst_dsp_dma_copyto(dsp,
+ dsp->addr.lpe_base + module->offset,
+ sst_fw->dmable_fw_paddr + module->data_offset,
+ module->size);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: module copy failed\n");
+ goto err;
+ }
+ } else {
+ sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
+ module->size);
+ }
+
+ mutex_unlock(&dsp->mutex);
+ return ret;
+
+err:
+ block_list_remove(dsp, &module->block_list);
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);
+
+/* Unload entire module from DSP memory */
+int sst_module_free_blocks(struct sst_module *module)
+{
+ struct sst_dsp *dsp = module->dsp;
+
+ mutex_lock(&dsp->mutex);
+ block_list_remove(dsp, &module->block_list);
+ mutex_unlock(&dsp->mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_module_free_blocks);
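+
+/*
+ * Illustrative sketch (not part of the imported sources): the expected module
+ * lifecycle as driven by the firmware loader.  "template" stands for a
+ * struct sst_module_template filled in from the parsed firmware image.
+ *
+ *	module = sst_module_new(sst_fw, &template, NULL);
+ *	if (!module)
+ *		return -ENOMEM;
+ *	if (sst_module_alloc_blocks(module) < 0) {
+ *		sst_module_free(module);
+ *		return -ENOMEM;
+ *	}
+ *	...
+ *	sst_module_free_blocks(module);
+ *	sst_module_free(module);
+ */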
+
+int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
+ int offset)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+ struct sst_module *module = runtime->module;
+ struct sst_block_allocator ba;
+ int ret;
+
+ if (module->persistent_size == 0)
+ return 0;
+
+ memset(&ba, 0, sizeof(ba));
+ ba.size = module->persistent_size;
+ ba.type = SST_MEM_DRAM;
+
+ mutex_lock(&dsp->mutex);
+
+ /* do we need to allocate at a fixed address ? */
+ if (offset != 0) {
+
+ ba.offset = offset;
+
+ dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
+ ba.size, ba.type, ba.offset);
+
+ /* alloc blocks that include this section */
+ ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);
+
+ } else {
+ dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
+ ba.size, ba.type);
+
+ /* alloc blocks that include this section */
+ ret = block_alloc(dsp, &ba, &runtime->block_list);
+ }
+ if (ret < 0) {
+ dev_err(dsp->dev,
+ "error: no free blocks for runtime module size 0x%x\n",
+ module->persistent_size);
+ mutex_unlock(&dsp->mutex);
+ return -ENOMEM;
+ }
+ runtime->persistent_offset = ba.offset;
+
+ /* prepare DSP blocks for module copy */
+ ret = block_list_prepare(dsp, &runtime->block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: runtime block prepare failed\n");
+ goto err;
+ }
+
+ mutex_unlock(&dsp->mutex);
+ return ret;
+
+err:
+ block_list_remove(dsp, &runtime->block_list);
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);
+
+int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+
+ mutex_lock(&dsp->mutex);
+ block_list_remove(dsp, &runtime->block_list);
+ mutex_unlock(&dsp->mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
+
+int sst_module_runtime_save(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+ struct sst_module *module = runtime->module;
+ int ret = 0;
+
+ dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
+ runtime->id, runtime->persistent_offset,
+ module->persistent_size);
+
+ context->buffer = dma_alloc_coherent(dsp->dma_dev,
+ module->persistent_size,
+ &context->dma_buffer, GFP_DMA | GFP_KERNEL);
+ if (!context->buffer) {
+ dev_err(dsp->dev, "error: DMA context alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dsp->mutex);
+
+ if (dsp->fw_use_dma) {
+
+ ret = sst_dsp_dma_get_channel(dsp, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
+ dsp->addr.lpe_base + runtime->persistent_offset,
+ module->persistent_size);
+ sst_dsp_dma_put_channel(dsp);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: context copy failed\n");
+ goto err;
+ }
+ } else {
+ sst_memcpy32(context->buffer, dsp->addr.lpe +
+ runtime->persistent_offset,
+ module->persistent_size);
+ }
+
+err:
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_save);
+
+int sst_module_runtime_restore(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+ struct sst_module *module = runtime->module;
+ int ret = 0;
+
+ dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
+ runtime->id, runtime->persistent_offset,
+ module->persistent_size);
+
+ mutex_lock(&dsp->mutex);
+
+ if (!context->buffer) {
+ dev_info(dsp->dev, "no context buffer need to restore!\n");
+ goto err;
+ }
+
+ if (dsp->fw_use_dma) {
+
+ ret = sst_dsp_dma_get_channel(dsp, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = sst_dsp_dma_copyto(dsp,
+ dsp->addr.lpe_base + runtime->persistent_offset,
+ context->dma_buffer, module->persistent_size);
+ sst_dsp_dma_put_channel(dsp);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: module copy failed\n");
+ goto err;
+ }
+ } else {
+ sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
+ context->buffer, module->persistent_size);
+ }
+
+ dma_free_coherent(dsp->dma_dev, module->persistent_size,
+ context->buffer, context->dma_buffer);
+ context->buffer = NULL;
+
+err:
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
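+
+/*
+ * Illustrative sketch (not part of the imported sources): persistent runtime
+ * data is saved before a DSP power cycle and restored afterwards:
+ *
+ *	sst_module_runtime_save(runtime, &context);
+ *	... power down the DSP, later power up and reload the firmware ...
+ *	sst_module_runtime_restore(runtime, &context);
+ *
+ * "context" is a struct sst_module_runtime_context owned by the caller.
+ */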
+
+/* register a DSP memory block for use with FW based modules */
+struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
+ u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
+ void *private)
+{
+ struct sst_mem_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (block == NULL)
+ return NULL;
+
+ block->offset = offset;
+ block->size = size;
+ block->index = index;
+ block->type = type;
+ block->dsp = dsp;
+ block->private = private;
+ block->ops = ops;
+
+ mutex_lock(&dsp->mutex);
+ list_add(&block->list, &dsp->free_block_list);
+ mutex_unlock(&dsp->mutex);
+
+ return block;
+}
+EXPORT_SYMBOL_GPL(sst_mem_block_register);
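+
+/*
+ * Illustrative sketch (not part of the imported sources): a platform driver
+ * would register its IRAM/DRAM as fixed-size blocks at probe time.  The
+ * block size, block count and "hsw_block_ops" names are hypothetical.
+ *
+ *	for (i = 0; i < dram_blocks; i++)
+ *		sst_mem_block_register(dsp, i * block_size, block_size,
+ *				       SST_MEM_DRAM, &hsw_block_ops, i, dsp);
+ */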
+
+/* unregister all DSP memory blocks */
+void sst_mem_block_unregister_all(struct sst_dsp *dsp)
+{
+ struct sst_mem_block *block, *tmp;
+
+ mutex_lock(&dsp->mutex);
+
+ /* unregister used blocks */
+ list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
+ list_del(&block->list);
+ kfree(block);
+ }
+
+ /* unregister free blocks */
+ list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
+ list_del(&block->list);
+ kfree(block);
+ }
+
+ mutex_unlock(&dsp->mutex);
+}
+EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
+
+/* allocate scratch buffer blocks */
+int sst_block_alloc_scratch(struct sst_dsp *dsp)
+{
+ struct sst_module *module;
+ struct sst_block_allocator ba;
+ int ret;
+
+ mutex_lock(&dsp->mutex);
+
+ /* calculate required scratch size */
+ dsp->scratch_size = 0;
+ list_for_each_entry(module, &dsp->module_list, list) {
+ dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
+ module->id, module->scratch_size);
+ if (dsp->scratch_size < module->scratch_size)
+ dsp->scratch_size = module->scratch_size;
+ }
+
+ dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
+ dsp->scratch_size);
+
+ if (dsp->scratch_size == 0) {
+ dev_info(dsp->dev, "no modules need scratch buffer\n");
+ mutex_unlock(&dsp->mutex);
+ return 0;
+ }
+
+ /* allocate blocks for module scratch buffers */
+ dev_dbg(dsp->dev, "allocating scratch blocks\n");
+
+ ba.size = dsp->scratch_size;
+ ba.type = SST_MEM_DRAM;
+
+ /* do we need to allocate at fixed offset */
+ if (dsp->scratch_offset != 0) {
+
+ dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
+ ba.size, ba.type, ba.offset);
+
+ ba.offset = dsp->scratch_offset;
+
+ /* alloc blocks that include this section */
+ ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);
+
+ } else {
+ dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
+ ba.size, ba.type);
+
+ ba.offset = 0;
+ ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
+ }
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
+ mutex_unlock(&dsp->mutex);
+ return ret;
+ }
+
+ ret = block_list_prepare(dsp, &dsp->scratch_block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: scratch block prepare failed\n");
+ mutex_unlock(&dsp->mutex);
+ return ret;
+ }
+
+ /* all modules share the same scratch buffer offset */
+ dsp->scratch_offset = ba.offset;
+ mutex_unlock(&dsp->mutex);
+ return dsp->scratch_size;
+}
+EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
+
+/* free all scratch blocks */
+void sst_block_free_scratch(struct sst_dsp *dsp)
+{
+ mutex_lock(&dsp->mutex);
+ block_list_remove(dsp, &dsp->scratch_block_list);
+ mutex_unlock(&dsp->mutex);
+}
+EXPORT_SYMBOL_GPL(sst_block_free_scratch);
+
+/* get a module from its unique ID */
+struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
+{
+ struct sst_module *module;
+
+ mutex_lock(&dsp->mutex);
+
+ list_for_each_entry(module, &dsp->module_list, list) {
+ if (module->id == id) {
+ mutex_unlock(&dsp->mutex);
+ return module;
+ }
+ }
+
+ mutex_unlock(&dsp->mutex);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_module_get_from_id);
+
+struct sst_module_runtime *sst_module_runtime_get_from_id(
+ struct sst_module *module, u32 id)
+{
+ struct sst_module_runtime *runtime;
+ struct sst_dsp *dsp = module->dsp;
+
+ mutex_lock(&dsp->mutex);
+
+ list_for_each_entry(runtime, &module->runtime_list, list) {
+ if (runtime->id == id) {
+ mutex_unlock(&dsp->mutex);
+ return runtime;
+ }
+ }
+
+ mutex_unlock(&dsp->mutex);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);
+
+/* returns block address in DSP address space */
+u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
+ enum sst_mem_type type)
+{
+ switch (type) {
+ case SST_MEM_IRAM:
+ return offset - dsp->addr.iram_offset +
+ dsp->addr.dsp_iram_offset;
+ case SST_MEM_DRAM:
+ return offset - dsp->addr.dram_offset +
+ dsp->addr.dsp_dram_offset;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
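+
+/*
+ * Worked example (illustrative numbers only): with addr.iram_offset = 0x80000
+ * and addr.dsp_iram_offset = 0xA0000, a host-side IRAM offset of 0x81000
+ * translates to 0x81000 - 0x80000 + 0xA0000 = 0xA1000 in DSP address space.
+ */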
diff --git a/kernel/sound/soc/intel/common/sst-ipc.c b/kernel/sound/soc/intel/common/sst-ipc.c
new file mode 100644
index 000000000..4b62a5538
--- /dev/null
+++ b/kernel/sound/soc/intel/common/sst-ipc.c
@@ -0,0 +1,294 @@
+/*
+ * Intel SST generic IPC Support
+ *
+ * Copyright (C) 2015, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/kthread.h>
+#include <sound/asound.h>
+
+#include "sst-dsp.h"
+#include "sst-dsp-priv.h"
+#include "sst-ipc.h"
+
+/* IPC message timeout (msecs) */
+#define IPC_TIMEOUT_MSECS 300
+
+#define IPC_EMPTY_LIST_SIZE 8
+
+/* locks held by caller */
+static struct ipc_message *msg_get_empty(struct sst_generic_ipc *ipc)
+{
+ struct ipc_message *msg = NULL;
+
+ if (!list_empty(&ipc->empty_list)) {
+ msg = list_first_entry(&ipc->empty_list, struct ipc_message,
+ list);
+ list_del(&msg->list);
+ }
+
+ return msg;
+}
+
+static int tx_wait_done(struct sst_generic_ipc *ipc,
+ struct ipc_message *msg, void *rx_data)
+{
+ unsigned long flags;
+ int ret;
+
+ /* wait for DSP completion (in all cases at the moment, including pending) */
+ ret = wait_event_timeout(msg->waitq, msg->complete,
+ msecs_to_jiffies(IPC_TIMEOUT_MSECS));
+
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+ if (ret == 0) {
+ if (ipc->ops.shim_dbg != NULL)
+ ipc->ops.shim_dbg(ipc, "message timeout");
+
+ list_del(&msg->list);
+ ret = -ETIMEDOUT;
+ } else {
+
+ /* copy the data returned from DSP */
+ if (msg->rx_size)
+ memcpy(rx_data, msg->rx_data, msg->rx_size);
+ ret = msg->errno;
+ }
+
+ list_add_tail(&msg->list, &ipc->empty_list);
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+ return ret;
+}
+
+static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes, void *rx_data,
+ size_t rx_bytes, int wait)
+{
+ struct ipc_message *msg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+
+ msg = msg_get_empty(ipc);
+ if (msg == NULL) {
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+ return -EBUSY;
+ }
+
+ msg->header = header;
+ msg->tx_size = tx_bytes;
+ msg->rx_size = rx_bytes;
+ msg->wait = wait;
+ msg->errno = 0;
+ msg->pending = false;
+ msg->complete = false;
+
+ if ((tx_bytes) && (ipc->ops.tx_data_copy != NULL))
+ ipc->ops.tx_data_copy(msg, tx_data, tx_bytes);
+
+ list_add_tail(&msg->list, &ipc->tx_list);
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+
+ queue_kthread_work(&ipc->kworker, &ipc->kwork);
+
+ if (wait)
+ return tx_wait_done(ipc, msg, rx_data);
+ else
+ return 0;
+}
+
+static int msg_empty_list_init(struct sst_generic_ipc *ipc)
+{
+ int i;
+
+ ipc->msg = kzalloc(sizeof(struct ipc_message) *
+ IPC_EMPTY_LIST_SIZE, GFP_KERNEL);
+ if (ipc->msg == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
+ init_waitqueue_head(&ipc->msg[i].waitq);
+ list_add(&ipc->msg[i].list, &ipc->empty_list);
+ }
+
+ return 0;
+}
+
+static void ipc_tx_msgs(struct kthread_work *work)
+{
+ struct sst_generic_ipc *ipc =
+ container_of(work, struct sst_generic_ipc, kwork);
+ struct ipc_message *msg;
+ unsigned long flags;
+ u64 ipcx;
+
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+
+ if (list_empty(&ipc->tx_list) || ipc->pending) {
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+ return;
+ }
+
+ /* if the DSP is busy, we will TX messages after IRQ.
+ * also postpone if we are in the middle of processing a completion irq */
+ ipcx = sst_dsp_shim_read_unlocked(ipc->dsp, SST_IPCX);
+ if (ipcx & (SST_IPCX_BUSY | SST_IPCX_DONE)) {
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+ return;
+ }
+
+ msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
+ list_move(&msg->list, &ipc->rx_list);
+
+ if (ipc->ops.tx_msg != NULL)
+ ipc->ops.tx_msg(ipc, msg);
+
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+}
+
+int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
+{
+ return ipc_tx_message(ipc, header, tx_data, tx_bytes,
+ rx_data, rx_bytes, 1);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_tx_message_wait);
+
+int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes)
+{
+ return ipc_tx_message(ipc, header, tx_data, tx_bytes,
+ NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nowait);
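+
+/*
+ * Illustrative sketch (not part of the imported sources): a typical request
+ * that waits for the DSP reply.  "header", "req" and "reply" are hypothetical
+ * platform-specific message values/structures.
+ *
+ *	ret = sst_ipc_tx_message_wait(ipc, header, &req, sizeof(req),
+ *				      &reply, sizeof(reply));
+ *	if (ret < 0)
+ *		dev_err(ipc->dev, "IPC request failed: %d\n", ret);
+ */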
+
+struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
+ u64 header)
+{
+ struct ipc_message *msg;
+ u64 mask = (u64)-1;
+
+ if (ipc->ops.reply_msg_match != NULL)
+ header = ipc->ops.reply_msg_match(header, &mask);
+
+ if (list_empty(&ipc->rx_list)) {
+ dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
+ header);
+ return NULL;
+ }
+
+ list_for_each_entry(msg, &ipc->rx_list, list) {
+ if ((msg->header & mask) == header)
+ return msg;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_ipc_reply_find_msg);
+
+/* locks held by caller */
+void sst_ipc_tx_msg_reply_complete(struct sst_generic_ipc *ipc,
+ struct ipc_message *msg)
+{
+ msg->complete = true;
+
+ if (!msg->wait)
+ list_add_tail(&msg->list, &ipc->empty_list);
+ else
+ wake_up(&msg->waitq);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_tx_msg_reply_complete);
+
+void sst_ipc_drop_all(struct sst_generic_ipc *ipc)
+{
+ struct ipc_message *msg, *tmp;
+ unsigned long flags;
+ int tx_drop_cnt = 0, rx_drop_cnt = 0;
+
+ /* drop all TX and RX messages before we stall + reset DSP */
+ spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+
+ list_for_each_entry_safe(msg, tmp, &ipc->tx_list, list) {
+ list_move(&msg->list, &ipc->empty_list);
+ tx_drop_cnt++;
+ }
+
+ list_for_each_entry_safe(msg, tmp, &ipc->rx_list, list) {
+ list_move(&msg->list, &ipc->empty_list);
+ rx_drop_cnt++;
+ }
+
+ spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+
+ if (tx_drop_cnt || rx_drop_cnt)
+ dev_err(ipc->dev, "dropped IPC msg RX=%d, TX=%d\n",
+ tx_drop_cnt, rx_drop_cnt);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_drop_all);
+
+int sst_ipc_init(struct sst_generic_ipc *ipc)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&ipc->tx_list);
+ INIT_LIST_HEAD(&ipc->rx_list);
+ INIT_LIST_HEAD(&ipc->empty_list);
+ init_waitqueue_head(&ipc->wait_txq);
+
+ ret = msg_empty_list_init(ipc);
+ if (ret < 0)
+ return -ENOMEM;
+
+ /* start the IPC message thread */
+ init_kthread_worker(&ipc->kworker);
+ ipc->tx_thread = kthread_run(kthread_worker_fn,
+ &ipc->kworker, "%s",
+ dev_name(ipc->dev));
+ if (IS_ERR(ipc->tx_thread)) {
+ dev_err(ipc->dev, "error: failed to create message TX task\n");
+ ret = PTR_ERR(ipc->tx_thread);
+ kfree(ipc->msg);
+ return ret;
+ }
+
+ init_kthread_work(&ipc->kwork, ipc_tx_msgs);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_ipc_init);
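+
+/*
+ * Illustrative sketch (not part of the imported sources): expected life cycle
+ * of the generic IPC state once ipc->dev, ipc->dsp and ipc->ops have been
+ * filled in by the platform driver.
+ *
+ *	ret = sst_ipc_init(ipc);
+ *	if (ret < 0)
+ *		return ret;
+ *	...
+ *	sst_ipc_drop_all(ipc);
+ *	sst_ipc_fini(ipc);
+ *
+ * sst_ipc_drop_all() would typically be called before stalling/resetting
+ * the DSP.
+ */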
+
+void sst_ipc_fini(struct sst_generic_ipc *ipc)
+{
+ if (ipc->tx_thread)
+ kthread_stop(ipc->tx_thread);
+
+ kfree(ipc->msg);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_fini);
+
+/* Module information */
+MODULE_AUTHOR("Jin Yao");
+MODULE_DESCRIPTION("Intel SST IPC generic");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/sound/soc/intel/common/sst-ipc.h b/kernel/sound/soc/intel/common/sst-ipc.h
new file mode 100644
index 000000000..125ea451a
--- /dev/null
+++ b/kernel/sound/soc/intel/common/sst-ipc.h
@@ -0,0 +1,91 @@
+/*
+ * Intel SST generic IPC Support
+ *
+ * Copyright (C) 2015, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SST_GENERIC_IPC_H
+#define __SST_GENERIC_IPC_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+
+#define IPC_MAX_MAILBOX_BYTES 256
+
+struct ipc_message {
+ struct list_head list;
+ u64 header;
+
+ /* direction wrt host CPU */
+ char tx_data[IPC_MAX_MAILBOX_BYTES];
+ size_t tx_size;
+ char rx_data[IPC_MAX_MAILBOX_BYTES];
+ size_t rx_size;
+
+ wait_queue_head_t waitq;
+ bool pending;
+ bool complete;
+ bool wait;
+ int errno;
+};
+
+struct sst_generic_ipc;
+
+struct sst_plat_ipc_ops {
+ void (*tx_msg)(struct sst_generic_ipc *, struct ipc_message *);
+ void (*shim_dbg)(struct sst_generic_ipc *, const char *);
+ void (*tx_data_copy)(struct ipc_message *, char *, size_t);
+ u64 (*reply_msg_match)(u64 header, u64 *mask);
+};
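+
+/*
+ * Illustrative note (not part of the imported sources): reply_msg_match lets
+ * platform code strip per-message fields from a reply header so it can be
+ * matched against a pending request on the rx_list, e.g.
+ *
+ *	static u64 hsw_reply_msg_match(u64 header, u64 *mask)
+ *	{
+ *		*mask = MSG_ID_MASK;
+ *		return header & *mask;
+ *	}
+ *
+ * "hsw_reply_msg_match" and "MSG_ID_MASK" are hypothetical names.
+ */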
+
+/* SST generic IPC data */
+struct sst_generic_ipc {
+ struct device *dev;
+ struct sst_dsp *dsp;
+
+ /* IPC messaging */
+ struct list_head tx_list;
+ struct list_head rx_list;
+ struct list_head empty_list;
+ wait_queue_head_t wait_txq;
+ struct task_struct *tx_thread;
+ struct kthread_worker kworker;
+ struct kthread_work kwork;
+ bool pending;
+ struct ipc_message *msg;
+
+ struct sst_plat_ipc_ops ops;
+};
+
+int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes);
+
+int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
+ void *tx_data, size_t tx_bytes);
+
+struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
+ u64 header);
+
+void sst_ipc_tx_msg_reply_complete(struct sst_generic_ipc *ipc,
+ struct ipc_message *msg);
+
+void sst_ipc_drop_all(struct sst_generic_ipc *ipc);
+int sst_ipc_init(struct sst_generic_ipc *ipc);
+void sst_ipc_fini(struct sst_generic_ipc *ipc);
+
+#endif