path: root/sound/soc/intel/sst-firmware.c
author     Liam Girdwood  2014-10-28 18:37:12 +0100
committer  Mark Brown     2014-10-28 23:25:02 +0100
commit     e9600bc166d529cf03862afae51fb2e3cf987d02
tree       c85a52b6ebdd39c244a7881a320da540406559ff  /sound/soc/intel/sst-firmware.c
parent     ASoC: Intel: mrfld: Define sst_res_info for acpi
ASoC: Intel: Make ADSP memory block allocation more generic
Current block allocation is tied to block type and requestor type. Make the allocation more generic by removing the struct module parameter and adding a generic block allocator structure. Also pass in the list that the blocks have to be added to, in order to remove the dependence on block requestor type.

ASoC: Intel: Update scratch allocator to use generic block allocator
Update the scratch allocator to use the generic block allocator and calculate the total scratch buffer size.

ASoC: Intel: Add call to calculate offsets internally within the DSP
Add a call to calculate the internal DSP memory addresses used to allocate persistent and scratch buffers.

ASoC: Intel: Add runtime module support
Add support for runtime module objects that can be created for every FW module that is parsed from the FW file. This gives a 1:N mapping between the FW module from file and the runtime instantiations of that module. We also need to make sure we remove every module and runtime module when we unload the FW.

ASoC: Intel: Add DMA firmware loading support
Add support for DMA to load firmware modules to the DSP memory blocks. Two DMA engines are supported, DesignWare and Intel MID.

ASoC: Intel: Add runtime module lookup API call
Add an API to allow quick lookup of runtime modules based on ID.

ASoC: Intel: Provide streams with dynamic module information
Remove the hard-coded module parameters and provide each module with dynamically generated buffer information for scratch and persistent buffers.

Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Diffstat (limited to 'sound/soc/intel/sst-firmware.c')
-rw-r--r--  sound/soc/intel/sst-firmware.c | 929
1 file changed, 757 insertions(+), 172 deletions(-)
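The diff is easier to follow with the shape of the new allocator request in mind. struct sst_block_allocator is not defined in this file (it presumably lives in sst-dsp-priv.h, which this series also touches); the sketch below is reconstructed purely from how the diff uses it, so field order and exact types are assumptions:

/* Reconstructed sketch, not the header's actual definition: a generic
 * block allocation request, filled in by the caller and updated by the
 * allocator (ba.offset is written back with the chosen offset). */
struct sst_block_allocator {
	u32 type;	/* memory type, e.g. SST_MEM_IRAM or SST_MEM_DRAM */
	u32 offset;	/* in: fixed offset for fixed allocs; out: result */
	u32 size;	/* requested size in bytes */
};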
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index cf3d19997126..692a6aef82df 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -23,6 +23,11 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
+
+/* supported DMA engine drivers */
+#include <linux/platform_data/dma-dw.h>
+#include <linux/dma/dw.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -30,7 +35,20 @@
#include "sst-dsp.h"
#include "sst-dsp-priv.h"
-static void block_module_remove(struct sst_module *module);
+#define SST_DMA_RESOURCES 2
+#define SST_DSP_DMA_MAX_BURST 0x3
+#define SST_HSW_BLOCK_ANY 0xffffffff
+
+#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000
+
+struct sst_dma {
+ struct sst_dsp *sst;
+
+ struct dw_dma_chip *chip;
+
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *ch;
+};
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
@@ -38,6 +56,281 @@ static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 byte
__iowrite32_copy((void *)dest, src, bytes/4);
}
+static void sst_dma_transfer_complete(void *arg)
+{
+ struct sst_dsp *sst = (struct sst_dsp *)arg;
+
+ dev_dbg(sst->dev, "DMA: callback\n");
+}
+
+static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct sst_dma *dma = sst->dma;
+
+ if (dma->ch == NULL) {
+ dev_err(sst->dev, "error: no DMA channel\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
+ (unsigned long)src_addr, (unsigned long)dest_addr, size);
+
+ desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
+ src_addr, size, DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(sst->dev, "error: dma prep memcpy failed\n");
+ return -EINVAL;
+ }
+
+ desc->callback = sst_dma_transfer_complete;
+ desc->callback_param = sst;
+
+ desc->tx_submit(desc);
+ dma_wait_for_async_tx(desc);
+
+ return 0;
+}
+
+/* copy to DSP */
+int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size)
+{
+ return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
+ src_addr, size);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);
+
+/* copy from DSP */
+int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
+ dma_addr_t src_addr, size_t size)
+{
+ return sst_dsp_dma_copy(sst, dest_addr,
+ src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
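Note the asymmetry between the two wrappers: copyto ORs SST_HSW_MASK_DMA_ADDR_DSP into the destination, copyfrom into the source, so in both cases it is the DSP-side address that carries the mask. For a hypothetical DSP offset of 0x00080000, the address put on the bus is 0x00080000 | 0xfff00000 = 0xfff80000, steering the engine at the DSP memory window rather than host RAM.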
+
+/* remove module from memory - callers hold locks */
+static void block_list_remove(struct sst_dsp *dsp,
+ struct list_head *block_list)
+{
+ struct sst_mem_block *block, *tmp;
+ int err;
+
+ /* disable each block */
+ list_for_each_entry(block, block_list, module_list) {
+
+ if (block->ops && block->ops->disable) {
+ err = block->ops->disable(block);
+ if (err < 0)
+ dev_err(dsp->dev,
+ "error: cant disable block %d:%d\n",
+ block->type, block->index);
+ }
+ }
+
+ /* mark each block as free */
+ list_for_each_entry_safe(block, tmp, block_list, module_list) {
+ list_del(&block->module_list);
+ list_move(&block->list, &dsp->free_block_list);
+ dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
+ }
+}
+
+/* prepare the memory block to receive data from host - callers hold locks */
+static int block_list_prepare(struct sst_dsp *dsp,
+ struct list_head *block_list)
+{
+ struct sst_mem_block *block;
+ int ret = 0;
+
+ /* enable each block so that it's ready for data */
+ list_for_each_entry(block, block_list, module_list) {
+
+ if (block->ops && block->ops->enable) {
+ ret = block->ops->enable(block);
+ if (ret < 0) {
+ dev_err(dsp->dev,
+ "error: cant disable block %d:%d\n",
+ block->type, block->index);
+ goto err;
+ }
+ }
+ }
+ return ret;
+
+err:
+ list_for_each_entry(block, block_list, module_list) {
+ if (block->ops && block->ops->disable)
+ block->ops->disable(block);
+ }
+ return ret;
+}
+
+struct dw_dma_platform_data dw_pdata = {
+ .is_private = 1,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+};
+
+static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
+ int irq)
+{
+ struct dw_dma_chip *chip;
+ int err;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return ERR_PTR(-ENOMEM);
+
+ chip->irq = irq;
+ chip->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(chip->regs))
+ return ERR_CAST(chip->regs);
+
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
+ if (err)
+ return ERR_PTR(err);
+
+ chip->dev = dev;
+ err = dw_dma_probe(chip, &dw_pdata);
+ if (err)
+ return ERR_PTR(err);
+
+ return chip;
+}
+
+static void dw_remove(struct dw_dma_chip *chip)
+{
+ dw_dma_remove(chip);
+}
+
+static bool dma_chan_filter(struct dma_chan *chan, void *param)
+{
+ struct sst_dsp *dsp = (struct sst_dsp *)param;
+
+ return chan->device->dev == dsp->dma_dev;
+}
+
+int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
+{
+ struct sst_dma *dma = dsp->dma;
+ struct dma_slave_config slave;
+ dma_cap_mask_t mask;
+ int ret;
+
+ /* The Intel MID DMA engine driver needs the slave config set but
+ * the Synopsys DMA engine driver safely ignores the slave config */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
+ if (dma->ch == NULL) {
+ dev_err(dsp->dev, "error: DMA request channel failed\n");
+ return -EIO;
+ }
+
+ memset(&slave, 0, sizeof(slave));
+ slave.direction = DMA_MEM_TO_DEV;
+ slave.src_addr_width =
+ slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;
+
+ ret = dmaengine_slave_config(dma->ch, &slave);
+ if (ret) {
+ dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
+ ret);
+ dma_release_channel(dma->ch);
+ dma->ch = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);
+
+void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
+{
+ struct sst_dma *dma = dsp->dma;
+
+ if (!dma->ch)
+ return;
+
+ dma_release_channel(dma->ch);
+ dma->ch = NULL;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
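Together the two exports give callers an acquire/copy/release pattern around the channel. A minimal sketch of a caller, with illustrative names not taken from this patch (example_load_chunk, load_paddr, dsp_offset):

/* Sketch: push one image chunk into DSP memory over DMA.
 * load_paddr, dsp_offset and len are hypothetical caller values. */
static int example_load_chunk(struct sst_dsp *dsp, dma_addr_t load_paddr,
	u32 dsp_offset, size_t len)
{
	int ret;

	ret = sst_dsp_dma_get_channel(dsp, 0);
	if (ret < 0)
		return ret;

	/* sst_dsp_dma_copyto() applies SST_HSW_MASK_DMA_ADDR_DSP to the
	 * destination, so a plain LPE-relative address can be passed */
	ret = sst_dsp_dma_copyto(dsp, dsp->addr.lpe_base + dsp_offset,
			load_paddr, len);

	sst_dsp_dma_put_channel(dsp);
	return ret;
}

This is the same get/copy/put sequence that sst_fw_new() and the runtime save/restore paths below follow.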
+
+int sst_dma_new(struct sst_dsp *sst)
+{
+ struct sst_pdata *sst_pdata = sst->pdata;
+ struct sst_dma *dma;
+ struct resource mem;
+ const char *dma_dev_name;
+ int ret = 0;
+
+ /* configure the correct platform data for whatever DMA engine
+ * is attached to the ADSP IP. */
+ switch (sst->pdata->dma_engine) {
+ case SST_DMA_TYPE_DW:
+ dma_dev_name = "dw_dmac";
+ break;
+ case SST_DMA_TYPE_MID:
+ dma_dev_name = "Intel MID DMA";
+ break;
+ default:
+ dev_err(sst->dev, "error: invalid DMA engine %d\n",
+ sst->pdata->dma_engine);
+ return -EINVAL;
+ }
+
+ dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->sst = sst;
+
+ memset(&mem, 0, sizeof(mem));
+
+ mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
+ mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
+ mem.flags = IORESOURCE_MEM;
+
+ /* now register DMA engine device */
+ dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
+ if (IS_ERR(dma->chip)) {
+ dev_err(sst->dev, "error: DMA device register failed\n");
+ ret = PTR_ERR(dma->chip);
+ goto err_dma_dev;
+ }
+
+ sst->dma = dma;
+ sst->fw_use_dma = true;
+ return 0;
+
+err_dma_dev:
+ devm_kfree(sst->dev, dma);
+ return ret;
+}
+EXPORT_SYMBOL(sst_dma_new);
+
+void sst_dma_free(struct sst_dma *dma)
+{
+
+ if (dma == NULL)
+ return;
+
+ if (dma->ch)
+ dma_release_channel(dma->ch);
+
+ if (dma->chip)
+ dw_remove(dma->chip);
+
+}
+EXPORT_SYMBOL(sst_dma_free);
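sst_dma_new() and sst_dma_free() bracket the engine's lifetime. The expected pairing in a core-specific init/teardown path looks roughly like the sketch below (the example_* names are illustrative, not from the patch):

/* Sketch: DMA bring-up/tear-down around the rest of DSP init */
static int example_dsp_init(struct sst_dsp *sst)
{
	/* registers the DMA engine behind the ADSP and, on success,
	 * sets sst->fw_use_dma so sst_fw_new() takes the DMA path */
	return sst_dma_new(sst);
}

static void example_dsp_free(struct sst_dsp *sst)
{
	/* releases any held channel and unregisters the engine */
	sst_dma_free(sst->dma);
}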
+
/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
const struct firmware *fw, void *private)
@@ -68,6 +361,12 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
/* copy FW data to DMA-able memory */
memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);
+ if (dsp->fw_use_dma) {
+ err = sst_dsp_dma_get_channel(dsp, 0);
+ if (err < 0)
+ goto chan_err;
+ }
+
/* call core-specific FW parser to load FW data into DSP */
err = dsp->ops->parse_fw(sst_fw);
if (err < 0) {
@@ -75,6 +374,9 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
goto parse_err;
}
+ if (dsp->fw_use_dma)
+ sst_dsp_dma_put_channel(dsp);
+
mutex_lock(&dsp->mutex);
list_add(&sst_fw->list, &dsp->fw_list);
mutex_unlock(&dsp->mutex);
@@ -82,9 +384,13 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
return sst_fw;
parse_err:
- dma_free_coherent(dsp->dev, sst_fw->size,
+ if (dsp->fw_use_dma)
+ sst_dsp_dma_put_channel(dsp);
+chan_err:
+ dma_free_coherent(dsp->dma_dev, sst_fw->size,
sst_fw->dma_buf,
sst_fw->dmable_fw_paddr);
+ sst_fw->dma_buf = NULL;
kfree(sst_fw);
return NULL;
}
@@ -108,21 +414,37 @@ EXPORT_SYMBOL_GPL(sst_fw_reload);
void sst_fw_unload(struct sst_fw *sst_fw)
{
- struct sst_dsp *dsp = sst_fw->dsp;
- struct sst_module *module, *tmp;
+ struct sst_dsp *dsp = sst_fw->dsp;
+ struct sst_module *module, *mtmp;
+ struct sst_module_runtime *runtime, *rtmp;
+
+ dev_dbg(dsp->dev, "unloading firmware\n");
- dev_dbg(dsp->dev, "unloading firmware\n");
+ mutex_lock(&dsp->mutex);
+
+ /* check module by module */
+ list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
+ if (module->sst_fw == sst_fw) {
+
+ /* remove runtime modules */
+ list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {
+
+ block_list_remove(dsp, &runtime->block_list);
+ list_del(&runtime->list);
+ kfree(runtime);
+ }
+
+ /* now remove the module */
+ block_list_remove(dsp, &module->block_list);
+ list_del(&module->list);
+ kfree(module);
+ }
+ }
- mutex_lock(&dsp->mutex);
- list_for_each_entry_safe(module, tmp, &dsp->module_list, list) {
- if (module->sst_fw == sst_fw) {
- block_module_remove(module);
- list_del(&module->list);
- kfree(module);
- }
- }
+ /* remove all scratch blocks */
+ block_list_remove(dsp, &dsp->scratch_block_list);
- mutex_unlock(&dsp->mutex);
+ mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);
@@ -135,7 +457,8 @@ void sst_fw_free(struct sst_fw *sst_fw)
list_del(&sst_fw->list);
mutex_unlock(&dsp->mutex);
- dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
+ if (sst_fw->dma_buf)
+ dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
sst_fw->dmable_fw_paddr);
kfree(sst_fw);
}
@@ -172,11 +495,11 @@ struct sst_module *sst_module_new(struct sst_fw *sst_fw,
sst_module->id = template->id;
sst_module->dsp = dsp;
sst_module->sst_fw = sst_fw;
-
- memcpy(&sst_module->s, &template->s, sizeof(struct sst_module_data));
- memcpy(&sst_module->p, &template->p, sizeof(struct sst_module_data));
+ sst_module->scratch_size = template->scratch_size;
+ sst_module->persistent_size = template->persistent_size;
INIT_LIST_HEAD(&sst_module->block_list);
+ INIT_LIST_HEAD(&sst_module->runtime_list);
mutex_lock(&dsp->mutex);
list_add(&sst_module->list, &dsp->module_list);
@@ -199,73 +522,122 @@ void sst_module_free(struct sst_module *sst_module)
}
EXPORT_SYMBOL_GPL(sst_module_free);
-static struct sst_mem_block *find_block(struct sst_dsp *dsp, int type,
- u32 offset)
+struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
+ int id, void *private)
+{
+ struct sst_dsp *dsp = module->dsp;
+ struct sst_module_runtime *runtime;
+
+ runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
+ if (runtime == NULL)
+ return NULL;
+
+ runtime->id = id;
+ runtime->dsp = dsp;
+ runtime->module = module;
+ INIT_LIST_HEAD(&runtime->block_list);
+
+ mutex_lock(&dsp->mutex);
+ list_add(&runtime->list, &module->runtime_list);
+ mutex_unlock(&dsp->mutex);
+
+ return runtime;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_new);
+
+void sst_module_runtime_free(struct sst_module_runtime *runtime)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+
+ mutex_lock(&dsp->mutex);
+ list_del(&runtime->list);
+ mutex_unlock(&dsp->mutex);
+
+ kfree(runtime);
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_free);
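sst_module_runtime_new()/free() implement the 1:N mapping described in the commit message: one parsed FW module, many live instantiations. A per-stream sketch, assuming a hypothetical stream_id used as the runtime ID (all example_* names are illustrative):

/* Sketch: give one stream its own instance of a parsed module,
 * including private persistent memory. */
static struct sst_module_runtime *example_open_stream(struct sst_dsp *dsp,
	u32 module_id, int stream_id)
{
	struct sst_module *module;
	struct sst_module_runtime *runtime;

	module = sst_module_get_from_id(dsp, module_id);
	if (module == NULL)
		return NULL;

	runtime = sst_module_runtime_new(module, stream_id, NULL);
	if (runtime == NULL)
		return NULL;

	/* offset 0 lets the allocator place the persistent buffer */
	if (sst_module_runtime_alloc_blocks(runtime, 0) < 0) {
		sst_module_runtime_free(runtime);
		return NULL;
	}

	return runtime;
}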
+
+static struct sst_mem_block *find_block(struct sst_dsp *dsp,
+ struct sst_block_allocator *ba)
{
struct sst_mem_block *block;
list_for_each_entry(block, &dsp->free_block_list, list) {
- if (block->type == type && block->offset == offset)
+ if (block->type == ba->type && block->offset == ba->offset)
return block;
}
return NULL;
}
-static int block_alloc_contiguous(struct sst_module *module,
- struct sst_module_data *data, u32 offset, int size)
+/* Block allocator must be on block boundary */
+static int block_alloc_contiguous(struct sst_dsp *dsp,
+ struct sst_block_allocator *ba, struct list_head *block_list)
{
struct list_head tmp = LIST_HEAD_INIT(tmp);
- struct sst_dsp *dsp = module->dsp;
struct sst_mem_block *block;
+ u32 block_start = SST_HSW_BLOCK_ANY;
+ int size = ba->size, offset = ba->offset;
- while (size > 0) {
- block = find_block(dsp, data->type, offset);
+ while (ba->size > 0) {
+
+ block = find_block(dsp, ba);
if (!block) {
list_splice(&tmp, &dsp->free_block_list);
+
+ ba->size = size;
+ ba->offset = offset;
return -ENOMEM;
}
list_move_tail(&block->list, &tmp);
- offset += block->size;
- size -= block->size;
+ ba->offset += block->size;
+ ba->size -= block->size;
}
+ ba->size = size;
+ ba->offset = offset;
+
+ list_for_each_entry(block, &tmp, list) {
+
+ if (block->offset < block_start)
+ block_start = block->offset;
+
+ list_add(&block->module_list, block_list);
- list_for_each_entry(block, &tmp, list)
- list_add(&block->module_list, &module->block_list);
+ dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
+ }
list_splice(&tmp, &dsp->used_block_list);
return 0;
}
-/* allocate free DSP blocks for module data - callers hold locks */
-static int block_alloc(struct sst_module *module,
- struct sst_module_data *data)
+/* allocate first free DSP blocks for data - callers hold locks */
+static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list)
{
- struct sst_dsp *dsp = module->dsp;
struct sst_mem_block *block, *tmp;
int ret = 0;
- if (data->size == 0)
+ if (ba->size == 0)
return 0;
/* find first free whole blocks that can hold module */
list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
/* ignore blocks with wrong type */
- if (block->type != data->type)
+ if (block->type != ba->type)
continue;
- if (data->size > block->size)
+ if (ba->size > block->size)
continue;
- data->offset = block->offset;
- block->data_type = data->data_type;
- block->bytes_used = data->size % block->size;
- list_add(&block->module_list, &module->block_list);
+ ba->offset = block->offset;
+ block->bytes_used = ba->size % block->size;
+ list_add(&block->module_list, block_list);
list_move(&block->list, &dsp->used_block_list);
- dev_dbg(dsp->dev, " *module %d added block %d:%d\n",
- module->id, block->type, block->index);
+ dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
return 0;
}
@@ -273,15 +645,19 @@ static int block_alloc(struct sst_module *module,
list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
/* ignore blocks with wrong type */
- if (block->type != data->type)
+ if (block->type != ba->type)
continue;
/* do we span > 1 blocks */
- if (data->size > block->size) {
- ret = block_alloc_contiguous(module, data,
- block->offset, data->size);
+ if (ba->size > block->size) {
+
+ /* align ba to block boundary */
+ ba->offset = block->offset;
+
+ ret = block_alloc_contiguous(dsp, ba, block_list);
if (ret == 0)
return ret;
+
}
}
@@ -289,93 +665,74 @@ static int block_alloc(struct sst_module *module,
return -ENOMEM;
}
-/* remove module from memory - callers hold locks */
-static void block_module_remove(struct sst_module *module)
+int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list)
{
- struct sst_mem_block *block, *tmp;
- struct sst_dsp *dsp = module->dsp;
- int err;
+ int ret;
- /* disable each block */
- list_for_each_entry(block, &module->block_list, module_list) {
+ dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
+ ba->size, ba->offset, ba->type);
- if (block->ops && block->ops->disable) {
- err = block->ops->disable(block);
- if (err < 0)
- dev_err(dsp->dev,
- "error: cant disable block %d:%d\n",
- block->type, block->index);
- }
- }
+ mutex_lock(&dsp->mutex);
- /* mark each block as free */
- list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
- list_del(&block->module_list);
- list_move(&block->list, &dsp->free_block_list);
+ ret = block_alloc(dsp, ba, block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
+ goto out;
}
-}
-/* prepare the memory block to receive data from host - callers hold locks */
-static int block_module_prepare(struct sst_module *module)
-{
- struct sst_mem_block *block;
- int ret = 0;
-
- /* enable each block so that's it'e ready for module P/S data */
- list_for_each_entry(block, &module->block_list, module_list) {
+ /* prepare DSP blocks for module usage */
+ ret = block_list_prepare(dsp, block_list);
+ if (ret < 0)
+ dev_err(dsp->dev, "error: prepare failed\n");
- if (block->ops && block->ops->enable) {
- ret = block->ops->enable(block);
- if (ret < 0) {
- dev_err(module->dsp->dev,
- "error: cant disable block %d:%d\n",
- block->type, block->index);
- goto err;
- }
- }
- }
+out:
+ mutex_unlock(&dsp->mutex);
return ret;
+}
+EXPORT_SYMBOL_GPL(sst_alloc_blocks);
-err:
- list_for_each_entry(block, &module->block_list, module_list) {
- if (block->ops && block->ops->disable)
- block->ops->disable(block);
- }
- return ret;
+int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
+{
+ mutex_lock(&dsp->mutex);
+ block_list_remove(dsp, block_list);
+ mutex_unlock(&dsp->mutex);
+ return 0;
}
+EXPORT_SYMBOL_GPL(sst_free_blocks);
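These two exports are the public face of the generic allocator: the caller describes the request in a struct sst_block_allocator and supplies its own list to collect the blocks, so the allocator no longer cares who is asking. A minimal sketch (example_grab_dram is an illustrative name):

/* Sketch: allocate 4 KB of DRAM onto a caller-owned block list */
static int example_grab_dram(struct sst_dsp *dsp, struct list_head *blocks)
{
	struct sst_block_allocator ba;

	ba.size = 0x1000;
	ba.type = SST_MEM_DRAM;
	ba.offset = 0;	/* output: block_alloc() writes back the offset chosen */

	/* on success the blocks are enabled, moved to the used list and
	 * linked onto @blocks; release them later with sst_free_blocks() */
	return sst_alloc_blocks(dsp, &ba, blocks);
}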
/* allocate memory blocks for static module addresses - callers hold locks */
-static int block_alloc_fixed(struct sst_module *module,
- struct sst_module_data *data)
+static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
+ struct list_head *block_list)
{
- struct sst_dsp *dsp = module->dsp;
struct sst_mem_block *block, *tmp;
- u32 end = data->offset + data->size, block_end;
+ u32 end = ba->offset + ba->size, block_end;
int err;
/* only IRAM/DRAM blocks are managed */
- if (data->type != SST_MEM_IRAM && data->type != SST_MEM_DRAM)
+ if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
return 0;
/* are blocks already attached to this module */
- list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
+ list_for_each_entry_safe(block, tmp, block_list, module_list) {
- /* force compacting mem blocks of the same data_type */
- if (block->data_type != data->data_type)
+ /* ignore blocks with wrong type */
+ if (block->type != ba->type)
continue;
block_end = block->offset + block->size;
/* find block that holds section */
- if (data->offset >= block->offset && end < block_end)
+ if (ba->offset >= block->offset && end <= block_end)
return 0;
/* does block span more than 1 section */
- if (data->offset >= block->offset && data->offset < block_end) {
+ if (ba->offset >= block->offset && ba->offset < block_end) {
- err = block_alloc_contiguous(module, data,
- block->offset + block->size,
- data->size - block->size);
+ /* align ba to block boundary */
+ ba->size -= block_end - ba->offset;
+ ba->offset = block_end;
+ err = block_alloc_contiguous(dsp, ba, block_list);
if (err < 0)
return -ENOMEM;
@@ -388,82 +745,270 @@ static int block_alloc_fixed(struct sst_module *module,
list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
block_end = block->offset + block->size;
+ /* ignore blocks with wrong type */
+ if (block->type != ba->type)
+ continue;
+
/* find block that holds section */
- if (data->offset >= block->offset && end < block_end) {
+ if (ba->offset >= block->offset && end <= block_end) {
/* add block */
- block->data_type = data->data_type;
list_move(&block->list, &dsp->used_block_list);
- list_add(&block->module_list, &module->block_list);
+ list_add(&block->module_list, block_list);
+ dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
+ block->type, block->index, block->offset);
return 0;
}
/* does block span more than 1 section */
- if (data->offset >= block->offset && data->offset < block_end) {
+ if (ba->offset >= block->offset && ba->offset < block_end) {
- err = block_alloc_contiguous(module, data,
- block->offset, data->size);
+ /* align ba to block boundary */
+ ba->offset = block->offset;
+
+ err = block_alloc_contiguous(dsp, ba, block_list);
if (err < 0)
return -ENOMEM;
return 0;
}
-
}
return -ENOMEM;
}
/* Load fixed module data into DSP memory blocks */
-int sst_module_insert_fixed_block(struct sst_module *module,
- struct sst_module_data *data)
+int sst_module_alloc_blocks(struct sst_module *module)
{
struct sst_dsp *dsp = module->dsp;
+ struct sst_fw *sst_fw = module->sst_fw;
+ struct sst_block_allocator ba;
int ret;
+ ba.size = module->size;
+ ba.type = module->type;
+ ba.offset = module->offset;
+
+ dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
+ ba.size, ba.offset, ba.type);
+
mutex_lock(&dsp->mutex);
/* alloc blocks that includes this section */
- ret = block_alloc_fixed(module, data);
+ ret = block_alloc_fixed(dsp, &ba, &module->block_list);
if (ret < 0) {
dev_err(dsp->dev,
"error: no free blocks for section at offset 0x%x size 0x%x\n",
- data->offset, data->size);
+ module->offset, module->size);
mutex_unlock(&dsp->mutex);
return -ENOMEM;
}
/* prepare DSP blocks for module copy */
- ret = block_module_prepare(module);
+ ret = block_list_prepare(dsp, &module->block_list);
if (ret < 0) {
dev_err(dsp->dev, "error: fw module prepare failed\n");
goto err;
}
/* copy partial module data to blocks */
- sst_memcpy32(dsp->addr.lpe + data->offset, data->data, data->size);
+ if (dsp->fw_use_dma) {
+ ret = sst_dsp_dma_copyto(dsp,
+ dsp->addr.lpe_base + module->offset,
+ sst_fw->dmable_fw_paddr + module->data_offset,
+ module->size);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: module copy failed\n");
+ goto err;
+ }
+ } else
+ sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
+ module->size);
mutex_unlock(&dsp->mutex);
return ret;
err:
- block_module_remove(module);
+ block_list_remove(dsp, &module->block_list);
mutex_unlock(&dsp->mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block);
+EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);
/* Unload entire module from DSP memory */
-int sst_block_module_remove(struct sst_module *module)
+int sst_module_free_blocks(struct sst_module *module)
{
struct sst_dsp *dsp = module->dsp;
mutex_lock(&dsp->mutex);
- block_module_remove(module);
+ block_list_remove(dsp, &module->block_list);
+ mutex_unlock(&dsp->mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sst_module_free_blocks);
+
+int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
+ int offset)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+ struct sst_module *module = runtime->module;
+ struct sst_block_allocator ba;
+ int ret;
+
+ if (module->persistent_size == 0)
+ return 0;
+
+ ba.size = module->persistent_size;
+ ba.type = SST_MEM_DRAM;
+
+ mutex_lock(&dsp->mutex);
+
+ /* do we need to allocate at a fixed address ? */
+ if (offset != 0) {
+
+ ba.offset = offset;
+
+ dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
+ ba.size, ba.type, ba.offset);
+
+ /* alloc blocks that includes this section */
+ ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);
+
+ } else {
+ dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
+ ba.size, ba.type);
+
+ /* alloc blocks that includes this section */
+ ret = block_alloc(dsp, &ba, &runtime->block_list);
+ }
+ if (ret < 0) {
+ dev_err(dsp->dev,
+ "error: no free blocks for runtime module size 0x%x\n",
+ module->persistent_size);
+ mutex_unlock(&dsp->mutex);
+ return -ENOMEM;
+ }
+ runtime->persistent_offset = ba.offset;
+
+ /* prepare DSP blocks for module copy */
+ ret = block_list_prepare(dsp, &runtime->block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: runtime block prepare failed\n");
+ goto err;
+ }
+
+ mutex_unlock(&dsp->mutex);
+ return ret;
+
+err:
+ block_list_remove(dsp, &runtime->block_list);
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);
+
+int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+
+ mutex_lock(&dsp->mutex);
+ block_list_remove(dsp, &runtime->block_list);
mutex_unlock(&dsp->mutex);
return 0;
}
-EXPORT_SYMBOL_GPL(sst_block_module_remove);
+EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
+
+int sst_module_runtime_save(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+ struct sst_module *module = runtime->module;
+ int ret = 0;
+
+ dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
+ runtime->id, runtime->persistent_offset,
+ module->persistent_size);
+
+ context->buffer = dma_alloc_coherent(dsp->dma_dev,
+ module->persistent_size,
+ &context->dma_buffer, GFP_DMA | GFP_KERNEL);
+ if (!context->buffer) {
+ dev_err(dsp->dev, "error: DMA context alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dsp->mutex);
+
+ if (dsp->fw_use_dma) {
+
+ ret = sst_dsp_dma_get_channel(dsp, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
+ dsp->addr.lpe_base + runtime->persistent_offset,
+ module->persistent_size);
+ sst_dsp_dma_put_channel(dsp);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: context copy failed\n");
+ goto err;
+ }
+ } else
+ sst_memcpy32(context->buffer, dsp->addr.lpe +
+ runtime->persistent_offset,
+ module->persistent_size);
+
+err:
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_save);
+
+int sst_module_runtime_restore(struct sst_module_runtime *runtime,
+ struct sst_module_runtime_context *context)
+{
+ struct sst_dsp *dsp = runtime->dsp;
+ struct sst_module *module = runtime->module;
+ int ret = 0;
+
+ dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
+ runtime->id, runtime->persistent_offset,
+ module->persistent_size);
+
+ mutex_lock(&dsp->mutex);
+
+ if (!context->buffer) {
+ dev_info(dsp->dev, "no context buffer need to restore!\n");
+ goto err;
+ }
+
+ if (dsp->fw_use_dma) {
+
+ ret = sst_dsp_dma_get_channel(dsp, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = sst_dsp_dma_copyto(dsp,
+ dsp->addr.lpe_base + runtime->persistent_offset,
+ context->dma_buffer, module->persistent_size);
+ sst_dsp_dma_put_channel(dsp);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: module copy failed\n");
+ goto err;
+ }
+ } else
+ sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
+ context->buffer, module->persistent_size);
+
+ dma_free_coherent(dsp->dma_dev, module->persistent_size,
+ context->buffer, context->dma_buffer);
+ context->buffer = NULL;
+
+err:
+ mutex_unlock(&dsp->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_restore);
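save/restore bounce the runtime's persistent area through a DMA-coherent buffer that save allocates and restore frees, so a power cycle reduces to a symmetric pair of calls. A sketch, with context storage left to the caller (example_power_cycle is an illustrative name):

/* Sketch: preserve a runtime's persistent memory across a DSP power
 * cycle; ctx lives in whatever per-stream state the caller keeps. */
static int example_power_cycle(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *ctx)
{
	int ret;

	ret = sst_module_runtime_save(runtime, ctx);	/* alloc + copy out */
	if (ret < 0)
		return ret;

	/* ... power the DSP down and back up here ... */

	return sst_module_runtime_restore(runtime, ctx); /* copy in + free */
}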
/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
@@ -516,80 +1061,83 @@ void sst_mem_block_unregister_all(struct sst_dsp *dsp)
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);
/* allocate scratch buffer blocks */
-struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp)
+int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
- struct sst_module *sst_module, *scratch;
- struct sst_mem_block *block, *tmp;
- u32 block_size;
- int ret = 0;
-
- scratch = kzalloc(sizeof(struct sst_module), GFP_KERNEL);
- if (scratch == NULL)
- return NULL;
+ struct sst_module *module;
+ struct sst_block_allocator ba;
+ int ret;
mutex_lock(&dsp->mutex);
/* calculate required scratch size */
- list_for_each_entry(sst_module, &dsp->module_list, list) {
- if (scratch->s.size < sst_module->s.size)
- scratch->s.size = sst_module->s.size;
+ dsp->scratch_size = 0;
+ list_for_each_entry(module, &dsp->module_list, list) {
+ dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
+ module->id, module->scratch_size);
+ if (dsp->scratch_size < module->scratch_size)
+ dsp->scratch_size = module->scratch_size;
}
- dev_dbg(dsp->dev, "scratch buffer required is %d bytes\n",
- scratch->s.size);
-
- /* init scratch module */
- scratch->dsp = dsp;
- scratch->s.type = SST_MEM_DRAM;
- scratch->s.data_type = SST_DATA_S;
- INIT_LIST_HEAD(&scratch->block_list);
+ dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
+ dsp->scratch_size);
- /* check free blocks before looking at used blocks for space */
- if (!list_empty(&dsp->free_block_list))
- block = list_first_entry(&dsp->free_block_list,
- struct sst_mem_block, list);
- else
- block = list_first_entry(&dsp->used_block_list,
- struct sst_mem_block, list);
- block_size = block->size;
+ if (dsp->scratch_size == 0) {
+ dev_info(dsp->dev, "no modules need scratch buffer\n");
+ mutex_unlock(&dsp->mutex);
+ return 0;
+ }
/* allocate blocks for module scratch buffers */
dev_dbg(dsp->dev, "allocating scratch blocks\n");
- ret = block_alloc(scratch, &scratch->s);
+
+ ba.size = dsp->scratch_size;
+ ba.type = SST_MEM_DRAM;
+
+ /* do we need to allocate at fixed offset */
+ if (dsp->scratch_offset != 0) {
+
+ dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
+ ba.size, ba.type, ba.offset);
+
+ ba.offset = dsp->scratch_offset;
+
+ /* alloc blocks that includes this section */
+ ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);
+
+ } else {
+ dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
+ ba.size, ba.type);
+
+ ba.offset = 0;
+ ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
+ }
if (ret < 0) {
dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
- goto err;
+ mutex_unlock(&dsp->mutex);
+ return ret;
}
- /* assign the same offset of scratch to each module */
- list_for_each_entry(sst_module, &dsp->module_list, list)
- sst_module->s.offset = scratch->s.offset;
-
- mutex_unlock(&dsp->mutex);
- return scratch;
+ ret = block_list_prepare(dsp, &dsp->scratch_block_list);
+ if (ret < 0) {
+ dev_err(dsp->dev, "error: scratch block prepare failed\n");
+ mutex_unlock(&dsp->mutex);
+ return ret;
+ }
-err:
- list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
- list_del(&block->module_list);
+ /* assign the same offset of scratch to each module */
+ dsp->scratch_offset = ba.offset;
mutex_unlock(&dsp->mutex);
- return NULL;
+ return dsp->scratch_size;
}
-EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch);
+EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);
/* free all scratch blocks */
-void sst_mem_block_free_scratch(struct sst_dsp *dsp,
- struct sst_module *scratch)
+void sst_block_free_scratch(struct sst_dsp *dsp)
{
- struct sst_mem_block *block, *tmp;
-
mutex_lock(&dsp->mutex);
-
- list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
- list_del(&block->module_list);
-
+ block_list_remove(dsp, &dsp->scratch_block_list);
mutex_unlock(&dsp->mutex);
}
-EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch);
+EXPORT_SYMBOL_GPL(sst_block_free_scratch);
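Because scratch memory is shared rather than per-module, sst_block_alloc_scratch() sizes it as the maximum of the modules' requests, not their sum, which is why it must run only after every FW module has been parsed. A sketch of the expected ordering (example_after_parse is an illustrative name):

/* Sketch: size and place scratch once the whole FW has been parsed */
static int example_after_parse(struct sst_dsp *dsp)
{
	int scratch_size;

	scratch_size = sst_block_alloc_scratch(dsp);
	if (scratch_size < 0)
		return scratch_size;

	/* scratch_size may legitimately be 0 if no module asked for
	 * scratch; blocks are returned via sst_block_free_scratch()
	 * or when the FW is unloaded */
	return 0;
}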
/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
@@ -609,3 +1157,40 @@ struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);
+
+struct sst_module_runtime *sst_module_runtime_get_from_id(
+ struct sst_module *module, u32 id)
+{
+ struct sst_module_runtime *runtime;
+ struct sst_dsp *dsp = module->dsp;
+
+ mutex_lock(&dsp->mutex);
+
+ list_for_each_entry(runtime, &module->runtime_list, list) {
+ if (runtime->id == id) {
+ mutex_unlock(&dsp->mutex);
+ return runtime;
+ }
+ }
+
+ mutex_unlock(&dsp->mutex);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);
+
+/* returns block address in DSP address space */
+u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
+ enum sst_mem_type type)
+{
+ switch (type) {
+ case SST_MEM_IRAM:
+ return offset - dsp->addr.iram_offset +
+ dsp->addr.dsp_iram_offset;
+ case SST_MEM_DRAM:
+ return offset - dsp->addr.dram_offset +
+ dsp->addr.dsp_dram_offset;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
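For example, with hypothetical platform values addr.iram_offset = 0x80000 and addr.dsp_iram_offset = 0x0, a host-side IRAM offset of 0x81000 translates to 0x81000 - 0x80000 + 0x0 = 0x1000, i.e. the same block seen 0x1000 bytes into IRAM from the DSP's own address map.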