AnsweredAssumed Answered

Do SPI transfers through DMA block CPU ?

Question asked by Bastien Tunroc on Sep 11, 2018
Latest reply on Sep 17, 2018 by igorpadykov

Hello,

I am currently working with imx SPI driver on an imx6. I am using a kernel based on the 3.10 branch of linux-fslc.

I am not used to DMA transfers, but what I understand about DMA is that it should let the CPU do other tasks while a transfer through DMA is in progress.

 

But I have the feeling that when the DMA transfer is initialized and then triggered, the CPU actively waits for the DMA transfer to finish.

 

In the following code, I see a call to wait_for_completion_timeout, which is a non-interruptible function. Can you confirm that this means that when my CPU performs a SPI transfer it is blocked waiting for the end of the DMA transfer?

 

 

/*
 * spi_imx_dma_transfer() - perform one SPI transfer using SDMA.
 *
 * Prepares and submits slave-SG descriptors for TX and RX, triggers the
 * ECSPI module, then sleeps on completion objects until the DMA callbacks
 * fire or a timeout elapses.
 *
 * NOTE: wait_for_completion_timeout() puts the calling task to sleep
 * (TASK_UNINTERRUPTIBLE) until complete() or the timeout; it does NOT
 * busy-wait, so the CPU is free to schedule other work while the DMA
 * engine moves the data. "Uninterruptible" only means signals cannot
 * abort the wait.
 *
 * Returns: transfer->len on success, -ETIMEDOUT on DMA timeout,
 * -EAGAIN if descriptor preparation failed (caller falls back to PIO).
 */
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
struct spi_transfer *transfer)
{
struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
int ret;
int left = 0;
struct spi_master *master = spi_imx->bitbang.master;
struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
/*
 * NOTE(review): tx and rx are addresses of members embedded in
 * *transfer, so these NULL checks can never fail; presumably the
 * intent was to test transfer->tx_buf / transfer->rx_buf — confirm.
 */
if (tx) {
desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
tx->sgl, tx->nents, DMA_TO_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx)
goto no_dma;
/* Callback presumably completes dma_tx_completion below — confirm. */
desc_tx->callback = spi_imx_dma_tx_callback;
desc_tx->callback_param = (void *)spi_imx;
dmaengine_submit(desc_tx);
}
if (rx) {
struct scatterlist *sgl_last = &rx->sgl[rx->nents - 1];
unsigned int orig_length = sgl_last->length;
/* Mask to round the last SG entry down to a watermark multiple. */
int wml_mask = ~(spi_imx->rx_wml - 1);
/*
 * Adjust the transfer length of the last scatterlist if there are
 * some tail data, use PIO read to get the tail data since DMA
 * sometimes miss the last tail interrupt.
 */
left = transfer->len % spi_imx->rx_wml;
if (left)
sgl_last->length = orig_length & wml_mask;
/*
 * NOTE(review): if this prep fails, the already-submitted TX
 * descriptor is never terminated before jumping to no_dma.
 */
desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
rx->sgl, rx->nents, DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
goto no_dma;
desc_rx->callback = spi_imx_dma_rx_callback;
desc_rx->callback_param = (void *)spi_imx;
dmaengine_submit(desc_rx);
}
/* Re-arm both completions before the transfer can finish. */
reinit_completion(&spi_imx->dma_rx_completion);
reinit_completion(&spi_imx->dma_tx_completion);
/* Trigger the cspi module. */
spi_imx->dma_finished = 0;
spi_imx->devtype_data->trigger(spi_imx);
dma_async_issue_pending(master->dma_tx);
dma_async_issue_pending(master->dma_rx);
/*
 * Wait SDMA to finish the data transfer.
 * The task sleeps here; wait_for_completion_timeout() returns 0 on
 * timeout and the remaining jiffies (>0) on success.
 */
ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
IMX_DMA_TIMEOUT(transfer->len));
if (!ret) {
pr_warn("%s %s: I/O Error in DMA TX:%x\n",
dev_driver_string(&master->dev),
dev_name(&master->dev), transfer->len);
dmaengine_terminate_all(master->dma_tx);
} else {
/* TX done; now wait for RX DMA to drain the FIFO. */
ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
IMX_DMA_TIMEOUT(transfer->len));
if (!ret) {
pr_warn("%s %s: I/O Error in DMA RX:%x\n",
dev_driver_string(&master->dev),
dev_name(&master->dev), transfer->len);
spi_imx->devtype_data->reset(spi_imx);
dmaengine_terminate_all(master->dma_rx);
} else if (left) {
/* read the tail data by PIO */
void *tmpbuf = transfer->rx_buf + transfer->len - left;
/*
 * 0x8 is presumably the RR (RX FIFO ready) bit of
 * MX51_ECSPI_STAT — confirm against the register layout.
 * Each readl() drains one FIFO word; only the low byte
 * is stored, one byte per word.
 */
while (readl(spi_imx->base + MX51_ECSPI_STAT) & 0x8) {
*(char *)tmpbuf =
readl(spi_imx->base + MXC_CSPIRXDATA);
tmpbuf++;
}
}
}
/* Tell the trigger hook the DMA phase is over (disables DMA mode). */
spi_imx->dma_finished = 1;
spi_imx->devtype_data->trigger(spi_imx);
/* Map the jiffies-based result to the SPI core convention. */
if (!ret)
ret = -ETIMEDOUT;
else if (ret > 0)
ret = transfer->len;
return ret;
no_dma:
/* -EAGAIN tells the caller to retry this transfer with PIO. */
pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
dev_driver_string(&master->dev),
dev_name(&master->dev));
return -EAGAIN;

}

Outcomes