We are developing a system based on the i.MX31, with software based on the Linux 2.6.28 release from Freescale.
In our system, the CSPI port is configured as a slave port in 32-bit mode. Since the released source code does not
support CSPI DMA, we wrote a Linux driver to make it work. The driver does work, but we receive the data
byte-swapped: when the master sends 0x44332211, our DMA callback sees 0x11223344. We do not know
whether the i.MX31 can correct this byte order in hardware.
Some of the code is listed below. The driver is a character driver; every time the device is opened, the function "mxcspi_initdma" is called.
In our system the master port runs at a 16 MHz clock in 8-bit mode.
We have tried the "SWAP" bit in the TESTREG register, but it does not help.
/* DMA buffer geometry for the CSPI slave driver.
 * Sizes are parenthesized so the macros expand safely inside any
 * expression (e.g. `x / RXDMA_BUF_SIZE` would otherwise parse as
 * `x / 32 * 1024` and miscompute by a factor of 1024*1024).
 */
#define RXDMA_BUF_NUM	10		/* number of RX DMA ring buffers */
#define RXDMA_BUF_SIZE	(32 * 1024)	/* bytes per RX DMA buffer */
#define TXDMA_BUF_SIZE	(32 * 1024)	/* bytes for the TX staging buffer */
static void mxcspi_dmaread_callback(void *arg, int error, unsigned int cnt)
{
mxc_dma_requestbuf_t *readchnl_reqelem;
mxc_cspi_dmamap_t rx_buf_elem;
volatile unsigned int status;
int buff_id, num_bufs, i;
int len;
int ret;
#ifdef DEBUG_SPI_DRIVER
int usedspace, freespace;
#endif
cspi_driver_data_t *drv_data = (cspi_driver_data_t *)arg;
if (error != MXC_DMA_DONE)
printk("dma read error\n");
num_bufs = drv_data->num_rxdma_bufs;
buff_id = drv_data->dma_rxbuf_id;
readchnl_reqelem = drv_data->readchnl_reqelem;
if ((++drv_data->dma_rxbuf_id ) % (num_bufs) == 0){
drv_data->dma_rxbuf_id = 0;
}
rx_buf_elem = (mxc_cspi_dmamap_t *) (drv_data->rx_dmamap + buff_id);
status = __raw_readl(drv_data->regs + MXC_CSPISTAT);
if (status & MXC_CSPISTAT_RO){
printk("in dma read callback:rx fifo overflow\n");
}
/*
printk("dma read:0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x\n", \
rx_buf_elem->buf[0], rx_buf_elem->buf[1], rx_buf_elem->buf[2], rx_buf_elem->buf[3],
rx_buf_elem->buf[4], rx_buf_elem->buf[5], rx_buf_elem->buf[6], rx_buf_elem->buf[7]);
*/
#ifdef DEBUG_SPI_DRIVER
usedspace = kfifo_len(drv_data->readbuffer);
freespace = max_bufsiz - usedspace;
if (freespace <= cnt){
printk("kfifo free space is not enough(free:%d,required:%d)\n", freespace, cnt);
}
#endif
#if 1
// i = kfifo_put(drv_data->readbuffer, (unsigned char *)rx_buf_elem->buf, cnt);
#else
int loop = 0;
u32 tmp_data;
// printk("dma cnt:%d,cur buf:%d, rxbuf_id:%d\n", cnt, buff_id, drv_data->dma_rxbuf_id);
for (loop = 0; loop < cnt/4; loop++) {
tmp_data = (((rx_buf_elem->buf[loop] >> 24) & 0x000000FFL)
| ((rx_buf_elem->buf[loop] >> 8) & 0x0000FF00L)
| ((rx_buf_elem->buf[loop] << 8) & 0x00FF0000L)
| ((rx_buf_elem->buf[loop] << 24) & 0xFF000000L));
kfifo_put(drv_data->readbuffer, (char *)&tmp_data, 4);
}
#endif
if (kfifo_len(drv_data->readbuffer) >= drv_data->rx_cnt){
len = kfifo_len(drv_data->readbuffer);
wake_up_interruptible(&drv_data->rx_buf_wq);
}
/* Setup the DMA read request structures */
rx_buf_elem.src_addr = drv_data->mapbase;
rx_buf_elem.dst_addr = rx_buf_elem->dma_handle;
rx_buf_elem.num_of_bytes = = RXDMA_BUF_SIZE;
ret = mxc_dma_config(drv_data->rx_dma_channel, &readchnl_reqelem, 1 ,
MXC_DMA_MODE_READ);
if (ret != 0) {
printk("in dma read callback,error config dma\n");
return ;
}
ret = mxc_dma_callback_set(drv_data->rx_dma_channel, mxcspi_dmaread_callback,
drv_data);
if (ret != 0) {
printk("in dma read callback,error set dma callback\n");
return;
}
ret = mxc_dma_enable(drv_data->rx_dma_channel);
if (ret != 0){
printk("Can not enable dma channel:%d\n", drv_data->rx_dma_channel);
return;
}
}
/*
 * Acquire and arm the CSPI DMA channels. Called on every open() of the
 * char device.
 *
 * Requests the RX and TX SDMA channels, points one DMA read request at
 * each pre-allocated RX buffer, installs the read-completion callback
 * and starts the RX channel. Uses goto-based unwinding so every failure
 * path releases exactly the channels acquired so far.
 *
 * @drv_data: per-device driver state (buffers must already exist, see
 *            dma_mem_init())
 *
 * Returns 0 on success, -1 for a NULL argument, or a negative errno.
 */
static int mxcspi_initdma(cspi_driver_data_t *drv_data)
{
	struct device *dev;
	mxc_dma_requestbuf_t *req;
	mxc_cspi_dmamap_t *map_elem;
	int rxbufs;
	int idx;
	int status = 0;

	if (drv_data == NULL) {
		printk("drv_data is NULL\n");
		return -1;
	}
	dev = &drv_data->pdev->dev;
	drv_data->dma_rxbuf_id = 0;

	drv_data->rx_dma_channel = mxc_dma_request(drv_data->dma_rx_id, "MXC CSPI Read");
	if (drv_data->rx_dma_channel < 0) {
		dev_err(dev, "Can not get CSPI DMA Read channel\n");
		status = -ENODEV;
		goto err_no_rxchan;
	}
	drv_data->tx_dma_channel = mxc_dma_request(drv_data->dma_tx_id, "MXC CSPI Write");
	if (drv_data->tx_dma_channel < 0) {
		dev_err(dev, "Can not get CSPI DMA Write channel\n");
		status = -ENODEV;
		goto err_no_txchan;
	}

	/* One DMA read request per RX ring buffer: FIFO -> coherent buffer. */
	req = drv_data->readchnl_reqelem;
	rxbufs = drv_data->num_rxdma_bufs;
	for (idx = 0; idx < rxbufs; idx++) {
		map_elem = (mxc_cspi_dmamap_t *)(drv_data->rx_dmamap + idx);
		req[idx].src_addr = drv_data->mapbase;
		req[idx].dst_addr = map_elem->dma_handle;
		req[idx].num_of_bytes = RXDMA_BUF_SIZE;
	}

	status = mxc_dma_config(drv_data->rx_dma_channel, req, rxbufs,
				MXC_DMA_MODE_READ);
	if (status != 0) {
		dev_err(dev, "Can not config channel:%d\n", drv_data->rx_dma_channel);
		goto err_config_dma;
	}
	status = mxc_dma_callback_set(drv_data->rx_dma_channel, mxcspi_dmaread_callback,
				      drv_data);
	if (status != 0) {
		dev_err(dev, "Can not set dma Callback\n");
		goto err_set_dma_callback;
	}
	status = mxc_dma_enable(drv_data->rx_dma_channel);
	if (status != 0) {
		dev_err(dev, "Can not enable dma channel:%d\n", drv_data->rx_dma_channel);
		goto err_en_dma;
	}
	//NOTE!!!!!!!!!!!!
	// tasklet_init(&drv_data->dma_rx_tasklet, dma_rx_do_tasklet, (unsigned long)drv_data);
	drv_data->dma_txchnl_inuse = 0;
	return status;

err_en_dma:
err_set_dma_callback:
err_config_dma:
	mxc_dma_free(drv_data->tx_dma_channel);
	drv_data->tx_dma_channel = -1;
err_no_txchan:
	mxc_dma_free(drv_data->rx_dma_channel);
	drv_data->rx_dma_channel = -1;
err_no_rxchan:
	return status;
}
/*
 * Release both CSPI DMA channels and mark them invalid (-1).
 *
 * Counterpart of mxcspi_initdma(). Added the NULL-argument guard that
 * every sibling function (mxcspi_initdma, dma_mem_init, dma_mem_free)
 * already performs, for consistency.
 *
 * Returns 0 on success, -1 if drv_data is NULL.
 */
static int mxcspi_freedma(cspi_driver_data_t *drv_data)
{
	if (drv_data == NULL) {
		printk("drv_data is NULL\n");
		return -1;
	}
	mxc_dma_free(drv_data->tx_dma_channel);
	mxc_dma_free(drv_data->rx_dma_channel);
	drv_data->tx_dma_channel = -1;
	drv_data->rx_dma_channel = -1;
	return 0;
}
/*
 * Allocate all DMA-related memory for the driver: the TX staging buffer,
 * the RX buffer-descriptor array, the DMA read-request array, and one
 * coherent DMA buffer per RX ring slot.
 *
 * On any failure every allocation made so far is released (goto-based
 * unwinding) and a negative errno is returned; returns 0 on success,
 * -1 for a NULL argument.
 *
 * Fix vs. the original: the partial-failure cleanup loop freed the
 * coherent buffers with size RXDMA_BUF_SIZE although they were allocated
 * with PAGE_ALIGN(RXDMA_BUF_SIZE) — dma_free_coherent() must be given
 * the same size as the allocation.
 *
 * NOTE(review): dma_alloc_coherent() is called with a NULL device; on
 * this 2.6.28 ARM platform that appears intentional, but passing
 * &drv_data->pdev->dev (and matching it in every free) would be more
 * correct — confirm before changing, since dma_mem_free() must agree.
 */
static int dma_mem_init(cspi_driver_data_t *drv_data)
{
	int status = 0;
	int rxbufs = 0;
	int i, j;
	struct device *dev;
	mxc_cspi_dmamap_t *rx_buf_elem;

	if (drv_data == NULL) {
		printk("drv_data is NULL\n");
		return -1;
	}
	dev = &drv_data->pdev->dev;

	drv_data->tx_buf = kmalloc(TXDMA_BUF_SIZE, GFP_KERNEL);
	if (drv_data->tx_buf == NULL) {
		dev_err(dev, "Can not allocate tx_buf\n");
		status = -ENOMEM;
		goto err_no_txbuff;
	}

	rxbufs = drv_data->num_rxdma_bufs = RXDMA_BUF_NUM;
	drv_data->rx_dmamap = kmalloc(rxbufs * sizeof(mxc_cspi_dmamap_t), GFP_KERNEL);
	if (drv_data->rx_dmamap == NULL) {
		dev_err(dev, "Can not allocate rx_dmamap\n");
		status = -ENOMEM;
		goto err_no_rxbuff;
	}

	/* Allocate the DMA Receive Request structures */
	drv_data->readchnl_reqelem = kmalloc(rxbufs * sizeof(mxc_dma_requestbuf_t), GFP_KERNEL);
	if (drv_data->readchnl_reqelem == NULL) {
		dev_err(dev, "Can not allocate readchnl_reqelem\n");
		status = -ENOMEM;
		goto err_no_dmareq;
	}

	/* One coherent (uncached) buffer per RX ring slot. */
	for (i = 0; i < rxbufs; i++) {
		rx_buf_elem = (mxc_cspi_dmamap_t *)(drv_data->rx_dmamap + i);
		rx_buf_elem->buf =
		    dma_alloc_coherent(NULL, PAGE_ALIGN(RXDMA_BUF_SIZE),
				       &rx_buf_elem->dma_handle, GFP_DMA);
		if (rx_buf_elem->buf == NULL) {
			dev_err(dev, "Can not allocate rx_buf_elem->buf\n");
			/* unwind the buffers already allocated; the free
			 * size must match the PAGE_ALIGNed alloc size */
			for (j = 0; j < i; j++) {
				rx_buf_elem =
				    (mxc_cspi_dmamap_t *)(drv_data->rx_dmamap + j);
				dma_free_coherent(NULL, PAGE_ALIGN(RXDMA_BUF_SIZE),
						  rx_buf_elem->buf,
						  rx_buf_elem->dma_handle);
			}
			status = -ENOMEM;
			goto err_no_dmamem;
		}
	}
	return status;

err_no_dmamem:
	kfree(drv_data->readchnl_reqelem);
	drv_data->readchnl_reqelem = NULL;
err_no_dmareq:
	kfree(drv_data->rx_dmamap);
	drv_data->rx_dmamap = NULL;
err_no_rxbuff:
	kfree(drv_data->tx_buf);
	drv_data->tx_buf = NULL;
err_no_txbuff:
	return status;
}
/*
 * Free everything dma_mem_init() allocated: the coherent RX buffers,
 * the read-request array, the RX descriptor array and the TX buffer.
 * Pointers are reset to NULL so a double call is harmless.
 *
 * Fix vs. the original: the coherent buffers were freed with size
 * RXDMA_BUF_SIZE although dma_mem_init() allocates them with
 * PAGE_ALIGN(RXDMA_BUF_SIZE) — dma_free_coherent() must receive the
 * same size that was passed to dma_alloc_coherent().
 *
 * Returns 0 on success, -1 if drv_data is NULL.
 */
static int dma_mem_free(cspi_driver_data_t *drv_data)
{
	int i, rxbufs;
	mxc_cspi_dmamap_t *rx_buf_elem;

	if (drv_data == NULL) {
		printk("drv_data is NULL\n");
		return -1;
	}
	rxbufs = drv_data->num_rxdma_bufs;
	if (drv_data->rx_dmamap != NULL) {
		for (i = 0; i < rxbufs; i++) {
			rx_buf_elem = (mxc_cspi_dmamap_t *)(drv_data->rx_dmamap + i);
			dma_free_coherent(NULL, PAGE_ALIGN(RXDMA_BUF_SIZE),
					  rx_buf_elem->buf, rx_buf_elem->dma_handle);
		}
	}
	kfree(drv_data->readchnl_reqelem);
	drv_data->readchnl_reqelem = NULL;
	kfree(drv_data->rx_dmamap);
	drv_data->rx_dmamap = NULL;
	kfree(drv_data->tx_buf);
	drv_data->tx_buf = NULL;
	return 0;
}
The code below was added to arch/arm/mach-mx3/dma.c:
/* SDMA channel parameters for CSPI1 receive: peripheral FIFO -> memory. */
static mxc_sdma_channel_params_t mxc_sdma_cspi1_rx_params = {
.chnl_params = {
/* FIFO fill level at which the SDMA request fires — TODO confirm units (bytes vs words) against the SDMA script docs */
.watermark_level = 32,
/* presumably the CSPI1 RXDATA register (offset 0x0); the TX block below uses +0x4 — verify against the i.MX31 reference manual */
.per_address = CSPI1_BASE_ADDR,
.peripheral_type = CSPI,
.transfer_type = per_2_emi, /* peripheral to external memory (RX direction) */
.event_id = DMA_REQ_CSPI1_RX,
.bd_number = 32, /* number of SDMA buffer descriptors */
/* NOTE(review): 32-bit SDMA accesses combined with 8-bit master frames
 * assembled MSB-first by the slave is the likely origin of the observed
 * byte-order reversal — confirm before relying on hardware to fix it */
.word_size = TRANSFER_32BIT,
},
.channel_num = MXC_DMA_CHANNEL_CSPI1RX,
.chnl_priority = MXC_SDMA_DEFAULT_PRIORITY,
};
/* SDMA channel parameters for CSPI1 transmit: memory -> peripheral FIFO. */
static mxc_sdma_channel_params_t mxc_sdma_cspi1_tx_params = {
.chnl_params = {
/* FIFO level triggering the SDMA request — TODO confirm units match the RX side */
.watermark_level = 32,
/* presumably the CSPI1 TXDATA register at base + 0x4 — verify against the i.MX31 reference manual */
.per_address = CSPI1_BASE_ADDR + 0x4,
.peripheral_type = CSPI,
.transfer_type = emi_2_per, /* external memory to peripheral (TX direction) */
.event_id = DMA_REQ_CSPI1_TX,
.bd_number = 32, /* number of SDMA buffer descriptors */
.word_size = TRANSFER_32BIT, /* 32-bit SDMA accesses; keep in sync with the RX channel */
},
.channel_num = MXC_DMA_CHANNEL_CSPI1_TX,
.chnl_priority = MXC_SDMA_DEFAULT_PRIORITY,
};
{MXC_DMA_CSPI1_RX, &mxc_sdma_cspi1_rx_params},
{MXC_DMA_CSPI1_TX, &mxc_sdma_cspi1_tx_params},