Hello NXP:
My project uses SDMA to transfer FPGA data over the EIM interface.
I want to use an external SDMA request to pause the transfer when the FPGA data is not ready.
How can I use the external SDMA request pins to start a logical channel transfer over the EIM interface?
My EIM configuration:
Multiplexed address/data mode, 16-bit
Asynchronous read accesses
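From the reference manual my understanding is that an external DMA request pin can be routed to an SDMA event (with the pad muxed to its SDMA external-event function in the IOMUX), and that on the dmaengine side this should be a slave DMA_DEV_TO_MEM transfer rather than a memcpy, so that the SDMA script only moves data while the request line is asserted. Below is a minimal sketch of what I think the channel setup would look like. This is only my guess: IMX_DMATYPE_EXT as the peripheral type is my assumption, FPGA_SDMA_EVENT is just a placeholder for whatever event number the request pin is wired to, and DMA_SOURCE_ADDR / DMA_DATA_LENGTH are the defines from the module further down.

/* Sketch only: bind an SDMA channel to an external DMA request and
 * read from the EIM-mapped FPGA window. FPGA_SDMA_EVENT is a placeholder
 * for the board-specific SDMA event number of the external request pin.
 */
#define FPGA_SDMA_EVENT 0 /* assumption: event routed to the request pin */

static struct imx_dma_data ext_dma_data = {
    /* passed to dma_request_channel() through the same filter as below,
     * but with an external request instead of IMX_DMATYPE_MEMORY */
    .peripheral_type = IMX_DMATYPE_EXT,
    .dma_request = FPGA_SDMA_EVENT,
    .priority = DMA_PRIO_HIGH,
};

static int start_ext_request_transfer(struct dma_chan *chan, dma_addr_t dst)
{
    struct dma_slave_config cfg = {
        .direction = DMA_DEV_TO_MEM,
        .src_addr = DMA_SOURCE_ADDR,            /* FPGA window on EIM */
        .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, /* 16-bit EIM data port */
        .src_maxburst = 16,
    };
    struct dma_async_tx_descriptor *desc;

    dmaengine_slave_config(chan, &cfg);

    /* One shot of DMA_DATA_LENGTH bytes into DDR; the external request
     * should pace the script when the FPGA has no data ready. */
    desc = dmaengine_prep_slave_single(chan, dst, DMA_DATA_LENGTH,
            DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
    if (!desc)
        return -EINVAL;

    desc->callback = dma_memcpy_callback_from_fpga; /* completion callback from the module below */
    dmaengine_submit(desc);
    dma_async_issue_pending(chan);
    return 0;
}

Is something like this the intended way to use the external request pins, or does an EIM read paced by an external request need a different peripheral type or script?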
The code I have so far (plain memcpy transfers, no external request yet) is as follows:
//#include <linux/config.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/kthread.h>
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/time.h>
#define DMA_SOURCE_ADDR 0x08000000  /* FPGA data window on the EIM bus (physical) */
#define DMA_START_ADDR 0x30000000   /* landing buffer, destination of the EIM copy (physical) */
#define DMA_DATA_LENGTH 0x00020000  /* 128 KiB per transfer */
#define BUFF_START_ADDR 0x40000000  /* store buffer, destination of the second copy (physical) */
#define BUFF_DATA_LENGTH 0x00020000
struct timespec ts_start;
struct timespec ts_end;
struct timeval tv_start;
struct timeval tv_end;
int cnt;
struct thread_data {
    int nr;
    pid_t pid;
    char *name;
};

struct dma_transfer {
    struct dma_chan *ch;
    struct dma_slave_config dma_m2m_config;
    struct dma_async_tx_descriptor *dma_m2m_desc;
    struct completion dma_m2m_ok;
    unsigned int phys_from;
    unsigned int phys_to;
};
static int dmatest_work(void *data);
static void dma_memcpy_callback_from_fpga(void *data);
static int dma_mem_transfer_to_store_buffer(void);
static unsigned int data_addr;
static unsigned int buff_addr;
static struct dma_transfer dma_data;
static struct dma_transfer dma_buffer;
static bool dma_m2m_filter(struct dma_chan *chan, void *param)
{
    /* Only accept a general-purpose SDMA channel and hand it our imx_dma_data. */
    if (!imx_dma_is_general_purpose(chan))
        return false;
    chan->private = param;
    return true;
}
static void dma_memcpy_callback_to_buffer(void *data)
{
    /* Second copy (landing buffer -> store buffer) finished; nothing to do yet. */
}

static void dma_to_store_buffer(void)
{
    struct dma_transfer *dma = &dma_buffer;

    dma->dma_m2m_config.direction = DMA_MEM_TO_MEM;
    dma->dma_m2m_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    dmaengine_slave_config(dma->ch, &dma->dma_m2m_config);

    dma->dma_m2m_desc = dma->ch->device->device_prep_dma_memcpy(dma->ch,
            dma->phys_to, dma->phys_from,
            BUFF_DATA_LENGTH, 0);
    dma->dma_m2m_desc->callback = dma_memcpy_callback_to_buffer;
    dmaengine_submit(dma->dma_m2m_desc);
    dma_async_issue_pending(dma->ch);
}
/* Request the channel used for the landing buffer -> store buffer copy. */
static int dma_mem_transfer_to_store_buffer(void)
{
    struct dma_transfer *dma = &dma_buffer;
    dma_cap_mask_t dma_m2m_mask;
    struct imx_dma_data m2m_dma_data = {0};

    dma_cap_zero(dma_m2m_mask);
    dma_cap_set(DMA_SLAVE, dma_m2m_mask);
    m2m_dma_data.peripheral_type = IMX_DMATYPE_MEMORY;
    m2m_dma_data.priority = DMA_PRIO_HIGH;

    memset(dma, 0, sizeof(struct dma_transfer));
    dma->phys_from = DMA_START_ADDR;
    dma->phys_to = BUFF_START_ADDR;
    dma->ch = dma_request_channel(dma_m2m_mask, dma_m2m_filter, &m2m_dma_data);
    if (!dma->ch) {
        printk(KERN_ERR "Could not get DMA with dma_request_channel()\n");
        return -ENOMEM;
    }
    return 0;
}
/*
 * Called by the SDMA interrupt handler when the EIM -> landing buffer
 * copy is complete: start the copy into the store buffer and
 * immediately queue the next EIM transfer.
 */
static void dma_memcpy_callback_from_fpga(void *data)
{
    dma_to_store_buffer();

    dma_data.dma_m2m_config.direction = DMA_MEM_TO_MEM;
    dma_data.dma_m2m_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    dmaengine_slave_config(dma_data.ch, &dma_data.dma_m2m_config);

    dma_data.dma_m2m_desc = dma_data.ch->device->device_prep_dma_memcpy(dma_data.ch,
            dma_data.phys_to, dma_data.phys_from,
            DMA_DATA_LENGTH, 0);
    dma_data.dma_m2m_desc->callback = dma_memcpy_callback_from_fpga;
    dmaengine_submit(dma_data.dma_m2m_desc);
    dma_async_issue_pending(dma_data.ch);
}
/* Request the channel for the EIM -> landing buffer copy and start the first transfer. */
static int dma_mem_transfer_from_fpga(void)
{
    struct dma_transfer *dma = &dma_data;
    dma_cap_mask_t dma_m2m_mask;
    struct imx_dma_data m2m_dma_data = {0};

    dma_cap_zero(dma_m2m_mask);
    dma_cap_set(DMA_SLAVE, dma_m2m_mask);
    m2m_dma_data.peripheral_type = IMX_DMATYPE_MEMORY;
    m2m_dma_data.priority = DMA_PRIO_HIGH;

    memset(dma, 0, sizeof(struct dma_transfer));
    dma->phys_from = DMA_SOURCE_ADDR;
    dma->phys_to = DMA_START_ADDR;
    dma->ch = dma_request_channel(dma_m2m_mask, dma_m2m_filter, &m2m_dma_data);
    if (!dma->ch) {
        printk(KERN_ERR "Could not get DMA with dma_request_channel()\n");
        return -ENOMEM;
    }

    dma->dma_m2m_config.direction = DMA_MEM_TO_MEM;
    dma->dma_m2m_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    dmaengine_slave_config(dma->ch, &dma->dma_m2m_config);

    dma->dma_m2m_desc = dma->ch->device->device_prep_dma_memcpy(dma->ch,
            dma->phys_to, dma->phys_from,
            DMA_DATA_LENGTH, 0);
    dma->dma_m2m_desc->callback = dma_memcpy_callback_from_fpga;
    dmaengine_submit(dma->dma_m2m_desc);
    dma_async_issue_pending(dma->ch);

    do_gettimeofday(&tv_start);
    ts_start = current_kernel_time();
    return 0;
}
static int dmatest_work(void *data)
{
    /* Request the store-buffer channel first, so it is already available
     * when the first EIM transfer completes and its callback starts the
     * second copy. */
    dma_mem_transfer_to_store_buffer();
    dma_mem_transfer_from_fpga();
    return 0;
}
static char *name = "dmatest";
static int __init dmatest_init(void)
{
    struct thread_data *thread;

    thread = kmalloc(sizeof(struct thread_data), GFP_KERNEL);
    if (!thread)
        return -ENOMEM;
    memset(thread, 0, sizeof(struct thread_data));
    thread->nr = 1;
    thread->name = name;

    request_mem_region(DMA_START_ADDR, DMA_DATA_LENGTH, "dma_data");
    data_addr = (unsigned int)ioremap(DMA_START_ADDR, DMA_DATA_LENGTH);
    request_mem_region(BUFF_START_ADDR, BUFF_DATA_LENGTH, "buffer_data");
    buff_addr = (unsigned int)ioremap(BUFF_START_ADDR, BUFF_DATA_LENGTH);

    /* Schedule the test thread that sets up and starts the transfers. */
    kthread_run(dmatest_work, thread, thread->name);

    printk(KERN_INFO "dma module is running!\n");
    return 0;
}
static void __exit dmatest_exit(void)
{
    /* Nothing is released yet: channels, mappings and mem regions stay allocated. */
}
MODULE_LICENSE("Dual BSD/GPL");
module_init(dmatest_init);
module_exit(dmatest_exit);