Writing the Flow of a DMA Driver Framework
Original article: https://blog.csdn.net/softwoker/article/details/45114725
This article walks through the flow of writing a Xilinx DMA driver framework.
Basics of coherent and streaming DMA
- Coherent DMA memory allocation:
  void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
- Streaming DMA memory mapping:
  dma_addr_t dma_map_single(struct device *dev, void *buffer, size_t size, enum dma_data_direction direction);
  // returns the bus address of the mapping; failure should be checked with dma_mapping_error()
  // the last argument is the DMA direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL or DMA_NONE
Coherent and streaming DMA differ mainly in how the memory is allocated and in who is allowed to access it:
- A coherently allocated region can be accessed by the DMA and the CPU at the same time, so coherent DMA can be thought of as synchronous.
- A streaming region cannot be accessed by the DMA and the CPU at the same time; the CPU may only touch it after the mapping is released (or synced back), so streaming DMA can be thought of as asynchronous.
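A minimal sketch of the two allocation styles (my_dev, buf and BUF_SIZE are placeholder names, not identifiers from the driver discussed below; requires <linux/dma-mapping.h> and <linux/slab.h>):

	dma_addr_t dma_handle;
	void *buf;

	/* Coherent: the return value is the CPU virtual address, the bus address
	 * seen by the device is written into dma_handle */
	buf = dma_alloc_coherent(my_dev, BUF_SIZE, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Streaming: map an ordinary kernel buffer for device-to-memory DMA */
	buf = kmalloc(BUF_SIZE, GFP_KERNEL);
	dma_handle = dma_map_single(my_dev, buf, BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(my_dev, dma_handle))
		return -EIO;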
Driver flow framework
Initializing the DMA
- dma_cap_zero(mask);	// clear the capability mask; the macro expands to:
  // #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
  // static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
  //         { bitmap_zero(dstp->bits, DMA_TX_TYPE_END); }
- dma_cap_set(DMA_SLAVE | DMA_PRIVATE, mask);	// set the mask: the type of DMA channel requested
  direction = DMA_DEV_TO_MEM;			// DMA transfer direction
  match = (direction & 0xFF) | XILINX_DMA_IP_DMA;	// match value for channel filtering (peripheral ID)
- // Request the DMA channel
  axi_xadc_dev->rx_chan = dma_request_channel(mask, axi_adc_filter, &match);
  // the filter callback has the prototype:
  // static bool axi_adc_filter(struct dma_chan *chan, void *param)
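The filter callback only has to decide whether a candidate channel matches the requested peripheral. A minimal sketch, assuming the legacy Xilinx convention of storing the match value in chan->private (an assumption, not something this article states):

	static bool axi_adc_filter(struct dma_chan *chan, void *param)
	{
		/* compare the channel's private match value against the one requested */
		if (*((int *)chan->private) == *(int *)param)
			return true;	/* take this channel */
		return false;
	}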
Allocating memory
- Coherent memory allocation:
  dma_alloc_coherent(rx_dev->dev, AXI_XADC_BUFF_SIZE, &(axi_xadc_dev->dma_dsts), GFP_ATOMIC);
  // allocate AXI_XADC_BUFF_SIZE bytes of coherent memory for the device;
  // the call returns two things: the return value is the kernel virtual address of the buffer,
  // and the bus (physical) address is stored in axi_xadc_dev->dma_dsts for the DMA to use;
  // GFP_ATOMIC is the allocation flag
- Streaming memory mapping:
  axi_xadc_dev->dma_dsts = dma_map_single(rx_dev->dev, axi_xadc_dev->dsts, AXI_XADC_BUFF_SIZE, DMA_FROM_DEVICE);
  // DMA_FROM_DEVICE: the device writes into memory (dma_map_single takes an enum dma_data_direction)
Coherent memory: the device and the CPU can access the buffer at the same time, with no need to worry about cache effects.
Streaming memory: the CPU may only access the buffer after the DMA mapping has been released (unmapped or synced back to the CPU).
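Concretely, before the CPU reads a streaming buffer that the device has filled, ownership has to be handed back. A sketch using the names from this article (the call sequence is the generic DMA API pattern, not code from the original driver):

	/* Either unmap the buffer once the transfer is finished ... */
	dma_unmap_single(rx_dev->dev, axi_xadc_dev->dma_dsts,
			 AXI_XADC_BUFF_SIZE, DMA_FROM_DEVICE);
	/* ... the CPU may now read axi_xadc_dev->dsts ... */

	/* ... or, if the mapping is reused for the next transfer, sync it instead: */
	dma_sync_single_for_cpu(rx_dev->dev, axi_xadc_dev->dma_dsts,
				AXI_XADC_BUFF_SIZE, DMA_FROM_DEVICE);
	/* read the data here */
	dma_sync_single_for_device(rx_dev->dev, axi_xadc_dev->dma_dsts,
				   AXI_XADC_BUFF_SIZE, DMA_FROM_DEVICE);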
Configuring the channel
struct xilinx_dma_config config;	/* Xilinx-specific slave configuration (legacy driver API) */
config.coalesc = 1;	// interrupt coalescing threshold
config.delay = 0;	// interrupt delay timer
rx_dev->device_control(axi_xadc_dev->rx_chan, DMA_SLAVE_CONFIG, (unsigned long)&config);
The driver interface function behind this call is:
xilinx_dma_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, unsigned long arg)
// DMA_SLAVE_CONFIG: the slave-configuration command word
// config: the configuration parameters
Obtaining a BD descriptor
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
	struct dma_chan *chan, dma_addr_t buf, size_t len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);		/* initialize a one-entry scatterlist */
	sg_dma_address(&sg) = buf;	/* DMA address */
	sg_dma_len(&sg) = len;		/* DMA data length */

	return chan->device->device_prep_slave_sg(chan, &sg, 1,
						   dir, flags, NULL);
}
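From the client side, a call to this helper for the RX buffer prepared above could look like the following sketch (rxd reappears below; the DMA_PREP_INTERRUPT | DMA_CTRL_ACK flags and the error handling are illustrative assumptions):

	struct dma_async_tx_descriptor *rxd;

	rxd = dmaengine_prep_slave_single(axi_xadc_dev->rx_chan,
					  axi_xadc_dev->dma_dsts,
					  AXI_XADC_BUFF_SIZE,
					  DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxd)
		return -ENOMEM;	/* the channel could not provide a descriptor */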
This article instead calls the driver interface directly with its own scatterlist:
sg_init_table(&axi_xadc_dev->rx_sg, AXI_XADC_BUF_COUNT);
sg_dma_address(&axi_xadc_dev->rx_sg) = axi_xadc_dev->dma_dsts;
sg_dma_len(&axi_xadc_dev->rx_sg) = AXI_XADC_BUFF_SIZE;
rxd = rx_dev->device_prep_slave_sg(axi_xadc_dev->rx_chan,
				   &axi_xadc_dev->rx_sg,
				   AXI_XADC_BUF_COUNT,
				   DMA_DEV_TO_MEM,
				   axi_xadc_dev->flags, NULL);
The prep hook in the driver that this call lands on is:
/* Parameters:
 *   struct dma_chan *dchan:                 the DMA channel
 *   struct scatterlist *sgl:                head pointer of the scatterlist array
 *   unsigned int sg_len:                    number of entries in the scatterlist
 *   enum dma_transfer_direction direction:  transfer direction
 *   unsigned long flags:                    transfer flags
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct xilinx_dma_desc_hw *hw;
	struct xilinx_dma_transfer *t;
	struct xilinx_dma_chan *chan;
	unsigned int total_len = 0;
	unsigned int num_descs = 0;
	struct scatterlist *sg;
	dma_addr_t dma_src;
	size_t num_bytes;
	size_t sg_used;
	unsigned int i, j;

	if (!dchan)
		return NULL;

	chan = to_xilinx_chan(dchan);	/* get the Xilinx channel from the generic one */
	if (chan->direction != direction)
		return NULL;

	/* Walk the scatterlist to get the total length and the number of
	 * hardware descriptors (BDs) needed */
	for_each_sg(sgl, sg, sg_len, i) {
		total_len += sg_dma_len(sg);
		num_descs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_len);
	}

	t = xilinx_dma_alloc_transfer(chan, num_descs);	/* allocate num_descs BDs */
	if (!t)
		return NULL;

	/*
	 * Build transactions using information in the scatter gather list
	 */
	j = 0;
	for_each_sg(sgl, sg, sg_len, i) {	/* walk the scatterlist and fill in each BD */
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			num_bytes = min_t(size_t, sg_dma_len(sg) - sg_used,
					  chan->max_len);
			dma_src = sg_dma_address(sg) + sg_used;

			hw = t->descs[j].hw;
			hw->buf_addr = dma_src;
			hw->control = num_bytes;

			sg_used += num_bytes;
			j++;
		}
	}

	/* Set EOP on the last link descriptor of the new list and
	 * SOP on the first link descriptor. */
	t->descs[0].hw->control |= XILINX_DMA_BD_SOP;
	t->descs[t->num_descs - 1].hw->control |= XILINX_DMA_BD_EOP;

	t->async_tx.flags = flags;

	return &t->async_tx;
}
Setting the completion callback and submitting to the queue
rxd->callback = axi_xadc_slave_rx_callback;	// completion callback
rxd->callback_param = &axi_xadc_dev->rx_cmp;	// callback argument
axi_xadc_dev->rx_cookie = rxd->tx_submit(rxd);	// place the transaction on the DMA engine's pending queue
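rx_cmp is presumably a struct completion embedded in axi_xadc_dev; under that assumption a minimal callback looks like this:

	static void axi_xadc_slave_rx_callback(void *param)
	{
		struct completion *cmp = param;	/* &axi_xadc_dev->rx_cmp passed via callback_param */

		complete(cmp);	/* wake up the thread waiting for the RX transfer */
	}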
The driver-layer function behind tx_submit() is xilinx_dma_tx_submit(), which assigns a cookie to the descriptor and appends it to the pending list:

static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	struct xilinx_dma_transfer *t = container_of(tx,
			struct xilinx_dma_transfer, async_tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->cyclic)
		goto err;

	if (chan->err) {
		/* If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		if (!xilinx_dma_reset(chan))
			chan->err = 0;
		else
			goto err;
	}

	cookie = dma_cookie_assign(tx);	/* assign a DMA cookie to the descriptor */

	/* put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, t);	/* append the transfer descriptor to the tail of the pending queue */

	if (t->cyclic)
		chan->cyclic = true;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;

err:
	spin_unlock_irqrestore(&chan->lock, flags);
	xilinx_dma_free_transfer(chan, t);
	return -EBUSY;
}
Appending the descriptor to the pending queue
/* Append the descriptor list to the pending list */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_transfer *t)
{
	struct xilinx_dma_transfer *tail = container_of(chan->pending_list.prev,
			struct xilinx_dma_transfer, head);
	struct xilinx_dma_desc_hw *hw;

	if (!list_empty(&chan->pending_list)) {	/* if the pending list is not empty, chain onto its tail */
		/* Add the hardware descriptor to the chain of hardware descriptors
		 * that already exists in memory.
		 */
		hw = tail->descs[tail->num_descs - 1].hw;
		hw->next_desc = t->descs[0].phys;
	}

	/* Add the software descriptor and all children to the list
	 * of pending transactions
	 */
	list_add_tail(&t->head, &chan->pending_list);	/* add the descriptor to the pending queue */
}
Starting the transfer
dma_async_issue_pending(axi_xadc_dev->rx_chan);
The driver interface function this ends up calling is:
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_transfer *last_transfer, *first_transfer;
	dma_addr_t first_addr, last_addr;
	struct xilinx_dma_desc_hw *hw;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	if (list_empty(&chan->pending_list))	/* nothing pending: return immediately (not the case here) */
		goto out_unlock;

	if (chan->err) {
		dev_err(chan->dev, "Failed to start transfer\n");
		goto out_unlock;
	}

	/* If hardware is busy, cannot submit */
	if (xilinx_dma_is_running(chan) && !xilinx_dma_is_idle(chan)) {
		/* the channel is running and not idle: just keep interrupts enabled */
		DMA_OUT(&chan->regs->cr,
			DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
		goto out_unlock;
	}

	if (xilinx_dma_has_errors(chan))
		xilinx_dma_reset(chan);

	/* If hardware is idle, then all descriptors on active list are
	 * done, start new transfers
	 */
	dma_halt(chan);	/* the channel is already idle and halted, so halting it again here has little effect */

	if (chan->err)
		goto out_unlock;

	first_transfer = list_first_entry(&chan->pending_list,
			struct xilinx_dma_transfer, head);	/* first descriptor on the pending queue */

	if (chan->has_SG) {	/* scatter-gather mode */
		uint32_t status;

		last_transfer = list_entry(chan->pending_list.prev,
				struct xilinx_dma_transfer, head);	/* last descriptor on the pending queue */

		first_addr = first_transfer->descs[0].phys;	/* physical address of the first descriptor */
		last_addr = last_transfer->descs[last_transfer->num_descs - 1].phys;	/* physical address of the last descriptor */

		DMA_OUT(&chan->regs->cdr, first_addr);	/* write the first descriptor address into the current descriptor register */

		dma_start(chan);	/* set the start bit in the control register */
		if (chan->err)
			goto out_unlock;

		/* move the pending descriptors onto the active list and reinitialize the pending list */
		list_splice_tail_init(&chan->pending_list, &chan->active_list);

		/* Clear pending interrupts and enable interrupts */
		DMA_OUT(&chan->regs->sr, XILINX_DMA_XR_IRQ_ALL_MASK);	/* write 1s to the status register to clear pending interrupt bits */
		DMA_OUT(&chan->regs->cr,
			DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);	/* enable interrupts in the control register */
		status = DMA_IN(&chan->regs->sr);

		/* Update tail ptr register and start the transfer */
		DMA_OUT(&chan->regs->tdr, last_addr);	/* writing the last descriptor address into the tail descriptor register triggers the transfer */
	} else {	/* simple (non-SG) mode; this branch is not taken in this setup */
		/* In simple mode */
		list_move_tail(&first_transfer->head, &chan->active_list);

		dma_start(chan);
		if (chan->err)
			goto out_unlock;

		hw = first_transfer->descs[0].hw;

		/* Enable interrupts */
		DMA_OUT(&chan->regs->cr,
			DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);

		DMA_OUT(&chan->regs->src, hw->buf_addr);

		/* Start the transfer */
		DMA_OUT(&chan->regs->btt_ref,
			hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}
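Back in the client code, once dma_async_issue_pending() has been called, the usual pattern is to sleep on the completion signalled by the callback and then query the cookie. A sketch with an arbitrary 5 s timeout (the error handling is illustrative):

	enum dma_status status;
	unsigned long tmo;

	tmo = wait_for_completion_timeout(&axi_xadc_dev->rx_cmp,
					  msecs_to_jiffies(5000));
	status = dma_async_is_tx_complete(axi_xadc_dev->rx_chan,
					  axi_xadc_dev->rx_cookie, NULL, NULL);
	if (tmo == 0)
		dev_err(rx_dev->dev, "RX transfer timed out\n");
	else if (status != DMA_COMPLETE)	/* DMA_SUCCESS on older kernels */
		dev_err(rx_dev->dev, "RX transfer not complete, status %d\n", status);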
When the DMA finishes receiving data it raises an interrupt; the interrupt service routine is:
static irqreturn_t dma_intr_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 stat;

	/* Free the BD buffers on the removed list. In the driver written here the
	 * removed list is never used (xilinx_dma_terminate_all() is not called);
	 * xilinx_dma_terminate_all() would move the active list onto the removed
	 * list and then reinitialize the active list. */
	xilinx_dma_free_transfer_list(chan, &chan->removed_list);

	stat = DMA_IN(&chan->regs->sr);	/* read the status register */
	if (!(stat & XILINX_DMA_XR_IRQ_ALL_MASK))
		return IRQ_NONE;

	/* Ack the interrupts */
	DMA_OUT(&chan->regs->sr, XILINX_DMA_XR_IRQ_ALL_MASK);	/* write 1s to clear the pending interrupt bits */

	if (stat & XILINX_DMA_XR_IRQ_ERROR_MASK) {
		dev_err(chan->dev, "Channel %x has errors %x, cr %x, cdr %x tdr %x\n",
			(unsigned int)chan, (unsigned int)stat,
			(unsigned int)DMA_IN(&chan->regs->cr),
			(unsigned int)DMA_IN(&chan->regs->cdr),
			(unsigned int)DMA_IN(&chan->regs->tdr));
		chan->err = 1;
		dma_halt(chan);
	}

	/* Device takes too long to do the transfer when user requires
	 * responsiveness
	 */
	if (stat & XILINX_DMA_XR_IRQ_DELAY_MASK)	/* inter-packet delay timeout */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");

	if (stat & XILINX_DMA_XR_IRQ_IOC_MASK) {	/* transfer-complete interrupt */
		xilinx_dma_update_completed_cookie(chan);	/* update the channel's completed cookie for synchronization */
		chan->start_transfer(chan);	/* keep transferring descriptors waiting on the pending queue (used in cyclic mode) */
	}

	/* The transfer is done: schedule the bottom half to run the callback and
	 * free the descriptors */
	tasklet_schedule(&chan->tasklet);

	return IRQ_HANDLED;
}
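The tasklet scheduled above is normally bound to the cleanup routine when the channel is set up; a sketch of that wiring (the dma_do_tasklet wrapper name is an assumption, only tasklet_init()/tasklet_schedule() are standard kernel API):

	static void dma_do_tasklet(unsigned long data)
	{
		struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

		xilinx_chan_desc_cleanup(chan);	/* run the callbacks and clean up finished BDs */
	}

	/* in the channel probe/setup path: */
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);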
The bottom-half handler run by the tasklet (it mainly cleans up finished descriptors and invokes the callbacks) is:
static void xilinx_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_transfer *t;
	dma_async_tx_callback callback;
	void *callback_param;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	/* terminate_all might be called from the callback, so we can't iterate over
	 * the list using list_for_each_entry_safe */
	while (!list_empty(&chan->active_list)) {	/* as long as the active list is not empty */
		t = list_first_entry(&chan->active_list,
				struct xilinx_dma_transfer, head);	/* first descriptor on the active list */

		if (t->cyclic) {	/* cyclic mode (not used in this setup) */
			xilinx_dma_chan_handle_cyclic(chan, t, &flags);	/* cyclic transfers are handled here */
			break;
		}

		if (xilinx_dma_desc_status(chan, t) == DMA_IN_PROGRESS)	/* the channel is still transferring this descriptor */
			break;

		list_del(&t->head);	/* remove the xilinx_dma_transfer node from the active list */

		callback = t->async_tx.callback;	/* completion callback */
		callback_param = t->async_tx.callback_param;	/* callback argument */
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);	/* invoke the callback */
			spin_lock_irqsave(&chan->lock, flags);
		}

		dma_run_dependencies(&t->async_tx);	/* run dependent transactions on other channels (multi-channel transfers) */
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}