xref: /openwifi/driver/xilinx_dma/xilinx_dma.c (revision 838a9007cf9f63d72c4524b84ee37e8c5fd046bc)
1 /*
2  * DMA driver for Xilinx Video DMA Engine
3  *
4  * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
5  *
6  * Based on the Freescale DMA driver.
7  *
8  * Modified by Xianjun Jiao. [email protected]; [email protected]
9  *
10  * Description:
11  * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
12  * core that provides high-bandwidth direct memory access between memory
13  * and AXI4-Stream type video target peripherals. The core provides efficient
14  * two dimensional DMA operations with independent asynchronous read (S2MM)
15  * and write (MM2S) channel operation. It can be configured to have either
16  * one channel or two channels. If configured as two channels, one is to
17  * transmit to the video device (MM2S) and another is to receive from the
18  * video device (S2MM). Initialization, status, interrupt and management
19  * registers are accessed through an AXI4-Lite slave interface.
20  *
21  * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
22  * provides high-bandwidth one dimensional direct memory access between memory
23  * and AXI4-Stream target peripherals. It supports one receive and one
24  * transmit channel, both of them optional at synthesis time.
25  *
26  * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
27  * Access (DMA) between a memory-mapped source address and a memory-mapped
28  * destination address.
29  *
30  * This program is free software: you can redistribute it and/or modify
31  * it under the terms of the GNU General Public License as published by
32  * the Free Software Foundation, either version 2 of the License, or
33  * (at your option) any later version.
34  */
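/*
 * Illustrative consumer sketch (not part of this file): channels registered
 * by this driver are used through the generic dmaengine client API. The
 * channel name and callback below are the consumer's own assumptions.
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *d;
 *
 *	chan = dma_request_chan(dev, "rx_dma_s2mm");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	d = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *				      DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	d->callback = rx_done_cb;
 *	dmaengine_submit(d);
 *	dma_async_issue_pending(chan);
 */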
35 
36 #include <linux/bitops.h>
37 #include <linux/dmapool.h>
38 #include <linux/dma/xilinx_dma.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/io.h>
42 #include <linux/iopoll.h>
43 #include <linux/module.h>
44 #include <linux/of_address.h>
45 #include <linux/of_dma.h>
46 #include <linux/of_platform.h>
47 #include <linux/of_irq.h>
48 #include <linux/slab.h>
49 #include <linux/clk.h>
50 #include <linux/io-64-nonatomic-lo-hi.h>
51 
52 #include "../dmaengine.h"
53 
54 /* Register/Descriptor Offsets */
55 #define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
56 #define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
57 #define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
58 #define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0
59 
60 /* Control Registers */
61 #define XILINX_DMA_REG_DMACR			0x0000
62 #define XILINX_DMA_DMACR_DELAY_MAX		0xff
63 #define XILINX_DMA_DMACR_DELAY_SHIFT		24
64 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
65 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
66 #define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
67 #define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
68 #define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
69 #define XILINX_DMA_DMACR_MASTER_SHIFT		8
70 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT	5
71 #define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
72 #define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
73 #define XILINX_DMA_DMACR_RESET			BIT(2)
74 #define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
75 #define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
76 #define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
77 
78 #define XILINX_DMA_REG_DMASR			0x0004
79 #define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
80 #define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
81 #define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
82 #define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
83 #define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
84 #define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
85 #define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
86 #define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
87 #define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
88 #define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
89 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
90 #define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
91 #define XILINX_DMA_DMASR_IDLE			BIT(1)
92 #define XILINX_DMA_DMASR_HALTED		BIT(0)
93 #define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
94 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)
95 
96 #define XILINX_DMA_REG_CURDESC			0x0008
97 #define XILINX_DMA_REG_TAILDESC		0x0010
98 #define XILINX_DMA_REG_REG_INDEX		0x0014
99 #define XILINX_DMA_REG_FRMSTORE		0x0018
100 #define XILINX_DMA_REG_THRESHOLD		0x001c
101 #define XILINX_DMA_REG_FRMPTR_STS		0x0024
102 #define XILINX_DMA_REG_PARK_PTR		0x0028
103 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
104 #define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
105 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
106 #define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
107 #define XILINX_DMA_REG_VDMA_VERSION		0x002c
108 
109 /* Register Direct Mode Registers */
110 #define XILINX_DMA_REG_VSIZE			0x0000
111 #define XILINX_DMA_REG_HSIZE			0x0004
112 
113 #define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
114 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
115 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0
116 
117 #define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
118 #define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))
119 
120 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
121 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)
122 
123 /* HW specific definitions */
124 #define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20
125 
126 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
127 		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
128 		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
129 		 XILINX_DMA_DMASR_ERR_IRQ)
130 
131 #define XILINX_DMA_DMASR_ALL_ERR_MASK	\
132 		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
133 		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
134 		 XILINX_DMA_DMASR_SG_DEC_ERR | \
135 		 XILINX_DMA_DMASR_SG_SLV_ERR | \
136 		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
137 		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
138 		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
139 		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
140 		 XILINX_DMA_DMASR_DMA_INT_ERR)
141 
142 /*
143  * Recoverable errors are DMA Internal error, SOF Early, EOF Early
144  * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
145  * is enabled in the h/w system.
146  */
147 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
148 		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
149 		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
150 		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
151 		 XILINX_DMA_DMASR_DMA_INT_ERR)
152 
153 /* Axi VDMA Flush on Fsync bits */
154 #define XILINX_DMA_FLUSH_S2MM		3
155 #define XILINX_DMA_FLUSH_MM2S		2
156 #define XILINX_DMA_FLUSH_BOTH		1
157 
158 /* Delay loop counter to prevent hardware failure */
159 #define XILINX_DMA_LOOP_COUNT		1000000
160 
161 /* AXI DMA Specific Registers/Offsets */
162 #define XILINX_DMA_REG_SRCDSTADDR	0x18
163 #define XILINX_DMA_REG_BTT		0x28
164 
165 /* AXI DMA Specific Masks/Bit fields */
166 #define XILINX_DMA_MAX_TRANS_LEN_MIN	8
167 #define XILINX_DMA_MAX_TRANS_LEN_MAX	23
168 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
169 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
170 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
171 #define XILINX_DMA_CR_COALESCE_SHIFT	16
172 #define XILINX_DMA_BD_SOP		BIT(27)
173 #define XILINX_DMA_BD_EOP		BIT(26)
174 #define XILINX_DMA_COALESCE_MAX		255
175 #define XILINX_DMA_NUM_DESCS		255
176 #define XILINX_DMA_NUM_APP_WORDS	5
177 
178 /* Multi-Channel DMA Descriptor offsets */
179 #define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
180 #define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)
181 
182 /* Multi-Channel DMA Masks/Shifts */
183 #define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
184 #define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
185 #define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
186 #define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
187 #define XILINX_DMA_BD_STRIDE_SHIFT	0
188 #define XILINX_DMA_BD_VSIZE_SHIFT	19
189 
190 /* AXI CDMA Specific Registers/Offsets */
191 #define XILINX_CDMA_REG_SRCADDR		0x18
192 #define XILINX_CDMA_REG_DSTADDR		0x20
193 
194 /* AXI CDMA Specific Masks */
195 #define XILINX_CDMA_CR_SGMODE          BIT(3)
196 
197 /**
198  * struct xilinx_vdma_desc_hw - Hardware Descriptor
199  * @next_desc: Next Descriptor Pointer @0x00
200  * @pad1: Reserved @0x04
201  * @buf_addr: Buffer address @0x08
202  * @buf_addr_msb: MSB of Buffer address @0x0C
203  * @vsize: Vertical Size @0x10
204  * @hsize: Horizontal Size @0x14
205  * @stride: Number of bytes between the first
206  *	    pixels of each horizontal line @0x18
207  */
208 struct xilinx_vdma_desc_hw {
209 	u32 next_desc;
210 	u32 pad1;
211 	u32 buf_addr;
212 	u32 buf_addr_msb;
213 	u32 vsize;
214 	u32 hsize;
215 	u32 stride;
216 } __aligned(64);
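/*
 * Illustrative only (not in the original driver): the "@0x.." offsets in the
 * kernel-doc above correspond one-to-one to the struct layout and could be
 * verified at build time from any function scope, e.g.:
 *
 *	BUILD_BUG_ON(offsetof(struct xilinx_vdma_desc_hw, buf_addr) != 0x08);
 *	BUILD_BUG_ON(offsetof(struct xilinx_vdma_desc_hw, vsize) != 0x10);
 *	BUILD_BUG_ON(offsetof(struct xilinx_vdma_desc_hw, stride) != 0x18);
 */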
217 
218 /**
219  * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
220  * @next_desc: Next Descriptor Pointer @0x00
221  * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
222  * @buf_addr: Buffer address @0x08
223  * @buf_addr_msb: MSB of Buffer address @0x0C
224  * @mcdma_control: Control field for mcdma @0x10
225  * @vsize_stride: Vsize and Stride field for mcdma @0x14
226  * @control: Control field @0x18
227  * @status: Status field @0x1C
228  * @app: APP Fields @0x20 - 0x30
229  */
230 struct xilinx_axidma_desc_hw {
231 	u32 next_desc;
232 	u32 next_desc_msb;
233 	u32 buf_addr;
234 	u32 buf_addr_msb;
235 	u32 mcdma_control;
236 	u32 vsize_stride;
237 	u32 control;
238 	u32 status;
239 	u32 app[XILINX_DMA_NUM_APP_WORDS];
240 } __aligned(64);
241 
242 /**
243  * struct xilinx_cdma_desc_hw - Hardware Descriptor
244  * @next_desc: Next Descriptor Pointer @0x00
245  * @next_desc_msb: Next Descriptor Pointer MSB @0x04
246  * @src_addr: Source address @0x08
247  * @src_addr_msb: Source address MSB @0x0C
248  * @dest_addr: Destination address @0x10
249  * @dest_addr_msb: Destination address MSB @0x14
250  * @control: Control field @0x18
251  * @status: Status field @0x1C
252  */
253 struct xilinx_cdma_desc_hw {
254 	u32 next_desc;
255 	u32 next_desc_msb;
256 	u32 src_addr;
257 	u32 src_addr_msb;
258 	u32 dest_addr;
259 	u32 dest_addr_msb;
260 	u32 control;
261 	u32 status;
262 } __aligned(64);
263 
264 /**
265  * struct xilinx_vdma_tx_segment - Descriptor segment
266  * @hw: Hardware descriptor
267  * @node: Node in the descriptor segments list
268  * @phys: Physical address of segment
269  */
270 struct xilinx_vdma_tx_segment {
271 	struct xilinx_vdma_desc_hw hw;
272 	struct list_head node;
273 	dma_addr_t phys;
274 } __aligned(64);
275 
276 /**
277  * struct xilinx_axidma_tx_segment - Descriptor segment
278  * @hw: Hardware descriptor
279  * @node: Node in the descriptor segments list
280  * @phys: Physical address of segment
281  */
282 struct xilinx_axidma_tx_segment {
283 	struct xilinx_axidma_desc_hw hw;
284 	struct list_head node;
285 	dma_addr_t phys;
286 } __aligned(64);
287 
288 /**
289  * struct xilinx_cdma_tx_segment - Descriptor segment
290  * @hw: Hardware descriptor
291  * @node: Node in the descriptor segments list
292  * @phys: Physical address of segment
293  */
294 struct xilinx_cdma_tx_segment {
295 	struct xilinx_cdma_desc_hw hw;
296 	struct list_head node;
297 	dma_addr_t phys;
298 } __aligned(64);
299 
300 /**
301  * struct xilinx_dma_tx_descriptor - Per Transaction structure
302  * @async_tx: Async transaction descriptor
303  * @segments: TX segments list
304  * @node: Node in the channel descriptors list
305  * @cyclic: Check for cyclic transfers.
306  */
307 struct xilinx_dma_tx_descriptor {
308 	struct dma_async_tx_descriptor async_tx;
309 	struct list_head segments;
310 	struct list_head node;
311 	bool cyclic;
312 };
313 
314 /**
315  * struct xilinx_dma_chan - Driver specific DMA channel structure
316  * @xdev: Driver specific device structure
317  * @ctrl_offset: Control registers offset
318  * @desc_offset: TX descriptor registers offset
319  * @lock: Descriptor operation lock
320  * @pending_list: Descriptors waiting
321  * @active_list: Descriptors ready to submit
322  * @done_list: Complete descriptors
323  * @free_seg_list: Free descriptors
324  * @common: DMA common channel
325  * @desc_pool: Descriptors pool
326  * @dev: The dma device
327  * @irq: Channel IRQ
328  * @id: Channel ID
329  * @direction: Transfer direction
330  * @num_frms: Number of frames
331  * @has_sg: Support scatter transfers
332  * @cyclic: Check for cyclic transfers.
333  * @genlock: Support genlock mode
334  * @err: Channel has errors
335  * @idle: Check for channel idle
336  * @tasklet: Cleanup work after irq
337  * @config: Device configuration info
338  * @flush_on_fsync: Flush on Frame sync
339  * @desc_pendingcount: Descriptor pending count
340  * @ext_addr: Indicates 64 bit addressing is supported by dma channel
341  * @desc_submitcount: Descriptor h/w submitted count
342  * @residue: Residue for AXI DMA
343  * @seg_v: Statically allocated segments base
344  * @seg_p: Physical allocated segments base
345  * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
346  * @cyclic_seg_p: Physical allocated segments base for cyclic dma
347  * @start_transfer: Differentiate b/w DMA IP's transfer
348  * @stop_transfer: Differentiate b/w DMA IP's quiesce
349  * @tdest: TDEST value for mcdma
350  * @has_vflip: S2MM vertical flip
351  */
352 struct xilinx_dma_chan {
353 	struct xilinx_dma_device *xdev;
354 	u32 ctrl_offset;
355 	u32 desc_offset;
356 	spinlock_t lock;
357 	struct list_head pending_list;
358 	struct list_head active_list;
359 	struct list_head done_list;
360 	struct list_head free_seg_list;
361 	struct dma_chan common;
362 	struct dma_pool *desc_pool;
363 	struct device *dev;
364 	int irq;
365 	int id;
366 	enum dma_transfer_direction direction;
367 	int num_frms;
368 	bool has_sg;
369 	bool cyclic;
370 	bool genlock;
371 	bool err;
372 	bool idle;
373 	struct tasklet_struct tasklet;
374 	struct xilinx_vdma_config config;
375 	bool flush_on_fsync;
376 	u32 desc_pendingcount;
377 	bool ext_addr;
378 	u32 desc_submitcount;
379 	u32 residue;
380 	struct xilinx_axidma_tx_segment *seg_v;
381 	dma_addr_t seg_p;
382 	struct xilinx_axidma_tx_segment *cyclic_seg_v;
383 	dma_addr_t cyclic_seg_p;
384 	void (*start_transfer)(struct xilinx_dma_chan *chan);
385 	int (*stop_transfer)(struct xilinx_dma_chan *chan);
386 	u16 tdest;
387 	bool has_vflip;
388 	u32 buf_idx; // incremented by 1 on each completion irq; in cyclic mode this index is returned as the residue via device_tx_status/xilinx_dma_tx_status
389 };
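/*
 * Consumer-side sketch (illustrative, not part of this driver): because
 * xilinx_dma_tx_status() below reports buf_idx as the residue for cyclic
 * channels, a client can poll which period completed last. The cookie and
 * chan variables are the client's own.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_ERROR)
 *		pr_debug("last completed buffer index: %u\n", state.residue);
 */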
390 
391 /**
392  * enum xdma_ip_type - DMA IP type.
393  *
394  * @XDMA_TYPE_AXIDMA: Axi dma ip.
395  * @XDMA_TYPE_CDMA: Axi cdma ip.
396  * @XDMA_TYPE_VDMA: Axi vdma ip.
397  *
398  */
399 enum xdma_ip_type {
400 	XDMA_TYPE_AXIDMA = 0,
401 	XDMA_TYPE_CDMA,
402 	XDMA_TYPE_VDMA,
403 };
404 
405 struct xilinx_dma_config {
406 	enum xdma_ip_type dmatype;
407 	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
408 			struct clk **tx_clk, struct clk **txs_clk,
409 			struct clk **rx_clk, struct clk **rxs_clk);
410 };
411 
412 /**
413  * struct xilinx_dma_device - DMA device structure
414  * @regs: I/O mapped base address
415  * @dev: Device Structure
416  * @common: DMA device structure
417  * @chan: Driver specific DMA channel
418  * @has_sg: Specifies whether Scatter-Gather is present or not
419  * @mcdma: Specifies whether Multi-Channel is present or not
420  * @flush_on_fsync: Flush on frame sync
421  * @ext_addr: Indicates 64 bit addressing is supported by dma device
422  * @pdev: Platform device structure pointer
423  * @dma_config: DMA config structure
424  * @axi_clk: DMA Axi4-lite interface clock
425  * @tx_clk: DMA mm2s clock
426  * @txs_clk: DMA mm2s stream clock
427  * @rx_clk: DMA s2mm clock
428  * @rxs_clk: DMA s2mm stream clock
429  * @nr_channels: Number of channels DMA device supports
430  * @chan_id: DMA channel identifier
431  * @max_buffer_len: Max buffer length
432  */
433 struct xilinx_dma_device {
434 	void __iomem *regs;
435 	struct device *dev;
436 	struct dma_device common;
437 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
438 	bool has_sg;
439 	bool mcdma;
440 	u32 flush_on_fsync;
441 	bool ext_addr;
442 	struct platform_device  *pdev;
443 	const struct xilinx_dma_config *dma_config;
444 	struct clk *axi_clk;
445 	struct clk *tx_clk;
446 	struct clk *txs_clk;
447 	struct clk *rx_clk;
448 	struct clk *rxs_clk;
449 	u32 nr_channels;
450 	u32 chan_id;
451 	u32 max_buffer_len;
452 };
453 
454 /* Macros */
455 #define to_xilinx_chan(chan) \
456 	container_of(chan, struct xilinx_dma_chan, common)
457 #define to_dma_tx_descriptor(tx) \
458 	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
459 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
460 	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
461 			   cond, delay_us, timeout_us)
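/*
 * xilinx_dma_poll_timeout() is a thin wrapper around readl_poll_timeout();
 * it is used later in this file, for example when halting a channel:
 *
 *	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
 *				      val & XILINX_DMA_DMASR_HALTED, 0,
 *				      XILINX_DMA_LOOP_COUNT);
 */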
462 
463 /* IO accessors */
464 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
465 {
466 	return ioread32(chan->xdev->regs + reg);
467 }
468 
469 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
470 {
471 	iowrite32(value, chan->xdev->regs + reg);
472 }
473 
474 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
475 				   u32 value)
476 {
477 	dma_write(chan, chan->desc_offset + reg, value);
478 }
479 
480 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
481 {
482 	return dma_read(chan, chan->ctrl_offset + reg);
483 }
484 
485 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
486 				   u32 value)
487 {
488 	dma_write(chan, chan->ctrl_offset + reg, value);
489 }
490 
491 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
492 				 u32 clr)
493 {
494 	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
495 }
496 
497 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
498 				 u32 set)
499 {
500 	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
501 }
502 
503 /**
504  * vdma_desc_write_64 - 64-bit descriptor write
505  * @chan: Driver specific VDMA channel
506  * @reg: Register to write
507  * @value_lsb: lower address of the descriptor.
508  * @value_msb: upper address of the descriptor.
509  *
510  * Since the vdma driver may write to a register offset that is not a
511  * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
512  * 32-bit writes instead of a single 64-bit register write.
513  */
514 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
515 				      u32 value_lsb, u32 value_msb)
516 {
517 	/* Write the lsb 32 bits */
518 	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
519 
520 	/* Write the msb 32 bits */
521 	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
522 }
523 
524 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
525 {
526 	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
527 }
528 
529 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
530 				dma_addr_t addr)
531 {
532 	if (chan->ext_addr)
533 		dma_writeq(chan, reg, addr);
534 	else
535 		dma_ctrl_write(chan, reg, addr);
536 }
537 
538 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
539 				     struct xilinx_axidma_desc_hw *hw,
540 				     dma_addr_t buf_addr, size_t sg_used,
541 				     size_t period_len)
542 {
543 	if (chan->ext_addr) {
544 		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
545 		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
546 						 period_len);
547 	} else {
548 		hw->buf_addr = buf_addr + sg_used + period_len;
549 	}
550 }
551 
552 /* -----------------------------------------------------------------------------
553  * Descriptors and segments alloc and free
554  */
555 
556 /**
557  * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
558  * @chan: Driver specific DMA channel
559  *
560  * Return: The allocated segment on success and NULL on failure.
561  */
562 static struct xilinx_vdma_tx_segment *
563 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
564 {
565 	struct xilinx_vdma_tx_segment *segment;
566 	dma_addr_t phys;
567 
568 	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
569 	if (!segment)
570 		return NULL;
571 
572 	segment->phys = phys;
573 
574 	return segment;
575 }
576 
577 /**
578  * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
579  * @chan: Driver specific DMA channel
580  *
581  * Return: The allocated segment on success and NULL on failure.
582  */
583 static struct xilinx_cdma_tx_segment *
584 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
585 {
586 	struct xilinx_cdma_tx_segment *segment;
587 	dma_addr_t phys;
588 
589 	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
590 	if (!segment)
591 		return NULL;
592 
593 	segment->phys = phys;
594 
595 	return segment;
596 }
597 
598 /**
599  * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
600  * @chan: Driver specific DMA channel
601  *
602  * Return: The allocated segment on success and NULL on failure.
603  */
604 static struct xilinx_axidma_tx_segment *
605 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
606 {
607 	struct xilinx_axidma_tx_segment *segment = NULL;
608 	unsigned long flags;
609 
610 	spin_lock_irqsave(&chan->lock, flags);
611 	if (!list_empty(&chan->free_seg_list)) {
612 		segment = list_first_entry(&chan->free_seg_list,
613 					   struct xilinx_axidma_tx_segment,
614 					   node);
615 		list_del(&segment->node);
616 	}
617 	spin_unlock_irqrestore(&chan->lock, flags);
618 
619 	return segment;
620 }
621 
622 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
623 {
624 	u32 next_desc = hw->next_desc;
625 	u32 next_desc_msb = hw->next_desc_msb;
626 
627 	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
628 
629 	hw->next_desc = next_desc;
630 	hw->next_desc_msb = next_desc_msb;
631 }
632 
633 /**
634  * xilinx_dma_free_tx_segment - Free transaction segment
635  * @chan: Driver specific DMA channel
636  * @segment: DMA transaction segment
637  */
638 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
639 				struct xilinx_axidma_tx_segment *segment)
640 {
641 	xilinx_dma_clean_hw_desc(&segment->hw);
642 
643 	list_add_tail(&segment->node, &chan->free_seg_list);
644 }
645 
646 /**
647  * xilinx_cdma_free_tx_segment - Free transaction segment
648  * @chan: Driver specific DMA channel
649  * @segment: DMA transaction segment
650  */
651 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
652 				struct xilinx_cdma_tx_segment *segment)
653 {
654 	dma_pool_free(chan->desc_pool, segment, segment->phys);
655 }
656 
657 /**
658  * xilinx_vdma_free_tx_segment - Free transaction segment
659  * @chan: Driver specific DMA channel
660  * @segment: DMA transaction segment
661  */
662 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
663 					struct xilinx_vdma_tx_segment *segment)
664 {
665 	dma_pool_free(chan->desc_pool, segment, segment->phys);
666 }
667 
668 /**
669  * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
670  * @chan: Driver specific DMA channel
671  *
672  * Return: The allocated descriptor on success and NULL on failure.
673  */
674 static struct xilinx_dma_tx_descriptor *
675 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
676 {
677 	struct xilinx_dma_tx_descriptor *desc;
678 
679 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
680 	if (!desc)
681 		return NULL;
682 
683 	INIT_LIST_HEAD(&desc->segments);
684 
685 	return desc;
686 }
687 
688 /**
689  * xilinx_dma_free_tx_descriptor - Free transaction descriptor
690  * @chan: Driver specific DMA channel
691  * @desc: DMA transaction descriptor
692  */
693 static void
694 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
695 			       struct xilinx_dma_tx_descriptor *desc)
696 {
697 	struct xilinx_vdma_tx_segment *segment, *next;
698 	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
699 	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
700 
701 	if (!desc)
702 		return;
703 
704 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
705 		list_for_each_entry_safe(segment, next, &desc->segments, node) {
706 			list_del(&segment->node);
707 			xilinx_vdma_free_tx_segment(chan, segment);
708 		}
709 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
710 		list_for_each_entry_safe(cdma_segment, cdma_next,
711 					 &desc->segments, node) {
712 			list_del(&cdma_segment->node);
713 			xilinx_cdma_free_tx_segment(chan, cdma_segment);
714 		}
715 	} else {
716 		list_for_each_entry_safe(axidma_segment, axidma_next,
717 					 &desc->segments, node) {
718 			list_del(&axidma_segment->node);
719 			xilinx_dma_free_tx_segment(chan, axidma_segment);
720 		}
721 	}
722 
723 	kfree(desc);
724 }
725 
726 /* Required functions */
727 
728 /**
729  * xilinx_dma_free_desc_list - Free descriptors list
730  * @chan: Driver specific DMA channel
731  * @list: List to parse and delete the descriptor
732  */
733 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
734 					struct list_head *list)
735 {
736 	struct xilinx_dma_tx_descriptor *desc, *next;
737 
738 	list_for_each_entry_safe(desc, next, list, node) {
739 		list_del(&desc->node);
740 		xilinx_dma_free_tx_descriptor(chan, desc);
741 	}
742 }
743 
744 /**
745  * xilinx_dma_free_descriptors - Free channel descriptors
746  * @chan: Driver specific DMA channel
747  */
748 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
749 {
750 	unsigned long flags;
751 
752 	spin_lock_irqsave(&chan->lock, flags);
753 
754 	xilinx_dma_free_desc_list(chan, &chan->pending_list);
755 	xilinx_dma_free_desc_list(chan, &chan->done_list);
756 	xilinx_dma_free_desc_list(chan, &chan->active_list);
757 
758 	spin_unlock_irqrestore(&chan->lock, flags);
759 }
760 
761 /**
762  * xilinx_dma_free_chan_resources - Free channel resources
763  * @dchan: DMA channel
764  */
765 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
766 {
767 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
768 	unsigned long flags;
769 
770 	dev_dbg(chan->dev, "Free all channel resources.\n");
771 
772 	xilinx_dma_free_descriptors(chan);
773 
774 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
775 		spin_lock_irqsave(&chan->lock, flags);
776 		INIT_LIST_HEAD(&chan->free_seg_list);
777 		spin_unlock_irqrestore(&chan->lock, flags);
778 
779 		/* Free memory that is allocated for BD */
780 		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
781 				  XILINX_DMA_NUM_DESCS, chan->seg_v,
782 				  chan->seg_p);
783 
784 		/* Free Memory that is allocated for cyclic DMA Mode */
785 		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
786 				  chan->cyclic_seg_v, chan->cyclic_seg_p);
787 	}
788 
789 	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
790 		dma_pool_destroy(chan->desc_pool);
791 		chan->desc_pool = NULL;
792 	}
793 }
794 
795 /**
796  * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
797  * @chan: Driver specific dma channel
798  * @desc: dma transaction descriptor
799  * @flags: flags for spin lock
800  */
801 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
802 					  struct xilinx_dma_tx_descriptor *desc,
803 					  unsigned long *flags)
804 {
805 	dma_async_tx_callback callback;
806 	void *callback_param;
807 
808 	callback = desc->async_tx.callback;
809 	callback_param = desc->async_tx.callback_param;
810 	if (callback) {
811 		spin_unlock_irqrestore(&chan->lock, *flags);
812 		callback(callback_param);
813 		spin_lock_irqsave(&chan->lock, *flags);
814 	}
815 }
816 
817 /**
818  * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
819  * @chan: Driver specific DMA channel
820  */
821 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
822 {
823 	struct xilinx_dma_tx_descriptor *desc, *next;
824 	unsigned long flags;
825 
826 	spin_lock_irqsave(&chan->lock, flags);
827 
828 	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
829 		struct dmaengine_desc_callback cb;
830 
831 		if (desc->cyclic) {
832 			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
833 			break;
834 		}
835 
836 		/* Remove from the list of running transactions */
837 		list_del(&desc->node);
838 
839 		/* Run the link descriptor callback function */
840 		dmaengine_desc_get_callback(&desc->async_tx, &cb);
841 		if (dmaengine_desc_callback_valid(&cb)) {
842 			spin_unlock_irqrestore(&chan->lock, flags);
843 			dmaengine_desc_callback_invoke(&cb, NULL);
844 			spin_lock_irqsave(&chan->lock, flags);
845 		}
846 
847 		/* Run any dependencies, then free the descriptor */
848 		dma_run_dependencies(&desc->async_tx);
849 		xilinx_dma_free_tx_descriptor(chan, desc);
850 	}
851 
852 	spin_unlock_irqrestore(&chan->lock, flags);
853 }
854 
855 /**
856  * xilinx_dma_do_tasklet - Completion tasklet for descriptor cleanup
857  * @data: Pointer to the Xilinx DMA channel structure
858  */
859 static void xilinx_dma_do_tasklet(unsigned long data)
860 {
861 	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
862 
863 	xilinx_dma_chan_desc_cleanup(chan);
864 }
865 
866 /**
867  * xilinx_dma_alloc_chan_resources - Allocate channel resources
868  * @dchan: DMA channel
869  *
870  * Return: '0' on success and failure value on error
871  */
872 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
873 {
874 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
875 	int i;
876 
877 	/* Has this channel already been allocated? */
878 	if (chan->desc_pool)
879 		return 0;
880 
881 	/*
882 	 * We need the descriptor to be aligned to 64 bytes
883 	 * to meet the Xilinx VDMA specification requirement.
884 	 */
885 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
886 		/* Allocate the buffer descriptors. */
887 		chan->seg_v = dma_zalloc_coherent(chan->dev,
888 						  sizeof(*chan->seg_v) *
889 						  XILINX_DMA_NUM_DESCS,
890 						  &chan->seg_p, GFP_KERNEL);
891 		if (!chan->seg_v) {
892 			dev_err(chan->dev,
893 				"unable to allocate channel %d descriptors\n",
894 				chan->id);
895 			return -ENOMEM;
896 		}
897 		/*
898 		 * For cyclic DMA mode we need to program the tail descriptor
899 		 * register with a value that is not part of the BD chain, so
900 		 * allocate a descriptor segment during channel allocation to
901 		 * use when programming the tail descriptor.
902 		 */
903 		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
904 					sizeof(*chan->cyclic_seg_v),
905 					&chan->cyclic_seg_p, GFP_KERNEL);
906 		if (!chan->cyclic_seg_v) {
907 			dev_err(chan->dev,
908 				"unable to allocate desc segment for cyclic DMA\n");
909 			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
910 				XILINX_DMA_NUM_DESCS, chan->seg_v,
911 				chan->seg_p);
912 			return -ENOMEM;
913 		}
914 		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
915 
916 		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
917 			chan->seg_v[i].hw.next_desc =
918 			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
919 				((i + 1) % XILINX_DMA_NUM_DESCS));
920 			chan->seg_v[i].hw.next_desc_msb =
921 			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
922 				((i + 1) % XILINX_DMA_NUM_DESCS));
923 			chan->seg_v[i].phys = chan->seg_p +
924 				sizeof(*chan->seg_v) * i;
925 			list_add_tail(&chan->seg_v[i].node,
926 				      &chan->free_seg_list);
927 		}
928 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
929 		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
930 				   chan->dev,
931 				   sizeof(struct xilinx_cdma_tx_segment),
932 				   __alignof__(struct xilinx_cdma_tx_segment),
933 				   0);
934 	} else {
935 		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
936 				     chan->dev,
937 				     sizeof(struct xilinx_vdma_tx_segment),
938 				     __alignof__(struct xilinx_vdma_tx_segment),
939 				     0);
940 	}
941 
942 	if (!chan->desc_pool &&
943 	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
944 		dev_err(chan->dev,
945 			"unable to allocate channel %d descriptor pool\n",
946 			chan->id);
947 		return -ENOMEM;
948 	}
949 
950 	dma_cookie_init(dchan);
951 
952 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
953 		/* For AXI DMA, resetting one channel will reset the
954 		 * other channel as well, so enable the interrupts here.
955 		 */
956 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
957 			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
958 	}
959 
960 	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
961 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
962 			     XILINX_CDMA_CR_SGMODE);
963 
964 	return 0;
965 }
966 
967 /**
968  * xilinx_dma_tx_status - Get DMA transaction status
969  * @dchan: DMA channel
970  * @cookie: Transaction identifier
971  * @txstate: Transaction state
972  *
973  * Return: DMA transaction status
974  */
975 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
976 					dma_cookie_t cookie,
977 					struct dma_tx_state *txstate)
978 {
979 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
980 	struct xilinx_dma_tx_descriptor *desc;
981 	struct xilinx_axidma_tx_segment *segment;
982 	struct xilinx_axidma_desc_hw *hw;
983 	enum dma_status ret;
984 	unsigned long flags;
985 	u32 residue = 0;
986 
987 	ret = dma_cookie_status(dchan, cookie, txstate);
988 	if (ret == DMA_COMPLETE || !txstate)
989 		return ret;
990 
991 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
992 		spin_lock_irqsave(&chan->lock, flags);
993 
994 		desc = list_last_entry(&chan->active_list,
995 				       struct xilinx_dma_tx_descriptor, node);
996 		if (chan->has_sg) {
997 			list_for_each_entry(segment, &desc->segments, node) {
998 				hw = &segment->hw;
999 				residue += (hw->control - hw->status) &
1000 					   chan->xdev->max_buffer_len;
1001 			}
1002 		}
1003 		spin_unlock_irqrestore(&chan->lock, flags);
1004 
1005 		chan->residue = residue;
1006 		if (chan->cyclic)
1007 			dma_set_residue(txstate, chan->buf_idx);
1008 		else
1009 			dma_set_residue(txstate, chan->residue);
1010 	}
1011 
1012 	return ret;
1013 }
1014 
1015 /**
1016  * xilinx_dma_stop_transfer - Halt DMA channel
1017  * @chan: Driver specific DMA channel
1018  *
1019  * Return: '0' on success and failure value on error
1020  */
1021 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1022 {
1023 	u32 val;
1024 
1025 	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1026 
1027 	/* Wait for the hardware to halt */
1028 	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1029 				       val & XILINX_DMA_DMASR_HALTED, 0,
1030 				       XILINX_DMA_LOOP_COUNT);
1031 }
1032 
1033 /**
1034  * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1035  * @chan: Driver specific DMA channel
1036  *
1037  * Return: '0' on success and failure value on error
1038  */
1039 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1040 {
1041 	u32 val;
1042 
1043 	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1044 				       val & XILINX_DMA_DMASR_IDLE, 0,
1045 				       XILINX_DMA_LOOP_COUNT);
1046 }
1047 
1048 /**
1049  * xilinx_dma_start - Start DMA channel
1050  * @chan: Driver specific DMA channel
1051  */
1052 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1053 {
1054 	int err;
1055 	u32 val;
1056 
1057 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1058 
1059 	/* Wait for the hardware to start */
1060 	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1061 				      !(val & XILINX_DMA_DMASR_HALTED), 0,
1062 				      XILINX_DMA_LOOP_COUNT);
1063 
1064 	if (err) {
1065 		dev_err(chan->dev, "Cannot start channel %p: %x\n",
1066 			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1067 
1068 		chan->err = true;
1069 	}
1070 }
1071 
1072 /**
1073  * xilinx_vdma_start_transfer - Starts VDMA transfer
1074  * @chan: Driver specific channel struct pointer
1075  */
1076 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1077 {
1078 	struct xilinx_vdma_config *config = &chan->config;
1079 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
1080 	u32 reg, j;
1081 	struct xilinx_vdma_tx_segment *tail_segment;
1082 
1083 	/* This function was invoked with lock held */
1084 	if (chan->err)
1085 		return;
1086 
1087 	if (!chan->idle)
1088 		return;
1089 
1090 	if (list_empty(&chan->pending_list))
1091 		return;
1092 
1093 	desc = list_first_entry(&chan->pending_list,
1094 				struct xilinx_dma_tx_descriptor, node);
1095 	tail_desc = list_last_entry(&chan->pending_list,
1096 				    struct xilinx_dma_tx_descriptor, node);
1097 
1098 	tail_segment = list_last_entry(&tail_desc->segments,
1099 				       struct xilinx_vdma_tx_segment, node);
1100 
1101 	/*
1102 	 * If hardware is idle, then all descriptors on the running lists are
1103 	 * done, start new transfers
1104 	 */
1105 	if (chan->has_sg)
1106 		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1107 				desc->async_tx.phys);
1108 
1109 	/* Configure the hardware using info in the config structure */
1110 	if (chan->has_vflip) {
1111 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1112 		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1113 		reg |= config->vflip_en;
1114 		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1115 			  reg);
1116 	}
1117 
1118 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1119 
1120 	if (config->frm_cnt_en)
1121 		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1122 	else
1123 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1124 
1125 	/*
1126 	 * With SG, start with circular mode, so that BDs can be fetched.
1127 	 * In direct register mode, if not parking, enable circular mode
1128 	 */
1129 	if (chan->has_sg || !config->park)
1130 		reg |= XILINX_DMA_DMACR_CIRC_EN;
1131 
1132 	if (config->park)
1133 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1134 
1135 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1136 
1137 	j = chan->desc_submitcount;
1138 	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1139 	if (chan->direction == DMA_MEM_TO_DEV) {
1140 		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1141 		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1142 	} else {
1143 		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1144 		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1145 	}
1146 	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1147 
1148 	/* Start the hardware */
1149 	xilinx_dma_start(chan);
1150 
1151 	if (chan->err)
1152 		return;
1153 
1154 	/* Start the transfer */
1155 	if (chan->has_sg) {
1156 		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1157 				tail_segment->phys);
1158 		list_splice_tail_init(&chan->pending_list, &chan->active_list);
1159 		chan->desc_pendingcount = 0;
1160 	} else {
1161 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
1162 		int i = 0;
1163 
1164 		if (chan->desc_submitcount < chan->num_frms)
1165 			i = chan->desc_submitcount;
1166 
1167 		list_for_each_entry(segment, &desc->segments, node) {
1168 			if (chan->ext_addr)
1169 				vdma_desc_write_64(chan,
1170 					XILINX_VDMA_REG_START_ADDRESS_64(i++),
1171 					segment->hw.buf_addr,
1172 					segment->hw.buf_addr_msb);
1173 			else
1174 				vdma_desc_write(chan,
1175 					XILINX_VDMA_REG_START_ADDRESS(i++),
1176 					segment->hw.buf_addr);
1177 
1178 			last = segment;
1179 		}
1180 
1181 		if (!last)
1182 			return;
1183 
1184 		/* HW expects these parameters to be the same for one transaction */
1185 		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1186 		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1187 				last->hw.stride);
1188 		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1189 
1190 		chan->desc_submitcount++;
1191 		chan->desc_pendingcount--;
1192 		list_del(&desc->node);
1193 		list_add_tail(&desc->node, &chan->active_list);
1194 		if (chan->desc_submitcount == chan->num_frms)
1195 			chan->desc_submitcount = 0;
1196 	}
1197 
1198 	chan->idle = false;
1199 }
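/*
 * Consumer-side sketch (illustrative): the config fields consulted above
 * (park, frm_cnt_en, vflip_en, frm_dly) are normally filled in by a VDMA
 * client through the exported xilinx_vdma_channel_set_config() helper
 * before transfers are issued, e.g.:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.park = 0,
 *		.frm_cnt_en = 1,
 *	};
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */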
1200 
1201 /**
1202  * xilinx_cdma_start_transfer - Starts cdma transfer
1203  * @chan: Driver specific channel struct pointer
1204  */
1205 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1206 {
1207 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1208 	struct xilinx_cdma_tx_segment *tail_segment;
1209 	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1210 
1211 	if (chan->err)
1212 		return;
1213 
1214 	if (!chan->idle)
1215 		return;
1216 
1217 	if (list_empty(&chan->pending_list))
1218 		return;
1219 
1220 	head_desc = list_first_entry(&chan->pending_list,
1221 				     struct xilinx_dma_tx_descriptor, node);
1222 	tail_desc = list_last_entry(&chan->pending_list,
1223 				    struct xilinx_dma_tx_descriptor, node);
1224 	tail_segment = list_last_entry(&tail_desc->segments,
1225 				       struct xilinx_cdma_tx_segment, node);
1226 
1227 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1228 		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1229 		ctrl_reg |= chan->desc_pendingcount <<
1230 				XILINX_DMA_CR_COALESCE_SHIFT;
1231 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1232 	}
1233 
1234 	if (chan->has_sg) {
1235 		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1236 			     XILINX_CDMA_CR_SGMODE);
1237 
1238 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1239 			     XILINX_CDMA_CR_SGMODE);
1240 
1241 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1242 			     head_desc->async_tx.phys);
1243 
1244 		/* Update tail ptr register which will start the transfer */
1245 		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1246 			     tail_segment->phys);
1247 	} else {
1248 		/* In simple mode */
1249 		struct xilinx_cdma_tx_segment *segment;
1250 		struct xilinx_cdma_desc_hw *hw;
1251 
1252 		segment = list_first_entry(&head_desc->segments,
1253 					   struct xilinx_cdma_tx_segment,
1254 					   node);
1255 
1256 		hw = &segment->hw;
1257 
1258 		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, (dma_addr_t)
1259 			     ((u64)hw->src_addr_msb << 32 | hw->src_addr));
1260 		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, (dma_addr_t)
1261 			     ((u64)hw->dest_addr_msb << 32 | hw->dest_addr));
1262 
1263 		/* Start the transfer */
1264 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1265 				hw->control & chan->xdev->max_buffer_len);
1266 	}
1267 
1268 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
1269 	chan->desc_pendingcount = 0;
1270 	chan->idle = false;
1271 }
1272 
1273 /**
1274  * xilinx_dma_start_transfer - Starts DMA transfer
1275  * @chan: Driver specific channel struct pointer
1276  */
1277 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1278 {
1279 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1280 	struct xilinx_axidma_tx_segment *tail_segment;
1281 	u32 reg;
1282 
1283 	if (chan->err)
1284 		return;
1285 
1286 	if (!chan->idle)
1287 		return;
1288 
1289 	if (list_empty(&chan->pending_list))
1290 		return;
1291 
1292 	head_desc = list_first_entry(&chan->pending_list,
1293 				     struct xilinx_dma_tx_descriptor, node);
1294 	tail_desc = list_last_entry(&chan->pending_list,
1295 				    struct xilinx_dma_tx_descriptor, node);
1296 	tail_segment = list_last_entry(&tail_desc->segments,
1297 				       struct xilinx_axidma_tx_segment, node);
1298 
1299 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1300 
1301 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1302 		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1303 		reg |= chan->desc_pendingcount <<
1304 				  XILINX_DMA_CR_COALESCE_SHIFT;
1305 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1306 	}
1307 
1308 	if (chan->has_sg && !chan->xdev->mcdma)
1309 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1310 			     head_desc->async_tx.phys);
1311 
1312 	if (chan->has_sg && chan->xdev->mcdma) {
1313 		if (chan->direction == DMA_MEM_TO_DEV) {
1314 			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1315 				       head_desc->async_tx.phys);
1316 		} else {
1317 			if (!chan->tdest) {
1318 				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1319 				       head_desc->async_tx.phys);
1320 			} else {
1321 				dma_ctrl_write(chan,
1322 					XILINX_DMA_MCRX_CDESC(chan->tdest),
1323 				       head_desc->async_tx.phys);
1324 			}
1325 		}
1326 	}
1327 
1328 	xilinx_dma_start(chan);
1329 
1330 	if (chan->err)
1331 		return;
1332 
1333 	/* Start the transfer */
1334 	if (chan->has_sg && !chan->xdev->mcdma) {
1335 		if (chan->cyclic)
1336 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1337 				     chan->cyclic_seg_v->phys);
1338 		else
1339 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1340 				     tail_segment->phys);
1341 	} else if (chan->has_sg && chan->xdev->mcdma) {
1342 		if (chan->direction == DMA_MEM_TO_DEV) {
1343 			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1344 			       tail_segment->phys);
1345 		} else {
1346 			if (!chan->tdest) {
1347 				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1348 					       tail_segment->phys);
1349 			} else {
1350 				dma_ctrl_write(chan,
1351 					XILINX_DMA_MCRX_TDESC(chan->tdest),
1352 					tail_segment->phys);
1353 			}
1354 		}
1355 	} else {
1356 		struct xilinx_axidma_tx_segment *segment;
1357 		struct xilinx_axidma_desc_hw *hw;
1358 
1359 		segment = list_first_entry(&head_desc->segments,
1360 					   struct xilinx_axidma_tx_segment,
1361 					   node);
1362 		hw = &segment->hw;
1363 
1364 		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
1365 
1366 		/* Start the transfer */
1367 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1368 			       hw->control & chan->xdev->max_buffer_len);
1369 	}
1370 
1371 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
1372 	chan->desc_pendingcount = 0;
1373 	chan->idle = false;
1374 }
1375 
1376 /**
1377  * xilinx_dma_issue_pending - Issue pending transactions
1378  * @dchan: DMA channel
1379  */
1380 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1381 {
1382 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1383 	unsigned long flags;
1384 
1385 	spin_lock_irqsave(&chan->lock, flags);
1386 	chan->start_transfer(chan);
1387 	spin_unlock_irqrestore(&chan->lock, flags);
1388 }
1389 
1390 /**
1391  * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1392  * @chan : xilinx DMA channel
1393  *
1394  * CONTEXT: hardirq
1395  */
1396 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1397 {
1398 	struct xilinx_dma_tx_descriptor *desc, *next;
1399 
1400 	/* This function was invoked with lock held */
1401 	if (list_empty(&chan->active_list))
1402 		return;
1403 
1404 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1405 		list_del(&desc->node);
1406 		if (!desc->cyclic)
1407 			dma_cookie_complete(&desc->async_tx);
1408 		list_add_tail(&desc->node, &chan->done_list);
1409 	}
1410 }
1411 
1412 /**
1413  * xilinx_dma_reset - Reset DMA channel
1414  * @chan: Driver specific DMA channel
1415  *
1416  * Return: '0' on success and failure value on error
1417  */
1418 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1419 {
1420 	int err;
1421 	u32 tmp;
1422 
1423 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1424 
1425 	/* Wait for the hardware to finish reset */
1426 	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1427 				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
1428 				      XILINX_DMA_LOOP_COUNT);
1429 
1430 	if (err) {
1431 		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1432 			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1433 			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1434 		return -ETIMEDOUT;
1435 	}
1436 
1437 	chan->err = false;
1438 	chan->idle = true;
1439 	chan->desc_submitcount = 0;
1440 
1441 	return err;
1442 }
1443 
1444 /**
1445  * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1446  * @chan: Driver specific DMA channel
1447  *
1448  * Return: '0' on success and failure value on error
1449  */
1450 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1451 {
1452 	int err;
1453 
1454 	/* Reset VDMA */
1455 	err = xilinx_dma_reset(chan);
1456 	if (err)
1457 		return err;
1458 
1459 	/* Enable interrupts */
1460 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1461 		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1462 
1463 	return 0;
1464 }
1465 
1466 /**
1467  * xilinx_dma_irq_handler - DMA Interrupt handler
1468  * @irq: IRQ number
1469  * @data: Pointer to the Xilinx DMA channel structure
1470  *
1471  * Return: IRQ_HANDLED/IRQ_NONE
1472  */
1473 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1474 {
1475 	struct xilinx_dma_chan *chan = data;
1476 	u32 status;
1477 
1478 	/* Read the status and ack the interrupts. */
1479 	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1480 	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1481 		return IRQ_NONE;
1482 
1483 	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1484 			status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1485 
1486 	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1487 		/*
1488 		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1489 		 * error is recoverable, ignore it. Otherwise flag the error.
1490 		 *
1491 		 * Only recoverable errors can be cleared in the DMASR register,
1492 		 * so make sure not to write 1 to the other error bits.
1493 		 */
1494 		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1495 
1496 		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1497 				errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1498 
1499 		if (!chan->flush_on_fsync ||
1500 		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1501 			dev_err(chan->dev,
1502 				"Channel %p has errors %x, cdr %x tdr %x\n",
1503 				chan, errors,
1504 				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1505 				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1506 			chan->err = true;
1507 		}
1508 	}
1509 
1510 	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1511 		/*
1512 		 * The device is taking too long to complete the transfer when
1513 		 * the user requires responsiveness.
1514 		 */
1515 		dev_dbg(chan->dev, "Inter-packet latency too long\n");
1516 	}
1517 
1518 	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1519 		spin_lock(&chan->lock);
1520 		xilinx_dma_complete_descriptor(chan);
1521 		chan->idle = true;
1522 		chan->start_transfer(chan);
1523 		chan->buf_idx++;
1524 		spin_unlock(&chan->lock);
1525 	}
1526 
1527 	tasklet_schedule(&chan->tasklet);
1528 	return IRQ_HANDLED;
1529 }
1530 
1531 /**
1532  * append_desc_queue - Queue a descriptor
1533  * @chan: Driver specific dma channel
1534  * @desc: dma transaction descriptor
1535  */
1536 static void append_desc_queue(struct xilinx_dma_chan *chan,
1537 			      struct xilinx_dma_tx_descriptor *desc)
1538 {
1539 	struct xilinx_vdma_tx_segment *tail_segment;
1540 	struct xilinx_dma_tx_descriptor *tail_desc;
1541 	struct xilinx_axidma_tx_segment *axidma_tail_segment;
1542 	struct xilinx_cdma_tx_segment *cdma_tail_segment;
1543 
1544 	if (list_empty(&chan->pending_list))
1545 		goto append;
1546 
1547 	/*
1548 	 * Add the hardware descriptor to the chain of hardware descriptors
1549 	 * that already exists in memory.
1550 	 */
1551 	tail_desc = list_last_entry(&chan->pending_list,
1552 				    struct xilinx_dma_tx_descriptor, node);
1553 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1554 		tail_segment = list_last_entry(&tail_desc->segments,
1555 					       struct xilinx_vdma_tx_segment,
1556 					       node);
1557 		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1558 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1559 		cdma_tail_segment = list_last_entry(&tail_desc->segments,
1560 						struct xilinx_cdma_tx_segment,
1561 						node);
1562 		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1563 	} else {
1564 		axidma_tail_segment = list_last_entry(&tail_desc->segments,
1565 					       struct xilinx_axidma_tx_segment,
1566 					       node);
1567 		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1568 	}
1569 
1570 	/*
1571 	 * Add the software descriptor and all children to the list
1572 	 * of pending transactions
1573 	 */
1574 append:
1575 	list_add_tail(&desc->node, &chan->pending_list);
1576 	chan->desc_pendingcount++;
1577 
1578 	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1579 	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1580 		dev_dbg(chan->dev, "desc pendingcount is too high\n");
1581 		chan->desc_pendingcount = chan->num_frms;
1582 	}
1583 }
1584 
1585 /**
1586  * xilinx_dma_tx_submit - Submit DMA transaction
1587  * @tx: Async transaction descriptor
1588  *
1589  * Return: cookie value on success and failure value on error
1590  */
1591 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1592 {
1593 	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1594 	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1595 	dma_cookie_t cookie;
1596 	unsigned long flags;
1597 	int err;
1598 
1599 	if (chan->cyclic) {
1600 		xilinx_dma_free_tx_descriptor(chan, desc);
1601 		return -EBUSY;
1602 	}
1603 
1604 	if (chan->err) {
1605 		/*
1606 		 * If the reset fails, the channel is no longer functional
1607 		 * and the system needs a hard reset.
1608 		 */
1609 		err = xilinx_dma_chan_reset(chan);
1610 		if (err < 0)
1611 			return err;
1612 	}
1613 
1614 	spin_lock_irqsave(&chan->lock, flags);
1615 
1616 	cookie = dma_cookie_assign(tx);
1617 
1618 	/* Put this transaction onto the tail of the pending queue */
1619 	append_desc_queue(chan, desc);
1620 
1621 	if (desc->cyclic)
1622 		chan->cyclic = true;
1623 
1624 	spin_unlock_irqrestore(&chan->lock, flags);
1625 
1626 	return cookie;
1627 }
1628 
1629 /**
1630  * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1631  *	DMA_SLAVE transaction
1632  * @dchan: DMA channel
1633  * @xt: Interleaved template pointer
1634  * @flags: transfer ack flags
1635  *
1636  * Return: Async transaction descriptor on success and NULL on failure
1637  */
1638 static struct dma_async_tx_descriptor *
1639 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1640 				 struct dma_interleaved_template *xt,
1641 				 unsigned long flags)
1642 {
1643 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1644 	struct xilinx_dma_tx_descriptor *desc;
1645 	struct xilinx_vdma_tx_segment *segment;
1646 	struct xilinx_vdma_desc_hw *hw;
1647 
1648 	if (!is_slave_direction(xt->dir))
1649 		return NULL;
1650 
1651 	if (!xt->numf || !xt->sgl[0].size)
1652 		return NULL;
1653 
1654 	if (xt->frame_size != 1)
1655 		return NULL;
1656 
1657 	/* Allocate a transaction descriptor. */
1658 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1659 	if (!desc)
1660 		return NULL;
1661 
1662 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1663 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1664 	async_tx_ack(&desc->async_tx);
1665 
1666 	/* Allocate the link descriptor from DMA pool */
1667 	segment = xilinx_vdma_alloc_tx_segment(chan);
1668 	if (!segment)
1669 		goto error;
1670 
1671 	/* Fill in the hardware descriptor */
1672 	hw = &segment->hw;
1673 	hw->vsize = xt->numf;
1674 	hw->hsize = xt->sgl[0].size;
1675 	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1676 			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1677 	hw->stride |= chan->config.frm_dly <<
1678 			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1679 
1680 	if (xt->dir != DMA_MEM_TO_DEV) {
1681 		if (chan->ext_addr) {
1682 			hw->buf_addr = lower_32_bits(xt->dst_start);
1683 			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1684 		} else {
1685 			hw->buf_addr = xt->dst_start;
1686 		}
1687 	} else {
1688 		if (chan->ext_addr) {
1689 			hw->buf_addr = lower_32_bits(xt->src_start);
1690 			hw->buf_addr_msb = upper_32_bits(xt->src_start);
1691 		} else {
1692 			hw->buf_addr = xt->src_start;
1693 		}
1694 	}
1695 
1696 	/* Insert the segment into the descriptor segments list. */
1697 	list_add_tail(&segment->node, &desc->segments);
1698 
1699 	/* Link the last hardware descriptor with the first. */
1700 	segment = list_first_entry(&desc->segments,
1701 				   struct xilinx_vdma_tx_segment, node);
1702 	desc->async_tx.phys = segment->phys;
1703 
1704 	return &desc->async_tx;
1705 
1706 error:
1707 	xilinx_dma_free_tx_descriptor(chan, desc);
1708 	return NULL;
1709 }
1710 
1711 /**
1712  * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1713  * @dchan: DMA channel
1714  * @dma_dst: destination address
1715  * @dma_src: source address
1716  * @len: transfer length
1717  * @flags: transfer ack flags
1718  *
1719  * Return: Async transaction descriptor on success and NULL on failure
1720  */
1721 static struct dma_async_tx_descriptor *
1722 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1723 			dma_addr_t dma_src, size_t len, unsigned long flags)
1724 {
1725 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1726 	struct xilinx_dma_tx_descriptor *desc;
1727 	struct xilinx_cdma_tx_segment *segment;
1728 	struct xilinx_cdma_desc_hw *hw;
1729 
1730 	if (!len || len > chan->xdev->max_buffer_len)
1731 		return NULL;
1732 
1733 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1734 	if (!desc)
1735 		return NULL;
1736 
1737 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1738 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1739 
1740 	/* Allocate the link descriptor from DMA pool */
1741 	segment = xilinx_cdma_alloc_tx_segment(chan);
1742 	if (!segment)
1743 		goto error;
1744 
1745 	hw = &segment->hw;
1746 	hw->control = len;
1747 	hw->src_addr = dma_src;
1748 	hw->dest_addr = dma_dst;
1749 	if (chan->ext_addr) {
1750 		hw->src_addr_msb = upper_32_bits(dma_src);
1751 		hw->dest_addr_msb = upper_32_bits(dma_dst);
1752 	}
1753 
1754 	/* Insert the segment into the descriptor segments list. */
1755 	list_add_tail(&segment->node, &desc->segments);
1756 
1757 	desc->async_tx.phys = segment->phys;
1758 	hw->next_desc = segment->phys;
1759 
1760 	return &desc->async_tx;
1761 
1762 error:
1763 	xilinx_dma_free_tx_descriptor(chan, desc);
1764 	return NULL;
1765 }
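
/*
 * Usage sketch (illustrative only): a client issues a memory-to-memory copy
 * on a CDMA channel through the generic dmaengine API; "cdma_chan", "dst",
 * "src" and "len" are hypothetical placeholders.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(cdma_chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(cdma_chan);
 *	// completion can be polled with dma_async_is_tx_complete(), or
 *	// signalled via txd->callback set before dmaengine_submit()
 */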
1766 
1767 /**
1768  * xilinx_cdma_prep_sg - prepare descriptors for a memory sg transaction
1769  * @dchan: DMA channel
1770  * @dst_sg: Destination scatter list
1771  * @dst_sg_len: Number of entries in destination scatter list
1772  * @src_sg: Source scatter list
1773  * @src_sg_len: Number of entries in source scatter list
1774  * @flags: transfer ack flags
1775  *
1776  * Return: Async transaction descriptor on success and NULL on failure
1777  */
1778 static struct dma_async_tx_descriptor *xilinx_cdma_prep_sg(
1779 			struct dma_chan *dchan, struct scatterlist *dst_sg,
1780 			unsigned int dst_sg_len, struct scatterlist *src_sg,
1781 			unsigned int src_sg_len, unsigned long flags)
1782 {
1783 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1784 	struct xilinx_dma_tx_descriptor *desc;
1785 	struct xilinx_cdma_tx_segment *segment, *prev = NULL;
1786 	struct xilinx_cdma_desc_hw *hw;
1787 	size_t len, dst_avail, src_avail;
1788 	dma_addr_t dma_dst, dma_src;
1789 
1790 	if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
1791 		return NULL;
1792 
1793 	if (unlikely(dst_sg == NULL || src_sg == NULL))
1794 		return NULL;
1795 
1796 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1797 	if (!desc)
1798 		return NULL;
1799 
1800 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1801 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1802 
1803 	dst_avail = sg_dma_len(dst_sg);
1804 	src_avail = sg_dma_len(src_sg);
1805 	/*
1806 	 * loop until there is either no more source or no more destination
1807 	 * scatterlist entry
1808 	 */
1809 	while (true) {
1810 		len = min_t(size_t, src_avail, dst_avail);
1811 		len = min_t(size_t, len, chan->xdev->max_buffer_len);
1812 		if (len == 0)
1813 			goto fetch;
1814 
1815 		/* Allocate the link descriptor from DMA pool */
1816 		segment = xilinx_cdma_alloc_tx_segment(chan);
1817 		if (!segment)
1818 			goto error;
1819 
1820 		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
1821 			dst_avail;
1822 		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
1823 			src_avail;
1824 		hw = &segment->hw;
1825 		hw->control = len;
1826 		hw->src_addr = dma_src;
1827 		hw->dest_addr = dma_dst;
1828 		if (chan->ext_addr) {
1829 			hw->src_addr_msb = upper_32_bits(dma_src);
1830 			hw->dest_addr_msb = upper_32_bits(dma_dst);
1831 		}
1832 
1833 		if (prev)
1834 			prev->hw.next_desc = segment->phys;
1835 
1836 		prev = segment;
1837 		dst_avail -= len;
1838 		src_avail -= len;
1839 		list_add_tail(&segment->node, &desc->segments);
1840 
1841 fetch:
1842 		/* Fetch the next dst scatterlist entry */
1843 		if (dst_avail == 0) {
1844 			if (dst_sg_len == 0)
1845 				break;
1846 			dst_sg = sg_next(dst_sg);
1847 			if (dst_sg == NULL)
1848 				break;
1849 			dst_sg_len--;
1850 			dst_avail = sg_dma_len(dst_sg);
1851 		}
1852 		/* Fetch the next src scatterlist entry */
1853 		if (src_avail == 0) {
1854 			if (src_sg_len == 0)
1855 				break;
1856 			src_sg = sg_next(src_sg);
1857 			if (src_sg == NULL)
1858 				break;
1859 			src_sg_len--;
1860 			src_avail = sg_dma_len(src_sg);
1861 		}
1862 	}
1863 
1864 	/* Link the last hardware descriptor with the first. */
1865 	segment = list_first_entry(&desc->segments,
1866 				struct xilinx_cdma_tx_segment, node);
1867 	desc->async_tx.phys = segment->phys;
1868 	prev->hw.next_desc = segment->phys;
1869 
1870 	return &desc->async_tx;
1871 
1872 error:
1873 	xilinx_dma_free_tx_descriptor(chan, desc);
1874 	return NULL;
1875 }
1876 
1877 /**
1878  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1879  * @dchan: DMA channel
1880  * @sgl: scatterlist to transfer to/from
1881  * @sg_len: number of entries in @sgl
1882  * @direction: DMA direction
1883  * @flags: transfer ack flags
1884  * @context: APP words of the descriptor
1885  *
1886  * Return: Async transaction descriptor on success and NULL on failure
1887  */
1888 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1889 	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1890 	enum dma_transfer_direction direction, unsigned long flags,
1891 	void *context)
1892 {
1893 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1894 	struct xilinx_dma_tx_descriptor *desc;
1895 	struct xilinx_axidma_tx_segment *segment = NULL;
1896 	u32 *app_w = (u32 *)context;
1897 	struct scatterlist *sg;
1898 	size_t copy;
1899 	size_t sg_used;
1900 	unsigned int i;
1901 
1902 	if (!is_slave_direction(direction))
1903 		return NULL;
1904 
1905 	/* Allocate a transaction descriptor. */
1906 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1907 	if (!desc)
1908 		return NULL;
1909 
1910 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1911 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1912 
1913 	/* Build transactions using information in the scatter gather list */
1914 	for_each_sg(sgl, sg, sg_len, i) {
1915 		sg_used = 0;
1916 
1917 		/* Loop until the entire scatterlist entry is used */
1918 		while (sg_used < sg_dma_len(sg)) {
1919 			struct xilinx_axidma_desc_hw *hw;
1920 
1921 			/* Get a free segment */
1922 			segment = xilinx_axidma_alloc_tx_segment(chan);
1923 			if (!segment)
1924 				goto error;
1925 
1926 			/*
1927 			 * Calculate the maximum number of bytes to transfer,
1928 			 * making sure it is less than the hw limit
1929 			 */
1930 			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1931 				     chan->xdev->max_buffer_len);
1932 			hw = &segment->hw;
1933 
1934 			/* Fill in the descriptor */
1935 			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1936 					  sg_used, 0);
1937 
1938 			hw->control = copy;
1939 
1940 			if (chan->direction == DMA_MEM_TO_DEV) {
1941 				if (app_w)
1942 					memcpy(hw->app, app_w, sizeof(u32) *
1943 					       XILINX_DMA_NUM_APP_WORDS);
1944 			}
1945 
1946 			sg_used += copy;
1947 
1948 			/*
1949 			 * Insert the segment into the descriptor segments
1950 			 * list.
1951 			 */
1952 			list_add_tail(&segment->node, &desc->segments);
1953 		}
1954 	}
1955 
1956 	segment = list_first_entry(&desc->segments,
1957 				   struct xilinx_axidma_tx_segment, node);
1958 	desc->async_tx.phys = segment->phys;
1959 
1960 	/* For MEM_TO_DEV, set SOP on the first BD and EOP on the last */
1961 	if (chan->direction == DMA_MEM_TO_DEV) {
1962 		segment->hw.control |= XILINX_DMA_BD_SOP;
1963 		segment = list_last_entry(&desc->segments,
1964 					  struct xilinx_axidma_tx_segment,
1965 					  node);
1966 		segment->hw.control |= XILINX_DMA_BD_EOP;
1967 	}
1968 
1969 	return &desc->async_tx;
1970 
1971 error:
1972 	xilinx_dma_free_tx_descriptor(chan, desc);
1973 	return NULL;
1974 }
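
/*
 * Usage sketch (illustrative only): a client normally maps a scatterlist
 * and reaches this prep callback through the generic helper.  "dev",
 * "dma_chan_tx", "sgt" and "my_tx_done" are hypothetical.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	int nents;
 *
 *	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *	txd = dmaengine_prep_slave_sg(dma_chan_tx, sgt->sgl, nents,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		txd->callback = my_tx_done;	// completion hook
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(dma_chan_tx);
 *	}
 */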
1975 
1976 /**
1977  * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
1978  * @dchan: DMA channel
1979  * @buf_addr: Physical address of the buffer
1980  * @buf_len: Total length of the cyclic buffers
1981  * @period_len: length of individual cyclic buffer
1982  * @direction: DMA direction
1983  * @flags: transfer ack flags
1984  *
1985  * Return: Async transaction descriptor on success and NULL on failure
1986  */
1987 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1988 	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1989 	size_t period_len, enum dma_transfer_direction direction,
1990 	unsigned long flags)
1991 {
1992 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1993 	struct xilinx_dma_tx_descriptor *desc;
1994 	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1995 	size_t copy, sg_used;
1996 	unsigned int num_periods;
1997 	int i;
1998 	u32 reg;
1999 
2000 	if (!period_len)
2001 		return NULL;
2002 
2003 	num_periods = buf_len / period_len;
2004 
2005 	if (!num_periods)
2006 		return NULL;
2007 
2008 	if (!is_slave_direction(direction))
2009 		return NULL;
2010 
2011 	/* Allocate a transaction descriptor. */
2012 	desc = xilinx_dma_alloc_tx_descriptor(chan);
2013 	if (!desc)
2014 		return NULL;
2015 
2016 	chan->direction = direction;
2017 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2018 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2019 
2020 	chan->buf_idx = 0;
2021 
2022 	for (i = 0; i < num_periods; ++i) {
2023 		sg_used = 0;
2024 
2025 		while (sg_used < period_len) {
2026 			struct xilinx_axidma_desc_hw *hw;
2027 
2028 			/* Get a free segment */
2029 			segment = xilinx_axidma_alloc_tx_segment(chan);
2030 			if (!segment)
2031 				goto error;
2032 
2033 			/*
2034 			 * Calculate the maximum number of bytes to transfer,
2035 			 * making sure it is less than the hw limit
2036 			 */
2037 			copy = min_t(size_t, period_len - sg_used,
2038 				     chan->xdev->max_buffer_len);
2039 			hw = &segment->hw;
2040 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2041 					  period_len * i);
2042 			hw->control = copy;
2043 
2044 			if (prev)
2045 				prev->hw.next_desc = segment->phys;
2046 
2047 			prev = segment;
2048 			sg_used += copy;
2049 
2050 			/*
2051 			 * Insert the segment into the descriptor segments
2052 			 * list.
2053 			 */
2054 			list_add_tail(&segment->node, &desc->segments);
2055 		}
2056 	}
2057 
2058 	head_segment = list_first_entry(&desc->segments,
2059 				   struct xilinx_axidma_tx_segment, node);
2060 	desc->async_tx.phys = head_segment->phys;
2061 
2062 	desc->cyclic = true;
2063 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2064 	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2065 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2066 
2067 	segment = list_last_entry(&desc->segments,
2068 				  struct xilinx_axidma_tx_segment,
2069 				  node);
2070 	segment->hw.next_desc = (u32) head_segment->phys;
2071 
2072 	/* For MEM_TO_DEV, set SOP on the first BD and EOP on the last */
2073 	if (direction == DMA_MEM_TO_DEV) {
2074 		head_segment->hw.control |= XILINX_DMA_BD_SOP;
2075 		segment->hw.control |= XILINX_DMA_BD_EOP;
2076 	}
2077 
2078 	return &desc->async_tx;
2079 
2080 error:
2081 	xilinx_dma_free_tx_descriptor(chan, desc);
2082 	return NULL;
2083 }
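
/*
 * Usage sketch (illustrative only): cyclic mode is the usual setup for a
 * continuous stream; the completion callback then fires once per period.
 * "rx_chan", "buf_phys", "PERIOD_LEN", "NUM_PERIODS" and "my_period_done"
 * are hypothetical.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(rx_chan, buf_phys,
 *					NUM_PERIODS * PERIOD_LEN, PERIOD_LEN,
 *					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_period_done;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(rx_chan);
 */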
2084 
2085 /**
2086  * xilinx_dma_prep_interleaved - prepare a descriptor for a
2087  *	DMA_SLAVE transaction
2088  * @dchan: DMA channel
2089  * @xt: Interleaved template pointer
2090  * @flags: transfer ack flags
2091  *
2092  * Return: Async transaction descriptor on success and NULL on failure
2093  */
2094 static struct dma_async_tx_descriptor *
2095 xilinx_dma_prep_interleaved(struct dma_chan *dchan,
2096 				 struct dma_interleaved_template *xt,
2097 				 unsigned long flags)
2098 {
2099 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2100 	struct xilinx_dma_tx_descriptor *desc;
2101 	struct xilinx_axidma_tx_segment *segment;
2102 	struct xilinx_axidma_desc_hw *hw;
2103 
2104 	if (!is_slave_direction(xt->dir))
2105 		return NULL;
2106 
2107 	if (!xt->numf || !xt->sgl[0].size)
2108 		return NULL;
2109 
2110 	if (xt->frame_size != 1)
2111 		return NULL;
2112 
2113 	/* Allocate a transaction descriptor. */
2114 	desc = xilinx_dma_alloc_tx_descriptor(chan);
2115 	if (!desc)
2116 		return NULL;
2117 
2118 	chan->direction = xt->dir;
2119 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2120 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2121 
2122 	/* Get a free segment */
2123 	segment = xilinx_axidma_alloc_tx_segment(chan);
2124 	if (!segment)
2125 		goto error;
2126 
2127 	hw = &segment->hw;
2128 
2129 	/* Fill in the descriptor */
2130 	if (xt->dir != DMA_MEM_TO_DEV)
2131 		hw->buf_addr = xt->dst_start;
2132 	else
2133 		hw->buf_addr = xt->src_start;
2134 
2135 	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2136 	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2137 			    XILINX_DMA_BD_VSIZE_MASK;
2138 	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2139 			    XILINX_DMA_BD_STRIDE_MASK;
2140 	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2141 
2142 	/*
2143 	 * Insert the segment into the descriptor segments
2144 	 * list.
2145 	 */
2146 	list_add_tail(&segment->node, &desc->segments);
2147 
2149 	segment = list_first_entry(&desc->segments,
2150 				   struct xilinx_axidma_tx_segment, node);
2151 	desc->async_tx.phys = segment->phys;
2152 
2153 	/* For MEM_TO_DEV, set SOP on the first BD and EOP on the last */
2154 	if (xt->dir == DMA_MEM_TO_DEV) {
2155 		segment->hw.control |= XILINX_DMA_BD_SOP;
2156 		segment = list_last_entry(&desc->segments,
2157 					  struct xilinx_axidma_tx_segment,
2158 					  node);
2159 		segment->hw.control |= XILINX_DMA_BD_EOP;
2160 	}
2161 
2162 	return &desc->async_tx;
2163 
2164 error:
2165 	xilinx_dma_free_tx_descriptor(chan, desc);
2166 	return NULL;
2167 }
2168 
2169 /**
2170  * xilinx_dma_terminate_all - Halt the channel and free descriptors
2171  * @dchan: Driver specific DMA Channel pointer
2172  *
2173  * Return: '0' always.
2174  */
2175 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2176 {
2177 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2178 	u32 reg;
2179 	int err;
2180 
2181 	if (!chan->cyclic) {
2182 		err = chan->stop_transfer(chan);
2183 		if (err) {
2184 			dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2185 				chan, dma_ctrl_read(chan,
2186 				XILINX_DMA_REG_DMASR));
2187 			chan->err = true;
2188 		}
2189 	}
2190 
2191 	xilinx_dma_chan_reset(chan);
2192 	/* Remove and free all of the descriptors in the lists */
2193 	xilinx_dma_free_descriptors(chan);
2194 	chan->idle = true;
2195 
2196 	if (chan->cyclic) {
2197 		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2198 		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2199 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2200 		chan->cyclic = false;
2201 	}
2202 
2203 	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2204 		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2205 			     XILINX_CDMA_CR_SGMODE);
2206 
2207 	return 0;
2208 }
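
/*
 * Usage sketch (illustrative only): clients reach this callback through the
 * generic helper when tearing a channel down ("rx_chan" is hypothetical):
 *
 *	dmaengine_terminate_all(rx_chan);
 */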
2209 
2210 /**
2211  * xilinx_vdma_channel_set_config - Configure VDMA channel
2212  * Run-time configuration for AXI VDMA, supports:
2213  * . halt the channel
2214  * . configure interrupt coalescing and inter-packet delay threshold
2215  * . start/stop parking
2216  * . enable genlock
2217  *
2218  * @dchan: DMA channel
2219  * @cfg: VDMA device configuration pointer
2220  *
2221  * Return: '0' on success and failure value on error
2222  */
2223 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2224 					struct xilinx_vdma_config *cfg)
2225 {
2226 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2227 	u32 dmacr;
2228 
2229 	if (cfg->reset)
2230 		return xilinx_dma_chan_reset(chan);
2231 
2232 	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2233 
2234 	chan->config.frm_dly = cfg->frm_dly;
2235 	chan->config.park = cfg->park;
2236 
2237 	/* genlock settings */
2238 	chan->config.gen_lock = cfg->gen_lock;
2239 	chan->config.master = cfg->master;
2240 
2241 	if (cfg->gen_lock && chan->genlock) {
2242 		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2243 		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2244 	}
2245 
2246 	chan->config.frm_cnt_en = cfg->frm_cnt_en;
2247 	chan->config.vflip_en = cfg->vflip_en;
2248 
2249 	if (cfg->park)
2250 		chan->config.park_frm = cfg->park_frm;
2251 	else
2252 		chan->config.park_frm = -1;
2253 
2254 	chan->config.coalesc = cfg->coalesc;
2255 	chan->config.delay = cfg->delay;
2256 
2257 	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2258 		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2259 		chan->config.coalesc = cfg->coalesc;
2260 	}
2261 
2262 	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2263 		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2264 		chan->config.delay = cfg->delay;
2265 	}
2266 
2267 	/* FSync Source selection */
2268 	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2269 	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2270 
2271 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2272 
2273 	return 0;
2274 }
2275 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
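
/*
 * Usage sketch (illustrative only): a video client may tune a VDMA channel
 * before starting transfers; the structure and the exported helper come from
 * <linux/dma/xilinx_dma.h>.  "vdma_chan" is hypothetical.
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_dly = 0,
 *		.gen_lock = 0,
 *		.coalesc = 1,		// interrupt after every frame
 *		.delay = 0,
 *		.park = 0,
 *	};
 *
 *	xilinx_vdma_channel_set_config(vdma_chan, &cfg);
 */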
2276 
2277 /* -----------------------------------------------------------------------------
2278  * Probe and remove
2279  */
2280 
2281 /**
2282  * xilinx_dma_chan_remove - Per Channel remove function
2283  * @chan: Driver specific DMA channel
2284  */
2285 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2286 {
2287 	/* Disable all interrupts */
2288 	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2289 		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2290 
2291 	if (chan->irq > 0)
2292 		free_irq(chan->irq, chan);
2293 
2294 	tasklet_kill(&chan->tasklet);
2295 
2296 	list_del(&chan->common.device_node);
2297 }
2298 
2299 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2300 			    struct clk **tx_clk, struct clk **rx_clk,
2301 			    struct clk **sg_clk, struct clk **tmp_clk)
2302 {
2303 	int err;
2304 
2305 	*tmp_clk = NULL;
2306 
2307 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2308 	if (IS_ERR(*axi_clk)) {
2309 		err = PTR_ERR(*axi_clk);
2310 		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2311 		return err;
2312 	}
2313 
2314 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2315 	if (IS_ERR(*tx_clk))
2316 		*tx_clk = NULL;
2317 
2318 	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2319 	if (IS_ERR(*rx_clk))
2320 		*rx_clk = NULL;
2321 
2322 	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2323 	if (IS_ERR(*sg_clk))
2324 		*sg_clk = NULL;
2325 
2326 	err = clk_prepare_enable(*axi_clk);
2327 	if (err) {
2328 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2329 		return err;
2330 	}
2331 
2332 	err = clk_prepare_enable(*tx_clk);
2333 	if (err) {
2334 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2335 		goto err_disable_axiclk;
2336 	}
2337 
2338 	err = clk_prepare_enable(*rx_clk);
2339 	if (err) {
2340 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2341 		goto err_disable_txclk;
2342 	}
2343 
2344 	err = clk_prepare_enable(*sg_clk);
2345 	if (err) {
2346 		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2347 		goto err_disable_rxclk;
2348 	}
2349 
2350 	return 0;
2351 
2352 err_disable_rxclk:
2353 	clk_disable_unprepare(*rx_clk);
2354 err_disable_txclk:
2355 	clk_disable_unprepare(*tx_clk);
2356 err_disable_axiclk:
2357 	clk_disable_unprepare(*axi_clk);
2358 
2359 	return err;
2360 }
2361 
2362 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2363 			    struct clk **dev_clk, struct clk **tmp_clk,
2364 			    struct clk **tmp1_clk, struct clk **tmp2_clk)
2365 {
2366 	int err;
2367 
2368 	*tmp_clk = NULL;
2369 	*tmp1_clk = NULL;
2370 	*tmp2_clk = NULL;
2371 
2372 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2373 	if (IS_ERR(*axi_clk)) {
2374 		err = PTR_ERR(*axi_clk);
2375 		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
2376 		return err;
2377 	}
2378 
2379 	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2380 	if (IS_ERR(*dev_clk)) {
2381 		err = PTR_ERR(*dev_clk);
2382 		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
2383 		return err;
2384 	}
2385 
2386 	err = clk_prepare_enable(*axi_clk);
2387 	if (err) {
2388 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2389 		return err;
2390 	}
2391 
2392 	err = clk_prepare_enable(*dev_clk);
2393 	if (err) {
2394 		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2395 		goto err_disable_axiclk;
2396 	}
2397 
2398 	return 0;
2399 
2400 err_disable_axiclk:
2401 	clk_disable_unprepare(*axi_clk);
2402 
2403 	return err;
2404 }
2405 
2406 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2407 			    struct clk **tx_clk, struct clk **txs_clk,
2408 			    struct clk **rx_clk, struct clk **rxs_clk)
2409 {
2410 	int err;
2411 
2412 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2413 	if (IS_ERR(*axi_clk)) {
2414 		err = PTR_ERR(*axi_clk);
2415 		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2416 		return err;
2417 	}
2418 
2419 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2420 	if (IS_ERR(*tx_clk))
2421 		*tx_clk = NULL;
2422 
2423 	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2424 	if (IS_ERR(*txs_clk))
2425 		*txs_clk = NULL;
2426 
2427 	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2428 	if (IS_ERR(*rx_clk))
2429 		*rx_clk = NULL;
2430 
2431 	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2432 	if (IS_ERR(*rxs_clk))
2433 		*rxs_clk = NULL;
2434 
2435 	err = clk_prepare_enable(*axi_clk);
2436 	if (err) {
2437 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2438 		return err;
2439 	}
2440 
2441 	err = clk_prepare_enable(*tx_clk);
2442 	if (err) {
2443 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2444 		goto err_disable_axiclk;
2445 	}
2446 
2447 	err = clk_prepare_enable(*txs_clk);
2448 	if (err) {
2449 		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2450 		goto err_disable_txclk;
2451 	}
2452 
2453 	err = clk_prepare_enable(*rx_clk);
2454 	if (err) {
2455 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2456 		goto err_disable_txsclk;
2457 	}
2458 
2459 	err = clk_prepare_enable(*rxs_clk);
2460 	if (err) {
2461 		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2462 		goto err_disable_rxclk;
2463 	}
2464 
2465 	return 0;
2466 
2467 err_disable_rxclk:
2468 	clk_disable_unprepare(*rx_clk);
2469 err_disable_txsclk:
2470 	clk_disable_unprepare(*txs_clk);
2471 err_disable_txclk:
2472 	clk_disable_unprepare(*tx_clk);
2473 err_disable_axiclk:
2474 	clk_disable_unprepare(*axi_clk);
2475 
2476 	return err;
2477 }
2478 
2479 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2480 {
2481 	clk_disable_unprepare(xdev->rxs_clk);
2482 	clk_disable_unprepare(xdev->rx_clk);
2483 	clk_disable_unprepare(xdev->txs_clk);
2484 	clk_disable_unprepare(xdev->tx_clk);
2485 	clk_disable_unprepare(xdev->axi_clk);
2486 }
2487 
2488 /**
2489  * xilinx_dma_chan_probe - Per Channel Probing
2490  * It gets the channel features from the device tree entry and
2491  * initializes the special channel handling routines
2492  *
2493  * @xdev: Driver specific device structure
2494  * @node: Device node
2495  * @chan_id: DMA Channel id
2496  *
2497  * Return: '0' on success and failure value on error
2498  */
2499 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2500 				  struct device_node *node, int chan_id)
2501 {
2502 	struct xilinx_dma_chan *chan;
2503 	bool has_dre = false;
2504 	u32 value, width;
2505 	int err;
2506 
2507 	/* Allocate and initialize the channel structure */
2508 	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2509 	if (!chan)
2510 		return -ENOMEM;
2511 
2512 	chan->dev = xdev->dev;
2513 	chan->xdev = xdev;
2514 	chan->has_sg = xdev->has_sg;
2515 	chan->desc_pendingcount = 0x0;
2516 	chan->ext_addr = xdev->ext_addr;
2517 	/* This flag ensures that descriptors are not submitted while the
2518 	 * DMA engine is still busy. It is used to avoid polling a bit in
2519 	 * the status register to learn the DMA state in the driver hot
2520 	 * path.
2521 	 */
2522 	chan->idle = true;
2523 
2524 	spin_lock_init(&chan->lock);
2525 	INIT_LIST_HEAD(&chan->pending_list);
2526 	INIT_LIST_HEAD(&chan->done_list);
2527 	INIT_LIST_HEAD(&chan->active_list);
2528 	INIT_LIST_HEAD(&chan->free_seg_list);
2529 
2530 	/* Retrieve the channel properties from the device tree */
2531 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
2532 
2533 	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2534 
2535 	err = of_property_read_u32(node, "xlnx,datawidth", &value);
2536 	if (err) {
2537 		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2538 		return err;
2539 	}
2540 	width = value >> 3; /* Convert bits to bytes */
2541 
2542 	/* If data width is greater than 8 bytes, DRE is not in hw */
2543 	if (width > 8)
2544 		has_dre = false;
2545 
2546 	if (!has_dre)
2547 		xdev->common.copy_align = fls(width - 1);
2548 
2549 	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2550 	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2551 	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2552 		chan->direction = DMA_MEM_TO_DEV;
2553 		chan->id = chan_id;
2554 		chan->tdest = chan_id;
2555 		xdev->common.directions = BIT(DMA_MEM_TO_DEV);
2556 
2557 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2558 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2559 			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2560 			chan->config.park = 1;
2561 
2562 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2563 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2564 				chan->flush_on_fsync = true;
2565 		}
2566 	} else if (of_device_is_compatible(node,
2567 					   "xlnx,axi-vdma-s2mm-channel") ||
2568 		   of_device_is_compatible(node,
2569 					   "xlnx,axi-dma-s2mm-channel")) {
2570 		chan->direction = DMA_DEV_TO_MEM;
2571 		chan->id = chan_id;
2572 		chan->tdest = chan_id - xdev->nr_channels;
2573 		xdev->common.directions |= BIT(DMA_DEV_TO_MEM);
2574 		chan->has_vflip = of_property_read_bool(node,
2575 					"xlnx,enable-vert-flip");
2576 		if (chan->has_vflip) {
2577 			chan->config.vflip_en = dma_read(chan,
2578 				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2579 				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2580 		}
2581 
2582 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2583 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2584 			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2585 			chan->config.park = 1;
2586 
2587 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2588 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2589 				chan->flush_on_fsync = true;
2590 		}
2591 	} else {
2592 		dev_err(xdev->dev, "Invalid channel compatible node\n");
2593 		return -EINVAL;
2594 	}
2595 
2596 	/* Request the interrupt */
2597 	chan->irq = irq_of_parse_and_map(node, 0);
2598 	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2599 			  "xilinx-dma-controller", chan);
2600 	if (err) {
2601 		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2602 		return err;
2603 	}
2604 
2605 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2606 		chan->start_transfer = xilinx_dma_start_transfer;
2607 		chan->stop_transfer = xilinx_dma_stop_transfer;
2608 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2609 		chan->start_transfer = xilinx_cdma_start_transfer;
2610 		chan->stop_transfer = xilinx_cdma_stop_transfer;
2611 	} else {
2612 		chan->start_transfer = xilinx_vdma_start_transfer;
2613 		chan->stop_transfer = xilinx_dma_stop_transfer;
2614 	}
2615 
2616 	/* Initialize the tasklet */
2617 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
2618 			(unsigned long)chan);
2619 
2620 	/*
2621 	 * Initialize the DMA channel and add it to the DMA engine channels
2622 	 * list.
2623 	 */
2624 	chan->common.device = &xdev->common;
2625 
2626 	list_add_tail(&chan->common.device_node, &xdev->common.channels);
2627 	xdev->chan[chan->id] = chan;
2628 
2629 	/* Reset the channel */
2630 	err = xilinx_dma_chan_reset(chan);
2631 	if (err < 0) {
2632 		dev_err(xdev->dev, "Reset channel failed\n");
2633 		return err;
2634 	}
2635 
2636 	return 0;
2637 }
2638 
2639 /**
2640  * xilinx_dma_child_probe - Per child node probe
2641  * It gets the number of dma-channels per child node from
2642  * the device tree and initializes all the channels.
2643  *
2644  * @xdev: Driver specific device structure
2645  * @node: Device node
2646  *
2647  * Return: 0 always.
2648  */
2649 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2650 				    struct device_node *node)
2651 {
2652 	int ret, i, nr_channels = 1;
2653 
2654 	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2655 	if ((ret < 0) && xdev->mcdma)
2656 		dev_warn(xdev->dev, "missing dma-channels property\n");
2657 
2658 	for (i = 0; i < nr_channels; i++)
2659 		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2660 
2661 	xdev->nr_channels += nr_channels;
2662 
2663 	return 0;
2664 }
2665 
2666 /**
2667  * of_dma_xilinx_xlate - Translation function
2668  * @dma_spec: Pointer to DMA specifier as found in the device tree
2669  * @ofdma: Pointer to DMA controller data
2670  *
2671  * Return: DMA channel pointer on success and NULL on error
2672  */
2673 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2674 						struct of_dma *ofdma)
2675 {
2676 	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2677 	int chan_id = dma_spec->args[0];
2678 
2679 	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
2680 		return NULL;
2681 
2682 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2683 }
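
/*
 * Usage sketch (illustrative only): once the controller is registered with
 * of_dma (see xilinx_dma_probe() below), a client whose device-tree node
 * carries matching "dmas"/"dma-names" properties can request a channel by
 * name; "client_pdev" and the name "rx" are hypothetical.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&client_pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */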
2684 
2685 static const struct xilinx_dma_config axidma_config = {
2686 	.dmatype = XDMA_TYPE_AXIDMA,
2687 	.clk_init = axidma_clk_init,
2688 };
2689 
2690 static const struct xilinx_dma_config axicdma_config = {
2691 	.dmatype = XDMA_TYPE_CDMA,
2692 	.clk_init = axicdma_clk_init,
2693 };
2694 
2695 static const struct xilinx_dma_config axivdma_config = {
2696 	.dmatype = XDMA_TYPE_VDMA,
2697 	.clk_init = axivdma_clk_init,
2698 };
2699 
2700 static const struct of_device_id xilinx_dma_of_ids[] = {
2701 	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2702 	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2703 	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2704 	{}
2705 };
2706 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2707 
2708 /**
2709  * xilinx_dma_probe - Driver probe function
2710  * @pdev: Pointer to the platform_device structure
2711  *
2712  * Return: '0' on success and failure value on error
2713  */
2714 static int xilinx_dma_probe(struct platform_device *pdev)
2715 {
2716 	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2717 			struct clk **, struct clk **, struct clk **)
2718 					= axivdma_clk_init;
2719 	struct device_node *node = pdev->dev.of_node;
2720 	struct xilinx_dma_device *xdev;
2721 	struct device_node *child, *np = pdev->dev.of_node;
2722 	struct resource *io;
2723 	u32 num_frames, addr_width, len_width;
2724 	int i, err;
2725 
2726 	/* Allocate and initialize the DMA engine structure */
2727 	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2728 	if (!xdev)
2729 		return -ENOMEM;
2730 
2731 	xdev->dev = &pdev->dev;
2732 	if (np) {
2733 		const struct of_device_id *match;
2734 
2735 		match = of_match_node(xilinx_dma_of_ids, np);
2736 		if (match && match->data) {
2737 			xdev->dma_config = match->data;
2738 			clk_init = xdev->dma_config->clk_init;
2739 		}
2740 	}
2741 
2742 	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2743 		       &xdev->rx_clk, &xdev->rxs_clk);
2744 	if (err)
2745 		return err;
2746 
2747 	/* Request and map I/O memory */
2748 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2749 	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
2750 	if (IS_ERR(xdev->regs))
2751 		return PTR_ERR(xdev->regs);
2752 
2753 	/* Retrieve the DMA engine properties from the device tree */
2754 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
2755 	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
2756 
2757 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2758 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
2759 		if (!of_property_read_u32(node, "xlnx,sg-length-width",
2760 					  &len_width)) {
2761 			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
2762 			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
2763 				dev_warn(xdev->dev,
2764 					 "invalid xlnx,sg-length-width property value, using default width\n");
2765 			} else {
2766 				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
2767 					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
2768 
2769 				xdev->max_buffer_len = GENMASK(len_width - 1, 0);
2770 			}
2771 		}
2772 	}
2773 
2774 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2775 		err = of_property_read_u32(node, "xlnx,num-fstores",
2776 					   &num_frames);
2777 		if (err < 0) {
2778 			dev_err(xdev->dev,
2779 				"missing xlnx,num-fstores property\n");
2780 			return err;
2781 		}
2782 
2783 		err = of_property_read_u32(node, "xlnx,flush-fsync",
2784 					   &xdev->flush_on_fsync);
2785 		if (err < 0)
2786 			dev_warn(xdev->dev,
2787 				 "missing xlnx,flush-fsync property\n");
2788 	}
2789 
2790 	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
2791 	if (err < 0)
2792 		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2793 
2794 	if (addr_width > 32)
2795 		xdev->ext_addr = true;
2796 	else
2797 		xdev->ext_addr = false;
2798 
2799 	/* Set the dma mask bits */
2800 	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
2801 
2802 	/* Initialize the DMA engine */
2803 	xdev->common.dev = &pdev->dev;
2804 
2805 	INIT_LIST_HEAD(&xdev->common.channels);
2806 	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
2807 		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2808 		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2809 	}
2810 
2811 	xdev->common.dst_addr_widths = BIT(addr_width / 8);
2812 	xdev->common.src_addr_widths = BIT(addr_width / 8);
2813 	xdev->common.device_alloc_chan_resources =
2814 				xilinx_dma_alloc_chan_resources;
2815 	xdev->common.device_free_chan_resources =
2816 				xilinx_dma_free_chan_resources;
2817 	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2818 	xdev->common.device_tx_status = xilinx_dma_tx_status;
2819 	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2820 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2821 		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
2822 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2823 		xdev->common.device_prep_dma_cyclic =
2824 					  xilinx_dma_prep_dma_cyclic;
2825 		xdev->common.device_prep_interleaved_dma =
2826 					xilinx_dma_prep_interleaved;
2827 		/* Residue calculation is supported only by AXI DMA */
2828 		xdev->common.residue_granularity =
2829 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
2830 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2831 		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2832 		dma_cap_set(DMA_SG, xdev->common.cap_mask);
2833 		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
2834 		xdev->common.device_prep_dma_sg = xilinx_cdma_prep_sg;
2835 	} else {
2836 		xdev->common.device_prep_interleaved_dma =
2837 				xilinx_vdma_dma_prep_interleaved;
2838 	}
2839 
2840 	platform_set_drvdata(pdev, xdev);
2841 
2842 	/* Initialize the channels */
2843 	for_each_child_of_node(node, child) {
2844 		err = xilinx_dma_child_probe(xdev, child);
2845 		if (err < 0)
2846 			goto disable_clks;
2847 	}
2848 
2849 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2850 		for (i = 0; i < xdev->nr_channels; i++)
2851 			if (xdev->chan[i])
2852 				xdev->chan[i]->num_frms = num_frames;
2853 	}
2854 
2855 	/* Register the DMA engine with the core */
2856 	dma_async_device_register(&xdev->common);
2857 
2858 	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2859 					 xdev);
2860 	if (err < 0) {
2861 		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2862 		dma_async_device_unregister(&xdev->common);
2863 		goto error;
2864 	}
2865 
2866 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2867 		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
2868 	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2869 		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
2870 	else
2871 		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2872 
2873 	return 0;
2874 
2875 disable_clks:
2876 	xdma_disable_allclks(xdev);
2877 error:
2878 	for (i = 0; i < xdev->nr_channels; i++)
2879 		if (xdev->chan[i])
2880 			xilinx_dma_chan_remove(xdev->chan[i]);
2881 
2882 	return err;
2883 }
2884 
2885 /**
2886  * xilinx_dma_remove - Driver remove function
2887  * @pdev: Pointer to the platform_device structure
2888  *
2889  * Return: Always '0'
2890  */
2891 static int xilinx_dma_remove(struct platform_device *pdev)
2892 {
2893 	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2894 	int i;
2895 
2896 	of_dma_controller_free(pdev->dev.of_node);
2897 
2898 	dma_async_device_unregister(&xdev->common);
2899 
2900 	for (i = 0; i < xdev->nr_channels; i++)
2901 		if (xdev->chan[i])
2902 			xilinx_dma_chan_remove(xdev->chan[i]);
2903 
2904 	xdma_disable_allclks(xdev);
2905 
2906 	return 0;
2907 }
2908 
2909 static struct platform_driver xilinx_vdma_driver = {
2910 	.driver = {
2911 		.name = "xilinx-vdma",
2912 		.of_match_table = xilinx_dma_of_ids,
2913 	},
2914 	.probe = xilinx_dma_probe,
2915 	.remove = xilinx_dma_remove,
2916 };
2917 
2918 module_platform_driver(xilinx_vdma_driver);
2919 
2920 MODULE_AUTHOR("Xilinx, Inc. and Xianjun Jiao");
2921 MODULE_DESCRIPTION("Xilinx VDMA driver");
2922 MODULE_LICENSE("GPL v2");
2923