/*
 * DMA driver for Xilinx Video DMA Engine
 * SPDX-FileCopyrightText: Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved
 * Based on the Freescale DMA driver
 * Modified by Xianjun Jiao
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

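/*
 * Usage sketch (illustrative only, not part of this driver): a consumer
 * normally drives one of these channels through the generic dmaengine
 * slave API rather than touching this file.  The channel name "rx", the
 * "dev" pointer, the mapped buffer and the callback below are placeholders
 * that depend on the client's device tree "dma-names" binding and on how
 * it maps its buffers.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_DEV_TO_MEM,
 *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	desc->callback = my_done_callback;	   (hypothetical callback)
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
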
/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET	0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET	0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET	0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET	0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR		0x0000
#define XILINX_DMA_DMACR_DELAY_MAX	0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT	24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ	BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ	BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ	BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT	8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT	5
#define XILINX_DMA_DMACR_FRAMECNT_EN	BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN	BIT(3)
#define XILINX_DMA_DMACR_RESET		BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN	BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP	BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK	GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR		0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR	BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ	BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ	BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ	BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR	BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR	BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR	BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR	BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR	BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR	BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR	BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR	BIT(4)
#define XILINX_DMA_DMASR_IDLE		BIT(1)
#define XILINX_DMA_DMASR_HALTED		BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK	GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC		0x0008
#define XILINX_DMA_REG_TAILDESC		0x0010
#define XILINX_DMA_REG_REG_INDEX	0x0014
#define XILINX_DMA_REG_FRMSTORE		0x0018
#define XILINX_DMA_REG_THRESHOLD	0x001c
#define XILINX_DMA_REG_FRMPTR_STS	0x0024
#define XILINX_DMA_REG_PARK_PTR		0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION	0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE		0x0000
#define XILINX_DMA_REG_HSIZE		0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE	0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets*/
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 * @buf_idx: Buffer index, incremented by one on each IRQ; in cyclic mode it
 *	     is returned to the client as the residue via
 *	     device_tx_status/xilinx_dma_tx_status
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
	/*
	 * Incremented by one on each IRQ; in cyclic mode this index is
	 * reported back as the residue via
	 * device_tx_status/xilinx_dma_tx_status.
	 */
	u32 buf_idx;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
	u32 max_buffer_len;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver may need to write to a register offset that is not
 * 64-bit aligned (e.g. 0x5c), the value is written as two separate 32-bit
 * writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}

static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free Memory that is allocated for cyclic DMA Mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_zalloc_coherent(chan->dev,
						  sizeof(*chan->seg_v) *
						  XILINX_DMA_NUM_DESCS,
						  &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain,
		 * so allocate a desc segment during channel allocation for
		 * programming the tail descriptor.
		 */
		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
							 sizeof(*chan->cyclic_seg_v),
							 &chan->cyclic_seg_p,
							 GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
					  XILINX_DMA_NUM_DESCS, chan->seg_v,
					  chan->seg_p);
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;

		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
				lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
					      ((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
				upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
					      ((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_cdma_tx_segment),
						  __alignof__(struct xilinx_cdma_tx_segment),
						  0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_vdma_tx_segment),
						  __alignof__(struct xilinx_vdma_tx_segment),
						  0);
	}

	if (!chan->desc_pool &&
	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA, resetting one channel also resets the other
		 * channel, so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   chan->xdev->max_buffer_len;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		if (chan->cyclic)
			dma_set_residue(txstate, chan->buf_idx);
		else
			dma_set_residue(txstate, chan->residue);
	}

	return ret;
}

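/*
 * Note on the cyclic "residue": in cyclic mode this modified driver reports
 * chan->buf_idx (a running count of completed periods) through the residue
 * field instead of a byte count.  A client could poll it roughly as sketched
 * below; this is illustrative only, and "cookie" is whatever
 * dmaengine_submit() returned for the cyclic descriptor.
 *
 *	struct dma_tx_state state;
 *
 *	dmaengine_tx_status(chan, cookie, &state);
 *	pr_info("periods completed so far: %u\n", state.residue);
 */
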
/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg, j;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	if (chan->has_vflip) {
		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		reg |= config->vflip_en;
		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
			  reg);
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	j = chan->desc_submitcount;
	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
	if (chan->direction == DMA_MEM_TO_DEV) {
		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
	} else {
		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
	}
	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);

		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	}

	chan->idle = false;
}

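/*
 * The frame-count, parking and vertical-flip behaviour programmed above comes
 * from chan->config, which a client fills in through
 * xilinx_vdma_channel_set_config() (declared in include/linux/dma/xilinx_dma.h)
 * before issuing transfers.  A rough sketch, with placeholder values:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,
 *		.coalesc = 1,
 *		.park = 0,
 *	};
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */
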
/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
			    XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, (dma_addr_t)
			     ((u64)hw->src_addr_msb << 32 | hw->src_addr));
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, (dma_addr_t)
			     ((u64)hw->dest_addr_msb << 32 | hw->dest_addr));

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment;
	u32 reg;

	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
		       XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					       XILINX_DMA_MCRX_CDESC(chan->tdest),
					       head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					       XILINX_DMA_MCRX_TDESC(chan->tdest),
					       tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;
		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan : xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}

/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;
	chan->idle = true;
	chan->desc_submitcount = 0;

	return err;
}

/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it.
		 * Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * so take care not to write 1 to any other error bits.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->idle = true;
		chan->start_transfer(chan);
		chan->buf_idx++;
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						    struct xilinx_cdma_tx_segment,
						    node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
						      struct xilinx_axidma_tx_segment,
						      node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}

/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}

	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
		     XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
		      XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

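/*
 * Illustrative VDMA client sketch (not part of this driver): to push one
 * frame through an MM2S channel, a client could fill a
 * dma_interleaved_template and hand it to dmaengine_prep_interleaved_dma().
 * hres, vres, stride and frame_dma are placeholders for the client's own
 * frame geometry and DMA address; note that frame_size must be 1 here.
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = frame_dma;
 *	xt->numf = vres;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = hres;
 *	xt->sgl[0].icg = stride - hres;
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */
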
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > chan->xdev->max_buffer_len)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

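/*
 * Illustrative CDMA client sketch (not part of this driver): a memory-to-
 * memory copy is normally requested through the generic dmaengine memcpy
 * helper, which ends up in xilinx_cdma_prep_memcpy() above.  dst, src and
 * len are placeholders for DMA addresses and a size owned by the client.
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (desc) {
 *		cookie = dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */
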

/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     chan->xdev->max_buffer_len);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For DMA_MEM_TO_DEV: set SOP on the first BD and EOP on the last */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
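
/*
 * Illustrative client-side sketch (not part of this driver, not compiled):
 * receiving a buffer from the AXI4-Stream side (S2MM) through
 * xilinx_dma_prep_slave_sg(). The "rx" channel name, axidma_rx_example()
 * and the single-entry scatterlist are hypothetical; a real client would
 * typically use several entries and a completion callback.
 */
#if 0
static int axidma_rx_example(struct device *dev, void *buf, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;
	struct dma_chan *chan;
	int ret = 0;

	chan = dma_request_chan(dev, "rx");	/* from dmas/dma-names in DT */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, DMA_FROM_DEVICE)) {
		ret = -EIO;
		goto out;
	}

	/* Ends up in xilinx_dma_prep_slave_sg() with direction DEV_TO_MEM */
	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto unmap;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for the transfer callback / poll the tx status here ... */

unmap:
	dma_unmap_sg(chan->device->dev, &sg, 1, DMA_FROM_DEVICE);
out:
	dma_release_channel(chan);
	return ret;
}
#endif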

/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	chan->buf_idx = 0;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, period_len - sg_used,
				     chan->xdev->max_buffer_len);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32)head_segment->phys;

	/* For DMA_MEM_TO_DEV: set SOP on the first BD and EOP on the last */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
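
/*
 * Illustrative client-side sketch (not part of this driver, not compiled):
 * a cyclic S2MM capture split into periods, as consumed by
 * xilinx_dma_prep_dma_cyclic(). axidma_cyclic_example(), the "rx" channel
 * name and the 4 x 64 KiB geometry are hypothetical; the callback is invoked
 * as periods complete and would normally hand each finished period on.
 */
#if 0
static void axidma_period_done(void *arg)
{
	/* One period of the ring buffer has been filled */
}

static int axidma_cyclic_example(struct device *dev)
{
	const size_t period_len = 64 * 1024, buf_len = 4 * 64 * 1024;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_addr_t buf_phys;
	void *buf;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	buf = dma_alloc_coherent(dev, buf_len, &buf_phys, GFP_KERNEL);
	if (!buf) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx)
		goto err;

	tx->callback = axidma_period_done;
	tx->callback_param = NULL;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* ... capture runs until dmaengine_terminate_all(chan) ... */
	return 0;

err:
	dma_free_coherent(dev, buf_len, buf, buf_phys);
	dma_release_channel(chan);
	return -EIO;
}
#endif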

/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for an interleaved
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			   XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For DMA_MEM_TO_DEV: set SOP on the first BD and EOP on the last */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
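
/*
 * Illustrative client-side sketch (not part of this driver, not compiled):
 * building the single-frame interleaved template accepted by
 * xilinx_dma_prep_interleaved() above (frame_size must be 1). The 1280x720
 * frame geometry, the 4096-byte line pitch and axidma_interleaved_example()
 * are hypothetical. The prep routine above copies everything it needs from
 * the template, so it can be freed right after the call.
 */
#if 0
static struct dma_async_tx_descriptor *
axidma_interleaved_example(struct dma_chan *chan, dma_addr_t frame_phys)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	const size_t bytes_per_line = 1280 * 3;	/* active bytes per line */
	const size_t stride = 4096;		/* line pitch in memory */

	/* One data_chunk is appended to the flexible sgl[] array */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = frame_phys;
	xt->numf = 720;			/* vsize: number of lines */
	xt->frame_size = 1;		/* this driver only accepts one chunk */
	xt->sgl[0].size = bytes_per_line;	/* hsize */
	xt->sgl[0].icg = stride - bytes_per_line;

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);
	return tx;
}
#endif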

/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (!chan->cyclic) {
		err = chan->stop_transfer(chan);
		if (err) {
			dev_err(chan->dev, "Cannot stop channel %p: %x\n",
				chan, dma_ctrl_read(chan,
						    XILINX_DMA_REG_DMASR));
			chan->err = true;
		}
	}

	xilinx_dma_chan_reset(chan);
	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);
	chan->idle = true;

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* genlock settings */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	chan->config.vflip_en = cfg->vflip_en;

	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
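
/*
 * Illustrative caller sketch (not part of this driver, not compiled): a video
 * client tuning a VDMA channel through the exported
 * xilinx_vdma_channel_set_config() before starting frame transfers. The
 * struct xilinx_vdma_config comes from <linux/dma/xilinx_dma.h>, which this
 * file already includes; the values below are hypothetical examples.
 */
#if 0
static int vdma_configure_example(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = { 0 };

	cfg.frm_dly = 0;	/* no frame delay between channels */
	cfg.gen_lock = 0;	/* do not enable genlock synchronisation */
	cfg.park = 0;		/* circular mode, do not park on one frame */
	cfg.coalesc = 1;	/* interrupt after every completed frame */
	cfg.delay = 0;		/* disable the delay-timer interrupt */
	cfg.reset = 0;		/* configure only, do not reset the channel */

	return xilinx_vdma_channel_set_config(chan, &cfg);
}
#endif
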
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
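
/*
 * Hedged aside (not part of the original driver): the clock helpers above
 * treat every devm_clk_get() failure on an optional clock as "clock not
 * wired". A possible refinement is to keep propagating -EPROBE_DEFER so the
 * probe is retried once the clock provider shows up, for example:
 *
 *	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
 *	if (IS_ERR(*tx_clk)) {
 *		if (PTR_ERR(*tx_clk) == -EPROBE_DEFER)
 *			return -EPROBE_DEFER;
 *		*tx_clk = NULL;		// clock genuinely absent
 *	}
 *
 * This is only a sketch; whether it is desirable depends on how the optional
 * stream clocks are described in the device tree. clk_prepare_enable() and
 * clk_disable_unprepare() already accept a NULL clock, so the rest of the
 * helpers would be unchanged.
 */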
"m_axi_s2mm_aclk"); 2426 if (IS_ERR(*rx_clk)) 2427 *rx_clk = NULL; 2428 2429 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); 2430 if (IS_ERR(*rxs_clk)) 2431 *rxs_clk = NULL; 2432 2433 err = clk_prepare_enable(*axi_clk); 2434 if (err) { 2435 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); 2436 return err; 2437 } 2438 2439 err = clk_prepare_enable(*tx_clk); 2440 if (err) { 2441 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 2442 goto err_disable_axiclk; 2443 } 2444 2445 err = clk_prepare_enable(*txs_clk); 2446 if (err) { 2447 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); 2448 goto err_disable_txclk; 2449 } 2450 2451 err = clk_prepare_enable(*rx_clk); 2452 if (err) { 2453 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 2454 goto err_disable_txsclk; 2455 } 2456 2457 err = clk_prepare_enable(*rxs_clk); 2458 if (err) { 2459 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); 2460 goto err_disable_rxclk; 2461 } 2462 2463 return 0; 2464 2465 err_disable_rxclk: 2466 clk_disable_unprepare(*rx_clk); 2467 err_disable_txsclk: 2468 clk_disable_unprepare(*txs_clk); 2469 err_disable_txclk: 2470 clk_disable_unprepare(*tx_clk); 2471 err_disable_axiclk: 2472 clk_disable_unprepare(*axi_clk); 2473 2474 return err; 2475 } 2476 2477 static void xdma_disable_allclks(struct xilinx_dma_device *xdev) 2478 { 2479 clk_disable_unprepare(xdev->rxs_clk); 2480 clk_disable_unprepare(xdev->rx_clk); 2481 clk_disable_unprepare(xdev->txs_clk); 2482 clk_disable_unprepare(xdev->tx_clk); 2483 clk_disable_unprepare(xdev->axi_clk); 2484 } 2485 2486 /** 2487 * xilinx_dma_chan_probe - Per Channel Probing 2488 * It get channel features from the device tree entry and 2489 * initialize special channel handling routines 2490 * 2491 * @xdev: Driver specific device structure 2492 * @node: Device node 2493 * @chan_id: DMA Channel id 2494 * 2495 * Return: '0' on success and failure value on error 2496 */ 2497 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, 2498 struct device_node *node, int chan_id) 2499 { 2500 struct xilinx_dma_chan *chan; 2501 bool has_dre = false; 2502 u32 value, width; 2503 int err; 2504 2505 /* Allocate and initialize the channel structure */ 2506 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); 2507 if (!chan) 2508 return -ENOMEM; 2509 2510 chan->dev = xdev->dev; 2511 chan->xdev = xdev; 2512 chan->has_sg = xdev->has_sg; 2513 chan->desc_pendingcount = 0x0; 2514 chan->ext_addr = xdev->ext_addr; 2515 /* This variable ensures that descriptors are not 2516 * Submitted when dma engine is in progress. This variable is 2517 * Added to avoid polling for a bit in the status register to 2518 * Know dma state in the driver hot path. 
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	/*
	 * Without DRE, buffers must be aligned to the datapath width;
	 * copy_align is the log2 of that alignment, e.g. a 32-bit stream
	 * (width = 4 bytes) gives copy_align = 2.
	 */
	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;
		xdev->common.directions = BIT(DMA_MEM_TO_DEV);

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		xdev->common.directions |= BIT(DMA_DEV_TO_MEM);
		chan->has_vflip = of_property_read_bool(node,
							"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: 0 always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
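
/*
 * Illustrative consumer lookup (not part of this driver, not compiled): with
 * #dma-cells = <1>, the single specifier cell is the channel index checked in
 * of_dma_xilinx_xlate() above. A client node would reference the engine as
 * dmas = <&axi_dma 0>; dma-names = "tx"; and request it as shown below.
 * The "tx" name is hypothetical and must match the client's dma-names entry.
 */
#if 0
static struct dma_chan *axidma_get_tx_chan(struct device *dev)
{
	/* Resolved by the dmaengine core through of_dma_xilinx_xlate() */
	return dma_request_chan(dev, "tx");
}
#endif
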
/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
			= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value, using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");

				xdev->max_buffer_len = GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.dst_addr_widths = BIT(addr_width / 8);
	xdev->common.src_addr_widths = BIT(addr_width / 8);
	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					  xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		dma_cap_set(DMA_SG, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
		xdev->common.device_prep_dma_sg = xilinx_cdma_prep_sg;
	} else {
		xdev->common.device_prep_interleaved_dma =
					  xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
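
/*
 * For orientation only (not a binding document): a device-tree fragment of
 * the shape this probe routine parses for the AXI DMA flavour. Addresses,
 * interrupt specifiers, clock phandles and widths are illustrative;
 * authoritative bindings live in Documentation/devicetree/bindings.
 *
 *	axi_dma_0: dma@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		reg = <0x40400000 0x10000>;
 *		#dma-cells = <1>;
 *		clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
 *			      "m_axi_s2mm_aclk", "m_axi_sg_aclk";
 *		clocks = <&clkc 15>, <&clkc 15>, <&clkc 15>, <&clkc 15>;
 *		xlnx,addrwidth = <0x20>;
 *		xlnx,include-sg;
 *
 *		dma-channel@40400000 {
 *			compatible = "xlnx,axi-dma-mm2s-channel";
 *			interrupts = <0 29 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *		dma-channel@40400030 {
 *			compatible = "xlnx,axi-dma-s2mm-channel";
 *			interrupts = <0 30 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *	};
 */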

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc. and Xianjun Jiao");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");