Lines matching +full:dma +full:-maxburst (drivers/dma/sun4i-dma.c, Allwinner sun4i DMA driver)

// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/dma-mapping.h>

#include "virt-dma.h"
/** Common macros to normal and dedicated DMA registers **/

/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */

/** Normal DMA register layout **/

/* Normal DMA source/destination address mode values */

/* Normal DMA configuration register layout */

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */

/* Dedicated DMA source/destination data request type values */

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)
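Note the (((n) - 1) << shift) encoding: a value of n is stored as n - 1, so a block size or wait count of 1 encodes as 0. A hypothetical composition of a parameter word from these macros (the values are illustrative only, not taken from the driver):

	/* Illustrative: 1-unit data blocks, 2 wait cycles in each direction */
	u32 para = SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |
		   SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |
		   SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |
		   SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2);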
/** DMA register offsets **/

/* Normal DMA register offsets */

/* Dedicated DMA register offsets */
/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * there are 16 physical channels; the endpoint counts per variant are
 * 29 and 24 respectively. Given that the Normal DMA endpoints (other
 * than SDRAM) can each be used for both tx and rx, every endpoint
 * needs two vchans, minus one for the SDRAM endpoint:
 */
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUNIV_NDMA_NR_MAX_VCHANS	(24 * 2 - 1)
/*
 * Normal DMA supports individual transfers (segments) up to 128k.
 * Dedicated DMA supports transfers up to 16M. We can only report
 * one maximum segment size to the core, so use the smaller of the two.
 */
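Judging by the SUN4I_NDMA_MAX_SEG_SIZE and SUN4I_DMA_MAX_SEG_SIZE references further down in this listing, the two limits plausibly map onto defines along these lines (a sketch; SZ_128K/SZ_16M come from linux/sizes.h, and the middle define's exact spelling is an assumption):

#define SUN4I_NDMA_MAX_SEG_SIZE		SZ_128K
#define SUN4I_DDMA_MAX_SEG_SIZE		SZ_16M
/* Only one limit can be reported to the DMA core: the smaller one */
#define SUN4I_DMA_MAX_SEG_SIZE		SUN4I_NDMA_MAX_SEG_SIZE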
	/* Per-variant burst conversion hook (in the variant config struct) */
	int	(*convert_burst)(u32 maxburst);
/* in chan2dev() */
	return &chan->dev->device;
static int convert_burst_a10(u32 maxburst)
{
	if (maxburst > 8)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1, 8 -> 2 */
	return (maxburst >> 2);
}
static int convert_burst_f1c100s(u32 maxburst)
{
	if (maxburst > 4)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1 */
	return (maxburst >> 2);
}
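Both converters rely on the same trick: the supported burst lengths are 1, 4 and 8 (1 and 4 on the F1C100s), and shifting right by two maps them onto consecutive register codes. A quick self-contained check (plain C, not part of the driver):

#include <assert.h>

/* Mirrors the convert_burst_*() mapping: 1 -> 0, 4 -> 1, 8 -> 2 */
static int burst_to_code(unsigned int maxburst)
{
	return maxburst >> 2;
}

int main(void)
{
	assert(burst_to_code(1) == 0);
	assert(burst_to_code(4) == 1);
	assert(burst_to_code(8) == 2);
	return 0;
}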
/* in convert_buswidth() */
	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
		return -EINVAL;

	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
	return addr_width >> 1;
/* in sun4i_dma_free_chan_resources() */
	vchan_free_chan_resources(&vchan->vc);
/* in find_and_use_pchan() */
	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
	/* ... */
	/*
	 * pchans 0 .. priv->cfg->ndma_nr_max_channels - 1 are normal,
	 * and priv->cfg->ndma_nr_max_channels onwards are dedicated ones
	 */
	if (vchan->is_dedicated) {
		i = priv->cfg->ndma_nr_max_channels;
		max = priv->cfg->dma_nr_max_channels;
	} else {
		i = 0;
		max = priv->cfg->ndma_nr_max_channels;
	}

	spin_lock_irqsave(&priv->lock, flags);
	for_each_clear_bit_from(i, priv->pchans_used, max) {
		pchan = &pchans[i];
		pchan->vchan = vchan;
		set_bit(i, priv->pchans_used);
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);
/* in release_pchan() */
	int nr = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	pchan->vchan = NULL;
	clear_bit(nr, priv->pchans_used);

	spin_unlock_irqrestore(&priv->lock, flags);
/* in configure_pchan() */
	if (pchan->is_dedicated) {
		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
	} else {
		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
	}
/* in set_pchan_interrupt() */
	int pchan_number = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	/* ... update the half-done and end IRQ enable bits for this pchan ... */
	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	spin_unlock_irqrestore(&priv->lock, flags);
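The elided middle presumably sets or clears one enable bit per event type. A hypothetical reconstruction, assuming the common layout of two bits per physical channel (half-done at bit 2n, end-of-transfer at bit 2n + 1; the names half and end mirror the set_pchan_interrupt(priv, pchan, contract->use_half_int, 1) call seen below):

	/* Hypothetical: bit 2n enables half-done, bit 2n + 1 end-of-transfer */
	if (half)
		reg |= BIT(pchan_number * 2);
	else
		reg &= ~BIT(pchan_number * 2);

	if (end)
		reg |= BIT(pchan_number * 2 + 1);
	else
		reg &= ~BIT(pchan_number * 2 + 1);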
/*
 * in __execute_vchan_pending()
 * This function must be called with &vchan->vc.lock held.
 */
	lockdep_assert_held(&vchan->vc.lock);

	/* No free pchan was available */
		return -EBUSY;

	/* A promise is already being executed on this endpoint */
	if (vchan->processing) {
		dev_dbg(chan2dev(&vchan->vc.chan), /* ... */);
		ret = -EBUSY;
		/* ... */
	}

	do {
		/* Figure out which contract we're working with today */
		vd = vchan_next_desc(&vchan->vc);
		if (!vd) {
			dev_dbg(chan2dev(&vchan->vc.chan), /* ... */);
			/* ... */
		}

		/* A fully completed contract is retired here */
		if (list_empty(&contract->demands)) {
			list_del(&contract->vd.node);
			vchan_cookie_complete(&contract->vd);
			dev_dbg(chan2dev(&vchan->vc.chan), /* ... */);
		}
	} while (list_empty(&contract->demands));

	/* Dispatch the first pending promise of the contract */
	promise = list_first_entry(&contract->demands,
				   struct sun4i_dma_promise, list);
	vchan->processing = promise;

	vchan->contract = contract;
	vchan->pchan = pchan;
	set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
/* in sanitize_config(), DMA_MEM_TO_DEV leg: the device side must be set */
		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->dst_maxburst)
			return -EINVAL;

		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->src_addr_width = sconfig->dst_addr_width;

		if (!sconfig->src_maxburst)
			sconfig->src_maxburst = sconfig->dst_maxburst;

/* DMA_DEV_TO_MEM leg: the mirror image, with the source side mandatory */
		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->src_maxburst)
			return -EINVAL;

		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->dst_addr_width = sconfig->src_addr_width;

		if (!sconfig->dst_maxburst)
			sconfig->dst_maxburst = sconfig->src_maxburst;
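For reference, this is roughly what a client hands in via dmaengine_slave_config() before a device-to-memory transfer; a sketch with a made-up FIFO address, where the zeroed destination fields are exactly what sanitize_config() fills in:

	struct dma_slave_config cfg = {
		.src_addr	= 0x01c05800,	/* hypothetical device FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 4,
		/*
		 * dst_addr_width and dst_maxburst left zeroed:
		 * sanitize_config() copies them from the source side
		 * for DMA_DEV_TO_MEM.
		 */
	};

	dmaengine_slave_config(chan, &cfg);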
/*
 * generate_ndma_promise() - generate a promise, to be used in a normal
 * DMA contract. A promise contains everything needed to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise sits on its contract's demands list.
 */
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	/* ... */
	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING | /* ... */;

	dev_dbg(chan2dev(chan), /* ... */,
		sconfig->src_maxburst, sconfig->dst_maxburst,
		sconfig->src_addr_width, sconfig->dst_addr_width);

	/* Source and destination bursts and bus widths (error checks elided) */
	ret = priv->cfg->convert_burst(sconfig->src_maxburst);
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	ret = convert_buswidth(sconfig->src_addr_width);
	priv->cfg->set_src_data_width(&promise->cfg, ret);

	ret = convert_buswidth(sconfig->dst_addr_width);
	priv->cfg->set_dst_data_width(&promise->cfg, ret);
/*
 * generate_ddma_promise() - generate a promise, to be used in a
 * dedicated DMA contract. Same idea as above, but programs the
 * dedicated part of the DMA Engine. A non-executed promise sits on
 * its contract's demands list.
 */
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	/* ... */
	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING | /* ... */;

	/* Source and destination bursts and bus widths (error checks elided) */
	ret = priv->cfg->convert_burst(sconfig->src_maxburst);
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	ret = convert_buswidth(sconfig->src_addr_width);
	priv->cfg->set_src_data_width(&promise->cfg, ret);

	ret = convert_buswidth(sconfig->dst_addr_width);
	priv->cfg->set_dst_data_width(&promise->cfg, ret);
/*
 * in generate_dma_contract(): contracts function as DMA descriptors.
 * As our hardware does not support [...], a contract carries the
 * request as a list of promises that are executed one after another.
 */
	INIT_LIST_HEAD(&contract->demands);
	INIT_LIST_HEAD(&contract->completed_demands);
/* in get_next_cyclic_promise() */
	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (!promise) {
		/* Every promise has run: recycle the completed list */
		list_splice_init(&contract->completed_demands,
				 &contract->demands);
		promise = list_first_entry(&contract->demands,
					   struct sun4i_dma_promise, list);
	}
/* in sun4i_dma_free_contract(): free both the pending and completed demands */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);

	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
		kfree(promise);
/* in sun4i_dma_prep_dma_memcpy() */
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct dma_slave_config *sconfig = &vchan->cfg;
	/* ... */
	/* Use the widest bus width and the variant's maximum burst */
	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->src_maxburst = priv->cfg->max_burst;
	sconfig->dst_maxburst = priv->cfg->max_burst;

	if (vchan->is_dedicated)
		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
	else
		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
						DMA_MEM_TO_MEM);
	/* ... */
	/* Memcpy uses the SDRAM DRQ on both ends */
	if (vchan->is_dedicated) {
		promise->cfg |=
			SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ddma_drq_sdram) |
			SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ddma_drq_sdram);
	} else {
		promise->cfg |=
			SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ndma_drq_sdram) |
			SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ndma_drq_sdram);
	}

	/* Fill the contract with our only promise, and hand it to the vchan */
	list_add_tail(&promise->list, &contract->demands);

	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
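For context, a memcpy consumer reaches this callback through the generic dmaengine API. A minimal sketch (generic dmaengine calls, error handling omitted; src_dma, dst_dma and len are assumed pre-mapped dma_addr_t buffers and a size):

	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);

	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				       DMA_PREP_INTERRUPT);
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);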
/* in sun4i_dma_prep_dma_cyclic() */
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct dma_slave_config *sconfig = &vchan->cfg;
	/* ... */
	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}
	/* ... */
	contract->is_cyclic = 1;

	if (vchan->is_dedicated) {
		/* ... */
		ram_type = priv->cfg->ddma_drq_sdram;
	} else {
		/* ... */
		ram_type = priv->cfg->ndma_drq_sdram;
	}

	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dest = sconfig->dst_addr;
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    /* ... */;
	} else {
		src = sconfig->src_addr;
		dest = buf;
		endpoints = /* ... */
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    /* ... */;
	}

	/*
	 * We will be using half done interrupts to make two periods
	 * out of a promise, so we need to program the DMA engine less
	 * often. The engine can interrupt on half-transfer, so we can use
	 * this feature to halve the number of promises programmed:
	 *
	 * |---|---|---|---| (periods / promises)
	 *
	 * becomes, with half-done interrupts,
	 *
	 * |-------|-------| (promises as configured on hw)
	 * |---|---|---|---| (periods)
	 *
	 * Each promise must then hold two periods; that will
	 * always be the case for dedicated DMA, where the hardware has a much
	 * larger maximum transfer size.
	 */
	if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
		/* ... */
		contract->use_half_int = 1;
	}

	/* For each period: build a promise and queue it on the contract */
		plength = min((len - offset), period_len);
		/* ... */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(/* ... */);
		/* ... */
		promise->cfg |= endpoints;

		list_add_tail(&promise->list, &contract->demands);
	/* ... */

	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
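A cyclic consumer (audio is the classic case) sets this up roughly as follows; a sketch with placeholder sizes, where my_period_elapsed and my_ctx are hypothetical client symbols:

	/* 64 KiB ring buffer split into eight 8 KiB periods (placeholders) */
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, SZ_64K, SZ_8K,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	tx->callback = my_period_elapsed;	/* runs once per period */
	tx->callback_param = my_ctx;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);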
/* in sun4i_dma_prep_slave_sg() */
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct dma_slave_config *sconfig = &vchan->cfg;
	/* ... */
	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	/* SDRAM is the memory-side DRQ, as in the cyclic case above */
	if (vchan->is_dedicated)
		ram_type = priv->cfg->ddma_drq_sdram;
	else
		ram_type = priv->cfg->ndma_drq_sdram;

	if (dir == DMA_MEM_TO_DEV)
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    /* ... */;
	else
		endpoints = /* ... */
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    /* ... */;

	/*
	 * These are the magic DMA engine timings that keep SPI going. [...]
	 */

	/* For each sg entry: pick addresses, generate a promise, queue it */
		if (dir == DMA_MEM_TO_DEV)
			dstaddr = sconfig->dst_addr;
		else
			srcaddr = sconfig->src_addr;
		/* ... */
		promise->cfg |= endpoints;
		promise->para = para;

		list_add_tail(&promise->list, &contract->demands);
	/* ... */

	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
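A slave scatter-gather consumer follows the same pattern with a mapped scatterlist; a sketch, assuming sgl and sg_len describe an already-built sg table and the slave config was set as shown earlier:

	struct dma_async_tx_descriptor *tx;
	int nents;

	nents = dma_map_sg(chan->device->dev, sgl, sg_len, DMA_TO_DEVICE);

	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);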
/* in sun4i_dma_terminate_all() */
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	/* ... */
	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_get_all_descriptors(&vchan->vc, &head);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	/* Clearing the configuration register halts the pchan */
	if (pchan) {
		if (pchan->is_dedicated)
			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
		else
			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
		release_pchan(priv, pchan);
	}

	spin_lock_irqsave(&vchan->vc.lock, flags);
	/* Clear these so the vchan is usable again */
	vchan->processing = NULL;
	vchan->pchan = NULL;
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);
/* in sun4i_dma_config() */
	memcpy(&vchan->cfg, config, sizeof(*config));
/* in sun4i_dma_of_xlate() */
	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
	/* ... */
	u8 is_dedicated = dma_spec->args[0];
	u8 endpoint = dma_spec->args[1];
	/* ... */
	chan = dma_get_any_slave_channel(&priv->slave);
	if (!chan)
		return NULL;

	/* Assign the consumer's endpoint to the vchan we just got */
	vchan->is_dedicated = is_dedicated;
	vchan->endpoint = endpoint;
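So the consumer's dmas specifier carries two cells: whether a dedicated channel is required, then the endpoint (DRQ) number. A sketch of the client side (the specifier values in the comment are illustrative, not taken from a real device tree):

	/*
	 * With "dmas = <&dma 1 27>" in the client node, args[0] = 1
	 * would request a dedicated channel and args[1] = 27 would name
	 * the endpoint; both numbers here are made up.
	 */
	struct dma_chan *chan = dma_request_chan(&pdev->dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);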
/* in sun4i_dma_tx_status() */
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	/* ... */
	spin_lock_irqsave(&vchan->vc.lock, flags);
	vd = vchan_find_desc(&vchan->vc, cookie);
	/* ... */
	/* Sum up the bytes of every promise still pending on the contract */
	list_for_each_entry(promise, &contract->demands, list)
		bytes += promise->len;

	/*
	 * For the promise currently in flight, use the remaining byte
	 * count reported by the hardware instead of its full length.
	 */
	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (promise && pchan) {
		bytes -= promise->len;
		if (pchan->is_dedicated)
			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		else
			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
	}
	/* ... */
	spin_unlock_irqrestore(&vchan->vc.lock, flags);
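On the client side this backs the standard residue query; a minimal sketch using the generic helpers:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status != DMA_ERROR)
		pr_debug("remaining bytes: %u\n", state.residue);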
/* in sun4i_dma_issue_pending() */
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	/* ... */
	spin_lock_irqsave(&vchan->vc.lock, flags);

	/* If the vchan has pending work, push a transaction into the engine */
	if (vchan_issue_pending(&vchan->vc))
		__execute_vchan_pending(priv, vchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
/* in sun4i_dma_interrupt() */
	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
	/* ... */
	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	/* For every pchan with a pending end-of-transfer interrupt: */
		vchan = pchan->vchan;
		if (!vchan) /* a terminated channel may still interrupt */
			continue;
		contract = vchan->contract;

		spin_lock(&vchan->vc.lock);

		/* Move the finished promise to the completed list */
		list_move_tail(&vchan->processing->list,
			       &contract->completed_demands);

		/*
		 * Cyclic DMA transfers are special:
		 * - There's always something we can dispatch
		 * - We need to run the callback
		 * - Latency is very important, as this is used by audio
		 *
		 * For non-cyclic transfers we need to look around,
		 * so we can program more work or complete contracts.
		 */
		if (contract->is_cyclic) {
			promise = get_next_cyclic_promise(contract);
			vchan->processing = promise;
			configure_pchan(pchan, promise);
			vchan_cyclic_callback(&contract->vd);
		} else {
			vchan->processing = NULL;
			vchan->pchan = NULL;
			/* ... */
		}

		spin_unlock(&vchan->vc.lock);
	/* ... */

	/* Half-done interrupts on a cyclic transfer just run the callback */
		if (contract->is_cyclic)
			vchan_cyclic_callback(&contract->vd);

	/* Disable the IRQs for the events we handled in this pass */
	spin_lock(&priv->lock);
	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel_relaxed(irqs /* minus the handled bits */,
		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	spin_unlock(&priv->lock);

	/* Writing 1 to the pending field will clear the pending interrupt */
	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	/* Schedule any freed-up vchans that still have pending work */
		vchan = &priv->vchans[i];
		spin_lock(&vchan->vc.lock);
		__execute_vchan_pending(priv, vchan);
		spin_unlock(&vchan->vc.lock);
	/* ... */

	/* Re-read in case new work raised the IRQ again in the meantime */
	pendirq = readl_relaxed(priv->base +
				SUN4I_DMA_IRQ_PENDING_STATUS_REG);
/* in sun4i_dma_probe() */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->cfg = of_device_get_match_data(&pdev->dev);
	if (!priv->cfg)
		return -ENODEV;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(priv->clk);
	}

	if (priv->cfg->has_reset) {
		priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
		if (IS_ERR(priv->rst))
			return dev_err_probe(&pdev->dev, PTR_ERR(priv->rst),
					     /* ... */);
	}

	spin_lock_init(&priv->lock);

	dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);

	dma_cap_zero(priv->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

	INIT_LIST_HEAD(&priv->slave.channels);
	priv->slave.device_free_chan_resources	= sun4i_dma_free_chan_resources;
	priv->slave.device_tx_status		= sun4i_dma_tx_status;
	priv->slave.device_issue_pending	= sun4i_dma_issue_pending;
	priv->slave.device_prep_slave_sg	= sun4i_dma_prep_slave_sg;
	priv->slave.device_prep_dma_memcpy	= sun4i_dma_prep_dma_memcpy;
	priv->slave.device_prep_dma_cyclic	= sun4i_dma_prep_dma_cyclic;
	priv->slave.device_config		= sun4i_dma_config;
	priv->slave.device_terminate_all	= sun4i_dma_terminate_all;
	priv->slave.copy_align			= 2;
	priv->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.directions = BIT(DMA_DEV_TO_MEM) |
				 BIT(DMA_MEM_TO_DEV);
	priv->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	priv->slave.dev = &pdev->dev;

	priv->pchans = devm_kcalloc(&pdev->dev, priv->cfg->dma_nr_max_channels,
				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
	priv->pchans_used = devm_kcalloc(&pdev->dev,
					 BITS_TO_LONGS(priv->cfg->dma_nr_max_channels),
					 sizeof(unsigned long), GFP_KERNEL);
	if (!priv->vchans || !priv->pchans || !priv->pchans_used)
		return -ENOMEM;

	/*
	 * [0..priv->cfg->ndma_nr_max_channels) are normal pchans, and
	 * [priv->cfg->ndma_nr_max_channels..priv->cfg->dma_nr_max_channels)
	 * are dedicated ones
	 */
	for (i = 0; i < priv->cfg->ndma_nr_max_channels; i++)
		priv->pchans[i].base = priv->base +
			SUN4I_NDMA_CHANNEL_REG_BASE(i);

	for (j = 0; i < priv->cfg->dma_nr_max_channels; i++, j++) {
		priv->pchans[i].base = priv->base +
			SUN4I_DDMA_CHANNEL_REG_BASE(j);
		priv->pchans[i].is_dedicated = 1;
	}

	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
		struct sun4i_dma_vchan *vchan = &priv->vchans[i];

		spin_lock_init(&vchan->vc.lock);
		vchan->vc.desc_free = sun4i_dma_free_contract;
		vchan_init(&vchan->vc, &priv->slave);
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		return ret;
	}

	/* Start with all IRQs disabled and any stale pending bits cleared */
	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
			       0, dev_name(&pdev->dev), priv);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&priv->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
					 priv);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&priv->slave);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
/* in sun4i_dma_remove(): disable the IRQ so no more work gets scheduled */
	disable_irq(priv->irq);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->slave);

	clk_disable_unprepare(priv->clk);
/* OF match table */
	{ .compatible = "allwinner,sun4i-a10-dma", .data = &sun4i_a10_dma_cfg },
	{ .compatible = "allwinner,suniv-f1c100s-dma",
	  .data = /* ... */ },

	/* in the platform_driver definition */
	.name = "sun4i-dma",
1448 MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");