Matched lines from the Actions Semi Owl SoCs DMA driver (drivers/dma/owl-dma.c). Each entry keeps its original source line number and, where available, the enclosing function.
1 // SPDX-License-Identifier: GPL-2.0+
3 // Actions Semi Owl SoCs DMA driver
5 // Copyright (c) 2014 Actions Semi Inc.
6 // Author: David Liu <liuwei@actions-semi.com>
15 #include <linux/dma-mapping.h>
27 #include "virt-dma.h"
122 ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
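The macro body above is the driver's bit-field remap idiom: take `width` bits of `val` starting at bit `shift` and reposition them at `newshift` when packing mode bits into the hardware control words. A minimal user-space sketch of the same idiom; the helper name remap_field is mine, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Extract `width` bits of `val` starting at `shift` and place them at
 * `newshift`, mirroring the macro body above. */
static uint32_t remap_field(uint32_t val, unsigned int width,
                            unsigned int shift, unsigned int newshift)
{
        uint32_t mask = (1U << width) - 1;      /* BIT(width) - 1 */

        return ((val >> shift) & mask) << newshift;
}

int main(void)
{
        /* Move the 4-bit value 0xA from bit 8 down to bit 0. */
        printf("0x%x\n", (unsigned int)remap_field(0x00000A00u, 4, 8, 0));   /* prints 0xa */
        return 0;
}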
128 * enum owl_dmadesc_offsets - Describe DMA descriptor, hardware link
160 * struct owl_dma_lli - Link list for dma transfer
172 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
184 * struct owl_dma_pchan - Holder for the physical channels
196 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
212 * struct owl_dma - Holder for the Owl DMA controller
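The kernel-doc fragments above name the driver's four wrappers. Pieced together from the field accesses visible elsewhere in this listing, they relate roughly as below; this is a sketch of shape only, not the full definitions, and OWL_DMADESC_SIZE is assumed to be the terminator of the owl_dmadesc_offsets enum.

struct owl_dma_lli {                            /* one hardware link-list item */
        u32                     hw[OWL_DMADESC_SIZE];   /* words the engine reads */
        dma_addr_t              phys;                   /* bus address of this item */
        struct list_head        node;                   /* entry in txd->lli_list */
};

struct owl_dma_txd {                            /* one prepared transfer */
        struct virt_dma_desc    vd;             /* wraps dma_async_tx_descriptor */
        struct list_head        lli_list;       /* chain of owl_dma_lli */
        bool                    cyclic;         /* set by owl_prep_dma_cyclic() */
};

struct owl_dma_pchan {                          /* physical channel */
        u32                     id;             /* index into od->pchans[] */
        void __iomem            *base;          /* od->base + OWL_DMA_CHAN_BASE(id) */
        struct owl_dma_vchan    *vchan;         /* current owner, NULL when free */
};

struct owl_dma_vchan {                          /* dmaengine-facing channel */
        struct virt_dma_chan    vc;             /* virt-dma bookkeeping */
        struct owl_dma_pchan    *pchan;         /* backing physical channel */
        struct owl_dma_txd      *txd;           /* descriptor in flight */
        struct dma_slave_config cfg;            /* copied in owl_dma_config() */
        u8                      drq;            /* request line from owl_dma_of_xlate() */
};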
246 regval = readl(pchan->base + reg); in pchan_update()
253 writel(regval, pchan->base + reg); in pchan_update()
258 writel(data, pchan->base + reg); in pchan_writel()
263 return readl(pchan->base + reg); in pchan_readl()
270 regval = readl(od->base + reg); in dma_update()
277 writel(regval, od->base + reg); in dma_update()
282 writel(data, od->base + reg); in dma_writel()
287 return readl(od->base + reg); in dma_readl()
297 return &chan->dev->device; in chan2dev()
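pchan_update() and dma_update() are ordinary read-modify-write register helpers, with the *_writel()/*_readl() pairs as thin wrappers and chan2dev() resolving the struct device used for logging. A minimal sketch of the update pattern, assuming the set/clear boolean that the call sites later in this listing pass as the last argument:

/* Set (state == true) or clear (state == false) `mask` in the 32-bit
 * register at base + reg; sketch only, the helper name is mine. */
static void owl_mmio_update(void __iomem *base, u32 reg, u32 mask, bool state)
{
        u32 regval = readl(base + reg);

        if (state)
                regval |= mask;
        else
                regval &= ~mask;

        writel(regval, base + reg);
}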
339 return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0); in llc_hw_flen()
345 list_del(&lli->node); in owl_dma_free_lli()
346 dma_pool_free(od->lli_pool, lli, lli->phys); in owl_dma_free_lli()
354 lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys); in owl_dma_alloc_lli()
358 INIT_LIST_HEAD(&lli->node); in owl_dma_alloc_lli()
359 lli->phys = phys; in owl_dma_alloc_lli()
370 list_add_tail(&next->node, &txd->lli_list); in owl_dma_add_lli()
373 prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys; in owl_dma_add_lli()
374 prev->hw[OWL_DMADESC_CTRLA] |= in owl_dma_add_lli()
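Link-list items are carved out of a dma_pool so each one carries the bus address the engine needs, and owl_dma_add_lli() queues a new item on the transfer while patching the previous item's NEXT_LLI word to point at it (the CTRLA update on the truncated line above also flags the previous item as linked, but the exact bits are not shown here). A condensed sketch of that alloc-and-chain flow, using the struct shapes sketched earlier:

/* Allocate one descriptor from the pool (GFP_NOWAIT: the prep_*
 * callbacks may run in atomic context). */
static struct owl_dma_lli *lli_alloc(struct dma_pool *lli_pool)
{
        struct owl_dma_lli *lli;
        dma_addr_t phys;

        lli = dma_pool_alloc(lli_pool, GFP_NOWAIT, &phys);
        if (!lli)
                return NULL;

        INIT_LIST_HEAD(&lli->node);
        lli->phys = phys;               /* address the hardware will follow */
        return lli;
}

/* Append `next` to the transfer and chain it after `prev` in hardware. */
static struct owl_dma_lli *lli_chain(struct owl_dma_txd *txd,
                                     struct owl_dma_lli *prev,
                                     struct owl_dma_lli *next)
{
        list_add_tail(&next->node, &txd->lli_list);

        if (prev)
                prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys;

        return next;    /* becomes `prev` for the following frame */
}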
388 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); in owl_dma_cfg_lli()
401 mode |= OWL_DMA_MODE_TS(vchan->drq) in owl_dma_cfg_lli()
409 if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) in owl_dma_cfg_lli()
414 mode |= OWL_DMA_MODE_TS(vchan->drq) in owl_dma_cfg_lli()
422 if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) in owl_dma_cfg_lli()
427 return -EINVAL; in owl_dma_cfg_lli()
430 lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode, in owl_dma_cfg_lli()
439 lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */ in owl_dma_cfg_lli()
440 lli->hw[OWL_DMADESC_SADDR] = src; in owl_dma_cfg_lli()
441 lli->hw[OWL_DMADESC_DADDR] = dst; in owl_dma_cfg_lli()
442 lli->hw[OWL_DMADESC_SRC_STRIDE] = 0; in owl_dma_cfg_lli()
443 lli->hw[OWL_DMADESC_DST_STRIDE] = 0; in owl_dma_cfg_lli()
445 if (od->devid == S700_DMA) { in owl_dma_cfg_lli()
447 lli->hw[OWL_DMADESC_FLEN] = len; in owl_dma_cfg_lli()
453 lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb; in owl_dma_cfg_lli()
461 lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20; in owl_dma_cfg_lli()
462 lli->hw[OWL_DMADESC_CTRLB] = ctrlb; in owl_dma_cfg_lli()
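owl_dma_cfg_lli() checks the transfer direction (unsupported directions return -EINVAL), treats a 1-byte slave bus width specially, folds the vchan's DRQ line into the mode word, and then fills the descriptor words: addresses, strides, frame length, and control words. The one SoC-specific twist visible above is where the frame count lives; a trimmed sketch of just that split:

/* Per-SoC placement of the frame count (FCNT_VAL): the S700 keeps it
 * in CTRLB, the other parts pack it into the top bits of FLEN. */
static void fill_len_words(struct owl_dma *od, struct owl_dma_lli *lli,
                           u32 len, u32 ctrlb)
{
        if (od->devid == S700_DMA) {
                lli->hw[OWL_DMADESC_FLEN]  = len;
                lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
        } else {
                lli->hw[OWL_DMADESC_FLEN]  = len | FCNT_VAL << 20;
                lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
        }
}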
475 for (i = 0; i < od->nr_pchans; i++) { in owl_dma_get_pchan()
476 pchan = &od->pchans[i]; in owl_dma_get_pchan()
478 spin_lock_irqsave(&od->lock, flags); in owl_dma_get_pchan()
479 if (!pchan->vchan) { in owl_dma_get_pchan()
480 pchan->vchan = vchan; in owl_dma_get_pchan()
481 spin_unlock_irqrestore(&od->lock, flags); in owl_dma_get_pchan()
485 spin_unlock_irqrestore(&od->lock, flags); in owl_dma_get_pchan()
497 return !(val & (1 << pchan->id)); in owl_dma_pchan_busy()
509 spin_lock_irqsave(&od->lock, flags); in owl_dma_terminate_pchan()
510 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false); in owl_dma_terminate_pchan()
513 if (irq_pd & (1 << pchan->id)) { in owl_dma_terminate_pchan()
514 dev_warn(od->dma.dev, in owl_dma_terminate_pchan()
516 pchan->id); in owl_dma_terminate_pchan()
517 dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id)); in owl_dma_terminate_pchan()
520 pchan->vchan = NULL; in owl_dma_terminate_pchan()
522 spin_unlock_irqrestore(&od->lock, flags); in owl_dma_terminate_pchan()
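Physical channels are claimed first-come-first-served: the array is scanned and the first entry with no vchan owner is taken, with od->lock held only around the test-and-set. Termination is the reverse: mask the channel's bit in IRQ_EN0, warn about (and clear) any interrupt that was already pending, then drop ownership. The claim loop, reassembled from the matched lines above:

static struct owl_dma_pchan *owl_dma_get_pchan_sketch(struct owl_dma *od,
                                                      struct owl_dma_vchan *vchan)
{
        struct owl_dma_pchan *pchan;
        unsigned long flags;
        int i;

        for (i = 0; i < od->nr_pchans; i++) {
                pchan = &od->pchans[i];

                spin_lock_irqsave(&od->lock, flags);
                if (!pchan->vchan) {
                        pchan->vchan = vchan;   /* claimed for this vchan */
                        spin_unlock_irqrestore(&od->lock, flags);
                        return pchan;
                }
                spin_unlock_irqrestore(&od->lock, flags);
        }

        return NULL;    /* every physical channel is busy */
}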
537 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); in owl_dma_start_next_txd()
538 struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc); in owl_dma_start_next_txd()
539 struct owl_dma_pchan *pchan = vchan->pchan; in owl_dma_start_next_txd()
540 struct owl_dma_txd *txd = to_owl_txd(&vd->tx); in owl_dma_start_next_txd()
545 list_del(&vd->node); in owl_dma_start_next_txd()
547 vchan->txd = txd; in owl_dma_start_next_txd()
553 lli = list_first_entry(&txd->lli_list, in owl_dma_start_next_txd()
556 if (txd->cyclic) in owl_dma_start_next_txd()
564 pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys); in owl_dma_start_next_txd()
570 spin_lock_irqsave(&od->lock, flags); in owl_dma_start_next_txd()
572 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true); in owl_dma_start_next_txd()
574 spin_unlock_irqrestore(&od->lock, flags); in owl_dma_start_next_txd()
576 dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id); in owl_dma_start_next_txd()
587 owl_dma_terminate_pchan(od, vchan->pchan); in owl_dma_phy_free()
589 vchan->pchan = NULL; in owl_dma_phy_free()
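Starting a transfer means popping the next virt-dma descriptor, remembering it in vchan->txd, handing the first link-list item's bus address to the channel's NEXT_DESCRIPTOR register, and unmasking the channel's interrupt under od->lock (the mode and start-bit writes are not among the matched lines). A condensed sketch of that hand-off:

static void owl_dma_kick_sketch(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
        struct owl_dma_pchan *pchan = vchan->pchan;
        struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
        struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
        struct owl_dma_lli *lli;
        unsigned long flags;

        list_del(&vd->node);            /* off the issued list */
        vchan->txd = txd;               /* the completion path looks here */

        lli = list_first_entry(&txd->lli_list, struct owl_dma_lli, node);

        /* Point the engine at the head of the hardware link list. */
        pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);

        spin_lock_irqsave(&od->lock, flags);
        dma_update(od, OWL_DMA_IRQ_EN0, 1 << pchan->id, true);
        spin_unlock_irqrestore(&od->lock, flags);
}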
601 spin_lock(&od->lock); in owl_dma_interrupt()
606 for_each_set_bit(i, &pending, od->nr_pchans) { in owl_dma_interrupt()
607 pchan = &od->pchans[i]; in owl_dma_interrupt()
615 for (i = 0; i < od->nr_pchans; i++) { in owl_dma_interrupt()
616 pchan = &od->pchans[i]; in owl_dma_interrupt()
626 dev_dbg(od->dma.dev, in owl_dma_interrupt()
638 spin_unlock(&od->lock); in owl_dma_interrupt()
640 for_each_set_bit(i, &pending, od->nr_pchans) { in owl_dma_interrupt()
643 pchan = &od->pchans[i]; in owl_dma_interrupt()
645 vchan = pchan->vchan; in owl_dma_interrupt()
647 dev_warn(od->dma.dev, "no vchan attached on pchan %d\n", in owl_dma_interrupt()
648 pchan->id); in owl_dma_interrupt()
652 spin_lock(&vchan->vc.lock); in owl_dma_interrupt()
654 txd = vchan->txd; in owl_dma_interrupt()
656 vchan->txd = NULL; in owl_dma_interrupt()
658 vchan_cookie_complete(&txd->vd); in owl_dma_interrupt()
664 if (vchan_next_desc(&vchan->vc)) in owl_dma_interrupt()
670 spin_unlock(&vchan->vc.lock); in owl_dma_interrupt()
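The interrupt handler runs in two halves: under od->lock it reads and clears the global pending bits (with a fallback scan of all channels to catch status the hardware only reports per channel); then, per pending channel and under the vchan's lock, it completes the in-flight descriptor and either reuses the physical channel for the next queued descriptor or releases it. A sketch of that completion half, ignoring the cyclic case:

static void owl_dma_complete_one_sketch(struct owl_dma *od,
                                        struct owl_dma_pchan *pchan)
{
        struct owl_dma_vchan *vchan = pchan->vchan;
        struct owl_dma_txd *txd;

        if (!vchan) {
                dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
                         pchan->id);
                return;
        }

        spin_lock(&vchan->vc.lock);

        txd = vchan->txd;
        if (txd) {
                vchan->txd = NULL;
                vchan_cookie_complete(&txd->vd);        /* report to dmaengine */

                /* Keep the pchan if more work is queued, else free it. */
                if (vchan_next_desc(&vchan->vc))
                        owl_dma_start_next_txd(vchan);
                else
                        owl_dma_phy_free(od, vchan);
        }

        spin_unlock(&vchan->vc.lock);
}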
683 list_for_each_entry_safe(lli, _lli, &txd->lli_list, node) in owl_dma_free_txd()
691 struct owl_dma *od = to_owl_dma(vd->tx.chan->device); in owl_dma_desc_free()
692 struct owl_dma_txd *txd = to_owl_txd(&vd->tx); in owl_dma_desc_free()
699 struct owl_dma *od = to_owl_dma(chan->device); in owl_dma_terminate_all()
702 LIST_HEAD(head); in owl_dma_terminate_all()
704 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_terminate_all()
706 if (vchan->pchan) in owl_dma_terminate_all()
709 if (vchan->txd) { in owl_dma_terminate_all()
710 owl_dma_desc_free(&vchan->txd->vd); in owl_dma_terminate_all()
711 vchan->txd = NULL; in owl_dma_terminate_all()
714 vchan_get_all_descriptors(&vchan->vc, &head); in owl_dma_terminate_all()
716 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_terminate_all()
718 vchan_dma_desc_free_list(&vchan->vc, &head); in owl_dma_terminate_all()
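terminate_all() stops the physical channel (if one is attached), frees any descriptor that was mid-flight, and hands every still-queued descriptor to virt-dma for disposal outside the lock via the local `head` list. The disposal path ultimately walks the descriptor's link-list items back into the pool; a sketch of that teardown, which assumes the txd itself came from kzalloc():

static void owl_dma_free_txd_sketch(struct owl_dma *od, struct owl_dma_txd *txd)
{
        struct owl_dma_lli *lli, *tmp;

        if (!txd)
                return;

        /* _safe variant: entries are deleted while walking the list. */
        list_for_each_entry_safe(lli, tmp, &txd->lli_list, node) {
                list_del(&lli->node);
                dma_pool_free(od->lli_pool, lli, lli->phys);
        }

        kfree(txd);
}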
729 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || in owl_dma_config()
730 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) in owl_dma_config()
731 return -EINVAL; in owl_dma_config()
733 memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config)); in owl_dma_config()
743 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_pause()
745 owl_dma_pause_pchan(vchan->pchan); in owl_dma_pause()
747 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_pause()
757 if (!vchan->pchan && !vchan->txd) in owl_dma_resume()
760 dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); in owl_dma_resume()
762 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_resume()
764 owl_dma_resume_pchan(vchan->pchan); in owl_dma_resume()
766 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_resume()
779 pchan = vchan->pchan; in owl_dma_getbytes_chan()
780 txd = vchan->txd; in owl_dma_getbytes_chan()
791 list_for_each_entry(lli, &txd->lli_list, node) { in owl_dma_getbytes_chan()
793 if (lli->phys == next_lli_phy) { in owl_dma_getbytes_chan()
794 list_for_each_entry(lli, &txd->lli_list, node) in owl_dma_getbytes_chan()
820 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_tx_status()
822 vd = vchan_find_desc(&vchan->vc, cookie); in owl_dma_tx_status()
824 txd = to_owl_txd(&vd->tx); in owl_dma_tx_status()
825 list_for_each_entry(lli, &txd->lli_list, node) in owl_dma_tx_status()
831 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_tx_status()
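Residue reporting has two cases. If the cookie is still sitting on the issued list, vchan_find_desc() returns it and the residue is simply the sum of every item's frame length; if it is the in-flight descriptor, owl_dma_getbytes_chan() reads the channel's current NEXT_DESCRIPTOR pointer and counts only the items at and beyond it. A sketch of the simple, not-yet-started case:

/* Total bytes still outstanding for a descriptor that has not been
 * handed to the hardware yet. */
static u32 owl_dma_txd_bytes_sketch(struct owl_dma_txd *txd)
{
        struct owl_dma_lli *lli;
        u32 bytes = 0;

        list_for_each_entry(lli, &txd->lli_list, node)
                bytes += llc_hw_flen(lli);      /* FLEN & GENMASK(19, 0) */

        return bytes;
}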
840 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); in owl_dma_phy_alloc_and_start()
847 dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id); in owl_dma_phy_alloc_and_start()
849 vchan->pchan = pchan; in owl_dma_phy_alloc_and_start()
858 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_issue_pending()
859 if (vchan_issue_pending(&vchan->vc)) { in owl_dma_issue_pending()
860 if (!vchan->pchan) in owl_dma_issue_pending()
863 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_issue_pending()
871 struct owl_dma *od = to_owl_dma(chan->device); in owl_dma_prep_memcpy()
885 INIT_LIST_HEAD(&txd->lli_list); in owl_dma_prep_memcpy()
895 bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH); in owl_dma_prep_memcpy()
899 &vchan->cfg, txd->cyclic); in owl_dma_prep_memcpy()
908 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); in owl_dma_prep_memcpy()
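prep_memcpy() splits the request into frames of at most OWL_DMA_FRAME_MAX_LENGTH bytes, building one link-list item per frame and bailing out through a cleanup path if an allocation fails. The chunking idiom, as a sketch in which build_one_lli() is a hypothetical stand-in for the alloc/cfg/add trio shown earlier:

static int chunk_memcpy_sketch(struct owl_dma_txd *txd, dma_addr_t src,
                               dma_addr_t dst, size_t len)
{
        size_t offset, bytes;

        for (offset = 0; offset < len; offset += bytes) {
                /* Cap each frame at the hardware maximum (FLEN is read
                 * back through a 20-bit mask, see llc_hw_flen above). */
                bytes = min_t(size_t, len - offset, OWL_DMA_FRAME_MAX_LENGTH);

                if (build_one_lli(txd, src + offset, dst + offset, bytes))
                        return -ENOMEM;         /* caller frees the partial txd */
        }

        return 0;
}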
922 struct owl_dma *od = to_owl_dma(chan->device); in owl_dma_prep_slave_sg()
924 struct dma_slave_config *sconfig = &vchan->cfg; in owl_dma_prep_slave_sg()
936 INIT_LIST_HEAD(&txd->lli_list); in owl_dma_prep_slave_sg()
943 dev_err(od->dma.dev, in owl_dma_prep_slave_sg()
956 dst = sconfig->dst_addr; in owl_dma_prep_slave_sg()
958 src = sconfig->src_addr; in owl_dma_prep_slave_sg()
963 txd->cyclic); in owl_dma_prep_slave_sg()
972 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); in owl_dma_prep_slave_sg()
987 struct owl_dma *od = to_owl_dma(chan->device); in owl_prep_dma_cyclic()
989 struct dma_slave_config *sconfig = &vchan->cfg; in owl_prep_dma_cyclic()
1000 INIT_LIST_HEAD(&txd->lli_list); in owl_prep_dma_cyclic()
1001 txd->cyclic = true; in owl_prep_dma_cyclic()
1012 dst = sconfig->dst_addr; in owl_prep_dma_cyclic()
1014 src = sconfig->src_addr; in owl_prep_dma_cyclic()
1019 dir, sconfig, txd->cyclic); in owl_prep_dma_cyclic()
1034 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); in owl_prep_dma_cyclic()
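All three prep paths above (memcpy, slave_sg, cyclic) are reached through the ordinary dmaengine consumer API, with the channel looked up through the xlate callback registered at probe time. A hypothetical consumer sketch; the "tx" channel name, FIFO address, and buffer are placeholders, not anything this driver defines:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int start_tx_sketch(struct device *dev, dma_addr_t fifo_phys,
                           dma_addr_t buf, size_t len)
{
        struct dma_slave_config cfg = {
                .dst_addr       = fifo_phys,    /* peripheral FIFO (placeholder) */
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        };
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan;
        int ret;

        chan = dma_request_chan(dev, "tx");     /* resolved via owl_dma_of_xlate() */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        ret = dmaengine_slave_config(chan, &cfg);       /* lands in owl_dma_config() */
        if (ret)
                goto out_release;

        desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT);
        if (!desc) {
                ret = -ENOMEM;
                goto out_release;
        }

        dmaengine_submit(desc);                 /* queue on the virtual channel */
        dma_async_issue_pending(chan);          /* owl_dma_issue_pending() kicks it */
        return 0;

out_release:
        dma_release_channel(chan);
        return ret;
}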
1047 vchan_free_chan_resources(&vchan->vc); in owl_dma_free_chan_resources()
1056 next, &od->dma.channels, vc.chan.device_node) { in owl_dma_free()
1057 list_del(&vchan->vc.chan.device_node); in owl_dma_free()
1058 tasklet_kill(&vchan->vc.task); in owl_dma_free()
1065 struct owl_dma *od = ofdma->of_dma_data; in owl_dma_of_xlate()
1068 u8 drq = dma_spec->args[0]; in owl_dma_of_xlate()
1070 if (drq > od->nr_vchans) in owl_dma_of_xlate()
1073 chan = dma_get_any_slave_channel(&od->dma); in owl_dma_of_xlate()
1078 vchan->drq = drq; in owl_dma_of_xlate()
1084 { .compatible = "actions,s500-dma", .data = (void *)S900_DMA,},
1085 { .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
1086 { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
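The match table maps three compatibles onto two register layouts (the S500 reuses the S900 data). Channel requests from device tree carry a single cell, the DRQ (request line) number, which the xlate callback bounds-checks against dma-requests and stores on whichever virtual channel it hands out. A reconstruction of that callback from the matched lines; to_owl_vchan() is assumed to be the driver's container_of-style helper and is not shown in this listing:

static struct dma_chan *owl_dma_of_xlate_sketch(struct of_phandle_args *dma_spec,
                                                struct of_dma *ofdma)
{
        struct owl_dma *od = ofdma->of_dma_data;
        struct dma_chan *chan;
        u8 drq = dma_spec->args[0];

        if (drq > od->nr_vchans)
                return NULL;

        chan = dma_get_any_slave_channel(&od->dma);
        if (!chan)
                return NULL;

        to_owl_vchan(chan)->drq = drq;  /* remembered for owl_dma_cfg_lli() */
        return chan;
}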
1093 struct device_node *np = pdev->dev.of_node; in owl_dma_probe()
1097 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); in owl_dma_probe()
1099 return -ENOMEM; in owl_dma_probe()
1101 od->base = devm_platform_ioremap_resource(pdev, 0); in owl_dma_probe()
1102 if (IS_ERR(od->base)) in owl_dma_probe()
1103 return PTR_ERR(od->base); in owl_dma_probe()
1105 ret = of_property_read_u32(np, "dma-channels", &nr_channels); in owl_dma_probe()
1107 dev_err(&pdev->dev, "can't get dma-channels\n"); in owl_dma_probe()
1111 ret = of_property_read_u32(np, "dma-requests", &nr_requests); in owl_dma_probe()
1113 dev_err(&pdev->dev, "can't get dma-requests\n"); in owl_dma_probe()
1117 dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n", in owl_dma_probe()
1120 od->devid = (uintptr_t)of_device_get_match_data(&pdev->dev); in owl_dma_probe()
1122 od->nr_pchans = nr_channels; in owl_dma_probe()
1123 od->nr_vchans = nr_requests; in owl_dma_probe()
1125 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); in owl_dma_probe()
1128 spin_lock_init(&od->lock); in owl_dma_probe()
1130 dma_cap_set(DMA_MEMCPY, od->dma.cap_mask); in owl_dma_probe()
1131 dma_cap_set(DMA_SLAVE, od->dma.cap_mask); in owl_dma_probe()
1132 dma_cap_set(DMA_CYCLIC, od->dma.cap_mask); in owl_dma_probe()
1134 od->dma.dev = &pdev->dev; in owl_dma_probe()
1135 od->dma.device_free_chan_resources = owl_dma_free_chan_resources; in owl_dma_probe()
1136 od->dma.device_tx_status = owl_dma_tx_status; in owl_dma_probe()
1137 od->dma.device_issue_pending = owl_dma_issue_pending; in owl_dma_probe()
1138 od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy; in owl_dma_probe()
1139 od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg; in owl_dma_probe()
1140 od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic; in owl_dma_probe()
1141 od->dma.device_config = owl_dma_config; in owl_dma_probe()
1142 od->dma.device_pause = owl_dma_pause; in owl_dma_probe()
1143 od->dma.device_resume = owl_dma_resume; in owl_dma_probe()
1144 od->dma.device_terminate_all = owl_dma_terminate_all; in owl_dma_probe()
1145 od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); in owl_dma_probe()
1146 od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); in owl_dma_probe()
1147 od->dma.directions = BIT(DMA_MEM_TO_MEM); in owl_dma_probe()
1148 od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in owl_dma_probe()
1150 INIT_LIST_HEAD(&od->dma.channels); in owl_dma_probe()
1152 od->clk = devm_clk_get(&pdev->dev, NULL); in owl_dma_probe()
1153 if (IS_ERR(od->clk)) { in owl_dma_probe()
1154 dev_err(&pdev->dev, "unable to get clock\n"); in owl_dma_probe()
1155 return PTR_ERR(od->clk); in owl_dma_probe()
1163 od->irq = platform_get_irq(pdev, 0); in owl_dma_probe()
1164 ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0, in owl_dma_probe()
1165 dev_name(&pdev->dev), od); in owl_dma_probe()
1167 dev_err(&pdev->dev, "unable to request IRQ\n"); in owl_dma_probe()
1172 od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans, in owl_dma_probe()
1174 if (!od->pchans) in owl_dma_probe()
1175 return -ENOMEM; in owl_dma_probe()
1177 for (i = 0; i < od->nr_pchans; i++) { in owl_dma_probe()
1178 struct owl_dma_pchan *pchan = &od->pchans[i]; in owl_dma_probe()
1180 pchan->id = i; in owl_dma_probe()
1181 pchan->base = od->base + OWL_DMA_CHAN_BASE(i); in owl_dma_probe()
1185 od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans, in owl_dma_probe()
1187 if (!od->vchans) in owl_dma_probe()
1188 return -ENOMEM; in owl_dma_probe()
1190 for (i = 0; i < od->nr_vchans; i++) { in owl_dma_probe()
1191 struct owl_dma_vchan *vchan = &od->vchans[i]; in owl_dma_probe()
1193 vchan->vc.desc_free = owl_dma_desc_free; in owl_dma_probe()
1194 vchan_init(&vchan->vc, &od->dma); in owl_dma_probe()
1198 od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev, in owl_dma_probe()
1202 if (!od->lli_pool) { in owl_dma_probe()
1203 dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n"); in owl_dma_probe()
1204 return -ENOMEM; in owl_dma_probe()
1207 clk_prepare_enable(od->clk); in owl_dma_probe()
1209 ret = dma_async_device_register(&od->dma); in owl_dma_probe()
1211 dev_err(&pdev->dev, "failed to register DMA engine device\n"); in owl_dma_probe()
1215 /* Device-tree DMA controller registration */ in owl_dma_probe()
1216 ret = of_dma_controller_register(pdev->dev.of_node, in owl_dma_probe()
1219 dev_err(&pdev->dev, "of_dma_controller_register failed\n"); in owl_dma_probe()
1226 dma_async_device_unregister(&od->dma); in owl_dma_probe()
1228 clk_disable_unprepare(od->clk); in owl_dma_probe()
1229 dma_pool_destroy(od->lli_pool); in owl_dma_probe()
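Probe wires everything up in a fixed order: MMIO and DT properties, capability mask and callbacks, clock, IRQ, the physical and virtual channel arrays, the descriptor pool, and only then registration with the dmaengine core and the OF DMA framework. The tail of that sequence with its unwind ordering, reassembled from the matched lines; the label names are mine, the listing does not show them:

        clk_prepare_enable(od->clk);

        ret = dma_async_device_register(&od->dma);
        if (ret) {
                dev_err(&pdev->dev, "failed to register DMA engine device\n");
                goto err_pool_free;
        }

        /* Device-tree DMA controller registration */
        ret = of_dma_controller_register(pdev->dev.of_node,
                                         owl_dma_of_xlate, od);
        if (ret) {
                dev_err(&pdev->dev, "of_dma_controller_register failed\n");
                goto err_unregister;
        }

        return 0;

err_unregister:
        dma_async_device_unregister(&od->dma);
err_pool_free:
        clk_disable_unprepare(od->clk);
        dma_pool_destroy(od->lli_pool);

        return ret;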
1238 of_dma_controller_free(pdev->dev.of_node); in owl_dma_remove()
1239 dma_async_device_unregister(&od->dma); in owl_dma_remove()
1244 /* Make sure we won't have any further interrupts */ in owl_dma_remove()
1245 devm_free_irq(od->dma.dev, od->irq, od); in owl_dma_remove()
1249 clk_disable_unprepare(od->clk); in owl_dma_remove()
1250 dma_pool_destroy(od->lli_pool); in owl_dma_remove()
1257 .name = "dma-owl",
1274 MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
1276 MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");