
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IMG Multi-threaded DMA Controller (MDC)
 */

#include <linux/dma-mapping.h>
/* ... further includes ... */

#include "virt-dma.h"
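/* ... register definitions, the hardware list descriptor
 * (struct mdc_hw_list_desc) and the driver state structs ... */

/*
 * Register accessors. Each channel's register block lives at a fixed
 * 0x040-byte stride from the controller base, so per-channel accesses
 * add chan_nr * 0x040 to the register offset.
 */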
static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
	return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
	writel(val, mdma->regs + reg);
}

static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
	return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
	mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}
static struct device *mdma2dev(struct mdc_dma *mdma)
{
	return mdma->dma_dev.dev;
}

/* Bus widths are powers of two, so the register encoding is log2 of bytes. */
static inline unsigned int to_mdc_width(unsigned int bytes)
{
	return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
				      unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
				       unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}
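/*
 * mdc_list_desc_config() fills in a single hardware list descriptor:
 * the channel's thread ID is programmed into the stream, read and
 * write thread fields alike, and the maximum burst is reduced by one
 * bus-width multiple when either address is not bus-width aligned.
 */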
static void mdc_list_desc_config(struct mdc_chan *mchan,
				 struct mdc_hw_list_desc *ldesc,
				 enum dma_transfer_direction dir,
				 dma_addr_t src, dma_addr_t dst, size_t len)
{
	struct mdc_dma *mdma = mchan->mdma;
	unsigned int max_burst, burst_size;

	ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	ldesc->readport_conf =
		(mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	ldesc->read_addr = src;
	ldesc->write_addr = dst;
	ldesc->xfer_size = len - 1;
	ldesc->node_addr = 0;
	ldesc->cmds_done = 0;
	ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
		MDC_CONTROL_AND_STATUS_EN;
	ldesc->next_desc = NULL;

	if (IS_ALIGNED(dst, mdma->bus_width) &&
	    IS_ALIGNED(src, mdma->bus_width))
		max_burst = mdma->bus_width * mdma->max_burst_mult;
	else
		max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

	if (dir == DMA_MEM_TO_DEV) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
		burst_size = min(max_burst, mchan->config.dst_maxburst *
				 mchan->config.dst_addr_width);
	} else if (dir == DMA_DEV_TO_MEM) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mchan->config.src_addr_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = min(max_burst, mchan->config.src_maxburst *
				 mchan->config.src_addr_width);
	} else {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
			MDC_GENERAL_CONFIG_INC_W;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = max_burst;
	}
	ldesc->readport_conf |= (burst_size - 1) <<
		MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}
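/*
 * Descriptor teardown: walk the hardware list via next_desc/node_addr
 * and return each node to the DMA pool.
 */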
static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
	struct mdc_dma *mdma = mdesc->chan->mdma;
	struct mdc_hw_list_desc *curr, *next;
	dma_addr_t curr_phys, next_phys;

	curr = mdesc->list;
	curr_phys = mdesc->list_phys;
	while (curr) {
		next = curr->next_desc;
		next_phys = curr->node_addr;
		dma_pool_free(mdma->desc_pool, curr, curr_phys);
		curr = next;
		curr_phys = next_phys;
	}
}
static void mdc_desc_free(struct virt_dma_desc *vd)
{
	struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

	mdc_list_desc_free(mdesc);
	kfree(mdesc);
}
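/*
 * memcpy preparation: the requested length is split into a chain of
 * list descriptors of at most max_xfer_size bytes each.
 */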
static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	if (!len)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->list_xfer_size = len;

	while (len > 0) {
		size_t xfer_size;

		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
		if (!curr)
			goto free_desc;

		if (prev) {
			prev->node_addr = curr_phys;
			prev->next_desc = curr;
		} else {
			mdesc->list_phys = curr_phys;
			mdesc->list = curr;
		}

		xfer_size = min_t(size_t, mdma->max_xfer_size, len);

		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
				     xfer_size);

		prev = curr;

		mdesc->list_len++;
		src += xfer_size;
		dest += xfer_size;
		len -= xfer_size;
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}
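/*
 * Slave transfers are only valid when the configured address width is
 * a supported power of two no wider than the bus.
 */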
static int mdc_check_slave_width(struct mdc_chan *mchan,
				 enum dma_transfer_direction dir)
{
	enum dma_slave_buswidth width;

	if (dir == DMA_MEM_TO_DEV)
		width = mchan->config.dst_addr_width;
	else
		width = mchan->config.src_addr_width;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		break;
	default:
		return -EINVAL;
	}

	if (width > mchan->mdma->bus_width)
		return -EINVAL;

	return 0;
}
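/*
 * Cyclic preparation: each period is split into max_xfer_size chunks,
 * and the final node is linked back to the head of the list so the
 * hardware loops over the buffer indefinitely.
 */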
static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	/* ... direction, width and allocation checks ... */
	mdesc->chan = mchan;
	mdesc->cyclic = true;
	mdesc->list_xfer_size = buf_len;
	mdesc->list_period_len = DIV_ROUND_UP(period_len,
					      mdma->max_xfer_size);

	while (buf_len > 0) {
		size_t remainder = min(period_len, buf_len);

		while (remainder > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  remainder);

			if (dir == DMA_MEM_TO_DEV)
				mdc_list_desc_config(mchan, curr, dir,
						     buf_addr,
						     mchan->config.dst_addr,
						     xfer_size);
			else
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf_addr, xfer_size);

			prev = curr;

			mdesc->list_len++;
			buf_addr += xfer_size;
			buf_len -= xfer_size;
			remainder -= xfer_size;
		}
	}
	prev->node_addr = mdesc->list_phys;

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}
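/*
 * Scatter-gather preparation: the same chunking as above, applied to
 * each scatterlist entry in turn while accumulating list_xfer_size.
 */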
static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	struct scatterlist *sg;
	dma_addr_t curr_phys;
	unsigned int i;

	/* ... direction, width and allocation checks as above ... */
	mdesc->chan = mchan;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);

		while (buf_len > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  buf_len);

			if (dir == DMA_MEM_TO_DEV)
				mdc_list_desc_config(mchan, curr, dir, buf,
						     mchan->config.dst_addr,
						     xfer_size);
			else
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf, xfer_size);

			prev = curr;

			mdesc->list_len++;
			mdesc->list_xfer_size += xfer_size;
			buf += xfer_size;
			buf_len -= xfer_size;
		}
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}
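/*
 * Hardware kick-off: take the next descriptor off the virtual channel,
 * program the thread IDs and the list head address, and start list
 * processing.
 */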
static void mdc_issue_desc(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;
	struct virt_dma_desc *vd;
	struct mdc_tx_desc *mdesc;
	u32 val;

	vd = vchan_next_desc(&mchan->vc);
	if (!vd)
		return;

	list_del(&vd->node);

	mdesc = to_mdc_desc(&vd->tx);
	mchan->desc = mdesc;

	dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
		mchan->chan_nr);

	mdma->soc->enable_chan(mchan);

	val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
	val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
	val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
	mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
	val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
	val |= MDC_CONTROL_AND_STATUS_LIST_EN;
	mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}
static void mdc_issue_pending(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
		mdc_issue_desc(mchan);
	spin_unlock_irqrestore(&mchan->vc.lock, flags);
}
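/*
 * Residue calculation: for the in-flight descriptor, count the commands
 * the hardware has completed beyond those the IRQ handler has seen, and
 * subtract each completed node's size (xfer_size is stored as size - 1)
 * plus the partial progress of the current node.
 */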
static enum dma_status mdc_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc = NULL;
	struct virt_dma_desc *vd;
	unsigned long flags;
	size_t bytes = 0;
	int ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	vd = vchan_find_desc(&mchan->vc, cookie);
	if (vd) {
		mdesc = to_mdc_desc(&vd->tx);
		bytes = mdesc->list_xfer_size;
	} else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
		struct mdc_hw_list_desc *ldesc;
		u32 done, processed, residue;
		int i, cmds;

		mdesc = mchan->desc;

		/*
		 * ... take a consistent snapshot of MDC_CMDS_PROCESSED and
		 * MDC_ACTIVE_TRANSFER_SIZE, then extract the 'done' and
		 * 'processed' command counts from it ...
		 */

		cmds = (done - processed) %
			(MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

		/*
		 * If the command loaded event hasn't been processed yet,
		 * the difference above includes an extra command.
		 */
		if (!mdesc->cmd_loaded)
			cmds--;
		else
			cmds += mdesc->list_cmds_done;

		bytes = mdesc->list_xfer_size;
		ldesc = mdesc->list;
		for (i = 0; i < cmds; i++) {
			bytes -= ldesc->xfer_size + 1;
			ldesc = ldesc->next_desc;
		}
		if (ldesc) {
			if (residue != MDC_TRANSFER_SIZE_MASK)
				bytes -= ldesc->xfer_size - residue;
			else
				bytes -= ldesc->xfer_size + 1;
		}
	}
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	dma_set_residue(txstate, bytes);

	return ret;
}
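/*
 * Event accounting: compare CMDS_DONE against the CMDS_PROCESSED count,
 * allowing for wrap-around of the hardware counters.
 */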
static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
{
	u32 val, processed, done1, done2;
	unsigned int ret;

	val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
	processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
				MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
	/*
	 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
	 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
	 * didn't miss a command completion.
	 */
	do {
		/* ... read CMDS_DONE, write back the acknowledgment, and
		 * re-read CMDS_DONE until the two reads agree ... */
	} while (done1 != done2);

	if (done1 >= processed)
		ret = done1 - processed;
	else
		ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) -
		       processed) + done1;

	return ret;
}
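/*
 * Termination: drop the in-flight descriptor and collect everything
 * queued on the virtual channel; the descriptors are freed outside
 * the lock.
 */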
static int mdc_terminate_all(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&mchan->vc.lock, flags);

	mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
			MDC_CONTROL_AND_STATUS);

	if (mchan->desc) {
		vchan_terminate_vdesc(&mchan->desc->vd);
		mchan->desc = NULL;
	}
	vchan_get_all_descriptors(&mchan->vc, &head);

	mdc_get_new_events(mchan);

	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	vchan_dma_desc_free_list(&mchan->vc, &head);

	return 0;
}
static void mdc_synchronize(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);

	vchan_synchronize(&mchan->vc);
}
static int mdc_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	mchan->config = *config;
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	return 0;
}
static int mdc_alloc_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct device *dev = mdma2dev(mchan->mdma);

	return pm_runtime_resume_and_get(dev);
}
static void mdc_free_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct device *dev = mdma2dev(mdma);

	mdc_terminate_all(chan);
	mdma->soc->disable_chan(mchan);
	pm_runtime_put(dev);
}
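/*
 * Interrupt handling: the first event of a transfer only signals that
 * the command list was loaded (cmd_loaded); each subsequent event
 * retires one list node. Cyclic descriptors fire the period callback
 * and wrap, while non-cyclic ones complete the cookie once the whole
 * list is done.
 */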
static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
	struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
	struct mdc_tx_desc *mdesc;
	unsigned int i, new_events;

	spin_lock(&mchan->vc.lock);

	dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

	new_events = mdc_get_new_events(mchan);
	if (!new_events)
		goto out;

	mdesc = mchan->desc;
	if (!mdesc) {
		dev_warn(mdma2dev(mchan->mdma),
			 "IRQ with no active descriptor on channel %d\n",
			 mchan->chan_nr);
		goto out;
	}

	for (i = 0; i < new_events; i++) {
		/*
		 * The first interrupt in a transfer indicates that the
		 * command list has been loaded, not that a command has
		 * been completed.
		 */
		if (!mdesc->cmd_loaded) {
			mdesc->cmd_loaded = true;
			continue;
		}

		mdesc->list_cmds_done++;
		if (mdesc->cyclic) {
			mdesc->list_cmds_done %= mdesc->list_len;
			if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
				vchan_cyclic_callback(&mdesc->vd);
		} else if (mdesc->list_cmds_done == mdesc->list_len) {
			mchan->desc = NULL;
			vchan_cookie_complete(&mdesc->vd);
			mdc_issue_desc(mchan);
			break;
		}
	}

out:
	spin_unlock(&mchan->vc.lock);

	return IRQ_HANDLED;
}
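/*
 * OF translation: the MDC uses a three-cell DMA specifier, where
 * args[0] selects the peripheral, args[1] is a bitmask of channels the
 * request may use, and args[2] is the thread ID. A consumer node would
 * look something like the following (values are illustrative only, not
 * taken from a real board file):
 *
 *	uart0: serial@... {
 *		dmas = <&mdc 1 0xffffffff 0>, <&mdc 2 0xffffffff 0>;
 *		dma-names = "tx", "rx";
 *	};
 */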
static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	struct mdc_dma *mdma = ofdma->of_dma_data;
	struct dma_chan *chan;

	if (dma_spec->args_count != 3)
		return NULL;

	list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
		struct mdc_chan *mchan = to_mdc_chan(chan);

		if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
			continue;
		if (dma_get_slave_channel(chan)) {
			mchan->periph = dma_spec->args[0];
			mchan->thread = dma_spec->args[2];
			return chan;
		}
	}

	return NULL;
}
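/*
 * On Pistachio, the DMA request routing for each channel is selected
 * through the CR_PERIPH syscon regmap rather than the MDC's own
 * registers: enable writes the peripheral number into the channel's
 * route field, and disable clears it back to zero.
 */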
static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   mchan->periph <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   0);
}
static const struct mdc_dma_soc_data pistachio_mdc_data = {
	.enable_chan = pistachio_mdc_enable_chan,
	.disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
	{ .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
	{ },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);
static int img_mdc_runtime_suspend(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);

	clk_disable_unprepare(mdma->clk);

	return 0;
}

static int img_mdc_runtime_resume(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);

	return clk_prepare_enable(mdma->clk);
}
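/*
 * Probe: the channel count, thread count and bus width are read back
 * from MDC_GLOBAL_CONFIG_A, with "dma-channels" allowed to override the
 * hardware channel count from the device tree.
 */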
static int mdc_dma_probe(struct platform_device *pdev)
{
	struct mdc_dma *mdma;
	unsigned int i;
	u32 val;
	int ret;

	mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
	if (!mdma)
		return -ENOMEM;
	platform_set_drvdata(pdev, mdma);

	mdma->soc = of_device_get_match_data(&pdev->dev);

	mdma->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdma->regs))
		return PTR_ERR(mdma->regs);

	mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "img,cr-periph");
	if (IS_ERR(mdma->periph_regs))
		return PTR_ERR(mdma->periph_regs);

	mdma->clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(mdma->clk))
		return PTR_ERR(mdma->clk);

	dma_cap_zero(mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);

	val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
	mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
		MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
	mdma->nr_threads =
		1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
		      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
	mdma->bus_width =
		(1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
		       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
	/*
	 * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
	 * are supported, this makes it possible for the value reported in
	 * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
	 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
	 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
	 * ambiguity, restrict transfer sizes to one bus-width less than the
	 * maximum.
	 */
	mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;

	of_property_read_u32(pdev->dev.of_node, "dma-channels",
			     &mdma->nr_channels);
	ret = of_property_read_u32(pdev->dev.of_node,
				   "img,max-burst-multiplier",
				   &mdma->max_burst_mult);
	if (ret)
		return ret;

	mdma->dma_dev.dev = &pdev->dev;
	mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
	mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
	mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
	mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
	mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
	mdma->dma_dev.device_tx_status = mdc_tx_status;
	mdma->dma_dev.device_issue_pending = mdc_issue_pending;
	mdma->dma_dev.device_terminate_all = mdc_terminate_all;
	mdma->dma_dev.device_synchronize = mdc_synchronize;
	mdma->dma_dev.device_config = mdc_slave_config;

	mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	for (i = 1; i <= mdma->bus_width; i <<= 1) {
		mdma->dma_dev.src_addr_widths |= BIT(i);
		mdma->dma_dev.dst_addr_widths |= BIT(i);
	}

	INIT_LIST_HEAD(&mdma->dma_dev.channels);
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		mchan->mdma = mdma;
		mchan->chan_nr = i;
		mchan->irq = platform_get_irq(pdev, i);
		if (mchan->irq < 0)
			return mchan->irq;

		ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
				       IRQ_TYPE_LEVEL_HIGH,
				       dev_name(&pdev->dev), mchan);
		if (ret < 0)
			return ret;

		mchan->vc.desc_free = mdc_desc_free;
		vchan_init(&mchan->vc, &mdma->dma_dev);
	}

	mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
					   sizeof(struct mdc_hw_list_desc),
					   4, 0);
	if (!mdma->desc_pool)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = img_mdc_runtime_resume(&pdev->dev);
		if (ret)
			return ret;
	}

	ret = dma_async_device_register(&mdma->dma_dev);
	if (ret)
		goto suspend;

	ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
	if (ret)
		goto unregister;

	dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
		 mdma->nr_channels, mdma->nr_threads);

	return 0;

unregister:
	dma_async_device_unregister(&mdma->dma_dev);
suspend:
	if (!pm_runtime_enabled(&pdev->dev))
		img_mdc_runtime_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return ret;
}
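/*
 * Remove: after unregistering, each channel's IRQ is freed explicitly
 * before the devm teardown runs, so no handler can race with
 * tasklet_kill() on a dead virtual channel.
 */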
static int mdc_dma_remove(struct platform_device *pdev)
{
	struct mdc_dma *mdma = platform_get_drvdata(pdev);
	struct mdc_chan *mchan, *next;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdma->dma_dev);

	list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&mchan->vc.chan.device_node);

		devm_free_irq(&pdev->dev, mchan->irq, mchan);

		tasklet_kill(&mchan->vc.task);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		img_mdc_runtime_suspend(&pdev->dev);

	return 0;
}
static int img_mdc_suspend_late(struct device *dev)
{
	struct mdc_dma *mdma = dev_get_drvdata(dev);
	int i;

	/* Check that all channels are idle */
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		if (unlikely(mchan->desc))
			return -EBUSY;
	}

	return pm_runtime_force_suspend(dev);
}

/* ... img_mdc_resume_early() and the img_mdc_pm_ops table ... */
static struct platform_driver mdc_dma_driver = {
	.driver = {
		.name = "img-mdc-dma",
		.pm = &img_mdc_pm_ops,
		.of_match_table = of_match_ptr(mdc_dma_of_match),
	},
	.probe = mdc_dma_probe,
	.remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);
MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");