Lines Matching +full:mt7623 +full:- +full:hsdma

1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2017-2018 MediaTek Inc.
5 * Driver for MediaTek High-Speed DMA Controller
14 #include <linux/dma-mapping.h>
26 #include "../virt-dma.h"
41 #define MTK_HSDMA_NEXT_DESP_IDX(x, y) (((x) + 1) & ((y) - 1))
42 #define MTK_HSDMA_LAST_DESP_IDX(x, y) (((x) - 1) & ((y) - 1))
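
The two macros above implement wraparound indexing for a ring whose size is a power of two: masking with (size - 1) is equivalent to taking the index modulo the size. A minimal stand-alone check of the same form (plain C, illustrative names):

#include <assert.h>

#define NEXT_IDX(x, y) (((x) + 1) & ((y) - 1))	/* same shape as NEXT_DESP_IDX */
#define LAST_IDX(x, y) (((x) - 1) & ((y) - 1))	/* same shape as LAST_DESP_IDX */

int main(void)
{
	assert(NEXT_IDX(3, 4) == 0);	/* incrementing past the end wraps to 0 */
	assert(LAST_IDX(0, 4) == 3);	/* decrementing below 0 wraps to size - 1 */
	return 0;
}

The mask trick only holds when the ring size is a power of two, which MTK_DMA_SIZE is expected to satisfy.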
106 * struct mtk_hsdma_pdesc - This is the struct holding info describing physical
108 * 4-byte alignment, in little-endian order.
122 * struct mtk_hsdma_vdesc - This is the struct holding info describing virtual
139 * struct mtk_hsdma_cb - This is the struct holding extra info required for RX
152 * struct mtk_hsdma_ring - This struct holds info describing underlying ring
175 * struct mtk_hsdma_pchan - This is the struct holding info describing physical
191 * struct mtk_hsdma_vchan - This is the struct holding info describing virtual
207 * struct mtk_hsdma_soc - This is the struct holding differences among SoCs
217 * struct mtk_hsdma_device - This is the struct holding info describing HSDMA
250 return container_of(chan->device, struct mtk_hsdma_device, ddev);
263 static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma)
265 return hsdma->ddev.dev;
268 static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg)
270 return readl(hsdma->base + reg);
273 static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
275 writel(val, hsdma->base + reg);
278 static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg,
283 val = mtk_dma_read(hsdma, reg);
286 mtk_dma_write(hsdma, reg, val);
289 static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
291 mtk_dma_rmw(hsdma, reg, 0, val);
294 static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
296 mtk_dma_rmw(hsdma, reg, val, 0);
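
mtk_dma_set() and mtk_dma_clr() are thin wrappers over a read-modify-write helper: clear the masked bits, then OR in the bits being set. A stand-alone sketch of that pattern against a fake register file (plain C; the demo_* names are illustrative, not from the driver):

#include <assert.h>
#include <stdint.h>

static uint32_t regfile[1];	/* stands in for the MMIO region */

static uint32_t demo_read(unsigned int reg) { return regfile[reg]; }
static void demo_write(unsigned int reg, uint32_t val) { regfile[reg] = val; }

static void demo_rmw(unsigned int reg, uint32_t clr, uint32_t set)
{
	uint32_t val = demo_read(reg);

	val &= ~clr;	/* drop the bits being cleared */
	val |= set;	/* merge in the bits being set */
	demo_write(reg, val);
}

int main(void)
{
	demo_write(0, 0xf0);
	demo_rmw(0, 0x30, 0x03);	/* clear 0x30, set 0x03 */
	assert(demo_read(0) == 0xc3);
	return 0;
}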
304 static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma)
308 return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status,
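
The busy-wait is built on readl_poll_timeout() from <linux/iopoll.h>, which rereads a register until a condition holds or a timeout expires. A hedged sketch of what the complete call looks like in kernel context; the BUSY bit name follows the driver's register naming, and the 10us/1ms poll constants are illustrative stand-ins for the elided MTK_HSDMA_* values:

static int example_busy_wait(void __iomem *base)
{
	u32 status;

	/* poll GLO every ~10us until the engine reports idle; give up after 1ms */
	return readl_poll_timeout(base + MTK_HSDMA_GLO, status,
				  !(status & MTK_HSDMA_GLO_BUSY),
				  10, 1000);
}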
314 static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
317 struct mtk_hsdma_ring *ring = &pc->ring;
323 * Allocate ring space where [0 ... MTK_DMA_SIZE - 1] is for TX ring
324 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
326 pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
327 ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
328 &ring->tphys, GFP_NOWAIT);
329 if (!ring->txd)
330 return -ENOMEM;
332 ring->rxd = &ring->txd[MTK_DMA_SIZE];
333 ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd);
334 ring->cur_tptr = 0;
335 ring->cur_rptr = MTK_DMA_SIZE - 1;
337 ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT);
338 if (!ring->cb) {
339 err = -ENOMEM;
343 atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1);
345 /* Disable HSDMA and wait for the completion */
346 mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
347 err = mtk_hsdma_busy_wait(hsdma);
352 mtk_dma_set(hsdma, MTK_HSDMA_RESET,
354 mtk_dma_clr(hsdma, MTK_HSDMA_RESET,
357 /* Set up the HSDMA initial pointers in the ring */
358 mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys);
359 mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE);
360 mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
361 mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0);
362 mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys);
363 mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE);
364 mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr);
365 mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0);
367 /* Enable HSDMA */
368 mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
371 mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT);
374 mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
379 kfree(ring->cb);
382 dma_free_coherent(hsdma2dev(hsdma),
383 pc->sz_ring, ring->txd, ring->tphys);
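
Note how a single dma_alloc_coherent() call backs both rings: TX descriptors occupy slots [0 .. MTK_DMA_SIZE - 1] and the RX descriptors the next MTK_DMA_SIZE slots, so the RX virtual and DMA addresses are plain offsets from the TX base. A hedged sketch of just that layout step (kernel context; struct demo_desc and n are illustrative):

struct demo_desc { __le32 desc1, desc2; };

static int demo_alloc_rings(struct device *dev, int n,
			    struct demo_desc **txd, dma_addr_t *tphys,
			    struct demo_desc **rxd, dma_addr_t *rphys)
{
	size_t sz = 2 * n * sizeof(**txd);	/* one buffer, two rings */

	*txd = dma_alloc_coherent(dev, sz, tphys, GFP_NOWAIT);
	if (!*txd)
		return -ENOMEM;

	*rxd = *txd + n;			/* RX ring sits right after TX */
	*rphys = *tphys + n * sizeof(**txd);
	return 0;
}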
387 static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma,
390 struct mtk_hsdma_ring *ring = &pc->ring;
392 /* Disable HSDMA and then wait for the completion */
393 mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
394 mtk_hsdma_busy_wait(hsdma);
397 mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
398 mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0);
399 mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0);
400 mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0);
401 mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0);
402 mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0);
403 mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1);
405 kfree(ring->cb);
407 dma_free_coherent(hsdma2dev(hsdma),
408 pc->sz_ring, ring->txd, ring->tphys);
411 static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma,
415 struct mtk_hsdma_ring *ring = &pc->ring;
421 spin_lock_irqsave(&hsdma->lock, flags);
424 * Reserve slots, where pc->nr_free is used to track how many free
427 num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN);
428 reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free));
431 spin_unlock_irqrestore(&hsdma->lock, flags);
432 return -ENOSPC;
435 atomic_sub(reserved, &pc->nr_free);
437 while (reserved--) {
439 tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ?
440 MTK_HSDMA_MAX_LEN : hvd->len;
449 txd = &ring->txd[ring->cur_tptr];
450 WRITE_ONCE(txd->desc1, hvd->src);
451 WRITE_ONCE(txd->desc2,
452 hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen));
454 rxd = &ring->rxd[ring->cur_tptr];
455 WRITE_ONCE(rxd->desc1, hvd->dest);
456 WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen));
459 ring->cb[ring->cur_tptr].vd = &hvd->vd;
462 ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr,
466 hvd->src += tlen;
467 hvd->dest += tlen;
468 hvd->len -= tlen;
475 if (!hvd->len) {
476 prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE);
477 ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
484 * Updating the TX ring pointer in hardware lets HSDMA take
487 mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
489 spin_unlock_irqrestore(&hsdma->lock, flags);
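
The issue path splits one virtual descriptor into MTK_HSDMA_MAX_LEN-sized pieces and reserves that many ring slots up front: DIV_ROUND_UP(len, MTK_HSDMA_MAX_LEN), clamped by nr_free. A stand-alone check that the chunking loop consumes exactly that many slots (plain C; the 0x3f80 limit is illustrative, not taken from the source):

#include <assert.h>
#include <stddef.h>

#define MAX_LEN 0x3f80	/* per-descriptor payload limit, illustrative */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	size_t len = 100000, used = 0;

	while (len) {
		size_t tlen = len > MAX_LEN ? MAX_LEN : len;

		/* a real ring would also advance src/dest by tlen here */
		len -= tlen;
		used++;
	}
	assert(used == DIV_ROUND_UP((size_t)100000, MAX_LEN));
	return 0;
}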
494 static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma,
500 lockdep_assert_held(&hvc->vc.lock);
502 list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) {
508 err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd);
517 if (err == -ENOSPC || hvd->len > 0)
528 list_move_tail(&vd->node, &hvc->desc_hw_processing);
532 static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
545 status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS);
549 pc = hsdma->pc;
552 * Using a fail-safe loop with iterations of up to MTK_DMA_SIZE to
558 while (i--) {
559 next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr,
561 rxd = &pc->ring.rxd[next];
567 desc2 = READ_ONCE(rxd->desc2);
568 if (!(desc2 & hsdma->soc->ddone))
571 cb = &pc->ring.cb[next];
572 if (unlikely(!cb->vd)) {
573 dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n");
578 hvd = to_hsdma_vdesc(cb->vd);
579 hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(rxd->desc2);
582 if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) {
583 hvc = to_hsdma_vchan(cb->vd->tx.chan);
585 spin_lock(&hvc->vc.lock);
588 list_del(&cb->vd->node);
591 vchan_cookie_complete(cb->vd);
593 if (hvc->issue_synchronize &&
594 list_empty(&hvc->desc_hw_processing)) {
595 complete(&hvc->issue_completion);
596 hvc->issue_synchronize = false;
598 spin_unlock(&hvc->vc.lock);
600 cb->flag = 0;
603 cb->vd = NULL;
609 WRITE_ONCE(rxd->desc1, 0);
610 WRITE_ONCE(rxd->desc2, 0);
611 pc->ring.cur_rptr = next;
614 atomic_inc(&pc->nr_free);
621 mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr);
628 if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1)
629 mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status);
632 for (i = 0; i < hsdma->dma_requests; i++) {
633 hvc = &hsdma->vc[i];
634 spin_lock(&hvc->vc.lock);
635 mtk_hsdma_issue_vchan_pending(hsdma, hvc);
636 spin_unlock(&hvc->vc.lock);
641 mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
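
The reaping logic walks the RX ring from the slot after cur_rptr, stops at the first descriptor the engine has not stamped with its DDONE bit, and recycles everything before it. A stand-alone simulation of that walk (plain C; the ring size and bit position are illustrative):

#include <assert.h>
#include <stdint.h>

#define SIZE	4
#define DDONE	(1u << 31)
#define NEXT(x)	(((x) + 1) & (SIZE - 1))

int main(void)
{
	uint32_t desc2[SIZE] = { DDONE, DDONE, 0, 0 };
	int cur_rptr = SIZE - 1, nr_free = 0;

	for (int i = SIZE; i--; ) {	/* fail-safe bound, as in the driver */
		int next = NEXT(cur_rptr);

		if (!(desc2[next] & DDONE))
			break;		/* hardware hasn't finished this one */
		desc2[next] = 0;	/* clean the slot for reuse */
		cur_rptr = next;
		nr_free++;		/* one more slot available to submit */
	}
	assert(cur_rptr == 1 && nr_free == 2);
	return 0;
}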
646 struct mtk_hsdma_device *hsdma = devid;
652 mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
654 mtk_hsdma_free_rooms_in_ring(hsdma);
665 list_for_each_entry(vd, &hvc->desc_hw_processing, node)
666 if (vd->tx.cookie == cookie)
669 list_for_each_entry(vd, &hvc->vc.desc_issued, node)
670 if (vd->tx.cookie == cookie)
691 spin_lock_irqsave(&hvc->vc.lock, flags);
693 spin_unlock_irqrestore(&hvc->vc.lock, flags);
697 bytes = hvd->residue;
707 struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
711 spin_lock_irqsave(&hvc->vc.lock, flags);
713 if (vchan_issue_pending(&hvc->vc))
714 mtk_hsdma_issue_vchan_pending(hsdma, hvc);
716 spin_unlock_irqrestore(&hvc->vc.lock, flags);
729 hvd->len = len;
730 hvd->residue = len;
731 hvd->src = src;
732 hvd->dest = dest;
734 return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags);
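
From a consumer's point of view this driver is a standard dmaengine memcpy provider. A hedged sketch of client usage built only from generic dmaengine calls, nothing driver-specific (kernel context; demo_memcpy is illustrative and error handling is abbreviated):

static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);	/* any memcpy-capable channel */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EIO;
		goto out;
	}
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)	/* wait until done */
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}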
743 spin_lock_irqsave(&vc->lock, flags);
744 list_splice_tail_init(&vc->desc_allocated, &head);
745 list_splice_tail_init(&vc->desc_submitted, &head);
746 list_splice_tail_init(&vc->desc_issued, &head);
747 spin_unlock_irqrestore(&vc->lock, flags);
765 spin_lock(&hvc->vc.lock);
766 if (!list_empty(&hvc->desc_hw_processing)) {
767 hvc->issue_synchronize = true;
770 spin_unlock(&hvc->vc.lock);
773 wait_for_completion(&hvc->issue_completion);
778 WARN_ONCE(!list_empty(&hvc->desc_hw_processing),
782 vchan_synchronize(&hvc->vc);
784 WARN_ONCE(!list_empty(&hvc->vc.desc_completed),
809 struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
813 * Since HSDMA has only one PC, the PC's resources are allocated
817 if (!refcount_read(&hsdma->pc_refcnt)) {
818 err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc);
822 * refcount_inc() would complain about an increment on 0 (use-after-free).
825 refcount_set(&hsdma->pc_refcnt, 1);
827 refcount_inc(&hsdma->pc_refcnt);
835 struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
841 if (!refcount_dec_and_test(&hsdma->pc_refcnt))
844 mtk_hsdma_free_pchan(hsdma, hsdma->pc);
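
Allocation and release of the single physical channel bracket a refcount_t: the first user allocates and refcount_set()s the count to 1 (refcount_inc() on 0 would trip the use-after-free check), later users just increment, and only the final refcount_dec_and_test() tears the channel down. A hedged sketch of the bare pattern (kernel context; demo_* names are illustrative, and as in the driver it relies on the caller serializing get/put):

static refcount_t demo_refcnt;

static void demo_get(void)
{
	if (!refcount_read(&demo_refcnt))
		refcount_set(&demo_refcnt, 1);	/* first user: allocate, then set to 1 */
	else
		refcount_inc(&demo_refcnt);	/* later users just take a reference */
}

static void demo_put(void)
{
	if (refcount_dec_and_test(&demo_refcnt)) {
		/* last user: free the shared resource here */
	}
}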
847 static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma)
851 pm_runtime_enable(hsdma2dev(hsdma));
852 pm_runtime_get_sync(hsdma2dev(hsdma));
854 err = clk_prepare_enable(hsdma->clk);
858 mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
859 mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT);
864 static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma)
866 mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0);
868 clk_disable_unprepare(hsdma->clk);
870 pm_runtime_put_sync(hsdma2dev(hsdma));
871 pm_runtime_disable(hsdma2dev(hsdma));
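
hw_init and hw_deinit are strict mirrors: runtime PM comes up and the device is resumed before the clock is enabled, and teardown undoes the same steps in reverse order. A hedged sketch of the pairing (kernel context; demo_* names illustrative, minimal error handling):

static int demo_hw_init(struct device *dev, struct clk *clk)
{
	int err;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);	/* power the device before touching it */

	err = clk_prepare_enable(clk);
	if (err) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
	}
	return err;
}

static void demo_hw_deinit(struct device *dev, struct clk *clk)
{
	clk_disable_unprepare(clk);	/* reverse order of demo_hw_init() */
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
}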
887 { .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc},
888 { .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc},
895 struct mtk_hsdma_device *hsdma;
900 hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
901 if (!hsdma)
902 return -ENOMEM;
904 dd = &hsdma->ddev;
906 hsdma->base = devm_platform_ioremap_resource(pdev, 0);
907 if (IS_ERR(hsdma->base))
908 return PTR_ERR(hsdma->base);
910 hsdma->soc = of_device_get_match_data(&pdev->dev);
911 if (!hsdma->soc) {
912 dev_err(&pdev->dev, "No device match found\n");
913 return -ENODEV;
916 hsdma->clk = devm_clk_get(&pdev->dev, "hsdma");
917 if (IS_ERR(hsdma->clk)) {
918 dev_err(&pdev->dev, "No clock for %s\n",
919 dev_name(&pdev->dev));
920 return PTR_ERR(hsdma->clk);
926 hsdma->irq = err;
928 refcount_set(&hsdma->pc_refcnt, 0);
929 spin_lock_init(&hsdma->lock);
931 dma_cap_set(DMA_MEMCPY, dd->cap_mask);
933 dd->copy_align = MTK_HSDMA_ALIGN_SIZE;
934 dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources;
935 dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
936 dd->device_tx_status = mtk_hsdma_tx_status;
937 dd->device_issue_pending = mtk_hsdma_issue_pending;
938 dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
939 dd->device_terminate_all = mtk_hsdma_terminate_all;
940 dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
941 dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
942 dd->directions = BIT(DMA_MEM_TO_MEM);
943 dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
944 dd->dev = &pdev->dev;
945 INIT_LIST_HEAD(&dd->channels);
947 hsdma->dma_requests = MTK_HSDMA_NR_VCHANS;
948 if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
949 "dma-requests",
950 &hsdma->dma_requests)) {
951 dev_info(&pdev->dev,
952 "Using %u as missing dma-requests property\n",
956 hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS,
957 sizeof(*hsdma->pc), GFP_KERNEL);
958 if (!hsdma->pc)
959 return -ENOMEM;
961 hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests,
962 sizeof(*hsdma->vc), GFP_KERNEL);
963 if (!hsdma->vc)
964 return -ENOMEM;
966 for (i = 0; i < hsdma->dma_requests; i++) {
967 vc = &hsdma->vc[i];
968 vc->vc.desc_free = mtk_hsdma_vdesc_free;
969 vchan_init(&vc->vc, dd);
970 init_completion(&vc->issue_completion);
971 INIT_LIST_HEAD(&vc->desc_hw_processing);
978 err = of_dma_controller_register(pdev->dev.of_node,
979 of_dma_xlate_by_chan_id, hsdma);
981 dev_err(&pdev->dev,
982 "MediaTek HSDMA OF registration failed %d\n", err);
986 mtk_hsdma_hw_init(hsdma);
988 err = devm_request_irq(&pdev->dev, hsdma->irq,
990 dev_name(&pdev->dev), hsdma);
992 dev_err(&pdev->dev,
997 platform_set_drvdata(pdev, hsdma);
999 dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n");
1004 mtk_hsdma_hw_deinit(hsdma);
1005 of_dma_controller_free(pdev->dev.of_node);
1014 struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev);
1019 for (i = 0; i < hsdma->dma_requests; i++) {
1020 vc = &hsdma->vc[i];
1022 list_del(&vc->vc.chan.device_node);
1023 tasklet_kill(&vc->vc.task);
1027 mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
1030 synchronize_irq(hsdma->irq);
1033 mtk_hsdma_hw_deinit(hsdma);
1035 dma_async_device_unregister(&hsdma->ddev);
1036 of_dma_controller_free(pdev->dev.of_node);
1049 MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver");