Lines Matching +full:1 +full:q (full-text search hits in the mt76 driver's DMA queue code; each hit shows the source line number, the matching line, and the enclosing function)
189 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_sync_idx() argument
191 Q_WRITE(q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
192 if (q->flags & MT_QFLAG_WED_RRO_EN) in mt76_dma_sync_idx()
193 Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc); in mt76_dma_sync_idx()
195 Q_WRITE(q, ring_size, q->ndesc); in mt76_dma_sync_idx()
196 q->head = Q_READ(q, dma_idx); in mt76_dma_sync_idx()
197 q->tail = q->head; in mt76_dma_sync_idx()
200 void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, in __mt76_dma_queue_reset() argument
203 if (!q || !q->ndesc) in __mt76_dma_queue_reset()
206 if (!mt76_queue_is_wed_rro_ind(q)) { in __mt76_dma_queue_reset()
210 for (i = 0; i < q->ndesc; i++) in __mt76_dma_queue_reset()
211 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in __mt76_dma_queue_reset()
215 Q_WRITE(q, cpu_idx, 0); in __mt76_dma_queue_reset()
216 Q_WRITE(q, dma_idx, 0); in __mt76_dma_queue_reset()
218 mt76_dma_sync_idx(dev, q); in __mt76_dma_queue_reset()
221 void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_queue_reset() argument
223 __mt76_dma_queue_reset(dev, q, true); in mt76_dma_queue_reset()
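The fragments above are the ring reset/sync path: program the descriptor base and ring size, then resynchronize the software head/tail with the index the hardware reports. A minimal sketch of that pattern follows; struct demo_ring and struct demo_hw_regs are simplified stand-ins invented here, not the driver's struct mt76_queue or its Q_READ()/Q_WRITE() MMIO accessors.

/* Illustrative sketch only: simplified stand-ins, not the driver's types. */
#include <linux/types.h>

struct demo_hw_regs {           /* pretend register file; the real driver
                                 * accesses these through Q_READ()/Q_WRITE() */
        u32 ring_size;
        u32 dma_idx;            /* index owned by the hardware */
        u32 cpu_idx;            /* index published by software */
};

struct demo_entry {
        void *buf;              /* per-slot software bookkeeping */
};

struct demo_ring {
        u32 ndesc;              /* ring length in descriptors */
        u32 head, tail;         /* software producer/consumer indices */
        u32 queued;             /* slots currently handed to the hardware */
        struct demo_entry *entry;
        struct demo_hw_regs *hw;
};

/* Program the ring size, then adopt the hardware index as both head and
 * tail so the software view starts out empty and in step with the hardware. */
static void demo_ring_sync_idx(struct demo_ring *q)
{
        q->hw->ring_size = q->ndesc;
        q->head = q->hw->dma_idx;
        q->tail = q->head;
}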
227 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_rx_buf() argument
230 struct mt76_queue_entry *entry = &q->entry[q->head]; in mt76_dma_add_rx_buf()
233 int idx = q->head; in mt76_dma_add_rx_buf()
237 if (mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_add_rx_buf()
240 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_add_rx_buf()
241 data = &rro_desc[q->head]; in mt76_dma_add_rx_buf()
245 desc = &q->desc[q->head]; in mt76_dma_add_rx_buf()
251 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_add_rx_buf()
278 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_rx_buf()
279 q->queued++; in mt76_dma_add_rx_buf()
285 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_buf() argument
291 int i, idx = -1; in mt76_dma_add_buf()
295 q->entry[q->head].txwi = DMA_DUMMY_DATA; in mt76_dma_add_buf()
296 q->entry[q->head].skip_buf0 = true; in mt76_dma_add_buf()
302 idx = q->head; in mt76_dma_add_buf()
303 next = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
305 desc = &q->desc[idx]; in mt76_dma_add_buf()
306 entry = &q->entry[idx]; in mt76_dma_add_buf()
310 entry->skip_buf1 = i == nbufs - 1; in mt76_dma_add_buf()
319 if (i < nbufs - 1) { in mt76_dma_add_buf()
320 entry->dma_addr[1] = buf[1].addr; in mt76_dma_add_buf()
321 entry->dma_len[1] = buf[1].len; in mt76_dma_add_buf()
322 buf1 = buf[1].addr; in mt76_dma_add_buf()
323 ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); in mt76_dma_add_buf()
326 buf[1].addr >> 32); in mt76_dma_add_buf()
328 if (buf[1].skip_unmap) in mt76_dma_add_buf()
332 if (i == nbufs - 1) in mt76_dma_add_buf()
342 q->head = next; in mt76_dma_add_buf()
343 q->queued++; in mt76_dma_add_buf()
346 q->entry[idx].txwi = txwi; in mt76_dma_add_buf()
347 q->entry[idx].skb = skb; in mt76_dma_add_buf()
348 q->entry[idx].wcid = 0xffff; in mt76_dma_add_buf()
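mt76_dma_add_buf() is the producer side: fill a descriptor, remember the txwi/skb in the matching entry slot, then advance head modulo the ring size while keeping count in q->queued. A stripped-down sketch of that wraparound bookkeeping, building on the demo_ring types above; demo_ring_add() is hypothetical and ignores the descriptor contents.

/* Claim the slot at head, remember the buffer for later cleanup, then
 * advance head modulo the ring size.
 * Returns the slot index used, or -1 when the ring is full. */
static int demo_ring_add(struct demo_ring *q, void *buf)
{
        u32 idx = q->head;

        if (q->queued >= q->ndesc - 1)          /* always keep one slot free */
                return -1;

        q->entry[idx].buf = buf;
        q->head = (q->head + 1) % q->ndesc;     /* same wraparound as above */
        q->queued++;

        return idx;
}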
354 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_tx_cleanup_idx() argument
357 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_tx_cleanup_idx()
364 dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1], in mt76_dma_tx_cleanup_idx()
375 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_kick_queue() argument
378 Q_WRITE(q, cpu_idx, q->head); in mt76_dma_kick_queue()
382 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) in mt76_dma_tx_cleanup() argument
387 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
390 spin_lock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
392 last = -1; in mt76_dma_tx_cleanup()
394 last = Q_READ(q, dma_idx); in mt76_dma_tx_cleanup()
396 while (q->queued > 0 && q->tail != last) { in mt76_dma_tx_cleanup()
397 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
398 mt76_queue_tx_complete(dev, q, &entry); in mt76_dma_tx_cleanup()
405 if (!flush && q->tail == last) in mt76_dma_tx_cleanup()
406 last = Q_READ(q, dma_idx); in mt76_dma_tx_cleanup()
408 spin_unlock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
411 spin_lock_bh(&q->lock); in mt76_dma_tx_cleanup()
412 mt76_dma_sync_idx(dev, q); in mt76_dma_tx_cleanup()
413 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_cleanup()
414 spin_unlock_bh(&q->lock); in mt76_dma_tx_cleanup()
417 if (!q->queued) in mt76_dma_tx_cleanup()
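mt76_dma_tx_cleanup() is the consumer side: it walks tail toward the hardware's dma_idx, completing each entry, and re-reads dma_idx once it catches up in case the hardware advanced in the meantime; with flush set it drains everything. A hedged sketch of that drain loop on the demo_ring types, with a hypothetical demo_complete() callback in place of mt76_queue_tx_complete().

/* Walk tail toward the hardware index, completing each slot, and re-read
 * the index once caught up in case the hardware advanced meanwhile; with
 * flush set, drain everything regardless of the hardware index. */
static void demo_ring_cleanup(struct demo_ring *q, bool flush,
                              void (*demo_complete)(struct demo_entry *e))
{
        u32 last = flush ? ~0U : q->hw->dma_idx;

        while (q->queued > 0 && q->tail != last) {
                demo_complete(&q->entry[q->tail]);
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;

                if (!flush && q->tail == last)  /* caught up: refresh snapshot */
                        last = q->hw->dma_idx;
        }
}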
422 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_get_buf() argument
425 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_buf()
426 struct mt76_desc *desc = &q->desc[idx]; in mt76_dma_get_buf()
430 if (mt76_queue_is_wed_rro_ind(q)) in mt76_dma_get_buf()
446 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_get_buf()
454 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
455 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
466 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
467 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
476 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, in mt76_dma_dequeue() argument
479 int idx = q->tail; in mt76_dma_dequeue()
482 if (!q->queued) in mt76_dma_dequeue()
485 if (mt76_queue_is_wed_rro_data(q)) in mt76_dma_dequeue()
488 if (!mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_dequeue()
490 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_dequeue()
491 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) in mt76_dma_dequeue()
495 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
496 q->queued--; in mt76_dma_dequeue()
498 return mt76_dma_get_buf(dev, q, idx, len, info, more, drop); in mt76_dma_dequeue()
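mt76_dma_dequeue() only consumes a slot once the descriptor's MT_DMA_CTL_DMA_DONE bit is set, or unconditionally when flushing. A sketch of that completion test; DEMO_CTL_DMA_DONE and struct demo_desc are invented here, and the real control word is a little-endian __le32 handled with cpu_to_le32().

#define DEMO_CTL_DMA_DONE       (1U << 31)      /* hypothetical, in the spirit
                                                 * of MT_DMA_CTL_DMA_DONE */

struct demo_desc {
        u32 ctrl;               /* the real control word is a __le32 */
};

/* Consume the slot at tail only once the hardware has marked it done,
 * or unconditionally when flushing. Returns the slot index, or -1. */
static int demo_ring_dequeue(struct demo_ring *q, struct demo_desc *desc,
                             bool flush)
{
        u32 idx = q->tail;

        if (!q->queued)
                return -1;

        if (flush)
                desc[idx].ctrl |= DEMO_CTL_DMA_DONE;    /* force completion */
        else if (!(desc[idx].ctrl & DEMO_CTL_DMA_DONE))
                return -1;                              /* hardware not done */

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;

        return idx;
}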
502 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb_raw() argument
511 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
522 spin_lock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
523 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); in mt76_dma_tx_queue_skb_raw()
524 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_queue_skb_raw()
525 spin_unlock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
535 mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, in mt76_dma_tx_queue_skb() argument
590 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { in mt76_dma_tx_queue_skb()
603 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, in mt76_dma_tx_queue_skb()
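Both TX submit paths above first check that enough descriptors are free before adding the buffers, and the raw path then kicks the hardware under q->lock by publishing the new head as cpu_idx (mt76_dma_kick_queue() above). A sketch of that submit-and-kick sequence for a single buffer, locking omitted:

/* Check for space, add the buffer, then publish the new head as cpu_idx so
 * the hardware starts fetching ("kick"). Locking is left out; the driver
 * holds q->lock around the add and the kick. */
static int demo_ring_submit(struct demo_ring *q, void *buf)
{
        int idx;

        if (q->queued + 1 >= q->ndesc - 1)      /* not enough room */
                return -1;

        idx = demo_ring_add(q, buf);
        if (idx < 0)
                return -1;

        q->hw->cpu_idx = q->head;               /* the kick */

        return idx;
}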
635 mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill_buf() argument
638 int len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_fill_buf()
641 if (!q->ndesc) in mt76_dma_rx_fill_buf()
644 while (q->queued < q->ndesc - 1) { in mt76_dma_rx_fill_buf()
651 if (mt76_queue_is_wed_rro_ind(q)) in mt76_dma_rx_fill_buf()
654 buf = mt76_get_page_pool_buf(q, &offset, q->buf_size); in mt76_dma_rx_fill_buf()
659 dir = page_pool_get_dma_dir(q->page_pool); in mt76_dma_rx_fill_buf()
662 qbuf.addr = addr + q->buf_offset; in mt76_dma_rx_fill_buf()
664 qbuf.len = len - q->buf_offset; in mt76_dma_rx_fill_buf()
666 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { in mt76_dma_rx_fill_buf()
673 if (frames || mt76_queue_is_wed_rx(q)) in mt76_dma_rx_fill_buf()
674 mt76_dma_kick_queue(dev, q); in mt76_dma_rx_fill_buf()
679 int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill() argument
684 if (!q->ndesc) in mt76_dma_rx_fill()
687 spin_lock_bh(&q->lock); in mt76_dma_rx_fill()
688 frames = mt76_dma_rx_fill_buf(dev, q, allow_direct); in mt76_dma_rx_fill()
689 spin_unlock_bh(&q->lock); in mt76_dma_rx_fill()
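The RX refill loop allocates page-pool buffers and posts them until only one descriptor remains free (q->queued < q->ndesc - 1), then kicks the queue once for the whole batch. A sketch with hypothetical demo_alloc_buf()/demo_free_buf() hooks standing in for the page-pool calls:

/* Allocate and post buffers until only one descriptor remains free, then
 * kick once for the whole batch. */
static int demo_ring_rx_fill(struct demo_ring *q,
                             void *(*demo_alloc_buf)(void),
                             void (*demo_free_buf)(void *buf))
{
        int frames = 0;

        while (q->queued < q->ndesc - 1) {
                void *buf = demo_alloc_buf();

                if (!buf)
                        break;

                if (demo_ring_add(q, buf) < 0) {
                        demo_free_buf(buf);     /* ring full: give it back */
                        break;
                }
                frames++;
        }

        if (frames)
                q->hw->cpu_idx = q->head;       /* one kick after the batch */

        return frames;
}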
695 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_alloc_queue() argument
701 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
702 spin_lock_init(&q->cleanup_lock); in mt76_dma_alloc_queue()
704 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
705 q->ndesc = n_desc; in mt76_dma_alloc_queue()
706 q->buf_size = bufsize; in mt76_dma_alloc_queue()
707 q->hw_idx = idx; in mt76_dma_alloc_queue()
709 size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc) in mt76_dma_alloc_queue()
711 q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size, in mt76_dma_alloc_queue()
712 &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
713 if (!q->desc) in mt76_dma_alloc_queue()
716 if (mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_alloc_queue()
720 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_alloc_queue()
721 for (i = 0; i < q->ndesc; i++) { in mt76_dma_alloc_queue()
725 cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1; in mt76_dma_alloc_queue()
729 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
730 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
731 if (!q->entry) in mt76_dma_alloc_queue()
734 ret = mt76_create_page_pool(dev, q); in mt76_dma_alloc_queue()
738 ret = mt76_wed_dma_setup(dev, q, false); in mt76_dma_alloc_queue()
743 if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) || in mt76_dma_alloc_queue()
744 mt76_queue_is_wed_tx_free(q)) in mt76_dma_alloc_queue()
748 mt76_dma_queue_reset(dev, q); in mt76_dma_alloc_queue()
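mt76_dma_alloc_queue() takes the descriptor ring from coherent DMA memory and the per-slot entry array from ordinary kernel memory, both through managed allocators so they are released with the device. A minimal sketch of that allocation pattern using the real dmam_alloc_coherent()/devm_kzalloc() calls, but with a simplified ring struct and an illustrative descriptor layout rather than the driver's:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_hw_desc {
        __le32 buf0, ctrl, buf1, info;  /* layout is illustrative only */
};

struct demo_kring {
        int ndesc;
        struct demo_hw_desc *desc;      /* shared with the device */
        dma_addr_t desc_dma;
        void **entry;                   /* software-only bookkeeping */
};

static int demo_ring_alloc(struct device *dev, struct demo_kring *q, int n_desc)
{
        q->ndesc = n_desc;

        /* Coherent ring memory, visible to both CPU and device, freed
         * automatically when the device goes away. */
        q->desc = dmam_alloc_coherent(dev, n_desc * sizeof(*q->desc),
                                      &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;

        /* Ordinary kernel memory for the per-slot entry array. */
        q->entry = devm_kzalloc(dev, n_desc * sizeof(*q->entry), GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        return 0;
}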
754 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_cleanup() argument
759 if (!q->ndesc) in mt76_dma_rx_cleanup()
763 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
764 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); in mt76_dma_rx_cleanup()
765 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
770 if (!mt76_queue_is_wed_rro(q)) in mt76_dma_rx_cleanup()
772 } while (1); in mt76_dma_rx_cleanup()
774 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
775 if (q->rx_head) { in mt76_dma_rx_cleanup()
776 dev_kfree_skb(q->rx_head); in mt76_dma_rx_cleanup()
777 q->rx_head = NULL; in mt76_dma_rx_cleanup()
780 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
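mt76_dma_rx_cleanup() flushes the ring by dequeuing with flush set until nothing is queued, returning each buffer (and any half-assembled q->rx_head skb) to where it came from. A sketch of just the drain loop, reusing demo_ring_dequeue() from above and the demo_free_buf() hook from the refill sketch:

/* Drain the ring: dequeue with flush set until nothing is queued, handing
 * each buffer back through the hypothetical demo_free_buf() hook. */
static void demo_ring_rx_flush(struct demo_ring *q, struct demo_desc *desc,
                               void (*demo_free_buf)(void *buf))
{
        int idx;

        while ((idx = demo_ring_dequeue(q, desc, true)) >= 0)
                demo_free_buf(q->entry[idx].buf);
}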
786 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset() local
788 if (!q->ndesc) in mt76_dma_rx_reset()
791 if (!mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_rx_reset()
794 for (i = 0; i < q->ndesc; i++) in mt76_dma_rx_reset()
795 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_rx_reset()
798 mt76_dma_rx_cleanup(dev, q); in mt76_dma_rx_reset()
801 mt76_wed_dma_setup(dev, q, true); in mt76_dma_rx_reset()
803 if (mt76_queue_is_wed_tx_free(q)) in mt76_dma_rx_reset()
807 mt76_queue_is_wed_rro(q)) in mt76_dma_rx_reset()
810 mt76_dma_sync_idx(dev, q); in mt76_dma_rx_reset()
811 mt76_dma_rx_fill_buf(dev, q, false); in mt76_dma_rx_reset()
815 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, in mt76_add_fragment() argument
818 struct sk_buff *skb = q->rx_head; in mt76_add_fragment()
824 int offset = data - page_address(page) + q->buf_offset; in mt76_add_fragment()
826 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); in mt76_add_fragment()
834 q->rx_head = NULL; in mt76_add_fragment()
836 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_add_fragment()
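mt76_add_fragment() handles frames that span more than one RX buffer: each follow-up buffer is attached to the pending q->rx_head skb as a page fragment via skb_add_rx_frag(), as visible above. A sketch of that single step; the offset/len/truesize parameters follow the meanings used in the fragments above.

#include <linux/skbuff.h>

/* Attach a follow-up RX buffer to the pending skb as a page fragment.
 * offset/len describe the new data; truesize is the full buffer size used
 * for memory accounting (q->buf_size above). */
static void demo_rx_add_frag(struct sk_buff *skb, struct page *page,
                             unsigned int offset, unsigned int len,
                             unsigned int truesize)
{
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
                        truesize);
}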
842 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) in mt76_dma_rx_process() argument
848 bool allow_direct = !mt76_queue_is_wed_rx(q); in mt76_dma_rx_process()
852 mt76_queue_is_wed_tx_free(q)) { in mt76_dma_rx_process()
853 dma_idx = Q_READ(q, dma_idx); in mt76_dma_rx_process()
862 if (q->tail == dma_idx) in mt76_dma_rx_process()
863 dma_idx = Q_READ(q, dma_idx); in mt76_dma_rx_process()
865 if (q->tail == dma_idx) in mt76_dma_rx_process()
869 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, in mt76_dma_rx_process()
877 if (q->rx_head) in mt76_dma_rx_process()
878 data_len = q->buf_size; in mt76_dma_rx_process()
880 data_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_process()
882 if (data_len < len + q->buf_offset) { in mt76_dma_rx_process()
883 dev_kfree_skb(q->rx_head); in mt76_dma_rx_process()
884 q->rx_head = NULL; in mt76_dma_rx_process()
888 if (q->rx_head) { in mt76_dma_rx_process()
889 mt76_add_fragment(dev, q, data, len, more, info, in mt76_dma_rx_process()
898 skb = napi_build_skb(data, q->buf_size); in mt76_dma_rx_process()
902 skb_reserve(skb, q->buf_offset); in mt76_dma_rx_process()
911 q->rx_head = skb; in mt76_dma_rx_process()
915 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_dma_rx_process()
922 mt76_dma_rx_fill(dev, q, true); in mt76_dma_rx_process()
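For the first buffer of a frame, the RX loop wraps the page-pool data in an skb with napi_build_skb(), reserves q->buf_offset as headroom, and accounts the received length. A sketch of that step with real skbuff helpers; the size check mirrors the data_len test in the fragments above.

#include <linux/skbuff.h>

/* Wrap a filled RX buffer in an skb: reject frames that do not fit, build
 * the skb around the whole buffer, reserve the RX headroom, then account
 * the received length. */
static struct sk_buff *demo_rx_build_skb(void *data, unsigned int buf_size,
                                         unsigned int buf_offset,
                                         unsigned int len)
{
        struct sk_buff *skb;

        if (len + buf_offset > SKB_WITH_OVERHEAD(buf_size))
                return NULL;            /* caller recycles the buffer */

        skb = napi_build_skb(data, buf_size);
        if (!skb)
                return NULL;

        skb_reserve(skb, buf_offset);
        skb_put(skb, len);

        return skb;
}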
978 dev->napi_dev->threaded = 1; in mt76_dma_init()
1031 struct mt76_queue *q = &dev->q_rx[i]; in mt76_dma_cleanup() local
1034 mt76_queue_is_wed_rro(q)) in mt76_dma_cleanup()
1038 mt76_dma_rx_cleanup(dev, q); in mt76_dma_cleanup()
1040 page_pool_destroy(q->page_pool); in mt76_dma_cleanup()
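Teardown mirrors setup: any buffers still posted to an RX ring are drained before the page pool that backed them is destroyed. A sketch of that ordering, reusing demo_ring_rx_flush() from above together with the real page_pool_destroy(); the include path assumes a recent kernel where the page-pool headers live under net/page_pool/.

#include <net/page_pool/helpers.h>

/* Give posted buffers back before destroying the pool they came from. */
static void demo_ring_teardown(struct demo_ring *q, struct demo_desc *desc,
                               struct page_pool *pool,
                               void (*demo_free_buf)(void *buf))
{
        demo_ring_rx_flush(q, desc, demo_free_buf);
        page_pool_destroy(pool);
}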