Lines Matching +full:dma +full:-info in drivers/net/wireless/broadcom/b43legacy/dma.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 DMA ringbuffer and descriptor allocation/management
18 #include "dma.h"
23 #include <linux/dma-mapping.h>
30 /* 32bit DMA ops. */
38 *meta = &(ring->meta[slot]); in op32_idx2desc()
39 desc = ring->descbase; in op32_idx2desc()
50 struct b43legacy_dmadesc32 *descbase = ring->descbase; in op32_fill_descriptor()
56 slot = (int)(desc - descbase); in op32_fill_descriptor()
57 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in op32_fill_descriptor()
62 addr |= ring->dev->dma.translation; in op32_fill_descriptor()
63 ctl = (bufsize - ring->frameoffset) in op32_fill_descriptor()
65 if (slot == ring->nr_slots - 1) in op32_fill_descriptor()
76 desc->control = cpu_to_le32(ctl); in op32_fill_descriptor()
77 desc->address = cpu_to_le32(addr); in op32_fill_descriptor()
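The op32 helpers above derive the slot index by pointer arithmetic against descbase and set a table-end flag on the last descriptor so the engine wraps back to slot 0. A self-contained model of that logic (the struct and bit names here are local stand-ins for this sketch, not the driver's dma.h definitions):

    #include <assert.h>
    #include <stdint.h>

    #define DCTL_TABLEEND (1u << 28)   /* assumed table-end control bit */

    struct desc32 { uint32_t control, address; };

    static void fill_desc(struct desc32 *base, int nr_slots,
                          struct desc32 *desc, uint32_t busaddr, uint32_t ctl)
    {
            int slot = (int)(desc - base);   /* slot index from pointer math */

            assert(slot >= 0 && slot < nr_slots);
            if (slot == nr_slots - 1)        /* last slot: tell the engine to wrap */
                    ctl |= DCTL_TABLEEND;
            desc->control = ctl;             /* the driver byte-swaps with cpu_to_le32() */
            desc->address = busaddr;         /* bus address incl. SSB translation bits */
    }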
119 return (ring->nr_slots - ring->used_slots); in free_slots()
124 B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); in next_slot()
125 if (slot == ring->nr_slots - 1) in next_slot()
134 if (current_used_slots <= ring->max_used_slots) in update_max_used_slots()
136 ring->max_used_slots = current_used_slots; in update_max_used_slots()
137 if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE)) in update_max_used_slots()
138 b43legacydbg(ring->dev->wl, in update_max_used_slots()
140 ring->max_used_slots, in update_max_used_slots()
141 ring->tx ? "TX" : "RX", in update_max_used_slots()
142 ring->index); in update_max_used_slots()
157 B43legacy_WARN_ON(!ring->tx); in request_slot()
158 B43legacy_WARN_ON(ring->stopped); in request_slot()
161 slot = next_slot(ring, ring->current_slot); in request_slot()
162 ring->current_slot = slot; in request_slot()
163 ring->used_slots++; in request_slot()
165 update_max_used_slots(ring, ring->used_slots); in request_slot()
170 /* Mac80211-queue to b43legacy-ring mapping */
177 /*FIXME: For now we always run on TX-ring-1 */ in priority_to_txring()
178 return dev->dma.tx_ring1; in priority_to_txring()
186 ring = dev->dma.tx_ring3; in priority_to_txring()
189 ring = dev->dma.tx_ring2; in priority_to_txring()
192 ring = dev->dma.tx_ring1; in priority_to_txring()
195 ring = dev->dma.tx_ring0; in priority_to_txring()
198 ring = dev->dma.tx_ring4; in priority_to_txring()
201 ring = dev->dma.tx_ring5; in priority_to_txring()
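The early return above (the FIXME) short-circuits the switch that follows, so in practice every frame goes out on tx_ring1. The dead code assigns rings in the order 3, 2, 1, 0, 4, 5; a compact model of that intended mapping, assuming the switch cases run from mac80211 queue priority 0 upward and that the default case falls through to priority 0:

    /* Intended priority -> TX ring mapping (currently bypassed by the FIXME). */
    static int txring_index_for_priority(int queue_priority)
    {
            static const int map[] = { 3, 2, 1, 0, 4, 5 };

            if (queue_priority < 0 || queue_priority > 5)
                    return 3;   /* assumed: WARN_ON + fallthrough to the case 0 ring */
            return map[queue_priority];
    }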
234 dmaaddr = dma_map_single(ring->dev->dev->dma_dev, in map_descbuffer()
238 dmaaddr = dma_map_single(ring->dev->dev->dma_dev, in map_descbuffer()
252 dma_unmap_single(ring->dev->dev->dma_dev, in unmap_descbuffer()
256 dma_unmap_single(ring->dev->dev->dma_dev, in unmap_descbuffer()
266 B43legacy_WARN_ON(ring->tx); in sync_descbuffer_for_cpu()
268 dma_sync_single_for_cpu(ring->dev->dev->dma_dev, in sync_descbuffer_for_cpu()
277 B43legacy_WARN_ON(ring->tx); in sync_descbuffer_for_device()
279 dma_sync_single_for_device(ring->dev->dev->dma_dev, in sync_descbuffer_for_device()
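The four wrappers above encode one convention: TX buffers are mapped DMA_TO_DEVICE, RX buffers DMA_FROM_DEVICE, and only RX rings are ever synced (note the WARN_ON(ring->tx) guards). A sketch of that convention against the generic <linux/dma-mapping.h> API, not the driver's literal code:

    #include <linux/dma-mapping.h>

    /* 'tx' selects the mapping direction; syncing is only meaningful for RX,
     * where the device writes and the CPU reads. */
    static dma_addr_t map_buf(struct device *dma_dev, void *buf,
                              size_t len, bool tx)
    {
            return dma_map_single(dma_dev, buf, len,
                                  tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
    }

    static void unmap_buf(struct device *dma_dev, dma_addr_t addr,
                          size_t len, bool tx)
    {
            dma_unmap_single(dma_dev, addr, len,
                             tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
    }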
288 if (meta->skb) { in free_descriptor_buffer()
290 dev_kfree_skb_irq(meta->skb); in free_descriptor_buffer()
292 dev_kfree_skb(meta->skb); in free_descriptor_buffer()
293 meta->skb = NULL; in free_descriptor_buffer()
300 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, in alloc_ringmemory()
302 &(ring->dmabase), GFP_KERNEL); in alloc_ringmemory()
303 if (!ring->descbase) in alloc_ringmemory()
304 return -ENOMEM; in alloc_ringmemory()
311 dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE, in free_ringmemory()
312 ring->descbase, ring->dmabase); in free_ringmemory()
315 /* Reset the RX DMA channel */
333 i = -1; in b43legacy_dmacontroller_rx_reset()
338 if (i != -1) { in b43legacy_dmacontroller_rx_reset()
339 b43legacyerr(dev->wl, "DMA RX reset timed out\n"); in b43legacy_dmacontroller_rx_reset()
340 return -ENODEV; in b43legacy_dmacontroller_rx_reset()
346 /* Reset the TX DMA channel */
374 i = -1; in b43legacy_dmacontroller_tx_reset()
379 if (i != -1) { in b43legacy_dmacontroller_tx_reset()
380 b43legacyerr(dev->wl, "DMA TX reset timed out\n"); in b43legacy_dmacontroller_tx_reset()
381 return -ENODEV; in b43legacy_dmacontroller_tx_reset()
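Both reset paths share the same poll-with-sentinel idiom: read the status register a bounded number of times, set i to -1 on success, and treat any other loop exit as a timeout. A self-contained model (the retry count of 10 and the ~1 ms delay are assumptions):

    #include <errno.h>
    #include <stdint.h>

    static int wait_until_idle(uint32_t (*read_status)(void *ctx), void *ctx,
                               uint32_t idle_value)
    {
            int i;

            for (i = 0; i < 10; i++) {
                    if (read_status(ctx) == idle_value) {
                            i = -1;          /* sentinel: controller went idle */
                            break;
                    }
                    /* the driver sleeps ~1 ms between polls (msleep(1)) */
            }
            return (i == -1) ? 0 : -ENODEV;  /* matches the timeout paths above */
    }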
389 /* Check if a DMA mapping address is invalid. */
395 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) in b43legacy_dma_mapping_error()
398 switch (ring->type) { in b43legacy_dma_mapping_error()
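Beyond dma_mapping_error(), the switch on ring->type enforces that the whole buffer fits inside the engine's addressable window; a mapping that succeeds but lands too high is unmapped again and rejected. The core check, with the 30/32-bit widths taken as assumptions from the b43legacy DMA type names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* True if [addr, addr + len) spills past an engine limited to
     * 'bits' address bits (30 for the old engines, 32 otherwise). */
    static bool addr_out_of_range(uint64_t addr, size_t len, unsigned int bits)
    {
            return addr + len > (1ULL << bits);
    }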
429 B43legacy_WARN_ON(ring->tx); in setup_rx_descbuffer()
431 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); in setup_rx_descbuffer()
433 return -ENOMEM; in setup_rx_descbuffer()
434 dmaaddr = map_descbuffer(ring, skb->data, in setup_rx_descbuffer()
435 ring->rx_buffersize, 0); in setup_rx_descbuffer()
436 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { in setup_rx_descbuffer()
442 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); in setup_rx_descbuffer()
444 return -ENOMEM; in setup_rx_descbuffer()
445 dmaaddr = map_descbuffer(ring, skb->data, in setup_rx_descbuffer()
446 ring->rx_buffersize, 0); in setup_rx_descbuffer()
449 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { in setup_rx_descbuffer()
451 return -EIO; in setup_rx_descbuffer()
454 meta->skb = skb; in setup_rx_descbuffer()
455 meta->dmaaddr = dmaaddr; in setup_rx_descbuffer()
456 op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0); in setup_rx_descbuffer()
458 rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data); in setup_rx_descbuffer()
459 rxhdr->frame_len = 0; in setup_rx_descbuffer()
460 txstat = (struct b43legacy_hwtxstatus *)(skb->data); in setup_rx_descbuffer()
461 txstat->cookie = 0; in setup_rx_descbuffer()
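Zeroing rxhdr->frame_len and txstat->cookie before handing the buffer to the hardware is what lets the RX path (dma_rx(), further down in this listing) detect completion: it re-reads the field until the device has written a nonzero value. A tiny model of that handshake; the retry count is an assumption, the driver uses a short udelay() loop:

    #include <stdint.h>

    /* Re-read a device-written field a few times; zero means the DMA
     * engine has not filled this buffer yet. */
    static uint16_t poll_frame_len(const volatile uint16_t *frame_len)
    {
            int tries;

            for (tries = 0; tries < 5; tries++) {
                    if (*frame_len != 0)
                            break;
                    /* udelay(2) + barrier() in the driver */
            }
            return *frame_len;
    }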
472 int err = -ENOMEM; in alloc_initial_descbuffers()
476 for (i = 0; i < ring->nr_slots; i++) { in alloc_initial_descbuffers()
481 b43legacyerr(ring->dev->wl, in alloc_initial_descbuffers()
487 ring->used_slots = ring->nr_slots; in alloc_initial_descbuffers()
493 for (i--; i >= 0; i--) { in alloc_initial_descbuffers()
496 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); in alloc_initial_descbuffers()
497 dev_kfree_skb(meta->skb); in alloc_initial_descbuffers()
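The `for (i--; i >= 0; i--)` tail above is the standard partial-failure unwind: undo exactly the slots that were set up before the error, in reverse order. The pattern in isolation (setup_one()/teardown_one() are hypothetical stand-ins):

    #include <errno.h>

    static int setup_one(int i)     { (void)i; return 0; }
    static void teardown_one(int i) { (void)i; }

    static int setup_all(int n)
    {
            int i, err = 0;

            for (i = 0; i < n; i++) {
                    err = setup_one(i);
                    if (err)
                            goto err_unwind;
            }
            return 0;

    err_unwind:
            for (i--; i >= 0; i--)   /* tear down only what succeeded */
                    teardown_one(i);
            return err;
    }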
502 /* Do initial setup of the DMA controller.
511 u32 trans = ring->dev->dma.translation; in dmacontroller_setup()
512 u32 ringbase = (u32)(ring->dmabase); in dmacontroller_setup()
514 if (ring->tx) { in dmacontroller_setup()
531 value = (ring->frameoffset << in dmacontroller_setup()
547 /* Shutdown the DMA controller. */
550 if (ring->tx) { in dmacontroller_cleanup()
551 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base, in dmacontroller_cleanup()
552 ring->type); in dmacontroller_cleanup()
555 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base, in dmacontroller_cleanup()
556 ring->type); in dmacontroller_cleanup()
566 if (!ring->used_slots) in free_all_descbuffers()
568 for (i = 0; i < ring->nr_slots; i++) { in free_all_descbuffers()
571 if (!meta->skb) { in free_all_descbuffers()
572 B43legacy_WARN_ON(!ring->tx); in free_all_descbuffers()
575 if (ring->tx) in free_all_descbuffers()
576 unmap_descbuffer(ring, meta->dmaaddr, in free_all_descbuffers()
577 meta->skb->len, 1); in free_all_descbuffers()
579 unmap_descbuffer(ring, meta->dmaaddr, in free_all_descbuffers()
580 ring->rx_buffersize, 0); in free_all_descbuffers()
616 ring->type = type; in b43legacy_setup_dmaring()
617 ring->dev = dev; in b43legacy_setup_dmaring()
623 ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta), in b43legacy_setup_dmaring()
625 if (!ring->meta) in b43legacy_setup_dmaring()
628 ring->txhdr_cache = kcalloc(nr_slots, in b43legacy_setup_dmaring()
631 if (!ring->txhdr_cache) in b43legacy_setup_dmaring()
634 /* test for ability to dma to txhdr_cache */ in b43legacy_setup_dmaring()
635 dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, in b43legacy_setup_dmaring()
642 kfree(ring->txhdr_cache); in b43legacy_setup_dmaring()
643 ring->txhdr_cache = kcalloc(nr_slots, in b43legacy_setup_dmaring()
646 if (!ring->txhdr_cache) in b43legacy_setup_dmaring()
649 dma_test = dma_map_single(dev->dev->dma_dev, in b43legacy_setup_dmaring()
650 ring->txhdr_cache, in b43legacy_setup_dmaring()
659 dma_unmap_single(dev->dev->dma_dev, dma_test, in b43legacy_setup_dmaring()
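The block above performs a one-shot test mapping of txhdr_cache: allocate normally, try to map, and if the address is unusable, reallocate from ZONE_DMA (GFP_KERNEL | GFP_DMA) and try once more; the successful test mapping is then unmapped again. A kernel-style sketch of that fallback, reusing the driver's own mapping-error helper and with names otherwise assumed:

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    static void *alloc_dma_testable(struct b43legacy_dmaring *ring,
                                    struct device *dma_dev,
                                    size_t nr_slots, size_t hdr_size)
    {
            void *buf = kcalloc(nr_slots, hdr_size, GFP_KERNEL);
            dma_addr_t addr;

            if (!buf)
                    return NULL;
            addr = dma_map_single(dma_dev, buf, hdr_size, DMA_TO_DEVICE);
            if (b43legacy_dma_mapping_error(ring, addr, hdr_size, 1)) {
                    kfree(buf);                        /* not DMA-addressable */
                    buf = kcalloc(nr_slots, hdr_size, GFP_KERNEL | GFP_DMA);
                    if (!buf)
                            return NULL;
                    addr = dma_map_single(dma_dev, buf, hdr_size, DMA_TO_DEVICE);
                    if (b43legacy_dma_mapping_error(ring, addr, hdr_size, 1)) {
                            kfree(buf);                /* ZONE_DMA didn't help either */
                            return NULL;
                    }
            }
            dma_unmap_single(dma_dev, addr, hdr_size, DMA_TO_DEVICE); /* test only */
            return buf;
    }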
664 ring->nr_slots = nr_slots; in b43legacy_setup_dmaring()
665 ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index); in b43legacy_setup_dmaring()
666 ring->index = controller_index; in b43legacy_setup_dmaring()
668 ring->tx = true; in b43legacy_setup_dmaring()
669 ring->current_slot = -1; in b43legacy_setup_dmaring()
671 if (ring->index == 0) { in b43legacy_setup_dmaring()
672 ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE; in b43legacy_setup_dmaring()
673 ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET; in b43legacy_setup_dmaring()
674 } else if (ring->index == 3) { in b43legacy_setup_dmaring()
675 ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE; in b43legacy_setup_dmaring()
676 ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET; in b43legacy_setup_dmaring()
681 ring->last_injected_overflow = jiffies; in b43legacy_setup_dmaring()
697 kfree(ring->txhdr_cache); in b43legacy_setup_dmaring()
699 kfree(ring->meta); in b43legacy_setup_dmaring()
712 b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:" in b43legacy_destroy_dmaring()
713 " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base, in b43legacy_destroy_dmaring()
714 (ring->tx) ? "TX" : "RX", ring->max_used_slots, in b43legacy_destroy_dmaring()
715 ring->nr_slots); in b43legacy_destroy_dmaring()
723 kfree(ring->txhdr_cache); in b43legacy_destroy_dmaring()
724 kfree(ring->meta); in b43legacy_destroy_dmaring()
730 struct b43legacy_dma *dma; in b43legacy_dma_free() local
734 dma = &dev->dma; in b43legacy_dma_free()
736 b43legacy_destroy_dmaring(dma->rx_ring3); in b43legacy_dma_free()
737 dma->rx_ring3 = NULL; in b43legacy_dma_free()
738 b43legacy_destroy_dmaring(dma->rx_ring0); in b43legacy_dma_free()
739 dma->rx_ring0 = NULL; in b43legacy_dma_free()
741 b43legacy_destroy_dmaring(dma->tx_ring5); in b43legacy_dma_free()
742 dma->tx_ring5 = NULL; in b43legacy_dma_free()
743 b43legacy_destroy_dmaring(dma->tx_ring4); in b43legacy_dma_free()
744 dma->tx_ring4 = NULL; in b43legacy_dma_free()
745 b43legacy_destroy_dmaring(dma->tx_ring3); in b43legacy_dma_free()
746 dma->tx_ring3 = NULL; in b43legacy_dma_free()
747 b43legacy_destroy_dmaring(dma->tx_ring2); in b43legacy_dma_free()
748 dma->tx_ring2 = NULL; in b43legacy_dma_free()
749 b43legacy_destroy_dmaring(dma->tx_ring1); in b43legacy_dma_free()
750 dma->tx_ring1 = NULL; in b43legacy_dma_free()
751 b43legacy_destroy_dmaring(dma->tx_ring0); in b43legacy_dma_free()
752 dma->tx_ring0 = NULL; in b43legacy_dma_free()
757 struct b43legacy_dma *dma = &dev->dma; in b43legacy_dma_init() local
762 err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type)); in b43legacy_dma_init()
765 b43legacywarn(dev->wl, "DMA for this device not supported. " in b43legacy_dma_init()
767 dev->__using_pio = true; in b43legacy_dma_init()
768 return -EAGAIN; in b43legacy_dma_init()
770 b43legacyerr(dev->wl, "DMA for this device not supported and " in b43legacy_dma_init()
772 return -EOPNOTSUPP; in b43legacy_dma_init()
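dma_set_mask_and_coherent() above is the generic negotiation point: the driver asks for the widest mask the engine supports (30 or 32 bits here) and, if the platform refuses, either falls back to PIO (-EAGAIN) or fails outright. A minimal sketch of that negotiation with the standard API:

    #include <linux/dma-mapping.h>

    /* 'bits' would be 30 or 32 depending on the detected engine type. */
    static int negotiate_dma_mask(struct device *dma_dev, unsigned int bits)
    {
            return dma_set_mask_and_coherent(dma_dev, DMA_BIT_MASK(bits));
    }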
775 dma->translation = ssb_dma_translation(dev->dev); in b43legacy_dma_init()
777 err = -ENOMEM; in b43legacy_dma_init()
778 /* setup TX DMA channels. */ in b43legacy_dma_init()
782 dma->tx_ring0 = ring; in b43legacy_dma_init()
787 dma->tx_ring1 = ring; in b43legacy_dma_init()
792 dma->tx_ring2 = ring; in b43legacy_dma_init()
797 dma->tx_ring3 = ring; in b43legacy_dma_init()
802 dma->tx_ring4 = ring; in b43legacy_dma_init()
807 dma->tx_ring5 = ring; in b43legacy_dma_init()
809 /* setup RX DMA channels. */ in b43legacy_dma_init()
813 dma->rx_ring0 = ring; in b43legacy_dma_init()
815 if (dev->dev->id.revision < 5) { in b43legacy_dma_init()
819 dma->rx_ring3 = ring; in b43legacy_dma_init()
822 b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type); in b43legacy_dma_init()
828 b43legacy_destroy_dmaring(dma->rx_ring0); in b43legacy_dma_init()
829 dma->rx_ring0 = NULL; in b43legacy_dma_init()
831 b43legacy_destroy_dmaring(dma->tx_ring5); in b43legacy_dma_init()
832 dma->tx_ring5 = NULL; in b43legacy_dma_init()
834 b43legacy_destroy_dmaring(dma->tx_ring4); in b43legacy_dma_init()
835 dma->tx_ring4 = NULL; in b43legacy_dma_init()
837 b43legacy_destroy_dmaring(dma->tx_ring3); in b43legacy_dma_init()
838 dma->tx_ring3 = NULL; in b43legacy_dma_init()
840 b43legacy_destroy_dmaring(dma->tx_ring2); in b43legacy_dma_init()
841 dma->tx_ring2 = NULL; in b43legacy_dma_init()
843 b43legacy_destroy_dmaring(dma->tx_ring1); in b43legacy_dma_init()
844 dma->tx_ring1 = NULL; in b43legacy_dma_init()
846 b43legacy_destroy_dmaring(dma->tx_ring0); in b43legacy_dma_init()
847 dma->tx_ring0 = NULL; in b43legacy_dma_init()
858 * DMA controller ID and store the slot number in generate_cookie()
863 switch (ring->index) { in generate_cookie()
894 struct b43legacy_dma *dma = &dev->dma; in parse_cookie() local
899 ring = dma->tx_ring0; in parse_cookie()
902 ring = dma->tx_ring1; in parse_cookie()
905 ring = dma->tx_ring2; in parse_cookie()
908 ring = dma->tx_ring3; in parse_cookie()
911 ring = dma->tx_ring4; in parse_cookie()
914 ring = dma->tx_ring5; in parse_cookie()
920 B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots)); in parse_cookie()
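The cookie scheme spelled out in the comment above packs the TX ring ID into the upper 4 bits and the slot number into the lower 12, which parse_cookie() simply splits apart. A self-contained model (the 0xA base for ring 0 is an assumption; it keeps cookie 0 free, which the RX path treats as a special value):

    #include <stdint.h>

    static uint16_t make_cookie(int ring_index, int slot)  /* ring 0..5, slot < 4096 */
    {
            return (uint16_t)(((0xA + ring_index) << 12) | (slot & 0x0FFF));
    }

    static void split_cookie(uint16_t cookie, int *ring_index, int *slot)
    {
            *ring_index = (cookie >> 12) - 0xA;
            *slot = cookie & 0x0FFF;
    }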
929 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); in dma_tx_fragment() local
939 B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); in dma_tx_fragment()
941 old_top_slot = ring->current_slot; in dma_tx_fragment()
942 old_used_slots = ring->used_slots; in dma_tx_fragment()
949 header = &(ring->txhdr_cache[slot * sizeof( in dma_tx_fragment()
951 err = b43legacy_generate_txhdr(ring->dev, header, in dma_tx_fragment()
952 skb->data, skb->len, info, in dma_tx_fragment()
955 ring->current_slot = old_top_slot; in dma_tx_fragment()
956 ring->used_slots = old_used_slots; in dma_tx_fragment()
960 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, in dma_tx_fragment()
962 if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr, in dma_tx_fragment()
964 ring->current_slot = old_top_slot; in dma_tx_fragment()
965 ring->used_slots = old_used_slots; in dma_tx_fragment()
966 return -EIO; in dma_tx_fragment()
968 op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr, in dma_tx_fragment()
976 meta->skb = skb; in dma_tx_fragment()
977 meta->is_last_fragment = true; in dma_tx_fragment()
979 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); in dma_tx_fragment()
981 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { in dma_tx_fragment()
982 bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA); in dma_tx_fragment()
984 ring->current_slot = old_top_slot; in dma_tx_fragment()
985 ring->used_slots = old_used_slots; in dma_tx_fragment()
986 err = -ENOMEM; in dma_tx_fragment()
990 skb_put_data(bounce_skb, skb->data, skb->len); in dma_tx_fragment()
991 memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb)); in dma_tx_fragment()
992 bounce_skb->dev = skb->dev; in dma_tx_fragment()
994 info = IEEE80211_SKB_CB(bounce_skb); in dma_tx_fragment()
999 meta->skb = skb; in dma_tx_fragment()
1000 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); in dma_tx_fragment()
1001 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { in dma_tx_fragment()
1002 ring->current_slot = old_top_slot; in dma_tx_fragment()
1003 ring->used_slots = old_used_slots; in dma_tx_fragment()
1004 err = -EIO; in dma_tx_fragment()
1009 op32_fill_descriptor(ring, desc, meta->dmaaddr, in dma_tx_fragment()
1010 skb->len, 0, 1, 1); in dma_tx_fragment()
1020 unmap_descbuffer(ring, meta_hdr->dmaaddr, in dma_tx_fragment()
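When mapping skb->data fails the addressability check, dma_tx_fragment() does not drop the frame: it copies it into a bounce skb allocated with GFP_DMA, carries over the control block (mac80211 keeps its tx_info there) and the device pointer, then maps the copy instead. A hedged sketch of that fallback, not the literal driver code:

    #include <linux/skbuff.h>

    /* Returns a DMA-friendly replacement for skb, or NULL on OOM.
     * Consumes the original skb on success. */
    static struct sk_buff *bounce_for_dma(struct sk_buff *skb)
    {
            struct sk_buff *bounce = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);

            if (!bounce)
                    return NULL;
            skb_put_data(bounce, skb->data, skb->len);    /* copy the payload */
            memcpy(bounce->cb, skb->cb, sizeof(skb->cb)); /* keep IEEE80211_SKB_CB() */
            bounce->dev = skb->dev;
            dev_kfree_skb_any(skb);                       /* original no longer needed */
            return bounce;
    }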
1029 if (unlikely(b43legacy_debug(ring->dev, in should_inject_overflow()
1035 next_overflow = ring->last_injected_overflow + HZ; in should_inject_overflow()
1037 ring->last_injected_overflow = jiffies; in should_inject_overflow()
1038 b43legacydbg(ring->dev->wl, in should_inject_overflow()
1040 "DMA controller %d\n", ring->index); in should_inject_overflow()
1055 B43legacy_WARN_ON(!ring->tx); in b43legacy_dma_tx()
1057 if (unlikely(ring->stopped)) { in b43legacy_dma_tx()
1063 b43legacyerr(dev->wl, "Packet after queue stopped\n"); in b43legacy_dma_tx()
1064 return -ENOSPC; in b43legacy_dma_tx()
1070 b43legacyerr(dev->wl, "DMA queue overflow\n"); in b43legacy_dma_tx()
1071 return -ENOSPC; in b43legacy_dma_tx()
1077 if (unlikely(err == -ENOKEY)) { in b43legacy_dma_tx()
1084 b43legacyerr(dev->wl, "DMA tx mapping failure\n"); in b43legacy_dma_tx()
1091 ieee80211_stop_queue(dev->wl->hw, skb_mapping); in b43legacy_dma_tx()
1092 dev->wl->tx_queue_stopped[skb_mapping] = 1; in b43legacy_dma_tx()
1093 ring->stopped = true; in b43legacy_dma_tx()
1095 b43legacydbg(dev->wl, "Stopped TX ring %d\n", in b43legacy_dma_tx()
1096 ring->index); in b43legacy_dma_tx()
1110 ring = parse_cookie(dev, status->cookie, &slot); in b43legacy_dma_handle_txstatus()
1113 B43legacy_WARN_ON(!ring->tx); in b43legacy_dma_handle_txstatus()
1115 /* Sanity check: TX packets are processed in-order on one ring. in b43legacy_dma_handle_txstatus()
1118 firstused = ring->current_slot - ring->used_slots + 1; in b43legacy_dma_handle_txstatus()
1120 firstused = ring->nr_slots + firstused; in b43legacy_dma_handle_txstatus()
1123 * malfunction, memory leaks and/or stall of DMA functionality. in b43legacy_dma_handle_txstatus()
1125 b43legacydbg(dev->wl, "Out of order TX status report on DMA " in b43legacy_dma_handle_txstatus()
1127 ring->index, firstused, slot); in b43legacy_dma_handle_txstatus()
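The firstused computation walks back used_slots - 1 entries from current_slot, modulo the ring size; if the slot reported by the hardware is not that first used slot, the status is out of order and is dropped with the warning above. The wrap arithmetic in isolation:

    /* Oldest in-flight slot, given the newest one and the in-use count. */
    static int first_used_slot(int current_slot, int used_slots, int nr_slots)
    {
            int firstused = current_slot - used_slots + 1;

            if (firstused < 0)               /* wrapped past slot 0 */
                    firstused += nr_slots;
            return firstused;
    }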
1132 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in b43legacy_dma_handle_txstatus()
1135 if (meta->skb) in b43legacy_dma_handle_txstatus()
1136 unmap_descbuffer(ring, meta->dmaaddr, in b43legacy_dma_handle_txstatus()
1137 meta->skb->len, 1); in b43legacy_dma_handle_txstatus()
1139 unmap_descbuffer(ring, meta->dmaaddr, in b43legacy_dma_handle_txstatus()
1143 if (meta->is_last_fragment) { in b43legacy_dma_handle_txstatus()
1144 struct ieee80211_tx_info *info; in b43legacy_dma_handle_txstatus() local
1145 BUG_ON(!meta->skb); in b43legacy_dma_handle_txstatus()
1146 info = IEEE80211_SKB_CB(meta->skb); in b43legacy_dma_handle_txstatus()
1151 retry_limit = info->status.rates[0].count; in b43legacy_dma_handle_txstatus()
1152 ieee80211_tx_info_clear_status(info); in b43legacy_dma_handle_txstatus()
1154 if (status->acked) in b43legacy_dma_handle_txstatus()
1155 info->flags |= IEEE80211_TX_STAT_ACK; in b43legacy_dma_handle_txstatus()
1157 if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) { in b43legacy_dma_handle_txstatus()
1165 info->status.rates[0].count = 0; in b43legacy_dma_handle_txstatus()
1166 info->status.rates[1].count = status->frame_count; in b43legacy_dma_handle_txstatus()
1168 if (status->frame_count > retry_limit) { in b43legacy_dma_handle_txstatus()
1169 info->status.rates[0].count = retry_limit; in b43legacy_dma_handle_txstatus()
1170 info->status.rates[1].count = status->frame_count - in b43legacy_dma_handle_txstatus()
1174 info->status.rates[0].count = status->frame_count; in b43legacy_dma_handle_txstatus()
1175 info->status.rates[1].idx = -1; in b43legacy_dma_handle_txstatus()
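The rate-status bookkeeping above reports at most retry_limit attempts on the first rate entry and spills any excess (which can occur when RTS retries exceed the short-frame limit) onto the second entry; idx = -1 marks the second entry unused. A simplified model of the split, under those assumptions:

    /* Split frame_count transmission attempts across two report slots. */
    static void split_tx_attempts(int frame_count, int retry_limit,
                                  int *count0, int *count1, int *idx1)
    {
            if (frame_count > retry_limit) {
                    *count0 = retry_limit;
                    *count1 = frame_count - retry_limit;
            } else {
                    *count0 = frame_count;
                    *count1 = 0;
                    *idx1 = -1;   /* second rate entry unused */
            }
    }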
1183 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb); in b43legacy_dma_handle_txstatus()
1185 meta->skb = NULL; in b43legacy_dma_handle_txstatus()
1190 B43legacy_WARN_ON(meta->skb != NULL); in b43legacy_dma_handle_txstatus()
1194 ring->used_slots--; in b43legacy_dma_handle_txstatus()
1196 if (meta->is_last_fragment) in b43legacy_dma_handle_txstatus()
1200 dev->stats.last_tx = jiffies; in b43legacy_dma_handle_txstatus()
1201 if (ring->stopped) { in b43legacy_dma_handle_txstatus()
1203 ring->stopped = false; in b43legacy_dma_handle_txstatus()
1206 if (dev->wl->tx_queue_stopped[ring->queue_prio]) { in b43legacy_dma_handle_txstatus()
1207 dev->wl->tx_queue_stopped[ring->queue_prio] = 0; in b43legacy_dma_handle_txstatus()
1211 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); in b43legacy_dma_handle_txstatus()
1213 b43legacydbg(dev->wl, "Woke up TX ring %d\n", in b43legacy_dma_handle_txstatus()
1214 ring->index); in b43legacy_dma_handle_txstatus()
1217 ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work); in b43legacy_dma_handle_txstatus()
1233 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); in dma_rx()
1234 skb = meta->skb; in dma_rx()
1236 if (ring->index == 3) { in dma_rx()
1239 (struct b43legacy_hwtxstatus *)skb->data; in dma_rx()
1242 while (hw->cookie == 0) { in dma_rx()
1249 b43legacy_handle_hwtxstatus(ring->dev, hw); in dma_rx()
1251 sync_descbuffer_for_device(ring, meta->dmaaddr, in dma_rx()
1252 ring->rx_buffersize); in dma_rx()
1256 rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data; in dma_rx()
1257 len = le16_to_cpu(rxhdr->frame_len); in dma_rx()
1264 len = le16_to_cpu(rxhdr->frame_len); in dma_rx()
1268 sync_descbuffer_for_device(ring, meta->dmaaddr, in dma_rx()
1269 ring->rx_buffersize); in dma_rx()
1273 if (unlikely(len > ring->rx_buffersize)) { in dma_rx()
1285 sync_descbuffer_for_device(ring, meta->dmaaddr, in dma_rx()
1286 ring->rx_buffersize); in dma_rx()
1289 tmp -= ring->rx_buffersize; in dma_rx()
1293 b43legacyerr(ring->dev->wl, "DMA RX buffer too small " in dma_rx()
1294 "(len: %u, buffer: %u, nr-dropped: %d)\n", in dma_rx()
1295 len, ring->rx_buffersize, cnt); in dma_rx()
1299 dmaaddr = meta->dmaaddr; in dma_rx()
1302 b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()" in dma_rx()
1305 ring->rx_buffersize); in dma_rx()
1309 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); in dma_rx()
1310 skb_put(skb, len + ring->frameoffset); in dma_rx()
1311 skb_pull(skb, ring->frameoffset); in dma_rx()
1313 b43legacy_rx(ring->dev, skb, rxhdr); in dma_rx()
1324 B43legacy_WARN_ON(ring->tx); in b43legacy_dma_rx()
1327 ring->nr_slots)); in b43legacy_dma_rx()
1329 slot = ring->current_slot; in b43legacy_dma_rx()
1335 ring->current_slot = slot; in b43legacy_dma_rx()
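b43legacy_dma_rx() converts the hardware's RX descriptor pointer, a byte offset into the descriptor table, into a slot index and processes every slot from the ring's current position up to it. The conversion, assuming the 8-byte 32-bit descriptor (control + address) shown at the top of this listing:

    #include <stdint.h>

    struct desc32 { uint32_t control, address; };   /* 8 bytes */

    static int hw_slot_from_descptr(uint16_t descptr_bytes)
    {
            return (int)(descptr_bytes / sizeof(struct desc32));
    }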
1340 B43legacy_WARN_ON(!ring->tx); in b43legacy_dma_tx_suspend_ring()
1346 B43legacy_WARN_ON(!ring->tx); in b43legacy_dma_tx_resume_ring()
1352 b43legacy_power_saving_ctl_bits(dev, -1, 1); in b43legacy_dma_tx_suspend()
1353 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0); in b43legacy_dma_tx_suspend()
1354 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1); in b43legacy_dma_tx_suspend()
1355 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2); in b43legacy_dma_tx_suspend()
1356 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3); in b43legacy_dma_tx_suspend()
1357 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4); in b43legacy_dma_tx_suspend()
1358 b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5); in b43legacy_dma_tx_suspend()
1363 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5); in b43legacy_dma_tx_resume()
1364 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4); in b43legacy_dma_tx_resume()
1365 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3); in b43legacy_dma_tx_resume()
1366 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2); in b43legacy_dma_tx_resume()
1367 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1); in b43legacy_dma_tx_resume()
1368 b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0); in b43legacy_dma_tx_resume()
1369 b43legacy_power_saving_ctl_bits(dev, -1, -1); in b43legacy_dma_tx_resume()