/* Excerpts from the AMD XGBE Ethernet driver's descriptor management code,
 * drivers/net/ethernet/amd/xgbe/xgbe-desc.c.
 */

#include "xgbe-common.h"
/* in xgbe_free_ring(): release the per-descriptor data, the shared RX pages
 * and the DMA-coherent descriptor memory of one ring */
	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
/* in xgbe_free_ring_resources(): tear down the Tx and Rx rings of every channel */
	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
/* in xgbe_init_ring(): allocate the DMA-coherent descriptor array and the
 * host-side bookkeeping array for one ring, preferring the ring's NUMA node */
	/* size = rdesc_count * sizeof(struct xgbe_ring_desc); */
	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* size = rdesc_count * sizeof(struct xgbe_ring_data); */
	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);
/* in xgbe_alloc_ring_resources(): allocate Tx and Rx rings for every channel */
	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);
		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);
		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}
/* in xgbe_alloc_pages(): allocate and DMA-map a compound page for RX buffers.
 * The allocation order is decremented on failure and, if no local (NUMA-node)
 * pages can be had, the allocation is retried from any node before giving up. */
	if (!pages)
		return -ENOMEM;

	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;
/* xgbe_set_buffer_data(): carve one RX buffer of 'len' bytes out of the shared
 * page allocation, and retire the page once it can no longer satisfy a request */
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This descriptor becomes responsible for unmapping the page */
		bd->pa_unmap = *pa;

		/* Force a fresh page allocation on the next request */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}
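
The following stand-alone snippet is not driver code; it is a user-space sketch of the page-slicing scheme xgbe_set_buffer_data() implements, and every name in it (page_pool, buf_slice, pool_take, and so on) is illustrative only.

/* Model: one large block is handed out in fixed-size slices, each slice
 * taking a reference, and the block is retired as soon as the next request
 * would no longer fit. Compile with any C compiler and run directly. */
#include <stdio.h>
#include <stdlib.h>

struct page_block {			/* stands in for a refcounted page */
	unsigned char *base;
	size_t len;
	unsigned int refs;
};

struct page_pool {			/* stands in for struct xgbe_page_alloc */
	struct page_block *blk;
	size_t offset;
};

struct buf_slice {			/* stands in for struct xgbe_buffer_data */
	struct page_block *blk;		/* reference held on the backing block */
	size_t off, len;
};

static void block_put(struct page_block *b)
{
	if (b && --b->refs == 0) {	/* last reference frees the block */
		free(b->base);
		free(b);
	}
}

static int pool_take(struct page_pool *p, struct buf_slice *s, size_t len)
{
	if (!p->blk) {			/* (re)fill the pool with a fresh block */
		p->blk = calloc(1, sizeof(*p->blk));
		if (!p->blk)
			return -1;
		p->blk->len = 16 * 1024;
		p->blk->base = malloc(p->blk->len);
		if (!p->blk->base) {
			free(p->blk);
			p->blk = NULL;
			return -1;
		}
		p->blk->refs = 1;	/* the pool's own reference */
		p->offset = 0;
	}

	s->blk = p->blk;
	s->off = p->offset;
	s->len = len;
	s->blk->refs++;			/* like get_page() per handed-out buffer */

	p->offset += len;
	if (p->offset + len > p->blk->len) {
		/* The next request would not fit: drop the pool's reference
		 * and start a fresh block next time; outstanding slices keep
		 * this one alive until they are released. */
		block_put(p->blk);
		p->blk = NULL;
	}
	return 0;
}

int main(void)
{
	struct page_pool pool = { 0 };
	struct buf_slice s[12];
	int i;

	for (i = 0; i < 12; i++) {
		if (pool_take(&pool, &s[i], 2048))
			return 1;
		printf("slice %2d: block=%p off=%zu\n", i,
		       (void *)s[i].blk, s[i].off);
	}
	for (i = 0; i < 12; i++)	/* consumers release their references */
		block_put(s[i].blk);
	if (pool.blk)
		block_put(pool.blk);
	return 0;
}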
/* in xgbe_map_rx_buffer(): refill the shared RX pages if needed, then carve
 * out this entry's header and data buffers */
	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}
	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);
/* in xgbe_wrapper_tx_descriptor_init(): point every Tx ring entry at its slot
 * in the coherent descriptor array, reset ring state and program the hardware */
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;
			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
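
The short stand-alone program below is not driver code; it models the pattern the wrapper init uses, keeping a host-side bookkeeping entry in step with its slot in one contiguous descriptor array. The struct names and the fake dma_base value are assumptions made for illustration.

/* Model: advance a CPU pointer and a device (bus) address cursor together so
 * entry i records both views of descriptor slot i. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hw_desc { uint32_t desc[4]; };	/* stands in for struct xgbe_ring_desc */

struct entry {				/* stands in for struct xgbe_ring_data */
	struct hw_desc *rdesc;
	uint64_t rdesc_dma;
};

int main(void)
{
	enum { COUNT = 8 };
	struct hw_desc *ring = calloc(COUNT, sizeof(*ring));
	struct entry entries[COUNT];
	uint64_t dma_base = 0x1000000;	/* pretend bus address of 'ring' */
	struct hw_desc *rdesc = ring;
	uint64_t rdesc_dma = dma_base;
	int i;

	if (!ring)
		return 1;

	for (i = 0; i < COUNT; i++) {
		entries[i].rdesc = rdesc;
		entries[i].rdesc_dma = rdesc_dma;
		rdesc++;			/* advance both cursors together */
		rdesc_dma += sizeof(struct hw_desc);
	}

	for (i = 0; i < COUNT; i++)
		printf("entry %d: cpu=%p dma=0x%llx\n", i,
		       (void *)entries[i].rdesc,
		       (unsigned long long)entries[i].rdesc_dma);
	free(ring);
	return 0;
}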
/* in xgbe_wrapper_rx_descriptor_init(): as above for the Rx rings, additionally
 * attaching RX header/data buffers to every entry via xgbe_map_rx_buffer() */
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;
			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;
			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
/* in xgbe_unmap_rdata(): undo whatever a ring entry holds (a Tx DMA mapping,
 * an skb, any RX page references) and clear its saved state */
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
/* xgbe_map_tx_skb(): DMA-map an outgoing skb (optional TSO header, linear data,
 * then page fragments) across Tx ring entries and count the descriptors used */
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;
		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) linear packet data */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;
		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the packet fragments */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;
			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped, so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1. */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
	return packet->rdesc_count;

err_out:
	/* ... unmap everything mapped so far via xgbe_unmap_rdata() ... */
	DBGPR("<--xgbe_map_tx_skb: count=0\n");
	return 0;
/* xgbe_init_function_ptrs_desc(): publish the descriptor-handling callbacks in
 * the ops structure used by the rest of the driver */
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
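
To close, a stand-alone sketch (not driver code) of the ops-structure pattern that xgbe_init_function_ptrs_desc() uses: one layer publishes its entry points by filling a struct of function pointers, and callers invoke them only through that struct. The struct layout and names below are assumptions for illustration, not the real xgbe_desc_if.

/* Model: an implementation registers its callbacks, a caller uses them
 * without knowing which functions are behind the pointers. */
#include <stdio.h>

struct desc_ops {
	int  (*alloc_ring_resources)(void *priv);
	void (*free_ring_resources)(void *priv);
};

static int demo_alloc(void *priv)
{
	(void)priv;
	printf("allocating rings\n");
	return 0;
}

static void demo_free(void *priv)
{
	(void)priv;
	printf("freeing rings\n");
}

/* Mirrors the role of xgbe_init_function_ptrs_desc(): publish the callbacks. */
static void init_desc_ops(struct desc_ops *ops)
{
	ops->alloc_ring_resources = demo_alloc;
	ops->free_ring_resources = demo_free;
}

int main(void)
{
	struct desc_ops ops;
	void *priv = NULL;

	init_desc_ops(&ops);
	if (ops.alloc_ring_resources(priv))	/* callers go through the struct */
		return 1;
	ops.free_ring_resources(priv);
	return 0;
}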