Lines Matching +full:tf +full:- +full:a
1 // SPDX-License-Identifier: GPL-2.0
42 (TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
52 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
56 * @frame_count: how many frames assemble a full packet
58 * Each data frame passed to the high-speed DMA ring has this header. If
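The header these kernel-doc lines describe is a 12-byte, little-endian structure prepended to every DMA frame; a sketch of the layout as described (field comments paraphrase the kernel-doc above):

struct thunderbolt_ip_frame_header {
	__le32 frame_size;	/* number of payload bytes in this frame */
	__le16 frame_index;	/* running index of the frame within the packet */
	__le16 frame_id;	/* matches frames belonging to the same packet */
	__le32 frame_count;	/* how many frames assemble the full packet */
};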
149 * struct tbnet - ThunderboltIP network driver private data
172 * @rx_hdr: Copy of the currently processed Rx frame. Used when a
204 /* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
209 /* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
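In code these canonical strings would typically be spelled with UUID_INIT(); a sketch for the network property directory UUID above (the protocol UUID follows the same byte-for-byte mapping):

static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);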
218 MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
227 length_sn = (size - 3 * 4) / 4; in tbnet_fill_header()
230 hdr->route_hi = upper_32_bits(route); in tbnet_fill_header()
231 hdr->route_lo = lower_32_bits(route); in tbnet_fill_header()
232 hdr->length_sn = length_sn; in tbnet_fill_header()
233 uuid_copy(&hdr->uuid, &tbnet_svc_uuid); in tbnet_fill_header()
234 uuid_copy(&hdr->initiator_uuid, initiator_uuid); in tbnet_fill_header()
235 uuid_copy(&hdr->target_uuid, target_uuid); in tbnet_fill_header()
236 hdr->type = type; in tbnet_fill_header()
237 hdr->command_id = command_id; in tbnet_fill_header()
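The length_sn assignment above encodes the header length in 32-bit words, excluding the 8-byte route and the length_sn word itself (hence size - 3 * 4), and the sequence number is packed into the upper bits of the same word. A sketch of the packing, where TBIP_HDR_SN_SHIFT is assumed to be the companion of the TBIP_HDR_SN_MASK used for decoding in tbnet_handle_packet():

	length_sn = (size - 3 * 4) / 4;					/* payload length in dwords */
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK; /* sequence number in the high bits */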
244 struct tb_xdomain *xd = net->xd; in tbnet_login_response()
247 tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid, in tbnet_login_response()
248 xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply), in tbnet_login_response()
250 memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN); in tbnet_login_response()
261 struct tb_xdomain *xd = net->xd; in tbnet_login_request()
264 tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid, in tbnet_login_request()
265 xd->remote_uuid, TBIP_LOGIN, sizeof(request), in tbnet_login_request()
266 atomic_inc_return(&net->command_id)); in tbnet_login_request()
269 request.transmit_path = net->local_transmit_path; in tbnet_login_request()
281 struct tb_xdomain *xd = net->xd; in tbnet_logout_response()
284 tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid, in tbnet_logout_response()
285 xd->remote_uuid, TBIP_STATUS, sizeof(reply), in tbnet_logout_response()
286 atomic_inc_return(&net->command_id)); in tbnet_logout_response()
295 struct tb_xdomain *xd = net->xd; in tbnet_logout_request()
298 tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid, in tbnet_logout_request()
299 xd->remote_uuid, TBIP_LOGOUT, sizeof(request), in tbnet_logout_request()
300 atomic_inc_return(&net->command_id)); in tbnet_logout_request()
310 netdev_dbg(net->dev, "login started\n"); in start_login()
312 mutex_lock(&net->connection_lock); in start_login()
313 net->login_sent = false; in start_login()
314 net->login_received = false; in start_login()
315 mutex_unlock(&net->connection_lock); in start_login()
317 queue_delayed_work(system_long_wq, &net->login_work, in start_login()
323 cancel_delayed_work_sync(&net->login_work); in stop_login()
324 cancel_work_sync(&net->connected_work); in stop_login()
326 netdev_dbg(net->dev, "login stopped\n"); in stop_login()
329 static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf) in tbnet_frame_size() argument
331 return tf->frame.size ? : TBNET_FRAME_SIZE; in tbnet_frame_size()
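The "? :" above is the GNU conditional-with-omitted-middle-operand extension: it evaluates frame.size once and falls back to the default when it is zero, i.e. it is equivalent to:

	return tf->frame.size ? tf->frame.size : TBNET_FRAME_SIZE;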
339 struct device *dma_dev = tb_ring_dma_device(ring->ring); in tbnet_free_buffers()
340 struct tbnet_frame *tf = &ring->frames[i]; in tbnet_free_buffers() local
345 if (!tf->page) in tbnet_free_buffers()
348 if (ring->ring->is_tx) { in tbnet_free_buffers()
358 trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir); in tbnet_free_buffers()
360 if (tf->frame.buffer_phy) in tbnet_free_buffers()
361 dma_unmap_page(dma_dev, tf->frame.buffer_phy, size, in tbnet_free_buffers()
364 __free_pages(tf->page, order); in tbnet_free_buffers()
365 tf->page = NULL; in tbnet_free_buffers()
368 ring->cons = 0; in tbnet_free_buffers()
369 ring->prod = 0; in tbnet_free_buffers()
374 netif_carrier_off(net->dev); in tbnet_tear_down()
375 netif_stop_queue(net->dev); in tbnet_tear_down()
379 mutex_lock(&net->connection_lock); in tbnet_tear_down()
381 if (net->login_sent && net->login_received) { in tbnet_tear_down()
384 while (send_logout && retries-- > 0) { in tbnet_tear_down()
385 netdev_dbg(net->dev, "sending logout request %u\n", in tbnet_tear_down()
388 if (ret != -ETIMEDOUT) in tbnet_tear_down()
392 tb_ring_stop(net->rx_ring.ring); in tbnet_tear_down()
393 tb_ring_stop(net->tx_ring.ring); in tbnet_tear_down()
394 tbnet_free_buffers(&net->rx_ring); in tbnet_tear_down()
395 tbnet_free_buffers(&net->tx_ring); in tbnet_tear_down()
397 ret = tb_xdomain_disable_paths(net->xd, in tbnet_tear_down()
398 net->local_transmit_path, in tbnet_tear_down()
399 net->rx_ring.ring->hop, in tbnet_tear_down()
400 net->remote_transmit_path, in tbnet_tear_down()
401 net->tx_ring.ring->hop); in tbnet_tear_down()
403 netdev_warn(net->dev, "failed to disable DMA paths\n"); in tbnet_tear_down()
405 tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path); in tbnet_tear_down()
406 net->remote_transmit_path = 0; in tbnet_tear_down()
409 net->login_retries = 0; in tbnet_tear_down()
410 net->login_sent = false; in tbnet_tear_down()
411 net->login_received = false; in tbnet_tear_down()
413 netdev_dbg(net->dev, "network traffic stopped\n"); in tbnet_tear_down()
415 mutex_unlock(&net->connection_lock); in tbnet_tear_down()
430 if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid)) in tbnet_handle_packet()
432 if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid)) in tbnet_handle_packet()
435 route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo; in tbnet_handle_packet()
437 if (route != net->xd->route) in tbnet_handle_packet()
440 sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK; in tbnet_handle_packet()
442 command_id = pkg->hdr.command_id; in tbnet_handle_packet()
444 switch (pkg->hdr.type) { in tbnet_handle_packet()
446 netdev_dbg(net->dev, "remote login request received\n"); in tbnet_handle_packet()
447 if (!netif_running(net->dev)) in tbnet_handle_packet()
451 pkg->hdr.command_id); in tbnet_handle_packet()
453 netdev_dbg(net->dev, "remote login response sent\n"); in tbnet_handle_packet()
455 mutex_lock(&net->connection_lock); in tbnet_handle_packet()
456 net->login_received = true; in tbnet_handle_packet()
457 net->remote_transmit_path = pkg->transmit_path; in tbnet_handle_packet()
463 if (net->login_retries >= TBNET_LOGIN_RETRIES || in tbnet_handle_packet()
464 !net->login_sent) { in tbnet_handle_packet()
465 net->login_retries = 0; in tbnet_handle_packet()
467 &net->login_work, 0); in tbnet_handle_packet()
469 mutex_unlock(&net->connection_lock); in tbnet_handle_packet()
471 queue_work(system_long_wq, &net->connected_work); in tbnet_handle_packet()
476 netdev_dbg(net->dev, "remote logout request received\n"); in tbnet_handle_packet()
479 netdev_dbg(net->dev, "remote logout response sent\n"); in tbnet_handle_packet()
480 queue_work(system_long_wq, &net->disconnect_work); in tbnet_handle_packet()
489 netdev_warn(net->dev, "failed to send ThunderboltIP response\n"); in tbnet_handle_packet()
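Taken together, the login/logout helpers and this packet handler implement the ThunderboltIP control handshake; a sketch of the flow in comment form:

/*
 *   local                                               remote
 *   TBIP_LOGIN (carries local transmit HopID)  ----->
 *          <----- TBIP_LOGIN_RESPONSE (receiver MAC)        => login_sent
 *          <----- TBIP_LOGIN from the peer, answered alike  => login_received
 *   login_sent && login_received: connected_work enables rings and DMA paths
 *   TBIP_LOGOUT  ----->  answered with TBIP_STATUS, disconnect_work tears down
 */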
496 return ring->prod - ring->cons; in tbnet_available_buffers()
501 struct tbnet_ring *ring = &net->rx_ring; in tbnet_alloc_rx_buffers()
504 while (nbuffers--) { in tbnet_alloc_rx_buffers()
505 struct device *dma_dev = tb_ring_dma_device(ring->ring); in tbnet_alloc_rx_buffers()
506 unsigned int index = ring->prod & (TBNET_RING_SIZE - 1); in tbnet_alloc_rx_buffers()
507 struct tbnet_frame *tf = &ring->frames[index]; in tbnet_alloc_rx_buffers() local
510 if (tf->page) in tbnet_alloc_rx_buffers()
517 tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER); in tbnet_alloc_rx_buffers()
518 if (!tf->page) { in tbnet_alloc_rx_buffers()
519 ret = -ENOMEM; in tbnet_alloc_rx_buffers()
523 dma_addr = dma_map_page(dma_dev, tf->page, 0, in tbnet_alloc_rx_buffers()
526 ret = -ENOMEM; in tbnet_alloc_rx_buffers()
530 tf->frame.buffer_phy = dma_addr; in tbnet_alloc_rx_buffers()
531 tf->dev = net->dev; in tbnet_alloc_rx_buffers()
533 trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr, in tbnet_alloc_rx_buffers()
536 tb_ring_rx(ring->ring, &tf->frame); in tbnet_alloc_rx_buffers()
538 ring->prod++; in tbnet_alloc_rx_buffers()
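The producer/consumer accounting above relies on TBNET_RING_SIZE being a power of two: the free-running unsigned counters may wrap, but their difference still counts outstanding buffers and the low bits select the slot. A small worked sketch, assuming a 256-entry ring:

	unsigned int prod = 0xfffffffe, cons = 0xfffffff9;	/* already wrapped */
	unsigned int used = prod - cons;			/* == 5 buffers outstanding */
	unsigned int slot = prod & (TBNET_RING_SIZE - 1);	/* == 254 with a 256-entry ring */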
550 struct tbnet_ring *ring = &net->tx_ring; in tbnet_get_tx_buffer()
551 struct device *dma_dev = tb_ring_dma_device(ring->ring); in tbnet_get_tx_buffer()
552 struct tbnet_frame *tf; in tbnet_get_tx_buffer() local
558 index = ring->cons++ & (TBNET_RING_SIZE - 1); in tbnet_get_tx_buffer()
560 tf = &ring->frames[index]; in tbnet_get_tx_buffer()
561 tf->frame.size = 0; in tbnet_get_tx_buffer()
563 dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy, in tbnet_get_tx_buffer()
564 tbnet_frame_size(tf), DMA_TO_DEVICE); in tbnet_get_tx_buffer()
566 return tf; in tbnet_get_tx_buffer()
572 struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame); in tbnet_tx_callback() local
573 struct tbnet *net = netdev_priv(tf->dev); in tbnet_tx_callback()
576 net->tx_ring.prod++; in tbnet_tx_callback()
578 if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2) in tbnet_tx_callback()
579 netif_wake_queue(net->dev); in tbnet_tx_callback()
584 struct tbnet_ring *ring = &net->tx_ring; in tbnet_alloc_tx_buffers()
585 struct device *dma_dev = tb_ring_dma_device(ring->ring); in tbnet_alloc_tx_buffers()
589 struct tbnet_frame *tf = &ring->frames[i]; in tbnet_alloc_tx_buffers() local
592 tf->page = alloc_page(GFP_KERNEL); in tbnet_alloc_tx_buffers()
593 if (!tf->page) { in tbnet_alloc_tx_buffers()
595 return -ENOMEM; in tbnet_alloc_tx_buffers()
598 dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE, in tbnet_alloc_tx_buffers()
601 __free_page(tf->page); in tbnet_alloc_tx_buffers()
602 tf->page = NULL; in tbnet_alloc_tx_buffers()
604 return -ENOMEM; in tbnet_alloc_tx_buffers()
607 tf->dev = net->dev; in tbnet_alloc_tx_buffers()
608 tf->frame.buffer_phy = dma_addr; in tbnet_alloc_tx_buffers()
609 tf->frame.callback = tbnet_tx_callback; in tbnet_alloc_tx_buffers()
610 tf->frame.sof = TBIP_PDF_FRAME_START; in tbnet_alloc_tx_buffers()
611 tf->frame.eof = TBIP_PDF_FRAME_END; in tbnet_alloc_tx_buffers()
613 trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE); in tbnet_alloc_tx_buffers()
616 ring->cons = 0; in tbnet_alloc_tx_buffers()
617 ring->prod = TBNET_RING_SIZE - 1; in tbnet_alloc_tx_buffers()
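For the Tx ring the counter roles are reversed relative to Rx: cons advances in tbnet_get_tx_buffer() as buffers are handed out and prod advances in tbnet_tx_callback() as the hardware completes them, so the initialization above starts with all but one slot counted as free:

	unsigned int avail = ring->prod - ring->cons;	/* TBNET_RING_SIZE - 1 right after allocation */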
628 if (netif_carrier_ok(net->dev)) in tbnet_connected_work()
631 mutex_lock(&net->connection_lock); in tbnet_connected_work()
632 connected = net->login_sent && net->login_received; in tbnet_connected_work()
633 mutex_unlock(&net->connection_lock); in tbnet_connected_work()
638 netdev_dbg(net->dev, "login successful, enabling paths\n"); in tbnet_connected_work()
640 ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path); in tbnet_connected_work()
641 if (ret != net->remote_transmit_path) { in tbnet_connected_work()
642 netdev_err(net->dev, "failed to allocate Rx HopID\n"); in tbnet_connected_work()
646 /* Both logins successful so enable the rings, high-speed DMA in tbnet_connected_work()
653 tb_ring_start(net->tx_ring.ring); in tbnet_connected_work()
654 tb_ring_start(net->rx_ring.ring); in tbnet_connected_work()
664 ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path, in tbnet_connected_work()
665 net->rx_ring.ring->hop, in tbnet_connected_work()
666 net->remote_transmit_path, in tbnet_connected_work()
667 net->tx_ring.ring->hop); in tbnet_connected_work()
669 netdev_err(net->dev, "failed to enable DMA paths\n"); in tbnet_connected_work()
673 netif_carrier_on(net->dev); in tbnet_connected_work()
674 netif_start_queue(net->dev); in tbnet_connected_work()
676 netdev_dbg(net->dev, "network traffic started\n"); in tbnet_connected_work()
680 tbnet_free_buffers(&net->tx_ring); in tbnet_connected_work()
682 tbnet_free_buffers(&net->rx_ring); in tbnet_connected_work()
684 tb_ring_stop(net->rx_ring.ring); in tbnet_connected_work()
685 tb_ring_stop(net->tx_ring.ring); in tbnet_connected_work()
686 tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path); in tbnet_connected_work()
695 if (netif_carrier_ok(net->dev)) in tbnet_login_work()
698 netdev_dbg(net->dev, "sending login request, retries=%u\n", in tbnet_login_work()
699 net->login_retries); in tbnet_login_work()
701 ret = tbnet_login_request(net, net->login_retries % 4); in tbnet_login_work()
703 netdev_dbg(net->dev, "sending login request failed, ret=%d\n", in tbnet_login_work()
705 if (net->login_retries++ < TBNET_LOGIN_RETRIES) { in tbnet_login_work()
706 queue_delayed_work(system_long_wq, &net->login_work, in tbnet_login_work()
709 netdev_info(net->dev, "ThunderboltIP login timed out\n"); in tbnet_login_work()
712 netdev_dbg(net->dev, "received login reply\n"); in tbnet_login_work()
714 net->login_retries = 0; in tbnet_login_work()
716 mutex_lock(&net->connection_lock); in tbnet_login_work()
717 net->login_sent = true; in tbnet_login_work()
718 mutex_unlock(&net->connection_lock); in tbnet_login_work()
720 queue_work(system_long_wq, &net->connected_work); in tbnet_login_work()
731 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, in tbnet_check_frame() argument
737 if (tf->frame.flags & RING_DESC_CRC_ERROR) { in tbnet_check_frame()
738 net->stats.rx_crc_errors++; in tbnet_check_frame()
740 } else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) { in tbnet_check_frame()
741 net->stats.rx_over_errors++; in tbnet_check_frame()
746 size = tbnet_frame_size(tf); in tbnet_check_frame()
748 net->stats.rx_length_errors++; in tbnet_check_frame()
752 frame_count = le32_to_cpu(hdr->frame_count); in tbnet_check_frame()
753 frame_size = le32_to_cpu(hdr->frame_size); in tbnet_check_frame()
754 frame_index = le16_to_cpu(hdr->frame_index); in tbnet_check_frame()
755 frame_id = le16_to_cpu(hdr->frame_id); in tbnet_check_frame()
757 if ((frame_size > size - sizeof(*hdr)) || !frame_size) { in tbnet_check_frame()
758 net->stats.rx_length_errors++; in tbnet_check_frame()
765 if (net->skb && net->rx_hdr.frame_count) { in tbnet_check_frame()
767 if (frame_count != le32_to_cpu(net->rx_hdr.frame_count)) { in tbnet_check_frame()
768 net->stats.rx_length_errors++; in tbnet_check_frame()
775 if (frame_index != le16_to_cpu(net->rx_hdr.frame_index) + 1 || in tbnet_check_frame()
776 frame_id != le16_to_cpu(net->rx_hdr.frame_id)) { in tbnet_check_frame()
777 net->stats.rx_missed_errors++; in tbnet_check_frame()
781 if (net->skb->len + frame_size > TBNET_MAX_MTU) { in tbnet_check_frame()
782 net->stats.rx_length_errors++; in tbnet_check_frame()
791 net->stats.rx_length_errors++; in tbnet_check_frame()
795 net->stats.rx_missed_errors++; in tbnet_check_frame()
805 unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring); in tbnet_poll()
806 struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring); in tbnet_poll()
814 struct tbnet_frame *tf; in tbnet_poll() local
819 /* Return some buffers to hardware, one at a time is too in tbnet_poll()
828 frame = tb_ring_poll(net->rx_ring.ring); in tbnet_poll()
832 dma_unmap_page(dma_dev, frame->buffer_phy, in tbnet_poll()
835 tf = container_of(frame, typeof(*tf), frame); in tbnet_poll()
837 page = tf->page; in tbnet_poll()
838 tf->page = NULL; in tbnet_poll()
839 net->rx_ring.cons++; in tbnet_poll()
843 if (!tbnet_check_frame(net, tf, hdr)) { in tbnet_poll()
844 trace_tbnet_invalid_rx_ip_frame(hdr->frame_size, in tbnet_poll()
845 hdr->frame_id, hdr->frame_index, hdr->frame_count); in tbnet_poll()
847 dev_kfree_skb_any(net->skb); in tbnet_poll()
848 net->skb = NULL; in tbnet_poll()
852 trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id, in tbnet_poll()
853 hdr->frame_index, hdr->frame_count); in tbnet_poll()
854 frame_size = le32_to_cpu(hdr->frame_size); in tbnet_poll()
856 skb = net->skb; in tbnet_poll()
862 net->stats.rx_errors++; in tbnet_poll()
869 net->skb = skb; in tbnet_poll()
871 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in tbnet_poll()
873 TBNET_RX_PAGE_SIZE - hdr_size); in tbnet_poll()
876 net->rx_hdr.frame_size = hdr->frame_size; in tbnet_poll()
877 net->rx_hdr.frame_count = hdr->frame_count; in tbnet_poll()
878 net->rx_hdr.frame_index = hdr->frame_index; in tbnet_poll()
879 net->rx_hdr.frame_id = hdr->frame_id; in tbnet_poll()
880 last = le16_to_cpu(net->rx_hdr.frame_index) == in tbnet_poll()
881 le32_to_cpu(net->rx_hdr.frame_count) - 1; in tbnet_poll()
884 net->stats.rx_bytes += frame_size; in tbnet_poll()
887 skb->protocol = eth_type_trans(skb, net->dev); in tbnet_poll()
889 napi_gro_receive(&net->napi, skb); in tbnet_poll()
890 net->skb = NULL; in tbnet_poll()
894 net->stats.rx_packets += rx_packets; in tbnet_poll()
903 /* Re-enable the ring interrupt */ in tbnet_poll()
904 tb_ring_poll_complete(net->rx_ring.ring); in tbnet_poll()
913 napi_schedule(&net->napi); in tbnet_start_poll()
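The poll fragments above follow the standard NAPI contract: consume frames up to the budget, and only when the budget was not exhausted complete NAPI and re-arm the ring interrupt. A minimal sketch of that shape, with my_priv, my_rx_one() and my_irq_enable() as hypothetical stand-ins for the tbnet specifics:

struct my_priv {					/* hypothetical private data */
	struct napi_struct napi;
};

static bool my_rx_one(struct my_priv *priv);		/* hypothetical: handle one Rx frame */
static void my_irq_enable(struct my_priv *priv);	/* hypothetical: tb_ring_poll_complete() here */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = 0;

	/* Consume frames until the ring is empty or the budget is spent */
	while (done < budget && my_rx_one(priv))
		done++;

	/* Under budget: complete NAPI and re-enable the ring interrupt */
	if (done < budget && napi_complete_done(napi, done))
		my_irq_enable(priv);

	return done;
}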
919 struct tb_xdomain *xd = net->xd; in tbnet_open()
927 ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE, in tbnet_open()
931 return -ENOMEM; in tbnet_open()
933 net->tx_ring.ring = ring; in tbnet_open()
935 hopid = tb_xdomain_alloc_out_hopid(xd, -1); in tbnet_open()
938 tb_ring_free(net->tx_ring.ring); in tbnet_open()
939 net->tx_ring.ring = NULL; in tbnet_open()
942 net->local_transmit_path = hopid; in tbnet_open()
949 if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E) in tbnet_open()
952 ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags, in tbnet_open()
953 net->tx_ring.ring->hop, sof_mask, in tbnet_open()
958 tb_ring_free(net->tx_ring.ring); in tbnet_open()
959 net->tx_ring.ring = NULL; in tbnet_open()
960 return -ENOMEM; in tbnet_open()
962 net->rx_ring.ring = ring; in tbnet_open()
964 napi_enable(&net->napi); in tbnet_open()
974 napi_disable(&net->napi); in tbnet_stop()
976 cancel_work_sync(&net->disconnect_work); in tbnet_stop()
979 tb_ring_free(net->rx_ring.ring); in tbnet_stop()
980 net->rx_ring.ring = NULL; in tbnet_stop()
982 tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path); in tbnet_stop()
983 tb_ring_free(net->tx_ring.ring); in tbnet_stop()
984 net->tx_ring.ring = NULL; in tbnet_stop()
992 struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page); in tbnet_xmit_csum_and_map()
993 struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring); in tbnet_xmit_csum_and_map()
996 u32 paylen = skb->len - skb_transport_offset(skb); in tbnet_xmit_csum_and_map()
998 __be16 protocol = skb->protocol; in tbnet_xmit_csum_and_map()
999 void *data = skb->data; in tbnet_xmit_csum_and_map()
1003 if (skb->ip_summed != CHECKSUM_PARTIAL) { in tbnet_xmit_csum_and_map()
1008 hdr = page_address(frames[i]->page); in tbnet_xmit_csum_and_map()
1009 hdr->frame_count = cpu_to_le32(frame_count); in tbnet_xmit_csum_and_map()
1010 trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id, in tbnet_xmit_csum_and_map()
1011 hdr->frame_index, hdr->frame_count); in tbnet_xmit_csum_and_map()
1013 frames[i]->frame.buffer_phy, in tbnet_xmit_csum_and_map()
1027 protocol = vhdr->h_vlan_encapsulated_proto; in tbnet_xmit_csum_and_map()
1036 __sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data); in tbnet_xmit_csum_and_map()
1040 ip_hdr(skb)->ihl); in tbnet_xmit_csum_and_map()
1042 if (ip_hdr(skb)->protocol == IPPROTO_TCP) in tbnet_xmit_csum_and_map()
1043 tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data); in tbnet_xmit_csum_and_map()
1044 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) in tbnet_xmit_csum_and_map()
1045 tucso = dest + ((void *)&(udp_hdr(skb)->check) - data); in tbnet_xmit_csum_and_map()
1049 *tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in tbnet_xmit_csum_and_map()
1050 ip_hdr(skb)->daddr, 0, in tbnet_xmit_csum_and_map()
1051 ip_hdr(skb)->protocol, 0); in tbnet_xmit_csum_and_map()
1053 tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data); in tbnet_xmit_csum_and_map()
1054 *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in tbnet_xmit_csum_and_map()
1055 &ipv6_hdr(skb)->daddr, 0, in tbnet_xmit_csum_and_map()
1058 tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset; in tbnet_xmit_csum_and_map()
1059 *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in tbnet_xmit_csum_and_map()
1060 &ipv6_hdr(skb)->daddr, 0, in tbnet_xmit_csum_and_map()
1061 ipv6_hdr(skb)->nexthdr, 0); in tbnet_xmit_csum_and_map()
1070 hdr = page_address(frames[i]->page); in tbnet_xmit_csum_and_map()
1072 len = le32_to_cpu(hdr->frame_size) - offset; in tbnet_xmit_csum_and_map()
1074 hdr->frame_count = cpu_to_le32(frame_count); in tbnet_xmit_csum_and_map()
1075 trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id, in tbnet_xmit_csum_and_map()
1076 hdr->frame_index, hdr->frame_count); in tbnet_xmit_csum_and_map()
1087 dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy, in tbnet_xmit_csum_and_map()
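The ~csum_tcpudp_magic()/~csum_ipv6_magic() writes above seed the transport checksum field with the folded, non-complemented pseudo-header sum, so that a later csum_fold() over a csum_partial() of the whole transport segment (which now includes that seed) yields the final Internet checksum. A self-contained sketch of the same arithmetic for a linear IPv4 TCP skb, passing the transport length directly instead of accounting for it separately as the per-frame loop does:

	struct tcphdr *th = tcp_hdr(skb);
	unsigned int tlen = skb->len - skb_transport_offset(skb);

	/* Folded pseudo-header sum (note the outer ~) stored in the checksum field */
	th->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				       tlen, IPPROTO_TCP, 0);
	/* One's-complement sum over header + payload, seed included, then fold */
	th->check = csum_fold(csum_partial(th, tlen, 0));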
1097 const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num]; in tbnet_kmap_frag()
1108 u16 frame_id = atomic_read(&net->frame_id); in tbnet_start_xmit()
1111 unsigned int data_len = skb->len; in tbnet_start_xmit()
1114 void *src = skb->data; in tbnet_start_xmit()
1122 if (tbnet_available_buffers(&net->tx_ring) < nframes) { in tbnet_start_xmit()
1123 netif_stop_queue(net->dev); in tbnet_start_xmit()
1131 hdr = page_address(frames[frame_index]->page); in tbnet_start_xmit()
1138 hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE); in tbnet_start_xmit()
1139 hdr->frame_index = cpu_to_le16(frame_index); in tbnet_start_xmit()
1140 hdr->frame_id = cpu_to_le16(frame_id); in tbnet_start_xmit()
1149 len -= size_left; in tbnet_start_xmit()
1156 size_left -= len; in tbnet_start_xmit()
1165 if (frag < skb_shinfo(skb)->nr_frags) { in tbnet_start_xmit()
1174 data_len -= TBNET_MAX_PAYLOAD_SIZE; in tbnet_start_xmit()
1181 hdr = page_address(frames[frame_index]->page); in tbnet_start_xmit()
1185 hdr->frame_size = cpu_to_le32(data_len); in tbnet_start_xmit()
1186 hdr->frame_index = cpu_to_le16(frame_index); in tbnet_start_xmit()
1187 hdr->frame_id = cpu_to_le16(frame_id); in tbnet_start_xmit()
1189 frames[frame_index]->frame.size = data_len + sizeof(*hdr); in tbnet_start_xmit()
1191 /* In case the remaining data_len is smaller than a frame */ in tbnet_start_xmit()
1194 data_len -= len; in tbnet_start_xmit()
1202 if (frag < skb_shinfo(skb)->nr_frags) { in tbnet_start_xmit()
1219 tb_ring_tx(net->tx_ring.ring, &frames[i]->frame); in tbnet_start_xmit()
1221 if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID) in tbnet_start_xmit()
1222 atomic_inc(&net->frame_id); in tbnet_start_xmit()
1224 net->stats.tx_packets++; in tbnet_start_xmit()
1225 net->stats.tx_bytes += skb->len; in tbnet_start_xmit()
1233 /* We can re-use the buffers */ in tbnet_start_xmit()
1234 net->tx_ring.cons -= frame_index; in tbnet_start_xmit()
1237 net->stats.tx_errors++; in tbnet_start_xmit()
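For scale, assuming the 4 KiB TBNET_FRAME_SIZE and the 12-byte ThunderboltIP header, TBNET_MAX_PAYLOAD_SIZE works out to 4096 - 12 = 4084 bytes, so a maximum-MTU packet (65522 bytes plus the 14-byte Ethernet header, 65536 in total) is split into ceil(65536 / 4084) = 17 DMA frames, each carrying its own header. The frame count is computed along these lines:

	unsigned int nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);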
1247 stats->tx_packets = net->stats.tx_packets; in tbnet_get_stats64()
1248 stats->rx_packets = net->stats.rx_packets; in tbnet_get_stats64()
1249 stats->tx_bytes = net->stats.tx_bytes; in tbnet_get_stats64()
1250 stats->rx_bytes = net->stats.rx_bytes; in tbnet_get_stats64()
1251 stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors + in tbnet_get_stats64()
1252 net->stats.rx_over_errors + net->stats.rx_crc_errors + in tbnet_get_stats64()
1253 net->stats.rx_missed_errors; in tbnet_get_stats64()
1254 stats->tx_errors = net->stats.tx_errors; in tbnet_get_stats64()
1255 stats->rx_length_errors = net->stats.rx_length_errors; in tbnet_get_stats64()
1256 stats->rx_over_errors = net->stats.rx_over_errors; in tbnet_get_stats64()
1257 stats->rx_crc_errors = net->stats.rx_crc_errors; in tbnet_get_stats64()
1258 stats->rx_missed_errors = net->stats.rx_missed_errors; in tbnet_get_stats64()
1271 const struct tb_xdomain *xd = net->xd; in tbnet_generate_mac()
1276 phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route)); in tbnet_generate_mac()
1280 hash = jhash2((u32 *)xd->local_uuid, 4, 0); in tbnet_generate_mac()
1282 hash = jhash2((u32 *)xd->local_uuid, 4, hash); in tbnet_generate_mac()
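The two jhash2() passes above feed a locally administered unicast MAC address; a sketch of how the bytes could be assembled around them (the exact layout beyond the two hash calls is an assumption):

	u8 addr[ETH_ALEN];

	addr[0] = phy_port << 4 | 0x02;			/* locally administered, unicast */
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(addr + 1, &hash, sizeof(hash));		/* bytes 1-4 from the first hash */
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
	addr[5] = hash & 0xff;				/* final byte from the re-hash */
	eth_hw_addr_set(dev, addr);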
1296 return -ENOMEM; in tbnet_probe()
1298 SET_NETDEV_DEV(dev, &svc->dev); in tbnet_probe()
1301 INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); in tbnet_probe()
1302 INIT_WORK(&net->connected_work, tbnet_connected_work); in tbnet_probe()
1303 INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); in tbnet_probe()
1304 mutex_init(&net->connection_lock); in tbnet_probe()
1305 atomic_set(&net->command_id, 0); in tbnet_probe()
1306 atomic_set(&net->frame_id, 0); in tbnet_probe()
1307 net->svc = svc; in tbnet_probe()
1308 net->dev = dev; in tbnet_probe()
1309 net->xd = xd; in tbnet_probe()
1313 strcpy(dev->name, "thunderbolt%d"); in tbnet_probe()
1314 dev->netdev_ops = &tbnet_netdev_ops; in tbnet_probe()
1329 dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO | in tbnet_probe()
1331 dev->features = dev->hw_features | NETIF_F_HIGHDMA; in tbnet_probe()
1332 dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header); in tbnet_probe()
1334 netif_napi_add(dev, &net->napi, tbnet_poll); in tbnet_probe()
1336 /* MTU range: 68 - 65522 */ in tbnet_probe()
1337 dev->min_mtu = ETH_MIN_MTU; in tbnet_probe()
1338 dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN; in tbnet_probe()
1340 net->handler.uuid = &tbnet_svc_uuid; in tbnet_probe()
1341 net->handler.callback = tbnet_handle_packet; in tbnet_probe()
1342 net->handler.data = net; in tbnet_probe()
1343 tb_register_protocol_handler(&net->handler); in tbnet_probe()
1349 tb_unregister_protocol_handler(&net->handler); in tbnet_probe()
1361 unregister_netdev(net->dev); in tbnet_remove()
1362 tb_unregister_protocol_handler(&net->handler); in tbnet_remove()
1363 free_netdev(net->dev); in tbnet_remove()
1377 if (netif_running(net->dev)) { in tbnet_suspend()
1378 netif_device_detach(net->dev); in tbnet_suspend()
1382 tb_unregister_protocol_handler(&net->handler); in tbnet_suspend()
1391 tb_register_protocol_handler(&net->handler); in tbnet_resume()
1393 netif_carrier_off(net->dev); in tbnet_resume()
1394 if (netif_running(net->dev)) { in tbnet_resume()
1395 netif_device_attach(net->dev); in tbnet_resume()
1413 .name = "thunderbolt-net",
1429 return -ENOMEM; in tbnet_init()