Lines matching +full:txs +full:- +full:delta
(search-result listing from the Cisco enic driver; only the matching lines are shown, grouped below by containing function)

file header:
    * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.

mod_range[] (file scope):
    {0, 0}, /* 0 - 4 Gbps */
    {0, 3}, /* 4 - 10 Gbps */

enic_init_affinity_hint():
    int numa_node = dev_to_node(&enic->pdev->dev);
    for (i = 0; i < enic->intr_count; i++) {
    (cpumask_available(enic->msix[i].affinity_mask) &&
    !cpumask_empty(enic->msix[i].affinity_mask)))
    if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
    enic->msix[i].affinity_mask);

enic_free_affinity_hint():
    for (i = 0; i < enic->intr_count; i++) {
    free_cpumask_var(enic->msix[i].affinity_mask);

enic_set_affinity_hint():
    for (i = 0; i < enic->intr_count; i++) {
    !cpumask_available(enic->msix[i].affinity_mask) ||
    cpumask_empty(enic->msix[i].affinity_mask))
    err = irq_update_affinity_hint(enic->msix_entry[i].vector,
    enic->msix[i].affinity_mask);
    netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
    for (i = 0; i < enic->wq_count; i++) {
    if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
    netif_set_xps_queue(enic->netdev,
    enic->msix[wq_intr].affinity_mask,

enic_unset_affinity_hint():
    for (i = 0; i < enic->intr_count; i++)
    irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);

enic_udp_tunnel_set_port():
    spin_lock_bh(&enic->devcmd_lock);
    err = vnic_dev_overlay_offload_cfg(enic->vdev,
    ntohs(ti->port));
    err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
    enic->vxlan.patch_level);
    enic->vxlan.vxlan_udp_port_number = ntohs(ti->port);
    spin_unlock_bh(&enic->devcmd_lock);

enic_udp_tunnel_unset_port():
    spin_lock_bh(&enic->devcmd_lock);
    err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
    enic->vxlan.vxlan_udp_port_number = 0;
    spin_unlock_bh(&enic->devcmd_lock);

enic_features_check():
    if (!skb->encapsulation)
    if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6))
    proto = ipv6_hdr(skb)->nexthdr;
    proto = ip_hdr(skb)->protocol;
    switch (eth->h_proto) {
    if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
    port = be16_to_cpu(udph->dest);
    if (port != enic->vxlan.vxlan_udp_port_number)
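The enic_features_check() fragment gates hardware offloads for encapsulated traffic: the outer IP protocol and the inner Ethernet protocol must be versions the VIC firmware advertised (ENIC_VXLAN_OUTER_IPV6, ENIC_VXLAN_INNER_IPV6), and the outer UDP destination port must match the one programmed via enic_udp_tunnel_set_port(). A minimal user-space sketch of that decision chain, with assumed flag bit values and struct layout (not the driver's):

    /* Sketch of the VXLAN offload gate in enic_features_check().
     * Flag bits, the struct, and port values are assumptions for
     * illustration; the driver reads them from skb and vnic state.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ENIC_VXLAN_OUTER_IPV6 0x1   /* assumed bit values */
    #define ENIC_VXLAN_INNER_IPV6 0x2

    struct vxlan_cfg {
        uint8_t  flags;
        uint16_t udp_port;              /* port programmed into the NIC */
    };

    static bool offload_allowed(const struct vxlan_cfg *cfg,
                                bool outer_ipv6, bool inner_ipv6,
                                uint16_t udp_dest)
    {
        if (outer_ipv6 && !(cfg->flags & ENIC_VXLAN_OUTER_IPV6))
            return false;               /* NIC cannot parse outer IPv6 */
        if (inner_ipv6 && !(cfg->flags & ENIC_VXLAN_INNER_IPV6))
            return false;               /* NIC cannot parse inner IPv6 */
        if (udp_dest != cfg->udp_port)
            return false;               /* not the offloaded VXLAN port */
        return true;
    }

    int main(void)
    {
        struct vxlan_cfg cfg = { ENIC_VXLAN_OUTER_IPV6, 4789 };

        printf("%d\n", offload_allowed(&cfg, true, false, 4789));  /* 1 */
        printf("%d\n", offload_allowed(&cfg, false, true, 4789));  /* 0 */
        printf("%d\n", offload_allowed(&cfg, false, false, 8472)); /* 0 */
        return 0;
    }

When the gate fails, the real function strips the checksum/GSO feature bits so the stack falls back to software; the sketch only returns the verdict.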
enic_is_dynamic():
    return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;

enic_sriov_enabled():
    return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;

enic_is_sriov_vf():
    return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;

enic_is_valid_vf():
    return vf >= 0 && vf < enic->num_vfs;

enic_free_wq_buf():
    struct enic *enic = vnic_dev_priv(wq->vdev);
    if (buf->sop)
    dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
    dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
    if (buf->os_buf)
    dev_kfree_skb_any(buf->os_buf);

enic_wq_free_buf():
    struct enic *enic = vnic_dev_priv(wq->vdev);
    enic->wq[wq->index].stats.cq_work++;
    enic->wq[wq->index].stats.cq_bytes += buf->len;

enic_wq_service():
    spin_lock(&enic->wq[q_number].lock);
    vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
    if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
    vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
    netif_wake_subqueue(enic->netdev, q_number);
    enic->wq[q_number].stats.wake++;
    spin_unlock(&enic->wq[q_number].lock);
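enic_wq_service() pairs with the stop logic in enic_hard_start_xmit(): the transmit path stops the subqueue when descriptors run low, and the completion path wakes it only once vnic_wq_desc_avail() climbs back above a wake threshold, counting the event in stats.wake. A small user-space model of that hysteresis, with an invented wake threshold:

    /* Sketch of the TX stop/wake hysteresis seen in enic_wq_service()
     * and enic_hard_start_xmit(). Names and thresholds are illustrative.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define WAKE_THRESH 16   /* assumed stand-in for the driver's level */

    struct txq {
        int  desc_avail;
        bool stopped;
    };

    static void xmit(struct txq *q, int descs_needed)
    {
        if (q->desc_avail < descs_needed) {
            q->stopped = true;            /* netif_tx_stop_queue() */
            return;
        }
        q->desc_avail -= descs_needed;
    }

    static void complete(struct txq *q, int descs_freed)
    {
        q->desc_avail += descs_freed;
        /* Wake only once a comfortable margin exists, to avoid
         * stop/wake ping-pong on every single completion. */
        if (q->stopped && q->desc_avail >= WAKE_THRESH) {
            q->stopped = false;           /* netif_wake_subqueue() */
            printf("queue woken at %d free descs\n", q->desc_avail);
        }
    }

    int main(void)
    {
        struct txq q = { .desc_avail = 4 };
        xmit(&q, 8);        /* stops: only 4 descriptors free */
        complete(&q, 4);    /* 8 free: still below threshold */
        complete(&q, 10);   /* 18 free: wakes */
        return 0;
    }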
enic_log_q_error():
    for (i = 0; i < enic->wq_count; i++) {
    error_status = vnic_wq_error_status(&enic->wq[i].vwq);
    netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
    for (i = 0; i < enic->rq_count; i++) {
    error_status = vnic_rq_error_status(&enic->rq[i].vrq);
    netdev_err(enic->netdev, "RQ[%d] error_status %d\n",

enic_msglvl_check():
    u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
    if (msg_enable != enic->msg_enable) {
    netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
    enic->msg_enable, msg_enable);
    enic->msg_enable = msg_enable;

enic_mtu_check():
    u32 mtu = vnic_dev_mtu(enic->vdev);
    struct net_device *netdev = enic->netdev;
    if (mtu && mtu != enic->port_mtu) {
    enic->port_mtu = mtu;
    if (mtu != netdev->mtu)
    schedule_work(&enic->change_mtu_work);
    if (mtu < netdev->mtu)
    netdev->mtu, mtu);

enic_set_rx_coal_setting():
    int index = -1;
    struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
    * 3. Update it in enic->rx_coalesce_setting
    speed = vnic_dev_port_speed(enic->vdev);
    rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
    rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
    rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
    for (index = 0; index < enic->rq_count; index++)
    enic->cq[index].cur_rx_coal_timeval =
    enic->config.intr_timer_usec;
    rx_coal->use_adaptive_rx_coalesce = 1;
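enic_set_rx_coal_setting() derives the adaptive-coalescing range starts from the link speed reported by vnic_dev_port_speed(), indexing the mod_range table shown near the top of this listing ({0, 0} for 0-4 Gbps, {0, 3} for 4-10 Gbps). A sketch of that lookup, using only the two table rows visible here and an illustrative 4 Gbps threshold:

    /* Sketch of the speed-band lookup in enic_set_rx_coal_setting().
     * The band boundary (4000 Mbps) and table size are assumptions;
     * the two rows are the ones shown in the fragment above.
     */
    #include <stdio.h>

    struct mod_range { int small_start, large_start; };

    static const struct mod_range mod_table[] = {
        {0, 0},   /* 0 - 4 Gbps  */
        {0, 3},   /* 4 - 10 Gbps */
    };

    static struct mod_range pick(unsigned speed_mbps)
    {
        return mod_table[speed_mbps >= 4000];   /* band index 0 or 1 */
    }

    int main(void)
    {
        struct mod_range r = pick(10000);
        printf("small=%d large=%d\n", r.small_start, r.large_start);
        /* small=0 large=3 */
        return 0;
    }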
enic_link_check():
    int link_status = vnic_dev_link_status(enic->vdev);
    int carrier_ok = netif_carrier_ok(enic->netdev);
    netdev_info(enic->netdev, "Link UP\n");
    netif_carrier_on(enic->netdev);
    netdev_info(enic->netdev, "Link DOWN\n");
    netif_carrier_off(enic->netdev);

enic_isr_legacy():
    vnic_intr_mask(&enic->intr[io_intr]);
    pba = vnic_intr_legacy_pba(enic->legacy_pba);
    vnic_intr_unmask(&enic->intr[io_intr]);
    vnic_intr_return_all_credits(&enic->intr[notify_intr]);
    vnic_intr_return_all_credits(&enic->intr[err_intr]);
    schedule_work(&enic->reset);
    napi_schedule_irqoff(&enic->napi[0]);
    vnic_intr_unmask(&enic->intr[io_intr]);

enic_isr_msi():
    * is not providing per-vector masking, so the OS will not
    napi_schedule_irqoff(&enic->napi[0]);

enic_isr_msix_err():
    vnic_intr_return_all_credits(&enic->intr[intr]);
    schedule_work(&enic->reset);

enic_isr_msix_notify():
    vnic_intr_return_all_credits(&enic->intr[intr]);

enic_queue_wq_skb_cont():
    for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
    len_left -= skb_frag_size(frag);
    dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
    return -ENOMEM;

enic_queue_wq_skb_vlan():
    unsigned int len_left = skb->len - head_len;
    dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
    return -ENOMEM;
    enic->wq[wq->index].stats.csum_none++;
    enic->wq[wq->index].stats.packets++;
    enic->wq[wq->index].stats.bytes += skb->len;

enic_queue_wq_skb_csum_l4():
    unsigned int len_left = skb->len - head_len;
    unsigned int csum_offset = hdr_len + skb->csum_offset;
    dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
    return -ENOMEM;
    enic->wq[wq->index].stats.csum_partial++;
    enic->wq[wq->index].stats.packets++;
    enic->wq[wq->index].stats.bytes += skb->len;

enic_preload_tcp_csum_encap():
    switch (eth->h_proto) {
    inner_ip_hdr(skb)->check = 0;
    inner_tcp_hdr(skb)->check =
    ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
    inner_ip_hdr(skb)->daddr, 0,
    inner_tcp_hdr(skb)->check =
    ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
    &inner_ipv6_hdr(skb)->daddr, 0,

enic_preload_tcp_csum():
    if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
    ip_hdr(skb)->check = 0;
    tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
    ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
    } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {

enic_queue_wq_skb_tso():
    unsigned int len_left = skb->len - frag_len_left;
    if (skb->encapsulation) {
    enic->wq[wq->index].stats.encap_tso++;
    enic->wq[wq->index].stats.tso++;
    dma_addr = dma_map_single(&enic->pdev->dev,
    skb->data + offset, len,
    return -ENOMEM;
    frag_len_left -= len;
    for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
    len_left -= skb_frag_size(frag);
    dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
    return -ENOMEM;
    frag_len_left -= len;
    len = skb->len - hdr_len;
    enic->wq[wq->index].stats.packets += pkts;
    enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len));
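The last lines of enic_queue_wq_skb_tso() account for segmentation: the payload (skb->len - hdr_len) leaves the wire as pkts MSS-sized segments, each re-carrying hdr_len bytes of headers, so stats.bytes grows by len + pkts * hdr_len. A worked example; deriving pkts with DIV_ROUND_UP is an assumption, since the fragment does not show that line:

    /* Worked example of the TSO accounting in enic_queue_wq_skb_tso().
     * The DIV_ROUND_UP derivation of pkts is assumed for illustration.
     */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned skb_len = 64240 + 54;  /* payload + headers */
        unsigned hdr_len = 54;          /* eth + ip + tcp */
        unsigned mss = 1448;

        unsigned len  = skb_len - hdr_len;       /* TSO payload: 64240 */
        unsigned pkts = DIV_ROUND_UP(len, mss);  /* 45 segments */
        unsigned long wire = len + (unsigned long)pkts * hdr_len;

        printf("segments=%u wire_bytes=%lu\n", pkts, wire);
        /* segments=45 wire_bytes=66670 */
        return 0;
    }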
enic_queue_wq_skb_encap():
    unsigned int len_left = skb->len - head_len;
    dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
    return -ENOMEM;
    enic->wq[wq->index].stats.encap_csum++;
    enic->wq[wq->index].stats.packets++;
    enic->wq[wq->index].stats.bytes += skb->len;

enic_queue_wq_skb():
    unsigned int mss = skb_shinfo(skb)->gso_size;
    enic->wq[wq->index].stats.add_vlan++;
    } else if (enic->loop_enable) {
    vlan_tag = enic->loop_tag;
    else if (skb->encapsulation)
    else if (skb->ip_summed == CHECKSUM_PARTIAL)
    buf = wq->to_use->prev;
    while (!buf->os_buf && (buf->next != wq->to_clean)) {
    wq->ring.desc_avail++;
    buf = buf->prev;
    wq->to_use = buf->next;

enic_hard_start_xmit():
    txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
    wq = &enic->wq[txq_map].vwq;
    if (skb->len <= 0) {
    enic->wq[wq->index].stats.null_pkt++;
    /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
    if (skb_shinfo(skb)->gso_size == 0 &&
    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
    enic->wq[wq->index].stats.skb_linear_fail++;
    spin_lock(&enic->wq[txq_map].lock);
    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
    spin_unlock(&enic->wq[txq_map].lock);
    enic->wq[wq->index].stats.desc_full_awake++;
    enic->wq[wq->index].stats.stopped++;
    spin_unlock(&enic->wq[txq_map].lock);
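enic_hard_start_xmit() budgets WQ descriptors before queuing: a non-TSO skb may consume at most ENIC_NON_TSO_MAX_DESC descriptors (head plus page frags), so over-fragmented skbs are linearized first (the skb_linear_fail counter suggests a failed skb_linearize() drops the packet), and the queue is stopped under the WQ lock when too few descriptors remain. A sketch of the first check, with an invented descriptor limit:

    /* Sketch of the non-TSO descriptor budget in enic_hard_start_xmit().
     * The limit value and drop-on-linearize-failure behavior are
     * assumptions drawn from the fragment's stat names.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define NON_TSO_MAX_DESC 16   /* assumed */

    /* Returns descriptors the skb will consume, or -1 to drop it. */
    static int tx_budget(int nr_frags, bool is_tso, bool can_linearize)
    {
        int descs = nr_frags + 1;        /* head + page frags */

        if (!is_tso && descs > NON_TSO_MAX_DESC) {
            if (!can_linearize)
                return -1;               /* skb_linearize() failed */
            descs = 1;                   /* one contiguous buffer now */
        }
        return descs;
    }

    int main(void)
    {
        printf("%d\n", tx_budget(17, false, true));   /* 1  */
        printf("%d\n", tx_budget(17, false, false));  /* -1 */
        printf("%d\n", tx_budget(17, true,  false));  /* 18 */
        return 0;
    }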
enic_get_stats():
    if (err == -ENOMEM)
    net_stats->tx_packets = stats->tx.tx_frames_ok;
    net_stats->tx_bytes = stats->tx.tx_bytes_ok;
    net_stats->tx_errors = stats->tx.tx_errors;
    net_stats->tx_dropped = stats->tx.tx_drops;
    net_stats->rx_packets = stats->rx.rx_frames_ok;
    net_stats->rx_bytes = stats->rx.rx_bytes_ok;
    net_stats->rx_errors = stats->rx.rx_errors;
    net_stats->multicast = stats->rx.rx_multicast_frames_ok;
    for (i = 0; i < enic->rq_count; i++) {
    struct enic_rq_stats *rqs = &enic->rq[i].stats;
    if (!enic->rq[i].vrq.ctrl)
    pkt_truncated += rqs->pkt_truncated;
    bad_fcs += rqs->bad_fcs;
    net_stats->rx_over_errors = pkt_truncated;
    net_stats->rx_crc_errors = bad_fcs;
    net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

enic_mc_sync():
    if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
    return -ENOSPC;
    enic->mc_count++;

enic_mc_unsync():
    enic->mc_count--;

enic_uc_sync():
    if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
    return -ENOSPC;
    enic->uc_count++;

enic_uc_unsync():
    enic->uc_count--;

enic_reset_addr_lists():
    struct net_device *netdev = enic->netdev;
    enic->mc_count = 0;
    enic->uc_count = 0;
    enic->flags = 0;

enic_set_mac_addr():
    return -EADDRNOTAVAIL;
    return -EADDRNOTAVAIL;

enic_set_mac_address_dynamic():
    char *addr = saddr->sa_data;
    if (netif_running(enic->netdev)) {
    if (netif_running(enic->netdev)) {

enic_set_mac_address():
    char *addr = saddr->sa_data;

enic_set_rx_mode():
    int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
    int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
    int promisc = (netdev->flags & IFF_PROMISC) ||
    int allmulti = (netdev->flags & IFF_ALLMULTI) ||
    unsigned int flags = netdev->flags |
    if (enic->flags != flags) {
    enic->flags = flags;

enic_tx_timeout():
    schedule_work(&enic->tx_hang_reset);

enic_set_vf_mac():
    memcpy(pp->vf_mac, mac, ETH_ALEN);
    return -EINVAL;

enic_set_vf_port():
    return -EOPNOTSUPP;
    memcpy(&prev_pp, pp, sizeof(*enic->pp));
    memset(pp, 0, sizeof(*enic->pp));
    pp->set |= ENIC_SET_REQUEST;
    pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
    return -EINVAL;
    pp->set |= ENIC_SET_NAME;
    memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
    return -EINVAL;
    pp->set |= ENIC_SET_INSTANCE;
    memcpy(pp->instance_uuid,
    return -EINVAL;
    pp->set |= ENIC_SET_HOST;
    memcpy(pp->host_uuid,
    memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);
    if (is_zero_ether_addr(netdev->dev_addr))
    /* SR-IOV VF: get mac from adapter */
    vnic_dev_get_mac_addr, pp->mac_addr);
    pp->set |= ENIC_PORT_REQUEST_APPLIED;
    if (pp->request == PORT_REQUEST_DISASSOCIATE) {
    eth_zero_addr(pp->mac_addr);
    eth_zero_addr(pp->vf_mac);

enic_get_vf_port():
    if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
    return -ENODATA;
    err = enic_process_get_pp_request(enic, vf, pp->request, &response);
    if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
    ((pp->set & ENIC_SET_NAME) &&
    nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
    ((pp->set & ENIC_SET_INSTANCE) &&
    pp->instance_uuid)) ||
    ((pp->set & ENIC_SET_HOST) &&
    nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
    return -EMSGSIZE;

enic_free_rq_buf():
    struct enic *enic = vnic_dev_priv(rq->vdev);
    if (!buf->os_buf)
    dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
    dev_kfree_skb_any(buf->os_buf);
    buf->os_buf = NULL;

enic_rq_alloc_buf():
    struct enic *enic = vnic_dev_priv(rq->vdev);
    struct net_device *netdev = enic->netdev;
    unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
    struct vnic_rq_buf *buf = rq->to_use;
    if (buf->os_buf) {
    enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
    buf->len);
    enic->rq[rq->index].stats.no_skb++;
    return -ENOMEM;
    dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
    return -ENOMEM;

enic_intr_update_pkt_size():
    pkt_size->large_pkt_bytes_cnt += pkt_len;
    pkt_size->small_pkt_bytes_cnt += pkt_len;

enic_rxcopybreak():
    if (len > enic->rx_copybreak)
    dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
    memcpy(new_skb->data, (*skb)->data, len);
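enic_rxcopybreak() implements the classic copybreak optimization: packets no longer than enic->rx_copybreak (initialized from RX_COPYBREAK_DEFAULT in enic_probe(), further below) are synced for CPU access and memcpy'd into a fresh small skb, so the original DMA-mapped receive buffer can be reposted without an unmap/remap round trip. A user-space model of the idea, not kernel code:

    /* Sketch of the RX copybreak pattern in enic_rxcopybreak().
     * Threshold value and the ownership convention are illustrative.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define RX_COPYBREAK 256   /* assumed default threshold */

    /* Returns a buffer owning the packet; *recycled tells the caller
     * whether the original RX buffer may be reposted as-is. */
    static void *rx_copybreak(void *rx_buf, size_t len, bool *recycled)
    {
        if (len > RX_COPYBREAK) {
            *recycled = false;       /* hand the big buffer up */
            return rx_buf;
        }
        void *copy = malloc(len);
        if (!copy) {                 /* allocation failed: no copy */
            *recycled = false;
            return rx_buf;
        }
        memcpy(copy, rx_buf, len);   /* small packet: copy out */
        *recycled = true;
        return copy;
    }

    int main(void)
    {
        char buf[2048] = "hello";
        bool recycled;
        void *pkt = rx_copybreak(buf, 6, &recycled);

        printf("recycled=%d pkt=%s\n", recycled, (char *)pkt);
        if (pkt != (void *)buf)
            free(pkt);
        return 0;
    }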
enic_rq_indicate_buf():
    struct enic *enic = vnic_dev_priv(rq->vdev);
    struct net_device *netdev = enic->netdev;
    struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
    struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
    rqstats->packets++;
    rqstats->desc_skip++;
    skb = buf->os_buf;
    rqstats->bad_fcs++;
    rqstats->pkt_truncated++;
    dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
    buf->os_buf = NULL;
    rqstats->bytes += bytes_written;
    buf->os_buf = NULL;
    dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
    buf->len, DMA_FROM_DEVICE);
    prefetch(skb->data - NET_IP_ALIGN);
    skb->protocol = eth_type_trans(skb, netdev);
    if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
    rqstats->l4_rss_hash++;
    rqstats->l3_rss_hash++;
    if (enic->vxlan.vxlan_udp_port_number) {
    switch (enic->vxlan.patch_level) {
    if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    skb->csum_level = encap;
    rqstats->csum_unnecessary_encap++;
    rqstats->csum_unnecessary++;
    rqstats->vlan_stripped++;
    skb_mark_napi_id(skb, &enic->napi[rq->index]);
    if (!(netdev->features & NETIF_F_GRO))
    napi_gro_receive(&enic->napi[q_number], skb);
    if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
    enic_intr_update_pkt_size(&cq->pkt_size_counter,
    rqstats->pkt_truncated++;
    dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
    buf->os_buf = NULL;

enic_rq_service():
    vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,

enic_set_int_moderation():
    unsigned int intr = enic_msix_rq_intr(enic, rq->index);
    struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
    u32 timer = cq->tobe_rx_coal_timeval;
    if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
    vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
    cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;

enic_calc_int_moderation():
    struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
    struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
    struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
    u64 delta;
    delta = ktime_us_delta(now, cq->prev_ts);
    if (delta < ENIC_AIC_TS_BREAK)
    cq->prev_ts = now;
    traffic = pkt_size_counter->large_pkt_bytes_cnt +
    pkt_size_counter->small_pkt_bytes_cnt;
    * traffic *= (10^6 / delta) => bps
    * Combining, traffic *= (8 / delta)
    traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
    range_start = (pkt_size_counter->small_pkt_bytes_cnt >
    pkt_size_counter->large_pkt_bytes_cnt << 1) ?
    rx_coal->small_pkt_range_start :
    rx_coal->large_pkt_range_start;
    timer = range_start + ((rx_coal->range_end - range_start) *
    cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
    pkt_size_counter->large_pkt_bytes_cnt = 0;
    pkt_size_counter->small_pkt_bytes_cnt = 0;
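The comment inside enic_calc_int_moderation() compresses a unit derivation: with traffic in bytes and delta in microseconds, bytes * 8 gives bits, multiplying by 10^6/delta gives bits per second, and dividing by 10^6 gives Mbps, so the 10^6 factors cancel and the code just shifts left by 3 and divides by delta. A worked numeric check of that math:

    /* Worked example of the rate math in enic_calc_int_moderation():
     *   bits = bytes * 8
     *   bps  = bits * (10^6 / delta_us)
     *   Mbps = bps / 10^6   =>   Mbps = bytes * 8 / delta_us
     */
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t traffic = 1250000;   /* bytes seen in the window */
        uint64_t delta   = 1000;      /* microseconds since last calc */

        traffic <<= 3;                                  /* bytes -> bits */
        traffic = delta > UINT_MAX ? 0 : traffic / (uint32_t)delta;

        printf("%llu Mbps\n", (unsigned long long)traffic);  /* 10000 */
        return 0;
    }

The resulting Mbps value is then mapped onto [range_start, range_end] and averaged with the previous target, which is the (timer + cq->tobe_rx_coal_timeval) >> 1 smoothing visible above.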
enic_poll():
    struct net_device *netdev = napi->dev;
    wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
    rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
    vnic_intr_return_credits(&enic->intr[intr],
    err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf);
    if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
    enic_calc_int_moderation(enic, &enic->rq[0].vrq);
    if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
    enic_set_int_moderation(enic, &enic->rq[0].vrq);
    vnic_intr_unmask(&enic->intr[intr]);
    enic->rq[0].stats.napi_complete++;
    enic->rq[0].stats.napi_repoll++;

enic_free_rx_cpu_rmap():
    free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
    enic->netdev->rx_cpu_rmap = NULL;

enic_set_rx_cpu_rmap():
    if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
    enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
    if (unlikely(!enic->netdev->rx_cpu_rmap))
    for (i = 0; i < enic->rq_count; i++) {
    res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
    enic->msix_entry[i].vector);

enic_poll_msix_wq():
    struct net_device *netdev = napi->dev;
    unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
    struct vnic_wq *wq = &enic->wq[wq_index].vwq;
    wq_irq = wq->index;
    wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
    vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
    vnic_intr_unmask(&enic->intr[intr]);

enic_poll_msix_rq():
    struct net_device *netdev = napi->dev;
    unsigned int rq = (napi - &enic->napi[0]);
    work_done = vnic_cq_service(&enic->cq[cq],
    vnic_intr_return_credits(&enic->intr[intr],
    err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf);
    if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
    enic_calc_int_moderation(enic, &enic->rq[rq].vrq);
    if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
    enic_set_int_moderation(enic, &enic->rq[rq].vrq);
    vnic_intr_unmask(&enic->intr[intr]);
    enic->rq[rq].stats.napi_complete++;
    enic->rq[rq].stats.napi_repoll++;

enic_notify_timer():
    mod_timer(&enic->notify_timer,

enic_free_intr():
    struct net_device *netdev = enic->netdev;
    switch (vnic_dev_get_intr_mode(enic->vdev)) {
    free_irq(enic->pdev->irq, netdev);
    free_irq(enic->pdev->irq, enic);
    for (i = 0; i < enic->intr_count; i++)
    if (enic->msix[i].requested)
    free_irq(enic->msix_entry[i].vector,
    enic->msix[i].devid);

enic_request_intr():
    struct net_device *netdev = enic->netdev;
    switch (vnic_dev_get_intr_mode(enic->vdev)) {
    err = request_irq(enic->pdev->irq, enic_isr_legacy,
    IRQF_SHARED, netdev->name, netdev);
    err = request_irq(enic->pdev->irq, enic_isr_msi,
    0, netdev->name, enic);
    for (i = 0; i < enic->rq_count; i++) {
    snprintf(enic->msix[intr].devname,
    sizeof(enic->msix[intr].devname),
    "%s-rx-%u", netdev->name, i);
    enic->msix[intr].isr = enic_isr_msix;
    enic->msix[intr].devid = &enic->napi[i];
    for (i = 0; i < enic->wq_count; i++) {
    snprintf(enic->msix[intr].devname,
    sizeof(enic->msix[intr].devname),
    "%s-tx-%u", netdev->name, i);
    enic->msix[intr].isr = enic_isr_msix;
    enic->msix[intr].devid = &enic->napi[wq];
    snprintf(enic->msix[intr].devname,
    sizeof(enic->msix[intr].devname),
    "%s-err", netdev->name);
    enic->msix[intr].isr = enic_isr_msix_err;
    enic->msix[intr].devid = enic;
    snprintf(enic->msix[intr].devname,
    sizeof(enic->msix[intr].devname),
    "%s-notify", netdev->name);
    enic->msix[intr].isr = enic_isr_msix_notify;
    enic->msix[intr].devid = enic;
    for (i = 0; i < enic->intr_count; i++)
    enic->msix[i].requested = 0;
    for (i = 0; i < enic->intr_count; i++) {
    err = request_irq(enic->msix_entry[i].vector,
    enic->msix[i].isr, 0,
    enic->msix[i].devname,
    enic->msix[i].devid);
    enic->msix[i].requested = 1;
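The naming loops in enic_request_intr() imply the MSI-X vector layout: one "%s-rx-%u" vector per RQ, then one "%s-tx-%u" per WQ, then "%s-err" and "%s-notify" (enic_poll_msix_wq() computing wq_index as the napi offset minus rq_count corroborates the RQ-first ordering). A sketch of that indexing, treated as an assumption drawn from the loop order rather than a transcription of the driver's enic_msix_*_intr() helpers:

    /* Sketch of the MSI-X vector layout implied by enic_request_intr().
     * The RQ-first, WQ-next, err/notify-last ordering is inferred.
     */
    #include <stdio.h>

    static unsigned rq_intr(unsigned rq) { return rq; }
    static unsigned wq_intr(unsigned rqs, unsigned wq) { return rqs + wq; }
    static unsigned err_intr(unsigned rqs, unsigned wqs) { return rqs + wqs; }
    static unsigned notify_intr(unsigned rqs, unsigned wqs) { return rqs + wqs + 1; }

    int main(void)
    {
        unsigned rq_count = 4, wq_count = 4;

        for (unsigned i = 0; i < rq_count; i++)
            printf("eth0-rx-%u  -> vector %u\n", i, rq_intr(i));
        for (unsigned i = 0; i < wq_count; i++)
            printf("eth0-tx-%u  -> vector %u\n", i, wq_intr(rq_count, i));
        printf("eth0-err    -> vector %u\n", err_intr(rq_count, wq_count));
        printf("eth0-notify -> vector %u\n", notify_intr(rq_count, wq_count));
        return 0;
    }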
enic_synchronize_irqs():
    switch (vnic_dev_get_intr_mode(enic->vdev)) {
    synchronize_irq(enic->pdev->irq);
    for (i = 0; i < enic->intr_count; i++)
    synchronize_irq(enic->msix_entry[i].vector);

enic_dev_notify_set():
    spin_lock_bh(&enic->devcmd_lock);
    switch (vnic_dev_get_intr_mode(enic->vdev)) {
    err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR);
    err = vnic_dev_notify_set(enic->vdev,
    err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
    spin_unlock_bh(&enic->devcmd_lock);

enic_notify_timer_start():
    switch (vnic_dev_get_intr_mode(enic->vdev)) {
    mod_timer(&enic->notify_timer, jiffies);
    /* Using intr for notification for INTx/MSI-X */

enic_open():
    for (i = 0; i < enic->rq_count; i++) {
    vnic_rq_enable(&enic->rq[i].vrq);
    vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
    if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) {
    err = -ENOMEM;
    for (i = 0; i < enic->wq_count; i++)
    vnic_wq_enable(&enic->wq[i].vwq);
    for (i = 0; i < enic->rq_count; i++)
    napi_enable(&enic->napi[i]);
    if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
    for (i = 0; i < enic->wq_count; i++)
    napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
    for (i = 0; i < enic->intr_count; i++)
    vnic_intr_unmask(&enic->intr[i]);
    for (i = 0; i < enic->rq_count; i++) {
    ret = vnic_rq_disable(&enic->rq[i].vrq);
    vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);

enic_stop():
    for (i = 0; i < enic->intr_count; i++) {
    vnic_intr_mask(&enic->intr[i]);
    (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
    del_timer_sync(&enic->notify_timer);
    for (i = 0; i < enic->rq_count; i++)
    napi_disable(&enic->napi[i]);
    if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
    for (i = 0; i < enic->wq_count; i++)
    napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
    for (i = 0; i < enic->wq_count; i++) {
    err = vnic_wq_disable(&enic->wq[i].vwq);
    for (i = 0; i < enic->rq_count; i++) {
    err = vnic_rq_disable(&enic->rq[i].vrq);
    for (i = 0; i < enic->wq_count; i++)
    vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
    for (i = 0; i < enic->rq_count; i++)
    vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
    for (i = 0; i < enic->cq_count; i++)
    vnic_cq_clean(&enic->cq[i]);
    for (i = 0; i < enic->intr_count; i++)
    vnic_intr_clean(&enic->intr[i]);

_enic_change_mtu():
    WRITE_ONCE(netdev->mtu, new_mtu);

enic_change_mtu():
    return -EOPNOTSUPP;
    if (netdev->mtu > enic->port_mtu)
    netdev->mtu, enic->port_mtu);

enic_change_mtu_work():
    struct net_device *netdev = enic->netdev;
    int new_mtu = vnic_dev_mtu(enic->vdev);
    netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);

enic_poll_controller():
    struct vnic_dev *vdev = enic->vdev;
    for (i = 0; i < enic->rq_count; i++) {
    enic_isr_msix(enic->msix_entry[intr].vector,
    &enic->napi[i]);
    for (i = 0; i < enic->wq_count; i++) {
    enic_isr_msix(enic->msix_entry[intr].vector,
    &enic->napi[enic_cq_wq(enic, i)]);
    enic_isr_msi(enic->pdev->irq, enic);
    enic_isr_legacy(enic->pdev->irq, netdev);

enic_dev_wait():
    return -ETIMEDOUT;

enic_dev_open():
    err = enic_dev_wait(enic->vdev, vnic_dev_open,

enic_dev_soft_reset():
    err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
    netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",

enic_dev_hang_reset():
    err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
    netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",

__enic_set_rsskey():
    rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
    return -ENOMEM;
    rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
    spin_lock_bh(&enic->devcmd_lock);
    spin_unlock_bh(&enic->devcmd_lock);
    dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),

enic_set_rsskey():
    netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

enic_set_rsscpu():
    rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
    return -ENOMEM;
    (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
    spin_lock_bh(&enic->devcmd_lock);
    spin_unlock_bh(&enic->devcmd_lock);
    dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
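enic_set_rsscpu() fills the RSS indirection table round-robin: entry i points at queue i % rq_count, packed four entries per element via cpu[i/4].b[i%4]. A sketch of the fill, with an assumed table size:

    /* Sketch of the round-robin RSS indirection fill in
     * enic_set_rsscpu(). The table size is an assumption.
     */
    #include <stdio.h>

    #define RSS_CPU_ENTRIES 64   /* assumed indirection table size */

    int main(void)
    {
        unsigned rq_count = 6;
        unsigned char table[RSS_CPU_ENTRIES];

        for (unsigned i = 0; i < RSS_CPU_ENTRIES; i++)
            table[i] = i % rq_count;      /* spread flows over RQs */

        for (unsigned i = 0; i < 12; i++)
            printf("%u ", table[i]);      /* 0 1 2 3 4 5 0 1 2 3 4 5 */
        printf("...\n");
        return 0;
    }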
enic_set_niccfg():
    spin_lock_bh(&enic->devcmd_lock);
    spin_unlock_bh(&enic->devcmd_lock);

enic_set_rss_nic_cfg():
    u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
    spin_lock_bh(&enic->devcmd_lock);
    res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
    spin_unlock_bh(&enic->devcmd_lock);

enic_set_api_busy():
    spin_lock(&enic->enic_api_lock);
    enic->enic_api_busy = busy;
    spin_unlock(&enic->enic_api_lock);

enic_reset():
    if (!netif_running(enic->netdev))
    enic_stop(enic->netdev);
    enic_open(enic->netdev);
    call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

enic_tx_hang_reset():
    enic_stop(enic->netdev);
    enic_open(enic->netdev);
    call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

enic_set_intr_mode():
    /* Set interrupt mode (INTx, MSI, MSI-X) depending
    * Try MSI-X first
    if (enic->config.intr_mode < 1 &&
    enic->intr_avail >= ENIC_MSIX_MIN_INTR) {
    for (i = 0; i < enic->intr_avail; i++)
    enic->msix_entry[i].entry = i;
    num_intr = pci_enable_msix_range(enic->pdev, enic->msix_entry,
    enic->intr_avail);
    vnic_dev_set_intr_mode(enic->vdev,
    enic->intr_avail = num_intr;
    if (enic->config.intr_mode < 2 &&
    enic->intr_avail >= 1 &&
    !pci_enable_msi(enic->pdev)) {
    enic->intr_avail = 1;
    vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
    if (enic->config.intr_mode < 3 &&
    enic->intr_avail >= 3) {
    enic->intr_avail = 3;
    vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
    vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
    return -EINVAL;

enic_clear_intr_mode():
    switch (vnic_dev_get_intr_mode(enic->vdev)) {
    pci_disable_msix(enic->pdev);
    pci_disable_msi(enic->pdev);
    vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

enic_adjust_resources():
    if (enic->rq_avail < 1 || enic->wq_avail < 1 || enic->cq_avail < 2) {
    enic->rq_avail, enic->wq_avail,
    enic->cq_avail);
    return -ENOSPC;
    enic->rq_avail = 1;
    enic->wq_avail = 1;
    enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
    enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
    enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
    enic->rq_avail = 1;
    switch (vnic_dev_get_intr_mode(enic->vdev)) {
    enic->rq_count = 1;
    enic->wq_count = 1;
    enic->cq_count = 2;
    enic->intr_count = enic->intr_avail;
    wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
    rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
    max_queues = min(enic->cq_avail,
    enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
    enic->rq_count = rq_avail;
    enic->wq_count = wq_avail;
    enic->rq_count = min(rq_avail, max_queues / 2);
    enic->wq_count = max_queues - enic->rq_count;
    enic->wq_count = min(wq_avail, max_queues / 2);
    enic->rq_count = max_queues - enic->wq_count;
    enic->cq_count = enic->rq_count + enic->wq_count;
    enic->intr_count = enic->cq_count + ENIC_MSIX_RESERVED_INTR;
    return -EINVAL;
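enic_adjust_resources() splits max_queues (bounded by available CQs and by interrupt vectors minus ENIC_MSIX_RESERVED_INTR) between RQs and WQs: when everything fits, both take their full avail; otherwise one side gets at most half and the other takes the remainder. A sketch of the arithmetic; the tie-break choosing which arm runs is an assumption, since the fragment shows only the assignments:

    /* Sketch of the queue split in enic_adjust_resources(). The
     * else-if condition is assumed; the three assignment arms mirror
     * the fragment above.
     */
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned rq_avail = 8, wq_avail = 8;
        unsigned cq_avail = 10, intr_avail = 12, reserved = 2;

        unsigned max_queues = MIN(cq_avail, intr_avail - reserved);
        unsigned rq_count, wq_count;

        if (max_queues >= rq_avail + wq_avail) {
            rq_count = rq_avail;          /* everything fits */
            wq_count = wq_avail;
        } else if (rq_avail < wq_avail) { /* assumed tie-break */
            rq_count = MIN(rq_avail, max_queues / 2);
            wq_count = max_queues - rq_count;
        } else {
            wq_count = MIN(wq_avail, max_queues / 2);
            rq_count = max_queues - wq_count;
        }

        printf("rq=%u wq=%u cq=%u intr=%u\n", rq_count, wq_count,
               rq_count + wq_count, rq_count + wq_count + reserved);
        /* rq=5 wq=5 cq=10 intr=12 */
        return 0;
    }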
enic_get_queue_stats_rx():
    struct enic_rq_stats *rqstats = &enic->rq[idx].stats;
    rxs->bytes = rqstats->bytes;
    rxs->packets = rqstats->packets;
    rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated;
    rxs->hw_drop_overruns = rqstats->pkt_truncated;
    rxs->csum_unnecessary = rqstats->csum_unnecessary +
    rqstats->csum_unnecessary_encap;

enic_get_queue_stats_tx():
    struct netdev_queue_stats_tx *txs)
    struct enic_wq_stats *wqstats = &enic->wq[idx].stats;
    txs->bytes = wqstats->bytes;
    txs->packets = wqstats->packets;
    txs->csum_none = wqstats->csum_none;
    txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum +
    wqstats->tso;
    txs->hw_gso_packets = wqstats->tso;
    txs->stop = wqstats->stopped;
    txs->wake = wqstats->wake;

enic_get_base_stats():
    struct netdev_queue_stats_tx *txs)
    rxs->bytes = 0;
    rxs->packets = 0;
    rxs->hw_drops = 0;
    rxs->hw_drop_overruns = 0;
    rxs->csum_unnecessary = 0;
    txs->bytes = 0;
    txs->packets = 0;
    txs->csum_none = 0;
    txs->needs_csum = 0;
    txs->hw_gso_packets = 0;
    txs->stop = 0;
    txs->wake = 0;

enic_free_enic_resources():
    kfree(enic->wq);
    enic->wq = NULL;
    kfree(enic->rq);
    enic->rq = NULL;
    kfree(enic->cq);
    enic->cq = NULL;
    kfree(enic->napi);
    enic->napi = NULL;
    kfree(enic->msix_entry);
    enic->msix_entry = NULL;
    kfree(enic->msix);
    enic->msix = NULL;
    kfree(enic->intr);
    enic->intr = NULL;

enic_alloc_enic_resources():
    enic->wq = kcalloc(enic->wq_avail, sizeof(struct enic_wq), GFP_KERNEL);
    if (!enic->wq)
    enic->rq = kcalloc(enic->rq_avail, sizeof(struct enic_rq), GFP_KERNEL);
    if (!enic->rq)
    enic->cq = kcalloc(enic->cq_avail, sizeof(struct vnic_cq), GFP_KERNEL);
    if (!enic->cq)
    enic->napi = kcalloc(enic->wq_avail + enic->rq_avail,
    if (!enic->napi)
    enic->msix_entry = kcalloc(enic->intr_avail, sizeof(struct msix_entry),
    if (!enic->msix_entry)
    enic->msix = kcalloc(enic->intr_avail, sizeof(struct enic_msix_entry),
    if (!enic->msix)
    enic->intr = kcalloc(enic->intr_avail, sizeof(struct vnic_intr),
    if (!enic->intr)
    return -ENOMEM;

enic_dev_deinit():
    for (i = 0; i < enic->rq_count; i++)
    __netif_napi_del(&enic->napi[i]);
    if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
    for (i = 0; i < enic->wq_count; i++)
    __netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

enic_dev_init():
    struct net_device *netdev = enic->netdev;
    vnic_dev_intr_coal_timer_info_default(enic->vdev);
    switch (vnic_dev_get_intr_mode(enic->vdev)) {
    netif_napi_add(netdev, &enic->napi[0], enic_poll);
    for (i = 0; i < enic->rq_count; i++) {
    netif_napi_add(netdev, &enic->napi[i],
    for (i = 0; i < enic->wq_count; i++)
    &enic->napi[enic_cq_wq(enic, i)],

enic_iounmap():
    for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
    if (enic->bar[i].vaddr)
    iounmap(enic->bar[i].vaddr);

enic_probe():
    struct device *dev = &pdev->dev;
    return -ENOMEM;
    SET_NETDEV_DEV(netdev, &pdev->dev);
    enic->netdev = netdev;
    enic->pdev = pdev;
    * limitation for the device. Try 47-bit first, and
    * fail to 32-bit.
    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    /* Map vNIC resources from BAR0-5
    for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
    enic->bar[i].len = pci_resource_len(pdev, i);
    enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
    if (!enic->bar[i].vaddr) {
    dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
    err = -ENODEV;
    enic->bar[i].bus_addr = pci_resource_start(pdev, i);
    enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
    ARRAY_SIZE(enic->bar));
    if (!enic->vdev) {
    err = -ENODEV;
    err = vnic_devcmd_init(enic->vdev);
    &enic->num_vfs);
    if (enic->num_vfs) {
    err = pci_enable_sriov(pdev, enic->num_vfs);
    enic->priv_flags |= ENIC_SRIOV_ENABLED;
    num_pps = enic->num_vfs;
    enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
    if (!enic->pp) {
    err = -ENOMEM;
    spin_lock_init(&enic->devcmd_lock);
    spin_lock_init(&enic->enic_api_lock);
    /* Issue device init to initialize the vnic-to-switch link.
    * to wait here for the vnic-to-switch link initialization
    err = vnic_dev_init(enic->vdev, 0);
    netif_set_real_num_tx_queues(netdev, enic->wq_count);
    netif_set_real_num_rx_queues(netdev, enic->rq_count);
    timer_setup(&enic->notify_timer, enic_notify_timer, 0);
    INIT_WORK(&enic->reset, enic_reset);
    INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
    INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
    for (i = 0; i < enic->wq_count; i++)
    spin_lock_init(&enic->wq[i].lock);
    enic->port_mtu = enic->config.mtu;
    err = enic_set_mac_addr(netdev, enic->mac_addr);
    enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
    enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
    netdev->netdev_ops = &enic_netdev_dynamic_ops;
    netdev->netdev_ops = &enic_netdev_ops;
    netdev->stat_ops = &enic_netdev_stat_ops;
    netdev->watchdog_timeo = 2 * HZ;
    netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
    netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
    enic->loop_enable = 1;
    enic->loop_tag = enic->config.loop_tag;
    dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
    netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
    netdev->hw_features |= NETIF_F_TSO |
    netdev->hw_features |= NETIF_F_RXHASH;
    netdev->hw_features |= NETIF_F_RXCSUM;
    netdev->hw_enc_features |= NETIF_F_RXCSUM |
    netdev->hw_features |= netdev->hw_enc_features;
    err = vnic_dev_get_supported_feature_ver(enic->vdev,
    enic->vxlan.flags = (u8)a1;
    patch_level = patch_level ? patch_level - 1 : 0;
    enic->vxlan.patch_level = patch_level;
    if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 ||
    enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) {
    netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4;
    if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)
    netdev->udp_tunnel_nic_info = &enic_udp_tunnels;
    netdev->features |= netdev->hw_features;
    netdev->vlan_features |= netdev->features;
    netdev->hw_features |= NETIF_F_NTUPLE;
    netdev->features |= NETIF_F_HIGHDMA;
    netdev->priv_flags |= IFF_UNICAST_FLT;
    /* MTU range: 68 - 9000 */
    netdev->min_mtu = ENIC_MIN_MTU;
    netdev->max_mtu = ENIC_MAX_MTU;
    netdev->mtu = enic->port_mtu;
    enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
    vnic_dev_close(enic->vdev);
    kfree(enic->pp);
    enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
    vnic_dev_unregister(enic->vdev);

enic_remove():
    cancel_work_sync(&enic->reset);
    cancel_work_sync(&enic->change_mtu_work);
    vnic_dev_close(enic->vdev);
    enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
    kfree(enic->pp);
    vnic_dev_unregister(enic->vdev);