Lines Matching +full:dcb +full:- +full:algorithm

4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
109 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
126 #define FW4_CFNAME "cxgb4/t4-config.txt"
127 #define FW5_CFNAME "cxgb4/t5-config.txt"
128 #define FW6_CFNAME "cxgb4/t6-config.txt"
144 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
154 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
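
Taken together with the ordering described in the comment above, the msi parameter acts as a cap on how far up the MSI-X > MSI > INTx ladder the driver will go. A minimal standalone sketch of that fallback decision (toy pick_irq_type(), not the driver's probe logic):

#include <stdio.h>

enum irq_type { INTX = 0, MSI = 1, MSIX = 2 };

/* Cap the interrupt type at the module-parameter preference, falling
 * back down the MSI-X -> MSI -> INTx ladder when a mode is unavailable. */
static enum irq_type pick_irq_type(int msi_param, int have_msix, int have_msi)
{
	if (msi_param >= MSIX && have_msix)
		return MSIX;
	if (msi_param >= MSI && have_msi)
		return MSI;
	return INTX;
}

int main(void)
{
	printf("%d\n", pick_irq_type(2, 1, 1)); /* 2: MSI-X allowed and present */
	printf("%d\n", pick_irq_type(1, 1, 1)); /* 1: capped at MSI by the knob */
	printf("%d\n", pick_irq_type(2, 0, 1)); /* 1: MSI-X missing, fall back  */
	return 0;
}
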
158 * offset by 2 bytes in order to have the IP headers line up on 4-byte
160 * a machine check fault if an attempt is made to access one of the 4-byte IP
161 * header fields on a non-4-byte boundary. And it's a major performance issue
164 * edge-case performance sensitive applications (like forwarding large volumes
166 * PCI-E Bus transfers enough to measurably affect performance.
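
A small standalone illustration of the alignment point made in this comment: with a 14-byte Ethernet header, the IP header lands on a 2-byte boundary unless 2 bytes of headroom are reserved first. ETH_HLEN and RX_PAD below are illustrative constants, not the driver's macros:

#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN 14   /* Ethernet header length                     */
#define RX_PAD    2   /* the 2-byte offset discussed in the comment */

int main(void)
{
	uintptr_t buf = 0x1000;                  /* assume a 4-byte aligned RX buffer    */
	uintptr_t ip_no_pad = buf + ETH_HLEN;    /* IP header on a 2-byte boundary       */
	uintptr_t ip_padded = buf + RX_PAD + ETH_HLEN;   /* back on a 4-byte boundary    */

	printf("no pad : IP header %% 4 = %u\n", (unsigned)(ip_no_pad % 4));
	printf("padded : IP header %% 4 = %u\n", (unsigned)(ip_padded % 4));
	return 0;
}
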
170 /* TX Queue select used to determine what algorithm to use for selecting TX
199 switch (p->link_cfg.speed) { in link_report()
223 dev->name, p->link_cfg.speed); in link_report()
227 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, in link_report()
228 fc[p->link_cfg.fc]); in link_report()
237 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable()
238 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
241 /* We use a simple mapping of Port TX Queue Index to DCB in dcb_tx_queue_prio_enable()
242 * Priority when we're enabling DCB. in dcb_tx_queue_prio_enable()
244 for (i = 0; i < pi->nqsets; i++, txq++) { in dcb_tx_queue_prio_enable()
251 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); in dcb_tx_queue_prio_enable()
258 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in dcb_tx_queue_prio_enable()
260 -FW_CMD_MAX_TIMEOUT); in dcb_tx_queue_prio_enable()
263 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
264 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n", in dcb_tx_queue_prio_enable()
265 enable ? "set" : "unset", pi->port_id, i, -err); in dcb_tx_queue_prio_enable()
267 txq->dcb_prio = enable ? value : 0; in dcb_tx_queue_prio_enable()
275 if (!pi->dcb.enabled) in cxgb4_dcb_enabled()
278 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || in cxgb4_dcb_enabled()
279 (pi->dcb.state == CXGB4_DCB_STATE_HOST)); in cxgb4_dcb_enabled()
285 struct net_device *dev = adapter->port[port_id]; in t4_os_link_changed()
311 struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
314 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) in t4_os_portmod_changed()
316 else if (pi->mod_type < ARRAY_SIZE(mod_str)) in t4_os_portmod_changed()
317 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); in t4_os_portmod_changed()
318 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) in t4_os_portmod_changed()
320 dev->name); in t4_os_portmod_changed()
321 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) in t4_os_portmod_changed()
323 dev->name); in t4_os_portmod_changed()
324 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) in t4_os_portmod_changed()
325 netdev_info(dev, "%s: transceiver module error\n", dev->name); in t4_os_portmod_changed()
328 dev->name, pi->mod_type); in t4_os_portmod_changed()
333 pi->link_cfg.redo_l1cfg = netif_running(dev); in t4_os_portmod_changed()
350 struct adapter *adap = pi->adapter; in cxgb4_set_addr_hash()
356 list_for_each_entry(entry, &adap->mac_hlist, list) { in cxgb4_set_addr_hash()
357 ucast |= is_unicast_ether_addr(entry->addr); in cxgb4_set_addr_hash()
358 vec |= (1ULL << hash_mac_addr(entry->addr)); in cxgb4_set_addr_hash()
360 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, in cxgb4_set_addr_hash()
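
The fragments above build a 64-bit hash vector with one bit per tracked MAC address and note whether any of them is unicast. A standalone toy version of that idea; hash_mac() here is a stand-in XOR hash, not the hardware's hash function:

#include <stdio.h>
#include <stdint.h>

static unsigned int hash_mac(const uint8_t *mac)
{
	unsigned int h = 0;

	for (int i = 0; i < 6; i++)
		h ^= mac[i];
	return h & 0x3f;                          /* 64 buckets -> one bit each */
}

int main(void)
{
	const uint8_t macs[][6] = {
		{ 0x00, 0x07, 0x43, 0x11, 0x22, 0x33 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },   /* multicast */
	};
	uint64_t vec = 0;
	int ucast = 0;

	for (size_t i = 0; i < sizeof(macs) / sizeof(macs[0]); i++) {
		ucast |= !(macs[i][0] & 1);       /* toy unicast test: multicast bit clear */
		vec |= 1ULL << hash_mac(macs[i]);
	}
	printf("ucast=%d vec=0x%016llx\n", ucast, (unsigned long long)vec);
	return 0;
}
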
367 struct adapter *adap = pi->adapter; in cxgb4_mac_sync()
382 ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist, in cxgb4_mac_sync()
393 return -ENOMEM; in cxgb4_mac_sync()
394 ether_addr_copy(new_entry->addr, mac_addr); in cxgb4_mac_sync()
395 list_add_tail(&new_entry->list, &adap->mac_hlist); in cxgb4_mac_sync()
405 struct adapter *adap = pi->adapter; in cxgb4_mac_unsync()
413 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { in cxgb4_mac_unsync()
414 if (ether_addr_equal(entry->addr, mac_addr)) { in cxgb4_mac_unsync()
415 list_del(&entry->list); in cxgb4_mac_unsync()
421 ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false); in cxgb4_mac_unsync()
422 return ret < 0 ? -EINVAL : 0; in cxgb4_mac_unsync()
427 * If @mtu is -1 it is left unchanged.
432 struct adapter *adapter = pi->adapter; in set_rxmode()
437 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror, in set_rxmode()
438 mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, in set_rxmode()
439 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, in set_rxmode()
444 * cxgb4_change_mac - Update match filter for a MAC address.
448 * or -1
464 struct adapter *adapter = pi->adapter; in cxgb4_change_mac()
468 ret = t4_change_mac(adapter, adapter->mbox, viid, in cxgb4_change_mac()
471 if (ret == -ENOMEM) { in cxgb4_change_mac()
475 list_for_each_entry(entry, &adapter->mac_hlist, list) { in cxgb4_change_mac()
476 if (entry->iface_mac) { in cxgb4_change_mac()
477 ether_addr_copy(entry->addr, addr); in cxgb4_change_mac()
483 return -ENOMEM; in cxgb4_change_mac()
484 ether_addr_copy(new_entry->addr, addr); in cxgb4_change_mac()
485 new_entry->iface_mac = true; in cxgb4_change_mac()
486 list_add_tail(&new_entry->list, &adapter->mac_hlist); in cxgb4_change_mac()
498 * link_start - enable a port
506 unsigned int mb = pi->adapter->mbox; in link_start()
513 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror, in link_start()
514 dev->mtu, -1, -1, -1, in link_start()
515 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); in link_start()
517 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, in link_start()
518 dev->dev_addr, true, &pi->smt_idx); in link_start()
520 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan, in link_start()
521 &pi->link_cfg); in link_start()
524 ret = t4_enable_pi_params(pi->adapter, mb, pi, true, in link_start()
536 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); in dcb_rpl()
537 struct net_device *dev = adap->port[adap->chan_map[port]]; in dcb_rpl()
544 /* If the DCB has become enabled or disabled on the port then we're in dcb_rpl()
545 * going to need to set up/tear down DCB Priority parameters for the in dcb_rpl()
558 u8 opcode = ((const struct rss_header *)rsp)->opcode; in fwevtq_handler()
565 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) { in fwevtq_handler()
567 opcode = ((const struct rss_header *)rsp)->opcode; in fwevtq_handler()
570 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
578 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid)); in fwevtq_handler()
581 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
582 txq->restarts++; in fwevtq_handler()
583 if (txq->q_type == CXGB4_TXQ_ETH) { in fwevtq_handler()
587 t4_sge_eth_txq_egress_update(q->adap, eq, -1); in fwevtq_handler()
592 tasklet_schedule(&oq->qresume_tsk); in fwevtq_handler()
598 const struct fw_port_cmd *pcmd = (const void *)p->data; in fwevtq_handler()
599 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid)); in fwevtq_handler()
601 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); in fwevtq_handler()
607 be32_to_cpu(pcmd->op_to_portid)); in fwevtq_handler()
611 dev = q->adap->port[q->adap->chan_map[port]]; in fwevtq_handler()
613 ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F) in fwevtq_handler()
614 : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32) in fwevtq_handler()
625 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
628 if (p->type == 0) in fwevtq_handler()
629 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
633 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
637 do_smt_write_rpl(q->adap, p); in fwevtq_handler()
641 filter_rpl(q->adap, p); in fwevtq_handler()
645 hash_filter_rpl(q->adap, p); in fwevtq_handler()
649 hash_del_filter_rpl(q->adap, p); in fwevtq_handler()
653 do_srq_table_rpl(q->adap, p); in fwevtq_handler()
655 dev_err(q->adap->pdev_dev, in fwevtq_handler()
663 if (adapter->flags & CXGB4_USING_MSIX) { in disable_msi()
664 pci_disable_msix(adapter->pdev); in disable_msi()
665 adapter->flags &= ~CXGB4_USING_MSIX; in disable_msi()
666 } else if (adapter->flags & CXGB4_USING_MSI) { in disable_msi()
667 pci_disable_msi(adapter->pdev); in disable_msi()
668 adapter->flags &= ~CXGB4_USING_MSI; in disable_msi()
673 * Interrupt handler for non-data events used with MSI-X.
681 adap->swintr = 1; in t4_nondata_intr()
684 if (adap->flags & CXGB4_MASTER_PF) in t4_nondata_intr()
695 dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n"); in cxgb4_set_msix_aff()
696 return -ENOMEM; in cxgb4_set_msix_aff()
699 cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)), in cxgb4_set_msix_aff()
704 dev_warn(adap->pdev_dev, in cxgb4_set_msix_aff()
719 struct sge *s = &adap->sge; in request_msix_queue_irqs()
723 if (s->fwevtq_msix_idx < 0) in request_msix_queue_irqs()
724 return -ENOMEM; in request_msix_queue_irqs()
726 err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec, in request_msix_queue_irqs()
728 adap->msix_info[s->fwevtq_msix_idx].desc, in request_msix_queue_irqs()
729 &s->fw_evtq); in request_msix_queue_irqs()
734 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
735 err = request_irq(minfo->vec, in request_msix_queue_irqs()
737 minfo->desc, in request_msix_queue_irqs()
738 &s->ethrxq[ethqidx].rspq); in request_msix_queue_irqs()
742 cxgb4_set_msix_aff(adap, minfo->vec, in request_msix_queue_irqs()
743 &minfo->aff_mask, ethqidx); in request_msix_queue_irqs()
748 while (--ethqidx >= 0) { in request_msix_queue_irqs()
749 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
750 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); in request_msix_queue_irqs()
751 free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq); in request_msix_queue_irqs()
753 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in request_msix_queue_irqs()
759 struct sge *s = &adap->sge; in free_msix_queue_irqs()
763 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in free_msix_queue_irqs()
765 minfo = s->ethrxq[i].msix; in free_msix_queue_irqs()
766 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); in free_msix_queue_irqs()
767 free_irq(minfo->vec, &s->ethrxq[i].rspq); in free_msix_queue_irqs()
785 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
787 dev_warn(adap->pdev_dev, in setup_ppod_edram()
790 return -1; in setup_ppod_edram()
794 return -1; in setup_ppod_edram()
796 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
798 dev_err(adap->pdev_dev, in setup_ppod_edram()
800 return -1; in setup_ppod_edram()
814 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, in adap_config_hpfilter()
821 dev_err(adapter->pdev_dev, in adap_config_hpfilter()
828 struct adapter *adap = pi->adapter; in cxgb4_config_rss()
831 ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss, in cxgb4_config_rss()
841 return t4_config_vi_rss(adap, adap->mbox, viid, in cxgb4_config_rss()
851 * cxgb4_write_rss - write the RSS table for a given port
861 struct adapter *adapter = pi->adapter; in cxgb4_write_rss()
866 rxq = &adapter->sge.ethrxq[pi->first_qset]; in cxgb4_write_rss()
867 rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL); in cxgb4_write_rss()
869 return -ENOMEM; in cxgb4_write_rss()
872 for (i = 0; i < pi->rss_size; i++, queues++) in cxgb4_write_rss()
875 err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid); in cxgb4_write_rss()
881 * setup_rss - configure RSS
894 for (j = 0; j < pi->rss_size; j++) in setup_rss()
895 pi->rss[j] = j % pi->nqsets; in setup_rss()
897 err = cxgb4_write_rss(pi, pi->rss); in setup_rss()
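
The modulo fill shown above spreads the RSS indirection slots round-robin across the port's queue sets. A self-contained sketch of the same fill, with made-up sizes:

#include <stdio.h>

int main(void)
{
	unsigned int rss_size = 16, nqsets = 4;   /* illustrative sizes */
	unsigned short rss[16];

	/* slot j points at queue (j mod nqsets), cycling through the queues */
	for (unsigned int j = 0; j < rss_size; j++)
		rss[j] = j % nqsets;

	for (unsigned int j = 0; j < rss_size; j++)
		printf("slot %2u -> queue %u\n", j, (unsigned)rss[j]);
	return 0;
}
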
909 qid -= p->ingr_start; in rxq_to_chan()
910 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; in rxq_to_chan()
915 if (q->handler) in cxgb4_quiesce_rx()
916 napi_disable(&q->napi); in cxgb4_quiesce_rx()
926 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
927 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
939 struct sge *s = &adap->sge; in disable_interrupts()
941 if (adap->flags & CXGB4_FULL_INIT_DONE) { in disable_interrupts()
943 if (adap->flags & CXGB4_USING_MSIX) { in disable_interrupts()
945 free_irq(adap->msix_info[s->nd_msix_idx].vec, in disable_interrupts()
948 free_irq(adap->pdev->irq, adap); in disable_interrupts()
956 if (q->handler) in cxgb4_enable_rx()
957 napi_enable(&q->napi); in cxgb4_enable_rx()
959 /* 0-increment GTS to start the timer and enable interrupts */ in cxgb4_enable_rx()
961 SEINTARM_V(q->intr_params) | in cxgb4_enable_rx()
962 INGRESSQID_V(q->cntxt_id)); in cxgb4_enable_rx()
972 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
973 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
986 adap->sge.nd_msix_idx = -1; in setup_non_data_intr()
987 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_non_data_intr()
990 /* Request MSI-X vector for non-data interrupt */ in setup_non_data_intr()
993 return -ENOMEM; in setup_non_data_intr()
995 snprintf(adap->msix_info[msix].desc, in setup_non_data_intr()
996 sizeof(adap->msix_info[msix].desc), in setup_non_data_intr()
997 "%s", adap->port[0]->name); in setup_non_data_intr()
999 adap->sge.nd_msix_idx = msix; in setup_non_data_intr()
1005 struct sge *s = &adap->sge; in setup_fw_sge_queues()
1008 bitmap_zero(s->starving_fl, s->egr_sz); in setup_fw_sge_queues()
1009 bitmap_zero(s->txq_maperr, s->egr_sz); in setup_fw_sge_queues()
1011 if (adap->flags & CXGB4_USING_MSIX) { in setup_fw_sge_queues()
1012 s->fwevtq_msix_idx = -1; in setup_fw_sge_queues()
1015 return -ENOMEM; in setup_fw_sge_queues()
1017 snprintf(adap->msix_info[msix].desc, in setup_fw_sge_queues()
1018 sizeof(adap->msix_info[msix].desc), in setup_fw_sge_queues()
1019 "%s-FWeventq", adap->port[0]->name); in setup_fw_sge_queues()
1021 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_fw_sge_queues()
1022 NULL, NULL, NULL, -1); in setup_fw_sge_queues()
1025 msix = -((int)s->intrq.abs_id + 1); in setup_fw_sge_queues()
1028 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_fw_sge_queues()
1029 msix, NULL, fwevtq_handler, NULL, -1); in setup_fw_sge_queues()
1033 s->fwevtq_msix_idx = msix; in setup_fw_sge_queues()
1038 * setup_sge_queues - configure SGE Tx/Rx/response queues
1042 * We support multiple queue sets per port if we have MSI-X, otherwise
1048 struct sge *s = &adap->sge; in setup_sge_queues()
1053 rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; in setup_sge_queues()
1055 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_sge_queues()
1056 msix = -((int)s->intrq.abs_id + 1); in setup_sge_queues()
1059 struct net_device *dev = adap->port[i]; in setup_sge_queues()
1061 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
1062 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; in setup_sge_queues()
1064 for (j = 0; j < pi->nqsets; j++, q++) { in setup_sge_queues()
1072 snprintf(adap->msix_info[msix].desc, in setup_sge_queues()
1073 sizeof(adap->msix_info[msix].desc), in setup_sge_queues()
1074 "%s-Rx%d", dev->name, j); in setup_sge_queues()
1075 q->msix = &adap->msix_info[msix]; in setup_sge_queues()
1078 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
1079 msix, &q->fl, in setup_sge_queues()
1083 pi->tx_chan)); in setup_sge_queues()
1086 q->rspq.idx = j; in setup_sge_queues()
1087 memset(&q->stats, 0, sizeof(q->stats)); in setup_sge_queues()
1090 q = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
1091 for (j = 0; j < pi->nqsets; j++, t++, q++) { in setup_sge_queues()
1094 q->rspq.cntxt_id, in setup_sge_queues()
1095 !!(adap->flags & CXGB4_SGE_DBQ_TIMER)); in setup_sge_queues()
1106 cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id; in setup_sge_queues()
1108 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
1109 s->fw_evtq.cntxt_id, cmplqid); in setup_sge_queues()
1114 if (!is_t4(adap->params.chip)) { in setup_sge_queues()
1115 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], in setup_sge_queues()
1116 netdev_get_tx_queue(adap->port[0], 0) in setup_sge_queues()
1117 , s->fw_evtq.cntxt_id, false); in setup_sge_queues()
1122 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
1125 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
1126 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id)); in setup_sge_queues()
1129 dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); in setup_sge_queues()
1153 "TX Packet without VLAN Tag on DCB Link\n"); in cxgb_select_queue()
1158 if (skb->protocol == htons(ETH_P_FCOE)) in cxgb_select_queue()
1159 txq = skb->priority & 0x7; in cxgb_select_queue()
1166 if (dev->num_tc) { in cxgb_select_queue()
1170 ver = ip_hdr(skb)->version; in cxgb_select_queue()
1171 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : in cxgb_select_queue()
1172 ip_hdr(skb)->protocol; in cxgb_select_queue()
1177 skb->encapsulation || in cxgb_select_queue()
1180 txq = txq % pi->nqsets; in cxgb_select_queue()
1190 while (unlikely(txq >= dev->real_num_tx_queues)) in cxgb_select_queue()
1191 txq -= dev->real_num_tx_queues; in cxgb_select_queue()
1196 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in cxgb_select_queue()
1203 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { in closest_timer()
1204 delta = time - s->timer_val[i]; in closest_timer()
1206 delta = -delta; in closest_timer()
1219 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { in closest_thres()
1220 delta = thres - s->counter_val[i]; in closest_thres()
1222 delta = -delta; in closest_thres()
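
closest_timer() and closest_thres() both pick the table index that minimizes the absolute difference from the requested value. A standalone version of that search, using an invented timer table:

#include <stdio.h>
#include <limits.h>

static int closest_idx(const unsigned int *tab, int n, unsigned int want)
{
	int best = 0, min_delta = INT_MAX;

	for (int i = 0; i < n; i++) {
		int delta = (int)want - (int)tab[i];

		if (delta < 0)
			delta = -delta;               /* absolute difference */
		if (delta < min_delta) {
			min_delta = delta;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	static const unsigned int timer_val[] = { 1, 5, 10, 50, 100, 200 };
	int idx = closest_idx(timer_val, 6, 42);

	printf("closest to 42us -> index %d (value %u)\n", idx, timer_val[idx]);
	return 0;
}
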
1232 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1234 * @us: the hold-off time in us, or 0 to disable timer
1235 * @cnt: the hold-off packet count, or 0 to disable counter
1237 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1243 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params()
1252 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1253 if (q->desc && q->pktcnt_idx != new_idx) { in cxgb4_set_rspq_intr_params()
1258 FW_PARAMS_PARAM_YZ_V(q->cntxt_id); in cxgb4_set_rspq_intr_params()
1259 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in cxgb4_set_rspq_intr_params()
1264 q->pktcnt_idx = new_idx; in cxgb4_set_rspq_intr_params()
1267 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
1268 q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0); in cxgb4_set_rspq_intr_params()
1274 netdev_features_t changed = dev->features ^ features; in cxgb_set_features()
1281 err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, in cxgb_set_features()
1282 pi->viid_mirror, -1, -1, -1, -1, in cxgb_set_features()
1285 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; in cxgb_set_features()
1291 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1292 return -1; in setup_debugfs()
1303 if ((adap->flags & CXGB4_FULL_INIT_DONE) && in cxgb4_port_mirror_free_rxq()
1304 !(adap->flags & CXGB4_SHUTTING_DOWN)) in cxgb4_port_mirror_free_rxq()
1305 cxgb4_quiesce_rx(&mirror_rxq->rspq); in cxgb4_port_mirror_free_rxq()
1307 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_free_rxq()
1308 cxgb4_clear_msix_aff(mirror_rxq->msix->vec, in cxgb4_port_mirror_free_rxq()
1309 mirror_rxq->msix->aff_mask); in cxgb4_port_mirror_free_rxq()
1310 free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq); in cxgb4_port_mirror_free_rxq()
1311 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_free_rxq()
1314 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_free_rxq()
1322 struct sge *s = &adap->sge; in cxgb4_port_mirror_alloc_queues()
1327 if (!pi->vi_mirror_count) in cxgb4_port_mirror_alloc_queues()
1330 if (s->mirror_rxq[pi->port_id]) in cxgb4_port_mirror_alloc_queues()
1333 mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL); in cxgb4_port_mirror_alloc_queues()
1335 return -ENOMEM; in cxgb4_port_mirror_alloc_queues()
1337 s->mirror_rxq[pi->port_id] = mirror_rxq; in cxgb4_port_mirror_alloc_queues()
1339 if (!(adap->flags & CXGB4_USING_MSIX)) in cxgb4_port_mirror_alloc_queues()
1340 msix = -((int)adap->sge.intrq.abs_id + 1); in cxgb4_port_mirror_alloc_queues()
1342 for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) { in cxgb4_port_mirror_alloc_queues()
1343 mirror_rxq = &s->mirror_rxq[pi->port_id][i]; in cxgb4_port_mirror_alloc_queues()
1353 mirror_rxq->msix = &adap->msix_info[msix]; in cxgb4_port_mirror_alloc_queues()
1354 snprintf(mirror_rxq->msix->desc, in cxgb4_port_mirror_alloc_queues()
1355 sizeof(mirror_rxq->msix->desc), in cxgb4_port_mirror_alloc_queues()
1356 "%s-mirrorrxq%d", dev->name, i); in cxgb4_port_mirror_alloc_queues()
1359 init_rspq(adap, &mirror_rxq->rspq, in cxgb4_port_mirror_alloc_queues()
1365 mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM; in cxgb4_port_mirror_alloc_queues()
1367 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false, in cxgb4_port_mirror_alloc_queues()
1368 dev, msix, &mirror_rxq->fl, in cxgb4_port_mirror_alloc_queues()
1373 /* Setup MSI-X vectors for Mirror Rxqs */ in cxgb4_port_mirror_alloc_queues()
1374 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_alloc_queues()
1375 ret = request_irq(mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1377 mirror_rxq->msix->desc, in cxgb4_port_mirror_alloc_queues()
1378 &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1382 cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1383 &mirror_rxq->msix->aff_mask, i); in cxgb4_port_mirror_alloc_queues()
1387 cxgb4_enable_rx(adap, &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1391 rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); in cxgb4_port_mirror_alloc_queues()
1393 ret = -ENOMEM; in cxgb4_port_mirror_alloc_queues()
1397 mirror_rxq = &s->mirror_rxq[pi->port_id][0]; in cxgb4_port_mirror_alloc_queues()
1398 for (i = 0; i < pi->rss_size; i++) in cxgb4_port_mirror_alloc_queues()
1399 rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id; in cxgb4_port_mirror_alloc_queues()
1401 ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror); in cxgb4_port_mirror_alloc_queues()
1409 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_alloc_queues()
1412 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_alloc_queues()
1415 while (rxqid-- > 0) in cxgb4_port_mirror_alloc_queues()
1417 &s->mirror_rxq[pi->port_id][rxqid]); in cxgb4_port_mirror_alloc_queues()
1419 kfree(s->mirror_rxq[pi->port_id]); in cxgb4_port_mirror_alloc_queues()
1420 s->mirror_rxq[pi->port_id] = NULL; in cxgb4_port_mirror_alloc_queues()
1428 struct sge *s = &adap->sge; in cxgb4_port_mirror_free_queues()
1431 if (!pi->vi_mirror_count) in cxgb4_port_mirror_free_queues()
1434 if (!s->mirror_rxq[pi->port_id]) in cxgb4_port_mirror_free_queues()
1437 for (i = 0; i < pi->nmirrorqsets; i++) in cxgb4_port_mirror_free_queues()
1439 &s->mirror_rxq[pi->port_id][i]); in cxgb4_port_mirror_free_queues()
1441 kfree(s->mirror_rxq[pi->port_id]); in cxgb4_port_mirror_free_queues()
1442 s->mirror_rxq[pi->port_id] = NULL; in cxgb4_port_mirror_free_queues()
1449 int ret, idx = -1; in cxgb4_port_mirror_start()
1451 if (!pi->vi_mirror_count) in cxgb4_port_mirror_start()
1459 ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror, in cxgb4_port_mirror_start()
1460 dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, in cxgb4_port_mirror_start()
1461 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, in cxgb4_port_mirror_start()
1462 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); in cxgb4_port_mirror_start()
1464 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1466 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1474 ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx, in cxgb4_port_mirror_start()
1475 dev->dev_addr, true, NULL); in cxgb4_port_mirror_start()
1477 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1479 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1490 ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true, in cxgb4_port_mirror_start()
1494 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1496 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1506 if (!pi->vi_mirror_count) in cxgb4_port_mirror_stop()
1509 t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false, in cxgb4_port_mirror_stop()
1519 if (!pi->nmirrorqsets) in cxgb4_port_mirror_alloc()
1520 return -EOPNOTSUPP; in cxgb4_port_mirror_alloc()
1522 mutex_lock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1523 if (pi->viid_mirror) { in cxgb4_port_mirror_alloc()
1524 pi->vi_mirror_count++; in cxgb4_port_mirror_alloc()
1528 ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0, in cxgb4_port_mirror_alloc()
1529 &pi->viid_mirror); in cxgb4_port_mirror_alloc()
1533 pi->vi_mirror_count = 1; in cxgb4_port_mirror_alloc()
1535 if (adap->flags & CXGB4_FULL_INIT_DONE) { in cxgb4_port_mirror_alloc()
1545 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1552 pi->vi_mirror_count = 0; in cxgb4_port_mirror_alloc()
1553 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_alloc()
1554 pi->viid_mirror = 0; in cxgb4_port_mirror_alloc()
1557 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1566 mutex_lock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_free()
1567 if (!pi->viid_mirror) in cxgb4_port_mirror_free()
1570 if (pi->vi_mirror_count > 1) { in cxgb4_port_mirror_free()
1571 pi->vi_mirror_count--; in cxgb4_port_mirror_free()
1578 pi->vi_mirror_count = 0; in cxgb4_port_mirror_free()
1579 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_free()
1580 pi->viid_mirror = 0; in cxgb4_port_mirror_free()
1583 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_free()
1587 * upper-layer driver support
1591 * Allocate an active-open TID and set it to the supplied value.
1595 int atid = -1; in cxgb4_alloc_atid()
1597 spin_lock_bh(&t->atid_lock); in cxgb4_alloc_atid()
1598 if (t->afree) { in cxgb4_alloc_atid()
1599 union aopen_entry *p = t->afree; in cxgb4_alloc_atid()
1601 atid = (p - t->atid_tab) + t->atid_base; in cxgb4_alloc_atid()
1602 t->afree = p->next; in cxgb4_alloc_atid()
1603 p->data = data; in cxgb4_alloc_atid()
1604 t->atids_in_use++; in cxgb4_alloc_atid()
1606 spin_unlock_bh(&t->atid_lock); in cxgb4_alloc_atid()
1612 * Release an active-open TID.
1616 union aopen_entry *p = &t->atid_tab[atid - t->atid_base]; in cxgb4_free_atid()
1618 spin_lock_bh(&t->atid_lock); in cxgb4_free_atid()
1619 p->next = t->afree; in cxgb4_free_atid()
1620 t->afree = p; in cxgb4_free_atid()
1621 t->atids_in_use--; in cxgb4_free_atid()
1622 spin_unlock_bh(&t->atid_lock); in cxgb4_free_atid()
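
The atid allocator above threads unused table slots into a free list through the union's next pointer: allocation pops the head and stores the caller's data, freeing pushes the slot back. A compact standalone model of the same scheme (no locking, toy table size):

#include <stdio.h>
#include <stddef.h>

#define NATIDS 4

union aopen_slot {
	union aopen_slot *next;   /* valid while the slot is free      */
	void *data;               /* valid while the slot is allocated */
};

static union aopen_slot tab[NATIDS];
static union aopen_slot *afree;

static void init_tab(void)
{
	for (int i = 0; i < NATIDS - 1; i++)
		tab[i].next = &tab[i + 1];
	tab[NATIDS - 1].next = NULL;
	afree = &tab[0];
}

static int alloc_atid(void *data)
{
	union aopen_slot *p = afree;

	if (!p)
		return -1;
	afree = p->next;
	p->data = data;
	return (int)(p - tab);                /* atid == table index */
}

static void free_atid(int atid)
{
	tab[atid].next = afree;
	afree = &tab[atid];
}

int main(void)
{
	init_tab();
	int a = alloc_atid("conn0"), b = alloc_atid("conn1");

	printf("allocated atids %d and %d\n", a, b);
	free_atid(a);
	printf("after free, next alloc = %d\n", alloc_atid("conn2"));
	return 0;
}
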
1633 spin_lock_bh(&t->stid_lock); in cxgb4_alloc_stid()
1635 stid = find_first_zero_bit(t->stid_bmap, t->nstids); in cxgb4_alloc_stid()
1636 if (stid < t->nstids) in cxgb4_alloc_stid()
1637 __set_bit(stid, t->stid_bmap); in cxgb4_alloc_stid()
1639 stid = -1; in cxgb4_alloc_stid()
1641 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1); in cxgb4_alloc_stid()
1643 stid = -1; in cxgb4_alloc_stid()
1646 t->stid_tab[stid].data = data; in cxgb4_alloc_stid()
1647 stid += t->stid_base; in cxgb4_alloc_stid()
1653 t->stids_in_use += 2; in cxgb4_alloc_stid()
1654 t->v6_stids_in_use += 2; in cxgb4_alloc_stid()
1656 t->stids_in_use++; in cxgb4_alloc_stid()
1659 spin_unlock_bh(&t->stid_lock); in cxgb4_alloc_stid()
1670 spin_lock_bh(&t->stid_lock); in cxgb4_alloc_sftid()
1672 stid = find_next_zero_bit(t->stid_bmap, in cxgb4_alloc_sftid()
1673 t->nstids + t->nsftids, t->nstids); in cxgb4_alloc_sftid()
1674 if (stid < (t->nstids + t->nsftids)) in cxgb4_alloc_sftid()
1675 __set_bit(stid, t->stid_bmap); in cxgb4_alloc_sftid()
1677 stid = -1; in cxgb4_alloc_sftid()
1679 stid = -1; in cxgb4_alloc_sftid()
1682 t->stid_tab[stid].data = data; in cxgb4_alloc_sftid()
1683 stid -= t->nstids; in cxgb4_alloc_sftid()
1684 stid += t->sftid_base; in cxgb4_alloc_sftid()
1685 t->sftids_in_use++; in cxgb4_alloc_sftid()
1687 spin_unlock_bh(&t->stid_lock); in cxgb4_alloc_sftid()
1697 if (t->nsftids && (stid >= t->sftid_base)) { in cxgb4_free_stid()
1698 stid -= t->sftid_base; in cxgb4_free_stid()
1699 stid += t->nstids; in cxgb4_free_stid()
1701 stid -= t->stid_base; in cxgb4_free_stid()
1704 spin_lock_bh(&t->stid_lock); in cxgb4_free_stid()
1706 __clear_bit(stid, t->stid_bmap); in cxgb4_free_stid()
1708 bitmap_release_region(t->stid_bmap, stid, 1); in cxgb4_free_stid()
1709 t->stid_tab[stid].data = NULL; in cxgb4_free_stid()
1710 if (stid < t->nstids) { in cxgb4_free_stid()
1712 t->stids_in_use -= 2; in cxgb4_free_stid()
1713 t->v6_stids_in_use -= 2; in cxgb4_free_stid()
1715 t->stids_in_use--; in cxgb4_free_stid()
1718 t->sftids_in_use--; in cxgb4_free_stid()
1721 spin_unlock_bh(&t->stid_lock); in cxgb4_free_stid()
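
Server TIDs, by contrast, come from a bitmap: allocation finds the first clear bit and sets it, freeing clears it again. A standalone sketch of the single-bit (IPv4) case only; the two-bit region used for IPv6 entries is omitted:

#include <stdio.h>
#include <stdint.h>

#define NSTIDS 64

static uint64_t stid_bmap;            /* bit i set => stid (stid_base + i) in use */
static const int stid_base = 128;     /* illustrative base, not the hardware's    */

static int alloc_stid(void)
{
	for (int i = 0; i < NSTIDS; i++) {
		if (!(stid_bmap & (1ULL << i))) {
			stid_bmap |= 1ULL << i;
			return stid_base + i;
		}
	}
	return -1;
}

static void free_stid(int stid)
{
	stid_bmap &= ~(1ULL << (stid - stid_base));
}

int main(void)
{
	int a = alloc_stid(), b = alloc_stid();

	printf("stids %d %d\n", a, b);
	free_stid(a);
	printf("reused: %d\n", alloc_stid());
	return 0;
}
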
1747 void **p = &t->tid_tab[tid - t->tid_base]; in cxgb4_queue_tid_release()
1749 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1750 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1752 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1753 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1754 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1755 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1757 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1770 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1771 while (adap->tid_release_head) { in process_tid_release_list()
1772 void **p = adap->tid_release_head; in process_tid_release_list()
1774 p = (void *)p - chan; in process_tid_release_list()
1776 adap->tid_release_head = *p; in process_tid_release_list()
1778 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1784 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1786 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1788 adap->tid_release_task_busy = false; in process_tid_release_list()
1789 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
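
cxgb4_queue_tid_release() stores the queue channel in the low bits of a pointer into tid_tab, relying on pointer alignment, and process_tid_release_list() masks it back out before walking the chain. A standalone demonstration of that pointer-tagging trick:

#include <stdio.h>
#include <stdint.h>

#define CHAN_MASK 0x3u   /* two low bits carry the channel (0-3) */

static void *tag(void *p, unsigned int chan)
{
	return (void *)((uintptr_t)p | (chan & CHAN_MASK));
}

static void *untag(void *p, unsigned int *chan)
{
	*chan = (uintptr_t)p & CHAN_MASK;
	return (void *)((uintptr_t)p & ~(uintptr_t)CHAN_MASK);
}

int main(void)
{
	static void *tid_tab[8];   /* pointer array => entries are >= 4-byte aligned */
	unsigned int chan;
	void *tagged = tag(&tid_tab[5], 2);
	void *orig = untag(tagged, &chan);

	printf("slot recovered? %d, chan=%u\n", orig == &tid_tab[5], chan);
	return 0;
}
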
1802 if (tid_out_of_range(&adap->tids, tid)) { in cxgb4_remove_tid()
1803 dev_err(adap->pdev_dev, "tid %d out of range\n", tid); in cxgb4_remove_tid()
1807 if (t->tid_tab[tid - adap->tids.tid_base]) { in cxgb4_remove_tid()
1808 t->tid_tab[tid - adap->tids.tid_base] = NULL; in cxgb4_remove_tid()
1809 atomic_dec(&t->conns_in_use); in cxgb4_remove_tid()
1810 if (t->hash_base && (tid >= t->hash_base)) { in cxgb4_remove_tid()
1812 atomic_sub(2, &t->hash_tids_in_use); in cxgb4_remove_tid()
1814 atomic_dec(&t->hash_tids_in_use); in cxgb4_remove_tid()
1817 atomic_sub(2, &t->tids_in_use); in cxgb4_remove_tid()
1819 atomic_dec(&t->tids_in_use); in cxgb4_remove_tid()
1838 unsigned int max_ftids = t->nftids + t->nsftids; in tid_init()
1839 unsigned int natids = t->natids; in tid_init()
1846 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); in tid_init()
1847 ftid_bmap_size = BITS_TO_LONGS(t->nftids); in tid_init()
1848 hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids); in tid_init()
1849 eotid_bmap_size = BITS_TO_LONGS(t->neotids); in tid_init()
1850 size = t->ntids * sizeof(*t->tid_tab) + in tid_init()
1851 natids * sizeof(*t->atid_tab) + in tid_init()
1852 t->nstids * sizeof(*t->stid_tab) + in tid_init()
1853 t->nsftids * sizeof(*t->stid_tab) + in tid_init()
1855 t->nhpftids * sizeof(*t->hpftid_tab) + in tid_init()
1857 max_ftids * sizeof(*t->ftid_tab) + in tid_init()
1859 t->neotids * sizeof(*t->eotid_tab) + in tid_init()
1862 t->tid_tab = kvzalloc(size, GFP_KERNEL); in tid_init()
1863 if (!t->tid_tab) in tid_init()
1864 return -ENOMEM; in tid_init()
1866 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; in tid_init()
1867 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; in tid_init()
1868 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; in tid_init()
1869 t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; in tid_init()
1870 t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids]; in tid_init()
1871 t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size]; in tid_init()
1872 t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids]; in tid_init()
1873 t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size]; in tid_init()
1874 t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids]; in tid_init()
1875 spin_lock_init(&t->stid_lock); in tid_init()
1876 spin_lock_init(&t->atid_lock); in tid_init()
1877 spin_lock_init(&t->ftid_lock); in tid_init()
1879 t->stids_in_use = 0; in tid_init()
1880 t->v6_stids_in_use = 0; in tid_init()
1881 t->sftids_in_use = 0; in tid_init()
1882 t->afree = NULL; in tid_init()
1883 t->atids_in_use = 0; in tid_init()
1884 atomic_set(&t->tids_in_use, 0); in tid_init()
1885 atomic_set(&t->conns_in_use, 0); in tid_init()
1886 atomic_set(&t->hash_tids_in_use, 0); in tid_init()
1887 atomic_set(&t->eotids_in_use, 0); in tid_init()
1891 while (--natids) in tid_init()
1892 t->atid_tab[natids - 1].next = &t->atid_tab[natids]; in tid_init()
1893 t->afree = t->atid_tab; in tid_init()
1897 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); in tid_init()
1899 if (!t->stid_base && in tid_init()
1900 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in tid_init()
1901 __set_bit(0, t->stid_bmap); in tid_init()
1903 if (t->neotids) in tid_init()
1904 bitmap_zero(t->eotid_bmap, t->neotids); in tid_init()
1907 if (t->nhpftids) in tid_init()
1908 bitmap_zero(t->hpftid_bmap, t->nhpftids); in tid_init()
1909 bitmap_zero(t->ftid_bmap, t->nftids); in tid_init()
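
tid_init() sizes several tables up front, makes one allocation, and carves each table out of the end of the previous one. A userspace sketch of the same layout technique, with invented table types and counts:

#include <stdio.h>
#include <stdlib.h>

/* Invented table types purely for illustration. */
struct carved_tables {
	void **tid_tab;
	unsigned long *stid_bmap;
	int *ftid_tab;
};

static int tables_init(struct carved_tables *t, size_t ntids, size_t nstids,
		       size_t nftids)
{
	size_t bits_per_long = 8 * sizeof(unsigned long);
	size_t bmap_longs = (nstids + bits_per_long - 1) / bits_per_long;
	size_t size = ntids * sizeof(*t->tid_tab) +
		      bmap_longs * sizeof(*t->stid_bmap) +
		      nftids * sizeof(*t->ftid_tab);

	t->tid_tab = calloc(1, size);          /* one allocation for everything */
	if (!t->tid_tab)
		return -1;
	t->stid_bmap = (unsigned long *)&t->tid_tab[ntids];
	t->ftid_tab = (int *)&t->stid_bmap[bmap_longs];
	return 0;
}

int main(void)
{
	struct carved_tables t;

	if (tables_init(&t, 1024, 64, 128))
		return 1;
	printf("tid_tab=%p stid_bmap=%p ftid_tab=%p\n",
	       (void *)t.tid_tab, (void *)t.stid_bmap, (void *)t.ftid_tab);
	free(t.tid_tab);
	return 0;
}
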
1914 * cxgb4_create_server - create an IP server
1937 return -ENOMEM; in cxgb4_create_server()
1943 req->local_port = sport; in cxgb4_create_server()
1944 req->peer_port = htons(0); in cxgb4_create_server()
1945 req->local_ip = sip; in cxgb4_create_server()
1946 req->peer_ip = htonl(0); in cxgb4_create_server()
1947 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1948 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); in cxgb4_create_server()
1949 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | in cxgb4_create_server()
1956 /* cxgb4_create_server6 - create an IPv6 server
1978 return -ENOMEM; in cxgb4_create_server6()
1984 req->local_port = sport; in cxgb4_create_server6()
1985 req->peer_port = htons(0); in cxgb4_create_server6()
1986 req->local_ip_hi = *(__be64 *)(sip->s6_addr); in cxgb4_create_server6()
1987 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); in cxgb4_create_server6()
1988 req->peer_ip_hi = cpu_to_be64(0); in cxgb4_create_server6()
1989 req->peer_ip_lo = cpu_to_be64(0); in cxgb4_create_server6()
1990 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1991 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); in cxgb4_create_server6()
1992 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | in cxgb4_create_server6()
2011 return -ENOMEM; in cxgb4_remove_server()
2016 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) : in cxgb4_remove_server()
2024 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2038 while (i < NMTUS - 1 && mtus[i + 1] <= mtu) in cxgb4_best_mtu()
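
The MTU table is sorted in ascending order, so the loop above simply advances while the next entry still fits, returning the largest entry not above the target. A runnable sketch with a made-up table:

#include <stdio.h>

#define NMTUS 16

static unsigned int best_mtu(const unsigned short *mtus, unsigned short mtu,
			     unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}

int main(void)
{
	/* An ascending table with made-up values; not the firmware default. */
	static const unsigned short mtus[NMTUS] = {
		88, 256, 512, 576, 808, 1024, 1280, 1488,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int idx, mtu;

	mtu = best_mtu(mtus, 1500, &idx);
	printf("best for 1500: %u (index %u)\n", mtu, idx);
	mtu = best_mtu(mtus, 1400, &idx);
	printf("best for 1400: %u (index %u)\n", mtu, idx);
	return 0;
}
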
2047 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
2069 unsigned short data_size_align_mask = data_size_align - 1; in cxgb4_best_aligned_mtu()
2077 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) { in cxgb4_best_aligned_mtu()
2078 unsigned short data_size = mtus[mtu_idx] - header_size; in cxgb4_best_aligned_mtu()
2098 mtu_idx--; in cxgb4_best_aligned_mtu()
2105 mtu_idx - aligned_mtu_idx <= 1) in cxgb4_best_aligned_mtu()
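
cxgb4_best_aligned_mtu() adds a twist: among the entries that fit, it remembers the largest one whose data size (MTU minus header) is a multiple of the requested alignment, and prefers it when it is within one table step of the absolute best. A standalone approximation of that policy, assuming a power-of-two alignment and with the fallback handling simplified:

#include <stdio.h>

#define NMTUS 8

static unsigned int best_aligned_mtu(const unsigned short *mtus,
				     unsigned short header_size,
				     unsigned short data_size_max,
				     unsigned short data_size_align)
{
	unsigned short align_mask = data_size_align - 1;   /* power-of-two align assumed */
	int mtu_idx, aligned_idx = -1;

	for (mtu_idx = 0; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		if (data_size > data_size_max)
			break;                       /* too big: later entries only grow */
		if ((data_size & align_mask) == 0)
			aligned_idx = mtu_idx;       /* largest aligned entry so far     */
	}
	if (mtu_idx > 0)
		mtu_idx--;                           /* step back to the last fitting entry */

	if (aligned_idx >= 0 && mtu_idx - aligned_idx <= 1)
		mtu_idx = aligned_idx;               /* prefer alignment if close enough */

	return mtus[mtu_idx];
}

int main(void)
{
	static const unsigned short mtus[NMTUS] = {
		576, 1024, 1280, 1488, 1500, 2002, 4096, 9000
	};

	/* 40 bytes of headers, 1460-byte payload cap, 8-byte payload alignment:
	 * 1488 (payload 1448, divisible by 8) wins over 1500 (payload 1460). */
	printf("%u\n", best_aligned_mtu(mtus, 40, 1460, 8));
	return 0;
}
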
2118 * cxgb4_port_chan - get the HW channel of a port
2125 return netdev2pinfo(dev)->tx_chan; in cxgb4_port_chan()
2130 * cxgb4_port_e2cchan - get the HW c-channel of a port
2133 * Return the HW RX c-channel of the given port.
2137 return netdev2pinfo(dev)->rx_cchan; in cxgb4_port_e2cchan()
2148 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
2160 * cxgb4_port_viid - get the VI id of a port
2167 return netdev2pinfo(dev)->viid; in cxgb4_port_viid()
2172 * cxgb4_port_idx - get the index of a port
2179 return netdev2pinfo(dev)->port_id; in cxgb4_port_idx()
2188 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2190 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2198 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS); in cxgb4_flush_eq_cache()
2208 spin_lock(&adap->win0_lock); in read_eq_indices()
2212 spin_unlock(&adap->win0_lock); in read_eq_indices()
2236 delta = pidx - hw_pidx; in cxgb4_sync_txq_pidx()
2238 delta = size - hw_pidx + pidx; in cxgb4_sync_txq_pidx()
2240 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
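
The pidx delta computed above is a ring-buffer distance: how far the software producer index is ahead of the hardware's, accounting for wrap-around in a queue of a given size. In standalone form:

#include <stdio.h>

static unsigned int ring_delta(unsigned int sw_pidx, unsigned int hw_pidx,
			       unsigned int size)
{
	if (sw_pidx >= hw_pidx)
		return sw_pidx - hw_pidx;
	return size - hw_pidx + sw_pidx;   /* producer wrapped past the end */
}

int main(void)
{
	printf("%u\n", ring_delta(10, 4, 1024));    /* 6                */
	printf("%u\n", ring_delta(3, 1020, 1024));  /* 7 (wrapped case) */
	return 0;
}
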
2264 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
2292 memaddr = offset - edc0_end; in cxgb4_read_tpte()
2296 memaddr = offset - edc1_end; in cxgb4_read_tpte()
2299 memaddr = offset - edc1_end; in cxgb4_read_tpte()
2300 } else if (is_t5(adap->params.chip)) { in cxgb4_read_tpte()
2306 memaddr = offset - mc0_end; in cxgb4_read_tpte()
2317 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
2319 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
2323 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
2325 return -EINVAL; in cxgb4_read_tpte()
2365 const struct net_device *netdev = neigh->dev; in check_neigh_update()
2369 parent = netdev->dev.parent; in check_neigh_update()
2370 if (parent && parent->driver == &cxgb4_driver.driver) in check_neigh_update()
2400 if (is_t4(adap->params.chip)) { in drain_db_fifo()
2419 spin_lock_irqsave(&q->db_lock, flags); in disable_txq_db()
2420 q->db_disabled = 1; in disable_txq_db()
2421 spin_unlock_irqrestore(&q->db_lock, flags); in disable_txq_db()
2426 spin_lock_irq(&q->db_lock); in enable_txq_db()
2427 if (q->db_pidx_inc) { in enable_txq_db()
2433 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc)); in enable_txq_db()
2434 q->db_pidx_inc = 0; in enable_txq_db()
2436 q->db_disabled = 0; in enable_txq_db()
2437 spin_unlock_irq(&q->db_lock); in enable_txq_db()
2444 for_each_ethrxq(&adap->sge, i) in disable_dbs()
2445 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
2448 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in disable_dbs()
2451 for_each_ofldtxq(&adap->sge, i) { in disable_dbs()
2452 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in disable_dbs()
2454 disable_txq_db(&txq->q); in disable_dbs()
2459 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
2466 for_each_ethrxq(&adap->sge, i) in enable_dbs()
2467 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
2470 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in enable_dbs()
2473 for_each_ofldtxq(&adap->sge, i) { in enable_dbs()
2474 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in enable_dbs()
2476 enable_txq_db(adap, &txq->q); in enable_dbs()
2481 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
2488 if (adap->uld && adap->uld[type].handle) in notify_rdma_uld()
2489 adap->uld[type].control(adap->uld[type].handle, cmd); in notify_rdma_uld()
2501 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_full()
2515 spin_lock_irq(&q->db_lock); in sync_txq_pidx()
2516 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
2519 if (q->db_pidx != hw_pidx) { in sync_txq_pidx()
2523 if (q->db_pidx >= hw_pidx) in sync_txq_pidx()
2524 delta = q->db_pidx - hw_pidx; in sync_txq_pidx()
2526 delta = q->size - hw_pidx + q->db_pidx; in sync_txq_pidx()
2528 if (is_t4(adap->params.chip)) in sync_txq_pidx()
2534 QID_V(q->cntxt_id) | val); in sync_txq_pidx()
2537 q->db_disabled = 0; in sync_txq_pidx()
2538 q->db_pidx_inc = 0; in sync_txq_pidx()
2539 spin_unlock_irq(&q->db_lock); in sync_txq_pidx()
2548 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2549 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2552 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in recover_all_queues()
2554 for_each_ofldtxq(&adap->sge, i) { in recover_all_queues()
2555 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in recover_all_queues()
2557 sync_txq_pidx(adap, &txq->q); in recover_all_queues()
2562 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2571 if (is_t4(adap->params.chip)) { in process_db_drop()
2579 } else if (is_t5(adap->params.chip)) { in process_db_drop()
2590 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2594 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2596 /* Re-enable BAR2 WC */ in process_db_drop()
2600 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_drop()
2606 if (is_t4(adap->params.chip)) { in t4_db_full()
2611 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2617 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2621 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
2640 list_del(&adap->list_node); in detach_ulds()
2643 if (adap->uld && adap->uld[i].handle) in detach_ulds()
2644 adap->uld[i].state_change(adap->uld[i].handle, in detach_ulds()
2660 if (adap->uld && adap->uld[i].handle) in notify_ulds()
2661 adap->uld[i].state_change(adap->uld[i].handle, in notify_ulds()
2671 struct net_device *event_dev = ifa->idev->dev; in cxgb4_inet6addr_handler()
2679 if (event_dev->flags & IFF_MASTER) { in cxgb4_inet6addr_handler()
2683 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2687 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2699 parent = event_dev->dev.parent; in cxgb4_inet6addr_handler()
2701 if (parent && parent->driver == &cxgb4_driver.driver) { in cxgb4_inet6addr_handler()
2730 dev = adap->port[i]; in update_clip()
2744 * cxgb_up - enable the adapter
2755 struct sge *s = &adap->sge; in cxgb_up()
2766 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_up()
2767 if (s->nd_msix_idx < 0) { in cxgb_up()
2768 err = -ENOMEM; in cxgb_up()
2772 err = request_irq(adap->msix_info[s->nd_msix_idx].vec, in cxgb_up()
2774 adap->msix_info[s->nd_msix_idx].desc, adap); in cxgb_up()
2782 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2783 (adap->flags & CXGB4_USING_MSI) ? 0 in cxgb_up()
2785 adap->port[0]->name, adap); in cxgb_up()
2793 adap->flags |= CXGB4_FULL_INIT_DONE; in cxgb_up()
2803 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap); in cxgb_up()
2805 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2815 cancel_work_sync(&adapter->tid_release_task); in cxgb_down()
2816 cancel_work_sync(&adapter->db_full_task); in cxgb_down()
2817 cancel_work_sync(&adapter->db_drop_task); in cxgb_down()
2818 adapter->tid_release_task_busy = false; in cxgb_down()
2819 adapter->tid_release_head = NULL; in cxgb_down()
2824 adapter->flags &= ~CXGB4_FULL_INIT_DONE; in cxgb_down()
2833 struct adapter *adapter = pi->adapter; in cxgb_open()
2838 if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_open()
2855 if (pi->nmirrorqsets) { in cxgb_open()
2856 mutex_lock(&pi->vi_mirror_mutex); in cxgb_open()
2864 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_open()
2874 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_open()
2881 struct adapter *adapter = pi->adapter; in cxgb_close()
2886 ret = t4_enable_pi_params(adapter, adapter->pf, pi, in cxgb_close()
2895 if (pi->nmirrorqsets) { in cxgb_close()
2896 mutex_lock(&pi->vi_mirror_mutex); in cxgb_close()
2899 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_close()
2918 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2919 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2923 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2931 if (f->valid) in cxgb4_create_server_filter()
2935 memset(&f->fs, 0, sizeof(struct ch_filter_specification)); in cxgb4_create_server_filter()
2936 f->fs.val.lport = be16_to_cpu(sport); in cxgb4_create_server_filter()
2937 f->fs.mask.lport = ~0; in cxgb4_create_server_filter()
2941 f->fs.val.lip[i] = val[i]; in cxgb4_create_server_filter()
2942 f->fs.mask.lip[i] = ~0; in cxgb4_create_server_filter()
2944 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2945 f->fs.val.iport = port; in cxgb4_create_server_filter()
2946 f->fs.mask.iport = mask; in cxgb4_create_server_filter()
2950 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2951 f->fs.val.proto = IPPROTO_TCP; in cxgb4_create_server_filter()
2952 f->fs.mask.proto = ~0; in cxgb4_create_server_filter()
2955 f->fs.dirsteer = 1; in cxgb4_create_server_filter()
2956 f->fs.iq = queue; in cxgb4_create_server_filter()
2958 f->locked = 1; in cxgb4_create_server_filter()
2959 f->fs.rpttid = 1; in cxgb4_create_server_filter()
2964 f->tid = stid + adap->tids.ftid_base; in cxgb4_create_server_filter()
2984 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2985 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2987 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
2989 f->locked = 0; in cxgb4_remove_server_filter()
3000 struct adapter *adapter = p->adapter; in cxgb_get_stats()
3006 spin_lock(&adapter->stats_lock); in cxgb_get_stats()
3008 spin_unlock(&adapter->stats_lock); in cxgb_get_stats()
3011 t4_get_port_stats_offset(adapter, p->tx_chan, &stats, in cxgb_get_stats()
3012 &p->stats_base); in cxgb_get_stats()
3013 spin_unlock(&adapter->stats_lock); in cxgb_get_stats()
3015 ns->tx_bytes = stats.tx_octets; in cxgb_get_stats()
3016 ns->tx_packets = stats.tx_frames; in cxgb_get_stats()
3017 ns->rx_bytes = stats.rx_octets; in cxgb_get_stats()
3018 ns->rx_packets = stats.rx_frames; in cxgb_get_stats()
3019 ns->multicast = stats.rx_mcast_frames; in cxgb_get_stats()
3022 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + in cxgb_get_stats()
3024 ns->rx_over_errors = 0; in cxgb_get_stats()
3025 ns->rx_crc_errors = stats.rx_fcs_err; in cxgb_get_stats()
3026 ns->rx_frame_errors = stats.rx_symbol_err; in cxgb_get_stats()
3027 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 + in cxgb_get_stats()
3031 ns->rx_missed_errors = 0; in cxgb_get_stats()
3034 ns->tx_aborted_errors = 0; in cxgb_get_stats()
3035 ns->tx_carrier_errors = 0; in cxgb_get_stats()
3036 ns->tx_fifo_errors = 0; in cxgb_get_stats()
3037 ns->tx_heartbeat_errors = 0; in cxgb_get_stats()
3038 ns->tx_window_errors = 0; in cxgb_get_stats()
3040 ns->tx_errors = stats.tx_error_frames; in cxgb_get_stats()
3041 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + in cxgb_get_stats()
3042 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; in cxgb_get_stats()
3050 struct adapter *adapter = pi->adapter; in cxgb_ioctl()
3051 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; in cxgb_ioctl()
3055 if (pi->mdio_addr < 0) in cxgb_ioctl()
3056 return -EOPNOTSUPP; in cxgb_ioctl()
3057 data->phy_id = pi->mdio_addr; in cxgb_ioctl()
3061 if (mdio_phy_id_is_c45(data->phy_id)) { in cxgb_ioctl()
3062 prtad = mdio_phy_id_prtad(data->phy_id); in cxgb_ioctl()
3063 devad = mdio_phy_id_devad(data->phy_id); in cxgb_ioctl()
3064 } else if (data->phy_id < 32) { in cxgb_ioctl()
3065 prtad = data->phy_id; in cxgb_ioctl()
3067 data->reg_num &= 0x1f; in cxgb_ioctl()
3069 return -EINVAL; in cxgb_ioctl()
3071 mbox = pi->adapter->pf; in cxgb_ioctl()
3073 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, in cxgb_ioctl()
3074 data->reg_num, &data->val_out); in cxgb_ioctl()
3076 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, in cxgb_ioctl()
3077 data->reg_num, data->val_in); in cxgb_ioctl()
3080 return copy_to_user(req->ifr_data, &pi->tstamp_config, in cxgb_ioctl()
3081 sizeof(pi->tstamp_config)) ? in cxgb_ioctl()
3082 -EFAULT : 0; in cxgb_ioctl()
3084 if (copy_from_user(&pi->tstamp_config, req->ifr_data, in cxgb_ioctl()
3085 sizeof(pi->tstamp_config))) in cxgb_ioctl()
3086 return -EFAULT; in cxgb_ioctl()
3088 if (!is_t4(adapter->params.chip)) { in cxgb_ioctl()
3089 switch (pi->tstamp_config.tx_type) { in cxgb_ioctl()
3094 return -ERANGE; in cxgb_ioctl()
3097 switch (pi->tstamp_config.rx_filter) { in cxgb_ioctl()
3099 pi->rxtstamp = false; in cxgb_ioctl()
3103 cxgb4_ptprx_timestamping(pi, pi->port_id, in cxgb_ioctl()
3107 cxgb4_ptprx_timestamping(pi, pi->port_id, in cxgb_ioctl()
3115 pi->rxtstamp = true; in cxgb_ioctl()
3118 pi->tstamp_config.rx_filter = in cxgb_ioctl()
3120 return -ERANGE; in cxgb_ioctl()
3123 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) && in cxgb_ioctl()
3124 (pi->tstamp_config.rx_filter == in cxgb_ioctl()
3126 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) in cxgb_ioctl()
3127 pi->ptp_enable = false; in cxgb_ioctl()
3130 if (pi->tstamp_config.rx_filter != in cxgb_ioctl()
3134 pi->ptp_enable = true; in cxgb_ioctl()
3138 switch (pi->tstamp_config.rx_filter) { in cxgb_ioctl()
3140 pi->rxtstamp = false; in cxgb_ioctl()
3143 pi->rxtstamp = true; in cxgb_ioctl()
3146 pi->tstamp_config.rx_filter = in cxgb_ioctl()
3148 return -ERANGE; in cxgb_ioctl()
3151 return copy_to_user(req->ifr_data, &pi->tstamp_config, in cxgb_ioctl()
3152 sizeof(pi->tstamp_config)) ? in cxgb_ioctl()
3153 -EFAULT : 0; in cxgb_ioctl()
3155 return -EOPNOTSUPP; in cxgb_ioctl()
3163 set_rxmode(dev, -1, false); in cxgb_set_rxmode()
3171 ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, in cxgb_change_mtu()
3172 pi->viid_mirror, new_mtu, -1, -1, -1, -1, true); in cxgb_change_mtu()
3174 WRITE_ONCE(dev->mtu, new_mtu); in cxgb_change_mtu()
3197 err = t4_get_raw_vpd_params(adap, &adap->params.vpd); in cxgb4_mgmt_fill_vf_station_mac_addr()
3201 na = adap->params.vpd.na; in cxgb4_mgmt_fill_vf_station_mac_addr()
3217 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); in cxgb4_mgmt_fill_vf_station_mac_addr()
3219 macaddr[5] = adap->pf * nvfs + vf; in cxgb4_mgmt_fill_vf_station_mac_addr()
3220 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); in cxgb4_mgmt_fill_vf_station_mac_addr()
3227 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_mac()
3232 dev_err(pi->adapter->pdev_dev, in cxgb4_mgmt_set_vf_mac()
3235 return -EINVAL; in cxgb4_mgmt_set_vf_mac()
3238 dev_info(pi->adapter->pdev_dev, in cxgb4_mgmt_set_vf_mac()
3240 ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac); in cxgb4_mgmt_set_vf_mac()
3242 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); in cxgb4_mgmt_set_vf_mac()
3250 struct adapter *adap = pi->adapter; in cxgb4_mgmt_get_vf_config()
3253 if (vf >= adap->num_vfs) in cxgb4_mgmt_get_vf_config()
3254 return -EINVAL; in cxgb4_mgmt_get_vf_config()
3255 vfinfo = &adap->vfinfo[vf]; in cxgb4_mgmt_get_vf_config()
3257 ivi->vf = vf; in cxgb4_mgmt_get_vf_config()
3258 ivi->max_tx_rate = vfinfo->tx_rate; in cxgb4_mgmt_get_vf_config()
3259 ivi->min_tx_rate = 0; in cxgb4_mgmt_get_vf_config()
3260 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr); in cxgb4_mgmt_get_vf_config()
3261 ivi->vlan = vfinfo->vlan; in cxgb4_mgmt_get_vf_config()
3262 ivi->linkstate = vfinfo->link_state; in cxgb4_mgmt_get_vf_config()
3272 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; in cxgb4_mgmt_get_phys_port_id()
3273 ppid->id_len = sizeof(phy_port_id); in cxgb4_mgmt_get_phys_port_id()
3274 memcpy(ppid->id, &phy_port_id, ppid->id_len); in cxgb4_mgmt_get_phys_port_id()
3282 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_rate()
3289 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_rate()
3290 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3293 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3296 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3305 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_rate()
3308 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3310 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3311 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3313 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3315 adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3316 adap->vfinfo[vf].tx_rate = 0; in cxgb4_mgmt_set_vf_rate()
3322 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3324 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3328 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); in cxgb4_mgmt_set_vf_rate()
3329 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3333 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3334 "Max tx rate %d for VF %d can't be > link-speed %u", in cxgb4_mgmt_set_vf_rate()
3336 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3341 pktsize = pktsize - sizeof(struct ethhdr) - 4; in cxgb4_mgmt_set_vf_rate()
3343 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr); in cxgb4_mgmt_set_vf_rate()
3344 /* configure Traffic Class for rate-limiting */ in cxgb4_mgmt_set_vf_rate()
3350 pi->tx_chan, class_id, 0, in cxgb4_mgmt_set_vf_rate()
3353 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", in cxgb4_mgmt_set_vf_rate()
3355 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3357 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3365 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, in cxgb4_mgmt_set_vf_rate()
3368 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3370 ret, adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3371 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3373 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", in cxgb4_mgmt_set_vf_rate()
3374 adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3375 adap->vfinfo[vf].tx_rate = max_tx_rate; in cxgb4_mgmt_set_vf_rate()
3383 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_vlan()
3386 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7) in cxgb4_mgmt_set_vf_vlan()
3387 return -EINVAL; in cxgb4_mgmt_set_vf_vlan()
3390 return -EPROTONOSUPPORT; in cxgb4_mgmt_set_vf_vlan()
3392 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan); in cxgb4_mgmt_set_vf_vlan()
3394 adap->vfinfo[vf].vlan = vlan; in cxgb4_mgmt_set_vf_vlan()
3398 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n", in cxgb4_mgmt_set_vf_vlan()
3399 ret, (vlan ? "setting" : "clearing"), adap->pf, vf); in cxgb4_mgmt_set_vf_vlan()
3407 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_link_state()
3411 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_link_state()
3412 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3428 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3433 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_link_state()
3436 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_link_state()
3438 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_link_state()
3439 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3442 adap->vfinfo[vf].link_state = link; in cxgb4_mgmt_set_vf_link_state()
3453 if (!is_valid_ether_addr(addr->sa_data)) in cxgb_set_mac_addr()
3454 return -EADDRNOTAVAIL; in cxgb_set_mac_addr()
3456 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, in cxgb_set_mac_addr()
3457 addr->sa_data, true, &pi->smt_idx); in cxgb_set_mac_addr()
3461 eth_hw_addr_set(dev, addr->sa_data); in cxgb_set_mac_addr()
3469 struct adapter *adap = pi->adapter; in cxgb_netpoll()
3471 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_netpoll()
3473 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
3475 for (i = pi->nqsets; i; i--, rx++) in cxgb_netpoll()
3476 t4_sge_intr_msix(0, &rx->rspq); in cxgb_netpoll()
3485 struct adapter *adap = pi->adapter; in cxgb_set_tx_maxrate()
3493 return -ENOTSUPP; in cxgb_set_tx_maxrate()
3495 if (index < 0 || index > pi->nqsets - 1) in cxgb_set_tx_maxrate()
3496 return -EINVAL; in cxgb_set_tx_maxrate()
3498 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_set_tx_maxrate()
3499 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3502 return -EINVAL; in cxgb_set_tx_maxrate()
3507 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) { in cxgb_set_tx_maxrate()
3508 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3510 index, e->idx, e->info.u.params.level); in cxgb_set_tx_maxrate()
3511 return -EBUSY; in cxgb_set_tx_maxrate()
3519 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3522 return -ERANGE; in cxgb_set_tx_maxrate()
3532 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3534 index, pi->port_id, err); in cxgb_set_tx_maxrate()
3548 p.u.params.channel = pi->tx_chan; in cxgb_set_tx_maxrate()
3553 p.u.params.pktsize = dev->mtu; in cxgb_set_tx_maxrate()
3557 return -ENOMEM; in cxgb_set_tx_maxrate()
3562 qe.class = e->idx; in cxgb_set_tx_maxrate()
3566 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3574 switch (cls_flower->command) { in cxgb_setup_tc_flower()
3582 return -EOPNOTSUPP; in cxgb_setup_tc_flower()
3589 switch (cls_u32->command) { in cxgb_setup_tc_cls_u32()
3596 return -EOPNOTSUPP; in cxgb_setup_tc_cls_u32()
3606 if (!adap->tc_matchall) in cxgb_setup_tc_matchall()
3607 return -ENOMEM; in cxgb_setup_tc_matchall()
3609 switch (cls_matchall->command) { in cxgb_setup_tc_matchall()
3622 return -EOPNOTSUPP; in cxgb_setup_tc_matchall()
3632 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_ingress_cb()
3633 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_ingress_cb()
3635 pi->port_id); in cxgb_setup_tc_block_ingress_cb()
3636 return -EINVAL; in cxgb_setup_tc_block_ingress_cb()
3640 return -EOPNOTSUPP; in cxgb_setup_tc_block_ingress_cb()
3650 return -EOPNOTSUPP; in cxgb_setup_tc_block_ingress_cb()
3661 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_egress_cb()
3662 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_egress_cb()
3664 pi->port_id); in cxgb_setup_tc_block_egress_cb()
3665 return -EINVAL; in cxgb_setup_tc_block_egress_cb()
3669 return -EOPNOTSUPP; in cxgb_setup_tc_block_egress_cb()
3678 return -EOPNOTSUPP; in cxgb_setup_tc_block_egress_cb()
3686 if (!is_ethofld(adap) || !adap->tc_mqprio) in cxgb_setup_tc_mqprio()
3687 return -ENOMEM; in cxgb_setup_tc_mqprio()
3701 pi->tc_block_shared = f->block_shared; in cxgb_setup_tc_block()
3702 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { in cxgb_setup_tc_block()
3723 return -EOPNOTSUPP; in cxgb_setup_tc()
3732 struct adapter *adapter = pi->adapter; in cxgb_udp_tunnel_unset_port()
3736 switch (ti->type) { in cxgb_udp_tunnel_unset_port()
3738 adapter->vxlan_port = 0; in cxgb_udp_tunnel_unset_port()
3742 adapter->geneve_port = 0; in cxgb_udp_tunnel_unset_port()
3746 return -EINVAL; in cxgb_udp_tunnel_unset_port()
3752 if (!adapter->rawf_cnt) in cxgb_udp_tunnel_unset_port()
3756 ret = t4_free_raw_mac_filt(adapter, pi->viid, in cxgb_udp_tunnel_unset_port()
3758 adapter->rawf_start + pi->port_id, in cxgb_udp_tunnel_unset_port()
3759 1, pi->port_id, false); in cxgb_udp_tunnel_unset_port()
3775 struct adapter *adapter = pi->adapter; in cxgb_udp_tunnel_set_port()
3779 switch (ti->type) { in cxgb_udp_tunnel_set_port()
3781 adapter->vxlan_port = ti->port; in cxgb_udp_tunnel_set_port()
3783 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); in cxgb_udp_tunnel_set_port()
3786 adapter->geneve_port = ti->port; in cxgb_udp_tunnel_set_port()
3788 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); in cxgb_udp_tunnel_set_port()
3791 return -EINVAL; in cxgb_udp_tunnel_set_port()
3803 ret = t4_alloc_raw_mac_filt(adapter, pi->viid, in cxgb_udp_tunnel_set_port()
3806 adapter->rawf_start + pi->port_id, in cxgb_udp_tunnel_set_port()
3807 1, pi->port_id, false); in cxgb_udp_tunnel_set_port()
3810 be16_to_cpu(ti->port)); in cxgb_udp_tunnel_set_port()
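/* Note (illustrative): these set/unset helpers are plugged into the
 * udp_tunnel_nic infrastructure (cxgb_udp_tunnels, assigned to
 * netdev->udp_tunnel_nic_info in init_one() below), so they run whenever a
 * VXLAN or GENEVE device starts or stops using a UDP port, e.g.:
 *
 *	ip link add vxlan0 type vxlan id 42 dstport 4789 dev enp2s0f4
 *
 * The port is programmed into hardware through the VXLAN_V()/GENEVE_V()
 * register writes above, and a raw MAC filter is allocated per port so
 * encapsulated traffic can be recognised for offload.
 */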
3832 struct adapter *adapter = pi->adapter; in cxgb_features_check()
3834 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) in cxgb_features_check()
3838 if (!skb->encapsulation || cxgb_encap_offload_supported(skb)) in cxgb_features_check()
3896 strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); in cxgb4_mgmt_get_drvinfo()
3897 strscpy(info->bus_info, pci_name(adapter->pdev), in cxgb4_mgmt_get_drvinfo()
3898 sizeof(info->bus_info)); in cxgb4_mgmt_get_drvinfo()
3918 if (pci_channel_offline(adap->pdev)) in t4_fatal_err()
3926 struct net_device *dev = adap->port[port]; in t4_fatal_err()
3937 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
3938 queue_work(adap->workq, &adap->fatal_err_notify_task); in t4_fatal_err()
3950 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3956 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3957 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3963 adap->vres.ocq.start); in setup_memwin_rdma()
3996 if (!adapter->hma.sgt) in adap_free_hma_mem()
3999 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) { in adap_free_hma_mem()
4000 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl, in adap_free_hma_mem()
4001 adapter->hma.sgt->nents, DMA_BIDIRECTIONAL); in adap_free_hma_mem()
4002 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG; in adap_free_hma_mem()
4005 for_each_sg(adapter->hma.sgt->sgl, iter, in adap_free_hma_mem()
4006 adapter->hma.sgt->orig_nents, i) { in adap_free_hma_mem()
4012 kfree(adapter->hma.phy_addr); in adap_free_hma_mem()
4013 sg_free_table(adapter->hma.sgt); in adap_free_hma_mem()
4014 kfree(adapter->hma.sgt); in adap_free_hma_mem()
4015 adapter->hma.sgt = NULL; in adap_free_hma_mem()
4034 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) in adap_config_hma()
4040 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, in adap_config_hma()
4050 dev_err(adapter->pdev_dev, in adap_config_hma()
4051 "HMA size %uMB beyond bounds(%u-%lu)MB\n", in adap_config_hma()
4053 return -EINVAL; in adap_config_hma()
4058 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL); in adap_config_hma()
4059 if (unlikely(!adapter->hma.sgt)) { in adap_config_hma()
4060 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n"); in adap_config_hma()
4061 return -ENOMEM; in adap_config_hma()
4063 sgt = adapter->hma.sgt; in adap_config_hma()
4066 sgt->orig_nents = (hma_size << 20) / (page_size << page_order); in adap_config_hma()
4067 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) { in adap_config_hma()
4068 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n"); in adap_config_hma()
4069 kfree(adapter->hma.sgt); in adap_config_hma()
4070 adapter->hma.sgt = NULL; in adap_config_hma()
4071 return -ENOMEM; in adap_config_hma()
4074 sgl = adapter->hma.sgt->sgl; in adap_config_hma()
4075 node = dev_to_node(adapter->pdev_dev); in adap_config_hma()
4076 for_each_sg(sgl, iter, sgt->orig_nents, i) { in adap_config_hma()
4080 dev_err(adapter->pdev_dev, in adap_config_hma()
4082 ret = -ENOMEM; in adap_config_hma()
4088 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents, in adap_config_hma()
4090 if (!sgt->nents) { in adap_config_hma()
4091 dev_err(adapter->pdev_dev, in adap_config_hma()
4093 ret = -ENOMEM; in adap_config_hma()
4096 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG; in adap_config_hma()
4098 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t), in adap_config_hma()
4100 if (unlikely(!adapter->hma.phy_addr)) in adap_config_hma()
4103 for_each_sg(sgl, iter, sgt->nents, i) { in adap_config_hma()
4105 adapter->hma.phy_addr[i] = sg_dma_address(iter); in adap_config_hma()
4108 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD); in adap_config_hma()
4117 eoc = (i == ncmds - 1) ? 1 : 0; in adap_config_hma()
4122 if (i == ncmds - 1) { in adap_config_hma()
4123 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD; in adap_config_hma()
4148 cpu_to_be64(adapter->hma.phy_addr[j + k]); in adap_config_hma()
4150 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd, in adap_config_hma()
4153 dev_err(adapter->pdev_dev, in adap_config_hma()
4160 dev_info(adapter->pdev_dev, in adap_config_hma()
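/* Illustrative sketch (not part of the driver): the loop above splits the
 * HMA scatter-gather entries into mailbox commands that carry at most
 * HMA_MAX_ADDR_IN_CMD addresses each, the last command taking whatever is
 * left over. The per-command address count follows this shape:
 */
static inline unsigned int hma_example_naddr(unsigned int cmd,
					     unsigned int ncmds,
					     unsigned int nents,
					     unsigned int max_per_cmd)
{
	/* every command except the last carries a full batch */
	if (cmd != ncmds - 1)
		return max_per_cmd;
	/* last command: the remainder, or a full batch if it divides evenly */
	return nents % max_per_cmd ? nents % max_per_cmd : max_per_cmd;
}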
4179 dev_err(adap->pdev_dev, in adap_init1()
4186 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | in adap_init1()
4188 c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); in adap_init1()
4189 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); in adap_init1()
4193 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | in adap_init1()
4195 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); in adap_init1()
4199 ret = t4_config_glbl_rss(adap, adap->pf, in adap_init1()
4206 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, in adap_init1()
4216 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
4222 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
4224 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
4250 return t4_early_init(adap, adap->pf); in adap_init1()
4268 * them) but need to be explicitly set if we're using hard-coded
4272 * Configuration Files and hard-coded initialization ...
4277 * Fix up various Host-Dependent Parameters like Page Size, Cache in adap_init0_tweaks()
4287 dev_err(&adapter->pdev->dev, in adap_init0_tweaks()
4306 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
4369 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
4382 phy_info = find_phy_info(adap->pdev->device); in adap_init0_phy()
4384 dev_warn(adap->pdev_dev, in adap_init0_phy()
4386 return -EOPNOTSUPP; in adap_init0_phy()
4394 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file, in adap_init0_phy()
4395 adap->pdev_dev); in adap_init0_phy()
4403 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " in adap_init0_phy()
4405 phy_info->phy_fw_file, -ret); in adap_init0_phy()
4406 if (phy_info->phy_flash) { in adap_init0_phy()
4410 dev_warn(adap->pdev_dev, "continuing with, on-adapter " in adap_init0_phy()
4420 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, in adap_init0_phy()
4421 (u8 *)phyf->data, phyf->size); in adap_init0_phy()
4423 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", in adap_init0_phy()
4424 -ret); in adap_init0_phy()
4428 if (phy_info->phy_fw_version) in adap_init0_phy()
4429 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data, in adap_init0_phy()
4430 phyf->size); in adap_init0_phy()
4431 dev_info(adap->pdev_dev, "Successfully transferred PHY " in adap_init0_phy()
4433 phy_info->phy_fw_file, new_phy_fw_ver); in adap_init0_phy()
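/* Note (illustrative): request_firmware_direct() is used above instead of
 * request_firmware() so a missing PHY image in /lib/firmware neither falls
 * back to the user-mode helper nor warns loudly; if the image is absent and
 * the PHY carries its own flash (phy_info->phy_flash), the driver simply
 * continues with the firmware already present on the adapter.
 */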
4459 ret = t4_fw_reset(adapter, adapter->mbox, in adap_init0_config()
4465 /* If this is a 10Gb/s-BT adapter make sure the chip-external in adap_init0_config()
4466 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs in adap_init0_config()
4470 if (is_10gbt_device(adapter->pdev->device)) { in adap_init0_config()
4480 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { in adap_init0_config()
4491 dev_err(adapter->pdev_dev, "Device %d is not supported\n", in adap_init0_config()
4492 adapter->pdev->device); in adap_init0_config()
4493 ret = -EINVAL; in adap_init0_config()
4497 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); in adap_init0_config()
4509 if (cf->size >= FLASH_CFG_MAX_SIZE) in adap_init0_config()
4510 ret = -ENOMEM; in adap_init0_config()
4514 ret = t4_query_params(adapter, adapter->mbox, in adap_init0_config()
4515 adapter->pf, 0, 1, params, val); in adap_init0_config()
4527 size_t resid = cf->size & 0x3; in adap_init0_config()
4528 size_t size = cf->size & ~0x3; in adap_init0_config()
4529 __be32 *data = (__be32 *)cf->data; in adap_init0_config()
4534 spin_lock(&adapter->win0_lock); in adap_init0_config()
4552 spin_unlock(&adapter->win0_lock); in adap_init0_config()
4568 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, in adap_init0_config()
4575 dev_warn(adapter->pdev_dev, in adap_init0_config()
4595 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0_config()
4604 if (ret == -ENOENT) { in adap_init0_config()
4611 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, in adap_init0_config()
4624 dev_warn(adapter->pdev_dev, "Configuration File checksum "\ in adap_init0_config()
4636 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0_config()
4652 dev_err(adapter->pdev_dev, in adap_init0_config()
4655 if (is_t6(adapter->params.chip)) { in adap_init0_config()
4659 dev_info(adapter->pdev_dev, "Successfully enabled " in adap_init0_config()
4667 ret = t4_fw_initialize(adapter, adapter->mbox); in adap_init0_config()
4674 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ in adap_init0_config()
4685 if (config_issued && ret != -ENOENT) in adap_init0_config()
4686 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", in adap_init0_config()
4687 config_name, -ret); in adap_init0_config()
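/* Note (illustrative): adap_init0_config() copies the chip-specific
 * Configuration File word by word into adapter memory through memory
 * window 0 (hence the win0_lock held around the copy above) and then asks
 * the firmware to apply it via a FW_CAPS_CONFIG_CMD mailbox command; the
 * final dev_info()/dev_warn() messages report which configuration source
 * was used and whether its checksum looked sane.
 */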
4769 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, in adap_init0()
4772 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
4776 if (ret == adap->mbox) in adap_init0()
4777 adap->flags |= CXGB4_MASTER_PF; in adap_init0()
4792 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
4802 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4804 dev_err(adap->pdev_dev, in adap_init0()
4806 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4807 return -EINVAL; in adap_init0()
4815 ret = -ENOMEM; in adap_init0()
4820 ret = request_firmware(&fw, fw_info->fw_mod_name, in adap_init0()
4821 adap->pdev_dev); in adap_init0()
4823 dev_err(adap->pdev_dev, in adap_init0()
4825 fw_info->fw_mod_name, ret); in adap_init0()
4827 fw_data = fw->data; in adap_init0()
4828 fw_size = fw->size; in adap_init0()
4849 dev_err(adap->pdev_dev, in adap_init0()
4852 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
4854 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
4856 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
4864 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in adap_init0()
4871 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
4881 if (ret == -ENOENT) { in adap_init0()
4882 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
4887 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
4888 "adapter, error %d\n", -ret); in adap_init0()
4899 dev_err(adap->pdev_dev, in adap_init0()
4915 ret = t4_get_vpd_params(adap, &adap->params.vpd); in adap_init0()
4927 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); in adap_init0()
4931 adap->params.nports = hweight32(port_vec); in adap_init0()
4932 adap->params.portvec = port_vec; in adap_init0()
4947 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4951 adap->sge.dbqtimer_tick = val[0]; in adap_init0()
4953 ARRAY_SIZE(adap->sge.dbqtimer_val), in adap_init0()
4954 adap->sge.dbqtimer_val); in adap_init0()
4958 adap->flags |= CXGB4_SGE_DBQ_TIMER; in adap_init0()
4960 if (is_bypass_device(adap->pdev->device)) in adap_init0()
4961 adap->params.bypass = 1; in adap_init0()
4972 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); in adap_init0()
4975 adap->sge.egr_start = val[0]; in adap_init0()
4976 adap->l2t_start = val[1]; in adap_init0()
4977 adap->l2t_end = val[2]; in adap_init0()
4978 adap->tids.ftid_base = val[3]; in adap_init0()
4979 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
4980 adap->sge.ingr_start = val[5]; in adap_init0()
4982 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { in adap_init0()
4985 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
4990 adap->tids.hpftid_base = val[0]; in adap_init0()
4991 adap->tids.nhpftids = val[1] - val[0] + 1; in adap_init0()
4998 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5001 adap->rawf_start = val[0]; in adap_init0()
5002 adap->rawf_cnt = val[1] - val[0] + 1; in adap_init0()
5005 adap->tids.tid_base = in adap_init0()
5017 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5020 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
5021 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
5023 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
5024 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
5025 if (!adap->sge.egr_map) { in adap_init0()
5026 ret = -ENOMEM; in adap_init0()
5030 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
5031 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
5032 if (!adap->sge.ingr_map) { in adap_init0()
5033 ret = -ENOMEM; in adap_init0()
5040 adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5041 if (!adap->sge.starving_fl) { in adap_init0()
5042 ret = -ENOMEM; in adap_init0()
5046 adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5047 if (!adap->sge.txq_maperr) { in adap_init0()
5048 ret = -ENOMEM; in adap_init0()
5053 adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5054 if (!adap->sge.blocked_fl) { in adap_init0()
5055 ret = -ENOMEM; in adap_init0()
5062 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5065 adap->clipt_start = val[0]; in adap_init0()
5066 adap->clipt_end = val[1]; in adap_init0()
5070 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5076 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; in adap_init0()
5078 adap->params.nsched_cls = val[0]; in adap_init0()
5084 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5089 adap->flags |= CXGB4_FW_OFLD_CONN; in adap_init0()
5090 adap->tids.aftid_base = val[0]; in adap_init0()
5091 adap->tids.aftid_end = val[1]; in adap_init0()
5101 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5109 if (is_t4(adap->params.chip)) { in adap_init0()
5110 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
5113 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5115 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
5120 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5122 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5125 if (is_t4(adap->params.chip)) { in adap_init0()
5126 adap->params.filter2_wr_support = false; in adap_init0()
5129 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5131 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5139 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5141 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); in adap_init0()
5151 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
5161 adap->params.offload = 1; in adap_init0()
5166 /* query offload-related parameters */ in adap_init0()
5173 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5177 adap->tids.ntids = val[0]; in adap_init0()
5178 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
5179 adap->tids.stid_base = val[1]; in adap_init0()
5180 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
5190 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
5191 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
5192 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5193 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
5194 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5195 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
5196 adap->tids.ftid_base; in adap_init0()
5198 adap->vres.ddp.start = val[3]; in adap_init0()
5199 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
5200 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
5205 adap->num_ofld_uld += 1; in adap_init0()
5211 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5214 adap->tids.eotid_base = val[0]; in adap_init0()
5215 adap->tids.neotids = min_t(u32, MAX_ATIDS, in adap_init0()
5216 val[1] - val[0] + 1); in adap_init0()
5217 adap->params.ethofld = 1; in adap_init0()
5228 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5232 adap->vres.stag.start = val[0]; in adap_init0()
5233 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
5234 adap->vres.rq.start = val[2]; in adap_init0()
5235 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
5236 adap->vres.pbl.start = val[4]; in adap_init0()
5237 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
5241 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5244 adap->vres.srq.start = val[0]; in adap_init0()
5245 adap->vres.srq.size = val[1] - val[0] + 1; in adap_init0()
5247 if (adap->vres.srq.size) { in adap_init0()
5248 adap->srq = t4_init_srq(adap->vres.srq.size); in adap_init0()
5249 if (!adap->srq) in adap_init0()
5250 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n"); in adap_init0()
5259 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, in adap_init0()
5263 adap->vres.qp.start = val[0]; in adap_init0()
5264 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
5265 adap->vres.cq.start = val[2]; in adap_init0()
5266 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
5267 adap->vres.ocq.start = val[4]; in adap_init0()
5268 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
5272 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, in adap_init0()
5275 adap->params.max_ordird_qp = 8; in adap_init0()
5276 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
5279 adap->params.max_ordird_qp = val[0]; in adap_init0()
5280 adap->params.max_ird_adapter = val[1]; in adap_init0()
5282 dev_info(adap->pdev_dev, in adap_init0()
5284 adap->params.max_ordird_qp, in adap_init0()
5285 adap->params.max_ird_adapter); in adap_init0()
5289 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5291 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0); in adap_init0()
5295 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5297 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0); in adap_init0()
5298 adap->num_ofld_uld += 2; in adap_init0()
5303 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5307 adap->vres.iscsi.start = val[0]; in adap_init0()
5308 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
5309 if (is_t6(adap->params.chip)) { in adap_init0()
5312 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5315 adap->vres.ppod_edram.start = val[0]; in adap_init0()
5316 adap->vres.ppod_edram.size = in adap_init0()
5317 val[1] - val[0] + 1; in adap_init0()
5319 dev_info(adap->pdev_dev, in adap_init0()
5322 adap->vres.ppod_edram.size); in adap_init0()
5326 adap->num_ofld_uld += 2; in adap_init0()
5332 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5335 if (ret != -EINVAL) in adap_init0()
5338 adap->vres.ncrypto_fc = val[0]; in adap_init0()
5340 adap->num_ofld_uld += 1; in adap_init0()
5346 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5350 adap->vres.key.start = val[0]; in adap_init0()
5351 adap->vres.key.size = val[1] - val[0] + 1; in adap_init0()
5352 adap->num_uld += 1; in adap_init0()
5354 adap->params.crypto = ntohs(caps_cmd.cryptocaps); in adap_init0()
5362 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
5368 * a multiple of 8 +/- 4 bytes apart near this popular MTU. in adap_init0()
5373 * options are in use, then we have a 20-byte IP header and a in adap_init0()
5374 * 20-byte TCP header. In this case, a 1500-byte MSS would in adap_init0()
5375 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes in adap_init0()
5378 * is a multiple of 8. On the other hand, if 12-byte TCP Time in adap_init0()
5384 if (adap->params.mtus[i] == 1492) { in adap_init0()
5385 adap->params.mtus[i] = 1488; in adap_init0()
5389 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
5390 adap->params.b_wnd); in adap_init0()
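/* Worked example for the 1492 -> 1488 substitution above, assuming the plain
 * 20-byte IPv4 + 20-byte TCP headers from the comment: an MTU of 1492 leaves
 * 1492 - 40 = 1452 payload bytes, which is not a multiple of 8, whereas 1488
 * leaves 1488 - 40 = 1448 = 181 * 8 bytes, matching the alignment the table
 * tweak is aiming for.
 */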
5393 adap->flags |= CXGB4_FW_OK; in adap_init0()
5404 kfree(adap->sge.egr_map); in adap_init0()
5405 kfree(adap->sge.ingr_map); in adap_init0()
5406 bitmap_free(adap->sge.starving_fl); in adap_init0()
5407 bitmap_free(adap->sge.txq_maperr); in adap_init0()
5409 bitmap_free(adap->sge.blocked_fl); in adap_init0()
5411 if (ret != -ETIMEDOUT && ret != -EIO) in adap_init0()
5412 t4_fw_bye(adap, adap->mbox); in adap_init0()
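/* Note (illustrative): the long parameter-query cascade above follows one
 * pattern throughout: fetch a [start, end] pair with t4_query_params() and
 * store the range size as end - start + 1 (TIDs, L2T, CLIP and the RDMA,
 * iSCSI and crypto memory regions alike). Optional features are detected by
 * whether the query succeeds with a non-zero value, as in the
 * ulptx_memwrite_dsgl and filter2_wr_support probes.
 */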
5428 adap->flags &= ~CXGB4_FW_OK; in eeh_err_detected()
5430 spin_lock(&adap->stats_lock); in eeh_err_detected()
5432 struct net_device *dev = adap->port[i]; in eeh_err_detected()
5438 spin_unlock(&adap->stats_lock); in eeh_err_detected()
5440 if (adap->flags & CXGB4_FULL_INIT_DONE) in eeh_err_detected()
5443 if ((adap->flags & CXGB4_DEV_ENABLED)) { in eeh_err_detected()
5445 adap->flags &= ~CXGB4_DEV_ENABLED; in eeh_err_detected()
5463 if (!(adap->flags & CXGB4_DEV_ENABLED)) { in eeh_slot_reset()
5465 dev_err(&pdev->dev, "Cannot reenable PCI " in eeh_slot_reset()
5469 adap->flags |= CXGB4_DEV_ENABLED; in eeh_slot_reset()
5476 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
5478 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
5480 adap->flags |= CXGB4_FW_OK; in eeh_slot_reset()
5488 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1, in eeh_slot_reset()
5492 pi->viid = ret; in eeh_slot_reset()
5493 pi->xact_addr_filt = -1; in eeh_slot_reset()
5497 if (adap->params.viid_smt_extn_support) { in eeh_slot_reset()
5498 pi->vivld = vivld; in eeh_slot_reset()
5499 pi->vin = vin; in eeh_slot_reset()
5502 pi->vivld = FW_VIID_VIVLD_G(pi->viid); in eeh_slot_reset()
5503 pi->vin = FW_VIID_VIN_G(pi->viid); in eeh_slot_reset()
5507 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
5508 adap->params.b_wnd); in eeh_slot_reset()
5525 struct net_device *dev = adap->port[i]; in eeh_resume()
5542 if (adapter->pf != 4) in eeh_reset_prepare()
5545 adapter->flags &= ~CXGB4_FW_OK; in eeh_reset_prepare()
5550 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in eeh_reset_prepare()
5551 cxgb_close(adapter->port[i]); in eeh_reset_prepare()
5558 if (adapter->flags & CXGB4_FULL_INIT_DONE) in eeh_reset_prepare()
5567 if (adapter->pf != 4) in eeh_reset_done()
5570 err = t4_wait_dev_ready(adapter->regs); in eeh_reset_done()
5572 dev_err(adapter->pdev_dev, in eeh_reset_done()
5581 dev_err(adapter->pdev_dev, in eeh_reset_done()
5588 if (adapter->flags & CXGB4_FW_OK) { in eeh_reset_done()
5589 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0); in eeh_reset_done()
5591 dev_err(adapter->pdev_dev, in eeh_reset_done()
5599 dev_err(adapter->pdev_dev, in eeh_reset_done()
5608 dev_err(adapter->pdev_dev, in eeh_reset_done()
5614 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in eeh_reset_done()
5615 cxgb_open(adapter->port[i]); in eeh_reset_done()
5633 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); in is_x_10g_port()
5649 struct sge *s = &adap->sge; in cfg_queues()
5655 adap->params.offload = 0; in cfg_queues()
5656 adap->params.crypto = 0; in cfg_queues()
5657 adap->params.ethofld = 0; in cfg_queues()
5672 niqflint = adap->params.pfres.niqflint - 1; in cfg_queues()
5673 if (!(adap->flags & CXGB4_USING_MSIX)) in cfg_queues()
5674 niqflint--; in cfg_queues()
5675 neq = adap->params.pfres.neq / 2; in cfg_queues()
5678 if (avail_qsets < adap->params.nports) { in cfg_queues()
5679 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", in cfg_queues()
5680 avail_qsets, adap->params.nports); in cfg_queues()
5681 return -ENOMEM; in cfg_queues()
5686 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
5690 /* We default to 1 queue per non-10G port and up to # of cores queues in cfg_queues()
5694 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; in cfg_queues()
5699 * own TX Queue in order to prevent Head-Of-Line Blocking. in cfg_queues()
5702 if (adap->params.nports * 8 > avail_eth_qsets) { in cfg_queues()
5703 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", in cfg_queues()
5704 avail_eth_qsets, adap->params.nports * 8); in cfg_queues()
5705 return -ENOMEM; in cfg_queues()
5708 if (adap->params.nports * ncpus < avail_eth_qsets) in cfg_queues()
5714 (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) in cfg_queues()
5715 q10g--; in cfg_queues()
5729 pi->first_qset = qidx; in cfg_queues()
5730 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g; in cfg_queues()
5731 qidx += pi->nqsets; in cfg_queues()
5734 s->ethqsets = qidx; in cfg_queues()
5735 s->max_ethqsets = qidx; /* MSI-X may lower it later */ in cfg_queues()
5736 avail_qsets -= qidx; in cfg_queues()
5743 num_ulds = adap->num_uld + adap->num_ofld_uld; in cfg_queues()
5745 avail_uld_qsets = roundup(i, adap->params.nports); in cfg_queues()
5746 if (avail_qsets < num_ulds * adap->params.nports) { in cfg_queues()
5747 adap->params.offload = 0; in cfg_queues()
5748 adap->params.crypto = 0; in cfg_queues()
5749 s->ofldqsets = 0; in cfg_queues()
5751 s->ofldqsets = adap->params.nports; in cfg_queues()
5753 s->ofldqsets = avail_uld_qsets; in cfg_queues()
5756 avail_qsets -= num_ulds * s->ofldqsets; in cfg_queues()
5763 if (avail_qsets < s->max_ethqsets) { in cfg_queues()
5764 adap->params.ethofld = 0; in cfg_queues()
5765 s->eoqsets = 0; in cfg_queues()
5767 s->eoqsets = s->max_ethqsets; in cfg_queues()
5769 avail_qsets -= s->eoqsets; in cfg_queues()
5777 if (avail_qsets >= s->max_ethqsets) in cfg_queues()
5778 s->mirrorqsets = s->max_ethqsets; in cfg_queues()
5779 else if (avail_qsets >= adap->params.nports) in cfg_queues()
5780 s->mirrorqsets = adap->params.nports; in cfg_queues()
5782 s->mirrorqsets = 0; in cfg_queues()
5783 avail_qsets -= s->mirrorqsets; in cfg_queues()
5785 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { in cfg_queues()
5786 struct sge_eth_rxq *r = &s->ethrxq[i]; in cfg_queues()
5788 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
5789 r->fl.size = 72; in cfg_queues()
5792 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) in cfg_queues()
5793 s->ethtxq[i].q.size = 1024; in cfg_queues()
5795 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) in cfg_queues()
5796 s->ctrlq[i].q.size = 512; in cfg_queues()
5798 if (!is_t4(adap->params.chip)) in cfg_queues()
5799 s->ptptxq.q.size = 8; in cfg_queues()
5801 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
5802 init_rspq(adap, &s->intrq, 0, 1, 512, 64); in cfg_queues()
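/* Worked example (hypothetical numbers) for the queue-set split above: with
 * 4 ports of which n10g = 2 are 10G or faster and avail_eth_qsets = 32, each
 * slower port defaults to q1g = 1 queue set (raised toward the CPU count
 * when enough sets are free, per the check above) and the fast ports share
 * the rest: q10g = (32 - (4 - 2)) / 2 = 15, trimmed further if needed by the
 * while loop above; a DCB build instead reserves 8 queue sets per port, one
 * per traffic priority.
 */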
5816 while (n < adap->sge.ethqsets) in reduce_ethqs()
5819 if (pi->nqsets > 1) { in reduce_ethqs()
5820 pi->nqsets--; in reduce_ethqs()
5821 adap->sge.ethqsets--; in reduce_ethqs()
5822 if (adap->sge.ethqsets <= n) in reduce_ethqs()
5830 pi->first_qset = n; in reduce_ethqs()
5831 n += pi->nqsets; in reduce_ethqs()
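/* Note (illustrative): reduce_ethqs() trims one queue set at a time,
 * round-robin across the ports that still have more than one, until the
 * total fits within n, and then recomputes every port's first_qset offset so
 * the per-port queue ranges stay contiguous.
 */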
5841 return -ENOMEM; in alloc_msix_info()
5843 adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL); in alloc_msix_info()
5844 if (!adap->msix_bmap.msix_bmap) { in alloc_msix_info()
5846 return -ENOMEM; in alloc_msix_info()
5849 spin_lock_init(&adap->msix_bmap.lock); in alloc_msix_info()
5850 adap->msix_bmap.mapsize = num_vec; in alloc_msix_info()
5852 adap->msix_info = msix_info; in alloc_msix_info()
5858 bitmap_free(adap->msix_bmap.msix_bmap); in free_msix_info()
5859 kfree(adap->msix_info); in free_msix_info()
5864 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_get_msix_idx_from_bmap()
5868 spin_lock_irqsave(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5869 msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); in cxgb4_get_msix_idx_from_bmap()
5870 if (msix_idx < bmap->mapsize) { in cxgb4_get_msix_idx_from_bmap()
5871 __set_bit(msix_idx, bmap->msix_bmap); in cxgb4_get_msix_idx_from_bmap()
5873 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5874 return -ENOSPC; in cxgb4_get_msix_idx_from_bmap()
5877 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5884 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_free_msix_idx_in_bmap()
5887 spin_lock_irqsave(&bmap->lock, flags); in cxgb4_free_msix_idx_in_bmap()
5888 __clear_bit(msix_idx, bmap->msix_bmap); in cxgb4_free_msix_idx_in_bmap()
5889 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_free_msix_idx_in_bmap()
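/* Illustrative sketch (hypothetical caller, not driver code): the bitmap
 * helpers above hand out and reclaim MSI-X slot indices; a queue setup path
 * pairs them roughly like this:
 */
static inline int example_claim_msix_slot(struct adapter *adap)
{
	int idx = cxgb4_get_msix_idx_from_bmap(adap);

	if (idx < 0)
		return idx;	/* -ENOSPC: every vector is already claimed */

	/* ... request_irq(adap->msix_info[idx].vec, ...) would go here ... */

	cxgb4_free_msix_idx_in_bmap(adap, idx);	/* release when done */
	return 0;
}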
5892 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5899 u8 num_uld = 0, nchan = adap->params.nports; in enable_msix()
5901 struct sge *s = &adap->sge; in enable_msix()
5906 want = s->max_ethqsets; in enable_msix()
5917 num_uld = adap->num_ofld_uld + adap->num_uld; in enable_msix()
5918 want += num_uld * s->ofldqsets; in enable_msix()
5924 want += s->eoqsets; in enable_msix()
5929 if (s->mirrorqsets) { in enable_msix()
5930 want += s->mirrorqsets; in enable_msix()
5940 return -ENOMEM; in enable_msix()
5945 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
5950 want = s->max_ethqsets + EXTRA_VECS; in enable_msix()
5952 allocated = pci_enable_msix_range(adap->pdev, entries, in enable_msix()
5955 dev_info(adap->pdev_dev, in enable_msix()
5956 "Disabling MSI-X due to insufficient MSI-X vectors\n"); in enable_msix()
5961 dev_info(adap->pdev_dev, in enable_msix()
5962 "Disabling offload due to insufficient MSI-X vectors\n"); in enable_msix()
5963 adap->params.offload = 0; in enable_msix()
5964 adap->params.crypto = 0; in enable_msix()
5965 adap->params.ethofld = 0; in enable_msix()
5966 s->ofldqsets = 0; in enable_msix()
5967 s->eoqsets = 0; in enable_msix()
5968 s->mirrorqsets = 0; in enable_msix()
5985 if (s->mirrorqsets) in enable_msix()
5988 num_vec -= need; in enable_msix()
5991 ethqsets > s->max_ethqsets) in enable_msix()
5996 if (pi->nqsets < 2) in enable_msix()
6000 num_vec--; in enable_msix()
6003 num_vec--; in enable_msix()
6011 ofldqsets > s->ofldqsets) in enable_msix()
6015 num_vec -= uld_need; in enable_msix()
6019 if (s->mirrorqsets) { in enable_msix()
6022 mirrorqsets > s->mirrorqsets) in enable_msix()
6026 num_vec -= mirror_need; in enable_msix()
6030 ethqsets = s->max_ethqsets; in enable_msix()
6032 ofldqsets = s->ofldqsets; in enable_msix()
6034 eoqsets = s->eoqsets; in enable_msix()
6035 if (s->mirrorqsets) in enable_msix()
6036 mirrorqsets = s->mirrorqsets; in enable_msix()
6039 if (ethqsets < s->max_ethqsets) { in enable_msix()
6040 s->max_ethqsets = ethqsets; in enable_msix()
6045 s->ofldqsets = ofldqsets; in enable_msix()
6046 s->nqs_per_uld = s->ofldqsets; in enable_msix()
6050 s->eoqsets = eoqsets; in enable_msix()
6052 if (s->mirrorqsets) { in enable_msix()
6053 s->mirrorqsets = mirrorqsets; in enable_msix()
6056 pi->nmirrorqsets = s->mirrorqsets / nchan; in enable_msix()
6057 mutex_init(&pi->vi_mirror_mutex); in enable_msix()
6067 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
6068 adap->msix_info[i].idx = i; in enable_msix()
6071 dev_info(adap->pdev_dev, in enable_msix()
6072 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n", in enable_msix()
6073 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld, in enable_msix()
6074 s->mirrorqsets); in enable_msix()
6080 pci_disable_msix(adap->pdev); in enable_msix()
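/* Worked example (hypothetical counts) for the MSI-X budgeting above: a
 * 2-port NIC-only fallback with max_ethqsets = 16 requests
 * want = 16 + EXTRA_VECS = 18 vectors, the two extras covering the firmware
 * event queue and non-data interrupts per the comment above. ULD, ETHOFLD
 * and mirror queue sets add their own want/need on top, and whatever
 * pci_enable_msix_range() actually grants is redistributed to those groups
 * in the same order.
 */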
6094 err = t4_init_rss_mode(adap, adap->mbox); in init_rss()
6101 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); in init_rss()
6102 if (!pi->rss) in init_rss()
6103 return -ENOMEM; in init_rss()
6115 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", in print_adapter_info()
6117 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" : in print_adapter_info()
6118 (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""), in print_adapter_info()
6119 is_offload(adapter) ? "Offload" : "non-Offload"); in print_adapter_info()
6127 const struct adapter *adap = pi->adapter; in print_port_info()
6129 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) in print_port_info()
6131 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) in print_port_info()
6133 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) in print_port_info()
6135 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) in print_port_info()
6137 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) in print_port_info()
6139 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) in print_port_info()
6141 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) in print_port_info()
6143 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G) in print_port_info()
6145 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G) in print_port_info()
6148 --bufp; in print_port_info()
6149 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); in print_port_info()
6151 netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf); in print_port_info()
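/* Note (illustrative): each supported speed appends a "<speed>/" token to
 * buf, and the --bufp above backs up over the trailing '/' so the closing
 * sprintf() overwrites it, yielding strings such as "1G/10G/25GBASE-SFP28",
 * with the suffix supplied by t4_get_port_type_description().
 */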
6156 * - memory used for tables
6157 * - MSI/MSI-X
6158 * - net devices
6159 * - resources FW is holding for us
6165 kvfree(adapter->smt); in free_some_resources()
6166 kvfree(adapter->l2t); in free_some_resources()
6167 kvfree(adapter->srq); in free_some_resources()
6169 kvfree(adapter->tids.tid_tab); in free_some_resources()
6175 kfree(adapter->sge.egr_map); in free_some_resources()
6176 kfree(adapter->sge.ingr_map); in free_some_resources()
6177 bitmap_free(adapter->sge.starving_fl); in free_some_resources()
6178 bitmap_free(adapter->sge.txq_maperr); in free_some_resources()
6180 bitmap_free(adapter->sge.blocked_fl); in free_some_resources()
6185 if (adapter->port[i]) { in free_some_resources()
6188 if (pi->viid != 0) in free_some_resources()
6189 t4_free_vi(adapter, adapter->mbox, adapter->pf, in free_some_resources()
6190 0, pi->viid); in free_some_resources()
6191 kfree(adap2pinfo(adapter, i)->rss); in free_some_resources()
6192 free_netdev(adapter->port[i]); in free_some_resources()
6194 if (adapter->flags & CXGB4_FW_OK) in free_some_resources()
6195 t4_fw_bye(adapter, adapter->pf); in free_some_resources()
6218 return -EINVAL; in t4_get_chip_type()
6224 dev->type = ARPHRD_NONE; in cxgb4_mgmt_setup()
6225 dev->mtu = 0; in cxgb4_mgmt_setup()
6226 dev->hard_header_len = 0; in cxgb4_mgmt_setup()
6227 dev->addr_len = 0; in cxgb4_mgmt_setup()
6228 dev->tx_queue_len = 0; in cxgb4_mgmt_setup()
6229 dev->flags |= IFF_NOARP; in cxgb4_mgmt_setup()
6230 dev->priv_flags |= IFF_NO_QUEUE; in cxgb4_mgmt_setup()
6233 dev->netdev_ops = &cxgb4_mgmt_netdev_ops; in cxgb4_mgmt_setup()
6234 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; in cxgb4_mgmt_setup()
6244 pcie_fw = readl(adap->regs + PCIE_FW_A); in cxgb4_iov_configure()
6247 dev_warn(&pdev->dev, "Device not initialized\n"); in cxgb4_iov_configure()
6248 return -EOPNOTSUPP; in cxgb4_iov_configure()
6255 dev_err(&pdev->dev, in cxgb4_iov_configure()
6256 "Cannot modify SR-IOV while VFs are assigned\n"); in cxgb4_iov_configure()
6259 /* Note that the upper-level code ensures that we're never called with in cxgb4_iov_configure()
6260 * a non-zero "num_vfs" when we already have VFs instantiated. But in cxgb4_iov_configure()
6264 return -EBUSY; in cxgb4_iov_configure()
6274 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6275 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6276 adap->port[0] = NULL; in cxgb4_iov_configure()
6279 adap->num_vfs = 0; in cxgb4_iov_configure()
6280 kfree(adap->vfinfo); in cxgb4_iov_configure()
6281 adap->vfinfo = NULL; in cxgb4_iov_configure()
6296 * parent bridge's PCI-E needs to support Alternative Routing in cxgb4_iov_configure()
6300 pbridge = pdev->bus->self; in cxgb4_iov_configure()
6310 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n", in cxgb4_iov_configure()
6311 pbridge->bus->number, PCI_SLOT(pbridge->devfn), in cxgb4_iov_configure()
6312 PCI_FUNC(pbridge->devfn)); in cxgb4_iov_configure()
6313 return -ENOTSUPP; in cxgb4_iov_configure()
6319 FW_PFVF_CMD_PFN_V(adap->pf) | in cxgb4_iov_configure()
6322 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), in cxgb4_iov_configure()
6327 port = ffs(pmask) - 1; in cxgb4_iov_configure()
6329 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx, in cxgb4_iov_configure()
6330 adap->pf); in cxgb4_iov_configure()
6334 return -ENOMEM; in cxgb4_iov_configure()
6337 pi->adapter = adap; in cxgb4_iov_configure()
6338 pi->lport = port; in cxgb4_iov_configure()
6339 pi->tx_chan = port; in cxgb4_iov_configure()
6340 SET_NETDEV_DEV(netdev, &pdev->dev); in cxgb4_iov_configure()
6342 adap->port[0] = netdev; in cxgb4_iov_configure()
6343 pi->port_id = 0; in cxgb4_iov_configure()
6345 err = register_netdev(adap->port[0]); in cxgb4_iov_configure()
6348 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6349 adap->port[0] = NULL; in cxgb4_iov_configure()
6353 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev), in cxgb4_iov_configure()
6355 if (!adap->vfinfo) { in cxgb4_iov_configure()
6356 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6357 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6358 adap->port[0] = NULL; in cxgb4_iov_configure()
6359 return -ENOMEM; in cxgb4_iov_configure()
6368 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6369 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6370 adap->port[0] = NULL; in cxgb4_iov_configure()
6371 kfree(adap->vfinfo); in cxgb4_iov_configure()
6372 adap->vfinfo = NULL; in cxgb4_iov_configure()
6377 adap->num_vfs = num_vfs; in cxgb4_iov_configure()
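/* Note (illustrative): cxgb4_iov_configure() is the PCI .sriov_configure
 * hook, so VF creation and teardown are normally driven through sysfs; the
 * bus address below is an example only:
 *
 *	echo 4 > /sys/bus/pci/devices/0000:02:00.4/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:02:00.4/sriov_numvfs
 *
 * A count of 0 takes the teardown path above (unregistering the "mgmtpf"
 * netdev and freeing vfinfo); a non-zero count registers the management
 * netdev, allocates the vfinfo array and then enables SR-IOV.
 */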
6390 if (!adap->uld[CXGB4_ULD_KTLS].handle) { in chcr_offload_state()
6391 dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n"); in chcr_offload_state()
6392 return -EOPNOTSUPP; in chcr_offload_state()
6394 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) { in chcr_offload_state()
6395 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6397 return -EOPNOTSUPP; in chcr_offload_state()
6403 if (!adap->uld[CXGB4_ULD_IPSEC].handle) { in chcr_offload_state()
6404 dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n"); in chcr_offload_state()
6405 return -EOPNOTSUPP; in chcr_offload_state()
6407 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) { in chcr_offload_state()
6408 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6410 return -EOPNOTSUPP; in chcr_offload_state()
6415 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6417 return -EOPNOTSUPP; in chcr_offload_state()
6444 ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk, in cxgb4_ktls_dev_add()
6467 adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx, in cxgb4_ktls_dev_del()
6486 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_add_state()
6491 return -EBUSY; in cxgb4_xfrm_add_state()
6497 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack); in cxgb4_xfrm_add_state()
6507 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_del_state()
6510 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_del_state()
6517 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); in cxgb4_xfrm_del_state()
6525 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_free_state()
6528 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_free_state()
6535 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); in cxgb4_xfrm_free_state()
6543 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_ipsec_offload_ok()
6547 dev_dbg(adap->pdev_dev, in cxgb4_ipsec_offload_ok()
6554 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); in cxgb4_ipsec_offload_ok()
6563 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_advance_esn_state()
6565 if (x->xso.dir != XFRM_DEV_OFFLOAD_IN) in cxgb4_advance_esn_state()
6569 dev_dbg(adap->pdev_dev, in cxgb4_advance_esn_state()
6576 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x); in cxgb4_advance_esn_state()
6609 dev_info(&pdev->dev, "cannot obtain PCI resources\n"); in init_one()
6615 dev_err(&pdev->dev, "cannot enable PCI device\n"); in init_one()
6621 dev_err(&pdev->dev, "cannot map device registers\n"); in init_one()
6622 err = -ENOMEM; in init_one()
6628 err = -ENOMEM; in init_one()
6632 adapter->regs = regs; in init_one()
6642 dev_err(&pdev->dev, "Device %d is not supported\n", device_id); in init_one()
6650 adapter->pdev = pdev; in init_one()
6651 adapter->pdev_dev = &pdev->dev; in init_one()
6652 adapter->name = pci_name(pdev); in init_one()
6653 adapter->mbox = func; in init_one()
6654 adapter->pf = func; in init_one()
6655 adapter->params.chip = chip; in init_one()
6656 adapter->adap_idx = adap_idx; in init_one()
6657 adapter->msg_enable = DFLT_MSG_ENABLE; in init_one()
6658 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + in init_one()
6662 if (!adapter->mbox_log) { in init_one()
6663 err = -ENOMEM; in init_one()
6666 spin_lock_init(&adapter->mbox_lock); in init_one()
6667 INIT_LIST_HEAD(&adapter->mlist.list); in init_one()
6668 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; in init_one()
6671 if (func != ent->driver_data) { in init_one()
6673 pci_save_state(pdev); /* to restore SR-IOV later */ in init_one()
6677 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in init_one()
6679 dev_err(&pdev->dev, "no usable DMA configuration\n"); in init_one()
6686 adapter->workq = create_singlethread_workqueue("cxgb4"); in init_one()
6687 if (!adapter->workq) { in init_one()
6688 err = -ENOMEM; in init_one()
6693 adapter->flags |= CXGB4_DEV_ENABLED; in init_one()
6694 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); in init_one()
6711 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING; in init_one()
6713 spin_lock_init(&adapter->stats_lock); in init_one()
6714 spin_lock_init(&adapter->tid_release_lock); in init_one()
6715 spin_lock_init(&adapter->win0_lock); in init_one()
6717 INIT_WORK(&adapter->tid_release_task, process_tid_release_list); in init_one()
6718 INIT_WORK(&adapter->db_full_task, process_db_full); in init_one()
6719 INIT_WORK(&adapter->db_drop_task, process_db_drop); in init_one()
6720 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err); in init_one()
6730 dev_warn(adapter->pdev_dev, in init_one()
6737 if (!is_t4(adapter->params.chip)) { in init_one()
6739 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * in init_one()
6740 adapter->pf); in init_one()
6751 dev_err(&pdev->dev, in init_one()
6753 err = -EINVAL; in init_one()
6756 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), in init_one()
6758 if (!adapter->bar2) { in init_one()
6759 dev_err(&pdev->dev, "cannot map device bar2 region\n"); in init_one()
6760 err = -ENOMEM; in init_one()
6773 if (!is_t4(adapter->params.chip)) in init_one()
6775 (is_t5(adapter->params.chip) ? STATMODE_V(0) : in init_one()
6779 INIT_LIST_HEAD(&adapter->mac_hlist); in init_one()
6791 err = -ENOMEM; in init_one()
6795 SET_NETDEV_DEV(netdev, &pdev->dev); in init_one()
6797 adapter->port[i] = netdev; in init_one()
6799 pi->adapter = adapter; in init_one()
6800 pi->xact_addr_filt = -1; in init_one()
6801 pi->port_id = i; in init_one()
6802 netdev->irq = pdev->irq; in init_one()
6804 netdev->hw_features = NETIF_F_SG | TSO_FLAGS | in init_one()
6811 netdev->hw_enc_features |= NETIF_F_IP_CSUM | in init_one()
6818 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | in init_one()
6822 if (adapter->rawf_cnt) in init_one()
6823 netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels; in init_one()
6826 netdev->features |= netdev->hw_features; in init_one()
6827 netdev->vlan_features = netdev->features & VLAN_FEAT; in init_one()
6829 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) { in init_one()
6830 netdev->hw_features |= NETIF_F_HW_TLS_TX; in init_one()
6831 netdev->tlsdev_ops = &cxgb4_ktls_ops; in init_one()
6833 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0); in init_one()
6837 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) { in init_one()
6838 netdev->hw_enc_features |= NETIF_F_HW_ESP; in init_one()
6839 netdev->features |= NETIF_F_HW_ESP; in init_one()
6840 netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops; in init_one()
6844 netdev->priv_flags |= IFF_UNICAST_FLT; in init_one()
6846 /* MTU range: 81 - 9600 */ in init_one()
6847 netdev->min_mtu = 81; /* accommodate SACK */ in init_one()
6848 netdev->max_mtu = MAX_MTU; in init_one()
6850 netdev->netdev_ops = &cxgb4_netdev_ops; in init_one()
6852 netdev->dcbnl_ops = &cxgb4_dcb_ops; in init_one()
6863 if (adapter->flags & CXGB4_FW_OK) { in init_one()
6867 } else if (adapter->params.nports == 1) { in init_one()
6868 /* If we don't have a connection to the firmware -- possibly in init_one()
6869 * because of an error -- grab the raw VPD parameters so we in init_one()
6874 u8 *na = adapter->params.vpd.na; in init_one()
6876 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd); in init_one()
6885 if (!(adapter->flags & CXGB4_FW_OK)) in init_one()
6895 adapter->smt = t4_init_smt(); in init_one()
6896 if (!adapter->smt) { in init_one()
6898 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n"); in init_one()
6901 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end); in init_one()
6902 if (!adapter->l2t) { in init_one()
6904 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); in init_one()
6905 adapter->params.offload = 0; in init_one()
6914 dev_warn(&pdev->dev, in init_one()
6916 adapter->params.offload = 0; in init_one()
6918 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, in init_one()
6919 adapter->clipt_end); in init_one()
6920 if (!adapter->clipt) { in init_one()
6924 dev_warn(&pdev->dev, in init_one()
6926 adapter->params.offload = 0; in init_one()
6933 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls); in init_one()
6934 if (!pi->sched_tbl) in init_one()
6935 dev_warn(&pdev->dev, in init_one()
6946 adapter->tids.nhash = 1 << HASHTIDSIZE_G(v); in init_one()
6948 adapter->tids.hash_base = v / 4; in init_one()
6950 adapter->tids.nhash = HASHTBLSIZE_G(v) << 3; in init_one()
6953 adapter->tids.hash_base = v; in init_one()
6958 if (tid_init(&adapter->tids) < 0) { in init_one()
6959 dev_warn(&pdev->dev, "could not allocate TID table, " in init_one()
6961 adapter->params.offload = 0; in init_one()
6963 adapter->tc_u32 = cxgb4_init_tc_u32(adapter); in init_one()
6964 if (!adapter->tc_u32) in init_one()
6965 dev_warn(&pdev->dev, in init_one()
6969 dev_warn(&pdev->dev, in init_one()
6973 dev_warn(&pdev->dev, in init_one()
6977 dev_warn(&pdev->dev, in init_one()
6980 dev_warn(&pdev->dev, in init_one()
6986 adapter->flags |= CXGB4_USING_MSIX; in init_one()
6988 adapter->flags |= CXGB4_USING_MSI; in init_one()
7004 dev_err(adapter->pdev_dev, in init_one()
7011 dev_err(adapter->pdev_dev, in init_one()
7025 adapter->port[i]->dev_port = pi->lport; in init_one()
7026 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); in init_one()
7027 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); in init_one()
7029 netif_carrier_off(adapter->port[i]); in init_one()
7031 err = register_netdev(adapter->port[i]); in init_one()
7034 adapter->chan_map[pi->tx_chan] = i; in init_one()
7035 print_port_info(adapter->port[i]); in init_one()
7038 dev_err(&pdev->dev, "could not register any net devices\n"); in init_one()
7042 dev_warn(&pdev->dev, "only %d net devices registered\n", i); in init_one()
7047 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), in init_one()
7053 pdev->needs_freset = 1; in init_one()
7058 if (!is_t4(adapter->params.chip)) in init_one()
7062 !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK)) in init_one()
7071 if (adapter->flags & CXGB4_USING_MSIX) in init_one()
7073 if (adapter->num_uld || adapter->num_ofld_uld) in init_one()
7076 if (!is_t4(adapter->params.chip)) in init_one()
7077 iounmap(adapter->bar2); in init_one()
7079 if (adapter->workq) in init_one()
7080 destroy_workqueue(adapter->workq); in init_one()
7082 kfree(adapter->mbox_log); in init_one()
7108 adapter->flags |= CXGB4_SHUTTING_DOWN; in remove_one()
7110 if (adapter->pf == 4) { in remove_one()
7113 /* Tear down per-adapter Work Queue first since it can contain in remove_one()
7116 destroy_workqueue(adapter->workq); in remove_one()
7121 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in remove_one()
7122 unregister_netdev(adapter->port[i]); in remove_one()
7132 debugfs_remove_recursive(adapter->debugfs_root); in remove_one()
7134 if (!is_t4(adapter->params.chip)) in remove_one()
7139 if (adapter->flags & CXGB4_FULL_INIT_DONE) in remove_one()
7142 if (adapter->flags & CXGB4_USING_MSIX) in remove_one()
7144 if (adapter->num_uld || adapter->num_ofld_uld) in remove_one()
7147 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, in remove_one()
7149 list_del(&entry->list); in remove_one()
7156 if (!is_t4(adapter->params.chip)) in remove_one()
7157 iounmap(adapter->bar2); in remove_one()
7161 cxgb4_iov_configure(adapter->pdev, 0); in remove_one()
7164 iounmap(adapter->regs); in remove_one()
7165 if ((adapter->flags & CXGB4_DEV_ENABLED)) { in remove_one()
7167 adapter->flags &= ~CXGB4_DEV_ENABLED; in remove_one()
7170 kfree(adapter->mbox_log); in remove_one()
7193 adapter->flags |= CXGB4_SHUTTING_DOWN; in shutdown_one()
7195 if (adapter->pf == 4) { in shutdown_one()
7199 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in shutdown_one()
7200 cxgb_close(adapter->port[i]); in shutdown_one()
7215 if (adapter->flags & CXGB4_FW_OK) in shutdown_one()
7216 t4_fw_bye(adapter, adapter->mbox); in shutdown_one()