Lines Matching +full:qman +full:- +full:fqd
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
3 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
29 #include <linux/dma-mapping.h>
35 #include <soc/fsl/qman.h>
47 static int debug = -1;
72 * The size in bytes of the ingress tail-drop threshold on FMan ports.
73 * Traffic piling up above this value will be rejected by QMan and discarded
87 * - avoiding the device staying congested for a prolonged time (risking
88 * the netdev watchdog to fire - see also the tx_timeout module param);
89 * - affecting performance of protocols such as TCP, which otherwise
91 * - preventing the Tx cores from tightly-looping (as if the congestion
93 * - running out of memory if the CS threshold is set too high.
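The congestion state (CS) thresholds discussed in this comment are programmed into a QMan Congestion Group Record. Below is a minimal sketch of that flow using the in-kernel <soc/fsl/qman.h> API, assuming a caller-chosen byte threshold cs_th and a placeholder notification callback; the example_/my_ names are illustrative, not the driver's own:

#include <linux/string.h>
#include <soc/fsl/qman.h>

/* Placeholder CSCN callback: invoked when the group enters/exits congestion. */
static void my_cgscn(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
	pr_info("CGR %u congestion state: %d\n", cgr->cgrid, congested);
}

/* Allocate a CGR id and arm a byte-count congestion state threshold on it. */
static int example_cgr_init(struct qman_cgr *cgr, u64 cs_th)
{
	struct qm_mcc_initcgr initcgr;
	int err;

	err = qman_alloc_cgrid(&cgr->cgrid);
	if (err < 0)
		return err;
	cgr->cb = my_cgscn;

	memset(&initcgr, 0, sizeof(initcgr));
	/* Enable congestion state change notifications and set the threshold */
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); /* round up */

	err = qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &initcgr);
	if (err < 0)
		qman_release_cgrid(cgr->cgrid);
	return err;
}

In the driver, the notification callback typically stops and wakes the Tx queues, as the dpaa_eth_cgscn() fragments further down in this listing show.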
101 /* Largest value that the FQD's OAL field can hold */
158 XDP_PACKET_HEADROOM - DPAA_HWA_SIZE)
160 #define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
189 ~(DPAA_A050385_ALIGN - 1))
199 (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
208 struct device *dev = net_dev->dev.parent; in dpaa_netdev_init()
209 struct mac_device *mac_dev = priv->mac_dev; in dpaa_netdev_init()
218 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); in dpaa_netdev_init()
219 percpu_priv->net_dev = net_dev; in dpaa_netdev_init()
222 net_dev->netdev_ops = dpaa_ops; in dpaa_netdev_init()
223 mac_addr = mac_dev->addr; in dpaa_netdev_init()
225 net_dev->mem_start = (unsigned long)priv->mac_dev->res->start; in dpaa_netdev_init()
226 net_dev->mem_end = (unsigned long)priv->mac_dev->res->end; in dpaa_netdev_init()
228 net_dev->min_mtu = ETH_MIN_MTU; in dpaa_netdev_init()
229 net_dev->max_mtu = dpaa_get_max_mtu(); in dpaa_netdev_init()
231 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in dpaa_netdev_init()
234 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; in dpaa_netdev_init()
238 net_dev->features |= NETIF_F_GSO; in dpaa_netdev_init()
239 net_dev->features |= NETIF_F_RXCSUM; in dpaa_netdev_init()
241 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in dpaa_netdev_init()
242 net_dev->lltx = true; in dpaa_netdev_init()
244 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; in dpaa_netdev_init()
246 net_dev->features |= net_dev->hw_features; in dpaa_netdev_init()
247 net_dev->vlan_features = net_dev->features; in dpaa_netdev_init()
249 net_dev->xdp_features = NETDEV_XDP_ACT_BASIC | in dpaa_netdev_init()
254 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); in dpaa_netdev_init()
258 err = mac_dev->change_addr(mac_dev->fman_mac, in dpaa_netdev_init()
259 (const enet_addr_t *)net_dev->dev_addr); in dpaa_netdev_init()
262 return -EINVAL; in dpaa_netdev_init()
265 net_dev->dev_addr); in dpaa_netdev_init()
268 net_dev->ethtool_ops = &dpaa_ethtool_ops; in dpaa_netdev_init()
270 net_dev->needed_headroom = priv->tx_headroom; in dpaa_netdev_init()
271 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); in dpaa_netdev_init()
274 mac_dev->phylink_config.dev = &net_dev->dev; in dpaa_netdev_init()
275 mac_dev->phylink_config.type = PHYLINK_NETDEV; in dpaa_netdev_init()
276 mac_dev->update_speed = dpaa_eth_cgr_set_speed; in dpaa_netdev_init()
277 mac_dev->phylink = phylink_create(&mac_dev->phylink_config, in dpaa_netdev_init()
278 dev_fwnode(mac_dev->dev), in dpaa_netdev_init()
279 mac_dev->phy_if, in dpaa_netdev_init()
280 mac_dev->phylink_ops); in dpaa_netdev_init()
281 if (IS_ERR(mac_dev->phylink)) { in dpaa_netdev_init()
282 err = PTR_ERR(mac_dev->phylink); in dpaa_netdev_init()
293 phylink_destroy(mac_dev->phylink); in dpaa_netdev_init()
308 mac_dev = priv->mac_dev; in dpaa_stop()
311 /* Allow the Fman (Tx) port to process in-flight frames before we in dpaa_stop()
316 phylink_stop(mac_dev->phylink); in dpaa_stop()
317 mac_dev->disable(mac_dev->fman_mac); in dpaa_stop()
319 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { in dpaa_stop()
320 error = fman_port_disable(mac_dev->port[i]); in dpaa_stop()
325 phylink_disconnect_phy(mac_dev->phylink); in dpaa_stop()
326 net_dev->phydev = NULL; in dpaa_stop()
339 percpu_priv = this_cpu_ptr(priv->percpu_priv); in dpaa_tx_timeout()
342 jiffies_to_msecs(jiffies - dev_trans_start(net_dev))); in dpaa_tx_timeout()
344 percpu_priv->stats.tx_errors++; in dpaa_tx_timeout()
361 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); in dpaa_get_stats64()
363 cpustats = (u64 *)&percpu_priv->stats; in dpaa_get_stats64()
381 return -EOPNOTSUPP; in dpaa_setup_tc()
383 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in dpaa_setup_tc()
384 num_tc = mqprio->num_tc; in dpaa_setup_tc()
386 if (num_tc == priv->num_tc) in dpaa_setup_tc()
397 return -EINVAL; in dpaa_setup_tc()
407 priv->num_tc = num_tc ? : 1; in dpaa_setup_tc()
408 netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc); in dpaa_setup_tc()
418 dpaa_dev = &pdev->dev; in dpaa_mac_dev_get()
419 eth_data = dpaa_dev->platform_data; in dpaa_mac_dev_get()
422 return ERR_PTR(-ENODEV); in dpaa_mac_dev_get()
424 mac_dev = eth_data->mac_dev; in dpaa_mac_dev_get()
427 return ERR_PTR(-EINVAL); in dpaa_mac_dev_get()
442 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN); in dpaa_set_mac_address()
450 mac_dev = priv->mac_dev; in dpaa_set_mac_address()
452 err = mac_dev->change_addr(mac_dev->fman_mac, in dpaa_set_mac_address()
453 (const enet_addr_t *)net_dev->dev_addr); in dpaa_set_mac_address()
455 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", in dpaa_set_mac_address()
470 return priv->mac_dev->add_hash_mac_addr(priv->mac_dev->fman_mac, in dpaa_addr_sync()
478 return priv->mac_dev->remove_hash_mac_addr(priv->mac_dev->fman_mac, in dpaa_addr_unsync()
489 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) { in dpaa_set_rx_mode()
490 priv->mac_dev->promisc = !priv->mac_dev->promisc; in dpaa_set_rx_mode()
491 err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac, in dpaa_set_rx_mode()
492 priv->mac_dev->promisc); in dpaa_set_rx_mode()
495 "mac_dev->set_promisc() = %d\n", in dpaa_set_rx_mode()
499 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) { in dpaa_set_rx_mode()
500 priv->mac_dev->allmulti = !priv->mac_dev->allmulti; in dpaa_set_rx_mode()
501 err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac, in dpaa_set_rx_mode()
502 priv->mac_dev->allmulti); in dpaa_set_rx_mode()
505 "mac_dev->set_allmulti() = %d\n", in dpaa_set_rx_mode()
527 refcount_inc(&dpaa_bp_array[bpid]->refs); in dpaa_bpid2pool_use()
538 refcount_set(&dpaa_bp->refs, 1); in dpaa_bpid2pool_map()
545 if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) { in dpaa_bp_alloc_pool()
548 return -EINVAL; in dpaa_bp_alloc_pool()
552 if (dpaa_bp->bpid != FSL_DPAA_BPID_INV && in dpaa_bp_alloc_pool()
553 dpaa_bpid2pool_use(dpaa_bp->bpid)) in dpaa_bp_alloc_pool()
556 if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) { in dpaa_bp_alloc_pool()
557 dpaa_bp->pool = bman_new_pool(); in dpaa_bp_alloc_pool()
558 if (!dpaa_bp->pool) { in dpaa_bp_alloc_pool()
561 return -ENODEV; in dpaa_bp_alloc_pool()
564 dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool); in dpaa_bp_alloc_pool()
567 if (dpaa_bp->seed_cb) { in dpaa_bp_alloc_pool()
568 err = dpaa_bp->seed_cb(dpaa_bp); in dpaa_bp_alloc_pool()
573 dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp); in dpaa_bp_alloc_pool()
579 bman_free_pool(dpaa_bp->pool); in dpaa_bp_alloc_pool()
594 ret = bman_acquire(bp->pool, bmb, num); in dpaa_bp_drain()
609 if (bp->free_buf_cb) in dpaa_bp_drain()
611 bp->free_buf_cb(bp, &bmb[i]); in dpaa_bp_drain()
617 struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid); in dpaa_bp_free()
626 if (!refcount_dec_and_test(&bp->refs)) in dpaa_bp_free()
629 if (bp->free_buf_cb) in dpaa_bp_free()
632 dpaa_bp_array[bp->bpid] = NULL; in dpaa_bp_free()
633 bman_free_pool(bp->pool); in dpaa_bp_free()
638 dpaa_bp_free(priv->dpaa_bp); in dpaa_bps_free()
642 * - Tx Confirmation queues go to WQ1.
643 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
645 * - Rx Default goes to WQ6.
646 * - Tx queues go to different WQs depending on their priority. Equal
649 * This ensures that Tx-confirmed buffers are timely released. In particular,
652 * dequeue scheduling is round-robin.
656 switch (fq->fq_type) { in dpaa_assign_wq()
659 fq->wq = 1; in dpaa_assign_wq()
663 fq->wq = 5; in dpaa_assign_wq()
667 fq->wq = 6; in dpaa_assign_wq()
673 fq->wq = 6; in dpaa_assign_wq()
677 fq->wq = 2; in dpaa_assign_wq()
681 fq->wq = 1; in dpaa_assign_wq()
685 fq->wq = 0; in dpaa_assign_wq()
694 fq->fq_type, fq->fqid); in dpaa_assign_wq()
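Only the assignment lines of the switch above match the search, so its case labels are not shown. The following is a sketch of the FQ-type to work-queue mapping that the preceding comment describes, assuming the driver's FQ_TYPE_* enum and a caller-supplied Tx traffic-class index tc; the Tx sub-switch is condensed into a lookup table for brevity:

static void example_assign_wq(struct dpaa_fq *fq, int tc)
{
	/* Tx traffic classes 0..3 map to WQ6, WQ2, WQ1, WQ0 (rising priority) */
	static const u8 tx_wq[] = { 6, 2, 1, 0 };

	switch (fq->fq_type) {
	case FQ_TYPE_TX_CONFIRM:
	case FQ_TYPE_TX_CONF_MQ:
		fq->wq = 1;		/* Tx confirmation: released promptly */
		break;
	case FQ_TYPE_RX_ERROR:
	case FQ_TYPE_TX_ERROR:
		fq->wq = 5;		/* error queues */
		break;
	case FQ_TYPE_RX_DEFAULT:
	case FQ_TYPE_RX_PCD:
		fq->wq = 6;		/* default and PCD Rx */
		break;
	case FQ_TYPE_TX:
		fq->wq = tx_wq[tc & 3];
		break;
	default:
		WARN(1, "Invalid FQ type %d for FQID %d!\n",
		     fq->fq_type, fq->fqid);
	}
}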
733 port_fqs->rx_errq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
739 port_fqs->rx_defq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
759 port_fqs->rx_pcdq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
769 port_fqs->tx_errq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
775 port_fqs->tx_defq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
784 return -ENOMEM; in dpaa_alloc_all_fqs()
804 return -ENOMEM; in dpaa_get_channel()
830 * Also updates some CGR-related stats.
839 priv->cgr_data.congestion_start_jiffies = jiffies; in dpaa_eth_cgscn()
840 netif_tx_stop_all_queues(priv->net_dev); in dpaa_eth_cgscn()
841 priv->cgr_data.cgr_congested_count++; in dpaa_eth_cgscn()
843 priv->cgr_data.congested_jiffies += in dpaa_eth_cgscn()
844 (jiffies - priv->cgr_data.congestion_start_jiffies); in dpaa_eth_cgscn()
845 netif_tx_wake_all_queues(priv->net_dev); in dpaa_eth_cgscn()
855 err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid); in dpaa_eth_cgr_init()
862 priv->cgr_data.cgr.cb = dpaa_eth_cgscn; in dpaa_eth_cgr_init()
874 if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD) in dpaa_eth_cgr_init()
883 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, in dpaa_eth_cgr_init()
888 __func__, err, priv->cgr_data.cgr.cgrid); in dpaa_eth_cgr_init()
889 qman_release_cgrid(priv->cgr_data.cgr.cgrid); in dpaa_eth_cgr_init()
893 pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n", in dpaa_eth_cgr_init()
894 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr, in dpaa_eth_cgr_init()
895 priv->cgr_data.cgr.chan); in dpaa_eth_cgr_init()
903 struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev); in dpaa_eth_cgr_set_speed()
921 err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts); in dpaa_eth_cgr_set_speed()
930 fq->fq_base = *template; in dpaa_setup_ingress()
931 fq->net_dev = priv->net_dev; in dpaa_setup_ingress()
933 fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; in dpaa_setup_ingress()
934 fq->channel = priv->channel; in dpaa_setup_ingress()
942 fq->fq_base = *template; in dpaa_setup_egress()
943 fq->net_dev = priv->net_dev; in dpaa_setup_egress()
946 fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; in dpaa_setup_egress()
947 fq->channel = (u16)fman_port_get_qman_channel_id(port); in dpaa_setup_egress()
949 fq->flags = QMAN_FQ_FLAG_NO_MODIFY; in dpaa_setup_egress()
964 return -ENOMEM; in dpaa_fq_setup()
970 dev_err(priv->net_dev->dev.parent, in dpaa_fq_setup()
971 "No Qman software (affine) channels found\n"); in dpaa_fq_setup()
974 list_for_each_entry(fq, &priv->dpaa_fq_list, list) { in dpaa_fq_setup()
975 switch (fq->fq_type) { in dpaa_fq_setup()
977 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); in dpaa_fq_setup()
980 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq); in dpaa_fq_setup()
985 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); in dpaa_fq_setup()
986 fq->channel = channels[portal_cnt++ % num_portals]; in dpaa_fq_setup()
990 &fq_cbs->egress_ern); in dpaa_fq_setup()
991 priv->egress_fqs[egress_cnt++] = &fq->fq_base; in dpaa_fq_setup()
994 priv->conf_fqs[conf_cnt++] = &fq->fq_base; in dpaa_fq_setup()
997 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq); in dpaa_fq_setup()
1000 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq); in dpaa_fq_setup()
1003 dev_warn(priv->net_dev->dev.parent, in dpaa_fq_setup()
1020 if (priv->egress_fqs[i] == tx_fq) in dpaa_tx_fq_to_id()
1023 return -EINVAL; in dpaa_tx_fq_to_id()
1036 priv = netdev_priv(dpaa_fq->net_dev); in dpaa_fq_init()
1037 dev = dpaa_fq->net_dev->dev.parent; in dpaa_fq_init()
1039 if (dpaa_fq->fqid == 0) in dpaa_fq_init()
1040 dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; in dpaa_fq_init()
1042 dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); in dpaa_fq_init()
1044 err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base); in dpaa_fq_init()
1049 fq = &dpaa_fq->fq_base; in dpaa_fq_init()
1051 if (dpaa_fq->init) { in dpaa_fq_init()
1056 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE); in dpaa_fq_init()
1061 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM) in dpaa_fq_init()
1062 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK); in dpaa_fq_init()
1067 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq); in dpaa_fq_init()
1071 * rather than Tx - but they nonetheless account for the in dpaa_fq_init()
1075 if (dpaa_fq->fq_type == FQ_TYPE_TX || in dpaa_fq_init()
1076 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM || in dpaa_fq_init()
1077 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { in dpaa_fq_init()
1079 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); in dpaa_fq_init()
1080 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid; in dpaa_fq_init()
1082 * reduce the impact of fixed-size skb shells and the in dpaa_fq_init()
1086 * Unfortunately, QMan's OAL value is capped to an in dpaa_fq_init()
1091 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); in dpaa_fq_init()
1092 qm_fqd_set_oal(&initfq.fqd, in dpaa_fq_init()
1094 priv->tx_headroom, in dpaa_fq_init()
1100 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1); in dpaa_fq_init()
1101 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE); in dpaa_fq_init()
1104 if (dpaa_fq->fq_type == FQ_TYPE_TX) { in dpaa_fq_init()
1105 queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base); in dpaa_fq_init()
1107 confq = priv->conf_fqs[queue_id]; in dpaa_fq_init()
1118 qm_fqd_context_a_set64(&initfq.fqd, in dpaa_fq_init()
1124 if (priv->use_ingress_cgr && in dpaa_fq_init()
1125 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || in dpaa_fq_init()
1126 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR || in dpaa_fq_init()
1127 dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) { in dpaa_fq_init()
1129 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); in dpaa_fq_init()
1130 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid; in dpaa_fq_init()
1135 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); in dpaa_fq_init()
1136 qm_fqd_set_oal(&initfq.fqd, in dpaa_fq_init()
1138 priv->tx_headroom, in dpaa_fq_init()
1143 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { in dpaa_fq_init()
1145 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE | in dpaa_fq_init()
1147 initfq.fqd.context_a.stashing.exclusive = in dpaa_fq_init()
1150 qm_fqd_set_stashing(&initfq.fqd, 1, 2, in dpaa_fq_init()
1164 dpaa_fq->fqid = qman_fq_fqid(fq); in dpaa_fq_init()
1166 if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || in dpaa_fq_init()
1167 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) { in dpaa_fq_init()
1168 err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev, in dpaa_fq_init()
1169 dpaa_fq->fqid, 0); in dpaa_fq_init()
1175 err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq, in dpaa_fq_init()
1180 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq); in dpaa_fq_init()
1197 priv = netdev_priv(dpaa_fq->net_dev); in dpaa_fq_free_entry()
1199 if (dpaa_fq->init) { in dpaa_fq_free_entry()
1214 if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || in dpaa_fq_free_entry()
1215 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) && in dpaa_fq_free_entry()
1216 xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq)) in dpaa_fq_free_entry()
1217 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq); in dpaa_fq_free_entry()
1220 list_del(&dpaa_fq->list); in dpaa_fq_free_entry()
1251 buf_prefix_content.priv_data_size = buf_layout->priv_data_size; in dpaa_eth_init_tx_port()
1257 params.specific_params.non_rx_params.err_fqid = errq->fqid; in dpaa_eth_init_tx_port()
1258 params.specific_params.non_rx_params.dflt_fqid = defq->fqid; in dpaa_eth_init_tx_port()
1293 buf_prefix_content.priv_data_size = buf_layout->priv_data_size; in dpaa_eth_init_rx_port()
1300 rx_p->err_fqid = errq->fqid; in dpaa_eth_init_rx_port()
1301 rx_p->dflt_fqid = defq->fqid; in dpaa_eth_init_rx_port()
1303 rx_p->pcd_base_fqid = pcdq->fqid; in dpaa_eth_init_rx_port()
1304 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM; in dpaa_eth_init_rx_port()
1307 rx_p->ext_buf_pools.num_of_pools_used = 1; in dpaa_eth_init_rx_port()
1308 rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid; in dpaa_eth_init_rx_port()
1309 rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size; in dpaa_eth_init_rx_port()
1337 struct fman_port *rxport = mac_dev->port[RX]; in dpaa_eth_init_ports()
1338 struct fman_port *txport = mac_dev->port[TX]; in dpaa_eth_init_ports()
1341 err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, in dpaa_eth_init_ports()
1342 port_fqs->tx_defq, &buf_layout[TX]); in dpaa_eth_init_ports()
1346 err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq, in dpaa_eth_init_ports()
1347 port_fqs->rx_defq, port_fqs->rx_pcdq, in dpaa_eth_init_ports()
1358 err = bman_release(dpaa_bp->pool, bmb, cnt); in dpaa_bman_release()
1360 if (WARN_ON(err) && dpaa_bp->free_buf_cb) in dpaa_bman_release()
1361 while (cnt-- > 0) in dpaa_bman_release()
1362 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); in dpaa_bman_release()
1388 !qm_sg_entry_is_final(&sgt[i - 1]) && in dpaa_release_sgt_members()
1389 sgt[i - 1].bpid == sgt[i].bpid); in dpaa_release_sgt_members()
1392 } while (!qm_sg_entry_is_final(&sgt[i - 1])); in dpaa_release_sgt_members()
1407 dpaa_bp = dpaa_bpid2pool(fd->bpid); in dpaa_fd_release()
1415 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd), in dpaa_fd_release()
1420 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, in dpaa_fd_release()
1423 if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) { in dpaa_fd_release()
1436 switch (msg->ern.rc & QM_MR_RC_MASK) { in count_ern()
1438 percpu_priv->ern_cnt.cg_tdrop++; in count_ern()
1441 percpu_priv->ern_cnt.wred++; in count_ern()
1444 percpu_priv->ern_cnt.err_cond++; in count_ern()
1447 percpu_priv->ern_cnt.early_window++; in count_ern()
1450 percpu_priv->ern_cnt.late_window++; in count_ern()
1453 percpu_priv->ern_cnt.fq_tdrop++; in count_ern()
1456 percpu_priv->ern_cnt.fq_retired++; in count_ern()
1459 percpu_priv->ern_cnt.orp_zero++; in count_ern()
1471 * Note that this function may modify the fd->cmd field and the skb data buffer
1480 u16 ethertype = ntohs(skb->protocol); in dpaa_enable_tx_csum()
1486 if (skb->ip_summed != CHECKSUM_PARTIAL) in dpaa_enable_tx_csum()
1500 ethertype = ntohs(skb_vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); in dpaa_enable_tx_csum()
1507 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4); in dpaa_enable_tx_csum()
1510 l4_proto = iph->protocol; in dpaa_enable_tx_csum()
1513 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6); in dpaa_enable_tx_csum()
1516 l4_proto = ipv6h->nexthdr; in dpaa_enable_tx_csum()
1521 netif_alert(priv, tx_err, priv->net_dev, in dpaa_enable_tx_csum()
1523 ntohs(skb->protocol)); in dpaa_enable_tx_csum()
1524 retval = -EIO; in dpaa_enable_tx_csum()
1531 parse_result->l4r = FM_L4_PARSE_RESULT_UDP; in dpaa_enable_tx_csum()
1534 parse_result->l4r = FM_L4_PARSE_RESULT_TCP; in dpaa_enable_tx_csum()
1538 netif_alert(priv, tx_err, priv->net_dev, in dpaa_enable_tx_csum()
1541 retval = -EIO; in dpaa_enable_tx_csum()
1546 parse_result->ip_off[0] = (u8)skb_network_offset(skb); in dpaa_enable_tx_csum()
1547 parse_result->l4_off = (u8)skb_transport_offset(skb); in dpaa_enable_tx_csum()
1550 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC); in dpaa_enable_tx_csum()
1552 /* On P1023 and similar platforms fd->cmd interpretation could in dpaa_enable_tx_csum()
1564 struct net_device *net_dev = dpaa_bp->priv->net_dev; in dpaa_bp_add_8_bufs()
1577 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0, in dpaa_bp_add_8_bufs()
1579 if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev, in dpaa_bp_add_8_bufs()
1611 int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i); in dpaa_bp_seed()
1617 for (j = 0; j < dpaa_bp->config_count; j += 8) in dpaa_bp_seed()
1646 return -ENOMEM; in dpaa_eth_refill_bpool()
1657 dpaa_bp = priv->dpaa_bp; in dpaa_eth_refill_bpools()
1659 return -EINVAL; in dpaa_eth_refill_bpools()
1660 countptr = this_cpu_ptr(dpaa_bp->percpu_count); in dpaa_eth_refill_bpools()
1684 struct device *dev = priv->net_dev->dev.parent; in dpaa_cleanup_tx_fd()
1695 dma_unmap_page(priv->tx_dma_dev, addr, in dpaa_cleanup_tx_fd()
1704 /* sgt[0] is from lowmem, was dma_map_single()-ed */ in dpaa_cleanup_tx_fd()
1705 dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]), in dpaa_cleanup_tx_fd()
1710 !qm_sg_entry_is_final(&sgt[i - 1]); i++) { in dpaa_cleanup_tx_fd()
1713 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]), in dpaa_cleanup_tx_fd()
1717 dma_unmap_single(priv->tx_dma_dev, addr, in dpaa_cleanup_tx_fd()
1723 skb = swbp->skb; in dpaa_cleanup_tx_fd()
1729 xdp_return_frame(swbp->xdpf); in dpaa_cleanup_tx_fd()
1734 if (ts && priv->tx_tstamp && in dpaa_cleanup_tx_fd()
1735 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { in dpaa_cleanup_tx_fd()
1738 if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr, in dpaa_cleanup_tx_fd()
1760 if ((priv->net_dev->features & NETIF_F_RXCSUM) && in rx_csum_offload()
1761 (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV)) in rx_csum_offload()
1789 dpaa_bp = dpaa_bpid2pool(fd->bpid); in contig_fd_to_skb()
1793 skb = build_skb(vaddr, dpaa_bp->size + in contig_fd_to_skb()
1800 skb->ip_summed = rx_csum_offload(priv, fd); in contig_fd_to_skb()
1844 dma_unmap_page(priv->rx_dma_dev, sg_addr, in sg_fd_to_skb()
1853 sz = dpaa_bp->size + in sg_fd_to_skb()
1859 skb->ip_summed = rx_csum_offload(priv, fd); in sg_fd_to_skb()
1864 WARN_ON(fd_off != priv->rx_headroom); in sg_fd_to_skb()
1885 (PAGE_SIZE - 1)) + in sg_fd_to_skb()
1886 (page_address(page) - page_address(head_page)); in sg_fd_to_skb()
1888 /* Non-initial SGT entries should not have a buffer in sg_fd_to_skb()
1897 skb_add_rx_frag(skb, i - 1, head_page, page_offset, in sg_fd_to_skb()
1899 dpaa_bp->size); in sg_fd_to_skb()
1903 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); in sg_fd_to_skb()
1904 (*count_ptr)--; in sg_fd_to_skb()
1923 dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]), in sg_fd_to_skb()
1926 /* counters 0..i-1 were decremented */ in sg_fd_to_skb()
1930 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); in sg_fd_to_skb()
1931 (*count_ptr)--; in sg_fd_to_skb()
1948 struct net_device *net_dev = priv->net_dev; in skb_to_contig_fd()
1958 fd->bpid = FSL_DPAA_BPID_INV; in skb_to_contig_fd()
1959 buff_start = skb->data - priv->tx_headroom; in skb_to_contig_fd()
1963 swbp->skb = skb; in skb_to_contig_fd()
1980 qm_fd_set_contig(fd, priv->tx_headroom, skb->len); in skb_to_contig_fd()
1981 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); in skb_to_contig_fd()
1984 addr = dma_map_single(priv->tx_dma_dev, buff_start, in skb_to_contig_fd()
1985 priv->tx_headroom + skb->len, dma_dir); in skb_to_contig_fd()
1986 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in skb_to_contig_fd()
1989 return -EINVAL; in skb_to_contig_fd()
2000 const int nr_frags = skb_shinfo(skb)->nr_frags; in skb_to_sg_fd()
2001 struct net_device *net_dev = priv->net_dev; in skb_to_sg_fd()
2015 return -ENOMEM; in skb_to_sg_fd()
2034 sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom); in skb_to_sg_fd()
2039 addr = dma_map_single(priv->tx_dma_dev, skb->data, in skb_to_sg_fd()
2041 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in skb_to_sg_fd()
2042 netdev_err(priv->net_dev, "DMA mapping failed\n"); in skb_to_sg_fd()
2043 err = -EINVAL; in skb_to_sg_fd()
2050 frag = &skb_shinfo(skb)->frags[i]; in skb_to_sg_fd()
2053 addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0, in skb_to_sg_fd()
2055 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in skb_to_sg_fd()
2056 netdev_err(priv->net_dev, "DMA mapping failed\n"); in skb_to_sg_fd()
2057 err = -EINVAL; in skb_to_sg_fd()
2072 /* set fd offset to priv->tx_headroom */ in skb_to_sg_fd()
2073 qm_fd_set_sg(fd, priv->tx_headroom, skb->len); in skb_to_sg_fd()
2077 swbp->skb = skb; in skb_to_sg_fd()
2079 addr = dma_map_page(priv->tx_dma_dev, p, 0, in skb_to_sg_fd()
2080 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); in skb_to_sg_fd()
2081 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in skb_to_sg_fd()
2082 netdev_err(priv->net_dev, "DMA mapping failed\n"); in skb_to_sg_fd()
2083 err = -EINVAL; in skb_to_sg_fd()
2087 fd->bpid = FSL_DPAA_BPID_INV; in skb_to_sg_fd()
2088 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); in skb_to_sg_fd()
2096 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]), in skb_to_sg_fd()
2113 egress_fq = priv->egress_fqs[queue]; in dpaa_xmit()
2114 if (fd->bpid == FSL_DPAA_BPID_INV) in dpaa_xmit()
2115 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue])); in dpaa_xmit()
2118 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); in dpaa_xmit()
2122 if (err != -EBUSY) in dpaa_xmit()
2127 percpu_stats->tx_fifo_errors++; in dpaa_xmit()
2131 percpu_stats->tx_packets++; in dpaa_xmit()
2132 percpu_stats->tx_bytes += qm_fd_get_length(fd); in dpaa_xmit()
2145 if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN)) in dpaa_a050385_wa_skb()
2156 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in dpaa_a050385_wa_skb()
2157 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in dpaa_a050385_wa_skb()
2165 (i < skb_shinfo(skb)->nr_frags - 1)) in dpaa_a050385_wa_skb()
2173 new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 + in dpaa_a050385_wa_skb()
2174 priv->tx_headroom); in dpaa_a050385_wa_skb()
2176 return -ENOMEM; in dpaa_a050385_wa_skb()
2179 skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD); in dpaa_a050385_wa_skb()
2182 start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN); in dpaa_a050385_wa_skb()
2183 if (start - new_skb->data) in dpaa_a050385_wa_skb()
2184 skb_reserve(new_skb, start - new_skb->data); in dpaa_a050385_wa_skb()
2186 skb_put(new_skb, skb->len); in dpaa_a050385_wa_skb()
2187 skb_copy_bits(skb, 0, new_skb->data, skb->len); in dpaa_a050385_wa_skb()
2189 new_skb->dev = skb->dev; in dpaa_a050385_wa_skb()
2192 if (priv->tx_tstamp) { in dpaa_a050385_wa_skb()
2193 skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags; in dpaa_a050385_wa_skb()
2194 skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps; in dpaa_a050385_wa_skb()
2195 skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey; in dpaa_a050385_wa_skb()
2196 if (skb->sk) in dpaa_a050385_wa_skb()
2197 skb_set_owner_w(new_skb, skb->sk); in dpaa_a050385_wa_skb()
2230 if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) && in dpaa_a050385_wa_xdpf()
2231 xdpf->headroom >= priv->tx_headroom) { in dpaa_a050385_wa_xdpf()
2232 xdpf->headroom = priv->tx_headroom; in dpaa_a050385_wa_xdpf()
2240 aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT); in dpaa_a050385_wa_xdpf()
2241 data_shift = xdpf->data - aligned_data; in dpaa_a050385_wa_xdpf()
2246 if (xdpf->headroom >= data_shift + priv->tx_headroom) { in dpaa_a050385_wa_xdpf()
2247 memmove(aligned_data, xdpf->data, xdpf->len); in dpaa_a050385_wa_xdpf()
2248 xdpf->data = aligned_data; in dpaa_a050385_wa_xdpf()
2249 xdpf->headroom = priv->tx_headroom; in dpaa_a050385_wa_xdpf()
2258 headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom, in dpaa_a050385_wa_xdpf()
2264 if (headroom + xdpf->len > DPAA_BP_RAW_SIZE - in dpaa_a050385_wa_xdpf()
2266 return -ENOMEM; in dpaa_a050385_wa_xdpf()
2270 return -ENOMEM; in dpaa_a050385_wa_xdpf()
2274 memcpy(new_buff + headroom, xdpf->data, xdpf->len); in dpaa_a050385_wa_xdpf()
2280 new_xdpf->data = new_buff + headroom; in dpaa_a050385_wa_xdpf()
2281 new_xdpf->len = xdpf->len; in dpaa_a050385_wa_xdpf()
2282 new_xdpf->headroom = priv->tx_headroom; in dpaa_a050385_wa_xdpf()
2283 new_xdpf->frame_sz = DPAA_BP_RAW_SIZE; in dpaa_a050385_wa_xdpf()
2284 new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0; in dpaa_a050385_wa_xdpf()
2308 percpu_priv = this_cpu_ptr(priv->percpu_priv); in dpaa_start_xmit()
2309 percpu_stats = &percpu_priv->stats; in dpaa_start_xmit()
2313 /* Packet data is always read as 32-bit words, so zero out any part of in dpaa_start_xmit()
2324 * We've made sure skb is not shared in dev->priv_flags, in dpaa_start_xmit()
2327 if (skb_cow_head(skb, priv->tx_headroom)) in dpaa_start_xmit()
2337 (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) { in dpaa_start_xmit()
2358 percpu_priv->tx_frag_skbuffs++; in dpaa_start_xmit()
2371 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { in dpaa_start_xmit()
2373 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in dpaa_start_xmit()
2382 percpu_stats->tx_errors++; in dpaa_start_xmit()
2395 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS); in dpaa_rx_error()
2397 percpu_priv->stats.rx_errors++; in dpaa_rx_error()
2399 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA) in dpaa_rx_error()
2400 percpu_priv->rx_errors.dme++; in dpaa_rx_error()
2401 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL) in dpaa_rx_error()
2402 percpu_priv->rx_errors.fpe++; in dpaa_rx_error()
2403 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE) in dpaa_rx_error()
2404 percpu_priv->rx_errors.fse++; in dpaa_rx_error()
2405 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR) in dpaa_rx_error()
2406 percpu_priv->rx_errors.phe++; in dpaa_rx_error()
2421 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS); in dpaa_tx_error()
2423 percpu_priv->stats.tx_errors++; in dpaa_tx_error()
2435 np->xdp_act = 0; in dpaa_eth_poll()
2437 cleaned = qman_p_poll_dqrr(np->p, budget); in dpaa_eth_poll()
2439 if (np->xdp_act & XDP_REDIRECT) in dpaa_eth_poll()
2444 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); in dpaa_eth_poll()
2445 } else if (np->down) { in dpaa_eth_poll()
2446 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); in dpaa_eth_poll()
2460 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) { in dpaa_tx_conf()
2463 be32_to_cpu(fd->status) & in dpaa_tx_conf()
2466 percpu_priv->stats.tx_errors++; in dpaa_tx_conf()
2469 percpu_priv->tx_confirm++; in dpaa_tx_conf()
2480 /* Disable QMan IRQ and invoke NAPI */ in dpaa_eth_napi_schedule()
2483 percpu_priv->np.p = portal; in dpaa_eth_napi_schedule()
2484 napi_schedule(&percpu_priv->np.napi); in dpaa_eth_napi_schedule()
2485 percpu_priv->in_interrupt++; in dpaa_eth_napi_schedule()
2502 net_dev = dpaa_fq->net_dev; in rx_error_dqrr()
2504 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); in rx_error_dqrr()
2508 percpu_priv = this_cpu_ptr(priv->percpu_priv); in rx_error_dqrr()
2514 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); in rx_error_dqrr()
2532 percpu_priv = this_cpu_ptr(priv->percpu_priv); in dpaa_xdp_xmit_frame()
2533 percpu_stats = &percpu_priv->stats; in dpaa_xdp_xmit_frame()
2538 err = -ENOMEM; in dpaa_xdp_xmit_frame()
2544 if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) { in dpaa_xdp_xmit_frame()
2545 err = -EINVAL; in dpaa_xdp_xmit_frame()
2549 buff_start = xdpf->data - xdpf->headroom; in dpaa_xdp_xmit_frame()
2555 swbp->skb = NULL; in dpaa_xdp_xmit_frame()
2556 swbp->xdpf = xdpf; in dpaa_xdp_xmit_frame()
2561 qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len); in dpaa_xdp_xmit_frame()
2563 addr = dma_map_single(priv->tx_dma_dev, buff_start, in dpaa_xdp_xmit_frame()
2564 xdpf->headroom + xdpf->len, in dpaa_xdp_xmit_frame()
2566 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in dpaa_xdp_xmit_frame()
2567 err = -EINVAL; in dpaa_xdp_xmit_frame()
2579 dma_unmap_single(priv->tx_dma_dev, addr, in dpaa_xdp_xmit_frame()
2588 percpu_stats->tx_errors++; in dpaa_xdp_xmit_frame()
2602 xdp_prog = READ_ONCE(priv->xdp_prog); in dpaa_run_xdp()
2606 xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE, in dpaa_run_xdp()
2607 &dpaa_fq->xdp_rxq); in dpaa_run_xdp()
2608 xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM, in dpaa_run_xdp()
2628 qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data); in dpaa_run_xdp()
2634 xdp.data - xdp.data_meta; in dpaa_run_xdp()
2636 *xdp_meta_len = xdp.data - xdp.data_meta; in dpaa_run_xdp()
2651 if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf)) in dpaa_run_xdp()
2660 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); in dpaa_run_xdp()
2662 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); in dpaa_run_xdp()
2667 bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); in dpaa_run_xdp()
2670 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); in dpaa_run_xdp()
2691 const struct qm_fd *fd = &dq->fd; in rx_default_dqrr()
2709 fd_status = be32_to_cpu(fd->status); in rx_default_dqrr()
2711 net_dev = dpaa_fq->net_dev; in rx_default_dqrr()
2713 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); in rx_default_dqrr()
2718 trace_dpaa_rx_fd(net_dev, fq, &dq->fd); in rx_default_dqrr()
2720 percpu_priv = this_cpu_ptr(priv->percpu_priv); in rx_default_dqrr()
2721 percpu_stats = &percpu_priv->stats; in rx_default_dqrr()
2722 np = &percpu_priv->np; in rx_default_dqrr()
2733 dpaa_fd_release(net_dev, &dq->fd); in rx_default_dqrr()
2742 percpu_stats->rx_errors++; in rx_default_dqrr()
2747 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, in rx_default_dqrr()
2760 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); in rx_default_dqrr()
2761 (*count_ptr)--; in rx_default_dqrr()
2764 if (priv->rx_tstamp) { in rx_default_dqrr()
2765 if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) in rx_default_dqrr()
2772 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && in rx_default_dqrr()
2773 !fman_port_get_hash_result_offset(priv->mac_dev->port[RX], in rx_default_dqrr()
2782 np->xdp_act |= xdp_act; in rx_default_dqrr()
2784 percpu_stats->rx_packets++; in rx_default_dqrr()
2785 percpu_stats->rx_bytes += qm_fd_get_length(fd); in rx_default_dqrr()
2793 if (READ_ONCE(priv->xdp_prog)) { in rx_default_dqrr()
2812 shhwtstamps->hwtstamp = ns_to_ktime(ns); in rx_default_dqrr()
2815 skb->protocol = eth_type_trans(skb, net_dev); in rx_default_dqrr()
2822 type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ? in rx_default_dqrr()
2827 skb_len = skb->len; in rx_default_dqrr()
2830 percpu_stats->rx_dropped++; in rx_default_dqrr()
2834 percpu_stats->rx_packets++; in rx_default_dqrr()
2835 percpu_stats->rx_bytes += skb_len; in rx_default_dqrr()
2849 net_dev = ((struct dpaa_fq *)fq)->net_dev; in conf_error_dqrr()
2852 percpu_priv = this_cpu_ptr(priv->percpu_priv); in conf_error_dqrr()
2857 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); in conf_error_dqrr()
2871 net_dev = ((struct dpaa_fq *)fq)->net_dev; in conf_dflt_dqrr()
2875 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); in conf_dflt_dqrr()
2877 percpu_priv = this_cpu_ptr(priv->percpu_priv); in conf_dflt_dqrr()
2882 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); in conf_dflt_dqrr()
2891 const struct qm_fd *fd = &msg->ern.fd; in egress_ern()
2897 net_dev = ((struct dpaa_fq *)fq)->net_dev; in egress_ern()
2899 percpu_priv = this_cpu_ptr(priv->percpu_priv); in egress_ern()
2901 percpu_priv->stats.tx_dropped++; in egress_ern()
2902 percpu_priv->stats.tx_fifo_errors++; in egress_ern()
2923 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); in dpaa_eth_napi_enable()
2925 percpu_priv->np.down = false; in dpaa_eth_napi_enable()
2926 napi_enable(&percpu_priv->np.napi); in dpaa_eth_napi_enable()
2936 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); in dpaa_eth_napi_disable()
2938 percpu_priv->np.down = true; in dpaa_eth_napi_disable()
2939 napi_disable(&percpu_priv->np.napi); in dpaa_eth_napi_disable()
2950 mac_dev = priv->mac_dev; in dpaa_open()
2953 err = phylink_of_phy_connect(mac_dev->phylink, in dpaa_open()
2954 mac_dev->dev->of_node, 0); in dpaa_open()
2958 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { in dpaa_open()
2959 err = fman_port_enable(mac_dev->port[i]); in dpaa_open()
2964 err = priv->mac_dev->enable(mac_dev->fman_mac); in dpaa_open()
2966 netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err); in dpaa_open()
2969 phylink_start(mac_dev->phylink); in dpaa_open()
2976 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) in dpaa_open()
2977 fman_port_disable(mac_dev->port[i]); in dpaa_open()
2978 phylink_disconnect_phy(mac_dev->phylink); in dpaa_open()
3001 int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom; in xdp_validate_mtu()
3007 dev_warn(priv->net_dev->dev.parent, in xdp_validate_mtu()
3009 max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN); in xdp_validate_mtu()
3020 if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu)) in dpaa_change_mtu()
3021 return -EINVAL; in dpaa_change_mtu()
3023 WRITE_ONCE(net_dev->mtu, new_mtu); in dpaa_change_mtu()
3034 /* S/G fragments are not supported in XDP-mode */ in dpaa_setup_xdp()
3035 if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) { in dpaa_setup_xdp()
3036 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); in dpaa_setup_xdp()
3037 return -EINVAL; in dpaa_setup_xdp()
3045 old_prog = xchg(&priv->xdp_prog, bpf->prog); in dpaa_setup_xdp()
3052 NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed"); in dpaa_setup_xdp()
3062 switch (xdp->command) { in dpaa_xdp()
3066 return -EINVAL; in dpaa_xdp()
3077 return -EINVAL; in dpaa_xdp_xmit()
3080 return -ENETDOWN; in dpaa_xdp_xmit()
3097 if (copy_from_user(&config, rq->ifr_data, sizeof(config))) in dpaa_ts_ioctl()
3098 return -EFAULT; in dpaa_ts_ioctl()
3105 priv->tx_tstamp = false; in dpaa_ts_ioctl()
3108 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); in dpaa_ts_ioctl()
3109 priv->tx_tstamp = true; in dpaa_ts_ioctl()
3112 return -ERANGE; in dpaa_ts_ioctl()
3119 priv->rx_tstamp = false; in dpaa_ts_ioctl()
3121 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); in dpaa_ts_ioctl()
3122 priv->rx_tstamp = true; in dpaa_ts_ioctl()
3127 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? in dpaa_ts_ioctl()
3128 -EFAULT : 0; in dpaa_ts_ioctl()
3133 int ret = -EINVAL; in dpaa_ioctl()
3137 if (net_dev->phydev) in dpaa_ioctl()
3138 return phylink_mii_ioctl(priv->mac_dev->phylink, rq, in dpaa_ioctl()
3172 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); in dpaa_napi_add()
3174 netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll); in dpaa_napi_add()
3187 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); in dpaa_napi_del()
3189 __netif_napi_del(&percpu_priv->np.napi); in dpaa_napi_del()
3199 dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, in dpaa_bp_free_pf()
3212 return ERR_PTR(-ENOMEM); in dpaa_bp_alloc()
3214 dpaa_bp->bpid = FSL_DPAA_BPID_INV; in dpaa_bp_alloc()
3215 dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); in dpaa_bp_alloc()
3216 if (!dpaa_bp->percpu_count) in dpaa_bp_alloc()
3217 return ERR_PTR(-ENOMEM); in dpaa_bp_alloc()
3219 dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; in dpaa_bp_alloc()
3221 dpaa_bp->seed_cb = dpaa_bp_seed; in dpaa_bp_alloc()
3222 dpaa_bp->free_buf_cb = dpaa_bp_free_pf; in dpaa_bp_alloc()
3238 err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); in dpaa_ingress_cgr_init()
3258 err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, in dpaa_ingress_cgr_init()
3263 err, priv->ingress_cgr.cgrid); in dpaa_ingress_cgr_init()
3264 qman_release_cgrid(priv->ingress_cgr.cgrid); in dpaa_ingress_cgr_init()
3269 priv->ingress_cgr.cgrid, priv->mac_dev->addr); in dpaa_ingress_cgr_init()
3271 priv->use_ingress_cgr = true; in dpaa_ingress_cgr_init()
3283 * - the driver private data area in dpaa_get_headroom()
3284 * - parse results, hash results, timestamp if selected in dpaa_get_headroom()
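The body of dpaa_get_headroom() is largely filtered out by the match, so as a rough illustration of the arithmetic, here is a sketch assuming the DPAA_HWA_SIZE and DPAA_FD_DATA_ALIGNMENT macros seen earlier in this listing; per-port special cases (such as the A050385 erratum and an Rx-specific alignment) are deliberately omitted:

/* Sketch: headroom = driver private data + hardware annotation area
 * (parse results, hash result, timestamp), rounded up to the frame
 * descriptor data alignment required by FMan.
 */
static u16 example_get_headroom(const struct dpaa_buffer_layout *bl,
				enum port_type port)
{
	u16 headroom = bl[port].priv_data_size + DPAA_HWA_SIZE;

	return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}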
3317 dev = &pdev->dev; in dpaa_eth_probe()
3321 return -EPROBE_DEFER; in dpaa_eth_probe()
3324 return -ENODEV; in dpaa_eth_probe()
3328 return -EPROBE_DEFER; in dpaa_eth_probe()
3330 dev_err(dev, "failing probe due to qman probe error\n"); in dpaa_eth_probe()
3331 return -ENODEV; in dpaa_eth_probe()
3335 return -EPROBE_DEFER; in dpaa_eth_probe()
3339 return -ENODEV; in dpaa_eth_probe()
3343 return -EPROBE_DEFER; in dpaa_eth_probe()
3346 "failing probe due to qman portals probe error\n"); in dpaa_eth_probe()
3347 return -ENODEV; in dpaa_eth_probe()
3356 return -ENOMEM; in dpaa_eth_probe()
3360 SET_NETDEV_DEV(net_dev, dev->parent); in dpaa_eth_probe()
3364 priv->net_dev = net_dev; in dpaa_eth_probe()
3366 priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); in dpaa_eth_probe()
3368 priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), in dpaa_eth_probe()
3369 sizeof(*priv->egress_fqs), in dpaa_eth_probe()
3371 if (!priv->egress_fqs) { in dpaa_eth_probe()
3372 err = -ENOMEM; in dpaa_eth_probe()
3376 priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), in dpaa_eth_probe()
3377 sizeof(*priv->conf_fqs), in dpaa_eth_probe()
3379 if (!priv->conf_fqs) { in dpaa_eth_probe()
3380 err = -ENOMEM; in dpaa_eth_probe()
3392 priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]); in dpaa_eth_probe()
3393 priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]); in dpaa_eth_probe()
3394 err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40)); in dpaa_eth_probe()
3396 err = dma_coerce_mask_and_coherent(priv->tx_dma_dev, in dpaa_eth_probe()
3403 /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, in dpaa_eth_probe()
3410 net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); in dpaa_eth_probe()
3413 net_dev->mtu); in dpaa_eth_probe()
3415 priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ in dpaa_eth_probe()
3416 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ in dpaa_eth_probe()
3425 dpaa_bp->raw_size = DPAA_BP_RAW_SIZE; in dpaa_eth_probe()
3427 dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size); in dpaa_eth_probe()
3428 dpaa_bp->priv = priv; in dpaa_eth_probe()
3433 priv->dpaa_bp = dpaa_bp; in dpaa_eth_probe()
3435 INIT_LIST_HEAD(&priv->dpaa_fq_list); in dpaa_eth_probe()
3439 err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); in dpaa_eth_probe()
3445 priv->mac_dev = mac_dev; in dpaa_eth_probe()
3454 priv->channel = (u16)channel; in dpaa_eth_probe()
3459 dpaa_eth_add_channel(priv->channel, &pdev->dev); in dpaa_eth_probe()
3461 err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); in dpaa_eth_probe()
3466 * dynamically-allocated CGR ID. in dpaa_eth_probe()
3483 list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) { in dpaa_eth_probe()
3489 priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX); in dpaa_eth_probe()
3490 priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX); in dpaa_eth_probe()
3494 &priv->buf_layout[0], dev); in dpaa_eth_probe()
3499 priv->keygen_in_use = true; in dpaa_eth_probe()
3501 priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); in dpaa_eth_probe()
3502 if (!priv->percpu_priv) { in dpaa_eth_probe()
3504 err = -ENOMEM; in dpaa_eth_probe()
3508 priv->num_tc = 1; in dpaa_eth_probe()
3510 priv->num_tc * dpaa_num_txqs_per_tc()); in dpaa_eth_probe()
3521 dpaa_eth_sysfs_init(&net_dev->dev); in dpaa_eth_probe()
3524 net_dev->name); in dpaa_eth_probe()
3531 dpaa_fq_free(dev, &priv->dpaa_fq_list); in dpaa_eth_probe()
3532 qman_delete_cgr_safe(&priv->ingress_cgr); in dpaa_eth_probe()
3533 qman_release_cgrid(priv->ingress_cgr.cgrid); in dpaa_eth_probe()
3535 qman_delete_cgr_safe(&priv->cgr_data.cgr); in dpaa_eth_probe()
3536 qman_release_cgrid(priv->cgr_data.cgr.cgrid); in dpaa_eth_probe()
3553 dev = &pdev->dev; in dpaa_remove()
3562 phylink_destroy(priv->mac_dev->phylink); in dpaa_remove()
3564 err = dpaa_fq_free(dev, &priv->dpaa_fq_list); in dpaa_remove()
3569 qman_delete_cgr_safe(&priv->ingress_cgr); in dpaa_remove()
3570 qman_release_cgrid(priv->ingress_cgr.cgrid); in dpaa_remove()
3571 qman_delete_cgr_safe(&priv->cgr_data.cgr); in dpaa_remove()
3572 qman_release_cgrid(priv->cgr_data.cgr.cgrid); in dpaa_remove()
3583 .name = "dpaa-ethernet",