Lines Matching +full:qman-fqd
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
3 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
29 #include <linux/dma-mapping.h>
34 #include <soc/fsl/qman.h>
46 static int debug = -1;
71 * The size in bytes of the ingress tail-drop threshold on FMan ports.
72 * Traffic piling up above this value will be rejected by QMan and discarded
86 * - avoiding the device staying congested for a prolonged time (risking
87 * the netdev watchdog to fire - see also the tx_timeout module param);
88 * - affecting performance of protocols such as TCP, which otherwise
90 * - preventing the Tx cores from tightly-looping (as if the congestion
92 * - running out of memory if the CS threshold is set too high.
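The comment above explains why an egress congestion state (CS) threshold is used at all. As a point of reference, here is a minimal sketch of how such a threshold is typically attached to a QMan congestion group; it is reconstructed from the dpaa_eth_cgr_init() hits later in this listing and assumes only the <soc/fsl/qman.h> API (qman_alloc_cgrid(), qman_create_cgr(), qm_cgr_cs_thres_set64()). The cs_threshold value and the example_* names are placeholders, not part of the driver.

    #include <linux/string.h>
    #include <soc/fsl/qman.h>

    /* Invoked by QMan when the group enters (congested != 0) or exits the
     * congested state; the driver's dpaa_eth_cgscn() stops/wakes the Tx
     * queues from a callback of this shape.
     */
    static void example_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
                              int congested)
    {
    }

    static int example_cgr_init(struct qman_cgr *cgr, u64 cs_threshold)
    {
            struct qm_mcc_initcgr initcgr;
            int err;

            err = qman_alloc_cgrid(&cgr->cgrid);
            if (err < 0)
                    return err;
            cgr->cb = example_cgscn;

            /* Enable congestion state change notifications and set the byte
             * threshold above which the group is considered congested.
             */
            memset(&initcgr, 0, sizeof(initcgr));
            initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
            initcgr.cgr.cscn_en = QM_CGR_EN;
            qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_threshold, 1);

            err = qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &initcgr);
            if (err < 0)
                    qman_release_cgrid(cgr->cgrid);
            return err;
    }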
100 /* Largest value that the FQD's OAL field can hold */
157 XDP_PACKET_HEADROOM - DPAA_HWA_SIZE)
159 #define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
188 ~(DPAA_A050385_ALIGN - 1))
198 (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
207 struct device *dev = net_dev->dev.parent; in dpaa_netdev_init()
208 struct mac_device *mac_dev = priv->mac_dev; in dpaa_netdev_init()
217 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); in dpaa_netdev_init()
218 percpu_priv->net_dev = net_dev; in dpaa_netdev_init()
221 net_dev->netdev_ops = dpaa_ops; in dpaa_netdev_init()
222 mac_addr = mac_dev->addr; in dpaa_netdev_init()
224 net_dev->mem_start = (unsigned long)priv->mac_dev->res->start; in dpaa_netdev_init()
225 net_dev->mem_end = (unsigned long)priv->mac_dev->res->end; in dpaa_netdev_init()
227 net_dev->min_mtu = ETH_MIN_MTU; in dpaa_netdev_init()
228 net_dev->max_mtu = dpaa_get_max_mtu(); in dpaa_netdev_init()
230 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in dpaa_netdev_init()
233 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA; in dpaa_netdev_init()
237 net_dev->features |= NETIF_F_GSO; in dpaa_netdev_init()
238 net_dev->features |= NETIF_F_RXCSUM; in dpaa_netdev_init()
240 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in dpaa_netdev_init()
241 net_dev->lltx = true; in dpaa_netdev_init()
243 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING; in dpaa_netdev_init()
245 net_dev->features |= net_dev->hw_features; in dpaa_netdev_init()
246 net_dev->vlan_features = net_dev->features; in dpaa_netdev_init()
248 net_dev->xdp_features = NETDEV_XDP_ACT_BASIC | in dpaa_netdev_init()
253 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); in dpaa_netdev_init()
257 err = mac_dev->change_addr(mac_dev->fman_mac, in dpaa_netdev_init()
258 (const enet_addr_t *)net_dev->dev_addr); in dpaa_netdev_init()
261 return -EINVAL; in dpaa_netdev_init()
264 net_dev->dev_addr); in dpaa_netdev_init()
267 net_dev->ethtool_ops = &dpaa_ethtool_ops; in dpaa_netdev_init()
269 net_dev->needed_headroom = priv->tx_headroom; in dpaa_netdev_init()
270 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); in dpaa_netdev_init()
273 mac_dev->phylink_config.dev = &net_dev->dev; in dpaa_netdev_init()
274 mac_dev->phylink_config.type = PHYLINK_NETDEV; in dpaa_netdev_init()
275 mac_dev->update_speed = dpaa_eth_cgr_set_speed; in dpaa_netdev_init()
276 mac_dev->phylink = phylink_create(&mac_dev->phylink_config, in dpaa_netdev_init()
277 dev_fwnode(mac_dev->dev), in dpaa_netdev_init()
278 mac_dev->phy_if, in dpaa_netdev_init()
279 mac_dev->phylink_ops); in dpaa_netdev_init()
280 if (IS_ERR(mac_dev->phylink)) { in dpaa_netdev_init()
281 err = PTR_ERR(mac_dev->phylink); in dpaa_netdev_init()
292 phylink_destroy(mac_dev->phylink); in dpaa_netdev_init()
307 mac_dev = priv->mac_dev; in dpaa_stop()
310 /* Allow the Fman (Tx) port to process in-flight frames before we in dpaa_stop()
315 phylink_stop(mac_dev->phylink); in dpaa_stop()
316 mac_dev->disable(mac_dev->fman_mac); in dpaa_stop()
318 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { in dpaa_stop()
319 error = fman_port_disable(mac_dev->port[i]); in dpaa_stop()
324 phylink_disconnect_phy(mac_dev->phylink); in dpaa_stop()
325 net_dev->phydev = NULL; in dpaa_stop()
338 percpu_priv = this_cpu_ptr(priv->percpu_priv); in dpaa_tx_timeout()
341 jiffies_to_msecs(jiffies - dev_trans_start(net_dev))); in dpaa_tx_timeout()
343 percpu_priv->stats.tx_errors++; in dpaa_tx_timeout()
360 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); in dpaa_get_stats64()
362 cpustats = (u64 *)&percpu_priv->stats; in dpaa_get_stats64()
380 return -EOPNOTSUPP; in dpaa_setup_tc()
382 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in dpaa_setup_tc()
383 num_tc = mqprio->num_tc; in dpaa_setup_tc()
385 if (num_tc == priv->num_tc) in dpaa_setup_tc()
396 return -EINVAL; in dpaa_setup_tc()
406 priv->num_tc = num_tc ? : 1; in dpaa_setup_tc()
407 netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc); in dpaa_setup_tc()
417 dpaa_dev = &pdev->dev; in dpaa_mac_dev_get()
418 eth_data = dpaa_dev->platform_data; in dpaa_mac_dev_get()
421 return ERR_PTR(-ENODEV); in dpaa_mac_dev_get()
423 mac_dev = eth_data->mac_dev; in dpaa_mac_dev_get()
426 return ERR_PTR(-EINVAL); in dpaa_mac_dev_get()
441 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN); in dpaa_set_mac_address()
449 mac_dev = priv->mac_dev; in dpaa_set_mac_address()
451 err = mac_dev->change_addr(mac_dev->fman_mac, in dpaa_set_mac_address()
452 (const enet_addr_t *)net_dev->dev_addr); in dpaa_set_mac_address()
454 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n", in dpaa_set_mac_address()
469 return priv->mac_dev->add_hash_mac_addr(priv->mac_dev->fman_mac, in dpaa_addr_sync()
477 return priv->mac_dev->remove_hash_mac_addr(priv->mac_dev->fman_mac, in dpaa_addr_unsync()
488 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) { in dpaa_set_rx_mode()
489 priv->mac_dev->promisc = !priv->mac_dev->promisc; in dpaa_set_rx_mode()
490 err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac, in dpaa_set_rx_mode()
491 priv->mac_dev->promisc); in dpaa_set_rx_mode()
494 "mac_dev->set_promisc() = %d\n", in dpaa_set_rx_mode()
498 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) { in dpaa_set_rx_mode()
499 priv->mac_dev->allmulti = !priv->mac_dev->allmulti; in dpaa_set_rx_mode()
500 err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac, in dpaa_set_rx_mode()
501 priv->mac_dev->allmulti); in dpaa_set_rx_mode()
504 "mac_dev->set_allmulti() = %d\n", in dpaa_set_rx_mode()
526 refcount_inc(&dpaa_bp_array[bpid]->refs); in dpaa_bpid2pool_use()
537 refcount_set(&dpaa_bp->refs, 1); in dpaa_bpid2pool_map()
544 if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) { in dpaa_bp_alloc_pool()
547 return -EINVAL; in dpaa_bp_alloc_pool()
551 if (dpaa_bp->bpid != FSL_DPAA_BPID_INV && in dpaa_bp_alloc_pool()
552 dpaa_bpid2pool_use(dpaa_bp->bpid)) in dpaa_bp_alloc_pool()
555 if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) { in dpaa_bp_alloc_pool()
556 dpaa_bp->pool = bman_new_pool(); in dpaa_bp_alloc_pool()
557 if (!dpaa_bp->pool) { in dpaa_bp_alloc_pool()
560 return -ENODEV; in dpaa_bp_alloc_pool()
563 dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool); in dpaa_bp_alloc_pool()
566 if (dpaa_bp->seed_cb) { in dpaa_bp_alloc_pool()
567 err = dpaa_bp->seed_cb(dpaa_bp); in dpaa_bp_alloc_pool()
572 dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp); in dpaa_bp_alloc_pool()
578 bman_free_pool(dpaa_bp->pool); in dpaa_bp_alloc_pool()
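The dpaa_bp_alloc_pool() hits above only show the matched lines. A hedged sketch of the underlying BMan pool lifecycle they rely on (allocate a dynamically-assigned pool, read back its BPID, release and re-acquire buffers, free the pool) follows; it uses only the <soc/fsl/bman.h> calls that also appear in this listing, and buf_addr is a placeholder DMA address.

    #include <soc/fsl/bman.h>

    static int example_bman_pool_usage(dma_addr_t buf_addr)
    {
            struct bman_pool *pool;
            struct bm_buffer bmb;
            int err;

            pool = bman_new_pool();  /* dynamic BPID, as in dpaa_bp_alloc_pool() */
            if (!pool)
                    return -ENODEV;
            pr_info("allocated buffer pool, bpid %d\n", bman_get_bpid(pool));

            /* Seed the pool with one buffer ... */
            bm_buffer_set64(&bmb, buf_addr);
            err = bman_release(pool, &bmb, 1);

            /* ... and drain it again before freeing the pool, as
             * dpaa_bp_drain() does, so no buffers are leaked.
             */
            if (!err)
                    while (bman_acquire(pool, &bmb, 1) > 0)
                            ;

            bman_free_pool(pool);
            return err;
    }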
593 ret = bman_acquire(bp->pool, bmb, num); in dpaa_bp_drain()
608 if (bp->free_buf_cb) in dpaa_bp_drain()
610 bp->free_buf_cb(bp, &bmb[i]); in dpaa_bp_drain()
616 struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid); in dpaa_bp_free()
625 if (!refcount_dec_and_test(&bp->refs)) in dpaa_bp_free()
628 if (bp->free_buf_cb) in dpaa_bp_free()
631 dpaa_bp_array[bp->bpid] = NULL; in dpaa_bp_free()
632 bman_free_pool(bp->pool); in dpaa_bp_free()
637 dpaa_bp_free(priv->dpaa_bp); in dpaa_bps_free()
641 * - Tx Confirmation queues go to WQ1.
642 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
644 * - Rx Default goes to WQ6.
645 * - Tx queues go to different WQs depending on their priority. Equal
648 * This ensures that Tx-confirmed buffers are timely released. In particular,
651 * dequeue scheduling is round-robin.
655 switch (fq->fq_type) { in dpaa_assign_wq()
658 fq->wq = 1; in dpaa_assign_wq()
662 fq->wq = 5; in dpaa_assign_wq()
666 fq->wq = 6; in dpaa_assign_wq()
672 fq->wq = 6; in dpaa_assign_wq()
676 fq->wq = 2; in dpaa_assign_wq()
680 fq->wq = 1; in dpaa_assign_wq()
684 fq->wq = 0; in dpaa_assign_wq()
693 fq->fq_type, fq->fqid); in dpaa_assign_wq()
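Only the assignments were matched in the dpaa_assign_wq() hits above; the case labels fell outside the search hits. Reconstructed from the policy comment at the head of this block and from the FQ type names used elsewhere in this listing, the mapping is roughly the sketch below (the divisor for the Tx sub-cases, i.e. the number of Tx queues per traffic class, is assumed):

    switch (fq->fq_type) {
    case FQ_TYPE_TX_CONFIRM:
    case FQ_TYPE_TX_CONF_MQ:
            fq->wq = 1;             /* Tx confirmation queues */
            break;
    case FQ_TYPE_RX_ERROR:
    case FQ_TYPE_TX_ERROR:
            fq->wq = 5;             /* error queues */
            break;
    case FQ_TYPE_RX_DEFAULT:
    case FQ_TYPE_RX_PCD:
            fq->wq = 6;             /* default and PCD Rx */
            break;
    case FQ_TYPE_TX:
            switch (idx / dpaa_num_txqs_per_tc()) {
            case 0:
                    fq->wq = 6;     /* lowest priority traffic class */
                    break;
            case 1:
                    fq->wq = 2;
                    break;
            case 2:
                    fq->wq = 1;
                    break;
            case 3:
                    fq->wq = 0;     /* highest priority traffic class */
                    break;
            }
            break;
    default:
            WARN(1, "Invalid FQ type %d for FQID %d!\n", fq->fq_type, fq->fqid);
    }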
732 port_fqs->rx_errq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
738 port_fqs->rx_defq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
758 port_fqs->rx_pcdq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
768 port_fqs->tx_errq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
774 port_fqs->tx_defq = &dpaa_fq[0]; in dpaa_alloc_all_fqs()
783 return -ENOMEM; in dpaa_alloc_all_fqs()
803 return -ENOMEM; in dpaa_get_channel()
829 * Also updates some CGR-related stats.
838 priv->cgr_data.congestion_start_jiffies = jiffies; in dpaa_eth_cgscn()
839 netif_tx_stop_all_queues(priv->net_dev); in dpaa_eth_cgscn()
840 priv->cgr_data.cgr_congested_count++; in dpaa_eth_cgscn()
842 priv->cgr_data.congested_jiffies += in dpaa_eth_cgscn()
843 (jiffies - priv->cgr_data.congestion_start_jiffies); in dpaa_eth_cgscn()
844 netif_tx_wake_all_queues(priv->net_dev); in dpaa_eth_cgscn()
854 err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid); in dpaa_eth_cgr_init()
861 priv->cgr_data.cgr.cb = dpaa_eth_cgscn; in dpaa_eth_cgr_init()
873 if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD) in dpaa_eth_cgr_init()
882 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT, in dpaa_eth_cgr_init()
887 __func__, err, priv->cgr_data.cgr.cgrid); in dpaa_eth_cgr_init()
888 qman_release_cgrid(priv->cgr_data.cgr.cgrid); in dpaa_eth_cgr_init()
892 pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n", in dpaa_eth_cgr_init()
893 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr, in dpaa_eth_cgr_init()
894 priv->cgr_data.cgr.chan); in dpaa_eth_cgr_init()
902 struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev); in dpaa_eth_cgr_set_speed()
920 err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts); in dpaa_eth_cgr_set_speed()
929 fq->fq_base = *template; in dpaa_setup_ingress()
930 fq->net_dev = priv->net_dev; in dpaa_setup_ingress()
932 fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE; in dpaa_setup_ingress()
933 fq->channel = priv->channel; in dpaa_setup_ingress()
941 fq->fq_base = *template; in dpaa_setup_egress()
942 fq->net_dev = priv->net_dev; in dpaa_setup_egress()
945 fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL; in dpaa_setup_egress()
946 fq->channel = (u16)fman_port_get_qman_channel_id(port); in dpaa_setup_egress()
948 fq->flags = QMAN_FQ_FLAG_NO_MODIFY; in dpaa_setup_egress()
963 return -ENOMEM; in dpaa_fq_setup()
969 dev_err(priv->net_dev->dev.parent, in dpaa_fq_setup()
970 "No Qman software (affine) channels found\n"); in dpaa_fq_setup()
973 list_for_each_entry(fq, &priv->dpaa_fq_list, list) { in dpaa_fq_setup()
974 switch (fq->fq_type) { in dpaa_fq_setup()
976 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); in dpaa_fq_setup()
979 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq); in dpaa_fq_setup()
984 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq); in dpaa_fq_setup()
985 fq->channel = channels[portal_cnt++ % num_portals]; in dpaa_fq_setup()
989 &fq_cbs->egress_ern); in dpaa_fq_setup()
990 priv->egress_fqs[egress_cnt++] = &fq->fq_base; in dpaa_fq_setup()
993 priv->conf_fqs[conf_cnt++] = &fq->fq_base; in dpaa_fq_setup()
996 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq); in dpaa_fq_setup()
999 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq); in dpaa_fq_setup()
1002 dev_warn(priv->net_dev->dev.parent, in dpaa_fq_setup()
1019 if (priv->egress_fqs[i] == tx_fq) in dpaa_tx_fq_to_id()
1022 return -EINVAL; in dpaa_tx_fq_to_id()
1035 priv = netdev_priv(dpaa_fq->net_dev); in dpaa_fq_init()
1036 dev = dpaa_fq->net_dev->dev.parent; in dpaa_fq_init()
1038 if (dpaa_fq->fqid == 0) in dpaa_fq_init()
1039 dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; in dpaa_fq_init()
1041 dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); in dpaa_fq_init()
1043 err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base); in dpaa_fq_init()
1048 fq = &dpaa_fq->fq_base; in dpaa_fq_init()
1050 if (dpaa_fq->init) { in dpaa_fq_init()
1055 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE); in dpaa_fq_init()
1060 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM) in dpaa_fq_init()
1061 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK); in dpaa_fq_init()
1066 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq); in dpaa_fq_init()
1070 * rather than Tx - but they nonetheless account for the in dpaa_fq_init()
1074 if (dpaa_fq->fq_type == FQ_TYPE_TX || in dpaa_fq_init()
1075 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM || in dpaa_fq_init()
1076 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) { in dpaa_fq_init()
1078 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); in dpaa_fq_init()
1079 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid; in dpaa_fq_init()
1081 * reduce the impact of fixed-size skb shells and the in dpaa_fq_init()
1085 * Unfortunately, QMan's OAL value is capped to an in dpaa_fq_init()
1090 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); in dpaa_fq_init()
1091 qm_fqd_set_oal(&initfq.fqd, in dpaa_fq_init()
1093 priv->tx_headroom, in dpaa_fq_init()
1099 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1); in dpaa_fq_init()
1100 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE); in dpaa_fq_init()
1103 if (dpaa_fq->fq_type == FQ_TYPE_TX) { in dpaa_fq_init()
1104 queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base); in dpaa_fq_init()
1106 confq = priv->conf_fqs[queue_id]; in dpaa_fq_init()
1117 qm_fqd_context_a_set64(&initfq.fqd, in dpaa_fq_init()
1123 if (priv->use_ingress_cgr && in dpaa_fq_init()
1124 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || in dpaa_fq_init()
1125 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR || in dpaa_fq_init()
1126 dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) { in dpaa_fq_init()
1128 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE); in dpaa_fq_init()
1129 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid; in dpaa_fq_init()
1134 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG); in dpaa_fq_init()
1135 qm_fqd_set_oal(&initfq.fqd, in dpaa_fq_init()
1137 priv->tx_headroom, in dpaa_fq_init()
1142 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { in dpaa_fq_init()
1144 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE | in dpaa_fq_init()
1146 initfq.fqd.context_a.stashing.exclusive = in dpaa_fq_init()
1149 qm_fqd_set_stashing(&initfq.fqd, 1, 2, in dpaa_fq_init()
1163 dpaa_fq->fqid = qman_fq_fqid(fq); in dpaa_fq_init()
1165 if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || in dpaa_fq_init()
1166 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) { in dpaa_fq_init()
1167 err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev, in dpaa_fq_init()
1168 dpaa_fq->fqid, 0); in dpaa_fq_init()
1174 err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq, in dpaa_fq_init()
1179 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq); in dpaa_fq_init()
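The hits above register an XDP Rx queue info structure for the Rx default and PCD queues. A minimal hedged sketch of that standard registration sequence, assuming the core xdp_rxq_info_* API and order-0 page buffers (as this driver uses), looks like this:

    #include <net/xdp.h>

    static int example_xdp_rxq_setup(struct xdp_rxq_info *xdp_rxq,
                                     struct net_device *dev, u32 queue_index)
    {
            int err;

            err = xdp_rxq_info_reg(xdp_rxq, dev, queue_index, 0);
            if (err)
                    return err;

            /* Rx buffers are plain order-0 pages in this driver */
            err = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_ORDER0, NULL);
            if (err)
                    xdp_rxq_info_unreg(xdp_rxq);
            return err;
    }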
1196 priv = netdev_priv(dpaa_fq->net_dev); in dpaa_fq_free_entry()
1198 if (dpaa_fq->init) { in dpaa_fq_free_entry()
1213 if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || in dpaa_fq_free_entry()
1214 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) && in dpaa_fq_free_entry()
1215 xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq)) in dpaa_fq_free_entry()
1216 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq); in dpaa_fq_free_entry()
1219 list_del(&dpaa_fq->list); in dpaa_fq_free_entry()
1250 buf_prefix_content.priv_data_size = buf_layout->priv_data_size; in dpaa_eth_init_tx_port()
1256 params.specific_params.non_rx_params.err_fqid = errq->fqid; in dpaa_eth_init_tx_port()
1257 params.specific_params.non_rx_params.dflt_fqid = defq->fqid; in dpaa_eth_init_tx_port()
1292 buf_prefix_content.priv_data_size = buf_layout->priv_data_size; in dpaa_eth_init_rx_port()
1299 rx_p->err_fqid = errq->fqid; in dpaa_eth_init_rx_port()
1300 rx_p->dflt_fqid = defq->fqid; in dpaa_eth_init_rx_port()
1302 rx_p->pcd_base_fqid = pcdq->fqid; in dpaa_eth_init_rx_port()
1303 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM; in dpaa_eth_init_rx_port()
1306 rx_p->ext_buf_pools.num_of_pools_used = 1; in dpaa_eth_init_rx_port()
1307 rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid; in dpaa_eth_init_rx_port()
1308 rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size; in dpaa_eth_init_rx_port()
1336 struct fman_port *rxport = mac_dev->port[RX]; in dpaa_eth_init_ports()
1337 struct fman_port *txport = mac_dev->port[TX]; in dpaa_eth_init_ports()
1340 err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq, in dpaa_eth_init_ports()
1341 port_fqs->tx_defq, &buf_layout[TX]); in dpaa_eth_init_ports()
1345 err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq, in dpaa_eth_init_ports()
1346 port_fqs->rx_defq, port_fqs->rx_pcdq, in dpaa_eth_init_ports()
1357 err = bman_release(dpaa_bp->pool, bmb, cnt); in dpaa_bman_release()
1359 if (WARN_ON(err) && dpaa_bp->free_buf_cb) in dpaa_bman_release()
1360 while (cnt-- > 0) in dpaa_bman_release()
1361 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); in dpaa_bman_release()
1387 !qm_sg_entry_is_final(&sgt[i - 1]) && in dpaa_release_sgt_members()
1388 sgt[i - 1].bpid == sgt[i].bpid); in dpaa_release_sgt_members()
1391 } while (!qm_sg_entry_is_final(&sgt[i - 1])); in dpaa_release_sgt_members()
1406 dpaa_bp = dpaa_bpid2pool(fd->bpid); in dpaa_fd_release()
1414 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd), in dpaa_fd_release()
1419 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, in dpaa_fd_release()
1422 if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) { in dpaa_fd_release()
1435 switch (msg->ern.rc & QM_MR_RC_MASK) { in count_ern()
1437 percpu_priv->ern_cnt.cg_tdrop++; in count_ern()
1440 percpu_priv->ern_cnt.wred++; in count_ern()
1443 percpu_priv->ern_cnt.err_cond++; in count_ern()
1446 percpu_priv->ern_cnt.early_window++; in count_ern()
1449 percpu_priv->ern_cnt.late_window++; in count_ern()
1452 percpu_priv->ern_cnt.fq_tdrop++; in count_ern()
1455 percpu_priv->ern_cnt.fq_retired++; in count_ern()
1458 percpu_priv->ern_cnt.orp_zero++; in count_ern()
1470 * Note that this function may modify the fd->cmd field and the skb data buffer
1479 u16 ethertype = ntohs(skb->protocol); in dpaa_enable_tx_csum()
1485 if (skb->ip_summed != CHECKSUM_PARTIAL) in dpaa_enable_tx_csum()
1499 ethertype = ntohs(skb_vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); in dpaa_enable_tx_csum()
1506 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4); in dpaa_enable_tx_csum()
1509 l4_proto = iph->protocol; in dpaa_enable_tx_csum()
1512 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6); in dpaa_enable_tx_csum()
1515 l4_proto = ipv6h->nexthdr; in dpaa_enable_tx_csum()
1520 netif_alert(priv, tx_err, priv->net_dev, in dpaa_enable_tx_csum()
1522 ntohs(skb->protocol)); in dpaa_enable_tx_csum()
1523 retval = -EIO; in dpaa_enable_tx_csum()
1530 parse_result->l4r = FM_L4_PARSE_RESULT_UDP; in dpaa_enable_tx_csum()
1533 parse_result->l4r = FM_L4_PARSE_RESULT_TCP; in dpaa_enable_tx_csum()
1537 netif_alert(priv, tx_err, priv->net_dev, in dpaa_enable_tx_csum()
1540 retval = -EIO; in dpaa_enable_tx_csum()
1545 parse_result->ip_off[0] = (u8)skb_network_offset(skb); in dpaa_enable_tx_csum()
1546 parse_result->l4_off = (u8)skb_transport_offset(skb); in dpaa_enable_tx_csum()
1549 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC); in dpaa_enable_tx_csum()
1551 /* On P1023 and similar platforms fd->cmd interpretation could in dpaa_enable_tx_csum()
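Consolidating the dpaa_enable_tx_csum() hits above, a hedged sketch of the Tx checksum offload flow follows: software fills in the parse results placed in the frame headroom, then tells FMan via the FD command word that they are valid and that it should insert the L4 checksum. The struct fman_prs_result layout is assumed from the field names in the matched lines; the usual <linux/ip.h> and <linux/ipv6.h> helpers provide the header offsets.

    static int example_enable_tx_csum(struct sk_buff *skb, struct qm_fd *fd,
                                      struct fman_prs_result *parse_result)
    {
            u8 l4_proto;

            if (skb->ip_summed != CHECKSUM_PARTIAL)
                    return 0;               /* nothing to offload */

            if (skb->protocol == htons(ETH_P_IP)) {
                    parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
                    l4_proto = ip_hdr(skb)->protocol;
            } else {
                    parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
                    l4_proto = ipv6_hdr(skb)->nexthdr;
            }

            parse_result->l4r = (l4_proto == IPPROTO_UDP) ?
                            FM_L4_PARSE_RESULT_UDP : FM_L4_PARSE_RESULT_TCP;
            parse_result->ip_off[0] = (u8)skb_network_offset(skb);
            parse_result->l4_off = (u8)skb_transport_offset(skb);

            /* Mark the parse results valid and ask FMan to insert the checksum */
            fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
            return 0;
    }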
1563 struct net_device *net_dev = dpaa_bp->priv->net_dev; in dpaa_bp_add_8_bufs()
1576 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0, in dpaa_bp_add_8_bufs()
1578 if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev, in dpaa_bp_add_8_bufs()
1610 int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i); in dpaa_bp_seed()
1616 for (j = 0; j < dpaa_bp->config_count; j += 8) in dpaa_bp_seed()
1645 return -ENOMEM; in dpaa_eth_refill_bpool()
1656 dpaa_bp = priv->dpaa_bp; in dpaa_eth_refill_bpools()
1658 return -EINVAL; in dpaa_eth_refill_bpools()
1659 countptr = this_cpu_ptr(dpaa_bp->percpu_count); in dpaa_eth_refill_bpools()
1683 struct device *dev = priv->net_dev->dev.parent; in dpaa_cleanup_tx_fd()
1694 dma_unmap_page(priv->tx_dma_dev, addr, in dpaa_cleanup_tx_fd()
1703 /* sgt[0] is from lowmem, was dma_map_single()-ed */ in dpaa_cleanup_tx_fd()
1704 dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]), in dpaa_cleanup_tx_fd()
1709 !qm_sg_entry_is_final(&sgt[i - 1]); i++) { in dpaa_cleanup_tx_fd()
1712 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]), in dpaa_cleanup_tx_fd()
1716 dma_unmap_single(priv->tx_dma_dev, addr, in dpaa_cleanup_tx_fd()
1722 skb = swbp->skb; in dpaa_cleanup_tx_fd()
1728 xdp_return_frame(swbp->xdpf); in dpaa_cleanup_tx_fd()
1733 if (ts && priv->tx_tstamp && in dpaa_cleanup_tx_fd()
1734 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { in dpaa_cleanup_tx_fd()
1737 if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr, in dpaa_cleanup_tx_fd()
1759 if ((priv->net_dev->features & NETIF_F_RXCSUM) && in rx_csum_offload()
1760 (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV)) in rx_csum_offload()
1788 dpaa_bp = dpaa_bpid2pool(fd->bpid); in contig_fd_to_skb()
1792 skb = build_skb(vaddr, dpaa_bp->size + in contig_fd_to_skb()
1799 skb->ip_summed = rx_csum_offload(priv, fd); in contig_fd_to_skb()
1843 dma_unmap_page(priv->rx_dma_dev, sg_addr, in sg_fd_to_skb()
1852 sz = dpaa_bp->size + in sg_fd_to_skb()
1858 skb->ip_summed = rx_csum_offload(priv, fd); in sg_fd_to_skb()
1863 WARN_ON(fd_off != priv->rx_headroom); in sg_fd_to_skb()
1884 (PAGE_SIZE - 1)) + in sg_fd_to_skb()
1885 (page_address(page) - page_address(head_page)); in sg_fd_to_skb()
1887 /* Non-initial SGT entries should not have a buffer in sg_fd_to_skb()
1896 skb_add_rx_frag(skb, i - 1, head_page, page_offset, in sg_fd_to_skb()
1898 dpaa_bp->size); in sg_fd_to_skb()
1902 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); in sg_fd_to_skb()
1903 (*count_ptr)--; in sg_fd_to_skb()
1922 dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]), in sg_fd_to_skb()
1925 /* counters 0..i-1 were decremented */ in sg_fd_to_skb()
1929 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); in sg_fd_to_skb()
1930 (*count_ptr)--; in sg_fd_to_skb()
1947 struct net_device *net_dev = priv->net_dev; in skb_to_contig_fd()
1957 fd->bpid = FSL_DPAA_BPID_INV; in skb_to_contig_fd()
1958 buff_start = skb->data - priv->tx_headroom; in skb_to_contig_fd()
1962 swbp->skb = skb; in skb_to_contig_fd()
1979 qm_fd_set_contig(fd, priv->tx_headroom, skb->len); in skb_to_contig_fd()
1980 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); in skb_to_contig_fd()
1983 addr = dma_map_single(priv->tx_dma_dev, buff_start, in skb_to_contig_fd()
1984 priv->tx_headroom + skb->len, dma_dir); in skb_to_contig_fd()
1985 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in skb_to_contig_fd()
1988 return -EINVAL; in skb_to_contig_fd()
1999 const int nr_frags = skb_shinfo(skb)->nr_frags; in skb_to_sg_fd()
2000 struct net_device *net_dev = priv->net_dev; in skb_to_sg_fd()
2014 return -ENOMEM; in skb_to_sg_fd()
2033 sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom); in skb_to_sg_fd()
2038 addr = dma_map_single(priv->tx_dma_dev, skb->data, in skb_to_sg_fd()
2040 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in skb_to_sg_fd()
2041 netdev_err(priv->net_dev, "DMA mapping failed\n"); in skb_to_sg_fd()
2042 err = -EINVAL; in skb_to_sg_fd()
2049 frag = &skb_shinfo(skb)->frags[i]; in skb_to_sg_fd()
2052 addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0, in skb_to_sg_fd()
2054 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in skb_to_sg_fd()
2055 netdev_err(priv->net_dev, "DMA mapping failed\n"); in skb_to_sg_fd()
2056 err = -EINVAL; in skb_to_sg_fd()
2071 /* set fd offset to priv->tx_headroom */ in skb_to_sg_fd()
2072 qm_fd_set_sg(fd, priv->tx_headroom, skb->len); in skb_to_sg_fd()
2076 swbp->skb = skb; in skb_to_sg_fd()
2078 addr = dma_map_page(priv->tx_dma_dev, p, 0, in skb_to_sg_fd()
2079 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); in skb_to_sg_fd()
2080 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in skb_to_sg_fd()
2081 netdev_err(priv->net_dev, "DMA mapping failed\n"); in skb_to_sg_fd()
2082 err = -EINVAL; in skb_to_sg_fd()
2086 fd->bpid = FSL_DPAA_BPID_INV; in skb_to_sg_fd()
2087 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); in skb_to_sg_fd()
2095 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]), in skb_to_sg_fd()
2112 egress_fq = priv->egress_fqs[queue]; in dpaa_xmit()
2113 if (fd->bpid == FSL_DPAA_BPID_INV) in dpaa_xmit()
2114 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue])); in dpaa_xmit()
2117 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); in dpaa_xmit()
2121 if (err != -EBUSY) in dpaa_xmit()
2126 percpu_stats->tx_fifo_errors++; in dpaa_xmit()
2130 percpu_stats->tx_packets++; in dpaa_xmit()
2131 percpu_stats->tx_bytes += qm_fd_get_length(fd); in dpaa_xmit()
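The dpaa_xmit() hits above show the enqueue path; the sketch below restates the enqueue-with-retry pattern they imply. QMan can transiently return -EBUSY, so the enqueue is retried a bounded number of times before the frame is accounted as a Tx FIFO error. DPAA_ENQUEUE_RETRIES is assumed to be the driver's retry bound; the stats pointer mirrors the per-CPU rtnl_link_stats64 seen above.

    static int example_xmit(struct qman_fq *egress_fq, struct qm_fd *fd,
                            struct rtnl_link_stats64 *percpu_stats)
    {
            int err, retries = 0;

            do {
                    err = qman_enqueue(egress_fq, fd);
            } while (err == -EBUSY && ++retries < DPAA_ENQUEUE_RETRIES);

            if (err < 0) {
                    percpu_stats->tx_fifo_errors++;
                    return err;
            }

            percpu_stats->tx_packets++;
            percpu_stats->tx_bytes += qm_fd_get_length(fd);
            return 0;
    }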
2144 if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN)) in dpaa_a050385_wa_skb()
2155 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in dpaa_a050385_wa_skb()
2156 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in dpaa_a050385_wa_skb()
2164 (i < skb_shinfo(skb)->nr_frags - 1)) in dpaa_a050385_wa_skb()
2172 new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 + in dpaa_a050385_wa_skb()
2173 priv->tx_headroom); in dpaa_a050385_wa_skb()
2175 return -ENOMEM; in dpaa_a050385_wa_skb()
2178 skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD); in dpaa_a050385_wa_skb()
2181 start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN); in dpaa_a050385_wa_skb()
2182 if (start - new_skb->data) in dpaa_a050385_wa_skb()
2183 skb_reserve(new_skb, start - new_skb->data); in dpaa_a050385_wa_skb()
2185 skb_put(new_skb, skb->len); in dpaa_a050385_wa_skb()
2186 skb_copy_bits(skb, 0, new_skb->data, skb->len); in dpaa_a050385_wa_skb()
2188 new_skb->dev = skb->dev; in dpaa_a050385_wa_skb()
2191 if (priv->tx_tstamp) { in dpaa_a050385_wa_skb()
2192 skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags; in dpaa_a050385_wa_skb()
2193 skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps; in dpaa_a050385_wa_skb()
2194 skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey; in dpaa_a050385_wa_skb()
2195 if (skb->sk) in dpaa_a050385_wa_skb()
2196 skb_set_owner_w(new_skb, skb->sk); in dpaa_a050385_wa_skb()
2229 if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) && in dpaa_a050385_wa_xdpf()
2230 xdpf->headroom >= priv->tx_headroom) { in dpaa_a050385_wa_xdpf()
2231 xdpf->headroom = priv->tx_headroom; in dpaa_a050385_wa_xdpf()
2239 aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT); in dpaa_a050385_wa_xdpf()
2240 data_shift = xdpf->data - aligned_data; in dpaa_a050385_wa_xdpf()
2245 if (xdpf->headroom >= data_shift + priv->tx_headroom) { in dpaa_a050385_wa_xdpf()
2246 memmove(aligned_data, xdpf->data, xdpf->len); in dpaa_a050385_wa_xdpf()
2247 xdpf->data = aligned_data; in dpaa_a050385_wa_xdpf()
2248 xdpf->headroom = priv->tx_headroom; in dpaa_a050385_wa_xdpf()
2257 headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom, in dpaa_a050385_wa_xdpf()
2263 if (headroom + xdpf->len > DPAA_BP_RAW_SIZE - in dpaa_a050385_wa_xdpf()
2265 return -ENOMEM; in dpaa_a050385_wa_xdpf()
2269 return -ENOMEM; in dpaa_a050385_wa_xdpf()
2273 memcpy(new_buff + headroom, xdpf->data, xdpf->len); in dpaa_a050385_wa_xdpf()
2279 new_xdpf->data = new_buff + headroom; in dpaa_a050385_wa_xdpf()
2280 new_xdpf->len = xdpf->len; in dpaa_a050385_wa_xdpf()
2281 new_xdpf->headroom = priv->tx_headroom; in dpaa_a050385_wa_xdpf()
2282 new_xdpf->frame_sz = DPAA_BP_RAW_SIZE; in dpaa_a050385_wa_xdpf()
2283 new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0; in dpaa_a050385_wa_xdpf()
2307 percpu_priv = this_cpu_ptr(priv->percpu_priv); in dpaa_start_xmit()
2308 percpu_stats = &percpu_priv->stats; in dpaa_start_xmit()
2312 /* Packet data is always read as 32-bit words, so zero out any part of in dpaa_start_xmit()
2323 * We've made sure skb is not shared in dev->priv_flags, in dpaa_start_xmit()
2326 if (skb_cow_head(skb, priv->tx_headroom)) in dpaa_start_xmit()
2336 (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) { in dpaa_start_xmit()
2357 percpu_priv->tx_frag_skbuffs++; in dpaa_start_xmit()
2370 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { in dpaa_start_xmit()
2372 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in dpaa_start_xmit()
2381 percpu_stats->tx_errors++; in dpaa_start_xmit()
2394 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS); in dpaa_rx_error()
2396 percpu_priv->stats.rx_errors++; in dpaa_rx_error()
2398 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA) in dpaa_rx_error()
2399 percpu_priv->rx_errors.dme++; in dpaa_rx_error()
2400 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL) in dpaa_rx_error()
2401 percpu_priv->rx_errors.fpe++; in dpaa_rx_error()
2402 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE) in dpaa_rx_error()
2403 percpu_priv->rx_errors.fse++; in dpaa_rx_error()
2404 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR) in dpaa_rx_error()
2405 percpu_priv->rx_errors.phe++; in dpaa_rx_error()
2420 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS); in dpaa_tx_error()
2422 percpu_priv->stats.tx_errors++; in dpaa_tx_error()
2434 np->xdp_act = 0; in dpaa_eth_poll()
2436 cleaned = qman_p_poll_dqrr(np->p, budget); in dpaa_eth_poll()
2438 if (np->xdp_act & XDP_REDIRECT) in dpaa_eth_poll()
2443 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); in dpaa_eth_poll()
2444 } else if (np->down) { in dpaa_eth_poll()
2445 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); in dpaa_eth_poll()
2459 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) { in dpaa_tx_conf()
2462 be32_to_cpu(fd->status) & in dpaa_tx_conf()
2465 percpu_priv->stats.tx_errors++; in dpaa_tx_conf()
2468 percpu_priv->tx_confirm++; in dpaa_tx_conf()
2479 /* Disable QMan IRQ and invoke NAPI */ in dpaa_eth_napi_schedule()
2482 percpu_priv->np.p = portal; in dpaa_eth_napi_schedule()
2483 napi_schedule(&percpu_priv->np.napi); in dpaa_eth_napi_schedule()
2484 percpu_priv->in_interrupt++; in dpaa_eth_napi_schedule()
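Taken together, the dpaa_eth_napi_schedule() and dpaa_eth_poll() hits above describe the portal-to-NAPI handoff: mask the QMan DQRR interrupt source, schedule NAPI, drain the portal from the poll routine, and unmask once the budget was not exhausted. A hedged sketch of that flow, reusing the struct dpaa_napi_portal fields (p, napi) visible in this listing:

    static void example_napi_schedule(struct dpaa_napi_portal *np,
                                      struct qman_portal *portal)
    {
            /* Disable QMan IRQ and invoke NAPI */
            qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
            np->p = portal;
            napi_schedule(&np->napi);
    }

    static int example_poll(struct napi_struct *napi, int budget)
    {
            struct dpaa_napi_portal *np =
                    container_of(napi, struct dpaa_napi_portal, napi);
            int cleaned = qman_p_poll_dqrr(np->p, budget);

            if (cleaned < budget) {
                    napi_complete_done(napi, cleaned);
                    /* re-enable the DQRR interrupt source; the driver also
                     * re-arms it when the interface is going down
                     */
                    qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
            }
            return cleaned;
    }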
2501 net_dev = dpaa_fq->net_dev; in rx_error_dqrr()
2503 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); in rx_error_dqrr()
2507 percpu_priv = this_cpu_ptr(priv->percpu_priv); in rx_error_dqrr()
2513 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); in rx_error_dqrr()
2531 percpu_priv = this_cpu_ptr(priv->percpu_priv); in dpaa_xdp_xmit_frame()
2532 percpu_stats = &percpu_priv->stats; in dpaa_xdp_xmit_frame()
2537 err = -ENOMEM; in dpaa_xdp_xmit_frame()
2543 if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) { in dpaa_xdp_xmit_frame()
2544 err = -EINVAL; in dpaa_xdp_xmit_frame()
2548 buff_start = xdpf->data - xdpf->headroom; in dpaa_xdp_xmit_frame()
2554 swbp->skb = NULL; in dpaa_xdp_xmit_frame()
2555 swbp->xdpf = xdpf; in dpaa_xdp_xmit_frame()
2560 qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len); in dpaa_xdp_xmit_frame()
2562 addr = dma_map_single(priv->tx_dma_dev, buff_start, in dpaa_xdp_xmit_frame()
2563 xdpf->headroom + xdpf->len, in dpaa_xdp_xmit_frame()
2565 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { in dpaa_xdp_xmit_frame()
2566 err = -EINVAL; in dpaa_xdp_xmit_frame()
2578 dma_unmap_single(priv->tx_dma_dev, addr, in dpaa_xdp_xmit_frame()
2587 percpu_stats->tx_errors++; in dpaa_xdp_xmit_frame()
2601 xdp_prog = READ_ONCE(priv->xdp_prog); in dpaa_run_xdp()
2605 xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE, in dpaa_run_xdp()
2606 &dpaa_fq->xdp_rxq); in dpaa_run_xdp()
2607 xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM, in dpaa_run_xdp()
2627 qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data); in dpaa_run_xdp()
2633 xdp.data - xdp.data_meta; in dpaa_run_xdp()
2635 *xdp_meta_len = xdp.data - xdp.data_meta; in dpaa_run_xdp()
2650 if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf)) in dpaa_run_xdp()
2659 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); in dpaa_run_xdp()
2661 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); in dpaa_run_xdp()
2666 bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); in dpaa_run_xdp()
2669 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); in dpaa_run_xdp()
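The dpaa_run_xdp() hits above show the per-verdict handling; below is a hedged consolidation of that XDP action switch, using only core XDP/BPF calls that also appear in the matched lines. The XDP_TX branch, which in the driver hands the frame to dpaa_xdp_xmit_frame(), is left as a comment.

    static u32 example_run_xdp(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
                               struct net_device *net_dev)
    {
            u32 xdp_act = bpf_prog_run_xdp(xdp_prog, xdp);

            switch (xdp_act) {
            case XDP_PASS:
                    break;                  /* continue up the normal Rx path */
            case XDP_TX:
                    /* convert to an xdp_frame and enqueue it on a Tx FQ */
                    break;
            case XDP_REDIRECT:
                    if (xdp_do_redirect(net_dev, xdp, xdp_prog))
                            xdp_act = XDP_DROP;
                    break;
            default:
                    bpf_warn_invalid_xdp_action(net_dev, xdp_prog, xdp_act);
                    fallthrough;
            case XDP_ABORTED:
                    trace_xdp_exception(net_dev, xdp_prog, xdp_act);
                    fallthrough;
            case XDP_DROP:
                    break;                  /* caller frees the buffer */
            }
            return xdp_act;
    }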
2690 const struct qm_fd *fd = &dq->fd; in rx_default_dqrr()
2708 fd_status = be32_to_cpu(fd->status); in rx_default_dqrr()
2710 net_dev = dpaa_fq->net_dev; in rx_default_dqrr()
2712 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); in rx_default_dqrr()
2717 trace_dpaa_rx_fd(net_dev, fq, &dq->fd); in rx_default_dqrr()
2719 percpu_priv = this_cpu_ptr(priv->percpu_priv); in rx_default_dqrr()
2720 percpu_stats = &percpu_priv->stats; in rx_default_dqrr()
2721 np = &percpu_priv->np; in rx_default_dqrr()
2732 dpaa_fd_release(net_dev, &dq->fd); in rx_default_dqrr()
2741 percpu_stats->rx_errors++; in rx_default_dqrr()
2746 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, in rx_default_dqrr()
2759 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); in rx_default_dqrr()
2760 (*count_ptr)--; in rx_default_dqrr()
2763 if (priv->rx_tstamp) { in rx_default_dqrr()
2764 if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) in rx_default_dqrr()
2771 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && in rx_default_dqrr()
2772 !fman_port_get_hash_result_offset(priv->mac_dev->port[RX], in rx_default_dqrr()
2781 np->xdp_act |= xdp_act; in rx_default_dqrr()
2783 percpu_stats->rx_packets++; in rx_default_dqrr()
2784 percpu_stats->rx_bytes += qm_fd_get_length(fd); in rx_default_dqrr()
2792 if (READ_ONCE(priv->xdp_prog)) { in rx_default_dqrr()
2811 shhwtstamps->hwtstamp = ns_to_ktime(ns); in rx_default_dqrr()
2814 skb->protocol = eth_type_trans(skb, net_dev); in rx_default_dqrr()
2821 type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ? in rx_default_dqrr()
2826 skb_len = skb->len; in rx_default_dqrr()
2829 percpu_stats->rx_dropped++; in rx_default_dqrr()
2833 percpu_stats->rx_packets++; in rx_default_dqrr()
2834 percpu_stats->rx_bytes += skb_len; in rx_default_dqrr()
2848 net_dev = ((struct dpaa_fq *)fq)->net_dev; in conf_error_dqrr()
2851 percpu_priv = this_cpu_ptr(priv->percpu_priv); in conf_error_dqrr()
2856 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); in conf_error_dqrr()
2870 net_dev = ((struct dpaa_fq *)fq)->net_dev; in conf_dflt_dqrr()
2874 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); in conf_dflt_dqrr()
2876 percpu_priv = this_cpu_ptr(priv->percpu_priv); in conf_dflt_dqrr()
2881 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); in conf_dflt_dqrr()
2890 const struct qm_fd *fd = &msg->ern.fd; in egress_ern()
2896 net_dev = ((struct dpaa_fq *)fq)->net_dev; in egress_ern()
2898 percpu_priv = this_cpu_ptr(priv->percpu_priv); in egress_ern()
2900 percpu_priv->stats.tx_dropped++; in egress_ern()
2901 percpu_priv->stats.tx_fifo_errors++; in egress_ern()
2922 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); in dpaa_eth_napi_enable()
2924 percpu_priv->np.down = false; in dpaa_eth_napi_enable()
2925 napi_enable(&percpu_priv->np.napi); in dpaa_eth_napi_enable()
2935 percpu_priv = per_cpu_ptr(priv->percpu_priv, i); in dpaa_eth_napi_disable()
2937 percpu_priv->np.down = true; in dpaa_eth_napi_disable()
2938 napi_disable(&percpu_priv->np.napi); in dpaa_eth_napi_disable()
2949 mac_dev = priv->mac_dev; in dpaa_open()
2952 err = phylink_of_phy_connect(mac_dev->phylink, in dpaa_open()
2953 mac_dev->dev->of_node, 0); in dpaa_open()
2957 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { in dpaa_open()
2958 err = fman_port_enable(mac_dev->port[i]); in dpaa_open()
2963 err = priv->mac_dev->enable(mac_dev->fman_mac); in dpaa_open()
2965 netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err); in dpaa_open()
2968 phylink_start(mac_dev->phylink); in dpaa_open()
2975 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) in dpaa_open()
2976 fman_port_disable(mac_dev->port[i]); in dpaa_open()
2977 phylink_disconnect_phy(mac_dev->phylink); in dpaa_open()
3000 int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom; in xdp_validate_mtu()
3006 dev_warn(priv->net_dev->dev.parent, in xdp_validate_mtu()
3008 max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN); in xdp_validate_mtu()
3019 if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu)) in dpaa_change_mtu()
3020 return -EINVAL; in dpaa_change_mtu()
3022 WRITE_ONCE(net_dev->mtu, new_mtu); in dpaa_change_mtu()
3033 /* S/G fragments are not supported in XDP-mode */ in dpaa_setup_xdp()
3034 if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) { in dpaa_setup_xdp()
3035 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); in dpaa_setup_xdp()
3036 return -EINVAL; in dpaa_setup_xdp()
3044 old_prog = xchg(&priv->xdp_prog, bpf->prog); in dpaa_setup_xdp()
3051 NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed"); in dpaa_setup_xdp()
3061 switch (xdp->command) { in dpaa_xdp()
3065 return -EINVAL; in dpaa_xdp()
3076 return -EINVAL; in dpaa_xdp_xmit()
3079 return -ENETDOWN; in dpaa_xdp_xmit()
3096 config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; in dpaa_hwtstamp_get()
3097 config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL : in dpaa_hwtstamp_get()
3109 switch (config->tx_type) { in dpaa_hwtstamp_set()
3114 priv->tx_tstamp = false; in dpaa_hwtstamp_set()
3117 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); in dpaa_hwtstamp_set()
3118 priv->tx_tstamp = true; in dpaa_hwtstamp_set()
3121 return -ERANGE; in dpaa_hwtstamp_set()
3124 if (config->rx_filter == HWTSTAMP_FILTER_NONE) { in dpaa_hwtstamp_set()
3128 priv->rx_tstamp = false; in dpaa_hwtstamp_set()
3130 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); in dpaa_hwtstamp_set()
3131 priv->rx_tstamp = true; in dpaa_hwtstamp_set()
3133 config->rx_filter = HWTSTAMP_FILTER_ALL; in dpaa_hwtstamp_set()
3143 return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd); in dpaa_ioctl()
3171 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); in dpaa_napi_add()
3173 netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll); in dpaa_napi_add()
3186 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); in dpaa_napi_del()
3188 __netif_napi_del(&percpu_priv->np.napi); in dpaa_napi_del()
3198 dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, in dpaa_bp_free_pf()
3211 return ERR_PTR(-ENOMEM); in dpaa_bp_alloc()
3213 dpaa_bp->bpid = FSL_DPAA_BPID_INV; in dpaa_bp_alloc()
3214 dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); in dpaa_bp_alloc()
3215 if (!dpaa_bp->percpu_count) in dpaa_bp_alloc()
3216 return ERR_PTR(-ENOMEM); in dpaa_bp_alloc()
3218 dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; in dpaa_bp_alloc()
3220 dpaa_bp->seed_cb = dpaa_bp_seed; in dpaa_bp_alloc()
3221 dpaa_bp->free_buf_cb = dpaa_bp_free_pf; in dpaa_bp_alloc()
3237 err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); in dpaa_ingress_cgr_init()
3257 err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, in dpaa_ingress_cgr_init()
3262 err, priv->ingress_cgr.cgrid); in dpaa_ingress_cgr_init()
3263 qman_release_cgrid(priv->ingress_cgr.cgrid); in dpaa_ingress_cgr_init()
3268 priv->ingress_cgr.cgrid, priv->mac_dev->addr); in dpaa_ingress_cgr_init()
3270 priv->use_ingress_cgr = true; in dpaa_ingress_cgr_init()
3282 * - the driver private data area in dpaa_get_headroom()
3283 * - parse results, hash results, timestamp if selected in dpaa_get_headroom()
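The two comment lines above list what the frame headroom must accommodate. A hedged sketch of the resulting headroom budget, using the DPAA_HWA_SIZE and DPAA_FD_DATA_ALIGNMENT macros seen elsewhere in this listing (the per-port rounding details are assumed):

    static u16 example_headroom(u16 priv_data_size)
    {
            /* driver private area + hardware annotations (parse results,
             * hash, timestamp), rounded up to the FD data alignment
             */
            return ALIGN(priv_data_size + DPAA_HWA_SIZE, DPAA_FD_DATA_ALIGNMENT);
    }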
3316 dev = &pdev->dev; in dpaa_eth_probe()
3320 return -EPROBE_DEFER; in dpaa_eth_probe()
3323 return -ENODEV; in dpaa_eth_probe()
3327 return -EPROBE_DEFER; in dpaa_eth_probe()
3329 dev_err(dev, "failing probe due to qman probe error\n"); in dpaa_eth_probe()
3330 return -ENODEV; in dpaa_eth_probe()
3334 return -EPROBE_DEFER; in dpaa_eth_probe()
3338 return -ENODEV; in dpaa_eth_probe()
3342 return -EPROBE_DEFER; in dpaa_eth_probe()
3345 "failing probe due to qman portals probe error\n"); in dpaa_eth_probe()
3346 return -ENODEV; in dpaa_eth_probe()
3355 return -ENOMEM; in dpaa_eth_probe()
3359 SET_NETDEV_DEV(net_dev, dev->parent); in dpaa_eth_probe()
3363 priv->net_dev = net_dev; in dpaa_eth_probe()
3365 priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); in dpaa_eth_probe()
3367 priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), in dpaa_eth_probe()
3368 sizeof(*priv->egress_fqs), in dpaa_eth_probe()
3370 if (!priv->egress_fqs) { in dpaa_eth_probe()
3371 err = -ENOMEM; in dpaa_eth_probe()
3375 priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), in dpaa_eth_probe()
3376 sizeof(*priv->conf_fqs), in dpaa_eth_probe()
3378 if (!priv->conf_fqs) { in dpaa_eth_probe()
3379 err = -ENOMEM; in dpaa_eth_probe()
3391 priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]); in dpaa_eth_probe()
3392 priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]); in dpaa_eth_probe()
3393 err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40)); in dpaa_eth_probe()
3395 err = dma_coerce_mask_and_coherent(priv->tx_dma_dev, in dpaa_eth_probe()
3402 /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, in dpaa_eth_probe()
3409 net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); in dpaa_eth_probe()
3412 net_dev->mtu); in dpaa_eth_probe()
3414 priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ in dpaa_eth_probe()
3415 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ in dpaa_eth_probe()
3424 dpaa_bp->raw_size = DPAA_BP_RAW_SIZE; in dpaa_eth_probe()
3426 dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size); in dpaa_eth_probe()
3427 dpaa_bp->priv = priv; in dpaa_eth_probe()
3432 priv->dpaa_bp = dpaa_bp; in dpaa_eth_probe()
3434 INIT_LIST_HEAD(&priv->dpaa_fq_list); in dpaa_eth_probe()
3438 err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); in dpaa_eth_probe()
3444 priv->mac_dev = mac_dev; in dpaa_eth_probe()
3453 priv->channel = (u16)channel; in dpaa_eth_probe()
3458 dpaa_eth_add_channel(priv->channel, &pdev->dev); in dpaa_eth_probe()
3460 err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); in dpaa_eth_probe()
3465 * dynamically-allocated CGR ID. in dpaa_eth_probe()
3482 list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) { in dpaa_eth_probe()
3488 priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX); in dpaa_eth_probe()
3489 priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX); in dpaa_eth_probe()
3493 &priv->buf_layout[0], dev); in dpaa_eth_probe()
3498 priv->keygen_in_use = true; in dpaa_eth_probe()
3500 priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); in dpaa_eth_probe()
3501 if (!priv->percpu_priv) { in dpaa_eth_probe()
3503 err = -ENOMEM; in dpaa_eth_probe()
3507 priv->num_tc = 1; in dpaa_eth_probe()
3509 priv->num_tc * dpaa_num_txqs_per_tc()); in dpaa_eth_probe()
3520 dpaa_eth_sysfs_init(&net_dev->dev); in dpaa_eth_probe()
3523 net_dev->name); in dpaa_eth_probe()
3530 dpaa_fq_free(dev, &priv->dpaa_fq_list); in dpaa_eth_probe()
3531 qman_delete_cgr_safe(&priv->ingress_cgr); in dpaa_eth_probe()
3532 qman_release_cgrid(priv->ingress_cgr.cgrid); in dpaa_eth_probe()
3534 qman_delete_cgr_safe(&priv->cgr_data.cgr); in dpaa_eth_probe()
3535 qman_release_cgrid(priv->cgr_data.cgr.cgrid); in dpaa_eth_probe()
3552 dev = &pdev->dev; in dpaa_remove()
3561 phylink_destroy(priv->mac_dev->phylink); in dpaa_remove()
3563 err = dpaa_fq_free(dev, &priv->dpaa_fq_list); in dpaa_remove()
3568 qman_delete_cgr_safe(&priv->ingress_cgr); in dpaa_remove()
3569 qman_release_cgrid(priv->ingress_cgr.cgrid); in dpaa_remove()
3570 qman_delete_cgr_safe(&priv->cgr_data.cgr); in dpaa_remove()
3571 qman_release_cgrid(priv->cgr_data.cgr.cgrid); in dpaa_remove()
3582 .name = "dpaa-ethernet",