// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
#include <linux/skbuff.h>

#include "bnge.h"
#include "bnge_hwrm_lib.h"
#include "bnge_ethtool.h"

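/* Placeholder transmit handler: no descriptors are posted here, the skb is
 * simply freed and reported as sent.
 */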
static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

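/* Stub ndo_open/ndo_stop handlers: nothing is brought up or torn down here. */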
static int bnge_open(struct net_device *dev)
{
        return 0;
}

static int bnge_close(struct net_device *dev)
{
        return 0;
}

static const struct net_device_ops bnge_netdev_ops = {
        .ndo_open = bnge_open,
        .ndo_stop = bnge_close,
        .ndo_start_xmit = bnge_start_xmit,
};

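/* Copy the PF MAC address into the net_device. */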
static void bnge_init_mac_addr(struct bnge_dev *bd)
{
        eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
}

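/* Refresh the aggregation (TPA) flags from the current netdev features;
 * LRO takes precedence over hardware GRO when both are enabled.
 */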
static void bnge_set_tpa_flags(struct bnge_dev *bd)
{
        struct bnge_net *bn = netdev_priv(bd->netdev);

        bn->priv_flags &= ~BNGE_NET_EN_TPA;

        if (bd->netdev->features & NETIF_F_LRO)
                bn->priv_flags |= BNGE_NET_EN_LRO;
        else if (bd->netdev->features & NETIF_F_GRO_HW)
                bn->priv_flags |= BNGE_NET_EN_GRO;
}

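/* Initialize the L2 filter hash table heads and the random seed used when
 * hashing filters.
 */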
static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
{
        int i;

        for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
        get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
}

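/* Derive the RX, aggregation, TX and completion ring geometry (buffer sizes,
 * page counts and ring masks) from the MTU, the requested ring sizes and the
 * TPA/jumbo configuration.
 */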
void bnge_set_ring_params(struct bnge_dev *bd)
{
        struct bnge_net *bn = netdev_priv(bd->netdev);
        u32 ring_size, rx_size, rx_space, max_rx_cmpl;
        u32 agg_factor = 0, agg_ring_size = 0;

        /* 8 for CRC and VLAN */
        rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

        rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
        ring_size = bn->rx_ring_size;
        bn->rx_agg_ring_size = 0;
        bn->rx_agg_nr_pages = 0;

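        /* With TPA enabled, provision extra aggregation buffers per RX entry
         * (at most 4, bounded by how many RX pages fit in 64KB).
         */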
        if (bn->priv_flags & BNGE_NET_EN_TPA)
                agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);

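        /* If an MTU-sized buffer no longer fits in a single page, switch to
         * jumbo mode and make sure the aggregation ring can absorb the rest
         * of each frame.
         */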
        bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
        if (rx_space > PAGE_SIZE) {
                u32 jumbo_factor;

                bn->priv_flags |= BNGE_NET_EN_JUMBO;
                jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
                if (jumbo_factor > agg_factor)
                        agg_factor = jumbo_factor;
        }
        if (agg_factor) {
                if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
                        ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
                        netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
                                    bn->rx_ring_size, ring_size);
                        bn->rx_ring_size = ring_size;
                }
                agg_ring_size = ring_size * agg_factor;

                bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
                                                          RX_DESC_CNT);
                if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
                        u32 tmp = agg_ring_size;

                        bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
                        agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
                        netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
                                    tmp, agg_ring_size);
                }
                bn->rx_agg_ring_size = agg_ring_size;
                bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;

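                /* Aggregation buffers carry the bulk of the data, so the
                 * primary RX buffer only needs to hold copy-break sized
                 * packets.
                 */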
                rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
                rx_space = rx_size + NET_SKB_PAD +
                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        }

        bn->rx_buf_use_size = rx_size;
        bn->rx_buf_size = rx_space;

        bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
        bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;

        ring_size = bn->tx_ring_size;
        bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
        bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;

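        /* Size the completion ring to absorb RX (and TPA), aggregation and
         * TX completions, then clamp it to the maximum number of pages.
         */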
        max_rx_cmpl = bn->rx_ring_size;

        if (bn->priv_flags & BNGE_NET_EN_TPA)
                max_rx_cmpl += bd->max_tpa_v2;
        ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
        bn->cp_ring_size = ring_size;

        bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
        if (bn->cp_nr_pages > MAX_CP_PAGES) {
                bn->cp_nr_pages = MAX_CP_PAGES;
                bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
                netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
                            ring_size, bn->cp_ring_size);
        }
        bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
        bn->cp_ring_mask = bn->cp_bit - 1;
}

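/* Allocate the net_device for this bnge device, wire up its ops and feature
 * set, apply default ring sizes and register it with the stack.
 */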
int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
{
        struct net_device *netdev;
        struct bnge_net *bn;
        int rc;

        netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
                                    max_irqs);
        if (!netdev)
                return -ENOMEM;

        SET_NETDEV_DEV(netdev, bd->dev);
        bd->netdev = netdev;

        netdev->netdev_ops = &bnge_netdev_ops;

        bnge_set_ethtool_ops(netdev);

        bn = netdev_priv(netdev);
        bn->netdev = netdev;
        bn->bd = bd;

        netdev->min_mtu = ETH_ZLEN;
        netdev->max_mtu = bd->max_mtu;

        netdev->hw_features = NETIF_F_IP_CSUM |
                              NETIF_F_IPV6_CSUM |
                              NETIF_F_SG |
                              NETIF_F_TSO |
                              NETIF_F_TSO6 |
                              NETIF_F_GSO_UDP_TUNNEL |
                              NETIF_F_GSO_GRE |
                              NETIF_F_GSO_IPXIP4 |
                              NETIF_F_GSO_UDP_TUNNEL_CSUM |
                              NETIF_F_GSO_GRE_CSUM |
                              NETIF_F_GSO_PARTIAL |
                              NETIF_F_RXHASH |
                              NETIF_F_RXCSUM |
                              NETIF_F_GRO;

        if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
                netdev->hw_features |= NETIF_F_GSO_UDP_L4;

        if (BNGE_SUPPORTS_TPA(bd))
                netdev->hw_features |= NETIF_F_LRO;

        netdev->hw_enc_features = NETIF_F_IP_CSUM |
                                  NETIF_F_IPV6_CSUM |
                                  NETIF_F_SG |
                                  NETIF_F_TSO |
                                  NETIF_F_TSO6 |
                                  NETIF_F_GSO_UDP_TUNNEL |
                                  NETIF_F_GSO_GRE |
                                  NETIF_F_GSO_UDP_TUNNEL_CSUM |
                                  NETIF_F_GSO_GRE_CSUM |
                                  NETIF_F_GSO_IPXIP4 |
                                  NETIF_F_GSO_PARTIAL;

        if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
                netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;

        netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
                                       NETIF_F_GSO_GRE_CSUM;

        netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
        if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
                netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
        if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
                netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;

        if (BNGE_SUPPORTS_TPA(bd))
                netdev->hw_features |= NETIF_F_GRO_HW;

        netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;

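        /* Hardware GRO and LRO are mutually exclusive; when hardware GRO is
         * enabled, drop LRO from the active feature set.
         */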
        if (netdev->features & NETIF_F_GRO_HW)
                netdev->features &= ~NETIF_F_LRO;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
        if (bd->tso_max_segs)
                netif_set_tso_max_segs(netdev, bd->tso_max_segs);

        bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
        bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;

        bnge_set_tpa_flags(bd);
        bnge_set_ring_params(bd);

        bnge_init_l2_fltr_tbl(bn);
        bnge_init_mac_addr(bd);

        rc = register_netdev(netdev);
        if (rc) {
                dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
                goto err_netdev;
        }

        return 0;

err_netdev:
        free_netdev(netdev);
        return rc;
}

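/* Unregister and free the net_device created by bnge_netdev_alloc(). */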
void bnge_netdev_free(struct bnge_dev *bd)
{
        struct net_device *netdev = bd->netdev;

        unregister_netdev(netdev);
        free_netdev(netdev);
        bd->netdev = NULL;
}