// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_main.c: Main file for aQuantia Linux driver. */

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
#include "aq_vec.h"

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <linux/filter.h>

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);

DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
EXPORT_SYMBOL(aq_xdp_locking_key);

static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;

static const struct net_device_ops aq_ndev_ops;

static struct workqueue_struct *aq_ndev_wq;

void aq_ndev_schedule_work(struct work_struct *work)
{
	queue_work(aq_ndev_wq, work);
}

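/* Allocate a multi-queue net_device with struct aq_nic_s as its private
 * data and attach the netdev and ethtool operations.
 */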
struct net_device *aq_ndev_alloc(void)
{
	struct net_device *ndev = NULL;
	struct aq_nic_s *aq_nic = NULL;

	ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX);
	if (!ndev)
		return NULL;

	aq_nic = netdev_priv(ndev);
	aq_nic->ndev = ndev;
	ndev->netdev_ops = &aq_ndev_ops;
	ndev->ethtool_ops = &aq_ethtool_ops;

	return ndev;
}

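/* ndo_open: initialize the NIC, re-apply stored RX flow (rxnfc) rules and
 * VLAN filters, then start the datapath. On failure the NIC is
 * deinitialized again before the error is returned.
 */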
int aq_ndev_open(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int err = 0;

	err = aq_nic_init(aq_nic);
	if (err < 0)
		goto err_exit;

	err = aq_reapply_rxnfc_all_rules(aq_nic);
	if (err < 0)
		goto err_exit;

	err = aq_filters_vlans_update(aq_nic);
	if (err < 0)
		goto err_exit;

	err = aq_nic_start(aq_nic);
	if (err < 0) {
		aq_nic_stop(aq_nic);
		goto err_exit;
	}

err_exit:
	if (err < 0)
		aq_nic_deinit(aq_nic, true);

	return err;
}

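/* ndo_stop: stop the datapath and deinitialize the NIC. */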
int aq_ndev_close(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int err = 0;

	err = aq_nic_stop(aq_nic);
	aq_nic_deinit(aq_nic, true);

	return err;
}

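/* ndo_start_xmit: when the PTP datapath is up, PTP traffic is steered to
 * the dedicated PTP queue; everything else goes through the regular TX
 * path.
 */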
static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
		/* Hardware adds the timestamp for PTPv2 802.1AS and
		 * PTPv2 IPv4 UDP packets.
		 * We also have to push general (UDP port 320) messages to
		 * the PTP queue explicitly; this is a limitation of the
		 * current firmware and hardware PTP design of the chip.
		 * Otherwise the PTP stream will fail to synchronize.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
		    unlikely((ip_hdr(skb)->version == 4) &&
			     (ip_hdr(skb)->protocol == IPPROTO_UDP) &&
			     ((udp_hdr(skb)->dest == htons(319)) ||
			      (udp_hdr(skb)->dest == htons(320)))) ||
		    unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
			return aq_ptp_xmit(aq_nic, skb);
	}
#endif

	skb_tx_timestamp(skb);
	return aq_nic_xmit(aq_nic, skb);
}

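/* ndo_change_mtu: reject MTUs whose frames no longer fit a single RX buffer
 * while a non-frags XDP program is attached, then program the new maximum
 * frame size (MTU plus Ethernet header) into the NIC.
 */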
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
	int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *prog;
	int err;

	prog = READ_ONCE(aq_nic->xdp_prog);
	if (prog && !prog->aux->xdp_has_frags &&
	    new_frame_size > AQ_CFG_RX_FRAME_MAX) {
		netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
			   new_mtu);
		return -EOPNOTSUPP;
	}

	err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);

	if (err < 0)
		goto err_exit;
	WRITE_ONCE(ndev->mtu, new_mtu);

err_exit:
	return err;
}

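/* ndo_set_features: flush ntuple rules and VLAN filters when those features
 * are disabled, update LRO, RX checksum and VLAN offload configuration, and
 * restart the interface if a changed setting requires it.
 */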
static int aq_ndev_set_features(struct net_device *ndev,
				netdev_features_t features)
{
	bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX);
	bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	bool need_ndev_restart = false;
	struct aq_nic_cfg_s *aq_cfg;
	bool is_lro = false;
	int err = 0;

	aq_cfg = aq_nic_get_cfg(aq_nic);

	if (!(features & NETIF_F_NTUPLE)) {
		if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
			err = aq_clear_rxnfc_all_rules(aq_nic);
			if (unlikely(err))
				goto err_exit;
		}
	}
	if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			err = aq_filters_vlan_offload_off(aq_nic);
			if (unlikely(err))
				goto err_exit;
		}
	}

	aq_cfg->features = features;

	if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
		is_lro = features & NETIF_F_LRO;

		if (aq_cfg->is_lro != is_lro) {
			aq_cfg->is_lro = is_lro;
			need_ndev_restart = true;
		}
	}

	if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM) {
		err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
							aq_cfg);

		if (unlikely(err))
			goto err_exit;
	}

	if (aq_cfg->is_vlan_rx_strip != is_vlan_rx_strip) {
		aq_cfg->is_vlan_rx_strip = is_vlan_rx_strip;
		need_ndev_restart = true;
	}
	if (aq_cfg->is_vlan_tx_insert != is_vlan_tx_insert) {
		aq_cfg->is_vlan_tx_insert = is_vlan_tx_insert;
		need_ndev_restart = true;
	}

	if (need_ndev_restart && netif_running(ndev)) {
		aq_ndev_close(ndev);
		aq_ndev_open(ndev);
	}

err_exit:
	return err;
}

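/* ndo_fix_features: LRO requires RX checksumming and cannot be used with a
 * single-buffer (non-frags) XDP program, so drop it from the requested
 * feature set in those cases.
 */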
static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
					      netdev_features_t features)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *prog;

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	prog = READ_ONCE(aq_nic->xdp_prog);
	if (prog && !prog->aux->xdp_has_frags &&
	    (features & NETIF_F_LRO)) {
		netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
		features &= ~NETIF_F_LRO;
	}

	return features;
}

static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	int err = 0;

	err = eth_mac_addr(ndev, addr);
	if (err < 0)
		goto err_exit;
	err = aq_nic_set_mac(aq_nic, ndev);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	(void)aq_nic_set_multicast_list(aq_nic, ndev);
}

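/* Validate and normalize a SIOCSHWTSTAMP request: any of the PTPv2 L2/L4
 * filters is widened to HWTSTAMP_FILTER_PTP_V2_EVENT before the
 * configuration is handed to the PTP part of the driver.
 */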
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
				   struct hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_NONE:
		break;
	default:
		return -ERANGE;
	}

	return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
}
#endif

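/* SIOCSHWTSTAMP handler: copy the hwtstamp_config from user space, apply it
 * (only when PTP support is built in), and copy the possibly adjusted
 * configuration back to user space.
 */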
static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
{
	struct hwtstamp_config config;
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	int ret_val;
#endif

	if (!aq_nic->aq_ptp)
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
	if (ret_val)
		return ret_val;
#endif

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
{
	struct hwtstamp_config config;

	if (!aq_nic->aq_ptp)
		return -EOPNOTSUPP;

	aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
#endif

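/* ndo_eth_ioctl: only the hardware timestamping ioctls are handled here;
 * everything else is -EOPNOTSUPP.
 */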
static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct aq_nic_s *aq_nic = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return aq_ndev_hwtstamp_set(aq_nic, ifr);

#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	case SIOCGHWTSTAMP:
		return aq_ndev_hwtstamp_get(aq_nic, ifr);
#endif
	}

	return -EOPNOTSUPP;
}

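/* ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid: track active VLAN IDs in a
 * bitmap and resync the hardware VLAN filters.
 */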
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
				  u16 vid)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	set_bit(vid, aq_nic->active_vlans);

	return aq_filters_vlans_update(aq_nic);
}

static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
				   u16 vid)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
		return -EOPNOTSUPP;

	clear_bit(vid, aq_nic->active_vlans);

	if (aq_del_fvlan_by_vlan(aq_nic, vid) == -ENOENT)
		return aq_filters_vlans_update(aq_nic);

	return 0;
}

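/* mqprio offload is accepted only when the requested TC count fits the
 * hardware limit and is a power of two, and min TX rate limiting is only
 * available on chips with the ANTIGUA feature.
 */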
static int aq_validate_mqprio_opt(struct aq_nic_s *self,
				  struct tc_mqprio_qopt_offload *mqprio,
				  const unsigned int num_tc)
{
	const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self);
	const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max,
					   AQ_CFG_TCS_MAX);

	if (num_tc > tcs_max) {
		netdev_err(self->ndev, "Too many TCs requested\n");
		return -EOPNOTSUPP;
	}

	if (num_tc != 0 && !is_power_of_2(num_tc)) {
		netdev_err(self->ndev, "TC count should be power of 2\n");
		return -EOPNOTSUPP;
	}

	if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
		netdev_err(self->ndev, "Min tx rate is not supported\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

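/* ndo_setup_tc: handle TC_SETUP_QDISC_MQPRIO. Per-TC min/max rates are
 * scaled with AQ_MBPS_DIVISOR (the values from the stack are assumed to be
 * in bytes per second) before being programmed, then the TC/priority
 * mapping is applied.
 */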
static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			   void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct aq_nic_s *aq_nic = netdev_priv(dev);
	bool has_min_rate;
	bool has_max_rate;
	int err;
	int i;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);

	err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
	if (err)
		return err;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		if (has_max_rate) {
			u64 max_rate = mqprio->max_rate[i];

			do_div(max_rate, AQ_MBPS_DIVISOR);
			aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
		}

		if (has_min_rate) {
			u64 min_rate = mqprio->min_rate[i];

			do_div(min_rate, AQ_MBPS_DIVISOR);
			aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate);
		}
	}

	return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
				      mqprio->qopt.prio_tc_map);
}

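/* Attach or detach an XDP program. Single-buffer programs (no XDP frags
 * support) are refused for large MTUs and force LRO off; the interface is
 * restarted when a program is installed or removed, and the
 * aq_xdp_locking_key static branch tracks whether any program is loaded.
 */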
static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	bool need_update, running = netif_running(ndev);
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct bpf_prog *old_prog;

	if (prog && !prog->aux->xdp_has_frags) {
		if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
			NL_SET_ERR_MSG_MOD(extack,
					   "prog does not support XDP frags");
			return -EOPNOTSUPP;
		}

		if (ndev->features & NETIF_F_LRO) {
			netdev_err(ndev,
				   "LRO is not supported with single buffer XDP, disabling\n");
			ndev->features &= ~NETIF_F_LRO;
		}
	}

	need_update = !!aq_nic->xdp_prog != !!prog;
	if (running && need_update)
		aq_ndev_close(ndev);

	old_prog = xchg(&aq_nic->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (!old_prog && prog)
		static_branch_inc(&aq_xdp_locking_key);
	else if (old_prog && !prog)
		static_branch_dec(&aq_xdp_locking_key);

	if (running && need_update)
		return aq_ndev_open(ndev);

	return 0;
}

static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return aq_xdp_setup(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops aq_ndev_ops = {
	.ndo_open = aq_ndev_open,
	.ndo_stop = aq_ndev_close,
	.ndo_start_xmit = aq_ndev_start_xmit,
	.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
	.ndo_change_mtu = aq_ndev_change_mtu,
	.ndo_set_mac_address = aq_ndev_set_mac_address,
	.ndo_set_features = aq_ndev_set_features,
	.ndo_fix_features = aq_ndev_fix_features,
	.ndo_eth_ioctl = aq_ndev_ioctl,
	.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc = aq_ndo_setup_tc,
	.ndo_bpf = aq_xdp,
	.ndo_xdp_xmit = aq_xdp_xmit,
};

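/* Module init/exit: create the driver's single-threaded workqueue and
 * register the PCI driver; tear both down again on exit.
 */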
static int __init aq_ndev_init_module(void)
{
	int ret;

	aq_ndev_wq = create_singlethread_workqueue(aq_ndev_driver_name);
	if (!aq_ndev_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	ret = aq_pci_func_register_driver();
	if (ret) {
		destroy_workqueue(aq_ndev_wq);
		return ret;
	}

	return 0;
}

static void __exit aq_ndev_exit_module(void)
{
	aq_pci_func_unregister_driver();

	if (aq_ndev_wq) {
		destroy_workqueue(aq_ndev_wq);
		aq_ndev_wq = NULL;
	}
}

module_init(aq_ndev_init_module);
module_exit(aq_ndev_exit_module);
543