/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/debugfs.h>
#include <linux/etherdevice.h>
#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/netlink.h>
#include <net/net_shaper.h>
#include <net/netdev_lock.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
#include <net/udp_tunnel.h>
#include <net/busy_poll.h>

#include "netdevsim.h"

MODULE_IMPORT_NS("NETDEV_INTERNAL");

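/* Size of the per-queue skb backlog. nsim_napi_rx() drops anything beyond
 * it, and the peer TX queue is flow-controlled around it: stopped when the
 * backlog fills and woken again once half of it has drained.
 */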
#define NSIM_RING_SIZE 256

static void nsim_start_peer_tx_queue(struct net_device *dev, struct nsim_rq *rq)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct net_device *peer_dev;
        struct netdevsim *peer_ns;
        struct netdev_queue *txq;
        u16 idx;

        idx = rq->napi.index;
        rcu_read_lock();
        peer_ns = rcu_dereference(ns->peer);
        if (!peer_ns)
                goto out;

        /* TX device */
        peer_dev = peer_ns->netdev;
        if (dev->real_num_tx_queues != peer_dev->num_rx_queues)
                goto out;

        txq = netdev_get_tx_queue(peer_dev, idx);
        if (!netif_tx_queue_stopped(txq))
                goto out;

        netif_tx_wake_queue(txq);
out:
        rcu_read_unlock();
}

static void nsim_stop_tx_queue(struct net_device *tx_dev,
                               struct net_device *rx_dev,
                               struct nsim_rq *rq,
                               u16 idx)
{
        /* If the queue counts differ, do not stop, since it is not
         * easy to find which TX queue is mapped here
         */
        if (rx_dev->real_num_tx_queues != tx_dev->num_rx_queues)
                return;

        /* rq is the queue on the receive side */
        netif_subqueue_try_stop(tx_dev, idx,
                                NSIM_RING_SIZE - skb_queue_len(&rq->skb_queue),
                                NSIM_RING_SIZE / 2);
}

static int nsim_napi_rx(struct net_device *tx_dev, struct net_device *rx_dev,
                        struct nsim_rq *rq, struct sk_buff *skb)
{
        if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        skb_queue_tail(&rq->skb_queue, skb);

        /* Stop the peer TX queue to avoid dropping packets later */
        if (skb_queue_len(&rq->skb_queue) >= NSIM_RING_SIZE)
                nsim_stop_tx_queue(tx_dev, rx_dev, rq,
                                   skb_get_queue_mapping(skb));

        return NET_RX_SUCCESS;
}

static int nsim_forward_skb(struct net_device *tx_dev,
                            struct net_device *rx_dev,
                            struct sk_buff *skb,
                            struct nsim_rq *rq,
                            struct skb_ext *psp_ext)
{
        int ret;

        ret = __dev_forward_skb(rx_dev, skb);
        if (ret)
                return ret;

        nsim_psp_handle_ext(skb, psp_ext);

        return nsim_napi_rx(tx_dev, rx_dev, rq, skb);
}

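/* There is no real hardware here: transmitted skbs are pushed straight onto
 * the RX backlog of the peer netdevsim (or back onto our own queue in
 * loopback mode) and NAPI is kicked shortly afterwards via the queue's
 * hrtimer, which stands in for an RX interrupt.
 */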
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct skb_ext *psp_ext = NULL;
        struct net_device *peer_dev;
        unsigned int len = skb->len;
        struct netdevsim *peer_ns;
        struct netdev_config *cfg;
        struct nsim_rq *rq;
        int rxq;
        int dr;

        rcu_read_lock();
        if (!nsim_ipsec_tx(ns, skb))
                goto out_drop_any;

        /* Check if loopback mode is enabled */
        if (dev->features & NETIF_F_LOOPBACK) {
                peer_ns = ns;
                peer_dev = dev;
        } else {
                peer_ns = rcu_dereference(ns->peer);
                if (!peer_ns)
                        goto out_drop_any;
                peer_dev = peer_ns->netdev;
        }

        dr = nsim_do_psp(skb, ns, peer_ns, &psp_ext);
        if (dr)
                goto out_drop_free;

        rxq = skb_get_queue_mapping(skb);
        if (rxq >= peer_dev->num_rx_queues)
                rxq = rxq % peer_dev->num_rx_queues;
        rq = peer_ns->rq[rxq];

        cfg = peer_dev->cfg;
        if (skb_is_nonlinear(skb) &&
            (cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED ||
             (cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
              cfg->hds_thresh > len)))
                skb_linearize(skb);

        skb_tx_timestamp(skb);
        if (unlikely(nsim_forward_skb(dev, peer_dev,
                                      skb, rq, psp_ext) == NET_RX_DROP))
                goto out_drop_cnt;

        if (!hrtimer_active(&rq->napi_timer))
                hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL);

        rcu_read_unlock();
        dev_dstats_tx_add(dev, len);
        return NETDEV_TX_OK;

out_drop_any:
        dr = SKB_DROP_REASON_NOT_SPECIFIED;
out_drop_free:
        kfree_skb_reason(skb, dr);
out_drop_cnt:
        rcu_read_unlock();
        dev_dstats_tx_dropped(dev);
        return NETDEV_TX_OK;
}

static void nsim_set_rx_mode(struct net_device *dev)
{
}

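/* An attached XDP program without frags support caps the usable MTU;
 * refuse to grow past NSIM_XDP_MAX_MTU while such a program is loaded.
 */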
static int nsim_change_mtu(struct net_device *dev, int new_mtu)
{
        struct netdevsim *ns = netdev_priv(dev);

        if (ns->xdp.prog && !ns->xdp.prog->aux->xdp_has_frags &&
            new_mtu > NSIM_XDP_MAX_MTU)
                return -EBUSY;

        WRITE_ONCE(dev->mtu, new_mtu);

        return 0;
}

static int
nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
        return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv);
}

static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct nsim_dev *nsim_dev = ns->nsim_dev;

        /* Only refuse multicast addresses; the zero address can mean unset/any. */
        if (vf >= nsim_dev_get_vfs(nsim_dev) || is_multicast_ether_addr(mac))
                return -EINVAL;
        memcpy(nsim_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN);

        return 0;
}

static int nsim_set_vf_vlan(struct net_device *dev, int vf,
                            u16 vlan, u8 qos, __be16 vlan_proto)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct nsim_dev *nsim_dev = ns->nsim_dev;

        if (vf >= nsim_dev_get_vfs(nsim_dev) || vlan > 4095 || qos > 7)
                return -EINVAL;

        nsim_dev->vfconfigs[vf].vlan = vlan;
        nsim_dev->vfconfigs[vf].qos = qos;
        nsim_dev->vfconfigs[vf].vlan_proto = vlan_proto;

        return 0;
}

static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct nsim_dev *nsim_dev = ns->nsim_dev;

        if (nsim_esw_mode_is_switchdev(ns->nsim_dev)) {
                pr_err("Not supported in switchdev mode. Please use devlink API.\n");
                return -EOPNOTSUPP;
        }

        if (vf >= nsim_dev_get_vfs(nsim_dev))
                return -EINVAL;

        nsim_dev->vfconfigs[vf].min_tx_rate = min;
        nsim_dev->vfconfigs[vf].max_tx_rate = max;

        return 0;
}

static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct nsim_dev *nsim_dev = ns->nsim_dev;

        if (vf >= nsim_dev_get_vfs(nsim_dev))
                return -EINVAL;
        nsim_dev->vfconfigs[vf].spoofchk_enabled = val;

        return 0;
}

static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct nsim_dev *nsim_dev = ns->nsim_dev;

        if (vf >= nsim_dev_get_vfs(nsim_dev))
                return -EINVAL;
        nsim_dev->vfconfigs[vf].rss_query_enabled = val;

        return 0;
}

static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct nsim_dev *nsim_dev = ns->nsim_dev;

        if (vf >= nsim_dev_get_vfs(nsim_dev))
                return -EINVAL;
        nsim_dev->vfconfigs[vf].trusted = val;

        return 0;
}

static int
nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct nsim_dev *nsim_dev = ns->nsim_dev;

        if (vf >= nsim_dev_get_vfs(nsim_dev))
                return -EINVAL;

        ivi->vf = vf;
        ivi->linkstate = nsim_dev->vfconfigs[vf].link_state;
        ivi->min_tx_rate = nsim_dev->vfconfigs[vf].min_tx_rate;
        ivi->max_tx_rate = nsim_dev->vfconfigs[vf].max_tx_rate;
        ivi->vlan = nsim_dev->vfconfigs[vf].vlan;
        ivi->vlan_proto = nsim_dev->vfconfigs[vf].vlan_proto;
        ivi->qos = nsim_dev->vfconfigs[vf].qos;
        memcpy(&ivi->mac, nsim_dev->vfconfigs[vf].vf_mac, ETH_ALEN);
        ivi->spoofchk = nsim_dev->vfconfigs[vf].spoofchk_enabled;
        ivi->trusted = nsim_dev->vfconfigs[vf].trusted;
        ivi->rss_query_en = nsim_dev->vfconfigs[vf].rss_query_enabled;

        return 0;
}

static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct nsim_dev *nsim_dev = ns->nsim_dev;

        if (vf >= nsim_dev_get_vfs(nsim_dev))
                return -EINVAL;

        switch (state) {
        case IFLA_VF_LINK_STATE_AUTO:
        case IFLA_VF_LINK_STATE_ENABLE:
        case IFLA_VF_LINK_STATE_DISABLE:
                break;
        default:
                return -EINVAL;
        }

        nsim_dev->vfconfigs[vf].link_state = state;

        return 0;
}

static void nsim_taprio_stats(struct tc_taprio_qopt_stats *stats)
{
        stats->window_drops = 0;
        stats->tx_overruns = 0;
}

static int nsim_setup_tc_taprio(struct net_device *dev,
                                struct tc_taprio_qopt_offload *offload)
{
        int err = 0;

        switch (offload->cmd) {
        case TAPRIO_CMD_REPLACE:
        case TAPRIO_CMD_DESTROY:
                break;
        case TAPRIO_CMD_STATS:
                nsim_taprio_stats(&offload->stats);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

static LIST_HEAD(nsim_block_cb_list);

static int
nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
{
        struct netdevsim *ns = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_QDISC_TAPRIO:
                return nsim_setup_tc_taprio(dev, type_data);
        case TC_SETUP_BLOCK:
                return flow_block_cb_setup_simple(type_data,
                                                  &nsim_block_cb_list,
                                                  nsim_setup_tc_block_cb,
                                                  ns, ns, true);
        default:
                return -EOPNOTSUPP;
        }
}

static int
nsim_set_features(struct net_device *dev, netdev_features_t features)
{
        struct netdevsim *ns = netdev_priv(dev);

        if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC))
                return nsim_bpf_disable_tc(ns);

        return 0;
}

static int nsim_get_iflink(const struct net_device *dev)
{
        struct netdevsim *nsim, *peer;
        int iflink;

        nsim = netdev_priv(dev);

        rcu_read_lock();
        peer = rcu_dereference(nsim->peer);
        iflink = peer ? READ_ONCE(peer->netdev->ifindex) :
                        READ_ONCE(dev->ifindex);
        rcu_read_unlock();

        return iflink;
}

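/* Drain up to @budget skbs from the queue's backlog. When a generic-XDP
 * program is attached it sees each skb first and may consume it; anything
 * it passes goes up the stack via GRO.
 */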
static int nsim_rcv(struct nsim_rq *rq, int budget)
{
        struct net_device *dev = rq->napi.dev;
        struct bpf_prog *xdp_prog;
        struct netdevsim *ns;
        struct sk_buff *skb;
        unsigned int skblen;
        int i, ret;

        ns = netdev_priv(dev);
        xdp_prog = READ_ONCE(ns->xdp.prog);

        for (i = 0; i < budget; i++) {
                if (skb_queue_empty(&rq->skb_queue))
                        break;

                skb = skb_dequeue(&rq->skb_queue);

                if (xdp_prog) {
                        /* skb might be freed directly by XDP, save the len */
                        skblen = skb->len;

                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                skb_checksum_help(skb);
                        ret = do_xdp_generic(xdp_prog, &skb);
                        if (ret != XDP_PASS) {
                                dev_dstats_rx_add(dev, skblen);
                                continue;
                        }
                }

                /* skb might be discarded by netif_receive_skb(), save the len */
                dev_dstats_rx_add(dev, skb->len);
                napi_gro_receive(&rq->napi, skb);
        }

        nsim_start_peer_tx_queue(dev, rq);
        return i;
}

static int nsim_poll(struct napi_struct *napi, int budget)
{
        struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi);
        int done;

        done = nsim_rcv(rq, budget);
        if (done < budget)
                napi_complete_done(napi, done);

        return done;
}

static int nsim_create_page_pool(struct page_pool **p, struct napi_struct *napi)
{
        struct page_pool_params params = {
                .order = 0,
                .pool_size = NSIM_RING_SIZE,
                .nid = NUMA_NO_NODE,
                .dev = &napi->dev->dev,
                .napi = napi,
                .dma_dir = DMA_BIDIRECTIONAL,
                .netdev = napi->dev,
        };
        struct page_pool *pool;

        pool = page_pool_create(&params);
        if (IS_ERR(pool))
                return PTR_ERR(pool);

        *p = pool;
        return 0;
}

static int nsim_init_napi(struct netdevsim *ns)
{
        struct net_device *dev = ns->netdev;
        struct nsim_rq *rq;
        int err, i;

        for (i = 0; i < dev->num_rx_queues; i++) {
                rq = ns->rq[i];

                netif_napi_add_config_locked(dev, &rq->napi, nsim_poll, i);
        }

        for (i = 0; i < dev->num_rx_queues; i++) {
                rq = ns->rq[i];

                err = nsim_create_page_pool(&rq->page_pool, &rq->napi);
                if (err)
                        goto err_pp_destroy;
        }

        return 0;

err_pp_destroy:
        while (i--) {
                page_pool_destroy(ns->rq[i]->page_pool);
                ns->rq[i]->page_pool = NULL;
        }

        for (i = 0; i < dev->num_rx_queues; i++)
                __netif_napi_del_locked(&ns->rq[i]->napi);

        return err;
}

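/* Fires from the hrtimer armed in nsim_start_xmit(); simulates an RX
 * interrupt by scheduling the queue's NAPI.
 */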
static enum hrtimer_restart nsim_napi_schedule(struct hrtimer *timer)
{
        struct nsim_rq *rq;

        rq = container_of(timer, struct nsim_rq, napi_timer);
        napi_schedule(&rq->napi);

        return HRTIMER_NORESTART;
}

static void nsim_rq_timer_init(struct nsim_rq *rq)
{
        hrtimer_setup(&rq->napi_timer, nsim_napi_schedule, CLOCK_MONOTONIC,
                      HRTIMER_MODE_REL);
}

static void nsim_enable_napi(struct netdevsim *ns)
{
        struct net_device *dev = ns->netdev;
        int i;

        for (i = 0; i < dev->num_rx_queues; i++) {
                struct nsim_rq *rq = ns->rq[i];

                netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
                napi_enable_locked(&rq->napi);
        }
}

static int nsim_open(struct net_device *dev)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct netdevsim *peer;
        int err;

        netdev_assert_locked(dev);

        err = nsim_init_napi(ns);
        if (err)
                return err;

        nsim_enable_napi(ns);

        peer = rtnl_dereference(ns->peer);
        if (peer && netif_running(peer->netdev)) {
                netif_carrier_on(dev);
                netif_carrier_on(peer->netdev);
        }

        return 0;
}

static void nsim_del_napi(struct netdevsim *ns)
{
        struct net_device *dev = ns->netdev;
        int i;

        for (i = 0; i < dev->num_rx_queues; i++) {
                struct nsim_rq *rq = ns->rq[i];

                napi_disable_locked(&rq->napi);
                __netif_napi_del_locked(&rq->napi);
        }
        synchronize_net();

        for (i = 0; i < dev->num_rx_queues; i++) {
                page_pool_destroy(ns->rq[i]->page_pool);
                ns->rq[i]->page_pool = NULL;
        }
}

static int nsim_stop(struct net_device *dev)
{
        struct netdevsim *ns = netdev_priv(dev);
        struct netdevsim *peer;

        netdev_assert_locked(dev);

        netif_carrier_off(dev);
        peer = rtnl_dereference(ns->peer);
        if (peer)
                netif_carrier_off(peer->netdev);

        nsim_del_napi(ns);

        return 0;
}

static int nsim_shaper_set(struct net_shaper_binding *binding,
                           const struct net_shaper *shaper,
                           struct netlink_ext_ack *extack)
{
        return 0;
}

static int nsim_shaper_del(struct net_shaper_binding *binding,
                           const struct net_shaper_handle *handle,
                           struct netlink_ext_ack *extack)
{
        return 0;
}

static int nsim_shaper_group(struct net_shaper_binding *binding,
                             int leaves_count,
                             const struct net_shaper *leaves,
                             const struct net_shaper *root,
                             struct netlink_ext_ack *extack)
{
        return 0;
}

static void nsim_shaper_cap(struct net_shaper_binding *binding,
                            enum net_shaper_scope scope,
                            unsigned long *flags)
{
        *flags = ULONG_MAX;
}

static const struct net_shaper_ops nsim_shaper_ops = {
        .set = nsim_shaper_set,
        .delete = nsim_shaper_del,
        .group = nsim_shaper_group,
        .capabilities = nsim_shaper_cap,
};

static const struct net_device_ops nsim_netdev_ops = {
        .ndo_start_xmit = nsim_start_xmit,
        .ndo_set_rx_mode = nsim_set_rx_mode,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = nsim_change_mtu,
        .ndo_set_vf_mac = nsim_set_vf_mac,
        .ndo_set_vf_vlan = nsim_set_vf_vlan,
        .ndo_set_vf_rate = nsim_set_vf_rate,
        .ndo_set_vf_spoofchk = nsim_set_vf_spoofchk,
        .ndo_set_vf_trust = nsim_set_vf_trust,
        .ndo_get_vf_config = nsim_get_vf_config,
        .ndo_set_vf_link_state = nsim_set_vf_link_state,
        .ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
        .ndo_setup_tc = nsim_setup_tc,
        .ndo_set_features = nsim_set_features,
        .ndo_get_iflink = nsim_get_iflink,
        .ndo_bpf = nsim_bpf,
        .ndo_open = nsim_open,
        .ndo_stop = nsim_stop,
        .net_shaper_ops = &nsim_shaper_ops,
};

static const struct net_device_ops nsim_vf_netdev_ops = {
        .ndo_start_xmit = nsim_start_xmit,
        .ndo_set_rx_mode = nsim_set_rx_mode,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = nsim_change_mtu,
        .ndo_setup_tc = nsim_setup_tc,
        .ndo_set_features = nsim_set_features,
};

/* We don't have true per-queue stats yet, so do some fakery here.
 * Only report stuff for queue 0.
 */
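/* The trick: the base stats report one packet if any were seen, and the
 * queue stats subtract that one back out, so both callbacks are exercised
 * while the per-queue and base contributions still sum to the real totals.
 */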
static void nsim_get_queue_stats_rx(struct net_device *dev, int idx,
                                    struct netdev_queue_stats_rx *stats)
{
        struct rtnl_link_stats64 rtstats = {};

        if (!idx)
                dev_get_stats(dev, &rtstats);

        stats->packets = rtstats.rx_packets - !!rtstats.rx_packets;
        stats->bytes = rtstats.rx_bytes;
}

static void nsim_get_queue_stats_tx(struct net_device *dev, int idx,
                                    struct netdev_queue_stats_tx *stats)
{
        struct rtnl_link_stats64 rtstats = {};

        if (!idx)
                dev_get_stats(dev, &rtstats);

        stats->packets = rtstats.tx_packets - !!rtstats.tx_packets;
        stats->bytes = rtstats.tx_bytes;
}

static void nsim_get_base_stats(struct net_device *dev,
                                struct netdev_queue_stats_rx *rx,
                                struct netdev_queue_stats_tx *tx)
{
        struct rtnl_link_stats64 rtstats = {};

        dev_get_stats(dev, &rtstats);

        rx->packets = !!rtstats.rx_packets;
        rx->bytes = 0;
        tx->packets = !!rtstats.tx_packets;
        tx->bytes = 0;
}

static const struct netdev_stat_ops nsim_stat_ops = {
        .get_queue_stats_tx = nsim_get_queue_stats_tx,
        .get_queue_stats_rx = nsim_get_queue_stats_rx,
        .get_base_stats = nsim_get_base_stats,
};

static struct nsim_rq *nsim_queue_alloc(void)
{
        struct nsim_rq *rq;

        rq = kzalloc(sizeof(*rq), GFP_KERNEL_ACCOUNT);
        if (!rq)
                return NULL;

        skb_queue_head_init(&rq->skb_queue);
        nsim_rq_timer_init(rq);
        return rq;
}

static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq)
{
        hrtimer_cancel(&rq->napi_timer);

        if (rq->skb_queue.qlen) {
                local_bh_disable();
                dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
                local_bh_enable();
        }

        skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
        kfree(rq);
}

/* Queue reset mode is controlled by ns->rq_reset_mode.
 * - normal - new NAPI new pool (old NAPI enabled when new added)
 * - mode 1 - allocate new pool (NAPI is only disabled / enabled)
 * - mode 2 - new NAPI new pool (old NAPI removed before new added)
 * - mode 3 - new NAPI new pool (old NAPI disabled when new added)
 */
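/* The mode is driven from user space through the per-port "queue_reset"
 * debugfs file, which takes "<queue> <mode>". For example (the path is
 * illustrative of the usual debugfs layout, not guaranteed):
 *   echo "0 2" > /sys/kernel/debug/netdevsim/netdevsimN/ports/0/queue_reset
 */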
struct nsim_queue_mem {
        struct nsim_rq *rq;
        struct page_pool *pp;
};

static int
nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
{
        struct nsim_queue_mem *qmem = per_queue_mem;
        struct netdevsim *ns = netdev_priv(dev);
        int err;

        if (ns->rq_reset_mode > 3)
                return -EINVAL;

        if (ns->rq_reset_mode == 1) {
                if (!netif_running(ns->netdev))
                        return -ENETDOWN;
                return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
        }

        qmem->rq = nsim_queue_alloc();
        if (!qmem->rq)
                return -ENOMEM;

        err = nsim_create_page_pool(&qmem->rq->page_pool, &qmem->rq->napi);
        if (err)
                goto err_free;

        if (!ns->rq_reset_mode)
                netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
                                             idx);

        return 0;

err_free:
        nsim_queue_free(dev, qmem->rq);
        return err;
}

static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
{
        struct nsim_queue_mem *qmem = per_queue_mem;
        struct netdevsim *ns = netdev_priv(dev);

        page_pool_destroy(qmem->pp);
        if (qmem->rq) {
                if (!ns->rq_reset_mode)
                        netif_napi_del_locked(&qmem->rq->napi);
                page_pool_destroy(qmem->rq->page_pool);
                nsim_queue_free(dev, qmem->rq);
        }
}

static int
nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
{
        struct nsim_queue_mem *qmem = per_queue_mem;
        struct netdevsim *ns = netdev_priv(dev);

        netdev_assert_locked(dev);

        if (ns->rq_reset_mode == 1) {
                ns->rq[idx]->page_pool = qmem->pp;
                napi_enable_locked(&ns->rq[idx]->napi);
                return 0;
        }

        /* netif_napi_add()/_del() should normally be called from alloc/free,
         * here we want to test various call orders.
         */
        if (ns->rq_reset_mode == 2) {
                netif_napi_del_locked(&ns->rq[idx]->napi);
                netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
                                             idx);
        } else if (ns->rq_reset_mode == 3) {
                netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
                                             idx);
                netif_napi_del_locked(&ns->rq[idx]->napi);
        }

        ns->rq[idx] = qmem->rq;
        napi_enable_locked(&ns->rq[idx]->napi);

        return 0;
}

static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
{
        struct nsim_queue_mem *qmem = per_queue_mem;
        struct netdevsim *ns = netdev_priv(dev);

        netdev_assert_locked(dev);

        napi_disable_locked(&ns->rq[idx]->napi);

        if (ns->rq_reset_mode == 1) {
                qmem->pp = ns->rq[idx]->page_pool;
                page_pool_disable_direct_recycling(qmem->pp);
        } else {
                qmem->rq = ns->rq[idx];
        }

        return 0;
}

static const struct netdev_queue_mgmt_ops nsim_queue_mgmt_ops = {
        .ndo_queue_mem_size = sizeof(struct nsim_queue_mem),
        .ndo_queue_mem_alloc = nsim_queue_mem_alloc,
        .ndo_queue_mem_free = nsim_queue_mem_free,
        .ndo_queue_start = nsim_queue_start,
        .ndo_queue_stop = nsim_queue_stop,
};

static ssize_t
nsim_qreset_write(struct file *file, const char __user *data,
                  size_t count, loff_t *ppos)
{
        struct netdevsim *ns = file->private_data;
        unsigned int queue, mode;
        char buf[32];
        ssize_t ret;

        if (count >= sizeof(buf))
                return -EINVAL;
        if (copy_from_user(buf, data, count))
                return -EFAULT;
        buf[count] = '\0';

        ret = sscanf(buf, "%u %u", &queue, &mode);
        if (ret != 2)
                return -EINVAL;

        netdev_lock(ns->netdev);
        if (queue >= ns->netdev->real_num_rx_queues) {
                ret = -EINVAL;
                goto exit_unlock;
        }

        ns->rq_reset_mode = mode;
        ret = netdev_rx_queue_restart(ns->netdev, queue);
        ns->rq_reset_mode = 0;
        if (ret)
                goto exit_unlock;

        ret = count;
exit_unlock:
        netdev_unlock(ns->netdev);
        return ret;
}

static const struct file_operations nsim_qreset_fops = {
        .open = simple_open,
        .write = nsim_qreset_write,
        .owner = THIS_MODULE,
};

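/* "pp_hold" debugfs file: write y/n to take or release a reference on a
 * page from RX queue 0's page_pool, letting tests hold a page across queue
 * or device teardown to exercise page_pool orphaning (see nsim_destroy()).
 */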
static ssize_t
nsim_pp_hold_read(struct file *file, char __user *data,
                  size_t count, loff_t *ppos)
{
        struct netdevsim *ns = file->private_data;
        char buf[3] = "n\n";

        if (ns->page)
                buf[0] = 'y';

        return simple_read_from_buffer(data, count, ppos, buf, 2);
}

static ssize_t
nsim_pp_hold_write(struct file *file, const char __user *data,
                   size_t count, loff_t *ppos)
{
        struct netdevsim *ns = file->private_data;
        ssize_t ret;
        bool val;

        ret = kstrtobool_from_user(data, count, &val);
        if (ret)
                return ret;

        rtnl_lock();
        ret = count;
        if (val == !!ns->page)
                goto exit;

        if (!netif_running(ns->netdev) && val) {
                ret = -ENETDOWN;
        } else if (val) {
                ns->page = page_pool_dev_alloc_pages(ns->rq[0]->page_pool);
                if (!ns->page)
                        ret = -ENOMEM;
        } else {
                page_pool_put_full_page(pp_page_to_nmdesc(ns->page)->pp,
                                        ns->page, false);
                ns->page = NULL;
        }

exit:
        rtnl_unlock();
        return ret;
}

static const struct file_operations nsim_pp_hold_fops = {
        .open = simple_open,
        .read = nsim_pp_hold_read,
        .write = nsim_pp_hold_write,
        .llseek = generic_file_llseek,
        .owner = THIS_MODULE,
};

static void nsim_setup(struct net_device *dev)
{
        ether_setup(dev);
        eth_hw_addr_random(dev);

        dev->flags &= ~IFF_MULTICAST;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        dev->features |= NETIF_F_HIGHDMA |
                         NETIF_F_SG |
                         NETIF_F_FRAGLIST |
                         NETIF_F_HW_CSUM |
                         NETIF_F_LRO |
                         NETIF_F_TSO;
        dev->hw_features |= NETIF_F_HW_TC |
                            NETIF_F_SG |
                            NETIF_F_FRAGLIST |
                            NETIF_F_HW_CSUM |
                            NETIF_F_LRO |
                            NETIF_F_TSO |
                            NETIF_F_LOOPBACK;
        dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
        dev->max_mtu = ETH_MAX_MTU;
        dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_HW_OFFLOAD;
}

static int nsim_queue_init(struct netdevsim *ns)
{
        struct net_device *dev = ns->netdev;
        int i;

        ns->rq = kcalloc(dev->num_rx_queues, sizeof(*ns->rq),
                         GFP_KERNEL_ACCOUNT);
        if (!ns->rq)
                return -ENOMEM;

        for (i = 0; i < dev->num_rx_queues; i++) {
                ns->rq[i] = nsim_queue_alloc();
                if (!ns->rq[i])
                        goto err_free_prev;
        }

        return 0;

err_free_prev:
        while (i--)
                kfree(ns->rq[i]);
        kfree(ns->rq);
        return -ENOMEM;
}

static void nsim_queue_uninit(struct netdevsim *ns)
{
        struct net_device *dev = ns->netdev;
        int i;

        for (i = 0; i < dev->num_rx_queues; i++)
                nsim_queue_free(dev, ns->rq[i]);

        kfree(ns->rq);
        ns->rq = NULL;
}

static int nsim_init_netdevsim(struct netdevsim *ns)
{
        struct netdevsim *peer;
        struct mock_phc *phc;
        int err;

        phc = mock_phc_create(&ns->nsim_bus_dev->dev);
        if (IS_ERR(phc))
                return PTR_ERR(phc);

        ns->phc = phc;
        ns->netdev->netdev_ops = &nsim_netdev_ops;
        ns->netdev->stat_ops = &nsim_stat_ops;
        ns->netdev->queue_mgmt_ops = &nsim_queue_mgmt_ops;
        netdev_lockdep_set_classes(ns->netdev);

        err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
        if (err)
                goto err_phc_destroy;

        rtnl_lock();
        err = nsim_queue_init(ns);
        if (err)
                goto err_utn_destroy;

        err = nsim_bpf_init(ns);
        if (err)
                goto err_rq_destroy;

        nsim_macsec_init(ns);
        nsim_ipsec_init(ns);

        err = register_netdevice(ns->netdev);
        if (err)
                goto err_ipsec_teardown;
        rtnl_unlock();

        err = nsim_psp_init(ns);
        if (err)
                goto err_unregister_netdev;

        if (IS_ENABLED(CONFIG_DEBUG_NET)) {
                ns->nb.notifier_call = netdev_debug_event;
                if (register_netdevice_notifier_dev_net(ns->netdev, &ns->nb,
                                                        &ns->nn))
                        ns->nb.notifier_call = NULL;
        }

        return 0;

err_unregister_netdev:
        rtnl_lock();
        peer = rtnl_dereference(ns->peer);
        if (peer)
                RCU_INIT_POINTER(peer->peer, NULL);
        RCU_INIT_POINTER(ns->peer, NULL);
        unregister_netdevice(ns->netdev);
err_ipsec_teardown:
        nsim_ipsec_teardown(ns);
        nsim_macsec_teardown(ns);
        nsim_bpf_uninit(ns);
err_rq_destroy:
        nsim_queue_uninit(ns);
err_utn_destroy:
        rtnl_unlock();
        nsim_udp_tunnels_info_destroy(ns->netdev);
err_phc_destroy:
        mock_phc_destroy(ns->phc);
        return err;
}

static int nsim_init_netdevsim_vf(struct netdevsim *ns)
{
        int err;

        ns->netdev->netdev_ops = &nsim_vf_netdev_ops;
        rtnl_lock();
        err = register_netdevice(ns->netdev);
        rtnl_unlock();
        return err;
}

static void nsim_exit_netdevsim(struct netdevsim *ns)
{
        nsim_udp_tunnels_info_destroy(ns->netdev);
        mock_phc_destroy(ns->phc);
}

struct netdevsim *nsim_create(struct nsim_dev *nsim_dev,
                              struct nsim_dev_port *nsim_dev_port,
                              u8 perm_addr[ETH_ALEN])
{
        struct net_device *dev;
        struct netdevsim *ns;
        int err;

        dev = alloc_netdev_mq(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup,
                              nsim_dev->nsim_bus_dev->num_queues);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        if (perm_addr)
                memcpy(dev->perm_addr, perm_addr, ETH_ALEN);

        dev_net_set(dev, nsim_dev_net(nsim_dev));
        ns = netdev_priv(dev);
        ns->netdev = dev;
        ns->nsim_dev = nsim_dev;
        ns->nsim_dev_port = nsim_dev_port;
        ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
        SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
        SET_NETDEV_DEVLINK_PORT(dev, &nsim_dev_port->devlink_port);
        nsim_ethtool_init(ns);
        if (nsim_dev_port_is_pf(nsim_dev_port))
                err = nsim_init_netdevsim(ns);
        else
                err = nsim_init_netdevsim_vf(ns);
        if (err)
                goto err_free_netdev;

        ns->pp_dfs = debugfs_create_file("pp_hold", 0600, nsim_dev_port->ddir,
                                         ns, &nsim_pp_hold_fops);
        ns->qr_dfs = debugfs_create_file("queue_reset", 0200,
                                         nsim_dev_port->ddir, ns,
                                         &nsim_qreset_fops);
        return ns;

err_free_netdev:
        free_netdev(dev);
        return ERR_PTR(err);
}

void nsim_destroy(struct netdevsim *ns)
{
        struct net_device *dev = ns->netdev;
        struct netdevsim *peer;

        debugfs_remove(ns->qr_dfs);
        debugfs_remove(ns->pp_dfs);

        if (ns->nb.notifier_call)
                unregister_netdevice_notifier_dev_net(ns->netdev, &ns->nb,
                                                      &ns->nn);

        nsim_psp_uninit(ns);

        rtnl_lock();
        peer = rtnl_dereference(ns->peer);
        if (peer)
                RCU_INIT_POINTER(peer->peer, NULL);
        RCU_INIT_POINTER(ns->peer, NULL);
        unregister_netdevice(dev);
        if (nsim_dev_port_is_pf(ns->nsim_dev_port)) {
                nsim_macsec_teardown(ns);
                nsim_ipsec_teardown(ns);
                nsim_bpf_uninit(ns);
                nsim_queue_uninit(ns);
        }
        rtnl_unlock();
        if (nsim_dev_port_is_pf(ns->nsim_dev_port))
                nsim_exit_netdevsim(ns);

        /* Put this intentionally late to exercise the orphaning path */
        if (ns->page) {
                page_pool_put_full_page(pp_page_to_nmdesc(ns->page)->pp,
                                        ns->page, false);
                ns->page = NULL;
        }

        free_netdev(dev);
}

bool netdev_is_nsim(struct net_device *dev)
{
        return dev->netdev_ops == &nsim_netdev_ops;
}

static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
                         struct netlink_ext_ack *extack)
{
        NL_SET_ERR_MSG_MOD(extack,
                           "Please use: echo \"[ID] [PORT_COUNT] [NUM_QUEUES]\" > /sys/bus/netdevsim/new_device");
        return -EOPNOTSUPP;
}

static struct rtnl_link_ops nsim_link_ops __read_mostly = {
        .kind = DRV_NAME,
        .validate = nsim_validate,
};

static int __init nsim_module_init(void)
{
        int err;

        err = nsim_dev_init();
        if (err)
                return err;

        err = nsim_bus_init();
        if (err)
                goto err_dev_exit;

        err = rtnl_link_register(&nsim_link_ops);
        if (err)
                goto err_bus_exit;

        return 0;

err_bus_exit:
        nsim_bus_exit();
err_dev_exit:
        nsim_dev_exit();
        return err;
}

static void __exit nsim_module_exit(void)
{
        rtnl_link_unregister(&nsim_link_ops);
        nsim_bus_exit();
        nsim_dev_exit();
}

module_init(nsim_module_init);
module_exit(nsim_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simulated networking device for testing");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);