// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Isovalent */

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/netfilter_netdev.h>
#include <linux/bpf_mprog.h>
#include <linux/indirect_call_wrapper.h>

#include <net/netkit.h>
#include <net/dst.h>
#include <net/tcx.h>

#define DRV_NAME "netkit"

struct netkit {
        /* Needed in fast-path */
        struct net_device __rcu *peer;
        struct bpf_mprog_entry __rcu *active;
        enum netkit_action policy;
        enum netkit_scrub scrub;
        struct bpf_mprog_bundle bundle;

        /* Needed in slow-path */
        enum netkit_mode mode;
        bool primary;
        u32 headroom;
};

struct netkit_link {
        struct bpf_link link;
        struct net_device *dev;
        u32 location;
};

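/* Run the attached BPF programs in mprog order. Execution stops at the
 * first program that returns something other than NETKIT_NEXT; if all
 * programs return NETKIT_NEXT (or none ran), the default policy passed
 * in via @ret is used as the verdict.
 */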
static __always_inline int
netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
           enum netkit_action ret)
{
        const struct bpf_mprog_fp *fp;
        const struct bpf_prog *prog;

        bpf_mprog_foreach_prog(entry, fp, prog) {
                bpf_compute_data_pointers(skb);
                ret = bpf_prog_run(prog, skb);
                if (ret != NETKIT_NEXT)
                        break;
        }
        return ret;
}

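/* Clear skb metadata which must not leak when a packet crosses a network
 * namespace boundary with scrubbing enabled.
 */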
static void netkit_xnet(struct sk_buff *skb)
{
        skb->priority = 0;
        skb->mark = 0;
}

static void netkit_prep_forward(struct sk_buff *skb,
                                bool xnet, bool xnet_scrub)
{
        skb_scrub_packet(skb, false);
        nf_skip_egress(skb, true);
        skb_reset_mac_header(skb);
        if (!xnet)
                return;
        ipvs_reset(skb);
        skb_clear_tstamp(skb);
        if (xnet_scrub)
                netkit_xnet(skb);
}

static struct netkit *netkit_priv(const struct net_device *dev)
{
        return netdev_priv(dev);
}

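/* Fast path: ndo_start_xmit of one side of the pair. The skb is prepared
 * for forwarding, the attached BPF programs (if any) are run, and on a
 * NEXT/PASS verdict the packet is injected into the peer device via
 * __netif_rx(). REDIRECT hands the skb to skb_do_redirect(), everything
 * else is dropped.
 */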
static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
        struct netkit *nk = netkit_priv(dev);
        enum netkit_action ret = READ_ONCE(nk->policy);
        netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
        const struct bpf_mprog_entry *entry;
        struct net_device *peer;
        int len = skb->len;

        bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
        rcu_read_lock();
        peer = rcu_dereference(nk->peer);
        if (unlikely(!peer || !(peer->flags & IFF_UP) ||
                     !pskb_may_pull(skb, ETH_HLEN) ||
                     skb_orphan_frags(skb, GFP_ATOMIC)))
                goto drop;
        netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)),
                            nk->scrub);
        eth_skb_pkt_type(skb, peer);
        skb->dev = peer;
        entry = rcu_dereference(nk->active);
        if (entry)
                ret = netkit_run(entry, skb, ret);
        switch (ret) {
        case NETKIT_NEXT:
        case NETKIT_PASS:
                eth_skb_pull_mac(skb);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
                if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
                        dev_sw_netstats_tx_add(dev, 1, len);
                        dev_sw_netstats_rx_add(peer, len);
                } else {
                        goto drop_stats;
                }
                break;
        case NETKIT_REDIRECT:
                dev_sw_netstats_tx_add(dev, 1, len);
                skb_do_redirect(skb);
                break;
        case NETKIT_DROP:
        default:
drop:
                kfree_skb(skb);
drop_stats:
                dev_core_stats_tx_dropped_inc(dev);
                ret_dev = NET_XMIT_DROP;
                break;
        }
        rcu_read_unlock();
        bpf_net_ctx_clear(bpf_net_ctx);
        return ret_dev;
}

static int netkit_open(struct net_device *dev)
{
        struct netkit *nk = netkit_priv(dev);
        struct net_device *peer = rtnl_dereference(nk->peer);

        if (!peer)
                return -ENOTCONN;
        if (peer->flags & IFF_UP) {
                netif_carrier_on(dev);
                netif_carrier_on(peer);
        }
        return 0;
}

static int netkit_close(struct net_device *dev)
{
        struct netkit *nk = netkit_priv(dev);
        struct net_device *peer = rtnl_dereference(nk->peer);

        netif_carrier_off(dev);
        if (peer)
                netif_carrier_off(peer);
        return 0;
}

static int netkit_get_iflink(const struct net_device *dev)
{
        struct netkit *nk = netkit_priv(dev);
        struct net_device *peer;
        int iflink = 0;

        rcu_read_lock();
        peer = rcu_dereference(nk->peer);
        if (peer)
                iflink = READ_ONCE(peer->ifindex);
        rcu_read_unlock();
        return iflink;
}

static void netkit_set_multicast(struct net_device *dev)
{
        /* Nothing to do, we receive whatever gets pushed to us! */
}

static int netkit_set_macaddr(struct net_device *dev, void *sa)
{
        struct netkit *nk = netkit_priv(dev);

        if (nk->mode != NETKIT_L2)
                return -EOPNOTSUPP;

        return eth_mac_addr(dev, sa);
}

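/* Keep the needed headroom of both devices in sync: each side remembers
 * its own request and the pair advertises the maximum of the two.
 */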
static void netkit_set_headroom(struct net_device *dev, int headroom)
{
        struct netkit *nk = netkit_priv(dev), *nk2;
        struct net_device *peer;

        if (headroom < 0)
                headroom = NET_SKB_PAD;

        rcu_read_lock();
        peer = rcu_dereference(nk->peer);
        if (unlikely(!peer))
                goto out;

        nk2 = netkit_priv(peer);
        nk->headroom = headroom;
        headroom = max(nk->headroom, nk2->headroom);

        peer->needed_headroom = headroom;
        dev->needed_headroom = headroom;
out:
        rcu_read_unlock();
}

INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev)
{
        return rcu_dereference(netkit_priv(dev)->peer);
}

static void netkit_get_stats(struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        dev_fetch_sw_netstats(stats, dev->tstats);
        stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
}

static void netkit_uninit(struct net_device *dev);

static const struct net_device_ops netkit_netdev_ops = {
        .ndo_open = netkit_open,
        .ndo_stop = netkit_close,
        .ndo_start_xmit = netkit_xmit,
        .ndo_set_rx_mode = netkit_set_multicast,
        .ndo_set_rx_headroom = netkit_set_headroom,
        .ndo_set_mac_address = netkit_set_macaddr,
        .ndo_get_iflink = netkit_get_iflink,
        .ndo_get_peer_dev = netkit_peer_dev,
        .ndo_get_stats64 = netkit_get_stats,
        .ndo_uninit = netkit_uninit,
        .ndo_features_check = passthru_features_check,
};

static void netkit_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}

static const struct ethtool_ops netkit_ethtool_ops = {
        .get_drvinfo = netkit_get_drvinfo,
};

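/* Basic netdev setup shared by the primary and the peer device. */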
static void netkit_setup(struct net_device *dev)
{
        static const netdev_features_t netkit_features_hw_vlan =
                NETIF_F_HW_VLAN_CTAG_TX |
                NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_HW_VLAN_STAG_TX |
                NETIF_F_HW_VLAN_STAG_RX;
        static const netdev_features_t netkit_features =
                netkit_features_hw_vlan |
                NETIF_F_SG |
                NETIF_F_FRAGLIST |
                NETIF_F_HW_CSUM |
                NETIF_F_RXCSUM |
                NETIF_F_SCTP_CRC |
                NETIF_F_HIGHDMA |
                NETIF_F_GSO_SOFTWARE |
                NETIF_F_GSO_ENCAP_ALL;

        ether_setup(dev);
        dev->max_mtu = ETH_MAX_MTU;
        dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

        dev->flags |= IFF_NOARP;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        dev->priv_flags |= IFF_PHONY_HEADROOM;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->priv_flags |= IFF_DISABLE_NETPOLL;
        dev->lltx = true;

        dev->ethtool_ops = &netkit_ethtool_ops;
        dev->netdev_ops = &netkit_netdev_ops;

        dev->features |= netkit_features;
        dev->hw_features = netkit_features;
        dev->hw_enc_features = netkit_features;
        dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
        dev->vlan_features = dev->features & ~netkit_features_hw_vlan;

        dev->needs_free_netdev = true;

        netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}

static struct net *netkit_get_link_net(const struct net_device *dev)
{
        struct netkit *nk = netkit_priv(dev);
        struct net_device *peer = rtnl_dereference(nk->peer);

        return peer ? dev_net(peer) : dev_net(dev);
}

static int netkit_check_policy(int policy, struct nlattr *tb,
                               struct netlink_ext_ack *extack)
{
        switch (policy) {
        case NETKIT_PASS:
        case NETKIT_DROP:
                return 0;
        default:
                NL_SET_ERR_MSG_ATTR(extack, tb,
                                    "Provided default xmit policy not supported");
                return -EINVAL;
        }
}

static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
                           struct netlink_ext_ack *extack)
{
        struct nlattr *attr = tb[IFLA_ADDRESS];

        if (!attr)
                return 0;
        if (nla_len(attr) != ETH_ALEN)
                return -EINVAL;
        if (!is_valid_ether_addr(nla_data(attr)))
                return -EADDRNOTAVAIL;
        return 0;
}

static struct rtnl_link_ops netkit_link_ops;

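/* Create a netkit pair: the peer device is set up first (possibly in a
 * different network namespace), then the primary, and finally the two
 * devices are wired to each other via their RCU-protected peer pointers.
 */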
static int netkit_new_link(struct net *peer_net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[],
                           struct netlink_ext_ack *extack)
{
        struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr;
        enum netkit_action policy_prim = NETKIT_PASS;
        enum netkit_action policy_peer = NETKIT_PASS;
        enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT;
        enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
        enum netkit_mode mode = NETKIT_L3;
        unsigned char ifname_assign_type;
        u16 headroom = 0, tailroom = 0;
        struct ifinfomsg *ifmp = NULL;
        struct net_device *peer;
        char ifname[IFNAMSIZ];
        struct netkit *nk;
        int err;

        if (data) {
                if (data[IFLA_NETKIT_MODE])
                        mode = nla_get_u32(data[IFLA_NETKIT_MODE]);
                if (data[IFLA_NETKIT_PEER_INFO]) {
                        attr = data[IFLA_NETKIT_PEER_INFO];
                        ifmp = nla_data(attr);
                        rtnl_nla_parse_ifinfomsg(peer_tb, attr, extack);
                        tbp = peer_tb;
                }
                if (data[IFLA_NETKIT_SCRUB])
                        scrub_prim = nla_get_u32(data[IFLA_NETKIT_SCRUB]);
                if (data[IFLA_NETKIT_PEER_SCRUB])
                        scrub_peer = nla_get_u32(data[IFLA_NETKIT_PEER_SCRUB]);
                if (data[IFLA_NETKIT_POLICY]) {
                        attr = data[IFLA_NETKIT_POLICY];
                        policy_prim = nla_get_u32(attr);
                        err = netkit_check_policy(policy_prim, attr, extack);
                        if (err < 0)
                                return err;
                }
                if (data[IFLA_NETKIT_PEER_POLICY]) {
                        attr = data[IFLA_NETKIT_PEER_POLICY];
                        policy_peer = nla_get_u32(attr);
                        err = netkit_check_policy(policy_peer, attr, extack);
                        if (err < 0)
                                return err;
                }
                if (data[IFLA_NETKIT_HEADROOM])
                        headroom = nla_get_u16(data[IFLA_NETKIT_HEADROOM]);
                if (data[IFLA_NETKIT_TAILROOM])
                        tailroom = nla_get_u16(data[IFLA_NETKIT_TAILROOM]);
        }

        if (ifmp && tbp[IFLA_IFNAME]) {
                nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
                ifname_assign_type = NET_NAME_USER;
        } else {
                strscpy(ifname, "nk%d", IFNAMSIZ);
                ifname_assign_type = NET_NAME_ENUM;
        }
        if (mode != NETKIT_L2 &&
            (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
                return -EOPNOTSUPP;

        peer = rtnl_create_link(peer_net, ifname, ifname_assign_type,
                                &netkit_link_ops, tbp, extack);
        if (IS_ERR(peer))
                return PTR_ERR(peer);

        netif_inherit_tso_max(peer, dev);
        if (headroom) {
                peer->needed_headroom = headroom;
                dev->needed_headroom = headroom;
        }
        if (tailroom) {
                peer->needed_tailroom = tailroom;
                dev->needed_tailroom = tailroom;
        }

        if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
                eth_hw_addr_random(peer);
        if (ifmp && dev->ifindex)
                peer->ifindex = ifmp->ifi_index;

        nk = netkit_priv(peer);
        nk->primary = false;
        nk->policy = policy_peer;
        nk->scrub = scrub_peer;
        nk->mode = mode;
        nk->headroom = headroom;
        bpf_mprog_bundle_init(&nk->bundle);

        err = register_netdevice(peer);
        if (err < 0)
                goto err_register_peer;
        netif_carrier_off(peer);
        if (mode == NETKIT_L2)
                dev_change_flags(peer, peer->flags & ~IFF_NOARP, NULL);

        err = rtnl_configure_link(peer, NULL, 0, NULL);
        if (err < 0)
                goto err_configure_peer;

        if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);
        if (tb[IFLA_IFNAME])
                nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
        else
                strscpy(dev->name, "nk%d", IFNAMSIZ);

        nk = netkit_priv(dev);
        nk->primary = true;
        nk->policy = policy_prim;
        nk->scrub = scrub_prim;
        nk->mode = mode;
        nk->headroom = headroom;
        bpf_mprog_bundle_init(&nk->bundle);

        err = register_netdevice(dev);
        if (err < 0)
                goto err_configure_peer;
        netif_carrier_off(dev);
        if (mode == NETKIT_L2)
                dev_change_flags(dev, dev->flags & ~IFF_NOARP, NULL);

        rcu_assign_pointer(netkit_priv(dev)->peer, peer);
        rcu_assign_pointer(netkit_priv(peer)->peer, dev);
        return 0;
err_configure_peer:
        unregister_netdevice(peer);
        return err;
err_register_peer:
        free_netdev(peer);
        return err;
}

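/* Return the currently active mprog entry, or fall back to the embedded
 * bundle when the caller is about to attach the first program.
 */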
static struct bpf_mprog_entry *netkit_entry_fetch(struct net_device *dev,
                                                  bool bundle_fallback)
{
        struct netkit *nk = netkit_priv(dev);
        struct bpf_mprog_entry *entry;

        ASSERT_RTNL();
        entry = rcu_dereference_rtnl(nk->active);
        if (entry)
                return entry;
        if (bundle_fallback)
                return &nk->bundle.a;
        return NULL;
}

static void netkit_entry_update(struct net_device *dev,
                                struct bpf_mprog_entry *entry)
{
        struct netkit *nk = netkit_priv(dev);

        ASSERT_RTNL();
        rcu_assign_pointer(nk->active, entry);
}

static void netkit_entry_sync(void)
{
        synchronize_rcu();
}

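/* Resolve the target device for a BPF attachment. Programs can only be
 * managed through the primary device; BPF_NETKIT_PEER redirects the
 * operation to the peer once the primary has been validated.
 */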
static struct net_device *netkit_dev_fetch(struct net *net, u32 ifindex, u32 which)
{
        struct net_device *dev;
        struct netkit *nk;

        ASSERT_RTNL();

        switch (which) {
        case BPF_NETKIT_PRIMARY:
        case BPF_NETKIT_PEER:
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        dev = __dev_get_by_index(net, ifindex);
        if (!dev)
                return ERR_PTR(-ENODEV);
        if (dev->netdev_ops != &netkit_netdev_ops)
                return ERR_PTR(-ENXIO);

        nk = netkit_priv(dev);
        if (!nk->primary)
                return ERR_PTR(-EACCES);
        if (which == BPF_NETKIT_PEER) {
                dev = rcu_dereference_rtnl(nk->peer);
                if (!dev)
                        return ERR_PTR(-ENODEV);
        }
        return dev;
}

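/* Attach a BPF program directly (without a link) to the primary or peer
 * side, optionally replacing an existing program when BPF_F_REPLACE is set.
 */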
int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct bpf_mprog_entry *entry, *entry_new;
        struct bpf_prog *replace_prog = NULL;
        struct net_device *dev;
        int ret;

        rtnl_lock();
        dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex,
                               attr->attach_type);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto out;
        }
        entry = netkit_entry_fetch(dev, true);
        if (attr->attach_flags & BPF_F_REPLACE) {
                replace_prog = bpf_prog_get_type(attr->replace_bpf_fd,
                                                 prog->type);
                if (IS_ERR(replace_prog)) {
                        ret = PTR_ERR(replace_prog);
                        replace_prog = NULL;
                        goto out;
                }
        }
        ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog,
                               attr->attach_flags, attr->relative_fd,
                               attr->expected_revision);
        if (!ret) {
                if (entry != entry_new) {
                        netkit_entry_update(dev, entry_new);
                        netkit_entry_sync();
                }
                bpf_mprog_commit(entry);
        }
out:
        if (replace_prog)
                bpf_prog_put(replace_prog);
        rtnl_unlock();
        return ret;
}

int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct bpf_mprog_entry *entry, *entry_new;
        struct net_device *dev;
        int ret;

        rtnl_lock();
        dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex,
                               attr->attach_type);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto out;
        }
        entry = netkit_entry_fetch(dev, false);
        if (!entry) {
                ret = -ENOENT;
                goto out;
        }
        ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, attr->attach_flags,
                               attr->relative_fd, attr->expected_revision);
        if (!ret) {
                if (!bpf_mprog_total(entry_new))
                        entry_new = NULL;
                netkit_entry_update(dev, entry_new);
                netkit_entry_sync();
                bpf_mprog_commit(entry);
        }
out:
        rtnl_unlock();
        return ret;
}

int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
        struct net_device *dev;
        int ret;

        rtnl_lock();
        dev = netkit_dev_fetch(current->nsproxy->net_ns,
                               attr->query.target_ifindex,
                               attr->query.attach_type);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto out;
        }
        ret = bpf_mprog_query(attr, uattr, netkit_entry_fetch(dev, false));
out:
        rtnl_unlock();
        return ret;
}

static struct netkit_link *netkit_link(const struct bpf_link *link)
{
        return container_of(link, struct netkit_link, link);
}

static int netkit_link_prog_attach(struct bpf_link *link, u32 flags,
                                   u32 id_or_fd, u64 revision)
{
        struct netkit_link *nkl = netkit_link(link);
        struct bpf_mprog_entry *entry, *entry_new;
        struct net_device *dev = nkl->dev;
        int ret;

        ASSERT_RTNL();
        entry = netkit_entry_fetch(dev, true);
        ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags,
                               id_or_fd, revision);
        if (!ret) {
                if (entry != entry_new) {
                        netkit_entry_update(dev, entry_new);
                        netkit_entry_sync();
                }
                bpf_mprog_commit(entry);
        }
        return ret;
}

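/* Called when the BPF link is being torn down: detach its program from
 * the device unless the device already went away underneath us (nkl->dev
 * cleared by netkit_release_all()).
 */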
static void netkit_link_release(struct bpf_link *link)
{
        struct netkit_link *nkl = netkit_link(link);
        struct bpf_mprog_entry *entry, *entry_new;
        struct net_device *dev;
        int ret = 0;

        rtnl_lock();
        dev = nkl->dev;
        if (!dev)
                goto out;
        entry = netkit_entry_fetch(dev, false);
        if (!entry) {
                ret = -ENOENT;
                goto out;
        }
        ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0);
        if (!ret) {
                if (!bpf_mprog_total(entry_new))
                        entry_new = NULL;
                netkit_entry_update(dev, entry_new);
                netkit_entry_sync();
                bpf_mprog_commit(entry);
                nkl->dev = NULL;
        }
out:
        WARN_ON_ONCE(ret);
        rtnl_unlock();
}

static int netkit_link_update(struct bpf_link *link, struct bpf_prog *nprog,
                              struct bpf_prog *oprog)
{
        struct netkit_link *nkl = netkit_link(link);
        struct bpf_mprog_entry *entry, *entry_new;
        struct net_device *dev;
        int ret = 0;

        rtnl_lock();
        dev = nkl->dev;
        if (!dev) {
                ret = -ENOLINK;
                goto out;
        }
        if (oprog && link->prog != oprog) {
                ret = -EPERM;
                goto out;
        }
        oprog = link->prog;
        if (oprog == nprog) {
                bpf_prog_put(nprog);
                goto out;
        }
        entry = netkit_entry_fetch(dev, false);
        if (!entry) {
                ret = -ENOENT;
                goto out;
        }
        ret = bpf_mprog_attach(entry, &entry_new, nprog, link, oprog,
                               BPF_F_REPLACE | BPF_F_ID,
                               link->prog->aux->id, 0);
        if (!ret) {
                WARN_ON_ONCE(entry != entry_new);
                oprog = xchg(&link->prog, nprog);
                bpf_prog_put(oprog);
                bpf_mprog_commit(entry);
        }
out:
        rtnl_unlock();
        return ret;
}

static void netkit_link_dealloc(struct bpf_link *link)
{
        kfree(netkit_link(link));
}

static void netkit_link_fdinfo(const struct bpf_link *link, struct seq_file *seq)
{
        const struct netkit_link *nkl = netkit_link(link);
        u32 ifindex = 0;

        rtnl_lock();
        if (nkl->dev)
                ifindex = nkl->dev->ifindex;
        rtnl_unlock();

        seq_printf(seq, "ifindex:\t%u\n", ifindex);
        seq_printf(seq, "attach_type:\t%u (%s)\n",
                   nkl->location,
                   nkl->location == BPF_NETKIT_PRIMARY ? "primary" : "peer");
}

static int netkit_link_fill_info(const struct bpf_link *link,
                                 struct bpf_link_info *info)
{
        const struct netkit_link *nkl = netkit_link(link);
        u32 ifindex = 0;

        rtnl_lock();
        if (nkl->dev)
                ifindex = nkl->dev->ifindex;
        rtnl_unlock();

        info->netkit.ifindex = ifindex;
        info->netkit.attach_type = nkl->location;
        return 0;
}

static int netkit_link_detach(struct bpf_link *link)
{
        netkit_link_release(link);
        return 0;
}

static const struct bpf_link_ops netkit_link_lops = {
        .release = netkit_link_release,
        .detach = netkit_link_detach,
        .dealloc = netkit_link_dealloc,
        .update_prog = netkit_link_update,
        .show_fdinfo = netkit_link_fdinfo,
        .fill_link_info = netkit_link_fill_info,
};

static int netkit_link_init(struct netkit_link *nkl,
                            struct bpf_link_primer *link_primer,
                            const union bpf_attr *attr,
                            struct net_device *dev,
                            struct bpf_prog *prog)
{
        bpf_link_init(&nkl->link, BPF_LINK_TYPE_NETKIT,
                      &netkit_link_lops, prog);
        nkl->location = attr->link_create.attach_type;
        nkl->dev = dev;
        return bpf_link_prime(&nkl->link, link_primer);
}

int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct bpf_link_primer link_primer;
        struct netkit_link *nkl;
        struct net_device *dev;
        int ret;

        rtnl_lock();
        dev = netkit_dev_fetch(current->nsproxy->net_ns,
                               attr->link_create.target_ifindex,
                               attr->link_create.attach_type);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto out;
        }
        nkl = kzalloc(sizeof(*nkl), GFP_KERNEL_ACCOUNT);
        if (!nkl) {
                ret = -ENOMEM;
                goto out;
        }
        ret = netkit_link_init(nkl, &link_primer, attr, dev, prog);
        if (ret) {
                kfree(nkl);
                goto out;
        }
        ret = netkit_link_prog_attach(&nkl->link,
                                      attr->link_create.flags,
                                      attr->link_create.netkit.relative_fd,
                                      attr->link_create.netkit.expected_revision);
        if (ret) {
                nkl->dev = NULL;
                bpf_link_cleanup(&link_primer);
                goto out;
        }
        ret = bpf_link_settle(&link_primer);
out:
        rtnl_unlock();
        return ret;
}

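/* Device teardown: detach the whole mprog entry and drop the references
 * held on it. Links are merely disconnected (their dev pointer is
 * cleared), directly attached programs are released.
 */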
static void netkit_release_all(struct net_device *dev)
{
        struct bpf_mprog_entry *entry;
        struct bpf_tuple tuple = {};
        struct bpf_mprog_fp *fp;
        struct bpf_mprog_cp *cp;

        entry = netkit_entry_fetch(dev, false);
        if (!entry)
                return;
        netkit_entry_update(dev, NULL);
        netkit_entry_sync();
        bpf_mprog_foreach_tuple(entry, fp, cp, tuple) {
                if (tuple.link)
                        netkit_link(tuple.link)->dev = NULL;
                else
                        bpf_prog_put(tuple.prog);
        }
}

static void netkit_uninit(struct net_device *dev)
{
        netkit_release_all(dev);
}

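/* Deleting one side of the pair tears down both devices. */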
static void netkit_del_link(struct net_device *dev, struct list_head *head)
{
        struct netkit *nk = netkit_priv(dev);
        struct net_device *peer = rtnl_dereference(nk->peer);

        RCU_INIT_POINTER(nk->peer, NULL);
        unregister_netdevice_queue(dev, head);
        if (peer) {
                nk = netkit_priv(peer);
                RCU_INIT_POINTER(nk->peer, NULL);
                unregister_netdevice_queue(peer, head);
        }
}

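/* Only the default policies may be changed after creation, and only via
 * the primary device; mode, scrubbing, headroom and tailroom are fixed.
 */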
static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
                              struct nlattr *data[],
                              struct netlink_ext_ack *extack)
{
        struct netkit *nk = netkit_priv(dev);
        struct net_device *peer = rtnl_dereference(nk->peer);
        enum netkit_action policy;
        struct nlattr *attr;
        int err, i;
        static const struct {
                u32 attr;
                char *name;
        } fixed_params[] = {
                { IFLA_NETKIT_MODE, "operating mode" },
                { IFLA_NETKIT_SCRUB, "scrubbing" },
                { IFLA_NETKIT_PEER_SCRUB, "peer scrubbing" },
                { IFLA_NETKIT_PEER_INFO, "peer info" },
                { IFLA_NETKIT_HEADROOM, "headroom" },
                { IFLA_NETKIT_TAILROOM, "tailroom" },
        };

        if (!nk->primary) {
                NL_SET_ERR_MSG(extack,
                               "netkit link settings can be changed only through the primary device");
                return -EACCES;
        }

        for (i = 0; i < ARRAY_SIZE(fixed_params); i++) {
                attr = data[fixed_params[i].attr];
                if (attr) {
                        NL_SET_ERR_MSG_ATTR_FMT(extack, attr,
                                                "netkit link %s cannot be changed after device creation",
                                                fixed_params[i].name);
                        return -EACCES;
                }
        }

        if (data[IFLA_NETKIT_POLICY]) {
                attr = data[IFLA_NETKIT_POLICY];
                policy = nla_get_u32(attr);
                err = netkit_check_policy(policy, attr, extack);
                if (err)
                        return err;
                WRITE_ONCE(nk->policy, policy);
        }

        if (data[IFLA_NETKIT_PEER_POLICY]) {
                err = -EOPNOTSUPP;
                attr = data[IFLA_NETKIT_PEER_POLICY];
                policy = nla_get_u32(attr);
                if (peer)
                        err = netkit_check_policy(policy, attr, extack);
                if (err)
                        return err;
                nk = netkit_priv(peer);
                WRITE_ONCE(nk->policy, policy);
        }

        return 0;
}

static size_t netkit_get_size(const struct net_device *dev)
{
        return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */
               nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_POLICY */
               nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_SCRUB */
               nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */
               nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */
               nla_total_size(sizeof(u8)) +  /* IFLA_NETKIT_PRIMARY */
               nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_HEADROOM */
               nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_TAILROOM */
               0;
}

static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct netkit *nk = netkit_priv(dev);
        struct net_device *peer = rtnl_dereference(nk->peer);

        if (nla_put_u8(skb, IFLA_NETKIT_PRIMARY, nk->primary))
                return -EMSGSIZE;
        if (nla_put_u32(skb, IFLA_NETKIT_POLICY, nk->policy))
                return -EMSGSIZE;
        if (nla_put_u32(skb, IFLA_NETKIT_MODE, nk->mode))
                return -EMSGSIZE;
        if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub))
                return -EMSGSIZE;
        if (nla_put_u16(skb, IFLA_NETKIT_HEADROOM, dev->needed_headroom))
                return -EMSGSIZE;
        if (nla_put_u16(skb, IFLA_NETKIT_TAILROOM, dev->needed_tailroom))
                return -EMSGSIZE;

        if (peer) {
                nk = netkit_priv(peer);
                if (nla_put_u32(skb, IFLA_NETKIT_PEER_POLICY, nk->policy))
                        return -EMSGSIZE;
                if (nla_put_u32(skb, IFLA_NETKIT_PEER_SCRUB, nk->scrub))
                        return -EMSGSIZE;
        }

        return 0;
}

static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
        [IFLA_NETKIT_PEER_INFO] = { .len = sizeof(struct ifinfomsg) },
        [IFLA_NETKIT_MODE] = NLA_POLICY_MAX(NLA_U32, NETKIT_L3),
        [IFLA_NETKIT_POLICY] = { .type = NLA_U32 },
        [IFLA_NETKIT_PEER_POLICY] = { .type = NLA_U32 },
        [IFLA_NETKIT_HEADROOM] = { .type = NLA_U16 },
        [IFLA_NETKIT_TAILROOM] = { .type = NLA_U16 },
        [IFLA_NETKIT_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
        [IFLA_NETKIT_PEER_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
        [IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT,
                                  .reject_message = "Primary attribute is read-only" },
};

static struct rtnl_link_ops netkit_link_ops = {
        .kind = DRV_NAME,
        .priv_size = sizeof(struct netkit),
        .setup = netkit_setup,
        .newlink = netkit_new_link,
        .dellink = netkit_del_link,
        .changelink = netkit_change_link,
        .get_link_net = netkit_get_link_net,
        .get_size = netkit_get_size,
        .fill_info = netkit_fill_info,
        .policy = netkit_policy,
        .validate = netkit_validate,
        .peer_type = IFLA_NETKIT_PEER_INFO,
        .maxtype = IFLA_NETKIT_MAX,
};

static __init int netkit_init(void)
{
        BUILD_BUG_ON((int)NETKIT_NEXT != (int)TCX_NEXT ||
                     (int)NETKIT_PASS != (int)TCX_PASS ||
                     (int)NETKIT_DROP != (int)TCX_DROP ||
                     (int)NETKIT_REDIRECT != (int)TCX_REDIRECT);

        return rtnl_link_register(&netkit_link_ops);
}

static __exit void netkit_exit(void)
{
        rtnl_link_unregister(&netkit_link_ops);
}

module_init(netkit_init);
module_exit(netkit_exit);

MODULE_DESCRIPTION("BPF-programmable network device");
MODULE_AUTHOR("Daniel Borkmann <daniel@iogearbox.net>");
MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);