Lines Matching defs:dev in kernel/bpf/devmap.c

34 * When removing the dev a cmpxchg() is used to ensure the correct dev is
36 * possible that the initially referenced dev is no longer in the map. As the
37 * notifier hook walks the map we know that new dev references can not be
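
The comment above (lines 34-37) states the removal guarantee the netdev notifier relies on: a slot is cleared with cmpxchg() only if it still holds the entry that was just looked up, so a slot a concurrent update has already repopulated is left alone (see the NETDEV_UNREGISTER handling around lines 1135-1142 below). A minimal userspace C11 sketch of that compare-and-swap pattern follows; struct entry, slots[] and remove_ifindex() are hypothetical stand-ins rather than names from devmap.c, and the immediate free() stands in for the deferred call_rcu() release.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct bpf_dtab_netdev: just an ifindex. */
struct entry {
    int ifindex;
};

#define MAP_SIZE 4

/* One atomic pointer per slot, playing the role of dtab->netdev_map[]. */
static _Atomic(struct entry *) slots[MAP_SIZE];

/* Clear every slot that still points at an entry for the given ifindex.
 * The compare-and-swap succeeds only if the slot still holds the pointer
 * we just read, so a slot that a concurrent update already replaced is
 * left untouched; that is the guarantee the cmpxchg() in the devmap
 * notifier path provides.  The kernel defers the actual release with
 * call_rcu(); this sketch frees immediately because it has no readers. */
static void remove_ifindex(int ifindex)
{
    for (int i = 0; i < MAP_SIZE; i++) {
        struct entry *cur = atomic_load(&slots[i]);

        if (!cur || cur->ifindex != ifindex)
            continue;
        if (atomic_compare_exchange_strong(&slots[i], &cur,
                                           (struct entry *)NULL))
            free(cur);
    }
}

int main(void)
{
    struct entry *e = malloc(sizeof(*e));

    e->ifindex = 7;
    atomic_store(&slots[2], e);
    remove_ifindex(7);
    printf("slot 2 is %s\n", atomic_load(&slots[2]) ? "set" : "empty");
    return 0;
}
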
59 struct net_device *dev;
66 struct net_device *dev; /* must be first member, due to tracepoint */
136 /* Lookup returns a pointer straight to dev->ifindex, so make sure the
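
The comment at line 136 refers to devmap lookups handing BPF programs a pointer straight at the stored value, which is why the map is made read-only from the program side. For context, a minimal XDP program sketch that redirects through a devmap; the map name tx_ports and the libbpf-style definition are illustrative assumptions, not part of devmap.c.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical map: a BPF_MAP_TYPE_DEVMAP keyed by a u32 index, with the
 * simplest value layout, a u32 ifindex. */
struct {
    __uint(type, BPF_MAP_TYPE_DEVMAP);
    __uint(key_size, sizeof(__u32));
    __uint(value_size, sizeof(__u32));
    __uint(max_entries, 8);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_prog(struct xdp_md *ctx)
{
    __u32 key = 0;

    /* The program only reads the map; bpf_redirect_map() looks up the
     * device stored at key 0 and queues the packet for it. */
    return bpf_redirect_map(&tx_ports, key, 0);
}

char _license[] SEC("license") = "GPL";

Entries are written from user space with bpf_map_update_elem(); with the map read-only for programs, a BPF-side store through the looked-up pointer is rejected by the verifier.
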
218 struct bpf_dtab_netdev *dev;
224 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
225 hlist_del_rcu(&dev->index_hlist);
226 if (dev->xdp_prog)
227 bpf_prog_put(dev->xdp_prog);
228 dev_put(dev->dev);
229 kfree(dev);
236 struct bpf_dtab_netdev *dev;
238 dev = rcu_dereference_raw(dtab->netdev_map[i]);
239 if (!dev)
242 if (dev->xdp_prog)
243 bpf_prog_put(dev->xdp_prog);
244 dev_put(dev->dev);
245 kfree(dev);
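
Lines 218-245 are the two teardown paths (the hash variant and the array variant); both walk every stored entry and release its references in the same order. A tiny userspace sketch of that release order, with hypothetical ref and map_entry types in place of bpf_prog, net_device and bpf_dtab_netdev:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted stand-in for the program and device references
 * an entry pins while it sits in the map. */
struct ref { int count; };

static void ref_put(struct ref *r)
{
    if (--r->count == 0)
        free(r);
}

struct map_entry {
    struct ref *prog;    /* optional, like dev->xdp_prog */
    struct ref *netdev;  /* always held, like dev->dev */
};

/* Same release order as the teardown loops above: drop the optional
 * program reference, drop the device reference, then free the entry. */
static void entry_free(struct map_entry *e)
{
    if (e->prog)
        ref_put(e->prog);
    ref_put(e->netdev);
    free(e);
}

int main(void)
{
    struct map_entry *e = malloc(sizeof(*e));

    e->prog = NULL;
    e->netdev = malloc(sizeof(*e->netdev));
    e->netdev->count = 1;
    entry_free(e);
    puts("entry released");
    return 0;
}
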
279 struct bpf_dtab_netdev *dev;
281 hlist_for_each_entry_rcu(dev, head, index_hlist,
283 if (dev->idx == key)
284 return dev;
294 struct bpf_dtab_netdev *dev, *next_dev;
303 dev = __dev_map_hash_lookup_elem(map, idx);
304 if (!dev)
307 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
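
Lines 279-307 show the hash-map side: lookup walks one RCU-protected hlist bucket comparing the stored idx against the key, and get-next-key simply follows the entry's chain pointer. A plain, single-threaded C sketch of that bucket walk; node, buckets[] and the modulo hash are hypothetical simplifications (the kernel hashes properly and protects readers with RCU):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified hash entry: a user-chosen key (like
 * bpf_dtab_netdev::idx), the stored ifindex, and plain chaining in
 * place of the kernel's index_hlist. */
struct node {
    unsigned int idx;
    int ifindex;
    struct node *next;
};

#define NBUCKETS 8
static struct node *buckets[NBUCKETS];

static unsigned int bucket_of(unsigned int key)
{
    return key % NBUCKETS;   /* toy hash, for illustration only */
}

/* Walk one bucket and return the entry whose idx matches the key,
 * the same shape as __dev_map_hash_lookup_elem()'s hlist walk. */
static struct node *lookup(unsigned int key)
{
    for (struct node *n = buckets[bucket_of(key)]; n; n = n->next)
        if (n->idx == key)
            return n;
    return NULL;
}

/* Head insertion into the bucket, like hlist_add_head_rcu() at line 995
 * (minus the RCU publication semantics). */
static void insert(unsigned int key, int ifindex)
{
    struct node *n = malloc(sizeof(*n));

    n->idx = key;
    n->ifindex = ifindex;
    n->next = buckets[bucket_of(key)];
    buckets[bucket_of(key)] = n;
}

int main(void)
{
    insert(42, 3);

    struct node *n = lookup(42);

    printf("key 42 maps to ifindex %d\n", n ? n->ifindex : -1);
    return 0;
}
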
339 struct xdp_txq_info txq = { .dev = tx_dev };
340 struct xdp_rxq_info rxq = { .dev = rx_dev };
378 struct net_device *dev = bq->dev;
394 to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
399 sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
416 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
456 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
459 struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
482 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
488 if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
491 if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
495 err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
499 bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
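
Lines 378-499 cover the transmit path: frames are not sent one at a time but appended to a per-CPU bulk queue (bq_enqueue, note the this_cpu_ptr() at line 459), and the batch is handed to the driver's ndo_xdp_xmit() by bq_xmit_all, with anything the driver rejects accounted as dropped. A self-contained sketch of that enqueue/flush shape; the types, the driver_xmit() stand-in and the batch size are assumptions for illustration only.

#include <stdio.h>

#define BULK_SIZE 16   /* hypothetical; the kernel's batch is also small and fixed */

/* Hypothetical frame/queue types standing in for xdp_frame and
 * xdp_dev_bulk_queue (the real queue lives in per-CPU storage). */
struct frame { int id; };

struct bulk_queue {
    struct frame *q[BULK_SIZE];
    int count;
};

/* Stand-in for ndo_xdp_xmit(): pretend the driver accepts everything. */
static int driver_xmit(struct frame **frames, int n)
{
    return n;
}

/* Shape of bq_xmit_all(): hand the batch to the driver, account the
 * difference as drops, reset the queue. */
static void bq_flush(struct bulk_queue *bq)
{
    int sent = driver_xmit(bq->q, bq->count);

    printf("flushed: sent=%d dropped=%d\n", sent, bq->count - sent);
    bq->count = 0;
}

/* Shape of bq_enqueue(): append, flushing first if the batch is already
 * full, so the driver sees bursts rather than single frames. */
static void bq_enqueue(struct bulk_queue *bq, struct frame *f)
{
    if (bq->count == BULK_SIZE)
        bq_flush(bq);
    bq->q[bq->count++] = f;
}

int main(void)
{
    struct bulk_queue bq = { .count = 0 };
    struct frame frames[20];

    for (int i = 0; i < 20; i++) {
        frames[i].id = i;
        bq_enqueue(&bq, &frames[i]);
    }
    bq_flush(&bq);   /* flush the partial batch at the end of the cycle */
    return 0;
}
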
505 struct xdp_txq_info txq = { .dev = dst->dev };
524 trace_xdp_exception(dst->dev, dst->xdp_prog, act);
534 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
537 return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
543 struct net_device *dev = dst->dev;
545 return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
553 if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
556 if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
560 if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
576 bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
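
Lines 505-576 belong to the per-destination validity check and the broadcast clone path: a destination is skipped unless its xdp_features advertise NDO_XMIT (and NDO_XMIT_SG for multi-buffer frames), and each additional destination gets its own copy of the frame. A sketch of that skip-or-clone loop with made-up capability bits and types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical capability bits, in the spirit of dev->xdp_features. */
#define CAP_XMIT     0x1
#define CAP_XMIT_SG  0x2

struct destination { int ifindex; unsigned int caps; };
struct frame { char payload[64]; int is_multibuf; };

/* Skip destinations that cannot transmit the frame (the is_valid_dst()
 * checks), and give every remaining destination its own copy (the
 * dev_map_enqueue_clone() step). */
static void broadcast(const struct frame *f,
                      const struct destination *dsts, int n)
{
    for (int i = 0; i < n; i++) {
        if (!(dsts[i].caps & CAP_XMIT))
            continue;
        if (f->is_multibuf && !(dsts[i].caps & CAP_XMIT_SG))
            continue;

        struct frame *copy = malloc(sizeof(*copy));

        memcpy(copy, f, sizeof(*copy));
        printf("queued a copy on ifindex %d\n", dsts[i].ifindex);
        free(copy);   /* the real path hands the clone to bq_enqueue() */
    }
}

int main(void)
{
    struct frame f = { .payload = "hello", .is_multibuf = 0 };
    struct destination dsts[] = {
        { .ifindex = 2, .caps = CAP_XMIT },
        { .ifindex = 3, .caps = 0 },                  /* skipped */
        { .ifindex = 4, .caps = CAP_XMIT | CAP_XMIT_SG },
    };

    broadcast(&f, dsts, 3);
    return 0;
}
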
594 static int get_upper_ifindexes(struct net_device *dev, int *indexes)
600 netdev_for_each_upper_dev_rcu(dev, upper, iter) {
629 if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
653 dst->dev->ifindex))
673 bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
685 err = xdp_ok_fwd_dev(dst->dev, skb->len);
696 skb->dev = dst->dev;
722 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
736 num_excluded = get_upper_ifindexes(dev, excluded_devices);
737 excluded_devices[num_excluded++] = dev->ifindex;
747 if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
768 dst->dev->ifindex))
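
Lines 594-768 are the broadcast-with-exclusions logic: get_upper_ifindexes() collects the ingress device's upper devices (such as a bond or bridge master), the ingress ifindex itself is appended, and any destination whose ifindex appears in that list is skipped, which is what BPF_F_EXCLUDE_INGRESS asks for. A small sketch of that exclusion check; the arrays and sizes are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define MAX_EXCLUDED 8   /* hypothetical bound for the sketch */

/* Linear membership test, mirroring the is_ifindex_excluded() helper. */
static bool is_ifindex_excluded(const int *excluded, int num, int ifindex)
{
    for (int i = 0; i < num; i++)
        if (excluded[i] == ifindex)
            return true;
    return false;
}

/* Forward to every destination except the excluded ones, the shape of
 * dev_map_enqueue_multi() and dev_map_redirect_multi(). */
static void broadcast(const int *dsts, int ndst,
                      const int *excluded, int num_excluded)
{
    for (int i = 0; i < ndst; i++) {
        if (is_ifindex_excluded(excluded, num_excluded, dsts[i]))
            continue;
        printf("forwarding a copy to ifindex %d\n", dsts[i]);
    }
}

int main(void)
{
    int dsts[] = { 2, 3, 4, 5 };
    int excluded[MAX_EXCLUDED] = { 3 /* ingress */, 4 /* its upper dev */ };

    broadcast(dsts, 4, excluded, 2);
    return 0;
}
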
811 struct bpf_dtab_netdev *dev;
813 dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
814 if (dev->xdp_prog)
815 bpf_prog_put(dev->xdp_prog);
816 dev_put(dev->dev);
817 kfree(dev);
865 struct bpf_dtab_netdev *dev;
867 dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
870 if (!dev)
873 dev->dev = dev_get_by_index(net, val->ifindex);
874 if (!dev->dev)
887 dev->idx = idx;
889 dev->xdp_prog = prog;
890 dev->val.bpf_prog.id = prog->aux->id;
892 dev->xdp_prog = NULL;
893 dev->val.bpf_prog.id = 0;
895 dev->val.ifindex = val->ifindex;
897 return dev;
901 dev_put(dev->dev);
903 kfree(dev);
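
Lines 865-903 are __dev_map_alloc_node(): allocate the entry, resolve the user-supplied ifindex to a device reference with dev_get_by_index(), fill in idx, the optional program and the value, and unwind whatever was already taken if a later step fails. A self-contained sketch of that allocate/resolve/rollback shape; the device table and all names here are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical device table so the sketch is self-contained; the real
 * code takes a reference through dev_get_by_index() instead. */
struct device { int ifindex; int refs; };

struct map_entry {
    struct device *device;
    unsigned int idx;
};

static struct device *device_get_by_index(struct device *table, int n,
                                          int ifindex)
{
    for (int i = 0; i < n; i++)
        if (table[i].ifindex == ifindex) {
            table[i].refs++;
            return &table[i];
        }
    return NULL;
}

/* Shape of __dev_map_alloc_node(): allocate, resolve the ifindex to a
 * device reference, fill in the fields, and unwind on failure. */
static struct map_entry *entry_alloc(struct device *table, int n,
                                     int ifindex, unsigned int idx)
{
    struct map_entry *e = malloc(sizeof(*e));

    if (!e)
        return NULL;

    e->device = device_get_by_index(table, n, ifindex);
    if (!e->device)
        goto err_free;

    e->idx = idx;
    return e;

err_free:
    free(e);
    return NULL;
}

int main(void)
{
    struct device table[] = { { .ifindex = 3, .refs = 0 } };
    struct map_entry *e = entry_alloc(table, 1, 3, 0);

    printf("%s\n", e ? "allocated" : "failed");
    free(e);
    return 0;
}
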
911 struct bpf_dtab_netdev *dev, *old_dev;
926 dev = NULL;
931 dev = __dev_map_alloc_node(net, dtab, &val, i);
932 if (IS_ERR(dev))
933 return PTR_ERR(dev);
940 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
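
Line 940 installs the new entry in the array map with an unconditional xchg(), publishing it to readers and returning whatever occupied the slot before; the displaced entry is then released through call_rcu() in lines not matched here, so readers still holding the old pointer stay safe. A userspace C11 sketch of that swap-then-release pattern, with a hypothetical entry type and an immediate free() in place of the RCU-deferred one:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int ifindex; };

static _Atomic(struct entry *) slot;

/* Shape of the slot replacement in the update path: publish the new
 * entry atomically, then release the one it displaced.  The kernel
 * defers the release so concurrent readers remain safe; this sketch
 * has no readers and frees right away. */
static void update_slot(struct entry *new_entry)
{
    struct entry *old = atomic_exchange(&slot, new_entry);

    free(old);   /* free(NULL) is a no-op, like the kernel's NULL check */
}

int main(void)
{
    struct entry *a = malloc(sizeof(*a));
    struct entry *b = malloc(sizeof(*b));

    a->ifindex = 1;
    b->ifindex = 2;
    update_slot(a);
    update_slot(b);   /* a is released here */
    printf("slot holds ifindex %d\n", atomic_load(&slot)->ifindex);
    free(atomic_load(&slot));
    return 0;
}
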
960 struct bpf_dtab_netdev *dev, *old_dev;
978 dev = __dev_map_alloc_node(net, dtab, &val, idx);
979 if (IS_ERR(dev)) {
980 err = PTR_ERR(dev);
989 call_rcu(&dev->rcu, __dev_map_entry_free);
995 hlist_add_head_rcu(&dev->index_hlist,
1083 struct bpf_dtab_netdev *dev;
1089 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1090 if (netdev != dev->dev)
1094 hlist_del_rcu(&dev->index_hlist);
1095 call_rcu(&dev->rcu, __dev_map_entry_free);
1119 per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1135 struct bpf_dtab_netdev *dev, *odev;
1137 dev = rcu_dereference(dtab->netdev_map[i]);
1138 if (!dev || netdev != dev->dev)
1140 odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1141 if (dev == odev) {
1142 call_rcu(&dev->rcu,
1163 BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1164 offsetof(struct _bpf_dtab_netdev, dev));
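
The comment at line 66 and the BUILD_BUG_ON() at lines 1163-1164 are two halves of the same contract: the xdp tracepoints read the entry through a mirror struct, so dev has to sit at the same offset in both layouts, and the build breaks if a refactor moves it. A standalone C11 sketch of that compile-time layout check using _Static_assert and offsetof, with hypothetical struct names:

#include <stddef.h>

/* Hypothetical pair of structs that must agree on where 'dev' lives,
 * in the spirit of bpf_dtab_netdev and _bpf_dtab_netdev. */
struct public_view {
    void *dev;          /* must be first member, due to tracepoint */
};

struct private_impl {
    void *dev;
    unsigned int idx;
    void *xdp_prog;
};

/* Compile-time equivalent of the BUILD_BUG_ON() above: if a refactor
 * moves 'dev', the build fails instead of the tracepoint silently
 * reading the wrong field. */
_Static_assert(offsetof(struct private_impl, dev) ==
               offsetof(struct public_view, dev),
               "'dev' must stay at the same offset in both views");

int main(void)
{
    return 0;
}
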