// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/mm.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>

#include <net/mana/mana.h>

/* Transmit an XDP packet (already converted to an skb) on the TX queue
 * recorded in the skb's queue mapping, using the regular MANA xmit path.
 */
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
{
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct netdev_queue *ndevtxq;
	int rc;

	/* Restore the Ethernet header pulled by eth_type_trans() */
	__skb_push(skb, ETH_HLEN);

	ndevtxq = netdev_get_tx_queue(ndev, txq_idx);
	__netif_tx_lock(ndevtxq, smp_processor_id());

	rc = mana_start_xmit(skb, ndev);

	__netif_tx_unlock(ndevtxq);

	if (dev_xmit_complete(rc))
		return;

	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
}

/* Build an skb from an XDP frame and send it on TX queue q_idx */
static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
			    u16 q_idx)
{
	struct sk_buff *skb;

	skb = xdp_build_skb_from_frame(frame, ndev);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_set_queue_mapping(skb, q_idx);

	mana_xdp_tx(skb, ndev);

	return 0;
}

/* ndo_xdp_xmit handler: transmit a batch of XDP frames redirected to this
 * device. Returns the number of frames successfully sent.
 */
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct mana_stats_tx *tx_stats;
	int i, count = 0;
	u16 q_idx;

	if (unlikely(!apc->port_is_up))
		return 0;

	q_idx = smp_processor_id() % ndev->real_num_tx_queues;

	for (i = 0; i < n; i++) {
		if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
			break;

		count++;
	}

	tx_stats = &apc->tx_qp[q_idx].txq.stats;

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->xdp_xmit += count;
	u64_stats_update_end(&tx_stats->syncp);

	return count;
}

/* Run the XDP program attached to the RX queue on a received packet and
 * return its verdict. XDP_REDIRECT is handled here; the other actions are
 * acted upon by the caller.
 */
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len)
{
	struct mana_stats_rx *rx_stats;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	rcu_read_lock();
	prog = rcu_dereference(rxq->bpf_prog);

	if (!prog)
		goto out;

	xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
	xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true);

	act = bpf_prog_run_xdp(prog, xdp);

	rx_stats = &rxq->stats;

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_DROP:
		break;

	case XDP_REDIRECT:
		rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
		if (!rxq->xdp_rc) {
			rxq->xdp_flush = true;

			u64_stats_update_begin(&rx_stats->syncp);
			rx_stats->packets++;
			rx_stats->bytes += pkt_len;
			rx_stats->xdp_redirect++;
			u64_stats_update_end(&rx_stats->syncp);

			break;
		}

		fallthrough;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
	}

out:
	rcu_read_unlock();

	return act;
}

struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
{
	ASSERT_RTNL();

	return apc->bpf_prog;
}

static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc)
{
	return rtnl_dereference(apc->rxqs[0]->bpf_prog);
}

/* Set xdp program on channels */
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog = mana_chn_xdp_get(apc);
	unsigned int num_queues = apc->num_queues;
	int i;

	ASSERT_RTNL();

	if (old_prog == prog)
		return;

	if (prog)
		bpf_prog_add(prog, num_queues);

	for (i = 0; i < num_queues; i++)
		rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);

	if (old_prog)
		for (i = 0; i < num_queues; i++)
			bpf_prog_put(old_prog);
}

/* Install or remove an XDP program on the port, re-creating the RX/TX
 * queues if the port is currently up.
 */
static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct bpf_prog *old_prog;
	struct gdma_context *gc;
	int err;

	gc = apc->ac->gdma_dev->gdma_context;

	old_prog = mana_xdp_get(apc);

	if (!old_prog && !prog)
		return 0;

	if (prog && ndev->mtu > MANA_XDP_MTU_MAX) {
		netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n",
			   ndev->mtu, MANA_XDP_MTU_MAX);
		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

		return -EOPNOTSUPP;
	}

	/* One refcnt of the prog is held by the caller already, so
	 * don't increase refcnt for this one.
	 */
	apc->bpf_prog = prog;

	if (apc->port_is_up) {
		/* Re-create rxq's after xdp prog was loaded or unloaded.
		 * E.g. re-create rxq's to switch from full pages to smaller
		 * size page fragments when xdp prog is unloaded, and
		 * vice-versa.
		 */

		/* Pre-allocate buffers to prevent failure in mana_attach */
		err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "XDP: Insufficient memory for tx/rx re-config");
			return err;
		}

		err = mana_detach(ndev, false);
		if (err) {
			netdev_err(ndev,
				   "mana_detach failed at xdp set: %d\n", err);
			NL_SET_ERR_MSG_MOD(extack,
					   "XDP: Re-config failed at detach");
			goto err_dealloc_rxbuffs;
		}

		err = mana_attach(ndev);
		if (err) {
			netdev_err(ndev,
				   "mana_attach failed at xdp set: %d\n", err);
			NL_SET_ERR_MSG_MOD(extack,
					   "XDP: Re-config failed at attach");
			goto err_dealloc_rxbuffs;
		}

		mana_chn_setxdp(apc, prog);
		mana_pre_dealloc_rxbufs(apc);
	}

	if (old_prog)
		bpf_prog_put(old_prog);

	if (prog)
		ndev->max_mtu = MANA_XDP_MTU_MAX;
	else
		ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;

	return 0;

err_dealloc_rxbuffs:
	apc->bpf_prog = old_prog;
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

/* ndo_bpf handler */
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct netlink_ext_ack *extack = bpf->extack;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return mana_xdp_set(ndev, bpf->prog, extack);

	default:
		return -EOPNOTSUPP;
	}
}