/* xref: /linux/drivers/net/ethernet/amazon/ena/ena_xdp.h (revision 06d07429858317ded2db7986113a9e0129cd599b) */
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /*
3  * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
4  */
5 
6 #ifndef ENA_XDP_H
7 #define ENA_XDP_H
8 
9 #include "ena_netdev.h"
10 #include <linux/bpf_trace.h>
11 
/* The max MTU size is configured to be the ethernet frame size without
 * the overhead of the ethernet header, which can have a VLAN header, and
 * a frame check sequence (FCS).
 * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
 *
 * XDP additionally reserves XDP_PACKET_HEADROOM in front of the frame and
 * a skb_shared_info footprint at the tail (needed when the buffer is later
 * converted to an skb), so both are subtracted from the usable MTU.
 */
#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN -	\
			 VLAN_HLEN - XDP_PACKET_HEADROOM -		\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* True when @index falls inside the adapter's dedicated XDP TX queue
 * range [xdp_first_ring, xdp_first_ring + xdp_num_queues).
 */
#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
	((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
23 
/* Driver-internal verdicts produced by ena_xdp_execute(). These are bit
 * flags (PASS is deliberately 0) so callers can test combinations such as
 * ENA_XDP_FORWARDED with a single mask.
 */
enum ENA_XDP_ACTIONS {
	ENA_XDP_PASS		= 0,
	ENA_XDP_TX		= BIT(0),
	ENA_XDP_REDIRECT	= BIT(1),
	ENA_XDP_DROP		= BIT(2)
};
30 
/* Verdicts that hand the buffer off to a transmit path rather than the
 * local stack: XDP_TX on our own queues or a redirect to another device.
 */
#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
32 
/* Allocate and bring up all dedicated XDP TX queues. */
int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
/* Swap the attached XDP program on RX rings [first, first + count). */
void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					  struct bpf_prog *prog,
					  int first, int count);
/* NAPI poll handler for XDP TX queues. */
int ena_xdp_io_poll(struct napi_struct *napi, int budget);
/* Queue a single XDP frame on @tx_ring; caller holds xdp_tx_lock. */
int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
		       struct ena_adapter *adapter,
		       struct xdp_frame *xdpf,
		       int flags);
/* .ndo_xdp_xmit implementation: transmit a batch of redirected frames. */
int ena_xdp_xmit(struct net_device *dev, int n,
		 struct xdp_frame **frames, u32 flags);
/* .ndo_bpf implementation: attach/detach/query the XDP program. */
int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);
/* Register/unregister the xdp_rxq_info backing an RX ring. */
int ena_xdp_register_rxq_info(struct ena_ring *rx_ring);
void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring);
47 
/* Reasons ena_xdp_allowed() may reject attaching an XDP program. */
enum ena_xdp_errors_t {
	ENA_XDP_ALLOWED = 0,		/* attach is permitted */
	ENA_XDP_CURRENT_MTU_TOO_LARGE,	/* netdev MTU exceeds ENA_XDP_MAX_MTU */
	ENA_XDP_NO_ENOUGH_QUEUES,	/* not enough IO queues for XDP TX rings */
};
53 
ena_xdp_present(struct ena_adapter * adapter)54 static inline bool ena_xdp_present(struct ena_adapter *adapter)
55 {
56 	return !!adapter->xdp_bpf_prog;
57 }
58 
ena_xdp_present_ring(struct ena_ring * ring)59 static inline bool ena_xdp_present_ring(struct ena_ring *ring)
60 {
61 	return !!ring->xdp_bpf_prog;
62 }
63 
ena_xdp_legal_queue_count(struct ena_adapter * adapter,u32 queues)64 static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
65 					     u32 queues)
66 {
67 	return 2 * queues <= adapter->max_num_io_queues;
68 }
69 
ena_xdp_allowed(struct ena_adapter * adapter)70 static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
71 {
72 	enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
73 
74 	if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
75 		rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
76 	else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
77 		rc = ENA_XDP_NO_ENOUGH_QUEUES;
78 
79 	return rc;
80 }
81 
/* Run the ring's attached XDP program on @xdp and carry out its verdict.
 *
 * Returns a driver-internal ENA_XDP_* flag (not the raw XDP_* verdict):
 * ENA_XDP_PASS, ENA_XDP_TX, ENA_XDP_REDIRECT or ENA_XDP_DROP. Exactly one
 * per-ring statistic counter is incremented on every path.
 *
 * NOTE(review): no NULL check on xdp_prog here — presumably callers only
 * invoke this when ena_xdp_present_ring() is true; confirm at call sites.
 */
static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	u32 verdict = ENA_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ena_ring *xdp_ring;
	struct xdp_frame *xdpf;
	u64 *xdp_stat;

	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		/* Conversion fails if there is no room for an xdp_frame at
		 * the buffer head; treat it like an aborted program.
		 */
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			verdict = ENA_XDP_DROP;
			break;
		}

		/* Find xmit queue */
		xdp_ring = rx_ring->xdp_ring;

		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
		spin_lock(&xdp_ring->xdp_tx_lock);

		/* On xmit failure we still own the frame and must recycle it */
		if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
				       XDP_XMIT_FLUSH))
			xdp_return_frame(xdpf);

		spin_unlock(&xdp_ring->xdp_tx_lock);
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		verdict = ENA_XDP_TX;
		break;
	case XDP_REDIRECT:
		/* xdp_do_redirect() takes ownership of the buffer on success */
		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			verdict = ENA_XDP_REDIRECT;
			break;
		}
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		verdict = ENA_XDP_PASS;
		break;
	default:
		/* Unknown verdict from the program: warn once and drop */
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
		verdict = ENA_XDP_DROP;
	}

	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);

	return verdict;
}
151 #endif /* ENA_XDP_H */
152