xref: /linux/include/net/netfilter/nf_flow_table.h (revision fcee7d82f27d6a8b1ddc5bbefda59b4e441e9bc0)
1 #ifndef _NF_FLOW_TABLE_H
2 #define _NF_FLOW_TABLE_H
3 
4 #include <linux/in.h>
5 #include <linux/in6.h>
6 #include <linux/netdevice.h>
7 #include <linux/rhashtable-types.h>
8 #include <linux/rcupdate.h>
9 #include <linux/netfilter.h>
10 #include <linux/netfilter/nf_conntrack_tuple_common.h>
11 #include <net/flow_offload.h>
12 #include <net/dst.h>
13 #include <linux/if_pppox.h>
14 #include <linux/ppp_defs.h>
15 
/* Forward declarations: pointers/enum uses below do not need full types. */
struct nf_flowtable;
struct nf_flow_rule;
struct flow_offload;
enum flow_offload_tuple_dir;
20 
/* Flow dissector key layout used to describe a flow match for offload
 * rules (see struct nf_flow_match); IPv4 and IPv6 addresses share
 * storage via anonymous unions.
 */
struct nf_flow_key {
	struct flow_dissector_key_meta			meta;
	struct flow_dissector_key_control		control;
	struct flow_dissector_key_control		enc_control;	/* encapsulation control */
	struct flow_dissector_key_basic			basic;
	struct flow_dissector_key_vlan			vlan;
	struct flow_dissector_key_vlan			cvlan;		/* inner (customer) VLAN tag */
	union {
		struct flow_dissector_key_ipv4_addrs	ipv4;
		struct flow_dissector_key_ipv6_addrs	ipv6;
	};
	struct flow_dissector_key_keyid			enc_key_id;	/* tunnel key id */
	union {
		struct flow_dissector_key_ipv4_addrs	enc_ipv4;
		struct flow_dissector_key_ipv6_addrs	enc_ipv6;
	};
	struct flow_dissector_key_tcp			tcp;
	struct flow_dissector_key_ports			tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
40 
/* Key/mask pair plus the dissector describing which key fields are used. */
struct nf_flow_match {
	struct flow_dissector	dissector;
	struct nf_flow_key	key;
	struct nf_flow_key	mask;
};

/* Offload rule under construction: the match plus the flow_offload
 * API rule holding the actions.
 */
struct nf_flow_rule {
	struct nf_flow_match	match;
	struct flow_rule	*rule;
};
51 
/* Per-family flowtable backend.  ->get/->put are optional and, when set,
 * are invoked from nf_flow_table_offload_add_cb()/del_cb() below; ->gc,
 * if set, provides an extra per-flow garbage-collection check.
 */
struct nf_flowtable_type {
	struct list_head		list;		/* registration list linkage */
	int				family;		/* L3 family this type handles */
	int				(*init)(struct nf_flowtable *ft);
	bool				(*gc)(const struct flow_offload *flow);
	int				(*setup)(struct nf_flowtable *ft,
						 struct net_device *dev,
						 enum flow_block_command cmd);
	int				(*action)(struct net *net,
						  struct flow_offload *flow,
						  enum flow_offload_tuple_dir dir,
						  struct nf_flow_rule *flow_rule);
	void				(*free)(struct nf_flowtable *ft);
	void				(*get)(struct nf_flowtable *ft);
	void				(*put)(struct nf_flowtable *ft);
	nf_hookfn			*hook;		/* datapath packet hook */
	struct module			*owner;
};
70 
/* Flowtable flags, mirroring the NFT_FLOWTABLE_* uapi values. */
enum nf_flowtable_flags {
	NF_FLOWTABLE_HW_OFFLOAD		= 0x1,	/* NFT_FLOWTABLE_HW_OFFLOAD */
	NF_FLOWTABLE_COUNTER		= 0x2,	/* NFT_FLOWTABLE_COUNTER */
};

/* A flow table instance; layout deliberately puts datapath members first. */
struct nf_flowtable {
	unsigned int			flags;		/* readonly in datapath */
	int				priority;	/* control path (padding hole) */
	struct rhashtable		rhashtable;	/* datapath, read-mostly members come first */

	struct list_head		list;		/* slowpath parts */
	const struct nf_flowtable_type	*type;
	struct delayed_work		gc_work;
	struct flow_block		flow_block;
	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
	possible_net_t			net;
};
88 
nf_flowtable_hw_offload(struct nf_flowtable * flowtable)89 static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
90 {
91 	return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
92 }
93 
/* Tuple direction, aliasing the conntrack direction values. */
enum flow_offload_tuple_dir {
	FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
	FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
};
#define FLOW_OFFLOAD_DIR_MAX	IP_CT_DIR_MAX

/* Transmit path selector for one direction of a flow. */
enum flow_offload_xmit_type {
	FLOW_OFFLOAD_XMIT_UNSPEC	= 0,
	FLOW_OFFLOAD_XMIT_NEIGH,
	FLOW_OFFLOAD_XMIT_XFRM,
	FLOW_OFFLOAD_XMIT_DIRECT,
	FLOW_OFFLOAD_XMIT_TC,
};
107 
/* Maximum number of stacked encapsulation headers tracked per tuple. */
#define NF_FLOW_TABLE_ENCAP_MAX		2

/* Tunnel endpoint addresses of an encapsulated flow. */
struct flow_offload_tunnel {
	union {
		struct in_addr	src_v4;
		struct in6_addr	src_v6;
	};
	union {
		struct in_addr	dst_v4;
		struct in6_addr	dst_v6;
	};

	u8	l3_proto;	/* selects which union members are valid */
};
122 
/* Per-direction lookup key plus cached transmit state for a flow.
 * Everything up to the empty __hash marker participates in hashing,
 * see flow_offload_hash().
 */
struct flow_offload_tuple {
	union {
		struct in_addr		src_v4;
		struct in6_addr		src_v6;
	};
	union {
		struct in_addr		dst_v4;
		struct in6_addr		dst_v6;
	};
	struct {
		__be16			src_port;
		__be16			dst_port;
	};

	int				iifidx;		/* input interface index */

	u8				l3proto;	/* selects v4/v6 union members */
	u8				l4proto;
	struct {
		u16			id;
		__be16			proto;
	} encap[NF_FLOW_TABLE_ENCAP_MAX];	/* stacked encap: id + ethertype */

	struct flow_offload_tunnel	tun;

	/* All members above are keys for lookups, see flow_offload_hash(). */
	struct { }			__hash;

	u16				dir:2,		/* enum flow_offload_tuple_dir */
					xmit_type:3,	/* enum flow_offload_xmit_type */
					encap_num:2,	/* valid entries in encap[] */
					needs_gso_segment:1,
					tun_num:2,
					in_vlan_ingress:2;
	u16				mtu;
	/* Transmit state; NOTE(review): the valid member presumably follows
	 * xmit_type (dst_cache for NEIGH/XFRM, out for DIRECT, tc for TC) —
	 * confirm against the datapath code.
	 */
	union {
		struct {
			struct dst_entry *dst_cache;
			u32		ifidx;
			u32		dst_cookie;
		};
		struct {
			u32		ifidx;
			u8		h_source[ETH_ALEN];
			u8		h_dest[ETH_ALEN];
		} out;
		struct {
			u32		iifidx;
		} tc;
	};
};
174 
/* rhashtable node embedding the lookup tuple. */
struct flow_offload_tuple_rhash {
	struct rhash_head		node;
	struct flow_offload_tuple	tuple;
};

/* State flags of a flow entry (see struct flow_offload below). */
enum nf_flow_flags {
	NF_FLOW_SNAT,
	NF_FLOW_DNAT,
	NF_FLOW_CLOSING,
	NF_FLOW_TEARDOWN,
	NF_FLOW_HW,
	NF_FLOW_HW_DYING,
	NF_FLOW_HW_DEAD,
	NF_FLOW_HW_PENDING,
	NF_FLOW_HW_BIDIRECTIONAL,
	NF_FLOW_HW_ESTABLISHED,
};

enum flow_offload_type {
	NF_FLOW_OFFLOAD_UNSPEC	= 0,
	NF_FLOW_OFFLOAD_ROUTE,
};
197 
/* A bidirectional flow entry: one hashed tuple per direction plus the
 * backing conntrack entry.
 */
struct flow_offload {
	struct flow_offload_tuple_rhash		tuplehash[FLOW_OFFLOAD_DIR_MAX];
	struct nf_conn				*ct;		/* backing conntrack entry */
	unsigned long				flags;		/* nf_flow_flags bits */
	u16					type;		/* enum flow_offload_type */
	u32					timeout;	/* expiry in (u32)jiffies, see nf_flow_timeout_delta() */
	struct rcu_head				rcu_head;
};

/* Default flow lifetime and the 32-bit timestamp it is compared against. */
#define NF_FLOW_TIMEOUT (30 * HZ)
#define nf_flowtable_time_stamp	(u32)jiffies

unsigned long flow_offload_get_timeout(struct flow_offload *flow);
211 
nf_flow_timeout_delta(unsigned int timeout)212 static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
213 {
214 	return (__s32)(timeout - nf_flowtable_time_stamp);
215 }
216 
/* Per-direction routing state gathered to populate the tuples of a new
 * flow entry (consumed by flow_offload_route_init()).
 */
struct nf_flow_route {
	struct {
		struct dst_entry		*dst;
		struct {
			u32			ifindex;
			struct {
				u16		id;
				__be16		proto;
			} encap[NF_FLOW_TABLE_ENCAP_MAX];
			struct flow_offload_tunnel tun;
			u8			num_encaps:2,
						num_tuns:2,
						ingress_vlans:2;
		} in;		/* ingress side */
		struct {
			u32			ifindex;
			u32			hw_ifindex;
			u8			h_source[ETH_ALEN];
			u8			h_dest[ETH_ALEN];
			u8			needs_gso_segment:1;
		} out;		/* egress side */
		enum flow_offload_xmit_type	xmit_type;
	} tuple[FLOW_OFFLOAD_DIR_MAX];
};

struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);

struct nft_flowtable;
struct nft_pktinfo;
int nft_flow_route(const struct nft_pktinfo *pkt, const struct nf_conn *ct,
		   struct nf_flow_route *route, enum ip_conntrack_dir dir,
		   struct nft_flowtable *ft);
250 
251 static inline int
nf_flow_table_offload_add_cb(struct nf_flowtable * flow_table,flow_setup_cb_t * cb,void * cb_priv)252 nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
253 			     flow_setup_cb_t *cb, void *cb_priv)
254 {
255 	struct flow_block *block = &flow_table->flow_block;
256 	struct flow_block_cb *block_cb;
257 	int err = 0;
258 
259 	down_write(&flow_table->flow_block_lock);
260 	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
261 	if (block_cb) {
262 		err = -EEXIST;
263 		goto unlock;
264 	}
265 
266 	block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
267 	if (IS_ERR(block_cb)) {
268 		err = PTR_ERR(block_cb);
269 		goto unlock;
270 	}
271 
272 	list_add_tail(&block_cb->list, &block->cb_list);
273 	up_write(&flow_table->flow_block_lock);
274 
275 	if (flow_table->type->get)
276 		flow_table->type->get(flow_table);
277 	return 0;
278 
279 unlock:
280 	up_write(&flow_table->flow_block_lock);
281 	return err;
282 }
283 
284 static inline void
nf_flow_table_offload_del_cb(struct nf_flowtable * flow_table,flow_setup_cb_t * cb,void * cb_priv)285 nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
286 			     flow_setup_cb_t *cb, void *cb_priv)
287 {
288 	struct flow_block *block = &flow_table->flow_block;
289 	struct flow_block_cb *block_cb;
290 
291 	down_write(&flow_table->flow_block_lock);
292 	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
293 	if (block_cb) {
294 		list_del(&block_cb->list);
295 		flow_block_cb_free(block_cb);
296 	} else {
297 		WARN_ON(true);
298 	}
299 	up_write(&flow_table->flow_block_lock);
300 
301 	if (flow_table->type->put)
302 		flow_table->type->put(flow_table);
303 }
304 
/* Core flow table API. */
void flow_offload_route_init(struct flow_offload *flow,
			     struct nf_flow_route *route);

int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow, bool force);

struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
						     struct flow_offload_tuple *tuple);
void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev);
void nf_flow_table_cleanup(struct net_device *dev);

int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);

void flow_offload_teardown(struct flow_offload *flow);

/* L4 port rewriting for SNAT/DNAT; @thoff is the transport header offset. */
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir);
void nf_flow_dnat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir);

/* L4 port pair as laid out on the wire. */
struct flow_ports {
	__be16 source, dest;
};

struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev);
int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable,
			      struct net_device *dev,
			      enum flow_block_command cmd);

/* Netfilter hook entry points for the IPv4/IPv6 fastpath. */
unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
				     const struct nf_hook_state *state);
unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
				       const struct nf_hook_state *state);
344 
/* nf_flow_register_bpf() is only built when BTF type information is
 * available for the flowtable core (builtin or modular); otherwise it is
 * a no-op stub.
 */
#if (IS_BUILTIN(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
    (IS_MODULE(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
extern int nf_flow_register_bpf(void);
#else
static inline int nf_flow_register_bpf(void)
{
	return 0;
}
#endif
354 
/* Module alias so the matching flowtable backend can be autoloaded. */
#define MODULE_ALIAS_NF_FLOWTABLE(family)	\
	MODULE_ALIAS("nf-flowtable-" __stringify(family))

/* Hardware offload requests for a single flow: add, delete, query stats. */
void nf_flow_offload_add(struct nf_flowtable *flowtable,
			 struct flow_offload *flow);
void nf_flow_offload_del(struct nf_flowtable *flowtable,
			 struct flow_offload *flow);
void nf_flow_offload_stats(struct nf_flowtable *flowtable,
			   struct flow_offload *flow);

void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);

int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
				struct net_device *dev,
				enum flow_block_command cmd);
/* Build the offload rule for one direction of a routed IPv4/IPv6 flow. */
int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule);
int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule);

int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
380 
__nf_flow_pppoe_proto(const struct sk_buff * skb)381 static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
382 {
383 	__be16 proto;
384 
385 	proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
386 			     sizeof(struct pppoe_hdr)));
387 	switch (proto) {
388 	case htons(PPP_IP):
389 		return htons(ETH_P_IP);
390 	case htons(PPP_IPV6):
391 		return htons(ETH_P_IPV6);
392 	}
393 
394 	return 0;
395 }
396 
nf_flow_pppoe_proto(struct sk_buff * skb,__be16 * inner_proto)397 static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
398 {
399 	if (!pskb_may_pull(skb, ETH_HLEN + PPPOE_SES_HLEN))
400 		return false;
401 
402 	*inner_proto = __nf_flow_pppoe_proto(skb);
403 
404 	return true;
405 }
406 
/* Per-cpu flowtable statistics helpers.  The plain variants use
 * __this_cpu_* and so require preemption to already be disabled; the
 * _ATOMIC variants use this_cpu_* and are safe from preemptible context.
 */
#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count)	\
	this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count)	\
	this_cpu_dec((net)->ft.stat->count)
413 
/* Per-netns procfs support; stubbed out when not configured. */
#ifdef CONFIG_NF_FLOW_TABLE_PROCFS
int nf_flow_table_init_proc(struct net *net);
void nf_flow_table_fini_proc(struct net *net);
#else
static inline int nf_flow_table_init_proc(struct net *net)
{
	return 0;
}

static inline void nf_flow_table_fini_proc(struct net *net)
{
}
#endif /* CONFIG_NF_FLOW_TABLE_PROCFS */
427 
428 #endif /* _NF_FLOW_TABLE_H */
429