xref: /linux/net/netfilter/nf_tables_offload.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

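/* Allocate an nft_flow_rule with room for num_actions actions and wire the
 * flow_rule match up to the embedded dissector, mask and key. Returns NULL
 * on allocation failure.
 */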
static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	flow->rule->match.dissector	= &flow->match.dissector;
	flow->rule->match.mask		= &flow->match.mask;
	flow->rule->match.key		= &flow->match.key;

	return flow;
}

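/* Set the address type (e.g. IPv4 vs. IPv6 addresses) in the control key,
 * unless an earlier expression already populated it.
 */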
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
				 enum flow_dissector_key_id addr_type)
{
	struct nft_flow_match *match = &flow->match;
	struct nft_flow_key *mask = &match->mask;
	struct nft_flow_key *key = &match->key;

	if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))
		return;

	key->control.addr_type = addr_type;
	mask->control.addr_type = 0xffff;
	match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
	match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
		offsetof(struct nft_flow_key, control);
}

struct nft_offload_ethertype {
	__be16 value;
	__be16 mask;
};

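/* nft expressions store the 802.1Q/802.1AD ethertype where they matched it;
 * the flow dissector instead expects the encapsulated protocol in the basic
 * key and the TPIDs in the vlan/cvlan keys. Rotate n_proto and the TPID
 * fields accordingly, registering the CVLAN key for double-tagged (QinQ)
 * matches.
 */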
static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
					struct nft_flow_rule *flow)
{
	struct nft_flow_match *match = &flow->match;
	struct nft_offload_ethertype ethertype = {
		.value	= match->key.basic.n_proto,
		.mask	= match->mask.basic.n_proto,
	};

	if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) &&
	    (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
	     match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
		match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
		match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
		match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
			offsetof(struct nft_flow_key, cvlan);
		match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN);
	} else if (match->dissector.used_keys &
		   BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) &&
		   (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
		    match->key.basic.n_proto == htons(ETH_P_8021AD))) {
		match->key.basic.n_proto = match->key.vlan.vlan_tpid;
		match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
		match->key.vlan.vlan_tpid = ethertype.value;
		match->mask.vlan.vlan_tpid = ethertype.mask;
		match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
			offsetof(struct nft_flow_key, vlan);
		match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN);
	}
}

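/* Translate an nftables rule into a flow_rule for hardware offload: a first
 * pass counts the expressions that contribute an action, a second pass runs
 * each expression's ->offload() callback to fill in the match and actions.
 * Returns an ERR_PTR if the rule cannot be expressed as a flow rule.
 */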
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (nft_expr_more(rule, expr)) {
		if (expr->ops->offload_action &&
		    expr->ops->offload_action(expr))
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (nft_expr_more(rule, expr)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	nft_flow_rule_transfer_vlan(ctx, flow);

	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

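/* Release the references that expression offload callbacks may have taken,
 * in particular the net_device held by redirect/mirred actions, then free
 * the flow rule.
 */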
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

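/* Record a protocol value supplied by an expression as the pending network
 * (l3num) or transport layer (protonum) dependency, then reset the
 * dependency type. l3num is later used as the protocol of the flow rule.
 */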
void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

static int nft_setup_cb_call(enum tc_setup_type type, void *type_data,
			     struct list_head *cb_list)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

static int nft_chain_offload_priority(const struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

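/* A base chain is offloadable only if its priority is within (0, USHRT_MAX]
 * and every hook is a netdev ingress hook whose driver implements
 * ndo_setup_tc or, failing that, an indirect flow block handler is
 * registered.
 */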
bool nft_chain_offload_support(const struct nft_base_chain *basechain)
{
	struct net_device *dev;
	struct nft_hook *hook;

	if (nft_chain_offload_priority(basechain) < 0)
		return false;

	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (hook->ops.pf != NFPROTO_NETDEV ||
		    hook->ops.hooknum != NF_NETDEV_INGRESS)
			return false;

		dev = hook->ops.dev;
		if (!dev->netdev_ops->ndo_setup_tc && !flow_indr_dev_exists())
			return false;
	}

	return true;
}

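/* Build a flow_cls_offload request for the given command. The rule pointer
 * doubles as the cookie that identifies the rule towards the driver; the
 * flow rule and its protocol are only filled in when a translated flow is
 * passed (FLOW_CLS_REPLACE), otherwise the protocol defaults to ETH_P_ALL.
 */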
static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow,
				       const struct nft_base_chain *basechain,
				       const struct nft_rule *rule,
				       const struct nft_flow_rule *flow,
				       struct netlink_ext_ack *extack,
				       enum flow_cls_command command)
{
	__be16 proto = ETH_P_ALL;

	memset(cls_flow, 0, sizeof(*cls_flow));

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow->common, proto,
				     basechain->ops.priority, extack);
	cls_flow->command = command;
	cls_flow->cookie = (unsigned long) rule;
	if (flow)
		cls_flow->rule = flow->rule;
}

static int nft_flow_offload_cmd(const struct nft_chain *chain,
				const struct nft_rule *rule,
				struct nft_flow_rule *flow,
				enum flow_cls_command command,
				struct flow_cls_offload *cls_flow)
{
	struct netlink_ext_ack extack = {};
	struct nft_base_chain *basechain;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack,
				   command);

	return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow,
				 &basechain->flow_block.cb_list);
}

static int nft_flow_offload_rule(const struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct flow_cls_offload cls_flow;

	return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow);
}

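/* Fetch hardware counters for an offloaded rule with FLOW_CLS_STATS and let
 * every expression that implements ->offload_stats() (such as counters) add
 * the returned statistics to its software state.
 */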
int nft_flow_rule_stats(const struct nft_chain *chain,
			const struct nft_rule *rule)
{
	struct flow_cls_offload cls_flow = {};
	struct nft_expr *expr, *next;
	int err;

	err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS,
				   &cls_flow);
	if (err < 0)
		return err;

	nft_rule_for_each_expr(expr, next, rule) {
		if (expr->ops->offload_stats)
			expr->ops->offload_stats(expr, &cls_flow.stats);
	}

	return 0;
}

static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

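/* Tear down a flow block binding: ask the driver to destroy every rule that
 * was offloaded through this chain, then release the block callbacks that
 * were collected during the bind.
 */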
static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;
	struct flow_cls_offload cls_flow;
	struct netlink_ext_ack extack;
	struct nft_chain *chain;
	struct nft_rule *rule;

	chain = &basechain->chain;
	list_for_each_entry(rule, &chain->rules, list) {
		memset(&extack, 0, sizeof(extack));
		nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL,
					   &extack, FLOW_CLS_DESTROY);
		nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list);
	}

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

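/* Prepare a flow_block_offload request to bind or unbind the chain's flow
 * block on the ingress side of a device.
 */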
static void nft_flow_block_offload_init(struct flow_block_offload *bo,
					struct net *net,
					enum flow_block_command cmd,
					struct nft_base_chain *basechain,
					struct netlink_ext_ack *extack)
{
	memset(bo, 0, sizeof(*bo));
	bo->net		= net;
	bo->block	= &basechain->flow_block;
	bo->command	= cmd;
	bo->binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack	= extack;
	bo->cb_list_head = &basechain->flow_block.cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

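/* Cleanup callback passed to the indirect flow block infrastructure: when
 * the handler goes away (e.g. the driver unregisters its indirect block
 * support), destroy the offloaded rules and release the block callback
 * under the nftables commit mutex.
 */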
static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
{
	struct nft_base_chain *basechain = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct flow_block_offload bo;

	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
				    basechain, &extack);
	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	nft_flow_offload_unbind(&bo, basechain);
	mutex_unlock(&nft_net->commit_mutex);
}

static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);

	err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
					  nft_indr_block_cleanup);
	if (err < 0)
		return err;

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(basechain, &bo, cmd);
}

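/* Bind or unbind the chain's flow block on a single device, using the
 * driver's ndo_setup_tc when available and the indirect flow block
 * infrastructure otherwise.
 */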
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	int err;

	if (dev->netdev_ops->ndo_setup_tc)
		err = nft_block_offload_cmd(basechain, dev, cmd);
	else
		err = nft_indr_block_offload_cmd(basechain, dev, cmd);

	return err;
}

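/* Run a flow block command on every hook device of the base chain, or only
 * on this_dev when it is non-NULL. If a bind fails while walking all hooks,
 * roll back by unbinding the devices that were already set up.
 */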
static int nft_flow_block_chain(struct nft_base_chain *basechain,
				const struct net_device *this_dev,
				enum flow_block_command cmd)
{
	struct net_device *dev;
	struct nft_hook *hook;
	int err, i = 0;

	list_for_each_entry(hook, &basechain->hook_list, list) {
		dev = hook->ops.dev;
		if (this_dev && this_dev != dev)
			continue;

		err = nft_chain_offload_cmd(basechain, dev, cmd);
		if (err < 0 && cmd == FLOW_BLOCK_BIND) {
			if (!this_dev)
				goto err_flow_block;

			return err;
		}
		i++;
	}

	return 0;

err_flow_block:
	list_for_each_entry(hook, &basechain->hook_list, list) {
		if (i-- <= 0)
			break;

		dev = hook->ops.dev;
		nft_chain_offload_cmd(basechain, dev, FLOW_BLOCK_UNBIND);
	}
	return err;
}

static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	return nft_flow_block_chain(basechain, NULL, cmd);
}

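/* Undo the offload work already done for this transaction batch: walk the
 * commit list backwards from the failing transaction and issue the inverse
 * command for every netdev-family chain and rule that was touched.
 */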
static void nft_flow_rule_offload_abort(struct net *net,
					struct nft_trans *trans)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	int err = 0;

	list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) {
		if (trans->table->family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_chain(nft_trans_chain(trans), NULL,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		case NFT_MSG_DELRULE:
			if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		}

		if (WARN_ON_ONCE(err))
			break;
	}
}

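/* Push the pending transactions of a commit to the hardware: bind/unbind
 * flow blocks for new/deleted offloaded chains and replace/destroy the
 * corresponding flow rules; only appended rule insertions are supported.
 * On the first error, abort what has been offloaded so far and return the
 * error to the transaction path.
 */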
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nftables_pernet *nft_net = nft_pernet(net);
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &nft_net->commit_list, list) {
		if (trans->table->family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(nft_trans_chain(trans), &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			if (trans->flags & NLM_F_REPLACE ||
			    !(trans->flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(nft_trans_rule_chain(trans),
						    nft_trans_rule(trans),
						    NULL, FLOW_CLS_DESTROY);
			break;
		}

		if (err) {
			nft_flow_rule_offload_abort(net, trans);
			break;
		}
	}

	return err;
}

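/* Find an offload-enabled netdev base chain with a hook on the given device,
 * or return NULL. Called with the nftables commit mutex held.
 */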
static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net,
						 struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct nft_hook *hook, *found;
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &nft_net->tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			found = NULL;
			basechain = nft_base_chain(chain);
			list_for_each_entry(hook, &basechain->hook_list, list) {
				if (hook->ops.dev != dev)
					continue;

				found = hook;
				break;
			}
			if (!found)
				continue;

			return chain;
		}
	}

	return NULL;
}

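/* Netdevice notifier: on NETDEV_UNREGISTER, unbind the flow block of any
 * offloaded base chain hooked on the vanishing device.
 */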
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct nftables_pernet *nft_net;
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	nft_net = nft_pernet(net);
	mutex_lock(&nft_net->commit_mutex);
	chain = __nft_offload_get_chain(nft_net, dev);
	if (chain)
		nft_flow_block_chain(nft_base_chain(chain), dev,
				     FLOW_BLOCK_UNBIND);

	mutex_unlock(&nft_net->commit_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};

int nft_offload_init(void)
{
	return register_netdevice_notifier(&nft_offload_netdev_notifier);
}

void nft_offload_exit(void)
{
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}