// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32-bit miss_cookie_base
 * plus the action's index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};
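
/* Example: on a little-endian host, miss_cookie_base 0x2a combined with
 * act_index 1 encodes as the u64 cookie 0x000000010000002a. The byte
 * layout is endian-dependent, so cookies must be encoded and decoded
 * through this union rather than with open-coded shifts.
 */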

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find a classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module, we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
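
/* Example: with no filters installed the first auto-allocated prio is
 * TC_H_MAJ(0xC0000000), i.e. user-visible preference 0xC000 (49152).
 * If the current head was added with preference 2 (tp->prio 0x00020000),
 * the next auto prio is TC_H_MAJ(0x0001ffff) == 0x00010000, preference 1,
 * so each auto-added filter sorts just before the current head.
 */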

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error, return false to take the rtnl lock. The proto
	 * lookup/create functions will perform the lookup again and
	 * properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if the block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* If all the references are action references, this chain
	 * should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send a notification only if we got the first non-action
	 * reference. Until then, the chain acts only as a placeholder
	 * for actions pointing to it and the user ought not to know
	 * about it.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding the block
	 * lock. However, once the block is unlocked the chain can be
	 * changed concurrently, so save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0,
					       false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

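	/* The device has no ndo_setup_tc: register with the indirect block
	 * infrastructure so drivers (e.g. tunnel device offload) can attach
	 * their callbacks later, and report -EOPNOTSUPP so the caller
	 * accounts this as an unoffloaded device.
	 */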
	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to
	 * bind to already has some offloaded filters, forbid the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store the q pointer for shared blocks */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
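
/* Typical iteration, as in tcf_block_flush_all_chains() below:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 *
 * Each step drops the reference on the previous chain, so a caller that
 * breaks out early must tcf_chain_put() the chain it still holds.
 */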

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
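
/* Typical iteration, mirroring tcf_get_next_chain() above:
 *
 *	for (tp = tcf_get_next_proto(chain, NULL); tp;
 *	     tp = tcf_get_next_proto(chain, tp))
 *		...;
 *
 * Breaking out early leaves a reference that must be dropped with
 * tcf_proto_put(tp, true, NULL).
 */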

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that the qdisc is not noop_qdisc,
	 * which means that the qdisc holds a reference to the net_device
	 * and we hold a reference to the qdisc, so it is safe to release
	 * the rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Are we searching for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the cls API rules update path without the rtnl
		 * lock. The caller must release the block when finished using
		 * it. The 'if' branch of this conditional obtains its
		 * reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if
		 * chain_list is empty, the block has to be manually
		 * deallocated. After the block's reference counter has
		 * reached 0, it is no longer possible to increment it or
		 * to add new chains to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* A nonzero block_index means a shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
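
/* Sketch of typical usage from a classful qdisc's ->init() path
 * (names vary per qdisc):
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * The matching ->destroy() then calls tcf_block_put(q->block).
 */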

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for the protocol, and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-look up the tp and chain based on index
			 * instead of having hard refs and locks to them, so
			 * sanity-check whether any of tp, chain or exts was
			 * replaced by the time we got here with a cookie from
			 * hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts)) {
				tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
				return TC_ACT_SHOT;
			}

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts)) {
				tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
				return TC_ACT_SHOT;
			}

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	if (unlikely(n)) {
		tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
		return TC_ACT_SHOT;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	struct tcf_exts_miss_cookie_node *n = NULL;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int act_index = 0;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && (ext->chain || ext->act_miss)) {
			struct tcf_chain *fchain;
			u32 chain;

			if (ext->act_miss) {
				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
								&act_index);
				if (!n) {
					tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
					return TC_ACT_SHOT;
				}

				chain = n->chain_index;
			} else {
				chain = ext->chain;
			}

			fchain = tcf_chain_lookup_rcu(block, chain);
			if (!fchain) {
				tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
				return TC_ACT_SHOT;
			}

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext)) {
				tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
				return TC_ACT_SHOT;
			}

			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert the new proto.
 * If a proto with the specified priority already exists, free the new
 * proto and return the existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

1945 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1946 					   struct tcf_chain_info *chain_info,
1947 					   u32 protocol, u32 prio,
1948 					   bool prio_allocate)
1949 {
1950 	struct tcf_proto **pprev;
1951 	struct tcf_proto *tp;
1952 
1953 	/* Check the chain for existence of proto-tcf with this priority */
1954 	for (pprev = &chain->filter_chain;
1955 	     (tp = tcf_chain_dereference(*pprev, chain));
1956 	     pprev = &tp->next) {
1957 		if (tp->prio >= prio) {
1958 			if (tp->prio == prio) {
1959 				if (prio_allocate ||
1960 				    (tp->protocol != protocol && protocol))
1961 					return ERR_PTR(-EINVAL);
1962 			} else {
1963 				tp = NULL;
1964 			}
1965 			break;
1966 		}
1967 	}
1968 	chain_info->pprev = pprev;
1969 	if (tp) {
1970 		chain_info->next = tp->next;
1971 		tcf_proto_get(tp);
1972 	} else {
1973 		chain_info->next = NULL;
1974 	}
1975 	return tp;
1976 }
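
/* tcf_chain_tp_find() relies on the chain being kept sorted by ascending
 * prio, so the walk can stop at the first proto with prio >= the requested
 * one. Illustrative example (hypothetical prios): with protos at prios
 * {10, 20, 30}, a lookup for prio 20 returns that proto, or -EINVAL if
 * prio_allocate is set or a different nonzero protocol was given; a lookup
 * for prio 15 returns NULL with chain_info->pprev set to the link between
 * 10 and 20, i.e. the insertion point tcf_chain_tp_insert() will use.
 */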
1977 
1978 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1979 			 struct tcf_proto *tp, struct tcf_block *block,
1980 			 struct Qdisc *q, u32 parent, void *fh,
1981 			 u32 portid, u32 seq, u16 flags, int event,
1982 			 bool terse_dump, bool rtnl_held,
1983 			 struct netlink_ext_ack *extack)
1984 {
1985 	struct tcmsg *tcm;
1986 	struct nlmsghdr *nlh;
1987 	unsigned char *b = skb_tail_pointer(skb);
1988 
1989 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1990 	if (!nlh)
1991 		goto out_nlmsg_trim;
1992 	tcm = nlmsg_data(nlh);
1993 	tcm->tcm_family = AF_UNSPEC;
1994 	tcm->tcm__pad1 = 0;
1995 	tcm->tcm__pad2 = 0;
1996 	if (q) {
1997 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1998 		tcm->tcm_parent = parent;
1999 	} else {
2000 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2001 		tcm->tcm_block_index = block->index;
2002 	}
2003 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
2004 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
2005 		goto nla_put_failure;
2006 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
2007 		goto nla_put_failure;
2008 	if (!fh) {
2009 		tcm->tcm_handle = 0;
2010 	} else if (terse_dump) {
2011 		if (tp->ops->terse_dump) {
2012 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
2013 						rtnl_held) < 0)
2014 				goto nla_put_failure;
2015 		} else {
2016 			goto cls_op_not_supp;
2017 		}
2018 	} else {
2019 		if (tp->ops->dump &&
2020 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2021 			goto nla_put_failure;
2022 	}
2023 
2024 	if (extack && extack->_msg &&
2025 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2026 		goto nla_put_failure;
2027 
2028 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2029 
2030 	return skb->len;
2031 
2032 out_nlmsg_trim:
2033 nla_put_failure:
2034 cls_op_not_supp:
2035 	nlmsg_trim(skb, b);
2036 	return -1;
2037 }
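
/* Sketch of the netlink message tcf_fill_node() builds (attribute order as
 * coded above; the classifier-specific part depends on tp->ops):
 *
 *	struct nlmsghdr		type = event, e.g. RTM_NEWTFILTER
 *	struct tcmsg		ifindex/parent or magic block index,
 *				tcm_info = TC_H_MAKE(prio, protocol)
 *	TCA_KIND		classifier name, e.g. "flower"
 *	TCA_CHAIN		chain index
 *	<classifier attrs>	from ->dump() or ->terse_dump()
 *	TCA_EXT_WARN_MSG	optional extack warning string
 */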
2038 
2039 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2040 			  struct nlmsghdr *n, struct tcf_proto *tp,
2041 			  struct tcf_block *block, struct Qdisc *q,
2042 			  u32 parent, void *fh, int event, bool unicast,
2043 			  bool rtnl_held, struct netlink_ext_ack *extack)
2044 {
2045 	struct sk_buff *skb;
2046 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2047 	int err = 0;
2048 
2049 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2050 	if (!skb)
2051 		return -ENOBUFS;
2052 
2053 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2054 			  n->nlmsg_seq, n->nlmsg_flags, event,
2055 			  false, rtnl_held, extack) <= 0) {
2056 		kfree_skb(skb);
2057 		return -EINVAL;
2058 	}
2059 
2060 	if (unicast)
2061 		err = rtnl_unicast(skb, net, portid);
2062 	else
2063 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2064 				     n->nlmsg_flags & NLM_F_ECHO);
2065 	return err;
2066 }
2067 
2068 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2069 			      struct nlmsghdr *n, struct tcf_proto *tp,
2070 			      struct tcf_block *block, struct Qdisc *q,
2071 			      u32 parent, void *fh, bool unicast, bool *last,
2072 			      bool rtnl_held, struct netlink_ext_ack *extack)
2073 {
2074 	struct sk_buff *skb;
2075 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2076 	int err;
2077 
2078 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2079 	if (!skb)
2080 		return -ENOBUFS;
2081 
2082 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2083 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
2084 			  false, rtnl_held, extack) <= 0) {
2085 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2086 		kfree_skb(skb);
2087 		return -EINVAL;
2088 	}
2089 
2090 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2091 	if (err) {
2092 		kfree_skb(skb);
2093 		return err;
2094 	}
2095 
2096 	if (unicast)
2097 		err = rtnl_unicast(skb, net, portid);
2098 	else
2099 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2100 				     n->nlmsg_flags & NLM_F_ECHO);
2101 	if (err < 0)
2102 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2103 
2104 	return err;
2105 }
2106 
2107 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2108 				 struct tcf_block *block, struct Qdisc *q,
2109 				 u32 parent, struct nlmsghdr *n,
2110 				 struct tcf_chain *chain, int event,
2111 				 struct netlink_ext_ack *extack)
2112 {
2113 	struct tcf_proto *tp;
2114 
2115 	for (tp = tcf_get_next_proto(chain, NULL);
2116 	     tp; tp = tcf_get_next_proto(chain, tp))
2117 		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2118 			       event, false, true, extack);
2119 }
2120 
2121 static void tfilter_put(struct tcf_proto *tp, void *fh)
2122 {
2123 	if (tp->ops->put && fh)
2124 		tp->ops->put(tp, fh);
2125 }
2126 
2127 static bool is_qdisc_ingress(__u32 classid)
2128 {
2129 	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2130 }
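
/* TC_H_MIN_INGRESS is the reserved minor of the clsact ingress hook, so
 * is_qdisc_ingress() is true for filters attached at ingress. The result
 * only feeds TCA_ACT_FLAGS_AT_INGRESS below, which lets direction-aware
 * actions (e.g. mirred) know where they run.
 */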
2131 
2132 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2133 			  struct netlink_ext_ack *extack)
2134 {
2135 	struct net *net = sock_net(skb->sk);
2136 	struct nlattr *tca[TCA_MAX + 1];
2137 	char name[IFNAMSIZ];
2138 	struct tcmsg *t;
2139 	u32 protocol;
2140 	u32 prio;
2141 	bool prio_allocate;
2142 	u32 parent;
2143 	u32 chain_index;
2144 	struct Qdisc *q;
2145 	struct tcf_chain_info chain_info;
2146 	struct tcf_chain *chain;
2147 	struct tcf_block *block;
2148 	struct tcf_proto *tp;
2149 	unsigned long cl;
2150 	void *fh;
2151 	int err;
2152 	int tp_created;
2153 	bool rtnl_held = false;
2154 	u32 flags;
2155 
2156 replay:
2157 	tp_created = 0;
2158 
2159 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2160 				     rtm_tca_policy, extack);
2161 	if (err < 0)
2162 		return err;
2163 
2164 	t = nlmsg_data(n);
2165 	protocol = TC_H_MIN(t->tcm_info);
2166 	prio = TC_H_MAJ(t->tcm_info);
2167 	prio_allocate = false;
2168 	parent = t->tcm_parent;
2169 	tp = NULL;
2170 	cl = 0;
2171 	block = NULL;
2172 	q = NULL;
2173 	chain = NULL;
2174 	flags = 0;
2175 
2176 	if (prio == 0) {
2177 		/* If no priority is provided by the user,
2178 		 * we allocate one.
2179 		 */
2180 		if (n->nlmsg_flags & NLM_F_CREATE) {
2181 			prio = TC_H_MAKE(0x80000000U, 0U);
2182 			prio_allocate = true;
2183 		} else {
2184 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2185 			return -ENOENT;
2186 		}
2187 	}
2188 
2189 	/* Find head of filter chain. */
2190 
2191 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2192 	if (err)
2193 		return err;
2194 
2195 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2196 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2197 		err = -EINVAL;
2198 		goto errout;
2199 	}
2200 
2201 	/* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2202 	 * the block is shared (no qdisc found), the qdisc is not unlocked, the
2203 	 * classifier type is not specified, or the classifier is not unlocked.
2204 	 */
2205 	if (rtnl_held ||
2206 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2207 	    !tcf_proto_is_unlocked(name)) {
2208 		rtnl_held = true;
2209 		rtnl_lock();
2210 	}
2211 
2212 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2213 	if (err)
2214 		goto errout;
2215 
2216 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2217 				 extack);
2218 	if (IS_ERR(block)) {
2219 		err = PTR_ERR(block);
2220 		goto errout;
2221 	}
2222 	block->classid = parent;
2223 
2224 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2225 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2226 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2227 		err = -EINVAL;
2228 		goto errout;
2229 	}
2230 	chain = tcf_chain_get(block, chain_index, true);
2231 	if (!chain) {
2232 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2233 		err = -ENOMEM;
2234 		goto errout;
2235 	}
2236 
2237 	mutex_lock(&chain->filter_chain_lock);
2238 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2239 			       prio, prio_allocate);
2240 	if (IS_ERR(tp)) {
2241 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2242 		err = PTR_ERR(tp);
2243 		goto errout_locked;
2244 	}
2245 
2246 	if (tp == NULL) {
2247 		struct tcf_proto *tp_new = NULL;
2248 
2249 		if (chain->flushing) {
2250 			err = -EAGAIN;
2251 			goto errout_locked;
2252 		}
2253 
2254 		/* Proto-tcf does not exist, create new one */
2255 
2256 		if (tca[TCA_KIND] == NULL || !protocol) {
2257 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2258 			err = -EINVAL;
2259 			goto errout_locked;
2260 		}
2261 
2262 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2263 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2264 			err = -ENOENT;
2265 			goto errout_locked;
2266 		}
2267 
2268 		if (prio_allocate)
2269 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2270 							       &chain_info));
2271 
2272 		mutex_unlock(&chain->filter_chain_lock);
2273 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2274 					  rtnl_held, extack);
2275 		if (IS_ERR(tp_new)) {
2276 			err = PTR_ERR(tp_new);
2277 			goto errout_tp;
2278 		}
2279 
2280 		tp_created = 1;
2281 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2282 						rtnl_held);
2283 		if (IS_ERR(tp)) {
2284 			err = PTR_ERR(tp);
2285 			goto errout_tp;
2286 		}
2287 	} else {
2288 		mutex_unlock(&chain->filter_chain_lock);
2289 	}
2290 
2291 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2292 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2293 		err = -EINVAL;
2294 		goto errout;
2295 	}
2296 
2297 	fh = tp->ops->get(tp, t->tcm_handle);
2298 
2299 	if (!fh) {
2300 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2301 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2302 			err = -ENOENT;
2303 			goto errout;
2304 		}
2305 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2306 		tfilter_put(tp, fh);
2307 		NL_SET_ERR_MSG(extack, "Filter already exists");
2308 		err = -EEXIST;
2309 		goto errout;
2310 	}
2311 
2312 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2313 		tfilter_put(tp, fh);
2314 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2315 		err = -EINVAL;
2316 		goto errout;
2317 	}
2318 
2319 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2320 		flags |= TCA_ACT_FLAGS_REPLACE;
2321 	if (!rtnl_held)
2322 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2323 	if (is_qdisc_ingress(parent))
2324 		flags |= TCA_ACT_FLAGS_AT_INGRESS;
2325 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2326 			      flags, extack);
2327 	if (err == 0) {
2328 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2329 			       RTM_NEWTFILTER, false, rtnl_held, extack);
2330 		tfilter_put(tp, fh);
2331 		/* q pointer is NULL for shared blocks */
2332 		if (q)
2333 			q->flags &= ~TCQ_F_CAN_BYPASS;
2334 	}
2335 
2336 errout:
2337 	if (err && tp_created)
2338 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2339 errout_tp:
2340 	if (chain) {
2341 		if (tp && !IS_ERR(tp))
2342 			tcf_proto_put(tp, rtnl_held, NULL);
2343 		if (!tp_created)
2344 			tcf_chain_put(chain);
2345 	}
2346 	tcf_block_release(q, block, rtnl_held);
2347 
2348 	if (rtnl_held)
2349 		rtnl_unlock();
2350 
2351 	if (err == -EAGAIN) {
2352 		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
2353 		 * of target chain.
2354 		 */
2355 		rtnl_held = true;
2356 		/* Replay the request. */
2357 		goto replay;
2358 	}
2359 	return err;
2360 
2361 errout_locked:
2362 	mutex_unlock(&chain->filter_chain_lock);
2363 	goto errout;
2364 }
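
/* Illustrative userspace trigger for tc_new_tfilter() (assuming iproute2;
 * device, address and prio are examples only):
 *
 *	tc filter add dev eth0 ingress prio 10 protocol ip \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * "add" sends RTM_NEWTFILTER with NLM_F_CREATE | NLM_F_EXCL, so the
 * NLM_F_EXCL branch above rejects an existing filter with -EEXIST, while
 * "tc filter replace" clears NLM_F_EXCL and changes the existing filter
 * through ->change().
 */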
2365 
2366 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2367 			  struct netlink_ext_ack *extack)
2368 {
2369 	struct net *net = sock_net(skb->sk);
2370 	struct nlattr *tca[TCA_MAX + 1];
2371 	char name[IFNAMSIZ];
2372 	struct tcmsg *t;
2373 	u32 protocol;
2374 	u32 prio;
2375 	u32 parent;
2376 	u32 chain_index;
2377 	struct Qdisc *q = NULL;
2378 	struct tcf_chain_info chain_info;
2379 	struct tcf_chain *chain = NULL;
2380 	struct tcf_block *block = NULL;
2381 	struct tcf_proto *tp = NULL;
2382 	unsigned long cl = 0;
2383 	void *fh = NULL;
2384 	int err;
2385 	bool rtnl_held = false;
2386 
2387 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2388 				     rtm_tca_policy, extack);
2389 	if (err < 0)
2390 		return err;
2391 
2392 	t = nlmsg_data(n);
2393 	protocol = TC_H_MIN(t->tcm_info);
2394 	prio = TC_H_MAJ(t->tcm_info);
2395 	parent = t->tcm_parent;
2396 
2397 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2398 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2399 		return -ENOENT;
2400 	}
2401 
2402 	/* Find head of filter chain. */
2403 
2404 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2405 	if (err)
2406 		return err;
2407 
2408 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2409 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2410 		err = -EINVAL;
2411 		goto errout;
2412 	}
2413 	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2414 	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2415 	 * not specified, or the classifier is not unlocked.
2416 	 */
2417 	if (!prio ||
2418 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2419 	    !tcf_proto_is_unlocked(name)) {
2420 		rtnl_held = true;
2421 		rtnl_lock();
2422 	}
2423 
2424 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2425 	if (err)
2426 		goto errout;
2427 
2428 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2429 				 extack);
2430 	if (IS_ERR(block)) {
2431 		err = PTR_ERR(block);
2432 		goto errout;
2433 	}
2434 
2435 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2436 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2437 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2438 		err = -EINVAL;
2439 		goto errout;
2440 	}
2441 	chain = tcf_chain_get(block, chain_index, false);
2442 	if (!chain) {
2443 		/* User requested flush on non-existent chain. Nothing to do,
2444 		 * so just return success.
2445 		 */
2446 		if (prio == 0) {
2447 			err = 0;
2448 			goto errout;
2449 		}
2450 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2451 		err = -ENOENT;
2452 		goto errout;
2453 	}
2454 
2455 	if (prio == 0) {
2456 		tfilter_notify_chain(net, skb, block, q, parent, n,
2457 				     chain, RTM_DELTFILTER, extack);
2458 		tcf_chain_flush(chain, rtnl_held);
2459 		err = 0;
2460 		goto errout;
2461 	}
2462 
2463 	mutex_lock(&chain->filter_chain_lock);
2464 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2465 			       prio, false);
2466 	if (!tp || IS_ERR(tp)) {
2467 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2468 		err = tp ? PTR_ERR(tp) : -ENOENT;
2469 		goto errout_locked;
2470 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2471 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2472 		err = -EINVAL;
2473 		goto errout_locked;
2474 	} else if (t->tcm_handle == 0) {
2475 		tcf_proto_signal_destroying(chain, tp);
2476 		tcf_chain_tp_remove(chain, &chain_info, tp);
2477 		mutex_unlock(&chain->filter_chain_lock);
2478 
2479 		tcf_proto_put(tp, rtnl_held, NULL);
2480 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2481 			       RTM_DELTFILTER, false, rtnl_held, extack);
2482 		err = 0;
2483 		goto errout;
2484 	}
2485 	mutex_unlock(&chain->filter_chain_lock);
2486 
2487 	fh = tp->ops->get(tp, t->tcm_handle);
2488 
2489 	if (!fh) {
2490 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2491 		err = -ENOENT;
2492 	} else {
2493 		bool last;
2494 
2495 		err = tfilter_del_notify(net, skb, n, tp, block,
2496 					 q, parent, fh, false, &last,
2497 					 rtnl_held, extack);
2498 
2499 		if (err)
2500 			goto errout;
2501 		if (last)
2502 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2503 	}
2504 
2505 errout:
2506 	if (chain) {
2507 		if (tp && !IS_ERR(tp))
2508 			tcf_proto_put(tp, rtnl_held, NULL);
2509 		tcf_chain_put(chain);
2510 	}
2511 	tcf_block_release(q, block, rtnl_held);
2512 
2513 	if (rtnl_held)
2514 		rtnl_unlock();
2515 
2516 	return err;
2517 
2518 errout_locked:
2519 	mutex_unlock(&chain->filter_chain_lock);
2520 	goto errout;
2521 }
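
/* tc_del_tfilter() serves several delete granularities; illustrative
 * iproute2 invocations:
 *
 *	tc filter del dev eth0 ingress			(prio == 0: flush)
 *	tc filter del dev eth0 ingress prio 10 protocol ip flower
 *	tc filter del dev eth0 ingress prio 10 handle 0x1 protocol ip flower
 *
 * With prio == 0 the whole chain is flushed; with a prio but no handle the
 * matching proto is unlinked as a whole; with a handle a single filter is
 * deleted via ->delete(), and the proto is reaped afterwards if it became
 * empty.
 */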
2522 
2523 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2524 			  struct netlink_ext_ack *extack)
2525 {
2526 	struct net *net = sock_net(skb->sk);
2527 	struct nlattr *tca[TCA_MAX + 1];
2528 	char name[IFNAMSIZ];
2529 	struct tcmsg *t;
2530 	u32 protocol;
2531 	u32 prio;
2532 	u32 parent;
2533 	u32 chain_index;
2534 	struct Qdisc *q = NULL;
2535 	struct tcf_chain_info chain_info;
2536 	struct tcf_chain *chain = NULL;
2537 	struct tcf_block *block = NULL;
2538 	struct tcf_proto *tp = NULL;
2539 	unsigned long cl = 0;
2540 	void *fh = NULL;
2541 	int err;
2542 	bool rtnl_held = false;
2543 
2544 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2545 				     rtm_tca_policy, extack);
2546 	if (err < 0)
2547 		return err;
2548 
2549 	t = nlmsg_data(n);
2550 	protocol = TC_H_MIN(t->tcm_info);
2551 	prio = TC_H_MAJ(t->tcm_info);
2552 	parent = t->tcm_parent;
2553 
2554 	if (prio == 0) {
2555 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2556 		return -ENOENT;
2557 	}
2558 
2559 	/* Find head of filter chain. */
2560 
2561 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2562 	if (err)
2563 		return err;
2564 
2565 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2566 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2567 		err = -EINVAL;
2568 		goto errout;
2569 	}
2570 	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2571 	 * qdisc is not unlocked, the classifier type is not specified, or the
2572 	 * classifier is not unlocked.
2573 	 */
2574 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2575 	    !tcf_proto_is_unlocked(name)) {
2576 		rtnl_held = true;
2577 		rtnl_lock();
2578 	}
2579 
2580 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2581 	if (err)
2582 		goto errout;
2583 
2584 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2585 				 extack);
2586 	if (IS_ERR(block)) {
2587 		err = PTR_ERR(block);
2588 		goto errout;
2589 	}
2590 
2591 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2592 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2593 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2594 		err = -EINVAL;
2595 		goto errout;
2596 	}
2597 	chain = tcf_chain_get(block, chain_index, false);
2598 	if (!chain) {
2599 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2600 		err = -EINVAL;
2601 		goto errout;
2602 	}
2603 
2604 	mutex_lock(&chain->filter_chain_lock);
2605 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2606 			       prio, false);
2607 	mutex_unlock(&chain->filter_chain_lock);
2608 	if (!tp || IS_ERR(tp)) {
2609 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2610 		err = tp ? PTR_ERR(tp) : -ENOENT;
2611 		goto errout;
2612 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2613 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2614 		err = -EINVAL;
2615 		goto errout;
2616 	}
2617 
2618 	fh = tp->ops->get(tp, t->tcm_handle);
2619 
2620 	if (!fh) {
2621 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2622 		err = -ENOENT;
2623 	} else {
2624 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2625 				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2626 		if (err < 0)
2627 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2628 	}
2629 
2630 	tfilter_put(tp, fh);
2631 errout:
2632 	if (chain) {
2633 		if (tp && !IS_ERR(tp))
2634 			tcf_proto_put(tp, rtnl_held, NULL);
2635 		tcf_chain_put(chain);
2636 	}
2637 	tcf_block_release(q, block, rtnl_held);
2638 
2639 	if (rtnl_held)
2640 		rtnl_unlock();
2641 
2642 	return err;
2643 }
2644 
2645 struct tcf_dump_args {
2646 	struct tcf_walker w;
2647 	struct sk_buff *skb;
2648 	struct netlink_callback *cb;
2649 	struct tcf_block *block;
2650 	struct Qdisc *q;
2651 	u32 parent;
2652 	bool terse_dump;
2653 };
2654 
2655 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2656 {
2657 	struct tcf_dump_args *a = (void *)arg;
2658 	struct net *net = sock_net(a->skb->sk);
2659 
2660 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2661 			     n, NETLINK_CB(a->cb->skb).portid,
2662 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2663 			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
2664 }
2665 
2666 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2667 			   struct sk_buff *skb, struct netlink_callback *cb,
2668 			   long index_start, long *p_index, bool terse)
2669 {
2670 	struct net *net = sock_net(skb->sk);
2671 	struct tcf_block *block = chain->block;
2672 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2673 	struct tcf_proto *tp, *tp_prev;
2674 	struct tcf_dump_args arg;
2675 
2676 	for (tp = __tcf_get_next_proto(chain, NULL);
2677 	     tp;
2678 	     tp_prev = tp,
2679 		     tp = __tcf_get_next_proto(chain, tp),
2680 		     tcf_proto_put(tp_prev, true, NULL),
2681 		     (*p_index)++) {
2682 		if (*p_index < index_start)
2683 			continue;
2684 		if (TC_H_MAJ(tcm->tcm_info) &&
2685 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2686 			continue;
2687 		if (TC_H_MIN(tcm->tcm_info) &&
2688 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2689 			continue;
2690 		if (*p_index > index_start)
2691 			memset(&cb->args[1], 0,
2692 			       sizeof(cb->args) - sizeof(cb->args[0]));
2693 		if (cb->args[1] == 0) {
2694 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2695 					  NETLINK_CB(cb->skb).portid,
2696 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2697 					  RTM_NEWTFILTER, false, true, NULL) <= 0)
2698 				goto errout;
2699 			cb->args[1] = 1;
2700 		}
2701 		if (!tp->ops->walk)
2702 			continue;
2703 		arg.w.fn = tcf_node_dump;
2704 		arg.skb = skb;
2705 		arg.cb = cb;
2706 		arg.block = block;
2707 		arg.q = q;
2708 		arg.parent = parent;
2709 		arg.w.stop = 0;
2710 		arg.w.skip = cb->args[1] - 1;
2711 		arg.w.count = 0;
2712 		arg.w.cookie = cb->args[2];
2713 		arg.terse_dump = terse;
2714 		tp->ops->walk(tp, &arg.w, true);
2715 		cb->args[2] = arg.w.cookie;
2716 		cb->args[1] = arg.w.count + 1;
2717 		if (arg.w.stop)
2718 			goto errout;
2719 	}
2720 	return true;
2721 
2722 errout:
2723 	tcf_proto_put(tp, true, NULL);
2724 	return false;
2725 }
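
/* Dump resume state used by tcf_chain_dump() lives in cb->args: args[0]
 * holds the proto index across all chains (maintained by the caller),
 * args[1] is the per-proto filter position plus one (zero means the proto
 * header itself has not been dumped yet), and args[2] carries the
 * classifier walker's opaque cookie.
 */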
2726 
2727 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2728 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2729 };
2730 
2731 /* called with RTNL */
2732 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2733 {
2734 	struct tcf_chain *chain, *chain_prev;
2735 	struct net *net = sock_net(skb->sk);
2736 	struct nlattr *tca[TCA_MAX + 1];
2737 	struct Qdisc *q = NULL;
2738 	struct tcf_block *block;
2739 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2740 	bool terse_dump = false;
2741 	long index_start;
2742 	long index;
2743 	u32 parent;
2744 	int err;
2745 
2746 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2747 		return skb->len;
2748 
2749 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2750 				     tcf_tfilter_dump_policy, cb->extack);
2751 	if (err)
2752 		return err;
2753 
2754 	if (tca[TCA_DUMP_FLAGS]) {
2755 		struct nla_bitfield32 flags =
2756 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2757 
2758 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2759 	}
2760 
2761 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2762 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2763 		if (!block)
2764 			goto out;
2765 		/* If we work with a block index, q is NULL and the parent value
2766 		 * will never be used in the following code. The check
2767 		 * in tcf_fill_node prevents it. However, the compiler does not
2768 		 * see that far, so set parent to zero to silence the warning
2769 		 * about parent being uninitialized.
2770 		 */
2771 		parent = 0;
2772 	} else {
2773 		const struct Qdisc_class_ops *cops;
2774 		struct net_device *dev;
2775 		unsigned long cl = 0;
2776 
2777 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2778 		if (!dev)
2779 			return skb->len;
2780 
2781 		parent = tcm->tcm_parent;
2782 		if (!parent)
2783 			q = rtnl_dereference(dev->qdisc);
2784 		else
2785 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2786 		if (!q)
2787 			goto out;
2788 		cops = q->ops->cl_ops;
2789 		if (!cops)
2790 			goto out;
2791 		if (!cops->tcf_block)
2792 			goto out;
2793 		if (TC_H_MIN(tcm->tcm_parent)) {
2794 			cl = cops->find(q, tcm->tcm_parent);
2795 			if (cl == 0)
2796 				goto out;
2797 		}
2798 		block = cops->tcf_block(q, cl, NULL);
2799 		if (!block)
2800 			goto out;
2801 		parent = block->classid;
2802 		if (tcf_block_shared(block))
2803 			q = NULL;
2804 	}
2805 
2806 	index_start = cb->args[0];
2807 	index = 0;
2808 
2809 	for (chain = __tcf_get_next_chain(block, NULL);
2810 	     chain;
2811 	     chain_prev = chain,
2812 		     chain = __tcf_get_next_chain(block, chain),
2813 		     tcf_chain_put(chain_prev)) {
2814 		if (tca[TCA_CHAIN] &&
2815 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2816 			continue;
2817 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2818 				    index_start, &index, terse_dump)) {
2819 			tcf_chain_put(chain);
2820 			err = -EMSGSIZE;
2821 			break;
2822 		}
2823 	}
2824 
2825 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2826 		tcf_block_refcnt_put(block, true);
2827 	cb->args[0] = index;
2828 
2829 out:
2830 	/* If we made no progress, the error (EMSGSIZE) is real */
2831 	if (skb->len == 0 && err)
2832 		return err;
2833 	return skb->len;
2834 }
2835 
2836 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2837 			      void *tmplt_priv, u32 chain_index,
2838 			      struct net *net, struct sk_buff *skb,
2839 			      struct tcf_block *block,
2840 			      u32 portid, u32 seq, u16 flags, int event,
2841 			      struct netlink_ext_ack *extack)
2842 {
2843 	unsigned char *b = skb_tail_pointer(skb);
2844 	const struct tcf_proto_ops *ops;
2845 	struct nlmsghdr *nlh;
2846 	struct tcmsg *tcm;
2847 	void *priv;
2848 
2849 	ops = tmplt_ops;
2850 	priv = tmplt_priv;
2851 
2852 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2853 	if (!nlh)
2854 		goto out_nlmsg_trim;
2855 	tcm = nlmsg_data(nlh);
2856 	tcm->tcm_family = AF_UNSPEC;
2857 	tcm->tcm__pad1 = 0;
2858 	tcm->tcm__pad2 = 0;
2859 	tcm->tcm_handle = 0;
2860 	if (block->q) {
2861 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2862 		tcm->tcm_parent = block->q->handle;
2863 	} else {
2864 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2865 		tcm->tcm_block_index = block->index;
2866 	}
2867 
2868 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2869 		goto nla_put_failure;
2870 
2871 	if (ops) {
2872 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2873 			goto nla_put_failure;
2874 		if (ops->tmplt_dump(skb, net, priv) < 0)
2875 			goto nla_put_failure;
2876 	}
2877 
2878 	if (extack && extack->_msg &&
2879 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2880 		goto out_nlmsg_trim;
2881 
2882 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2883 
2884 	return skb->len;
2885 
2886 out_nlmsg_trim:
2887 nla_put_failure:
2888 	nlmsg_trim(skb, b);
2889 	return -EMSGSIZE;
2890 }
2891 
2892 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2893 			   u32 seq, u16 flags, int event, bool unicast,
2894 			   struct netlink_ext_ack *extack)
2895 {
2896 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2897 	struct tcf_block *block = chain->block;
2898 	struct net *net = block->net;
2899 	struct sk_buff *skb;
2900 	int err = 0;
2901 
2902 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2903 	if (!skb)
2904 		return -ENOBUFS;
2905 
2906 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2907 			       chain->index, net, skb, block, portid,
2908 			       seq, flags, event, extack) <= 0) {
2909 		kfree_skb(skb);
2910 		return -EINVAL;
2911 	}
2912 
2913 	if (unicast)
2914 		err = rtnl_unicast(skb, net, portid);
2915 	else
2916 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2917 				     flags & NLM_F_ECHO);
2918 
2919 	return err;
2920 }
2921 
2922 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2923 				  void *tmplt_priv, u32 chain_index,
2924 				  struct tcf_block *block, struct sk_buff *oskb,
2925 				  u32 seq, u16 flags, bool unicast)
2926 {
2927 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2928 	struct net *net = block->net;
2929 	struct sk_buff *skb;
2930 
2931 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2932 	if (!skb)
2933 		return -ENOBUFS;
2934 
2935 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2936 			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
2937 		kfree_skb(skb);
2938 		return -EINVAL;
2939 	}
2940 
2941 	if (unicast)
2942 		return rtnl_unicast(skb, net, portid);
2943 
2944 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2945 }
2946 
2947 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2948 			      struct nlattr **tca,
2949 			      struct netlink_ext_ack *extack)
2950 {
2951 	const struct tcf_proto_ops *ops;
2952 	char name[IFNAMSIZ];
2953 	void *tmplt_priv;
2954 
2955 	/* If kind is not set, user did not specify template. */
2956 	if (!tca[TCA_KIND])
2957 		return 0;
2958 
2959 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2960 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2961 		return -EINVAL;
2962 	}
2963 
2964 	ops = tcf_proto_lookup_ops(name, true, extack);
2965 	if (IS_ERR(ops))
2966 		return PTR_ERR(ops);
2967 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2968 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2969 		module_put(ops->owner);
2970 		return -EOPNOTSUPP;
2971 	}
2972 
2973 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2974 	if (IS_ERR(tmplt_priv)) {
2975 		module_put(ops->owner);
2976 		return PTR_ERR(tmplt_priv);
2977 	}
2978 	chain->tmplt_ops = ops;
2979 	chain->tmplt_priv = tmplt_priv;
2980 	return 0;
2981 }
2982 
2983 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2984 			       void *tmplt_priv)
2985 {
2986 	/* If template ops are not set, there is no work to do for us. */
2987 	if (!tmplt_ops)
2988 		return;
2989 
2990 	tmplt_ops->tmplt_destroy(tmplt_priv);
2991 	module_put(tmplt_ops->owner);
2992 }
2993 
2994 /* Add/delete/get a chain */
2995 
2996 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2997 			struct netlink_ext_ack *extack)
2998 {
2999 	struct net *net = sock_net(skb->sk);
3000 	struct nlattr *tca[TCA_MAX + 1];
3001 	struct tcmsg *t;
3002 	u32 parent;
3003 	u32 chain_index;
3004 	struct Qdisc *q;
3005 	struct tcf_chain *chain;
3006 	struct tcf_block *block;
3007 	unsigned long cl;
3008 	int err;
3009 
3010 replay:
3011 	q = NULL;
3012 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3013 				     rtm_tca_policy, extack);
3014 	if (err < 0)
3015 		return err;
3016 
3017 	t = nlmsg_data(n);
3018 	parent = t->tcm_parent;
3019 	cl = 0;
3020 
3021 	block = tcf_block_find(net, &q, &parent, &cl,
3022 			       t->tcm_ifindex, t->tcm_block_index, extack);
3023 	if (IS_ERR(block))
3024 		return PTR_ERR(block);
3025 
3026 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3027 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
3028 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3029 		err = -EINVAL;
3030 		goto errout_block;
3031 	}
3032 
3033 	mutex_lock(&block->lock);
3034 	chain = tcf_chain_lookup(block, chain_index);
3035 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3036 		if (chain) {
3037 			if (tcf_chain_held_by_acts_only(chain)) {
3038 				/* The chain exists only because there is
3039 				 * some action referencing it.
3040 				 */
3041 				tcf_chain_hold(chain);
3042 			} else {
3043 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
3044 				err = -EEXIST;
3045 				goto errout_block_locked;
3046 			}
3047 		} else {
3048 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3049 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3050 				err = -ENOENT;
3051 				goto errout_block_locked;
3052 			}
3053 			chain = tcf_chain_create(block, chain_index);
3054 			if (!chain) {
3055 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3056 				err = -ENOMEM;
3057 				goto errout_block_locked;
3058 			}
3059 		}
3060 	} else {
3061 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
3062 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3063 			err = -EINVAL;
3064 			goto errout_block_locked;
3065 		}
3066 		tcf_chain_hold(chain);
3067 	}
3068 
3069 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3070 		/* Modifying chain requires holding parent block lock. In case
3071 		 * the chain was successfully added, take a reference to the
3072 		 * chain. This ensures that an empty chain does not disappear at
3073 		 * the end of this function.
3074 		 */
3075 		tcf_chain_hold(chain);
3076 		chain->explicitly_created = true;
3077 	}
3078 	mutex_unlock(&block->lock);
3079 
3080 	switch (n->nlmsg_type) {
3081 	case RTM_NEWCHAIN:
3082 		err = tc_chain_tmplt_add(chain, net, tca, extack);
3083 		if (err) {
3084 			tcf_chain_put_explicitly_created(chain);
3085 			goto errout;
3086 		}
3087 
3088 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3089 				RTM_NEWCHAIN, false, extack);
3090 		break;
3091 	case RTM_DELCHAIN:
3092 		tfilter_notify_chain(net, skb, block, q, parent, n,
3093 				     chain, RTM_DELTFILTER, extack);
3094 		/* Flush the chain first as the user requested chain removal. */
3095 		tcf_chain_flush(chain, true);
3096 		/* In case the chain was successfully deleted, put a reference
3097 		 * to the chain previously taken during addition.
3098 		 */
3099 		tcf_chain_put_explicitly_created(chain);
3100 		break;
3101 	case RTM_GETCHAIN:
3102 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3103 				      n->nlmsg_flags, n->nlmsg_type, true, extack);
3104 		if (err < 0)
3105 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3106 		break;
3107 	default:
3108 		err = -EOPNOTSUPP;
3109 		NL_SET_ERR_MSG(extack, "Unsupported message type");
3110 		goto errout;
3111 	}
3112 
3113 errout:
3114 	tcf_chain_put(chain);
3115 errout_block:
3116 	tcf_block_release(q, block, true);
3117 	if (err == -EAGAIN)
3118 		/* Replay the request. */
3119 		goto replay;
3120 	return err;
3121 
3122 errout_block_locked:
3123 	mutex_unlock(&block->lock);
3124 	goto errout_block;
3125 }
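
/* Illustrative userspace triggers for tc_ctl_chain() (assuming iproute2):
 *
 *	tc chain add dev eth0 ingress chain 5 protocol ip flower
 *	tc chain del dev eth0 ingress chain 5
 *	tc chain get dev eth0 ingress chain 5
 *
 * An "add" without classifier arguments creates an explicit, template-less
 * chain; with them, tc_chain_tmplt_add() records the kind and template that
 * later filters on the chain are validated against.
 */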
3126 
3127 /* called with RTNL */
3128 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3129 {
3130 	struct net *net = sock_net(skb->sk);
3131 	struct nlattr *tca[TCA_MAX + 1];
3132 	struct Qdisc *q = NULL;
3133 	struct tcf_block *block;
3134 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
3135 	struct tcf_chain *chain;
3136 	long index_start;
3137 	long index;
3138 	int err;
3139 
3140 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3141 		return skb->len;
3142 
3143 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3144 				     rtm_tca_policy, cb->extack);
3145 	if (err)
3146 		return err;
3147 
3148 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3149 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3150 		if (!block)
3151 			goto out;
3152 	} else {
3153 		const struct Qdisc_class_ops *cops;
3154 		struct net_device *dev;
3155 		unsigned long cl = 0;
3156 
3157 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3158 		if (!dev)
3159 			return skb->len;
3160 
3161 		if (!tcm->tcm_parent)
3162 			q = rtnl_dereference(dev->qdisc);
3163 		else
3164 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3165 
3166 		if (!q)
3167 			goto out;
3168 		cops = q->ops->cl_ops;
3169 		if (!cops)
3170 			goto out;
3171 		if (!cops->tcf_block)
3172 			goto out;
3173 		if (TC_H_MIN(tcm->tcm_parent)) {
3174 			cl = cops->find(q, tcm->tcm_parent);
3175 			if (cl == 0)
3176 				goto out;
3177 		}
3178 		block = cops->tcf_block(q, cl, NULL);
3179 		if (!block)
3180 			goto out;
3181 		if (tcf_block_shared(block))
3182 			q = NULL;
3183 	}
3184 
3185 	index_start = cb->args[0];
3186 	index = 0;
3187 
3188 	mutex_lock(&block->lock);
3189 	list_for_each_entry(chain, &block->chain_list, list) {
3190 		if (tca[TCA_CHAIN] &&
3191 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
3192 			continue;
3193 		if (index < index_start) {
3194 			index++;
3195 			continue;
3196 		}
3197 		if (tcf_chain_held_by_acts_only(chain))
3198 			continue;
3199 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3200 					 chain->index, net, skb, block,
3201 					 NETLINK_CB(cb->skb).portid,
3202 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3203 					 RTM_NEWCHAIN, NULL);
3204 		if (err <= 0)
3205 			break;
3206 		index++;
3207 	}
3208 	mutex_unlock(&block->lock);
3209 
3210 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3211 		tcf_block_refcnt_put(block, true);
3212 	cb->args[0] = index;
3213 
3214 out:
3215 	/* If we made no progress, the error (EMSGSIZE) is real */
3216 	if (skb->len == 0 && err)
3217 		return err;
3218 	return skb->len;
3219 }
3220 
3221 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3222 		     int police, struct tcf_proto *tp, u32 handle,
3223 		     bool use_action_miss)
3224 {
3225 	int err = 0;
3226 
3227 #ifdef CONFIG_NET_CLS_ACT
3228 	exts->type = 0;
3229 	exts->nr_actions = 0;
3230 	exts->miss_cookie_node = NULL;
3231 	/* Note: we do not yet own a reference on net.
3232 	 * This reference might be taken later from tcf_exts_get_net().
3233 	 */
3234 	exts->net = net;
3235 	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3236 				GFP_KERNEL);
3237 	if (!exts->actions)
3238 		return -ENOMEM;
3239 #endif
3240 
3241 	exts->action = action;
3242 	exts->police = police;
3243 
3244 	if (!use_action_miss)
3245 		return 0;
3246 
3247 	err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3248 	if (err)
3249 		goto err_miss_alloc;
3250 
3251 	return 0;
3252 
3253 err_miss_alloc:
3254 	tcf_exts_destroy(exts);
3255 #ifdef CONFIG_NET_CLS_ACT
3256 	exts->actions = NULL;
3257 #endif
3258 	return err;
3259 }
3260 EXPORT_SYMBOL(tcf_exts_init_ex);
3261 
3262 void tcf_exts_destroy(struct tcf_exts *exts)
3263 {
3264 	tcf_exts_miss_cookie_base_destroy(exts);
3265 
3266 #ifdef CONFIG_NET_CLS_ACT
3267 	if (exts->actions) {
3268 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3269 		kfree(exts->actions);
3270 	}
3271 	exts->nr_actions = 0;
3272 #endif
3273 }
3274 EXPORT_SYMBOL(tcf_exts_destroy);
3275 
3276 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3277 			 struct nlattr *rate_tlv, struct tcf_exts *exts,
3278 			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3279 {
3280 #ifdef CONFIG_NET_CLS_ACT
3281 	{
3282 		int init_res[TCA_ACT_MAX_PRIO] = {};
3283 		struct tc_action *act;
3284 		size_t attr_size = 0;
3285 
3286 		if (exts->police && tb[exts->police]) {
3287 			struct tc_action_ops *a_o;
3288 
3289 			a_o = tc_action_load_ops(tb[exts->police], true,
3290 						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3291 						 extack);
3292 			if (IS_ERR(a_o))
3293 				return PTR_ERR(a_o);
3294 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3295 			act = tcf_action_init_1(net, tp, tb[exts->police],
3296 						rate_tlv, a_o, init_res, flags,
3297 						extack);
3298 			module_put(a_o->owner);
3299 			if (IS_ERR(act))
3300 				return PTR_ERR(act);
3301 
3302 			act->type = exts->type = TCA_OLD_COMPAT;
3303 			exts->actions[0] = act;
3304 			exts->nr_actions = 1;
3305 			tcf_idr_insert_many(exts->actions);
3306 		} else if (exts->action && tb[exts->action]) {
3307 			int err;
3308 
3309 			flags |= TCA_ACT_FLAGS_BIND;
3310 			err = tcf_action_init(net, tp, tb[exts->action],
3311 					      rate_tlv, exts->actions, init_res,
3312 					      &attr_size, flags, fl_flags,
3313 					      extack);
3314 			if (err < 0)
3315 				return err;
3316 			exts->nr_actions = err;
3317 		}
3318 	}
3319 #else
3320 	if ((exts->action && tb[exts->action]) ||
3321 	    (exts->police && tb[exts->police])) {
3322 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3323 		return -EOPNOTSUPP;
3324 	}
3325 #endif
3326 
3327 	return 0;
3328 }
3329 EXPORT_SYMBOL(tcf_exts_validate_ex);
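
/* Minimal sketch (not part of this file) of how a classifier typically
 * drives the tcf_exts API from its ->change() op; the filter struct and
 * the TCA_EXAMPLE_* attribute IDs are hypothetical, the tcf_exts_* calls
 * are the helpers implemented in this file:
 *
 *	struct example_filter {
 *		struct tcf_exts exts;
 *		// classifier-specific match data lives here
 *	};
 *
 *	static int example_set_parms(struct net *net, struct tcf_proto *tp,
 *				     struct example_filter *f,
 *				     struct nlattr **tb, struct nlattr *est,
 *				     u32 flags, struct netlink_ext_ack *extack)
 *	{
 *		int err;
 *
 *		// allocate the (empty) action array
 *		err = tcf_exts_init(&f->exts, net, TCA_EXAMPLE_ACT,
 *				    TCA_EXAMPLE_POLICE);
 *		if (err < 0)
 *			return err;
 *
 *		// parse and bind the actions from the netlink request
 *		err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags,
 *					extack);
 *		if (err < 0)
 *			tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 *
 * When replacing a filter, classifiers install the freshly validated exts
 * with tcf_exts_change(), which frees the ones being replaced.
 */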
3330 
3331 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3332 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3333 		      u32 flags, struct netlink_ext_ack *extack)
3334 {
3335 	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3336 				    flags, 0, extack);
3337 }
3338 EXPORT_SYMBOL(tcf_exts_validate);
3339 
3340 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3341 {
3342 #ifdef CONFIG_NET_CLS_ACT
3343 	struct tcf_exts old = *dst;
3344 
3345 	*dst = *src;
3346 	tcf_exts_destroy(&old);
3347 #endif
3348 }
3349 EXPORT_SYMBOL(tcf_exts_change);
3350 
3351 #ifdef CONFIG_NET_CLS_ACT
3352 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3353 {
3354 	if (exts->nr_actions == 0)
3355 		return NULL;
3356 	else
3357 		return exts->actions[0];
3358 }
3359 #endif
3360 
3361 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3362 {
3363 #ifdef CONFIG_NET_CLS_ACT
3364 	struct nlattr *nest;
3365 
3366 	if (exts->action && tcf_exts_has_actions(exts)) {
3367 		/*
3368 		 * again for backward-compatible mode - we want
3369 		 * to work with both old and new modes of entering
3370 		 * tc data even if iproute2 is newer - jhs
3371 		 */
3372 		if (exts->type != TCA_OLD_COMPAT) {
3373 			nest = nla_nest_start_noflag(skb, exts->action);
3374 			if (nest == NULL)
3375 				goto nla_put_failure;
3376 
3377 			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3378 			    < 0)
3379 				goto nla_put_failure;
3380 			nla_nest_end(skb, nest);
3381 		} else if (exts->police) {
3382 			struct tc_action *act = tcf_exts_first_act(exts);
3383 			nest = nla_nest_start_noflag(skb, exts->police);
3384 			if (nest == NULL || !act)
3385 				goto nla_put_failure;
3386 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3387 				goto nla_put_failure;
3388 			nla_nest_end(skb, nest);
3389 		}
3390 	}
3391 	return 0;
3392 
3393 nla_put_failure:
3394 	nla_nest_cancel(skb, nest);
3395 	return -1;
3396 #else
3397 	return 0;
3398 #endif
3399 }
3400 EXPORT_SYMBOL(tcf_exts_dump);
3401 
3402 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3403 {
3404 #ifdef CONFIG_NET_CLS_ACT
3405 	struct nlattr *nest;
3406 
3407 	if (!exts->action || !tcf_exts_has_actions(exts))
3408 		return 0;
3409 
3410 	nest = nla_nest_start_noflag(skb, exts->action);
3411 	if (!nest)
3412 		goto nla_put_failure;
3413 
3414 	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3415 		goto nla_put_failure;
3416 	nla_nest_end(skb, nest);
3417 	return 0;
3418 
3419 nla_put_failure:
3420 	nla_nest_cancel(skb, nest);
3421 	return -1;
3422 #else
3423 	return 0;
3424 #endif
3425 }
3426 EXPORT_SYMBOL(tcf_exts_terse_dump);
3427 
3428 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3429 {
3430 #ifdef CONFIG_NET_CLS_ACT
3431 	struct tc_action *a = tcf_exts_first_act(exts);
3432 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3433 		return -1;
3434 #endif
3435 	return 0;
3436 }
3437 EXPORT_SYMBOL(tcf_exts_dump_stats);
3438 
3439 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3440 {
3441 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3442 		return;
3443 	*flags |= TCA_CLS_FLAGS_IN_HW;
3444 	atomic_inc(&block->offloadcnt);
3445 }
3446 
3447 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3448 {
3449 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3450 		return;
3451 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3452 	atomic_dec(&block->offloadcnt);
3453 }
3454 
3455 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3456 				      struct tcf_proto *tp, u32 *cnt,
3457 				      u32 *flags, u32 diff, bool add)
3458 {
3459 	lockdep_assert_held(&block->cb_lock);
3460 
3461 	spin_lock(&tp->lock);
3462 	if (add) {
3463 		if (!*cnt)
3464 			tcf_block_offload_inc(block, flags);
3465 		*cnt += diff;
3466 	} else {
3467 		*cnt -= diff;
3468 		if (!*cnt)
3469 			tcf_block_offload_dec(block, flags);
3470 	}
3471 	spin_unlock(&tp->lock);
3472 }
3473 
3474 static void
3475 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3476 			 u32 *cnt, u32 *flags)
3477 {
3478 	lockdep_assert_held(&block->cb_lock);
3479 
3480 	spin_lock(&tp->lock);
3481 	tcf_block_offload_dec(block, flags);
3482 	*cnt = 0;
3483 	spin_unlock(&tp->lock);
3484 }
3485 
3486 static int
3487 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3488 		   void *type_data, bool err_stop)
3489 {
3490 	struct flow_block_cb *block_cb;
3491 	int ok_count = 0;
3492 	int err;
3493 
3494 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3495 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3496 		if (err) {
3497 			if (err_stop)
3498 				return err;
3499 		} else {
3500 			ok_count++;
3501 		}
3502 	}
3503 	return ok_count;
3504 }
3505 
3506 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3507 		     void *type_data, bool err_stop, bool rtnl_held)
3508 {
3509 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3510 	int ok_count;
3511 
3512 retry:
3513 	if (take_rtnl)
3514 		rtnl_lock();
3515 	down_read(&block->cb_lock);
3516 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3517 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3518 	 * obtain the locks in the same order here.
3519 	 */
3520 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3521 		up_read(&block->cb_lock);
3522 		take_rtnl = true;
3523 		goto retry;
3524 	}
3525 
3526 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3527 
3528 	up_read(&block->cb_lock);
3529 	if (take_rtnl)
3530 		rtnl_unlock();
3531 	return ok_count;
3532 }
3533 EXPORT_SYMBOL(tc_setup_cb_call);
3534 
3535 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3536  * successfully offloaded, increment the block offloads counter. On failure,
3537  * a previously offloaded filter is considered to be intact and the offloads
3538  * counter is not decremented.
3539  */
3540 
3541 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3542 		    enum tc_setup_type type, void *type_data, bool err_stop,
3543 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3544 {
3545 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3546 	int ok_count;
3547 
3548 retry:
3549 	if (take_rtnl)
3550 		rtnl_lock();
3551 	down_read(&block->cb_lock);
3552 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3553 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3554 	 * obtain the locks in the same order here.
3555 	 */
3556 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3557 		up_read(&block->cb_lock);
3558 		take_rtnl = true;
3559 		goto retry;
3560 	}
3561 
3562 	/* Make sure all netdevs sharing this block are offload-capable. */
3563 	if (block->nooffloaddevcnt && err_stop) {
3564 		ok_count = -EOPNOTSUPP;
3565 		goto err_unlock;
3566 	}
3567 
3568 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3569 	if (ok_count < 0)
3570 		goto err_unlock;
3571 
3572 	if (tp->ops->hw_add)
3573 		tp->ops->hw_add(tp, type_data);
3574 	if (ok_count > 0)
3575 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3576 					  ok_count, true);
3577 err_unlock:
3578 	up_read(&block->cb_lock);
3579 	if (take_rtnl)
3580 		rtnl_unlock();
3581 	return min(ok_count, 0);
3582 }
3583 EXPORT_SYMBOL(tc_setup_cb_add);
3584 
3585 /* Destructive filter replace. If a filter that wasn't already in hardware is
3586  * successfully offloaded, increment the block offload counter. On failure, the
3587  * previously offloaded filter is considered to be destroyed and the offload
3588  * counter is decremented.
3589  */
3590 
3591 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3592 			enum tc_setup_type type, void *type_data, bool err_stop,
3593 			u32 *old_flags, unsigned int *old_in_hw_count,
3594 			u32 *new_flags, unsigned int *new_in_hw_count,
3595 			bool rtnl_held)
3596 {
3597 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3598 	int ok_count;
3599 
3600 retry:
3601 	if (take_rtnl)
3602 		rtnl_lock();
3603 	down_read(&block->cb_lock);
3604 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3605 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3606 	 * obtain the locks in the same order here.
3607 	 */
3608 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3609 		up_read(&block->cb_lock);
3610 		take_rtnl = true;
3611 		goto retry;
3612 	}
3613 
3614 	/* Make sure all netdevs sharing this block are offload-capable. */
3615 	if (block->nooffloaddevcnt && err_stop) {
3616 		ok_count = -EOPNOTSUPP;
3617 		goto err_unlock;
3618 	}
3619 
3620 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3621 	if (tp->ops->hw_del)
3622 		tp->ops->hw_del(tp, type_data);
3623 
3624 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3625 	if (ok_count < 0)
3626 		goto err_unlock;
3627 
3628 	if (tp->ops->hw_add)
3629 		tp->ops->hw_add(tp, type_data);
3630 	if (ok_count > 0)
3631 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3632 					  new_flags, ok_count, true);
3633 err_unlock:
3634 	up_read(&block->cb_lock);
3635 	if (take_rtnl)
3636 		rtnl_unlock();
3637 	return min(ok_count, 0);
3638 }
3639 EXPORT_SYMBOL(tc_setup_cb_replace);
3640 
3641 /* Destroy the filter and decrement the block offload counter if the filter
3642  * was previously offloaded.
3643  */
3644 
3645 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3646 			enum tc_setup_type type, void *type_data, bool err_stop,
3647 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3648 {
3649 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3650 	int ok_count;
3651 
3652 retry:
3653 	if (take_rtnl)
3654 		rtnl_lock();
3655 	down_read(&block->cb_lock);
3656 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3657 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3658 	 * obtain the locks in the same order here.
3659 	 */
3660 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3661 		up_read(&block->cb_lock);
3662 		take_rtnl = true;
3663 		goto retry;
3664 	}
3665 
3666 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3667 
3668 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3669 	if (tp->ops->hw_del)
3670 		tp->ops->hw_del(tp, type_data);
3671 
3672 	up_read(&block->cb_lock);
3673 	if (take_rtnl)
3674 		rtnl_unlock();
3675 	return min(ok_count, 0);
3676 }
3677 EXPORT_SYMBOL(tc_setup_cb_destroy);
3678 
3679 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3680 			  bool add, flow_setup_cb_t *cb,
3681 			  enum tc_setup_type type, void *type_data,
3682 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3683 {
3684 	int err = cb(type, type_data, cb_priv);
3685 
3686 	if (err) {
3687 		if (add && tc_skip_sw(*flags))
3688 			return err;
3689 	} else {
3690 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3691 					  add);
3692 	}
3693 
3694 	return 0;
3695 }
3696 EXPORT_SYMBOL(tc_setup_cb_reoffload);
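
/* Conventions shared by the tc_setup_cb_{add,replace,destroy,reoffload}()
 * helpers above: *in_hw_count tracks how many callbacks accepted the
 * filter, TCA_CLS_FLAGS_IN_HW in *flags mirrors "count != 0" (both updated
 * under tp->lock by tc_cls_offload_cnt_update()), and the return value is
 * 0 or a negative errno - per-callback successes are folded into the
 * counters instead of being reported to the caller.
 */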
3697 
3698 static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3699 				   const struct tc_action *act)
3700 {
3701 	struct tc_cookie *user_cookie;
3702 	int err = 0;
3703 
3704 	rcu_read_lock();
3705 	user_cookie = rcu_dereference(act->user_cookie);
3706 	if (user_cookie) {
3707 		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3708 							       user_cookie->len,
3709 							       GFP_ATOMIC);
3710 		if (!entry->user_cookie)
3711 			err = -ENOMEM;
3712 	}
3713 	rcu_read_unlock();
3714 	return err;
3715 }
3716 
3717 static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3718 {
3719 	flow_action_cookie_destroy(entry->user_cookie);
3720 }
3721 
3722 void tc_cleanup_offload_action(struct flow_action *flow_action)
3723 {
3724 	struct flow_action_entry *entry;
3725 	int i;
3726 
3727 	flow_action_for_each(i, entry, flow_action) {
3728 		tcf_act_put_user_cookie(entry);
3729 		if (entry->destructor)
3730 			entry->destructor(entry->destructor_priv);
3731 	}
3732 }
3733 EXPORT_SYMBOL(tc_cleanup_offload_action);
3734 
3735 static int tc_setup_offload_act(struct tc_action *act,
3736 				struct flow_action_entry *entry,
3737 				u32 *index_inc,
3738 				struct netlink_ext_ack *extack)
3739 {
3740 #ifdef CONFIG_NET_CLS_ACT
3741 	if (act->ops->offload_act_setup) {
3742 		return act->ops->offload_act_setup(act, entry, index_inc, true,
3743 						   extack);
3744 	} else {
3745 		NL_SET_ERR_MSG(extack, "Action does not support offload");
3746 		return -EOPNOTSUPP;
3747 	}
3748 #else
3749 	return 0;
3750 #endif
3751 }
3752 
3753 int tc_setup_action(struct flow_action *flow_action,
3754 		    struct tc_action *actions[],
3755 		    u32 miss_cookie_base,
3756 		    struct netlink_ext_ack *extack)
3757 {
3758 	int i, j, k, index, err = 0;
3759 	struct tc_action *act;
3760 
3761 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3762 	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3763 	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3764 
3765 	if (!actions)
3766 		return 0;
3767 
3768 	j = 0;
3769 	tcf_act_for_each_action(i, act, actions) {
3770 		struct flow_action_entry *entry;
3771 
3772 		entry = &flow_action->entries[j];
3773 		spin_lock_bh(&act->tcfa_lock);
3774 		err = tcf_act_get_user_cookie(entry, act);
3775 		if (err)
3776 			goto err_out_locked;
3777 
3778 		index = 0;
3779 		err = tc_setup_offload_act(act, entry, &index, extack);
3780 		if (err)
3781 			goto err_out_locked;
3782 
3783 		for (k = 0; k < index; k++) {
3784 			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3785 			entry[k].hw_index = act->tcfa_index;
3786 			entry[k].cookie = (unsigned long)act;
3787 			entry[k].miss_cookie =
3788 				tcf_exts_miss_cookie_get(miss_cookie_base, i);
3789 		}
3790 
3791 		j += index;
3792 
3793 		spin_unlock_bh(&act->tcfa_lock);
3794 	}
3795 
3796 err_out:
3797 	if (err)
3798 		tc_cleanup_offload_action(flow_action);
3799 
3800 	return err;
3801 err_out_locked:
3802 	spin_unlock_bh(&act->tcfa_lock);
3803 	goto err_out;
3804 }
3805 
3806 int tc_setup_offload_action(struct flow_action *flow_action,
3807 			    const struct tcf_exts *exts,
3808 			    struct netlink_ext_ack *extack)
3809 {
3810 #ifdef CONFIG_NET_CLS_ACT
3811 	u32 miss_cookie_base;
3812 
3813 	if (!exts)
3814 		return 0;
3815 
3816 	miss_cookie_base = exts->miss_cookie_node ?
3817 			   exts->miss_cookie_node->miss_cookie_base : 0;
3818 	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3819 			       extack);
3820 #else
3821 	return 0;
3822 #endif
3823 }
3824 EXPORT_SYMBOL(tc_setup_offload_action);
3825 
3826 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3827 {
3828 	unsigned int num_acts = 0;
3829 	struct tc_action *act;
3830 	int i;
3831 
3832 	tcf_exts_for_each_action(i, act, exts) {
3833 		if (is_tcf_pedit(act))
3834 			num_acts += tcf_pedit_nkeys(act);
3835 		else
3836 			num_acts++;
3837 	}
3838 	return num_acts;
3839 }
3840 EXPORT_SYMBOL(tcf_exts_num_actions);
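
/* One pedit action expands into one flow_action entry per pedit key during
 * offload, which is why tcf_exts_num_actions() counts nkeys; callers use
 * the result to size the flow_action array (e.g. via flow_rule_alloc())
 * before tc_setup_offload_action() fills it in.
 */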
3841 
3842 #ifdef CONFIG_NET_CLS_ACT
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (!*p_block_index) {
		NL_SET_ERR_MSG(extack, "Block number may not be zero");
		return -EINVAL;
	}

	return 0;
}

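/* Bind @qe to the filter block identified by @block_index_attr, if the
 * attribute was given. A minimal usage sketch, modeled on how sch_red
 * binds its early_drop qevent (the qdisc member and attribute names are
 * the caller's):
 *
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 */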
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	qe->info.binder_type = binder_type;
	qe->info.chain_head_change = tcf_chain_head_change_dflt;
	qe->info.chain_head_change_priv = &qe->filter_chain;
	qe->info.block_index = block_index;

	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
}
EXPORT_SYMBOL(tcf_qevent_init);

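/* Release the block reference taken by tcf_qevent_init(), if one was
 * taken (a zero block index means the qevent was never bound).
 */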
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);

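/* Validate a qdisc change request against an already-initialized qevent:
 * the block index may be omitted or repeated, but not altered.
 */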
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	/* Reject a newly-configured block as well as a change of block. */
	if (block_index != qe->info.block_index) {
		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_qevent_validate_change);

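/* Classify @skb against the qevent's filter chain. Returns the skb for
 * the caller to continue with, or NULL if the filters consumed it, in
 * which case *ret carries the code to fold into the qdisc return value.
 * A hedged usage sketch, modeled on sch_red's enqueue path:
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 */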
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);

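/* Emit the qevent's block index as attribute @attr_name, or nothing if
 * no block is bound.
 */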
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	if (!qe->info.block_index)
		return 0;
	return nla_put_u32(skb, attr_name, qe->info.block_index);
}
EXPORT_SYMBOL(tcf_qevent_dump);
#endif

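/* Per-netns state: an IDR of the filter blocks in this namespace, set up
 * and torn down with the namespace.
 */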
static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

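/* Module init: allocate the ordered workqueue used to defer filter work,
 * register the per-netns state, the miss-cookie xarray and the rtnetlink
 * handlers for filters and chains.
 */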
static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);