// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC classifier modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32bit miss_cookie_base and
 * the action's index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}
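
/* Example (editor's sketch, not part of the original file): the 64-bit
 * cookie handed to hardware packs the xarray-allocated base and the
 * action's index into the two halves of a u64 (layout follows host
 * endianness via the union above), and the lookup reverses the split:
 *
 *	u64 cookie = tcf_exts_miss_cookie_get(n->miss_cookie_base, 2);
 *	...
 *	int act_index;
 *	struct tcf_exts_miss_cookie_node *node =
 *		tcf_exts_miss_cookie_lookup(cookie, &act_index);
 *	// act_index is now 2; node is the node registered for that base
 */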

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
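
/* Example (editor's sketch): classifiers typically use tcf_queue_work()
 * to defer freeing of filter state until an RCU grace period has passed,
 * mirroring the pattern used by in-tree classifiers such as cls_flower.
 * 'struct my_filter' and the helper name are illustrative only:
 *
 *	static void my_filter_destroy_work(struct work_struct *work)
 *	{
 *		struct my_filter *f = container_of(to_rcu_work(work),
 *						   struct my_filter, rwork);
 *		kfree(f);
 *	}
 *
 *	// in the classifier's ->delete()/->destroy() path:
 *	tcf_queue_work(&f->rwork, my_filter_destroy_work);
 */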

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
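
/* Example (editor's note): with no filter installed, tcf_auto_prio(NULL)
 * yields TC_H_MAJ(0xC0000000), i.e. prio 49152; if the chain head has
 * prio 0xC0000000, the next auto-allocated value is
 * TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000, i.e. prio 49151.
 */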

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_maintain_bypass(struct tcf_block *block)
{
	int filtercnt = atomic_read(&block->filtercnt);
	int skipswcnt = atomic_read(&block->skipswcnt);
	bool bypass_wanted = filtercnt > 0 && filtercnt == skipswcnt;

	if (bypass_wanted != block->bypass_wanted) {
#ifdef CONFIG_NET_CLS_ACT
		if (bypass_wanted)
			static_branch_inc(&tcf_bypass_check_needed_key);
		else
			static_branch_dec(&tcf_bypass_check_needed_key);
#endif
		block->bypass_wanted = bypass_wanted;
	}
}

static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add)
{
	lockdep_assert_not_held(&block->cb_lock);

	down_write(&block->cb_lock);
	if (*counted != add) {
		if (add) {
			atomic_inc(&block->filtercnt);
			*counted = true;
		} else {
			atomic_dec(&block->filtercnt);
			*counted = false;
		}
	}
	tcf_maintain_bypass(block);
	up_write(&block->cb_lock);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	xa_destroy(&block->ports);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
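
/* Example (editor's sketch): an action that jumps to a chain, e.g. gact
 * with a TC_ACT_GOTO_CHAIN control, pins the target chain with an action
 * reference for the action's lifetime; see tcf_action_check_ctrlact() in
 * act_api.c for an in-tree caller of this kind:
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;	// chain lookup/creation failed
 *	...
 *	// on action teardown:
 *	tcf_chain_put_by_act(chain);
 */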

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;
	xa_init(&block->ports);

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}
EXPORT_SYMBOL(tcf_block_lookup);

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
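
/* Example (editor's sketch): the canonical iteration pattern. The helper
 * itself releases the previously returned chain, so the loop body must
 * not put 'chain' and must not touch it after asking for the next one:
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		// 'chain' is held here
 *	}
 */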

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
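
/* Example (editor's sketch): combined with tcf_get_next_chain() this
 * walks every classifier on a block; tcf_get_next_proto() puts the
 * previously returned tp itself (it assumes RTNL is held):
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		for (tp = tcf_get_next_proto(chain, NULL); tp;
 *		     tp = tcf_get_next_proto(chain, tp)) {
 *			// 'tp' is held here
 *		}
 *	}
 */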

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the rules-update path of the cls API without
		 * the rtnl lock. The caller must release the block when it is
		 * finished using it. The 'if' branch of this conditional
		 * obtains its reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

static bool tcf_block_tracks_dev(struct tcf_block *block,
				 struct tcf_block_ext_info *ei)
{
	return tcf_block_shared(block) &&
	       (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
		ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(q);
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	if (tcf_block_tracks_dev(block, ei)) {
		err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
		if (err) {
			NL_SET_ERR_MSG(extack, "block dev insert failed");
			goto err_dev_insert;
		}
	}

	*p_block = block;
	return 0;

err_dev_insert:
	tcf_block_offload_unbind(block, q, ei);
err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
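
/* Example (editor's sketch): a classful qdisc typically acquires its
 * block in ->init() and releases it in ->destroy(); the 'q->block' and
 * 'q->filter_list' names follow the common in-tree pattern but are
 * illustrative here:
 *
 *	// in ->init():
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	// in ->destroy():
 *	tcf_block_put(q->block);
 */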
1553
1554 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1555 * actions should be all removed after flushing.
1556 */
tcf_block_put_ext(struct tcf_block * block,struct Qdisc * q,struct tcf_block_ext_info * ei)1557 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1558 struct tcf_block_ext_info *ei)
1559 {
1560 struct net_device *dev = qdisc_dev(q);
1561
1562 if (!block)
1563 return;
1564 if (tcf_block_tracks_dev(block, ei))
1565 xa_erase(&block->ports, dev->ifindex);
1566 tcf_chain0_head_change_cb_del(block, ei);
1567 tcf_block_owner_del(block, q, ei->binder_type);
1568
1569 __tcf_block_put(block, q, ei, true);
1570 }
1571 EXPORT_SYMBOL(tcf_block_put_ext);
1572
tcf_block_put(struct tcf_block * block)1573 void tcf_block_put(struct tcf_block *block)
1574 {
1575 struct tcf_block_ext_info ei = {0, };
1576
1577 if (!block)
1578 return;
1579 tcf_block_put_ext(block, block->q, &ei);
1580 }
1581
1582 EXPORT_SYMBOL(tcf_block_put);
1583
1584 static int
tcf_block_playback_offloads(struct tcf_block * block,flow_setup_cb_t * cb,void * cb_priv,bool add,bool offload_in_use,struct netlink_ext_ack * extack)1585 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1586 void *cb_priv, bool add, bool offload_in_use,
1587 struct netlink_ext_ack *extack)
1588 {
1589 struct tcf_chain *chain, *chain_prev;
1590 struct tcf_proto *tp, *tp_prev;
1591 int err;
1592
1593 lockdep_assert_held(&block->cb_lock);
1594
1595 for (chain = __tcf_get_next_chain(block, NULL);
1596 chain;
1597 chain_prev = chain,
1598 chain = __tcf_get_next_chain(block, chain),
1599 tcf_chain_put(chain_prev)) {
1600 if (chain->tmplt_ops && add)
1601 chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
1602 cb_priv);
1603 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1604 tp_prev = tp,
1605 tp = __tcf_get_next_proto(chain, tp),
1606 tcf_proto_put(tp_prev, true, NULL)) {
1607 if (tp->ops->reoffload) {
1608 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1609 extack);
1610 if (err && add)
1611 goto err_playback_remove;
1612 } else if (add && offload_in_use) {
1613 err = -EOPNOTSUPP;
1614 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1615 goto err_playback_remove;
1616 }
1617 }
1618 if (chain->tmplt_ops && !add)
1619 chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
1620 cb_priv);
1621 }
1622
1623 return 0;
1624
1625 err_playback_remove:
1626 tcf_proto_put(tp, true, NULL);
1627 tcf_chain_put(chain);
1628 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1629 extack);
1630 return err;
1631 }
1632
tcf_block_bind(struct tcf_block * block,struct flow_block_offload * bo)1633 static int tcf_block_bind(struct tcf_block *block,
1634 struct flow_block_offload *bo)
1635 {
1636 struct flow_block_cb *block_cb, *next;
1637 int err, i = 0;
1638
1639 lockdep_assert_held(&block->cb_lock);
1640
1641 list_for_each_entry(block_cb, &bo->cb_list, list) {
1642 err = tcf_block_playback_offloads(block, block_cb->cb,
1643 block_cb->cb_priv, true,
1644 tcf_block_offload_in_use(block),
1645 bo->extack);
1646 if (err)
1647 goto err_unroll;
1648 if (!bo->unlocked_driver_cb)
1649 block->lockeddevcnt++;
1650
1651 i++;
1652 }
1653 list_splice(&bo->cb_list, &block->flow_block.cb_list);
1654
1655 return 0;
1656
1657 err_unroll:
1658 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1659 list_del(&block_cb->driver_list);
1660 if (i-- > 0) {
1661 list_del(&block_cb->list);
1662 tcf_block_playback_offloads(block, block_cb->cb,
1663 block_cb->cb_priv, false,
1664 tcf_block_offload_in_use(block),
1665 NULL);
1666 if (!bo->unlocked_driver_cb)
1667 block->lockeddevcnt--;
1668 }
1669 flow_block_cb_free(block_cb);
1670 }
1671
1672 return err;
1673 }
1674
tcf_block_unbind(struct tcf_block * block,struct flow_block_offload * bo)1675 static void tcf_block_unbind(struct tcf_block *block,
1676 struct flow_block_offload *bo)
1677 {
1678 struct flow_block_cb *block_cb, *next;
1679
1680 lockdep_assert_held(&block->cb_lock);
1681
1682 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1683 tcf_block_playback_offloads(block, block_cb->cb,
1684 block_cb->cb_priv, false,
1685 tcf_block_offload_in_use(block),
1686 NULL);
1687 list_del(&block_cb->list);
1688 flow_block_cb_free(block_cb);
1689 if (!bo->unlocked_driver_cb)
1690 block->lockeddevcnt--;
1691 }
1692 }
1693
tcf_block_setup(struct tcf_block * block,struct flow_block_offload * bo)1694 static int tcf_block_setup(struct tcf_block *block,
1695 struct flow_block_offload *bo)
1696 {
1697 int err;
1698
1699 switch (bo->command) {
1700 case FLOW_BLOCK_BIND:
1701 err = tcf_block_bind(block, bo);
1702 break;
1703 case FLOW_BLOCK_UNBIND:
1704 err = 0;
1705 tcf_block_unbind(block, bo);
1706 break;
1707 default:
1708 WARN_ON_ONCE(1);
1709 err = -EOPNOTSUPP;
1710 }
1711
1712 return err;
1713 }
1714
1715 /* Main classifier routine: scans classifier chain attached
1716 * to this qdisc, (optionally) tests for protocol and asks
1717 * specific classifiers.
1718 */
__tcf_classify(struct sk_buff * skb,const struct tcf_proto * tp,const struct tcf_proto * orig_tp,struct tcf_result * res,bool compat_mode,struct tcf_exts_miss_cookie_node * n,int act_index,u32 * last_executed_chain)1719 static inline int __tcf_classify(struct sk_buff *skb,
1720 const struct tcf_proto *tp,
1721 const struct tcf_proto *orig_tp,
1722 struct tcf_result *res,
1723 bool compat_mode,
1724 struct tcf_exts_miss_cookie_node *n,
1725 int act_index,
1726 u32 *last_executed_chain)
1727 {
1728 #ifdef CONFIG_NET_CLS_ACT
1729 const int max_reclassify_loop = 16;
1730 const struct tcf_proto *first_tp;
1731 int limit = 0;
1732
1733 reclassify:
1734 #endif
1735 for (; tp; tp = rcu_dereference_bh(tp->next)) {
1736 __be16 protocol = skb_protocol(skb, false);
1737 int err = 0;
1738
1739 if (n) {
1740 struct tcf_exts *exts;
1741
1742 if (n->tp_prio != tp->prio)
1743 continue;
1744
1745 /* We re-lookup the tp and chain based on index instead
1746 * of having hard refs and locks to them, so do a sanity
1747 * check if any of tp,chain,exts was replaced by the
1748 * time we got here with a cookie from hardware.
1749 */
1750 if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
1751 !tp->ops->get_exts)) {
1752 tcf_set_drop_reason(skb,
1753 SKB_DROP_REASON_TC_COOKIE_ERROR);
1754 return TC_ACT_SHOT;
1755 }
1756
1757 exts = tp->ops->get_exts(tp, n->handle);
1758 if (unlikely(!exts || n->exts != exts)) {
1759 tcf_set_drop_reason(skb,
1760 SKB_DROP_REASON_TC_COOKIE_ERROR);
1761 return TC_ACT_SHOT;
1762 }
1763
1764 n = NULL;
1765 err = tcf_exts_exec_ex(skb, exts, act_index, res);
1766 } else {
1767 if (tp->protocol != protocol &&
1768 tp->protocol != htons(ETH_P_ALL))
1769 continue;
1770
1771 err = tc_classify(skb, tp, res);
1772 }
1773 #ifdef CONFIG_NET_CLS_ACT
1774 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1775 first_tp = orig_tp;
1776 *last_executed_chain = first_tp->chain->index;
1777 goto reset;
1778 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1779 first_tp = res->goto_tp;
1780 *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1781 goto reset;
1782 }
1783 #endif
1784 if (err >= 0)
1785 return err;
1786 }
1787
1788 if (unlikely(n)) {
1789 tcf_set_drop_reason(skb,
1790 SKB_DROP_REASON_TC_COOKIE_ERROR);
1791 return TC_ACT_SHOT;
1792 }
1793
1794 return TC_ACT_UNSPEC; /* signal: continue lookup */
1795 #ifdef CONFIG_NET_CLS_ACT
1796 reset:
1797 if (unlikely(limit++ >= max_reclassify_loop)) {
1798 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1799 tp->chain->block->index,
1800 tp->prio & 0xffff,
1801 ntohs(tp->protocol));
1802 tcf_set_drop_reason(skb,
1803 SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
1804 return TC_ACT_SHOT;
1805 }
1806
1807 tp = first_tp;
1808 goto reclassify;
1809 #endif
1810 }
1811
tcf_classify(struct sk_buff * skb,const struct tcf_block * block,const struct tcf_proto * tp,struct tcf_result * res,bool compat_mode)1812 int tcf_classify(struct sk_buff *skb,
1813 const struct tcf_block *block,
1814 const struct tcf_proto *tp,
1815 struct tcf_result *res, bool compat_mode)
1816 {
1817 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1818 u32 last_executed_chain = 0;
1819
1820 return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
1821 &last_executed_chain);
1822 #else
1823 u32 last_executed_chain = tp ? tp->chain->index : 0;
1824 struct tcf_exts_miss_cookie_node *n = NULL;
1825 const struct tcf_proto *orig_tp = tp;
1826 struct tc_skb_ext *ext;
1827 int act_index = 0;
1828 int ret;
1829
1830 if (block) {
1831 ext = skb_ext_find(skb, TC_SKB_EXT);
1832
1833 if (ext && (ext->chain || ext->act_miss)) {
1834 struct tcf_chain *fchain;
1835 u32 chain;
1836
1837 if (ext->act_miss) {
1838 n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
1839 &act_index);
1840 if (!n) {
1841 tcf_set_drop_reason(skb,
1842 SKB_DROP_REASON_TC_COOKIE_ERROR);
1843 return TC_ACT_SHOT;
1844 }
1845
1846 chain = n->chain_index;
1847 } else {
1848 chain = ext->chain;
1849 }
1850
1851 fchain = tcf_chain_lookup_rcu(block, chain);
1852 if (!fchain) {
1853 tcf_set_drop_reason(skb,
1854 SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
1855
1856 return TC_ACT_SHOT;
1857 }
1858
1859 /* Consume, so cloned/redirect skbs won't inherit ext */
1860 skb_ext_del(skb, TC_SKB_EXT);
1861
1862 tp = rcu_dereference_bh(fchain->filter_chain);
1863 last_executed_chain = fchain->index;
1864 }
1865 }
1866
1867 ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
1868 &last_executed_chain);
1869
1870 if (tc_skb_ext_tc_enabled()) {
1871 /* If we missed on some chain */
1872 if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1873 struct tc_skb_cb *cb = tc_skb_cb(skb);
1874
1875 ext = tc_skb_ext_alloc(skb);
1876 if (WARN_ON_ONCE(!ext)) {
1877 tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
1878 return TC_ACT_SHOT;
1879 }
1880 ext->chain = last_executed_chain;
1881 ext->mru = cb->mru;
1882 ext->post_ct = cb->post_ct;
1883 ext->post_ct_snat = cb->post_ct_snat;
1884 ext->post_ct_dnat = cb->post_ct_dnat;
1885 ext->zone = cb->zone;
1886 }
1887 }
1888
1889 return ret;
1890 #endif
1891 }
1892 EXPORT_SYMBOL(tcf_classify);
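/* Usage sketch (illustrative, simplified from a classful qdisc's classify
 * helper; q->filter_list is an assumed field name):
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *
 *	switch (tcf_classify(skb, NULL, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		return NULL;		(drop the skb)
 *	case TC_ACT_UNSPEC:
 *		break;			(no match, use the default class)
 *	}
 *
 * Passing a NULL block skips the hardware-miss (tc_skb_ext) handling, which
 * only ingress/clsact-style callers need.
 */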
1893
1894 struct tcf_chain_info {
1895 struct tcf_proto __rcu **pprev;
1896 struct tcf_proto __rcu *next;
1897 };
1898
1899 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1900 struct tcf_chain_info *chain_info)
1901 {
1902 return tcf_chain_dereference(*chain_info->pprev, chain);
1903 }
1904
1905 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1906 struct tcf_chain_info *chain_info,
1907 struct tcf_proto *tp)
1908 {
1909 if (chain->flushing)
1910 return -EAGAIN;
1911
1912 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1913 if (*chain_info->pprev == chain->filter_chain)
1914 tcf_chain0_head_change(chain, tp);
1915 tcf_proto_get(tp);
1916 rcu_assign_pointer(*chain_info->pprev, tp);
1917
1918 return 0;
1919 }
1920
1921 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1922 struct tcf_chain_info *chain_info,
1923 struct tcf_proto *tp)
1924 {
1925 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1926
1927 tcf_proto_mark_delete(tp);
1928 if (tp == chain->filter_chain)
1929 tcf_chain0_head_change(chain, next);
1930 RCU_INIT_POINTER(*chain_info->pprev, next);
1931 }
1932
1933 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1934 struct tcf_chain_info *chain_info,
1935 u32 protocol, u32 prio,
1936 bool prio_allocate);
1937
1938 /* Try to insert a new proto.
1939 * If a proto with the specified priority already exists, free the new
1940 * proto and return the existing one.
1941 */
1942
1943 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1944 struct tcf_proto *tp_new,
1945 u32 protocol, u32 prio,
1946 bool rtnl_held)
1947 {
1948 struct tcf_chain_info chain_info;
1949 struct tcf_proto *tp;
1950 int err = 0;
1951
1952 mutex_lock(&chain->filter_chain_lock);
1953
1954 if (tcf_proto_exists_destroying(chain, tp_new)) {
1955 mutex_unlock(&chain->filter_chain_lock);
1956 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1957 return ERR_PTR(-EAGAIN);
1958 }
1959
1960 tp = tcf_chain_tp_find(chain, &chain_info,
1961 protocol, prio, false);
1962 if (!tp)
1963 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1964 mutex_unlock(&chain->filter_chain_lock);
1965
1966 if (tp) {
1967 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1968 tp_new = tp;
1969 } else if (err) {
1970 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1971 tp_new = ERR_PTR(err);
1972 }
1973
1974 return tp_new;
1975 }
1976
1977 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1978 struct tcf_proto *tp, bool rtnl_held,
1979 struct netlink_ext_ack *extack)
1980 {
1981 struct tcf_chain_info chain_info;
1982 struct tcf_proto *tp_iter;
1983 struct tcf_proto **pprev;
1984 struct tcf_proto *next;
1985
1986 mutex_lock(&chain->filter_chain_lock);
1987
1988 /* Atomically find and remove tp from chain. */
1989 for (pprev = &chain->filter_chain;
1990 (tp_iter = tcf_chain_dereference(*pprev, chain));
1991 pprev = &tp_iter->next) {
1992 if (tp_iter == tp) {
1993 chain_info.pprev = pprev;
1994 chain_info.next = tp_iter->next;
1995 WARN_ON(tp_iter->deleting);
1996 break;
1997 }
1998 }
1999 /* Verify that tp still exists and no new filters were inserted
2000 * concurrently.
2001 * Mark tp for deletion if it is empty.
2002 */
2003 if (!tp_iter || !tcf_proto_check_delete(tp)) {
2004 mutex_unlock(&chain->filter_chain_lock);
2005 return;
2006 }
2007
2008 tcf_proto_signal_destroying(chain, tp);
2009 next = tcf_chain_dereference(chain_info.next, chain);
2010 if (tp == chain->filter_chain)
2011 tcf_chain0_head_change(chain, next);
2012 RCU_INIT_POINTER(*chain_info.pprev, next);
2013 mutex_unlock(&chain->filter_chain_lock);
2014
2015 tcf_proto_put(tp, rtnl_held, extack);
2016 }
2017
2018 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
2019 struct tcf_chain_info *chain_info,
2020 u32 protocol, u32 prio,
2021 bool prio_allocate)
2022 {
2023 struct tcf_proto **pprev;
2024 struct tcf_proto *tp;
2025
2026 /* Check the chain for an existing proto-tcf with this priority */
2027 for (pprev = &chain->filter_chain;
2028 (tp = tcf_chain_dereference(*pprev, chain));
2029 pprev = &tp->next) {
2030 if (tp->prio >= prio) {
2031 if (tp->prio == prio) {
2032 if (prio_allocate ||
2033 (tp->protocol != protocol && protocol))
2034 return ERR_PTR(-EINVAL);
2035 } else {
2036 tp = NULL;
2037 }
2038 break;
2039 }
2040 }
2041 chain_info->pprev = pprev;
2042 if (tp) {
2043 chain_info->next = tp->next;
2044 tcf_proto_get(tp);
2045 } else {
2046 chain_info->next = NULL;
2047 }
2048 return tp;
2049 }
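/* Example (illustrative): with tps at prio 10 (ETH_P_IP) and prio 20
 * (ETH_P_ALL) on the chain, looking up prio 10 / ETH_P_IP returns the first
 * tp with a reference taken, while looking up prio 15 returns NULL but
 * leaves chain_info->pprev pointing at the prio-20 slot, i.e. the insertion
 * point that keeps the list sorted by ascending prio.
 */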
2050
2051 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
2052 struct tcf_proto *tp, struct tcf_block *block,
2053 struct Qdisc *q, u32 parent, void *fh,
2054 u32 portid, u32 seq, u16 flags, int event,
2055 bool terse_dump, bool rtnl_held,
2056 struct netlink_ext_ack *extack)
2057 {
2058 struct tcmsg *tcm;
2059 struct nlmsghdr *nlh;
2060 unsigned char *b = skb_tail_pointer(skb);
2061
2062 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2063 if (!nlh)
2064 goto out_nlmsg_trim;
2065 tcm = nlmsg_data(nlh);
2066 tcm->tcm_family = AF_UNSPEC;
2067 tcm->tcm__pad1 = 0;
2068 tcm->tcm__pad2 = 0;
2069 if (q) {
2070 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
2071 tcm->tcm_parent = parent;
2072 } else {
2073 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2074 tcm->tcm_block_index = block->index;
2075 }
2076 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
2077 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
2078 goto nla_put_failure;
2079 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
2080 goto nla_put_failure;
2081 if (!fh) {
2082 tcm->tcm_handle = 0;
2083 } else if (terse_dump) {
2084 if (tp->ops->terse_dump) {
2085 if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
2086 rtnl_held) < 0)
2087 goto nla_put_failure;
2088 } else {
2089 goto cls_op_not_supp;
2090 }
2091 } else {
2092 if (tp->ops->dump &&
2093 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2094 goto nla_put_failure;
2095 }
2096
2097 if (extack && extack->_msg &&
2098 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2099 goto nla_put_failure;
2100
2101 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2102
2103 return skb->len;
2104
2105 out_nlmsg_trim:
2106 nla_put_failure:
2107 cls_op_not_supp:
2108 nlmsg_trim(skb, b);
2109 return -1;
2110 }
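/* Resulting message layout (sketch): nlmsghdr | tcmsg | TCA_KIND |
 * TCA_CHAIN <index> | classifier-specific attributes from ->dump() or
 * ->terse_dump() | optional TCA_EXT_WARN_MSG. Note that tcm_info packs prio
 * into the major half and protocol into the minor half via TC_H_MAKE(),
 * mirroring how requests encode them.
 */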
2111
2112 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2113 struct nlmsghdr *n, struct tcf_proto *tp,
2114 struct tcf_block *block, struct Qdisc *q,
2115 u32 parent, void *fh, int event, bool unicast,
2116 bool rtnl_held, struct netlink_ext_ack *extack)
2117 {
2118 struct sk_buff *skb;
2119 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2120 int err = 0;
2121
2122 if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2123 return 0;
2124
2125 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2126 if (!skb)
2127 return -ENOBUFS;
2128
2129 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2130 n->nlmsg_seq, n->nlmsg_flags, event,
2131 false, rtnl_held, extack) <= 0) {
2132 kfree_skb(skb);
2133 return -EINVAL;
2134 }
2135
2136 if (unicast)
2137 err = rtnl_unicast(skb, net, portid);
2138 else
2139 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2140 n->nlmsg_flags & NLM_F_ECHO);
2141 return err;
2142 }
2143
2144 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2145 struct nlmsghdr *n, struct tcf_proto *tp,
2146 struct tcf_block *block, struct Qdisc *q,
2147 u32 parent, void *fh, bool *last, bool rtnl_held,
2148 struct netlink_ext_ack *extack)
2149 {
2150 struct sk_buff *skb;
2151 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2152 int err;
2153
2154 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2155 return tp->ops->delete(tp, fh, last, rtnl_held, extack);
2156
2157 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2158 if (!skb)
2159 return -ENOBUFS;
2160
2161 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2162 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
2163 false, rtnl_held, extack) <= 0) {
2164 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2165 kfree_skb(skb);
2166 return -EINVAL;
2167 }
2168
2169 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2170 if (err) {
2171 kfree_skb(skb);
2172 return err;
2173 }
2174
2175 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2176 n->nlmsg_flags & NLM_F_ECHO);
2177 if (err < 0)
2178 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2179
2180 return err;
2181 }
2182
2183 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2184 struct tcf_block *block, struct Qdisc *q,
2185 u32 parent, struct nlmsghdr *n,
2186 struct tcf_chain *chain, int event,
2187 struct netlink_ext_ack *extack)
2188 {
2189 struct tcf_proto *tp;
2190
2191 for (tp = tcf_get_next_proto(chain, NULL);
2192 tp; tp = tcf_get_next_proto(chain, tp))
2193 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2194 event, false, true, extack);
2195 }
2196
2197 static void tfilter_put(struct tcf_proto *tp, void *fh)
2198 {
2199 if (tp->ops->put && fh)
2200 tp->ops->put(tp, fh);
2201 }
2202
2203 static bool is_qdisc_ingress(__u32 classid)
2204 {
2205 return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2206 }
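/* Example (illustrative): a filter attached with parent
 * TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS), i.e. iproute2's "ingress" hook,
 * satisfies this test, so tc_new_tfilter() below initializes its actions
 * with TCA_ACT_FLAGS_AT_INGRESS set.
 */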
2207
2208 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2209 struct netlink_ext_ack *extack)
2210 {
2211 struct net *net = sock_net(skb->sk);
2212 struct nlattr *tca[TCA_MAX + 1];
2213 char name[IFNAMSIZ];
2214 struct tcmsg *t;
2215 u32 protocol;
2216 u32 prio;
2217 bool prio_allocate;
2218 u32 parent;
2219 u32 chain_index;
2220 struct Qdisc *q;
2221 struct tcf_chain_info chain_info;
2222 struct tcf_chain *chain;
2223 struct tcf_block *block;
2224 struct tcf_proto *tp;
2225 unsigned long cl;
2226 void *fh;
2227 int err;
2228 int tp_created;
2229 bool rtnl_held = false;
2230 u32 flags;
2231
2232 replay:
2233 tp_created = 0;
2234
2235 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2236 rtm_tca_policy, extack);
2237 if (err < 0)
2238 return err;
2239
2240 t = nlmsg_data(n);
2241 protocol = TC_H_MIN(t->tcm_info);
2242 prio = TC_H_MAJ(t->tcm_info);
2243 prio_allocate = false;
2244 parent = t->tcm_parent;
2245 tp = NULL;
2246 cl = 0;
2247 block = NULL;
2248 q = NULL;
2249 chain = NULL;
2250 flags = 0;
2251
2252 if (prio == 0) {
2253 /* If no priority is provided by the user,
2254 * we allocate one.
2255 */
2256 if (n->nlmsg_flags & NLM_F_CREATE) {
2257 prio = TC_H_MAKE(0x80000000U, 0U);
2258 prio_allocate = true;
2259 } else {
2260 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2261 return -ENOENT;
2262 }
2263 }
2264
2265 /* Find head of filter chain. */
2266
2267 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2268 if (err)
2269 return err;
2270
2271 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2272 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2273 err = -EINVAL;
2274 goto errout;
2275 }
2276
2277 /* Take the rtnl mutex if rtnl_held was set to true on a previous
2278 * iteration, the block is shared (no qdisc found), the qdisc is not
2279 * unlocked, the classifier type is not specified, or the classifier is
2280 * not unlocked. */
2281 if (rtnl_held ||
2282 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2283 !tcf_proto_is_unlocked(name)) {
2284 rtnl_held = true;
2285 rtnl_lock();
2286 }
2287
2288 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2289 if (err)
2290 goto errout;
2291
2292 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2293 extack);
2294 if (IS_ERR(block)) {
2295 err = PTR_ERR(block);
2296 goto errout;
2297 }
2298 block->classid = parent;
2299
2300 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2301 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2302 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2303 err = -EINVAL;
2304 goto errout;
2305 }
2306 chain = tcf_chain_get(block, chain_index, true);
2307 if (!chain) {
2308 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2309 err = -ENOMEM;
2310 goto errout;
2311 }
2312
2313 mutex_lock(&chain->filter_chain_lock);
2314 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2315 prio, prio_allocate);
2316 if (IS_ERR(tp)) {
2317 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2318 err = PTR_ERR(tp);
2319 goto errout_locked;
2320 }
2321
2322 if (tp == NULL) {
2323 struct tcf_proto *tp_new = NULL;
2324
2325 if (chain->flushing) {
2326 err = -EAGAIN;
2327 goto errout_locked;
2328 }
2329
2330 /* Proto-tcf does not exist, create new one */
2331
2332 if (tca[TCA_KIND] == NULL || !protocol) {
2333 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2334 err = -EINVAL;
2335 goto errout_locked;
2336 }
2337
2338 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2339 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2340 err = -ENOENT;
2341 goto errout_locked;
2342 }
2343
2344 if (prio_allocate)
2345 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2346 &chain_info));
2347
2348 mutex_unlock(&chain->filter_chain_lock);
2349 tp_new = tcf_proto_create(name, protocol, prio, chain,
2350 rtnl_held, extack);
2351 if (IS_ERR(tp_new)) {
2352 err = PTR_ERR(tp_new);
2353 goto errout_tp;
2354 }
2355
2356 tp_created = 1;
2357 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2358 rtnl_held);
2359 if (IS_ERR(tp)) {
2360 err = PTR_ERR(tp);
2361 goto errout_tp;
2362 }
2363 } else {
2364 mutex_unlock(&chain->filter_chain_lock);
2365 }
2366
2367 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2368 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2369 err = -EINVAL;
2370 goto errout;
2371 }
2372
2373 fh = tp->ops->get(tp, t->tcm_handle);
2374
2375 if (!fh) {
2376 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2377 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2378 err = -ENOENT;
2379 goto errout;
2380 }
2381 } else if (n->nlmsg_flags & NLM_F_EXCL) {
2382 tfilter_put(tp, fh);
2383 NL_SET_ERR_MSG(extack, "Filter already exists");
2384 err = -EEXIST;
2385 goto errout;
2386 }
2387
2388 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2389 tfilter_put(tp, fh);
2390 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2391 err = -EINVAL;
2392 goto errout;
2393 }
2394
2395 if (!(n->nlmsg_flags & NLM_F_CREATE))
2396 flags |= TCA_ACT_FLAGS_REPLACE;
2397 if (!rtnl_held)
2398 flags |= TCA_ACT_FLAGS_NO_RTNL;
2399 if (is_qdisc_ingress(parent))
2400 flags |= TCA_ACT_FLAGS_AT_INGRESS;
2401 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2402 flags, extack);
2403 if (err == 0) {
2404 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2405 RTM_NEWTFILTER, false, rtnl_held, extack);
2406 tfilter_put(tp, fh);
2407 tcf_block_filter_cnt_update(block, &tp->counted, true);
2408 /* q pointer is NULL for shared blocks */
2409 if (q)
2410 q->flags &= ~TCQ_F_CAN_BYPASS;
2411 }
2412
2413 errout:
2414 if (err && tp_created)
2415 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2416 errout_tp:
2417 if (chain) {
2418 if (tp && !IS_ERR(tp))
2419 tcf_proto_put(tp, rtnl_held, NULL);
2420 if (!tp_created)
2421 tcf_chain_put(chain);
2422 }
2423 tcf_block_release(q, block, rtnl_held);
2424
2425 if (rtnl_held)
2426 rtnl_unlock();
2427
2428 if (err == -EAGAIN) {
2429 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2430 * of target chain.
2431 */
2432 rtnl_held = true;
2433 /* Replay the request. */
2434 goto replay;
2435 }
2436 return err;
2437
2438 errout_locked:
2439 mutex_unlock(&chain->filter_chain_lock);
2440 goto errout;
2441 }
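/* Illustrative mapping from user space (assuming iproute2 syntax):
 *
 *	tc filter add dev eth0 ingress prio 1 protocol ip \
 *		flower dst_ip 10.0.0.1 action drop
 *
 * arrives here as RTM_NEWTFILTER with NLM_F_CREATE, tcm_info carrying
 * prio 1 / ETH_P_IP, TCA_KIND "flower" and the action nested inside
 * TCA_OPTIONS; the tp->ops->change() call above is then flower's own
 * change implementation.
 */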
2442
2443 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2444 struct netlink_ext_ack *extack)
2445 {
2446 struct net *net = sock_net(skb->sk);
2447 struct nlattr *tca[TCA_MAX + 1];
2448 char name[IFNAMSIZ];
2449 struct tcmsg *t;
2450 u32 protocol;
2451 u32 prio;
2452 u32 parent;
2453 u32 chain_index;
2454 struct Qdisc *q = NULL;
2455 struct tcf_chain_info chain_info;
2456 struct tcf_chain *chain = NULL;
2457 struct tcf_block *block = NULL;
2458 struct tcf_proto *tp = NULL;
2459 unsigned long cl = 0;
2460 void *fh = NULL;
2461 int err;
2462 bool rtnl_held = false;
2463
2464 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2465 rtm_tca_policy, extack);
2466 if (err < 0)
2467 return err;
2468
2469 t = nlmsg_data(n);
2470 protocol = TC_H_MIN(t->tcm_info);
2471 prio = TC_H_MAJ(t->tcm_info);
2472 parent = t->tcm_parent;
2473
2474 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2475 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2476 return -ENOENT;
2477 }
2478
2479 /* Find head of filter chain. */
2480
2481 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2482 if (err)
2483 return err;
2484
2485 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2486 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2487 err = -EINVAL;
2488 goto errout;
2489 }
2490 /* Take the rtnl mutex if flushing the whole chain, the block is shared
2491 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2492 * not specified, or the classifier is not unlocked.
2493 */
2494 if (!prio ||
2495 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2496 !tcf_proto_is_unlocked(name)) {
2497 rtnl_held = true;
2498 rtnl_lock();
2499 }
2500
2501 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2502 if (err)
2503 goto errout;
2504
2505 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2506 extack);
2507 if (IS_ERR(block)) {
2508 err = PTR_ERR(block);
2509 goto errout;
2510 }
2511
2512 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2513 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2514 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2515 err = -EINVAL;
2516 goto errout;
2517 }
2518 chain = tcf_chain_get(block, chain_index, false);
2519 if (!chain) {
2520 /* User requested flush on non-existent chain. Nothing to do,
2521 * so just return success.
2522 */
2523 if (prio == 0) {
2524 err = 0;
2525 goto errout;
2526 }
2527 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2528 err = -ENOENT;
2529 goto errout;
2530 }
2531
2532 if (prio == 0) {
2533 tfilter_notify_chain(net, skb, block, q, parent, n,
2534 chain, RTM_DELTFILTER, extack);
2535 tcf_chain_flush(chain, rtnl_held);
2536 err = 0;
2537 goto errout;
2538 }
2539
2540 mutex_lock(&chain->filter_chain_lock);
2541 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2542 prio, false);
2543 if (!tp || IS_ERR(tp)) {
2544 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2545 err = tp ? PTR_ERR(tp) : -ENOENT;
2546 goto errout_locked;
2547 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2548 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2549 err = -EINVAL;
2550 goto errout_locked;
2551 } else if (t->tcm_handle == 0) {
2552 tcf_proto_signal_destroying(chain, tp);
2553 tcf_chain_tp_remove(chain, &chain_info, tp);
2554 mutex_unlock(&chain->filter_chain_lock);
2555
2556 tcf_proto_put(tp, rtnl_held, NULL);
2557 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2558 RTM_DELTFILTER, false, rtnl_held, extack);
2559 err = 0;
2560 goto errout;
2561 }
2562 mutex_unlock(&chain->filter_chain_lock);
2563
2564 fh = tp->ops->get(tp, t->tcm_handle);
2565
2566 if (!fh) {
2567 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2568 err = -ENOENT;
2569 } else {
2570 bool last;
2571
2572 err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
2573 &last, rtnl_held, extack);
2574
2575 if (err)
2576 goto errout;
2577 if (last)
2578 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2579 }
2580
2581 errout:
2582 if (chain) {
2583 if (tp && !IS_ERR(tp))
2584 tcf_proto_put(tp, rtnl_held, NULL);
2585 tcf_chain_put(chain);
2586 }
2587 tcf_block_release(q, block, rtnl_held);
2588
2589 if (rtnl_held)
2590 rtnl_unlock();
2591
2592 return err;
2593
2594 errout_locked:
2595 mutex_unlock(&chain->filter_chain_lock);
2596 goto errout;
2597 }
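/* Example (illustrative): "tc filter del dev eth0 ingress" sends
 * RTM_DELTFILTER with prio 0 and no kind/handle, which takes the
 * tcf_chain_flush() path above; deleting one specific filter instead
 * supplies prio, protocol and handle and goes through tfilter_del_notify().
 */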
2598
2599 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2600 struct netlink_ext_ack *extack)
2601 {
2602 struct net *net = sock_net(skb->sk);
2603 struct nlattr *tca[TCA_MAX + 1];
2604 char name[IFNAMSIZ];
2605 struct tcmsg *t;
2606 u32 protocol;
2607 u32 prio;
2608 u32 parent;
2609 u32 chain_index;
2610 struct Qdisc *q = NULL;
2611 struct tcf_chain_info chain_info;
2612 struct tcf_chain *chain = NULL;
2613 struct tcf_block *block = NULL;
2614 struct tcf_proto *tp = NULL;
2615 unsigned long cl = 0;
2616 void *fh = NULL;
2617 int err;
2618 bool rtnl_held = false;
2619
2620 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2621 rtm_tca_policy, extack);
2622 if (err < 0)
2623 return err;
2624
2625 t = nlmsg_data(n);
2626 protocol = TC_H_MIN(t->tcm_info);
2627 prio = TC_H_MAJ(t->tcm_info);
2628 parent = t->tcm_parent;
2629
2630 if (prio == 0) {
2631 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2632 return -ENOENT;
2633 }
2634
2635 /* Find head of filter chain. */
2636
2637 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2638 if (err)
2639 return err;
2640
2641 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2642 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2643 err = -EINVAL;
2644 goto errout;
2645 }
2646 /* Take the rtnl mutex if the block is shared (no qdisc found), the
2647 * qdisc is not unlocked, the classifier type is not specified, or the
2648 * classifier is not unlocked.
2649 */
2650 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2651 !tcf_proto_is_unlocked(name)) {
2652 rtnl_held = true;
2653 rtnl_lock();
2654 }
2655
2656 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2657 if (err)
2658 goto errout;
2659
2660 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2661 extack);
2662 if (IS_ERR(block)) {
2663 err = PTR_ERR(block);
2664 goto errout;
2665 }
2666
2667 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2668 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2669 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2670 err = -EINVAL;
2671 goto errout;
2672 }
2673 chain = tcf_chain_get(block, chain_index, false);
2674 if (!chain) {
2675 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2676 err = -EINVAL;
2677 goto errout;
2678 }
2679
2680 mutex_lock(&chain->filter_chain_lock);
2681 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2682 prio, false);
2683 mutex_unlock(&chain->filter_chain_lock);
2684 if (!tp || IS_ERR(tp)) {
2685 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2686 err = tp ? PTR_ERR(tp) : -ENOENT;
2687 goto errout;
2688 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2689 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2690 err = -EINVAL;
2691 goto errout;
2692 }
2693
2694 fh = tp->ops->get(tp, t->tcm_handle);
2695
2696 if (!fh) {
2697 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2698 err = -ENOENT;
2699 } else {
2700 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2701 fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2702 if (err < 0)
2703 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2704 }
2705
2706 tfilter_put(tp, fh);
2707 errout:
2708 if (chain) {
2709 if (tp && !IS_ERR(tp))
2710 tcf_proto_put(tp, rtnl_held, NULL);
2711 tcf_chain_put(chain);
2712 }
2713 tcf_block_release(q, block, rtnl_held);
2714
2715 if (rtnl_held)
2716 rtnl_unlock();
2717
2718 return err;
2719 }
2720
2721 struct tcf_dump_args {
2722 struct tcf_walker w;
2723 struct sk_buff *skb;
2724 struct netlink_callback *cb;
2725 struct tcf_block *block;
2726 struct Qdisc *q;
2727 u32 parent;
2728 bool terse_dump;
2729 };
2730
2731 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2732 {
2733 struct tcf_dump_args *a = (void *)arg;
2734 struct net *net = sock_net(a->skb->sk);
2735
2736 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2737 n, NETLINK_CB(a->cb->skb).portid,
2738 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2739 RTM_NEWTFILTER, a->terse_dump, true, NULL);
2740 }
2741
2742 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2743 struct sk_buff *skb, struct netlink_callback *cb,
2744 long index_start, long *p_index, bool terse)
2745 {
2746 struct net *net = sock_net(skb->sk);
2747 struct tcf_block *block = chain->block;
2748 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2749 struct tcf_proto *tp, *tp_prev;
2750 struct tcf_dump_args arg;
2751
2752 for (tp = __tcf_get_next_proto(chain, NULL);
2753 tp;
2754 tp_prev = tp,
2755 tp = __tcf_get_next_proto(chain, tp),
2756 tcf_proto_put(tp_prev, true, NULL),
2757 (*p_index)++) {
2758 if (*p_index < index_start)
2759 continue;
2760 if (TC_H_MAJ(tcm->tcm_info) &&
2761 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2762 continue;
2763 if (TC_H_MIN(tcm->tcm_info) &&
2764 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2765 continue;
2766 if (*p_index > index_start)
2767 memset(&cb->args[1], 0,
2768 sizeof(cb->args) - sizeof(cb->args[0]));
2769 if (cb->args[1] == 0) {
2770 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2771 NETLINK_CB(cb->skb).portid,
2772 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2773 RTM_NEWTFILTER, false, true, NULL) <= 0)
2774 goto errout;
2775 cb->args[1] = 1;
2776 }
2777 if (!tp->ops->walk)
2778 continue;
2779 arg.w.fn = tcf_node_dump;
2780 arg.skb = skb;
2781 arg.cb = cb;
2782 arg.block = block;
2783 arg.q = q;
2784 arg.parent = parent;
2785 arg.w.stop = 0;
2786 arg.w.skip = cb->args[1] - 1;
2787 arg.w.count = 0;
2788 arg.w.cookie = cb->args[2];
2789 arg.terse_dump = terse;
2790 tp->ops->walk(tp, &arg.w, true);
2791 cb->args[2] = arg.w.cookie;
2792 cb->args[1] = arg.w.count + 1;
2793 if (arg.w.stop)
2794 goto errout;
2795 }
2796 return true;
2797
2798 errout:
2799 tcf_proto_put(tp, true, NULL);
2800 return false;
2801 }
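/* Dump state layout (as used above): cb->args[0] is the flat filter index to
 * resume from across dump invocations, cb->args[1] tracks progress within
 * the current tp (0 means the tp header itself still needs dumping) and
 * cb->args[2] is an opaque walker cookie owned by the classifier's ->walk()
 * implementation.
 */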
2802
2803 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2804 [TCA_CHAIN] = { .type = NLA_U32 },
2805 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2806 };
2807
2808 /* called with RTNL */
2809 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2810 {
2811 struct tcf_chain *chain, *chain_prev;
2812 struct net *net = sock_net(skb->sk);
2813 struct nlattr *tca[TCA_MAX + 1];
2814 struct Qdisc *q = NULL;
2815 struct tcf_block *block;
2816 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2817 bool terse_dump = false;
2818 long index_start;
2819 long index;
2820 u32 parent;
2821 int err;
2822
2823 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2824 return skb->len;
2825
2826 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2827 tcf_tfilter_dump_policy, cb->extack);
2828 if (err)
2829 return err;
2830
2831 if (tca[TCA_DUMP_FLAGS]) {
2832 struct nla_bitfield32 flags =
2833 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2834
2835 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2836 }
2837
2838 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2839 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2840 if (!block)
2841 goto out;
2842 /* If we work with a block index, q is NULL and the parent value
2843 * is never used in the following code; the check in tcf_fill_node
2844 * prevents it. However, the compiler cannot see that far, so set
2845 * parent to zero to silence the warning about it being used
2846 * uninitialized.
2847 */
2848 parent = 0;
2849 } else {
2850 const struct Qdisc_class_ops *cops;
2851 struct net_device *dev;
2852 unsigned long cl = 0;
2853
2854 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2855 if (!dev)
2856 return skb->len;
2857
2858 parent = tcm->tcm_parent;
2859 if (!parent)
2860 q = rtnl_dereference(dev->qdisc);
2861 else
2862 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2863 if (!q)
2864 goto out;
2865 cops = q->ops->cl_ops;
2866 if (!cops)
2867 goto out;
2868 if (!cops->tcf_block)
2869 goto out;
2870 if (TC_H_MIN(tcm->tcm_parent)) {
2871 cl = cops->find(q, tcm->tcm_parent);
2872 if (cl == 0)
2873 goto out;
2874 }
2875 block = cops->tcf_block(q, cl, NULL);
2876 if (!block)
2877 goto out;
2878 parent = block->classid;
2879 if (tcf_block_shared(block))
2880 q = NULL;
2881 }
2882
2883 index_start = cb->args[0];
2884 index = 0;
2885
2886 for (chain = __tcf_get_next_chain(block, NULL);
2887 chain;
2888 chain_prev = chain,
2889 chain = __tcf_get_next_chain(block, chain),
2890 tcf_chain_put(chain_prev)) {
2891 if (tca[TCA_CHAIN] &&
2892 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2893 continue;
2894 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2895 index_start, &index, terse_dump)) {
2896 tcf_chain_put(chain);
2897 err = -EMSGSIZE;
2898 break;
2899 }
2900 }
2901
2902 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2903 tcf_block_refcnt_put(block, true);
2904 cb->args[0] = index;
2905
2906 out:
2907 /* If we made no progress, the error (EMSGSIZE) is real */
2908 if (skb->len == 0 && err)
2909 return err;
2910 return skb->len;
2911 }
2912
2913 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2914 void *tmplt_priv, u32 chain_index,
2915 struct net *net, struct sk_buff *skb,
2916 struct tcf_block *block,
2917 u32 portid, u32 seq, u16 flags, int event,
2918 struct netlink_ext_ack *extack)
2919 {
2920 unsigned char *b = skb_tail_pointer(skb);
2921 const struct tcf_proto_ops *ops;
2922 struct nlmsghdr *nlh;
2923 struct tcmsg *tcm;
2924 void *priv;
2925
2926 ops = tmplt_ops;
2927 priv = tmplt_priv;
2928
2929 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2930 if (!nlh)
2931 goto out_nlmsg_trim;
2932 tcm = nlmsg_data(nlh);
2933 tcm->tcm_family = AF_UNSPEC;
2934 tcm->tcm__pad1 = 0;
2935 tcm->tcm__pad2 = 0;
2936 tcm->tcm_handle = 0;
2937 if (block->q) {
2938 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2939 tcm->tcm_parent = block->q->handle;
2940 } else {
2941 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2942 tcm->tcm_block_index = block->index;
2943 }
2944
2945 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2946 goto nla_put_failure;
2947
2948 if (ops) {
2949 if (nla_put_string(skb, TCA_KIND, ops->kind))
2950 goto nla_put_failure;
2951 if (ops->tmplt_dump(skb, net, priv) < 0)
2952 goto nla_put_failure;
2953 }
2954
2955 if (extack && extack->_msg &&
2956 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2957 goto out_nlmsg_trim;
2958
2959 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2960
2961 return skb->len;
2962
2963 out_nlmsg_trim:
2964 nla_put_failure:
2965 nlmsg_trim(skb, b);
2966 return -EMSGSIZE;
2967 }
2968
2969 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2970 u32 seq, u16 flags, int event, bool unicast,
2971 struct netlink_ext_ack *extack)
2972 {
2973 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2974 struct tcf_block *block = chain->block;
2975 struct net *net = block->net;
2976 struct sk_buff *skb;
2977 int err = 0;
2978
2979 if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
2980 return 0;
2981
2982 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2983 if (!skb)
2984 return -ENOBUFS;
2985
2986 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2987 chain->index, net, skb, block, portid,
2988 seq, flags, event, extack) <= 0) {
2989 kfree_skb(skb);
2990 return -EINVAL;
2991 }
2992
2993 if (unicast)
2994 err = rtnl_unicast(skb, net, portid);
2995 else
2996 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2997 flags & NLM_F_ECHO);
2998
2999 return err;
3000 }
3001
3002 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
3003 void *tmplt_priv, u32 chain_index,
3004 struct tcf_block *block, struct sk_buff *oskb,
3005 u32 seq, u16 flags)
3006 {
3007 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
3008 struct net *net = block->net;
3009 struct sk_buff *skb;
3010
3011 if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
3012 return 0;
3013
3014 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3015 if (!skb)
3016 return -ENOBUFS;
3017
3018 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
3019 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
3020 kfree_skb(skb);
3021 return -EINVAL;
3022 }
3023
3024 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
3025 }
3026
3027 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
3028 struct nlattr **tca,
3029 struct netlink_ext_ack *extack)
3030 {
3031 const struct tcf_proto_ops *ops;
3032 char name[IFNAMSIZ];
3033 void *tmplt_priv;
3034
3035 /* If kind is not set, the user did not specify a template. */
3036 if (!tca[TCA_KIND])
3037 return 0;
3038
3039 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
3040 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
3041 return -EINVAL;
3042 }
3043
3044 ops = tcf_proto_lookup_ops(name, true, extack);
3045 if (IS_ERR(ops))
3046 return PTR_ERR(ops);
3047 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
3048 !ops->tmplt_reoffload) {
3049 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
3050 module_put(ops->owner);
3051 return -EOPNOTSUPP;
3052 }
3053
3054 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
3055 if (IS_ERR(tmplt_priv)) {
3056 module_put(ops->owner);
3057 return PTR_ERR(tmplt_priv);
3058 }
3059 chain->tmplt_ops = ops;
3060 chain->tmplt_priv = tmplt_priv;
3061 return 0;
3062 }
3063
3064 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
3065 void *tmplt_priv)
3066 {
3067 /* If template ops are not set, there is no work to do for us. */
3068 if (!tmplt_ops)
3069 return;
3070
3071 tmplt_ops->tmplt_destroy(tmplt_priv);
3072 module_put(tmplt_ops->owner);
3073 }
3074
3075 /* Add/delete/get a chain */
3076
3077 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
3078 struct netlink_ext_ack *extack)
3079 {
3080 struct net *net = sock_net(skb->sk);
3081 struct nlattr *tca[TCA_MAX + 1];
3082 struct tcmsg *t;
3083 u32 parent;
3084 u32 chain_index;
3085 struct Qdisc *q;
3086 struct tcf_chain *chain;
3087 struct tcf_block *block;
3088 unsigned long cl;
3089 int err;
3090
3091 replay:
3092 q = NULL;
3093 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3094 rtm_tca_policy, extack);
3095 if (err < 0)
3096 return err;
3097
3098 t = nlmsg_data(n);
3099 parent = t->tcm_parent;
3100 cl = 0;
3101
3102 block = tcf_block_find(net, &q, &parent, &cl,
3103 t->tcm_ifindex, t->tcm_block_index, extack);
3104 if (IS_ERR(block))
3105 return PTR_ERR(block);
3106
3107 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3108 if (chain_index > TC_ACT_EXT_VAL_MASK) {
3109 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3110 err = -EINVAL;
3111 goto errout_block;
3112 }
3113
3114 mutex_lock(&block->lock);
3115 chain = tcf_chain_lookup(block, chain_index);
3116 if (n->nlmsg_type == RTM_NEWCHAIN) {
3117 if (chain) {
3118 if (tcf_chain_held_by_acts_only(chain)) {
3119 /* The chain exists only because there is
3120 * some action referencing it.
3121 */
3122 tcf_chain_hold(chain);
3123 } else {
3124 NL_SET_ERR_MSG(extack, "Filter chain already exists");
3125 err = -EEXIST;
3126 goto errout_block_locked;
3127 }
3128 } else {
3129 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3130 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3131 err = -ENOENT;
3132 goto errout_block_locked;
3133 }
3134 chain = tcf_chain_create(block, chain_index);
3135 if (!chain) {
3136 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3137 err = -ENOMEM;
3138 goto errout_block_locked;
3139 }
3140 }
3141 } else {
3142 if (!chain || tcf_chain_held_by_acts_only(chain)) {
3143 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3144 err = -EINVAL;
3145 goto errout_block_locked;
3146 }
3147 tcf_chain_hold(chain);
3148 }
3149
3150 if (n->nlmsg_type == RTM_NEWCHAIN) {
3151 /* Modifying chain requires holding parent block lock. In case
3152 * the chain was successfully added, take a reference to the
3153 * chain. This ensures that an empty chain does not disappear at
3154 * the end of this function.
3155 */
3156 tcf_chain_hold(chain);
3157 chain->explicitly_created = true;
3158 }
3159 mutex_unlock(&block->lock);
3160
3161 switch (n->nlmsg_type) {
3162 case RTM_NEWCHAIN:
3163 err = tc_chain_tmplt_add(chain, net, tca, extack);
3164 if (err) {
3165 tcf_chain_put_explicitly_created(chain);
3166 goto errout;
3167 }
3168
3169 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3170 RTM_NEWCHAIN, false, extack);
3171 break;
3172 case RTM_DELCHAIN:
3173 tfilter_notify_chain(net, skb, block, q, parent, n,
3174 chain, RTM_DELTFILTER, extack);
3175 /* Flush the chain first as the user requested chain removal. */
3176 tcf_chain_flush(chain, true);
3177 /* In case the chain was successfully deleted, put a reference
3178 * to the chain previously taken during addition.
3179 */
3180 tcf_chain_put_explicitly_created(chain);
3181 break;
3182 case RTM_GETCHAIN:
3183 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3184 n->nlmsg_flags, n->nlmsg_type, true, extack);
3185 if (err < 0)
3186 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3187 break;
3188 default:
3189 err = -EOPNOTSUPP;
3190 NL_SET_ERR_MSG(extack, "Unsupported message type");
3191 goto errout;
3192 }
3193
3194 errout:
3195 tcf_chain_put(chain);
3196 errout_block:
3197 tcf_block_release(q, block, true);
3198 if (err == -EAGAIN)
3199 /* Replay the request. */
3200 goto replay;
3201 return err;
3202
3203 errout_block_locked:
3204 mutex_unlock(&block->lock);
3205 goto errout_block;
3206 }
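/* Example (illustrative): "tc chain add dev eth0 ingress chain 5 protocol ip
 * flower dst_ip 10.0.0.0/8" arrives as RTM_NEWCHAIN with TCA_CHAIN = 5 and
 * TCA_KIND "flower"; tc_chain_tmplt_add() above then binds a flower template
 * to the new chain, which constrains the kind of every filter added to it
 * later.
 */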
3207
3208 /* called with RTNL */
3209 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3210 {
3211 struct net *net = sock_net(skb->sk);
3212 struct nlattr *tca[TCA_MAX + 1];
3213 struct Qdisc *q = NULL;
3214 struct tcf_block *block;
3215 struct tcmsg *tcm = nlmsg_data(cb->nlh);
3216 struct tcf_chain *chain;
3217 long index_start;
3218 long index;
3219 int err;
3220
3221 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3222 return skb->len;
3223
3224 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3225 rtm_tca_policy, cb->extack);
3226 if (err)
3227 return err;
3228
3229 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3230 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3231 if (!block)
3232 goto out;
3233 } else {
3234 const struct Qdisc_class_ops *cops;
3235 struct net_device *dev;
3236 unsigned long cl = 0;
3237
3238 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3239 if (!dev)
3240 return skb->len;
3241
3242 if (!tcm->tcm_parent)
3243 q = rtnl_dereference(dev->qdisc);
3244 else
3245 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3246
3247 if (!q)
3248 goto out;
3249 cops = q->ops->cl_ops;
3250 if (!cops)
3251 goto out;
3252 if (!cops->tcf_block)
3253 goto out;
3254 if (TC_H_MIN(tcm->tcm_parent)) {
3255 cl = cops->find(q, tcm->tcm_parent);
3256 if (cl == 0)
3257 goto out;
3258 }
3259 block = cops->tcf_block(q, cl, NULL);
3260 if (!block)
3261 goto out;
3262 if (tcf_block_shared(block))
3263 q = NULL;
3264 }
3265
3266 index_start = cb->args[0];
3267 index = 0;
3268
3269 mutex_lock(&block->lock);
3270 list_for_each_entry(chain, &block->chain_list, list) {
3271 if ((tca[TCA_CHAIN] &&
3272 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3273 continue;
3274 if (index < index_start) {
3275 index++;
3276 continue;
3277 }
3278 if (tcf_chain_held_by_acts_only(chain))
3279 continue;
3280 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3281 chain->index, net, skb, block,
3282 NETLINK_CB(cb->skb).portid,
3283 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3284 RTM_NEWCHAIN, NULL);
3285 if (err <= 0)
3286 break;
3287 index++;
3288 }
3289 mutex_unlock(&block->lock);
3290
3291 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3292 tcf_block_refcnt_put(block, true);
3293 cb->args[0] = index;
3294
3295 out:
3296 /* If we made no progress, the error (EMSGSIZE) is real */
3297 if (skb->len == 0 && err)
3298 return err;
3299 return skb->len;
3300 }
3301
3302 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3303 int police, struct tcf_proto *tp, u32 handle,
3304 bool use_action_miss)
3305 {
3306 int err = 0;
3307
3308 #ifdef CONFIG_NET_CLS_ACT
3309 exts->type = 0;
3310 exts->nr_actions = 0;
3311 exts->miss_cookie_node = NULL;
3312 /* Note: we do not yet own a reference on net.
3313 * That reference may be taken later via tcf_exts_get_net().
3314 */
3315 exts->net = net;
3316 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3317 GFP_KERNEL);
3318 if (!exts->actions)
3319 return -ENOMEM;
3320 #endif
3321
3322 exts->action = action;
3323 exts->police = police;
3324
3325 if (!use_action_miss)
3326 return 0;
3327
3328 err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3329 if (err)
3330 goto err_miss_alloc;
3331
3332 return 0;
3333
3334 err_miss_alloc:
3335 tcf_exts_destroy(exts);
3336 #ifdef CONFIG_NET_CLS_ACT
3337 exts->actions = NULL;
3338 #endif
3339 return err;
3340 }
3341 EXPORT_SYMBOL(tcf_exts_init_ex);
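/* Typical classifier usage (sketch, modeled loosely on cls_flower; the
 * filter struct and attribute names are assumptions):
 *
 *	err = tcf_exts_init_ex(&f->exts, net, TCA_FLOWER_ACT, 0, tp,
 *			       f->handle, true);
 *	if (err)
 *		goto errout;
 *	...
 *	tcf_exts_destroy(&f->exts);
 *
 * Passing use_action_miss = true is what allocates the per-exts miss cookie
 * that lets hardware report a partially executed action chain.
 */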
3342
3343 void tcf_exts_destroy(struct tcf_exts *exts)
3344 {
3345 tcf_exts_miss_cookie_base_destroy(exts);
3346
3347 #ifdef CONFIG_NET_CLS_ACT
3348 if (exts->actions) {
3349 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3350 kfree(exts->actions);
3351 }
3352 exts->nr_actions = 0;
3353 #endif
3354 }
3355 EXPORT_SYMBOL(tcf_exts_destroy);
3356
3357 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3358 struct nlattr *rate_tlv, struct tcf_exts *exts,
3359 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3360 {
3361 #ifdef CONFIG_NET_CLS_ACT
3362 {
3363 int init_res[TCA_ACT_MAX_PRIO] = {};
3364 struct tc_action *act;
3365 size_t attr_size = 0;
3366
3367 if (exts->police && tb[exts->police]) {
3368 struct tc_action_ops *a_o;
3369
3370 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3371 a_o = tc_action_load_ops(tb[exts->police], flags,
3372 extack);
3373 if (IS_ERR(a_o))
3374 return PTR_ERR(a_o);
3375 act = tcf_action_init_1(net, tp, tb[exts->police],
3376 rate_tlv, a_o, init_res, flags,
3377 extack);
3378 module_put(a_o->owner);
3379 if (IS_ERR(act))
3380 return PTR_ERR(act);
3381
3382 act->type = exts->type = TCA_OLD_COMPAT;
3383 exts->actions[0] = act;
3384 exts->nr_actions = 1;
3385 tcf_idr_insert_many(exts->actions, init_res);
3386 } else if (exts->action && tb[exts->action]) {
3387 int err;
3388
3389 flags |= TCA_ACT_FLAGS_BIND;
3390 err = tcf_action_init(net, tp, tb[exts->action],
3391 rate_tlv, exts->actions, init_res,
3392 &attr_size, flags, fl_flags,
3393 extack);
3394 if (err < 0)
3395 return err;
3396 exts->nr_actions = err;
3397 }
3398 }
3399 #else
3400 if ((exts->action && tb[exts->action]) ||
3401 (exts->police && tb[exts->police])) {
3402 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3403 return -EOPNOTSUPP;
3404 }
3405 #endif
3406
3407 return 0;
3408 }
3409 EXPORT_SYMBOL(tcf_exts_validate_ex);
3410
3411 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3412 struct nlattr *rate_tlv, struct tcf_exts *exts,
3413 u32 flags, struct netlink_ext_ack *extack)
3414 {
3415 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3416 flags, 0, extack);
3417 }
3418 EXPORT_SYMBOL(tcf_exts_validate);
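/* Sketch of the expected call from a classifier's ->change() path (tb[] and
 * est stand for the classifier's own parsed option array and rate estimator
 * attribute; the names are assumptions):
 *
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
 *	if (err < 0)
 *		return err;
 *
 * exts->action and exts->police index the attributes in tb[] that carry the
 * action configuration, as checked above.
 */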
3419
3420 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3421 {
3422 #ifdef CONFIG_NET_CLS_ACT
3423 struct tcf_exts old = *dst;
3424
3425 *dst = *src;
3426 tcf_exts_destroy(&old);
3427 #endif
3428 }
3429 EXPORT_SYMBOL(tcf_exts_change);
3430
3431 #ifdef CONFIG_NET_CLS_ACT
3432 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3433 {
3434 if (exts->nr_actions == 0)
3435 return NULL;
3436 else
3437 return exts->actions[0];
3438 }
3439 #endif
3440
3441 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3442 {
3443 #ifdef CONFIG_NET_CLS_ACT
3444 struct nlattr *nest;
3445
3446 if (exts->action && tcf_exts_has_actions(exts)) {
3447 /*
3448 * again for backward compatible mode - we want
3449 * to work with both old and new modes of entering
3450 * tc data even if iproute2 was newer - jhs
3451 */
3452 if (exts->type != TCA_OLD_COMPAT) {
3453 nest = nla_nest_start_noflag(skb, exts->action);
3454 if (nest == NULL)
3455 goto nla_put_failure;
3456
3457 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3458 < 0)
3459 goto nla_put_failure;
3460 nla_nest_end(skb, nest);
3461 } else if (exts->police) {
3462 struct tc_action *act = tcf_exts_first_act(exts);
3463 nest = nla_nest_start_noflag(skb, exts->police);
3464 if (nest == NULL || !act)
3465 goto nla_put_failure;
3466 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3467 goto nla_put_failure;
3468 nla_nest_end(skb, nest);
3469 }
3470 }
3471 return 0;
3472
3473 nla_put_failure:
3474 nla_nest_cancel(skb, nest);
3475 return -1;
3476 #else
3477 return 0;
3478 #endif
3479 }
3480 EXPORT_SYMBOL(tcf_exts_dump);
3481
3482 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3483 {
3484 #ifdef CONFIG_NET_CLS_ACT
3485 struct nlattr *nest;
3486
3487 if (!exts->action || !tcf_exts_has_actions(exts))
3488 return 0;
3489
3490 nest = nla_nest_start_noflag(skb, exts->action);
3491 if (!nest)
3492 goto nla_put_failure;
3493
3494 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3495 goto nla_put_failure;
3496 nla_nest_end(skb, nest);
3497 return 0;
3498
3499 nla_put_failure:
3500 nla_nest_cancel(skb, nest);
3501 return -1;
3502 #else
3503 return 0;
3504 #endif
3505 }
3506 EXPORT_SYMBOL(tcf_exts_terse_dump);
3507
3508 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3509 {
3510 #ifdef CONFIG_NET_CLS_ACT
3511 struct tc_action *a = tcf_exts_first_act(exts);
3512 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3513 return -1;
3514 #endif
3515 return 0;
3516 }
3517 EXPORT_SYMBOL(tcf_exts_dump_stats);
3518
3519 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3520 {
3521 if (*flags & TCA_CLS_FLAGS_IN_HW)
3522 return;
3523 *flags |= TCA_CLS_FLAGS_IN_HW;
3524 if (tc_skip_sw(*flags))
3525 atomic_inc(&block->skipswcnt);
3526 atomic_inc(&block->offloadcnt);
3527 }
3528
3529 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3530 {
3531 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3532 return;
3533 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3534 if (tc_skip_sw(*flags))
3535 atomic_dec(&block->skipswcnt);
3536 atomic_dec(&block->offloadcnt);
3537 }
3538
3539 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3540 struct tcf_proto *tp, u32 *cnt,
3541 u32 *flags, u32 diff, bool add)
3542 {
3543 lockdep_assert_held(&block->cb_lock);
3544
3545 spin_lock(&tp->lock);
3546 if (add) {
3547 if (!*cnt)
3548 tcf_block_offload_inc(block, flags);
3549 *cnt += diff;
3550 } else {
3551 *cnt -= diff;
3552 if (!*cnt)
3553 tcf_block_offload_dec(block, flags);
3554 }
3555 spin_unlock(&tp->lock);
3556 }
3557
3558 static void
3559 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3560 u32 *cnt, u32 *flags)
3561 {
3562 lockdep_assert_held(&block->cb_lock);
3563
3564 spin_lock(&tp->lock);
3565 tcf_block_offload_dec(block, flags);
3566 *cnt = 0;
3567 spin_unlock(&tp->lock);
3568 }
3569
3570 static int
3571 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3572 void *type_data, bool err_stop)
3573 {
3574 struct flow_block_cb *block_cb;
3575 int ok_count = 0;
3576 int err;
3577
3578 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3579 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3580 if (err) {
3581 if (err_stop)
3582 return err;
3583 } else {
3584 ok_count++;
3585 }
3586 }
3587 return ok_count;
3588 }
3589
3590 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3591 void *type_data, bool err_stop, bool rtnl_held)
3592 {
3593 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3594 int ok_count;
3595
3596 retry:
3597 if (take_rtnl)
3598 rtnl_lock();
3599 down_read(&block->cb_lock);
3600 /* Need to obtain rtnl lock if block is bound to devs that require it.
3601 * In block bind code cb_lock is obtained while holding rtnl, so we must
3602 * obtain the locks in the same order here.
3603 */
3604 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3605 up_read(&block->cb_lock);
3606 take_rtnl = true;
3607 goto retry;
3608 }
3609
3610 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3611
3612 up_read(&block->cb_lock);
3613 if (take_rtnl)
3614 rtnl_unlock();
3615 return ok_count;
3616 }
3617 EXPORT_SYMBOL(tc_setup_cb_call);
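/* Driver-side counterpart (illustrative sketch): a callback registered with
 * flow_block_cb_alloc()/flow_block_cb_add() during block bind receives every
 * __tc_setup_cb_call() invocation as (type, type_data, cb_priv), e.g.
 * TC_SETUP_CLSFLOWER with a struct flow_cls_offload in type_data, and
 * returns 0 on success or a negative errno, which stops the walk when
 * err_stop is set.
 */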
3618
3619 /* Non-destructive filter add. If a filter that wasn't already in hardware
3620 * is successfully offloaded, increment the block's offload counter. On
3621 * failure, a previously offloaded filter is considered intact and the
3622 * offload counter is not decremented.
3623 */
3624
3625 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3626 enum tc_setup_type type, void *type_data, bool err_stop,
3627 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3628 {
3629 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3630 int ok_count;
3631
3632 retry:
3633 if (take_rtnl)
3634 rtnl_lock();
3635 down_read(&block->cb_lock);
3636 /* Need to obtain rtnl lock if block is bound to devs that require it.
3637 * In block bind code cb_lock is obtained while holding rtnl, so we must
3638 * obtain the locks in the same order here.
3639 */
3640 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3641 up_read(&block->cb_lock);
3642 take_rtnl = true;
3643 goto retry;
3644 }
3645
3646 /* Make sure all netdevs sharing this block are offload-capable. */
3647 if (block->nooffloaddevcnt && err_stop) {
3648 ok_count = -EOPNOTSUPP;
3649 goto err_unlock;
3650 }
3651
3652 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3653 if (ok_count < 0)
3654 goto err_unlock;
3655
3656 if (tp->ops->hw_add)
3657 tp->ops->hw_add(tp, type_data);
3658 if (ok_count > 0)
3659 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3660 ok_count, true);
3661 err_unlock:
3662 up_read(&block->cb_lock);
3663 if (take_rtnl)
3664 rtnl_unlock();
3665 return min(ok_count, 0);
3666 }
3667 EXPORT_SYMBOL(tc_setup_cb_add);
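/* Example of the bookkeeping above (illustrative): with three callbacks
 * registered and one of them failing while err_stop is false, ok_count is 2,
 * so *in_hw_count grows by 2 and TCA_CLS_FLAGS_IN_HW is set in *flags; a
 * later tc_setup_cb_destroy() resets the count and clears the flag via
 * tc_cls_offload_cnt_reset().
 */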
3668
3669 /* Destructive filter replace. If a filter that wasn't already in hardware
3670 * is successfully offloaded, increment the block's offload counter. On
3671 * failure, a previously offloaded filter is considered destroyed and the
3672 * offload counter is decremented.
3673 */
3674
tc_setup_cb_replace(struct tcf_block * block,struct tcf_proto * tp,enum tc_setup_type type,void * type_data,bool err_stop,u32 * old_flags,unsigned int * old_in_hw_count,u32 * new_flags,unsigned int * new_in_hw_count,bool rtnl_held)3675 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3676 enum tc_setup_type type, void *type_data, bool err_stop,
3677 u32 *old_flags, unsigned int *old_in_hw_count,
3678 u32 *new_flags, unsigned int *new_in_hw_count,
3679 bool rtnl_held)
3680 {
3681 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3682 int ok_count;
3683
3684 retry:
3685 if (take_rtnl)
3686 rtnl_lock();
3687 down_read(&block->cb_lock);
3688 /* Need to obtain rtnl lock if block is bound to devs that require it.
3689 * In block bind code cb_lock is obtained while holding rtnl, so we must
3690 * obtain the locks in same order here.
3691 */
3692 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3693 up_read(&block->cb_lock);
3694 take_rtnl = true;
3695 goto retry;
3696 }
3697
3698 /* Make sure all netdevs sharing this block are offload-capable. */
3699 if (block->nooffloaddevcnt && err_stop) {
3700 ok_count = -EOPNOTSUPP;
3701 goto err_unlock;
3702 }
3703
3704 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3705 if (tp->ops->hw_del)
3706 tp->ops->hw_del(tp, type_data);
3707
3708 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3709 if (ok_count < 0)
3710 goto err_unlock;
3711
3712 if (tp->ops->hw_add)
3713 tp->ops->hw_add(tp, type_data);
3714 if (ok_count > 0)
3715 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3716 new_flags, ok_count, true);
3717 err_unlock:
3718 up_read(&block->cb_lock);
3719 if (take_rtnl)
3720 rtnl_unlock();
3721 return min(ok_count, 0);
3722 }
3723 EXPORT_SYMBOL(tc_setup_cb_replace);
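
/* The old filter's counters are reset and tp->ops->hw_del() runs before the
 * callbacks are invoked, so a failed replace leaves the filter accounted as
 * not offloaded rather than restoring the previous hardware state.
 */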

/* Destroy the filter and decrement the block offload counter if the filter
 * was previously offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* We need to obtain the rtnl lock if the block is bound to devices
	 * that require it. In the block bind code, cb_lock is obtained while
	 * holding rtnl, so we must obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

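/* Replay a single filter to one callback, e.g. when a callback is being
 * registered on or removed from a live block. Unlike the helpers above,
 * this updates the offload counters for exactly one driver at a time.
 */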
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
				   const struct tc_action *act)
{
	struct tc_cookie *user_cookie;
	int err = 0;

	rcu_read_lock();
	user_cookie = rcu_dereference(act->user_cookie);
	if (user_cookie) {
		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
							       user_cookie->len,
							       GFP_ATOMIC);
		if (!entry->user_cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->user_cookie);
}

void tc_cleanup_offload_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_user_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_offload_action);
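
/* Callers release a flow_action populated by tc_setup_offload_action() with
 * this helper; tc_setup_action() also uses it to unwind a partially built
 * entries array on error.
 */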

static int tc_setup_offload_act(struct tc_action *act,
				struct flow_action_entry *entry,
				u32 *index_inc,
				struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	if (act->ops->offload_act_setup) {
		return act->ops->offload_act_setup(act, entry, index_inc, true,
						   extack);
	} else {
		NL_SET_ERR_MSG(extack, "Action does not support offload");
		return -EOPNOTSUPP;
	}
#else
	return 0;
#endif
}
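
/* A single tc action may expand into several flow_action entries; the
 * action's offload_act_setup() callback reports how many via *index_inc
 * (e.g. pedit emits one entry per munge key). In tc_setup_action() below,
 * i therefore indexes tc actions while j indexes flow_action entries, and
 * the two diverge as soon as any action expands.
 */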

int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    u32 miss_cookie_base,
		    struct netlink_ext_ack *extack)
{
	int i, j, k, index, err = 0;
	struct tc_action *act;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!actions)
		return 0;

	j = 0;
	tcf_act_for_each_action(i, act, actions) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_user_cookie(entry, act);
		if (err)
			goto err_out_locked;

		index = 0;
		err = tc_setup_offload_act(act, entry, &index, extack);
		if (err)
			goto err_out_locked;

		for (k = 0; k < index; k++) {
			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
			entry[k].hw_index = act->tcfa_index;
			entry[k].cookie = (unsigned long)act;
			entry[k].miss_cookie =
				tcf_exts_miss_cookie_get(miss_cookie_base, i);
		}

		j += index;

		spin_unlock_bh(&act->tcfa_lock);
	}

err_out:
	if (err)
		tc_cleanup_offload_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	u32 miss_cookie_base;

	if (!exts)
		return 0;

	miss_cookie_base = exts->miss_cookie_node ?
			   exts->miss_cookie_node->miss_cookie_base : 0;
	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
			       extack);
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tc_setup_offload_action);
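
/* A minimal sketch of the expected calling pattern, assuming names from a
 * classifier such as cls_flower (f and extack are not defined here):
 *
 *	struct flow_rule *rule;
 *	int err;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_offload_action(&rule->action, &f->exts, extack);
 */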

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);
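
/* The count computed here must match the number of entries tc_setup_action()
 * will emit, which is why pedit, whose offload setup expands to one entry
 * per munge key, contributes tcf_pedit_nkeys() instead of 1.
 */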

#ifdef CONFIG_NET_CLS_ACT
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (!*p_block_index) {
		NL_SET_ERR_MSG(extack, "Block number may not be zero");
		return -EINVAL;
	}

	return 0;
}

int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	qe->info.binder_type = binder_type;
	qe->info.chain_head_change = tcf_chain_head_change_dflt;
	qe->info.chain_head_change_priv = &qe->filter_chain;
	qe->info.block_index = block_index;

	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
}
EXPORT_SYMBOL(tcf_qevent_init);
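
/* A minimal binding sketch, assuming the sch_red early_drop qevent (the
 * qdisc-private names are illustrative):
 *
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 */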

void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);

int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	/* Reject attempts to attach a block to a qevent that was created
	 * without one, or to switch an existing qevent to a different block.
	 */
	if (block_index != qe->info.block_index) {
		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_qevent_validate_change);

struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);
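
/* A NULL return means the skb was consumed (dropped, stolen or redirected)
 * and *ret carries the verdict for the caller to return; a non-NULL return
 * hands the skb back for normal processing.
 */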

int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	if (!qe->info.block_index)
		return 0;
	return nla_put_u32(skb, attr_name, qe->info.block_index);
}
EXPORT_SYMBOL(tcf_qevent_dump);
#endif

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};
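
/* The filter handlers below are registered RTNL_FLAG_DOIT_UNLOCKED, so the
 * rtnetlink core calls them without rtnl_lock; they take it themselves only
 * when the qdisc or classifier involved requires it. The chain handlers are
 * still invoked under rtnl.
 */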

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);