xref: /linux/net/sched/act_api.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/act_api.c	Packet action API.
4  *
5  * Author:	Jamal Hadi Salim
6  */
7 
8 #include <linux/types.h>
9 #include <linux/kernel.h>
10 #include <linux/string.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/skbuff.h>
14 #include <linux/init.h>
15 #include <linux/kmod.h>
16 #include <linux/err.h>
17 #include <linux/module.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/tc_act/tc_pedit.h>
23 #include <net/act_api.h>
24 #include <net/netlink.h>
25 #include <net/flow_offload.h>
26 #include <net/tc_wrapper.h>
27 
28 #ifdef CONFIG_INET
29 DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
30 EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
31 #endif
32 
33 int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
34 {
35 #ifdef CONFIG_INET
36 	if (static_branch_unlikely(&tcf_frag_xmit_count))
37 		return sch_frag_xmit_hook(skb, xmit);
38 #endif
39 
40 	return xmit(skb);
41 }
42 EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
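/* Example (a sketch, not taken verbatim from any one action): an action that
 * transmits packets, such as mirred, funnels its transmit through this helper
 * so that sch_frag can transparently fragment oversized frames when the
 * static key is enabled:
 *
 *	return tcf_dev_queue_xmit(skb, dev_queue_xmit);
 */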
43 
44 static void tcf_action_goto_chain_exec(const struct tc_action *a,
45 				       struct tcf_result *res)
46 {
47 	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
48 
49 	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
50 }
51 
52 static void tcf_free_cookie_rcu(struct rcu_head *p)
53 {
54 	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
55 
56 	kfree(cookie->data);
57 	kfree(cookie);
58 }
59 
60 static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
61 				  struct tc_cookie *new_cookie)
62 {
63 	struct tc_cookie *old;
64 
65 	old = unrcu_pointer(xchg(old_cookie, RCU_INITIALIZER(new_cookie)));
66 	if (old)
67 		call_rcu(&old->rcu, tcf_free_cookie_rcu);
68 }
69 
70 int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
71 			     struct tcf_chain **newchain,
72 			     struct netlink_ext_ack *extack)
73 {
74 	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
75 	u32 chain_index;
76 
77 	if (!opcode)
78 		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
79 	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
80 		ret = 0;
81 	if (ret) {
82 		NL_SET_ERR_MSG(extack, "invalid control action");
83 		goto end;
84 	}
85 
86 	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
87 		chain_index = action & TC_ACT_EXT_VAL_MASK;
88 		if (!tp || !newchain) {
89 			ret = -EINVAL;
90 			NL_SET_ERR_MSG(extack,
91 				       "can't goto NULL proto/chain");
92 			goto end;
93 		}
94 		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
95 		if (!*newchain) {
96 			ret = -ENOMEM;
97 			NL_SET_ERR_MSG(extack,
98 				       "can't allocate goto_chain");
99 		}
100 	}
101 end:
102 	return ret;
103 }
104 EXPORT_SYMBOL(tcf_action_check_ctrlact);
105 
106 struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
107 					 struct tcf_chain *goto_chain)
108 {
109 	a->tcfa_action = action;
110 	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
111 	return goto_chain;
112 }
113 EXPORT_SYMBOL(tcf_action_set_ctrlact);
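/* Typical usage in an action's ->init() (a condensed sketch; "parm" and the
 * surrounding error handling are placeholders from the calling action):
 *
 *	struct tcf_chain *goto_ch = NULL;
 *
 *	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
 *	if (err < 0)
 *		goto release_idr;
 *	...
 *	spin_lock_bh(&act->tcfa_lock);
 *	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 *	spin_unlock_bh(&act->tcfa_lock);
 *	if (goto_ch)
 *		tcf_chain_put_by_act(goto_ch);
 */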
114 
115 /* XXX: For standalone actions, we don't need an RCU grace period either, because
116  * actions are always connected to filters and filters are already destroyed in
117  * RCU callbacks, so after an RCU grace period actions are already disconnected
118  * from filters. Readers can no longer find us.
119  */
120 static void free_tcf(struct tc_action *p)
121 {
122 	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
123 
124 	free_percpu(p->cpu_bstats);
125 	free_percpu(p->cpu_bstats_hw);
126 	free_percpu(p->cpu_qstats);
127 
128 	tcf_set_action_cookie(&p->user_cookie, NULL);
129 	if (chain)
130 		tcf_chain_put_by_act(chain);
131 
132 	kfree(p);
133 }
134 
135 static void offload_action_hw_count_set(struct tc_action *act,
136 					u32 hw_count)
137 {
138 	act->in_hw_count = hw_count;
139 }
140 
141 static void offload_action_hw_count_inc(struct tc_action *act,
142 					u32 hw_count)
143 {
144 	act->in_hw_count += hw_count;
145 }
146 
147 static void offload_action_hw_count_dec(struct tc_action *act,
148 					u32 hw_count)
149 {
150 	act->in_hw_count = act->in_hw_count > hw_count ?
151 			   act->in_hw_count - hw_count : 0;
152 }
153 
154 static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
155 {
156 	if (is_tcf_pedit(act))
157 		return tcf_pedit_nkeys(act);
158 	else
159 		return 1;
160 }
161 
162 static bool tc_act_skip_hw(u32 flags)
163 {
164 	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
165 }
166 
167 static bool tc_act_skip_sw(u32 flags)
168 {
169 	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
170 }
171 
172 /* SKIP_HW and SKIP_SW are mutually exclusive flags. */
173 static bool tc_act_flags_valid(u32 flags)
174 {
175 	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;
176 
177 	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
178 }
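/* Worked example for tc_act_flags_valid(): flags = SKIP_HW | SKIP_SW masks
 * to both bits, and XOR-ing with (SKIP_HW | SKIP_SW) then yields 0 (false).
 * Any other combination (neither bit, or exactly one bit) leaves at least one
 * bit set after the XOR and is therefore reported as valid.
 */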
179 
180 static int offload_action_init(struct flow_offload_action *fl_action,
181 			       struct tc_action *act,
182 			       enum offload_act_command  cmd,
183 			       struct netlink_ext_ack *extack)
184 {
185 	int err;
186 
187 	fl_action->extack = extack;
188 	fl_action->command = cmd;
189 	fl_action->index = act->tcfa_index;
190 	fl_action->cookie = (unsigned long)act;
191 
192 	if (act->ops->offload_act_setup) {
193 		spin_lock_bh(&act->tcfa_lock);
194 		err = act->ops->offload_act_setup(act, fl_action, NULL,
195 						  false, extack);
196 		spin_unlock_bh(&act->tcfa_lock);
197 		return err;
198 	}
199 
200 	return -EOPNOTSUPP;
201 }
202 
203 static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
204 				     u32 *hw_count)
205 {
206 	int err;
207 
208 	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
209 					  fl_act, NULL, NULL);
210 	if (err < 0)
211 		return err;
212 
213 	if (hw_count)
214 		*hw_count = err;
215 
216 	return 0;
217 }
218 
219 static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
220 					u32 *hw_count,
221 					flow_indr_block_bind_cb_t *cb,
222 					void *cb_priv)
223 {
224 	int err;
225 
226 	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
227 	if (err < 0)
228 		return err;
229 
230 	if (hw_count)
231 		*hw_count = 1;
232 
233 	return 0;
234 }
235 
236 static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
237 				  u32 *hw_count,
238 				  flow_indr_block_bind_cb_t *cb,
239 				  void *cb_priv)
240 {
241 	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
242 						 cb, cb_priv) :
243 		    tcf_action_offload_cmd_ex(fl_act, hw_count);
244 }
245 
246 static int tcf_action_offload_add_ex(struct tc_action *action,
247 				     struct netlink_ext_ack *extack,
248 				     flow_indr_block_bind_cb_t *cb,
249 				     void *cb_priv)
250 {
251 	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
252 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
253 		[0] = action,
254 	};
255 	struct flow_offload_action *fl_action;
256 	u32 in_hw_count = 0;
257 	int num, err = 0;
258 
259 	if (tc_act_skip_hw(action->tcfa_flags))
260 		return 0;
261 
262 	num = tcf_offload_act_num_actions_single(action);
263 	fl_action = offload_action_alloc(num);
264 	if (!fl_action)
265 		return -ENOMEM;
266 
267 	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
268 	if (err)
269 		goto fl_err;
270 
271 	err = tc_setup_action(&fl_action->action, actions, 0, extack);
272 	if (err) {
273 		NL_SET_ERR_MSG_MOD(extack,
274 				   "Failed to setup tc actions for offload");
275 		goto fl_err;
276 	}
277 
278 	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
279 	if (!err)
280 		cb ? offload_action_hw_count_inc(action, in_hw_count) :
281 		     offload_action_hw_count_set(action, in_hw_count);
282 
283 	if (skip_sw && !tc_act_in_hw(action))
284 		err = -EINVAL;
285 
286 	tc_cleanup_offload_action(&fl_action->action);
287 
288 fl_err:
289 	kfree(fl_action);
290 
291 	return err;
292 }
293 
294 /* offload the tc action after it is inserted */
295 static int tcf_action_offload_add(struct tc_action *action,
296 				  struct netlink_ext_ack *extack)
297 {
298 	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
299 }
300 
301 int tcf_action_update_hw_stats(struct tc_action *action)
302 {
303 	struct flow_offload_action fl_act = {};
304 	int err;
305 
306 	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
307 	if (err)
308 		return err;
309 
310 	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
311 	if (!err) {
312 		preempt_disable();
313 		tcf_action_stats_update(action, fl_act.stats.bytes,
314 					fl_act.stats.pkts,
315 					fl_act.stats.drops,
316 					fl_act.stats.lastused,
317 					true);
318 		preempt_enable();
319 		action->used_hw_stats = fl_act.stats.used_hw_stats;
320 		action->used_hw_stats_valid = true;
321 	} else {
322 		return -EOPNOTSUPP;
323 	}
324 
325 	return 0;
326 }
327 EXPORT_SYMBOL(tcf_action_update_hw_stats);
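/* Note: this is also invoked from the dump path (see tcf_dump_walker() below)
 * so hardware counters are folded into the software stats before they are
 * copied to user space. On the driver side, a callback handling the
 * FLOW_ACT_STATS command is expected to fill fl_act->stats, roughly (sketch,
 * driver-specific lookup and locking omitted):
 *
 *	flow_stats_update(&fl_act->stats, bytes, pkts, drops,
 *			  lastused, FLOW_ACTION_HW_STATS_DELAYED);
 */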
328 
329 static int tcf_action_offload_del_ex(struct tc_action *action,
330 				     flow_indr_block_bind_cb_t *cb,
331 				     void *cb_priv)
332 {
333 	struct flow_offload_action fl_act = {};
334 	u32 in_hw_count = 0;
335 	int err = 0;
336 
337 	if (!tc_act_in_hw(action))
338 		return 0;
339 
340 	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
341 	if (err)
342 		return err;
343 
344 	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
345 	if (err < 0)
346 		return err;
347 
348 	if (!cb && action->in_hw_count != in_hw_count)
349 		return -EINVAL;
350 
351 	/* do not need to update hw state when deleting action */
352 	if (cb && in_hw_count)
353 		offload_action_hw_count_dec(action, in_hw_count);
354 
355 	return 0;
356 }
357 
358 static int tcf_action_offload_del(struct tc_action *action)
359 {
360 	return tcf_action_offload_del_ex(action, NULL, NULL);
361 }
362 
363 static void tcf_action_cleanup(struct tc_action *p)
364 {
365 	tcf_action_offload_del(p);
366 	if (p->ops->cleanup)
367 		p->ops->cleanup(p);
368 
369 	gen_kill_estimator(&p->tcfa_rate_est);
370 	free_tcf(p);
371 }
372 
373 static int __tcf_action_put(struct tc_action *p, bool bind)
374 {
375 	struct tcf_idrinfo *idrinfo = p->idrinfo;
376 
377 	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
378 		if (bind)
379 			atomic_dec(&p->tcfa_bindcnt);
380 		idr_remove(&idrinfo->action_idr, p->tcfa_index);
381 		mutex_unlock(&idrinfo->lock);
382 
383 		tcf_action_cleanup(p);
384 		return 1;
385 	}
386 
387 	if (bind)
388 		atomic_dec(&p->tcfa_bindcnt);
389 
390 	return 0;
391 }
392 
393 static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
394 {
395 	int ret = 0;
396 
397 	/* Release with strict==1 and bind==0 is only called through the act API
398 	 * interface (classifiers always bind). The only case where an action with
399 	 * a positive reference count and a zero bind count can exist is when it
400 	 * was also created via the act API (unbinding the last classifier will
401 	 * destroy the action if it was created by a classifier). So the only case
402 	 * where the bind count can change after the initial check is when an
403 	 * unbound action is destroyed by the act API while a classifier
404 	 * concurrently binds to an action with the same id. This results either
405 	 * in creation of a new action (same behavior as before), or in reuse of
406 	 * the existing action if the concurrent process increments the reference
407 	 * count before the action is deleted. Both scenarios are acceptable.
408 	 */
409 	if (p) {
410 		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
411 			return -EPERM;
412 
413 		if (__tcf_action_put(p, bind))
414 			ret = ACT_P_DELETED;
415 	}
416 
417 	return ret;
418 }
419 
420 int tcf_idr_release(struct tc_action *a, bool bind)
421 {
422 	const struct tc_action_ops *ops = a->ops;
423 	int ret;
424 
425 	ret = __tcf_idr_release(a, bind, false);
426 	if (ret == ACT_P_DELETED)
427 		module_put(ops->owner);
428 	return ret;
429 }
430 EXPORT_SYMBOL(tcf_idr_release);
431 
432 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
433 {
434 	struct tc_cookie *user_cookie;
435 	u32 cookie_len = 0;
436 
437 	rcu_read_lock();
438 	user_cookie = rcu_dereference(act->user_cookie);
439 
440 	if (user_cookie)
441 		cookie_len = nla_total_size(user_cookie->len);
442 	rcu_read_unlock();
443 
444 	return  nla_total_size(0) /* action number nested */
445 		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
446 		+ cookie_len /* TCA_ACT_COOKIE */
447 		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
448 		+ nla_total_size(0) /* TCA_ACT_STATS nested */
449 		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
450 		/* TCA_STATS_BASIC */
451 		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
452 		/* TCA_STATS_PKT64 */
453 		+ nla_total_size_64bit(sizeof(u64))
454 		/* TCA_STATS_QUEUE */
455 		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
456 		+ nla_total_size(0) /* TCA_ACT_OPTIONS nested */
457 		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
458 }
459 
460 static size_t tcf_action_full_attrs_size(size_t sz)
461 {
462 	return NLMSG_HDRLEN                     /* struct nlmsghdr */
463 		+ sizeof(struct tcamsg)
464 		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
465 		+ sz;
466 }
467 
468 static size_t tcf_action_fill_size(const struct tc_action *act)
469 {
470 	size_t sz = tcf_action_shared_attrs_size(act);
471 
472 	if (act->ops->get_fill_size)
473 		return act->ops->get_fill_size(act) + sz;
474 	return sz;
475 }
476 
477 static int
478 tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
479 {
480 	unsigned char *b = skb_tail_pointer(skb);
481 	struct tc_cookie *cookie;
482 
483 	if (nla_put_string(skb, TCA_ACT_KIND, a->ops->kind))
484 		goto nla_put_failure;
485 	if (tcf_action_copy_stats(skb, a, 0))
486 		goto nla_put_failure;
487 	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
488 		goto nla_put_failure;
489 
490 	rcu_read_lock();
491 	cookie = rcu_dereference(a->user_cookie);
492 	if (cookie) {
493 		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
494 			rcu_read_unlock();
495 			goto nla_put_failure;
496 		}
497 	}
498 	rcu_read_unlock();
499 
500 	return 0;
501 
502 nla_put_failure:
503 	nlmsg_trim(skb, b);
504 	return -1;
505 }
506 
507 static int
508 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
509 {
510 	unsigned char *b = skb_tail_pointer(skb);
511 	struct nlattr *nest;
512 	int err = -EINVAL;
513 	u32 flags;
514 
515 	if (tcf_action_dump_terse(skb, a, false))
516 		goto nla_put_failure;
517 
518 	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
519 	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
520 			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
521 		goto nla_put_failure;
522 
523 	if (a->used_hw_stats_valid &&
524 	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
525 			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
526 		goto nla_put_failure;
527 
528 	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
529 	if (flags &&
530 	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
531 			       flags, flags))
532 		goto nla_put_failure;
533 
534 	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
535 		goto nla_put_failure;
536 
537 	nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
538 	if (nest == NULL)
539 		goto nla_put_failure;
540 	err = tcf_action_dump_old(skb, a, bind, ref);
541 	if (err > 0) {
542 		nla_nest_end(skb, nest);
543 		return err;
544 	}
545 
546 nla_put_failure:
547 	nlmsg_trim(skb, b);
548 	return -1;
549 }
550 
551 static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
552 			   struct netlink_callback *cb)
553 {
554 	int err = 0, index = -1, s_i = 0, n_i = 0;
555 	u32 act_flags = cb->args[2];
556 	unsigned long jiffy_since = cb->args[3];
557 	struct nlattr *nest;
558 	struct idr *idr = &idrinfo->action_idr;
559 	struct tc_action *p;
560 	unsigned long id = 1;
561 	unsigned long tmp;
562 
563 	mutex_lock(&idrinfo->lock);
564 
565 	s_i = cb->args[0];
566 
567 	idr_for_each_entry_ul(idr, p, tmp, id) {
568 		index++;
569 		if (index < s_i)
570 			continue;
571 		if (IS_ERR(p))
572 			continue;
573 
574 		if (jiffy_since &&
575 		    time_after(jiffy_since,
576 			       (unsigned long)p->tcfa_tm.lastuse))
577 			continue;
578 
579 		tcf_action_update_hw_stats(p);
580 
581 		nest = nla_nest_start_noflag(skb, n_i);
582 		if (!nest) {
583 			index--;
584 			goto nla_put_failure;
585 		}
586 		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
587 			tcf_action_dump_terse(skb, p, true) :
588 			tcf_action_dump_1(skb, p, 0, 0);
589 		if (err < 0) {
590 			index--;
591 			nlmsg_trim(skb, nest);
592 			goto done;
593 		}
594 		nla_nest_end(skb, nest);
595 		n_i++;
596 		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
597 		    n_i >= TCA_ACT_MAX_PRIO)
598 			goto done;
599 	}
600 done:
601 	if (index >= 0)
602 		cb->args[0] = index + 1;
603 
604 	mutex_unlock(&idrinfo->lock);
605 	if (n_i) {
606 		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
607 			cb->args[1] = n_i;
608 	}
609 	return n_i;
610 
611 nla_put_failure:
612 	nla_nest_cancel(skb, nest);
613 	goto done;
614 }
615 
616 static int tcf_idr_release_unsafe(struct tc_action *p)
617 {
618 	if (atomic_read(&p->tcfa_bindcnt) > 0)
619 		return -EPERM;
620 
621 	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
622 		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
623 		tcf_action_cleanup(p);
624 		return ACT_P_DELETED;
625 	}
626 
627 	return 0;
628 }
629 
630 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
631 			  const struct tc_action_ops *ops,
632 			  struct netlink_ext_ack *extack)
633 {
634 	struct nlattr *nest;
635 	int n_i = 0;
636 	int ret = -EINVAL;
637 	struct idr *idr = &idrinfo->action_idr;
638 	struct tc_action *p;
639 	unsigned long id = 1;
640 	unsigned long tmp;
641 
642 	nest = nla_nest_start_noflag(skb, 0);
643 	if (nest == NULL)
644 		goto nla_put_failure;
645 	if (nla_put_string(skb, TCA_ACT_KIND, ops->kind))
646 		goto nla_put_failure;
647 
648 	ret = 0;
649 	mutex_lock(&idrinfo->lock);
650 	idr_for_each_entry_ul(idr, p, tmp, id) {
651 		if (IS_ERR(p))
652 			continue;
653 		ret = tcf_idr_release_unsafe(p);
654 		if (ret == ACT_P_DELETED)
655 			module_put(ops->owner);
656 		else if (ret < 0)
657 			break;
658 		n_i++;
659 	}
660 	mutex_unlock(&idrinfo->lock);
661 	if (ret < 0) {
662 		if (n_i)
663 			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
664 		else
665 			goto nla_put_failure;
666 	}
667 
668 	ret = nla_put_u32(skb, TCA_FCNT, n_i);
669 	if (ret)
670 		goto nla_put_failure;
671 	nla_nest_end(skb, nest);
672 
673 	return n_i;
674 nla_put_failure:
675 	nla_nest_cancel(skb, nest);
676 	return ret;
677 }
678 
679 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
680 		       struct netlink_callback *cb, int type,
681 		       const struct tc_action_ops *ops,
682 		       struct netlink_ext_ack *extack)
683 {
684 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
685 
686 	if (type == RTM_DELACTION) {
687 		return tcf_del_walker(idrinfo, skb, ops, extack);
688 	} else if (type == RTM_GETACTION) {
689 		return tcf_dump_walker(idrinfo, skb, cb);
690 	} else {
691 		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
692 		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
693 		return -EINVAL;
694 	}
695 }
696 EXPORT_SYMBOL(tcf_generic_walker);
697 
698 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
699 {
700 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
701 	struct tc_action *p;
702 
703 	mutex_lock(&idrinfo->lock);
704 	p = idr_find(&idrinfo->action_idr, index);
705 	if (IS_ERR(p))
706 		p = NULL;
707 	else if (p)
708 		refcount_inc(&p->tcfa_refcnt);
709 	mutex_unlock(&idrinfo->lock);
710 
711 	if (p) {
712 		*a = p;
713 		return true;
714 	}
715 	return false;
716 }
717 EXPORT_SYMBOL(tcf_idr_search);
718 
719 static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
720 				struct netlink_callback *cb, int type,
721 				const struct tc_action_ops *ops,
722 				struct netlink_ext_ack *extack)
723 {
724 	struct tc_action_net *tn = net_generic(net, ops->net_id);
725 
726 	if (unlikely(ops->walk))
727 		return ops->walk(net, skb, cb, type, ops, extack);
728 
729 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
730 }
731 
732 static int __tcf_idr_search(struct net *net,
733 			    const struct tc_action_ops *ops,
734 			    struct tc_action **a, u32 index)
735 {
736 	struct tc_action_net *tn = net_generic(net, ops->net_id);
737 
738 	if (unlikely(ops->lookup))
739 		return ops->lookup(net, a, index);
740 
741 	return tcf_idr_search(tn, a, index);
742 }
743 
744 static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
745 {
746 	struct tc_action *p;
747 	int ret = 0;
748 
749 	mutex_lock(&idrinfo->lock);
750 	p = idr_find(&idrinfo->action_idr, index);
751 	if (!p) {
752 		mutex_unlock(&idrinfo->lock);
753 		return -ENOENT;
754 	}
755 
756 	if (!atomic_read(&p->tcfa_bindcnt)) {
757 		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
758 			struct module *owner = p->ops->owner;
759 
760 			WARN_ON(p != idr_remove(&idrinfo->action_idr,
761 						p->tcfa_index));
762 			mutex_unlock(&idrinfo->lock);
763 
764 			tcf_action_cleanup(p);
765 			module_put(owner);
766 			return 0;
767 		}
768 		ret = 0;
769 	} else {
770 		ret = -EPERM;
771 	}
772 
773 	mutex_unlock(&idrinfo->lock);
774 	return ret;
775 }
776 
777 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
778 		   struct tc_action **a, const struct tc_action_ops *ops,
779 		   int bind, bool cpustats, u32 flags)
780 {
781 	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
782 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
783 	int err = -ENOMEM;
784 
785 	if (unlikely(!p))
786 		return -ENOMEM;
787 	refcount_set(&p->tcfa_refcnt, 1);
788 	if (bind)
789 		atomic_set(&p->tcfa_bindcnt, 1);
790 
791 	if (cpustats) {
792 		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
793 		if (!p->cpu_bstats)
794 			goto err1;
795 		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
796 		if (!p->cpu_bstats_hw)
797 			goto err2;
798 		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
799 		if (!p->cpu_qstats)
800 			goto err3;
801 	}
802 	gnet_stats_basic_sync_init(&p->tcfa_bstats);
803 	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
804 	spin_lock_init(&p->tcfa_lock);
805 	p->tcfa_index = index;
806 	p->tcfa_tm.install = jiffies;
807 	p->tcfa_tm.lastuse = jiffies;
808 	p->tcfa_tm.firstuse = 0;
809 	p->tcfa_flags = flags;
810 	if (est) {
811 		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
812 					&p->tcfa_rate_est,
813 					&p->tcfa_lock, false, est);
814 		if (err)
815 			goto err4;
816 	}
817 
818 	p->idrinfo = idrinfo;
819 	__module_get(ops->owner);
820 	p->ops = ops;
821 	*a = p;
822 	return 0;
823 err4:
824 	free_percpu(p->cpu_qstats);
825 err3:
826 	free_percpu(p->cpu_bstats_hw);
827 err2:
828 	free_percpu(p->cpu_bstats);
829 err1:
830 	kfree(p);
831 	return err;
832 }
833 EXPORT_SYMBOL(tcf_idr_create);
834 
835 int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
836 			      struct nlattr *est, struct tc_action **a,
837 			      const struct tc_action_ops *ops, int bind,
838 			      u32 flags)
839 {
840 	/* Set cpustats according to the action's flags. */
841 	return tcf_idr_create(tn, index, est, a, ops, bind,
842 			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
843 }
844 EXPORT_SYMBOL(tcf_idr_create_from_flags);
845 
846 /* Cleanup idr index that was allocated but not initialized. */
847 
848 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
849 {
850 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
851 
852 	mutex_lock(&idrinfo->lock);
853 	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
854 	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
855 	mutex_unlock(&idrinfo->lock);
856 }
857 EXPORT_SYMBOL(tcf_idr_cleanup);
858 
859 /* Check if an action with the specified index exists. If the action is found,
860  * increment its reference and bind counters and return 1. Otherwise insert a
861  * temporary error pointer (to prevent concurrent users from inserting actions
862  * with the same index) and return 0.
863  *
864  * May return -EAGAIN for binding actions in case of a parallel add/delete on
865  * the requested index.
866  */
867 
868 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
869 			struct tc_action **a, int bind)
870 {
871 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
872 	struct tc_action *p;
873 	int ret;
874 	u32 max;
875 
876 	if (*index) {
877 		rcu_read_lock();
878 		p = idr_find(&idrinfo->action_idr, *index);
879 
880 		if (IS_ERR(p)) {
881 			/* This means that another process allocated
882 			 * index but did not assign the pointer yet.
883 			 */
884 			rcu_read_unlock();
885 			return -EAGAIN;
886 		}
887 
888 		if (!p) {
889 			/* Empty slot, try to allocate it */
890 			max = *index;
891 			rcu_read_unlock();
892 			goto new;
893 		}
894 
895 		if (!refcount_inc_not_zero(&p->tcfa_refcnt)) {
896 			/* Action was deleted in parallel */
897 			rcu_read_unlock();
898 			return -EAGAIN;
899 		}
900 
901 		if (bind)
902 			atomic_inc(&p->tcfa_bindcnt);
903 		*a = p;
904 
905 		rcu_read_unlock();
906 
907 		return 1;
908 	} else {
909 		/* Find a slot */
910 		*index = 1;
911 		max = UINT_MAX;
912 	}
913 
914 new:
915 	*a = NULL;
916 
917 	mutex_lock(&idrinfo->lock);
918 	ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max,
919 			    GFP_KERNEL);
920 	mutex_unlock(&idrinfo->lock);
921 
922 	/* N binds raced for action allocation,
923 	 * retry for all the ones that failed.
924 	 */
925 	if (ret == -ENOSPC && *index == max)
926 		ret = -EAGAIN;
927 
928 	return ret;
929 }
930 EXPORT_SYMBOL(tcf_idr_check_alloc);
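/* The canonical allocation pattern in an action's ->init() built on top of
 * tcf_idr_check_alloc()/tcf_idr_create()/tcf_idr_release() (a simplified
 * sketch; "tn", "ops" and the flags come from the calling action, error
 * handling is abbreviated):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (!err) {				// slot reserved, create it
 *		ret = tcf_idr_create(tn, index, est, a, ops, bind, true, flags);
 *		if (ret) {
 *			tcf_idr_cleanup(tn, index);
 *			return ret;
 *		}
 *		ret = ACT_P_CREATED;
 *	} else if (err > 0) {			// existing action found
 *		if (bind)
 *			return ACT_P_BOUND;
 *		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
 *			tcf_idr_release(*a, bind);
 *			return -EEXIST;
 *		}
 *	} else {
 *		return err;
 *	}
 */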
931 
932 void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
933 			 struct tcf_idrinfo *idrinfo)
934 {
935 	struct idr *idr = &idrinfo->action_idr;
936 	bool mutex_taken = false;
937 	struct tc_action *p;
938 	unsigned long id = 1;
939 	unsigned long tmp;
940 	int ret;
941 
942 	idr_for_each_entry_ul(idr, p, tmp, id) {
943 		if (tc_act_in_hw(p) && !mutex_taken) {
944 			rtnl_lock();
945 			mutex_taken = true;
946 		}
947 		ret = __tcf_idr_release(p, false, true);
948 		if (ret == ACT_P_DELETED)
949 			module_put(ops->owner);
950 		else if (ret < 0)
951 			return;
952 	}
953 	if (mutex_taken)
954 		rtnl_unlock();
955 	idr_destroy(&idrinfo->action_idr);
956 }
957 EXPORT_SYMBOL(tcf_idrinfo_destroy);
958 
959 static LIST_HEAD(act_base);
960 static DEFINE_RWLOCK(act_mod_lock);
961 /* Since the act ops id is stored in the pernet subsystem list,
962  * there is no way to walk through only the action
963  * subsystems, so we keep the tc action pernet ops ids in a
964  * separate list for reoffload to walk through.
965  */
966 static LIST_HEAD(act_pernet_id_list);
967 static DEFINE_MUTEX(act_id_mutex);
968 struct tc_act_pernet_id {
969 	struct list_head list;
970 	unsigned int id;
971 };
972 
973 static int tcf_pernet_add_id_list(unsigned int id)
974 {
975 	struct tc_act_pernet_id *id_ptr;
976 	int ret = 0;
977 
978 	mutex_lock(&act_id_mutex);
979 	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
980 		if (id_ptr->id == id) {
981 			ret = -EEXIST;
982 			goto err_out;
983 		}
984 	}
985 
986 	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
987 	if (!id_ptr) {
988 		ret = -ENOMEM;
989 		goto err_out;
990 	}
991 	id_ptr->id = id;
992 
993 	list_add_tail(&id_ptr->list, &act_pernet_id_list);
994 
995 err_out:
996 	mutex_unlock(&act_id_mutex);
997 	return ret;
998 }
999 
1000 static void tcf_pernet_del_id_list(unsigned int id)
1001 {
1002 	struct tc_act_pernet_id *id_ptr;
1003 
1004 	mutex_lock(&act_id_mutex);
1005 	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
1006 		if (id_ptr->id == id) {
1007 			list_del(&id_ptr->list);
1008 			kfree(id_ptr);
1009 			break;
1010 		}
1011 	}
1012 	mutex_unlock(&act_id_mutex);
1013 }
1014 
1015 int tcf_register_action(struct tc_action_ops *act,
1016 			struct pernet_operations *ops)
1017 {
1018 	struct tc_action_ops *a;
1019 	int ret;
1020 
1021 	if (!act->act || !act->dump || !act->init)
1022 		return -EINVAL;
1023 
1024 	/* We have to register pernet ops before making the action ops visible,
1025 	 * otherwise tcf_action_init_1() could get a partially initialized
1026 	 * netns.
1027 	 */
1028 	ret = register_pernet_subsys(ops);
1029 	if (ret)
1030 		return ret;
1031 
1032 	if (ops->id) {
1033 		ret = tcf_pernet_add_id_list(*ops->id);
1034 		if (ret)
1035 			goto err_id;
1036 	}
1037 
1038 	write_lock(&act_mod_lock);
1039 	list_for_each_entry(a, &act_base, head) {
1040 		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
1041 			ret = -EEXIST;
1042 			goto err_out;
1043 		}
1044 	}
1045 	list_add_tail(&act->head, &act_base);
1046 	write_unlock(&act_mod_lock);
1047 
1048 	return 0;
1049 
1050 err_out:
1051 	write_unlock(&act_mod_lock);
1052 	if (ops->id)
1053 		tcf_pernet_del_id_list(*ops->id);
1054 err_id:
1055 	unregister_pernet_subsys(ops);
1056 	return ret;
1057 }
1058 EXPORT_SYMBOL(tcf_register_action);
1059 
1060 int tcf_unregister_action(struct tc_action_ops *act,
1061 			  struct pernet_operations *ops)
1062 {
1063 	struct tc_action_ops *a;
1064 	int err = -ENOENT;
1065 
1066 	write_lock(&act_mod_lock);
1067 	list_for_each_entry(a, &act_base, head) {
1068 		if (a == act) {
1069 			list_del(&act->head);
1070 			err = 0;
1071 			break;
1072 		}
1073 	}
1074 	write_unlock(&act_mod_lock);
1075 	if (!err) {
1076 		unregister_pernet_subsys(ops);
1077 		if (ops->id)
1078 			tcf_pernet_del_id_list(*ops->id);
1079 	}
1080 	return err;
1081 }
1082 EXPORT_SYMBOL(tcf_unregister_action);
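/* Registration is normally done from an action module's init/exit hooks,
 * pairing the action ops with pernet ops sized for a struct tc_action_net
 * (a sketch; "foo" is a placeholder action name):
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init		= foo_init_net,
 *		.exit_batch	= foo_exit_net,
 *		.id		= &act_foo_ops.net_id,
 *		.size		= sizeof(struct tc_action_net),
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 *
 *	static void __exit foo_cleanup_module(void)
 *	{
 *		tcf_unregister_action(&act_foo_ops, &foo_net_ops);
 *	}
 */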
1083 
1084 /* lookup by name */
1085 static struct tc_action_ops *tc_lookup_action_n(char *kind)
1086 {
1087 	struct tc_action_ops *a, *res = NULL;
1088 
1089 	if (kind) {
1090 		read_lock(&act_mod_lock);
1091 		list_for_each_entry(a, &act_base, head) {
1092 			if (strcmp(kind, a->kind) == 0) {
1093 				if (try_module_get(a->owner))
1094 					res = a;
1095 				break;
1096 			}
1097 		}
1098 		read_unlock(&act_mod_lock);
1099 	}
1100 	return res;
1101 }
1102 
1103 /* lookup by nlattr */
1104 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
1105 {
1106 	struct tc_action_ops *a, *res = NULL;
1107 
1108 	if (kind) {
1109 		read_lock(&act_mod_lock);
1110 		list_for_each_entry(a, &act_base, head) {
1111 			if (nla_strcmp(kind, a->kind) == 0) {
1112 				if (try_module_get(a->owner))
1113 					res = a;
1114 				break;
1115 			}
1116 		}
1117 		read_unlock(&act_mod_lock);
1118 	}
1119 	return res;
1120 }
1121 
1122 /* TCA_ACT_MAX_PRIO is 32, so jump counts go up to 32 */
1123 #define TCA_ACT_MAX_PRIO_MASK 0x1FF
1124 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
1125 		    int nr_actions, struct tcf_result *res)
1126 {
1127 	u32 jmp_prgcnt = 0;
1128 	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /*matches actions per filter */
1129 	int i;
1130 	int ret = TC_ACT_OK;
1131 
1132 	if (skb_skip_tc_classify(skb))
1133 		return TC_ACT_OK;
1134 
1135 restart_act_graph:
1136 	for (i = 0; i < nr_actions; i++) {
1137 		const struct tc_action *a = actions[i];
1138 		int repeat_ttl;
1139 
1140 		if (jmp_prgcnt > 0) {
1141 			jmp_prgcnt -= 1;
1142 			continue;
1143 		}
1144 
1145 		if (tc_act_skip_sw(a->tcfa_flags))
1146 			continue;
1147 
1148 		repeat_ttl = 32;
1149 repeat:
1150 		ret = tc_act(skb, a, res);
1151 		if (unlikely(ret == TC_ACT_REPEAT)) {
1152 			if (--repeat_ttl != 0)
1153 				goto repeat;
1154 			/* suspicious opcode, stop pipeline */
1155 			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
1156 			return TC_ACT_OK;
1157 		}
1158 		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
1159 			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
1160 			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
1161 				/* faulty opcode, stop pipeline */
1162 				return TC_ACT_OK;
1163 			} else {
1164 				jmp_ttl -= 1;
1165 				if (jmp_ttl > 0)
1166 					goto restart_act_graph;
1167 				else /* faulty graph, stop pipeline */
1168 					return TC_ACT_OK;
1169 			}
1170 		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
1171 			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
1172 				tcf_set_drop_reason(skb,
1173 						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
1174 				return TC_ACT_SHOT;
1175 			}
1176 			tcf_action_goto_chain_exec(a, res);
1177 		}
1178 
1179 		if (ret != TC_ACT_PIPE)
1180 			break;
1181 	}
1182 
1183 	return ret;
1184 }
1185 EXPORT_SYMBOL(tcf_action_exec);
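/* Control-flow note: a TC_ACT_JUMP result encodes the jump count in its low
 * bits (TCA_ACT_MAX_PRIO_MASK). The loop above restarts from the top of the
 * actions[] array and skips that many entries, so e.g. TC_ACT_JUMP | 2
 * resumes execution at actions[2]; jmp_ttl caps the number of restarts at
 * TCA_ACT_MAX_PRIO to guard against jump loops.
 */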
1186 
1187 int tcf_action_destroy(struct tc_action *actions[], int bind)
1188 {
1189 	const struct tc_action_ops *ops;
1190 	struct tc_action *a;
1191 	int ret = 0, i;
1192 
1193 	tcf_act_for_each_action(i, a, actions) {
1194 		actions[i] = NULL;
1195 		ops = a->ops;
1196 		ret = __tcf_idr_release(a, bind, true);
1197 		if (ret == ACT_P_DELETED)
1198 			module_put(ops->owner);
1199 		else if (ret < 0)
1200 			return ret;
1201 	}
1202 	return ret;
1203 }
1204 
1205 static int tcf_action_put(struct tc_action *p)
1206 {
1207 	return __tcf_action_put(p, false);
1208 }
1209 
1210 static void tcf_action_put_many(struct tc_action *actions[])
1211 {
1212 	struct tc_action *a;
1213 	int i;
1214 
1215 	tcf_act_for_each_action(i, a, actions) {
1216 		const struct tc_action_ops *ops = a->ops;
1217 		if (tcf_action_put(a))
1218 			module_put(ops->owner);
1219 	}
1220 }
1221 
1222 static void tca_put_bound_many(struct tc_action *actions[], int init_res[])
1223 {
1224 	struct tc_action *a;
1225 	int i;
1226 
1227 	tcf_act_for_each_action(i, a, actions) {
1228 		const struct tc_action_ops *ops = a->ops;
1229 
1230 		if (init_res[i] == ACT_P_CREATED)
1231 			continue;
1232 
1233 		if (tcf_action_put(a))
1234 			module_put(ops->owner);
1235 	}
1236 }
1237 
1238 int
1239 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1240 {
1241 	return a->ops->dump(skb, a, bind, ref);
1242 }
1243 
1244 int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
1245 		    int bind, int ref, bool terse)
1246 {
1247 	struct tc_action *a;
1248 	int err = -EINVAL, i;
1249 	struct nlattr *nest;
1250 
1251 	tcf_act_for_each_action(i, a, actions) {
1252 		nest = nla_nest_start_noflag(skb, i + 1);
1253 		if (nest == NULL)
1254 			goto nla_put_failure;
1255 		err = terse ? tcf_action_dump_terse(skb, a, false) :
1256 			tcf_action_dump_1(skb, a, bind, ref);
1257 		if (err < 0)
1258 			goto errout;
1259 		nla_nest_end(skb, nest);
1260 	}
1261 
1262 	return 0;
1263 
1264 nla_put_failure:
1265 	err = -EINVAL;
1266 errout:
1267 	nla_nest_cancel(skb, nest);
1268 	return err;
1269 }
1270 
1271 static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
1272 {
1273 	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
1274 	if (!c)
1275 		return NULL;
1276 
1277 	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
1278 	if (!c->data) {
1279 		kfree(c);
1280 		return NULL;
1281 	}
1282 	c->len = nla_len(tb[TCA_ACT_COOKIE]);
1283 
1284 	return c;
1285 }
1286 
1287 static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
1288 {
1289 	struct nla_bitfield32 hw_stats_bf;
1290 
1291 	/* If the user did not pass the attr, they do not care
1292 	 * about the type. Return "any" in that case, which
1293 	 * selects all supported types.
1294 	 */
1295 	if (!hw_stats_attr)
1296 		return TCA_ACT_HW_STATS_ANY;
1297 	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
1298 	return hw_stats_bf.value;
1299 }
1300 
1301 static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
1302 	[TCA_ACT_KIND]		= { .type = NLA_STRING },
1303 	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
1304 	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
1305 				    .len = TC_COOKIE_MAX_SIZE },
1306 	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
1307 	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
1308 							TCA_ACT_FLAGS_SKIP_HW |
1309 							TCA_ACT_FLAGS_SKIP_SW),
1310 	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
1311 };
1312 
1313 void tcf_idr_insert_many(struct tc_action *actions[], int init_res[])
1314 {
1315 	struct tc_action *a;
1316 	int i;
1317 
1318 	tcf_act_for_each_action(i, a, actions) {
1319 		struct tcf_idrinfo *idrinfo;
1320 
1321 		if (init_res[i] == ACT_P_BOUND)
1322 			continue;
1323 
1324 		idrinfo = a->idrinfo;
1325 		mutex_lock(&idrinfo->lock);
1326 		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
1327 		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
1328 		mutex_unlock(&idrinfo->lock);
1329 	}
1330 }
1331 
1332 struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
1333 					 struct netlink_ext_ack *extack)
1334 {
1335 	bool police = flags & TCA_ACT_FLAGS_POLICE;
1336 	struct nlattr *tb[TCA_ACT_MAX + 1];
1337 	struct tc_action_ops *a_o;
1338 	char act_name[IFNAMSIZ];
1339 	struct nlattr *kind;
1340 	int err;
1341 
1342 	if (!police) {
1343 		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1344 						  tcf_action_policy, extack);
1345 		if (err < 0)
1346 			return ERR_PTR(err);
1347 		err = -EINVAL;
1348 		kind = tb[TCA_ACT_KIND];
1349 		if (!kind) {
1350 			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
1351 			return ERR_PTR(err);
1352 		}
1353 		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
1354 			NL_SET_ERR_MSG(extack, "TC action name too long");
1355 			return ERR_PTR(err);
1356 		}
1357 	} else {
1358 		if (strscpy(act_name, "police", IFNAMSIZ) < 0) {
1359 			NL_SET_ERR_MSG(extack, "TC action name too long");
1360 			return ERR_PTR(-EINVAL);
1361 		}
1362 	}
1363 
1364 	a_o = tc_lookup_action_n(act_name);
1365 	if (a_o == NULL) {
1366 #ifdef CONFIG_MODULES
1367 		bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
1368 
1369 		if (rtnl_held)
1370 			rtnl_unlock();
1371 		request_module(NET_ACT_ALIAS_PREFIX "%s", act_name);
1372 		if (rtnl_held)
1373 			rtnl_lock();
1374 
1375 		a_o = tc_lookup_action_n(act_name);
1376 
1377 		/* We dropped the RTNL semaphore in order to
1378 		 * perform the module load.  So, even if we
1379 		 * succeeded in loading the module we have to
1380 		 * tell the caller to replay the request.  We
1381 		 * indicate this using -EAGAIN.
1382 		 */
1383 		if (a_o != NULL) {
1384 			module_put(a_o->owner);
1385 			return ERR_PTR(-EAGAIN);
1386 		}
1387 #endif
1388 		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
1389 		return ERR_PTR(-ENOENT);
1390 	}
1391 
1392 	return a_o;
1393 }
1394 
1395 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
1396 				    struct nlattr *nla, struct nlattr *est,
1397 				    struct tc_action_ops *a_o, int *init_res,
1398 				    u32 flags, struct netlink_ext_ack *extack)
1399 {
1400 	bool police = flags & TCA_ACT_FLAGS_POLICE;
1401 	struct nla_bitfield32 userflags = { 0, 0 };
1402 	struct tc_cookie *user_cookie = NULL;
1403 	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
1404 	struct nlattr *tb[TCA_ACT_MAX + 1];
1405 	struct tc_action *a;
1406 	int err;
1407 
1408 	/* backward compatibility for policer */
1409 	if (!police) {
1410 		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1411 						  tcf_action_policy, extack);
1412 		if (err < 0)
1413 			return ERR_PTR(err);
1414 		if (tb[TCA_ACT_COOKIE]) {
1415 			user_cookie = nla_memdup_cookie(tb);
1416 			if (!user_cookie) {
1417 				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
1418 				err = -ENOMEM;
1419 				goto err_out;
1420 			}
1421 		}
1422 		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
1423 		if (tb[TCA_ACT_FLAGS]) {
1424 			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
1425 			if (!tc_act_flags_valid(userflags.value)) {
1426 				err = -EINVAL;
1427 				goto err_out;
1428 			}
1429 		}
1430 
1431 		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
1432 				userflags.value | flags, extack);
1433 	} else {
1434 		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
1435 				extack);
1436 	}
1437 	if (err < 0)
1438 		goto err_out;
1439 	*init_res = err;
1440 
1441 	if (!police && tb[TCA_ACT_COOKIE])
1442 		tcf_set_action_cookie(&a->user_cookie, user_cookie);
1443 
1444 	if (!police)
1445 		a->hw_stats = hw_stats;
1446 
1447 	return a;
1448 
1449 err_out:
1450 	if (user_cookie) {
1451 		kfree(user_cookie->data);
1452 		kfree(user_cookie);
1453 	}
1454 	return ERR_PTR(err);
1455 }
1456 
1457 static bool tc_act_bind(u32 flags)
1458 {
1459 	return !!(flags & TCA_ACT_FLAGS_BIND);
1460 }
1461 
1462 /* Returns numbers of initialized actions or negative error. */
1463 
1464 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
1465 		    struct nlattr *est, struct tc_action *actions[],
1466 		    int init_res[], size_t *attr_size,
1467 		    u32 flags, u32 fl_flags,
1468 		    struct netlink_ext_ack *extack)
1469 {
1470 	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
1471 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 2];
1472 	struct tc_action *act;
1473 	size_t sz = 0;
1474 	int err;
1475 	int i;
1476 
1477 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO + 1, nla, NULL,
1478 					  extack);
1479 	if (err < 0)
1480 		return err;
1481 
1482 	/* The nested attributes are parsed as types, but they are really an
1483 	 * array of actions. So we parse one more than we can handle, and return
1484 	 * an error if the last one is set (as that indicates that the request
1485 	 * contained more than the maximum number of actions).
1486 	 */
1487 	if (tb[TCA_ACT_MAX_PRIO + 1]) {
1488 		NL_SET_ERR_MSG_FMT(extack,
1489 				   "Only %d actions supported per filter",
1490 				   TCA_ACT_MAX_PRIO);
1491 		return -EINVAL;
1492 	}
1493 
1494 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1495 		struct tc_action_ops *a_o;
1496 
1497 		a_o = tc_action_load_ops(tb[i], flags, extack);
1498 		if (IS_ERR(a_o)) {
1499 			err = PTR_ERR(a_o);
1500 			goto err_mod;
1501 		}
1502 		ops[i - 1] = a_o;
1503 	}
1504 
1505 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1506 		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
1507 					&init_res[i - 1], flags, extack);
1508 		if (IS_ERR(act)) {
1509 			err = PTR_ERR(act);
1510 			goto err;
1511 		}
1512 		sz += tcf_action_fill_size(act);
1513 		/* Start from index 0 */
1514 		actions[i - 1] = act;
1515 		if (tc_act_bind(flags)) {
1516 			bool skip_sw = tc_skip_sw(fl_flags);
1517 			bool skip_hw = tc_skip_hw(fl_flags);
1518 
1519 			if (tc_act_bind(act->tcfa_flags)) {
1520 				/* Action is created by classifier and is not
1521 				 * standalone. Check that the user did not set
1522 				 * any action flags different than the
1523 				 * classifier flags, and inherit the flags from
1524 				 * the classifier for the compatibility case
1525 				 * where no flags were specified at all.
1526 				 */
1527 				if ((tc_act_skip_sw(act->tcfa_flags) && !skip_sw) ||
1528 				    (tc_act_skip_hw(act->tcfa_flags) && !skip_hw)) {
1529 					NL_SET_ERR_MSG(extack,
1530 						       "Mismatch between action and filter offload flags");
1531 					err = -EINVAL;
1532 					goto err;
1533 				}
1534 				if (skip_sw)
1535 					act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_SW;
1536 				if (skip_hw)
1537 					act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_HW;
1538 				continue;
1539 			}
1540 
1541 			/* Action is standalone */
1542 			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
1543 			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
1544 				NL_SET_ERR_MSG(extack,
1545 					       "Mismatch between action and filter offload flags");
1546 				err = -EINVAL;
1547 				goto err;
1548 			}
1549 		} else {
1550 			err = tcf_action_offload_add(act, extack);
1551 			if (tc_act_skip_sw(act->tcfa_flags) && err)
1552 				goto err;
1553 		}
1554 	}
1555 
1556 	/* We have to commit them all together, because if any error happened in
1557 	 * between, we could not handle the failure gracefully.
1558 	 */
1559 	tcf_idr_insert_many(actions, init_res);
1560 
1561 	*attr_size = tcf_action_full_attrs_size(sz);
1562 	err = i - 1;
1563 	goto err_mod;
1564 
1565 err:
1566 	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
1567 err_mod:
1568 	for (i = 0; i < TCA_ACT_MAX_PRIO && ops[i]; i++)
1569 		module_put(ops[i]->owner);
1570 	return err;
1571 }
1572 
1573 void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
1574 			     u64 drops, bool hw)
1575 {
1576 	if (a->cpu_bstats) {
1577 		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
1578 
1579 		this_cpu_ptr(a->cpu_qstats)->drops += drops;
1580 
1581 		if (hw)
1582 			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
1583 				       bytes, packets);
1584 		return;
1585 	}
1586 
1587 	_bstats_update(&a->tcfa_bstats, bytes, packets);
1588 	a->tcfa_qstats.drops += drops;
1589 	if (hw)
1590 		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
1591 }
1592 EXPORT_SYMBOL(tcf_action_update_stats);
1593 
1594 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
1595 			  int compat_mode)
1596 {
1597 	int err = 0;
1598 	struct gnet_dump d;
1599 
1600 	if (p == NULL)
1601 		goto errout;
1602 
1603 	/* compat_mode being true specifies a call that is supposed
1604 	 * to add additional backward compatibility statistic TLVs.
1605 	 */
1606 	if (compat_mode) {
1607 		if (p->type == TCA_OLD_COMPAT)
1608 			err = gnet_stats_start_copy_compat(skb, 0,
1609 							   TCA_STATS,
1610 							   TCA_XSTATS,
1611 							   &p->tcfa_lock, &d,
1612 							   TCA_PAD);
1613 		else
1614 			return 0;
1615 	} else
1616 		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
1617 					    &p->tcfa_lock, &d, TCA_ACT_PAD);
1618 
1619 	if (err < 0)
1620 		goto errout;
1621 
1622 	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
1623 				  &p->tcfa_bstats, false) < 0 ||
1624 	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
1625 				     &p->tcfa_bstats_hw, false) < 0 ||
1626 	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
1627 	    gnet_stats_copy_queue(&d, p->cpu_qstats,
1628 				  &p->tcfa_qstats,
1629 				  p->tcfa_qstats.qlen) < 0)
1630 		goto errout;
1631 
1632 	if (gnet_stats_finish_copy(&d) < 0)
1633 		goto errout;
1634 
1635 	return 0;
1636 
1637 errout:
1638 	return -1;
1639 }
1640 
1641 static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
1642 			u32 portid, u32 seq, u16 flags, int event, int bind,
1643 			int ref, struct netlink_ext_ack *extack)
1644 {
1645 	struct tcamsg *t;
1646 	struct nlmsghdr *nlh;
1647 	unsigned char *b = skb_tail_pointer(skb);
1648 	struct nlattr *nest;
1649 
1650 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
1651 	if (!nlh)
1652 		goto out_nlmsg_trim;
1653 	t = nlmsg_data(nlh);
1654 	t->tca_family = AF_UNSPEC;
1655 	t->tca__pad1 = 0;
1656 	t->tca__pad2 = 0;
1657 
1658 	if (extack && extack->_msg &&
1659 	    nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg))
1660 		goto out_nlmsg_trim;
1661 
1662 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1663 	if (!nest)
1664 		goto out_nlmsg_trim;
1665 
1666 	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
1667 		goto out_nlmsg_trim;
1668 
1669 	nla_nest_end(skb, nest);
1670 
1671 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1672 
1673 	return skb->len;
1674 
1675 out_nlmsg_trim:
1676 	nlmsg_trim(skb, b);
1677 	return -1;
1678 }
1679 
1680 static int
1681 tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
1682 	       struct tc_action *actions[], int event,
1683 	       struct netlink_ext_ack *extack)
1684 {
1685 	struct sk_buff *skb;
1686 
1687 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1688 	if (!skb)
1689 		return -ENOBUFS;
1690 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
1691 			 0, 1, NULL) <= 0) {
1692 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1693 		kfree_skb(skb);
1694 		return -EINVAL;
1695 	}
1696 
1697 	return rtnl_unicast(skb, net, portid);
1698 }
1699 
1700 static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
1701 					  struct nlmsghdr *n, u32 portid,
1702 					  struct netlink_ext_ack *extack)
1703 {
1704 	struct nlattr *tb[TCA_ACT_MAX + 1];
1705 	const struct tc_action_ops *ops;
1706 	struct tc_action *a;
1707 	int index;
1708 	int err;
1709 
1710 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1711 					  tcf_action_policy, extack);
1712 	if (err < 0)
1713 		goto err_out;
1714 
1715 	err = -EINVAL;
1716 	if (tb[TCA_ACT_INDEX] == NULL ||
1717 	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
1718 		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
1719 		goto err_out;
1720 	}
1721 	index = nla_get_u32(tb[TCA_ACT_INDEX]);
1722 
1723 	err = -EINVAL;
1724 	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
1725 	if (!ops) { /* could happen in batch of actions */
1726 		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
1727 		goto err_out;
1728 	}
1729 	err = -ENOENT;
1730 	if (__tcf_idr_search(net, ops, &a, index) == 0) {
1731 		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
1732 		goto err_mod;
1733 	}
1734 
1735 	module_put(ops->owner);
1736 	return a;
1737 
1738 err_mod:
1739 	module_put(ops->owner);
1740 err_out:
1741 	return ERR_PTR(err);
1742 }
1743 
1744 static int tca_action_flush(struct net *net, struct nlattr *nla,
1745 			    struct nlmsghdr *n, u32 portid,
1746 			    struct netlink_ext_ack *extack)
1747 {
1748 	struct sk_buff *skb;
1749 	unsigned char *b;
1750 	struct nlmsghdr *nlh;
1751 	struct tcamsg *t;
1752 	struct netlink_callback dcb;
1753 	struct nlattr *nest;
1754 	struct nlattr *tb[TCA_ACT_MAX + 1];
1755 	const struct tc_action_ops *ops;
1756 	struct nlattr *kind;
1757 	int err = -ENOMEM;
1758 
1759 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1760 	if (!skb)
1761 		return err;
1762 
1763 	b = skb_tail_pointer(skb);
1764 
1765 	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1766 					  tcf_action_policy, extack);
1767 	if (err < 0)
1768 		goto err_out;
1769 
1770 	err = -EINVAL;
1771 	kind = tb[TCA_ACT_KIND];
1772 	ops = tc_lookup_action(kind);
1773 	if (!ops) { /* someone is trying to flush an unknown action */
1774 		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
1775 		goto err_out;
1776 	}
1777 
1778 	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
1779 			sizeof(*t), 0);
1780 	if (!nlh) {
1781 		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
1782 		goto out_module_put;
1783 	}
1784 	t = nlmsg_data(nlh);
1785 	t->tca_family = AF_UNSPEC;
1786 	t->tca__pad1 = 0;
1787 	t->tca__pad2 = 0;
1788 
1789 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1790 	if (!nest) {
1791 		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
1792 		goto out_module_put;
1793 	}
1794 
1795 	err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
1796 	if (err <= 0) {
1797 		nla_nest_cancel(skb, nest);
1798 		goto out_module_put;
1799 	}
1800 
1801 	nla_nest_end(skb, nest);
1802 
1803 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1804 	nlh->nlmsg_flags |= NLM_F_ROOT;
1805 	module_put(ops->owner);
1806 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1807 			     n->nlmsg_flags & NLM_F_ECHO);
1808 	if (err < 0)
1809 		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
1810 
1811 	return err;
1812 
1813 out_module_put:
1814 	module_put(ops->owner);
1815 err_out:
1816 	kfree_skb(skb);
1817 	return err;
1818 }
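
/* Sketch of the request that reaches tca_action_flush(): RTM_DELACTION
 * with NLM_F_ROOT set, and a TCA_ACT_TAB nest whose first member names
 * only the kind (no TCA_ACT_INDEX).  Roughly what "tc actions flush
 * action gact" sends; this fragment continues the libmnl sketch above.
 */
#if 0
	nlh->nlmsg_type = RTM_DELACTION;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_ROOT;
	tab = mnl_attr_nest_start(nlh, TCA_ACT_TAB);
	act = mnl_attr_nest_start(nlh, 1);
	mnl_attr_put_strz(nlh, TCA_ACT_KIND, "gact");	/* kind only */
	mnl_attr_nest_end(nlh, act);
	mnl_attr_nest_end(nlh, tab);
#endif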
1819 
1820 static int tcf_action_delete(struct net *net, struct tc_action *actions[])
1821 {
1822 	struct tc_action *a;
1823 	int i;
1824 
1825 	tcf_act_for_each_action(i, a, actions) {
1826 		const struct tc_action_ops *ops = a->ops;
1827 		/* Actions can be deleted concurrently so we must save their
1828 		 * type and id to search again after reference is released.
1829 		 */
1830 		struct tcf_idrinfo *idrinfo = a->idrinfo;
1831 		u32 act_index = a->tcfa_index;
1832 
1833 		actions[i] = NULL;
1834 		if (tcf_action_put(a)) {
1835 			/* last reference, action was deleted concurrently */
1836 			module_put(ops->owner);
1837 		} else {
1838 			int ret;
1839 
1840 			/* now do the delete */
1841 			ret = tcf_idr_delete_index(idrinfo, act_index);
1842 			if (ret < 0)
1843 				return ret;
1844 		}
1845 	}
1846 	return 0;
1847 }
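
/* The ordering in tcf_action_delete() matters: tcf_action_put() may
 * drop the last reference and free the action, which is why idrinfo
 * and tcfa_index are saved before the put.  Hypothetical counter-example:
 */
#if 0
	tcf_action_put(a);
	/* WRONG: 'a' may already have been freed by the put above */
	ret = tcf_idr_delete_index(a->idrinfo, a->tcfa_index);
#endif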
1848 
1849 static struct sk_buff *tcf_reoffload_del_notify_msg(struct net *net,
1850 						    struct tc_action *action)
1851 {
1852 	size_t attr_size = tcf_action_fill_size(action);
1853 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
1854 		[0] = action,
1855 	};
1856 	struct sk_buff *skb;
1857 
1858 	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
1859 	if (!skb)
1860 		return ERR_PTR(-ENOBUFS);
1861 
1862 	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1, NULL) <= 0) {
1863 		kfree_skb(skb);
1864 		return ERR_PTR(-EINVAL);
1865 	}
1866 
1867 	return skb;
1868 }
1869 
1870 static int tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
1871 {
1872 	const struct tc_action_ops *ops = action->ops;
1873 	struct sk_buff *skb;
1874 	int ret;
1875 
1876 	if (!rtnl_notify_needed(net, 0, RTNLGRP_TC)) {
1877 		skb = NULL;
1878 	} else {
1879 		skb = tcf_reoffload_del_notify_msg(net, action);
1880 		if (IS_ERR(skb))
1881 			return PTR_ERR(skb);
1882 	}
1883 
1884 	ret = tcf_idr_release_unsafe(action);
1885 	if (ret == ACT_P_DELETED) {
1886 		module_put(ops->owner);
1887 		ret = rtnetlink_maybe_send(skb, net, 0, RTNLGRP_TC, 0);
1888 	} else {
1889 		kfree_skb(skb);
1890 	}
1891 
1892 	return ret;
1893 }
1894 
1895 int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
1896 			    void *cb_priv, bool add)
1897 {
1898 	struct tc_act_pernet_id *id_ptr;
1899 	struct tcf_idrinfo *idrinfo;
1900 	struct tc_action_net *tn;
1901 	struct tc_action *p;
1902 	unsigned int act_id;
1903 	unsigned long tmp;
1904 	unsigned long id;
1905 	struct idr *idr;
1906 	struct net *net;
1907 	int ret;
1908 
1909 	if (!cb)
1910 		return -EINVAL;
1911 
1912 	down_read(&net_rwsem);
1913 	mutex_lock(&act_id_mutex);
1914 
1915 	for_each_net(net) {
1916 		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
1917 			act_id = id_ptr->id;
1918 			tn = net_generic(net, act_id);
1919 			if (!tn)
1920 				continue;
1921 			idrinfo = tn->idrinfo;
1922 			if (!idrinfo)
1923 				continue;
1924 
1925 			mutex_lock(&idrinfo->lock);
1926 			idr = &idrinfo->action_idr;
1927 			idr_for_each_entry_ul(idr, p, tmp, id) {
1928 				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
1929 					continue;
1930 				if (add) {
1931 					tcf_action_offload_add_ex(p, NULL, cb,
1932 								  cb_priv);
1933 					continue;
1934 				}
1935 
1936 				/* cb is unregistering: delete its offload to update the hw count */
1937 				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
1938 				if (ret < 0)
1939 					continue;
1940 				if (tc_act_skip_sw(p->tcfa_flags) &&
1941 				    !tc_act_in_hw(p))
1942 					tcf_reoffload_del_notify(net, p);
1943 			}
1944 			mutex_unlock(&idrinfo->lock);
1945 		}
1946 	}
1947 	mutex_unlock(&act_id_mutex);
1948 	up_read(&net_rwsem);
1949 
1950 	return 0;
1951 }
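
/* Driver-side sketch (hypothetical foo_* names): registering an
 * indirect block callback replays existing actions through
 * tcf_action_reoffload_cb(cb, cb_priv, true), and unregistering
 * replays them with add == false so hardware counts are updated.
 */
#if 0
static int foo_indr_cb(struct net_device *dev, struct Qdisc *sch,
		       void *cb_priv, enum tc_setup_type type,
		       void *type_data, void *data,
		       void (*cleanup)(struct flow_block_cb *block_cb))
{
	/* a real driver would program TC_SETUP_ACT offloads here */
	return type == TC_SETUP_ACT ? 0 : -EOPNOTSUPP;
}

/* on probe:  flow_indr_dev_register(foo_indr_cb, foo_priv);
 * on remove: flow_indr_dev_unregister(foo_indr_cb, foo_priv, foo_release);
 */
#endif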
1952 
1953 static struct sk_buff *tcf_del_notify_msg(struct net *net, struct nlmsghdr *n,
1954 					  struct tc_action *actions[],
1955 					  u32 portid, size_t attr_size,
1956 					  struct netlink_ext_ack *extack)
1957 {
1958 	struct sk_buff *skb;
1959 
1960 	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
1961 	if (!skb)
1962 		return ERR_PTR(-ENOBUFS);
1963 
1964 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
1965 			 0, 2, extack) <= 0) {
1966 		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
1967 		kfree_skb(skb);
1968 		return ERR_PTR(-EINVAL);
1969 	}
1970 
1971 	return skb;
1972 }
1973 
1974 static int tcf_del_notify(struct net *net, struct nlmsghdr *n,
1975 			  struct tc_action *actions[], u32 portid,
1976 			  size_t attr_size, struct netlink_ext_ack *extack)
1977 {
1978 	struct sk_buff *skb;
1979 	int ret;
1980 
1981 	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
1982 		skb = NULL;
1983 	} else {
1984 		skb = tcf_del_notify_msg(net, n, actions, portid, attr_size,
1985 					 extack);
1986 		if (IS_ERR(skb))
1987 			return PTR_ERR(skb);
1988 	}
1989 
1990 	/* now do the delete */
1991 	ret = tcf_action_delete(net, actions);
1992 	if (ret < 0) {
1993 		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1994 		kfree_skb(skb);
1995 		return ret;
1996 	}
1997 
1998 	return rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
1999 				    n->nlmsg_flags & NLM_F_ECHO);
2000 }
2001 
2002 static int
2003 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
2004 	      u32 portid, int event, struct netlink_ext_ack *extack)
2005 {
2006 	int i, ret;
2007 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
2008 	struct tc_action *act;
2009 	size_t attr_size = 0;
2010 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
2011 
2012 	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
2013 					  extack);
2014 	if (ret < 0)
2015 		return ret;
2016 
2017 	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
2018 		if (tb[1])
2019 			return tca_action_flush(net, tb[1], n, portid, extack);
2020 
2021 		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
2022 		return -EINVAL;
2023 	}
2024 
2025 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
2026 		act = tcf_action_get_1(net, tb[i], n, portid, extack);
2027 		if (IS_ERR(act)) {
2028 			ret = PTR_ERR(act);
2029 			goto err;
2030 		}
2031 		attr_size += tcf_action_fill_size(act);
2032 		actions[i - 1] = act;
2033 	}
2034 
2035 	attr_size = tcf_action_full_attrs_size(attr_size);
2036 
2037 	if (event == RTM_GETACTION)
2038 		ret = tcf_get_notify(net, portid, n, actions, event, extack);
2039 	else { /* delete */
2040 		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
2041 		if (ret)
2042 			goto err;
2043 		return 0;
2044 	}
2045 err:
2046 	tcf_action_put_many(actions);
2047 	return ret;
2048 }
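
/* Batch sketch: tb[1]..tb[TCA_ACT_MAX_PRIO] above lets one get/delete
 * request name up to 32 actions, each in its own member nest.  Fragment
 * in the style of the earlier libmnl sketches:
 */
#if 0
	tab = mnl_attr_nest_start(nlh, TCA_ACT_TAB);
	for (i = 1; i <= 2; i++) {		/* two actions in one batch */
		act = mnl_attr_nest_start(nlh, i);
		mnl_attr_put_strz(nlh, TCA_ACT_KIND, "gact");
		mnl_attr_put_u32(nlh, TCA_ACT_INDEX, i);
		mnl_attr_nest_end(nlh, act);
	}
	mnl_attr_nest_end(nlh, tab);
#endif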
2049 
2050 static struct sk_buff *tcf_add_notify_msg(struct net *net, struct nlmsghdr *n,
2051 					  struct tc_action *actions[],
2052 					  u32 portid, size_t attr_size,
2053 					  struct netlink_ext_ack *extack)
2054 {
2055 	struct sk_buff *skb;
2056 
2057 	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
2058 	if (!skb)
2059 		return ERR_PTR(-ENOBUFS);
2060 
2061 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
2062 			 RTM_NEWACTION, 0, 0, extack) <= 0) {
2063 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
2064 		kfree_skb(skb);
2065 		return ERR_PTR(-EINVAL);
2066 	}
2067 
2068 	return skb;
2069 }
2070 
2071 static int tcf_add_notify(struct net *net, struct nlmsghdr *n,
2072 			  struct tc_action *actions[], u32 portid,
2073 			  size_t attr_size, struct netlink_ext_ack *extack)
2074 {
2075 	struct sk_buff *skb;
2076 
2077 	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
2078 		skb = NULL;
2079 	} else {
2080 		skb = tcf_add_notify_msg(net, n, actions, portid, attr_size,
2081 					 extack);
2082 		if (IS_ERR(skb))
2083 			return PTR_ERR(skb);
2084 	}
2085 
2086 	return rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
2087 				    n->nlmsg_flags & NLM_F_ECHO);
2088 }
2089 
2090 static int tcf_action_add(struct net *net, struct nlattr *nla,
2091 			  struct nlmsghdr *n, u32 portid, u32 flags,
2092 			  struct netlink_ext_ack *extack)
2093 {
2094 	size_t attr_size = 0;
2095 	int loop, ret;
2096 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
2097 	int init_res[TCA_ACT_MAX_PRIO] = {};
2098 
2099 	for (loop = 0; loop < 10; loop++) {
2100 		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
2101 				      &attr_size, flags, 0, extack);
2102 		if (ret != -EAGAIN)
2103 			break;
2104 	}
2105 
2106 	if (ret < 0)
2107 		return ret;
2108 
2109 	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
2110 
2111 	/* only put bound actions */
2112 	tca_put_bound_many(actions, init_res);
2113 
2114 	return ret;
2115 }
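
/* Sketch of the RTM_NEWACTION payload tcf_action_add() parses: each
 * member nest carries the kind plus a TCA_ACT_OPTIONS nest with the
 * action's own parameters.  Shown for gact/TC_ACT_SHOT (roughly
 * "tc actions add action drop index 7"); this fragment assumes
 * <linux/tc_act/tc_gact.h> in addition to the headers above.
 */
#if 0
	struct tc_gact parm = { .index = 7, .action = TC_ACT_SHOT };
	struct nlattr *opts;

	nlh->nlmsg_type = RTM_NEWACTION;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL;
	tab = mnl_attr_nest_start(nlh, TCA_ACT_TAB);
	act = mnl_attr_nest_start(nlh, 1);
	mnl_attr_put_strz(nlh, TCA_ACT_KIND, "gact");
	opts = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);
	mnl_attr_put(nlh, TCA_GACT_PARMS, sizeof(parm), &parm);
	mnl_attr_nest_end(nlh, opts);
	mnl_attr_nest_end(nlh, act);
	mnl_attr_nest_end(nlh, tab);
#endif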
2116 
2117 static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
2118 	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
2119 						 TCA_ACT_FLAG_TERSE_DUMP),
2120 	[TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
2121 };
2122 
2123 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
2124 			 struct netlink_ext_ack *extack)
2125 {
2126 	struct net *net = sock_net(skb->sk);
2127 	struct nlattr *tca[TCA_ROOT_MAX + 1];
2128 	u32 portid = NETLINK_CB(skb).portid;
2129 	u32 flags = 0;
2130 	int ret = 0;
2131 
2132 	if ((n->nlmsg_type != RTM_GETACTION) &&
2133 	    !netlink_capable(skb, CAP_NET_ADMIN))
2134 		return -EPERM;
2135 
2136 	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
2137 				     TCA_ROOT_MAX, NULL, extack);
2138 	if (ret < 0)
2139 		return ret;
2140 
2141 	if (tca[TCA_ACT_TAB] == NULL) {
2142 		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
2143 		return -EINVAL;
2144 	}
2145 
2146 	/* n->nlmsg_flags & NLM_F_CREATE */
2147 	switch (n->nlmsg_type) {
2148 	case RTM_NEWACTION:
2149 		/* We assume all flag combinations other than
2150 		 * NLM_F_REPLACE mean "create only if it doesn't
2151 		 * exist". NLM_F_CREATE | NLM_F_EXCL implies that
2152 		 * already, but to avoid ambiguity (e.g. when the
2153 		 * flags are zero) we set the replace flag explicitly.
2154 		 */
2155 		if (n->nlmsg_flags & NLM_F_REPLACE)
2156 			flags = TCA_ACT_FLAGS_REPLACE;
2157 		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
2158 				     extack);
2159 		break;
2160 	case RTM_DELACTION:
2161 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2162 				    portid, RTM_DELACTION, extack);
2163 		break;
2164 	case RTM_GETACTION:
2165 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2166 				    portid, RTM_GETACTION, extack);
2167 		break;
2168 	default:
2169 		BUG();
2170 	}
2171 
2172 	return ret;
2173 }
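
/* Flag-mapping sketch for RTM_NEWACTION requests (hypothetical values,
 * matching what iproute2 is believed to send):
 */
#if 0
	/* "add": create, fail if the index already exists */
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL;
	/* "replace": create or overwrite (maps to TCA_ACT_FLAGS_REPLACE) */
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_REPLACE;
#endif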
2174 
2175 static struct nlattr *find_dump_kind(struct nlattr **nla)
2176 {
2177 	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
2178 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
2179 	struct nlattr *kind;
2180 
2181 	tb1 = nla[TCA_ACT_TAB];
2182 	if (tb1 == NULL)
2183 		return NULL;
2184 
2185 	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
2186 		return NULL;
2187 
2188 	if (tb[1] == NULL)
2189 		return NULL;
2190 	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
2191 		return NULL;
2192 	kind = tb2[TCA_ACT_KIND];
2193 
2194 	return kind;
2195 }
2196 
2197 static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
2198 {
2199 	struct net *net = sock_net(skb->sk);
2200 	struct nlmsghdr *nlh;
2201 	unsigned char *b = skb_tail_pointer(skb);
2202 	struct nlattr *nest;
2203 	struct tc_action_ops *a_o;
2204 	int ret = 0;
2205 	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
2206 	struct nlattr *tb[TCA_ROOT_MAX + 1];
2207 	struct nlattr *count_attr = NULL;
2208 	unsigned long jiffy_since = 0;
2209 	struct nlattr *kind = NULL;
2210 	struct nla_bitfield32 bf;
2211 	u32 msecs_since = 0;
2212 	u32 act_count = 0;
2213 
2214 	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
2215 				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
2216 	if (ret < 0)
2217 		return ret;
2218 
2219 	kind = find_dump_kind(tb);
2220 	if (kind == NULL) {
2221 		pr_info("tc_dump_action: bad action kind\n");
2222 		return 0;
2223 	}
2224 
2225 	a_o = tc_lookup_action(kind);
2226 	if (a_o == NULL)
2227 		return 0;
2228 
2229 	cb->args[2] = 0;
2230 	if (tb[TCA_ROOT_FLAGS]) {
2231 		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
2232 		cb->args[2] = bf.value;
2233 	}
2234 
2235 	if (tb[TCA_ROOT_TIME_DELTA]) {
2236 		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
2237 	}
2238 
2239 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2240 			cb->nlh->nlmsg_type, sizeof(*t), 0);
2241 	if (!nlh)
2242 		goto out_module_put;
2243 
2244 	if (msecs_since)
2245 		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
2246 
2247 	t = nlmsg_data(nlh);
2248 	t->tca_family = AF_UNSPEC;
2249 	t->tca__pad1 = 0;
2250 	t->tca__pad2 = 0;
2251 	cb->args[3] = jiffy_since;
2252 	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
2253 	if (!count_attr)
2254 		goto out_module_put;
2255 
2256 	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
2257 	if (nest == NULL)
2258 		goto out_module_put;
2259 
2260 	ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
2261 	if (ret < 0)
2262 		goto out_module_put;
2263 
2264 	if (ret > 0) {
2265 		nla_nest_end(skb, nest);
2266 		ret = skb->len;
2267 		act_count = cb->args[1];
2268 		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
2269 		cb->args[1] = 0;
2270 	} else
2271 		nlmsg_trim(skb, b);
2272 
2273 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2274 	if (NETLINK_CB(cb->skb).portid && ret)
2275 		nlh->nlmsg_flags |= NLM_F_MULTI;
2276 	module_put(a_o->owner);
2277 	return skb->len;
2278 
2279 out_module_put:
2280 	module_put(a_o->owner);
2281 	nlmsg_trim(skb, b);
2282 	return skb->len;
2283 }
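
/* Dump-request sketch: the kind comes from the first member of the
 * TCA_ACT_TAB nest (see find_dump_kind() above), while TCA_ROOT_FLAGS
 * and TCA_ROOT_TIME_DELTA sit beside that nest at the top level.
 * Fragment in the style of the earlier libmnl sketches:
 */
#if 0
	struct nla_bitfield32 bf = {
		.selector = TCA_ACT_FLAG_LARGE_DUMP_ON,
		.value	  = TCA_ACT_FLAG_LARGE_DUMP_ON,
	};

	nlh->nlmsg_type = RTM_GETACTION;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	tab = mnl_attr_nest_start(nlh, TCA_ACT_TAB);
	act = mnl_attr_nest_start(nlh, 1);
	mnl_attr_put_strz(nlh, TCA_ACT_KIND, "gact");
	mnl_attr_nest_end(nlh, act);
	mnl_attr_nest_end(nlh, tab);
	mnl_attr_put(nlh, TCA_ROOT_FLAGS, sizeof(bf), &bf);
#endif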
2284 
2285 static const struct rtnl_msg_handler tc_action_rtnl_msg_handlers[] __initconst = {
2286 	{.msgtype = RTM_NEWACTION, .doit = tc_ctl_action},
2287 	{.msgtype = RTM_DELACTION, .doit = tc_ctl_action},
2288 	{.msgtype = RTM_GETACTION, .doit = tc_ctl_action,
2289 	 .dumpit = tc_dump_action},
2290 };
2291 
2292 static int __init tc_action_init(void)
2293 {
2294 	rtnl_register_many(tc_action_rtnl_msg_handlers);
2295 	return 0;
2296 }
2297 
2298 subsys_initcall(tc_action_init);
2299