xref: /linux/net/sched/act_gate.c (revision abacaf559950eec0d99d37ff6b92049409af5943)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Copyright 2020 NXP */
3 
4 #include <linux/module.h>
5 #include <linux/types.h>
6 #include <linux/kernel.h>
7 #include <linux/string.h>
8 #include <linux/errno.h>
9 #include <linux/skbuff.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/init.h>
12 #include <linux/slab.h>
13 #include <net/act_api.h>
14 #include <net/netlink.h>
15 #include <net/pkt_cls.h>
16 #include <net/tc_act/tc_gate.h>
17 #include <net/tc_wrapper.h>
18 
/* Forward declaration: the ops table is defined at the bottom of the file. */
static struct tc_action_ops act_gate_ops;
20 
gate_get_time(struct tcf_gate * gact)21 static ktime_t gate_get_time(struct tcf_gate *gact)
22 {
23 	ktime_t mono = ktime_get();
24 
25 	switch (gact->tk_offset) {
26 	case TK_OFFS_MAX:
27 		return mono;
28 	default:
29 		return ktime_mono_to_any(mono, gact->tk_offset);
30 	}
31 
32 	return KTIME_MAX;
33 }
34 
/* Forward declaration: parameter sets are freed after an RCU grace period. */
static void tcf_gate_params_free_rcu(struct rcu_head *head);
36 
/* Compute the start time of the next schedule cycle.
 *
 * If the configured base time is still in the future it is used as-is;
 * otherwise the start is advanced to the first cycle boundary strictly
 * after now.
 */
static void gate_get_start_time(struct tcf_gate *gact,
				const struct tcf_gate_params *param,
				ktime_t *start)
{
	ktime_t base = ns_to_ktime(param->tcfg_basetime);
	ktime_t now = gate_get_time(gact);
	ktime_t cycle;
	u64 elapsed_cycles;

	if (!ktime_after(base, now)) {
		cycle = param->tcfg_cycletime;
		elapsed_cycles = div64_u64(ktime_sub_ns(now, base), cycle);
		*start = ktime_add_ns(base, (elapsed_cycles + 1) * cycle);
	} else {
		*start = base;
	}
}
57 
/* Arm the gate hrtimer so it fires no later than @start.
 *
 * An unarmed timer reports an expiry of 0, which would otherwise always
 * win the min comparison; treat it as "no deadline" (KTIME_MAX).
 */
static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
	ktime_t cur = hrtimer_get_expires(&gact->hitimer);

	if (!cur)
		cur = KTIME_MAX;

	hrtimer_start(&gact->hitimer, min_t(ktime_t, start, cur),
		      HRTIMER_MODE_ABS_SOFT);
}
70 
/* hrtimer callback: advance the schedule by one gate entry.
 *
 * Runs in softirq context (HRTIMER_MODE_ABS_SOFT) and serializes against
 * both the datapath and the control path via tcf_lock. Always re-arms
 * (HRTIMER_RESTART) with the expiry set below.
 */
static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
	struct tcf_gate *gact = container_of(timer, struct tcf_gate,
					     hitimer);
	struct tcfg_gate_entry *next;
	struct tcf_gate_params *p;
	ktime_t close_time, now;

	spin_lock(&gact->tcf_lock);

	p = rcu_dereference_protected(gact->param,
				      lockdep_is_held(&gact->tcf_lock));
	next = gact->next_entry;

	/* cycle start, clear pending bit, clear total octets */
	gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
	gact->current_entry_octets = 0;
	gact->current_max_octets = next->maxoctets;

	gact->current_close_time = ktime_add_ns(gact->current_close_time,
						next->interval);

	close_time = gact->current_close_time;

	/* Pick the following entry, wrapping to the list head at the end
	 * of the cycle.
	 */
	if (list_is_last(&next->list, &p->entries))
		next = list_first_entry(&p->entries,
					struct tcfg_gate_entry, list);
	else
		next = list_next_entry(next, list);

	now = gate_get_time(gact);

	/* If we fell behind (expirations were missed), resynchronize the
	 * close time to the next cycle boundary after now instead of
	 * firing a storm of catch-up timers.
	 */
	if (ktime_after(now, close_time)) {
		ktime_t cycle, base;
		u64 n;

		cycle = p->tcfg_cycletime;
		base = ns_to_ktime(p->tcfg_basetime);
		n = div64_u64(ktime_sub_ns(now, base), cycle);
		close_time = ktime_add_ns(base, (n + 1) * cycle);
	}

	gact->next_entry = next;

	hrtimer_set_expires(&gact->hitimer, close_time);

	spin_unlock(&gact->tcf_lock);

	return HRTIMER_RESTART;
}
121 
/* Datapath: decide whether one packet passes the gate or is dropped.
 *
 * Drops (TC_ACT_SHOT) when the gate is closed or the current entry's
 * octet budget is exceeded; otherwise returns the configured action.
 * While the schedule is still pending its first timer tick, all packets
 * pass.
 */
TC_INDIRECT_SCOPE int tcf_gate_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_gate *gact = to_gate(a);
	int action = READ_ONCE(gact->tcf_action);

	tcf_lastuse_update(&gact->tcf_tm);
	tcf_action_update_bstats(&gact->common, skb);

	spin_lock(&gact->tcf_lock);
	/* Schedule not started yet: let traffic through untouched. */
	if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
		spin_unlock(&gact->tcf_lock);
		return action;
	}

	if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN)) {
		spin_unlock(&gact->tcf_lock);
		goto drop;
	}

	/* A negative max_octets means "no octet limit" for this entry. */
	if (gact->current_max_octets >= 0) {
		gact->current_entry_octets += qdisc_pkt_len(skb);
		if (gact->current_entry_octets > gact->current_max_octets) {
			spin_unlock(&gact->tcf_lock);
			goto overlimit;
		}
	}
	spin_unlock(&gact->tcf_lock);

	return action;

overlimit:
	tcf_action_inc_overlimit_qstats(&gact->common);
drop:
	tcf_action_inc_drop_qstats(&gact->common);
	return TC_ACT_SHOT;
}
160 
/* Netlink policy for one schedule entry (contents of TCA_GATE_ONE_ENTRY). */
static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
	[TCA_GATE_ENTRY_INDEX]		= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_GATE]		= { .type = NLA_FLAG },
	[TCA_GATE_ENTRY_INTERVAL]	= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_IPV]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_MAX_OCTETS]	= { .type = NLA_S32 },
};
168 
/* Netlink policy for the top-level gate action attributes. */
static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
	[TCA_GATE_PARMS]		=
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
	[TCA_GATE_PRIORITY]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_LIST]		= { .type = NLA_NESTED },
	[TCA_GATE_BASE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME_EXT]	= { .type = NLA_U64 },
	[TCA_GATE_FLAGS]		= { .type = NLA_U32 },
	[TCA_GATE_CLOCKID]		= { .type = NLA_S32 },
};
180 
fill_gate_entry(struct nlattr ** tb,struct tcfg_gate_entry * entry,struct netlink_ext_ack * extack)181 static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
182 			   struct netlink_ext_ack *extack)
183 {
184 	u32 interval = 0;
185 
186 	entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);
187 
188 	if (tb[TCA_GATE_ENTRY_INTERVAL])
189 		interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);
190 
191 	if (interval == 0) {
192 		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
193 		return -EINVAL;
194 	}
195 
196 	entry->interval = interval;
197 
198 	entry->ipv = nla_get_s32_default(tb[TCA_GATE_ENTRY_IPV], -1);
199 
200 	entry->maxoctets = nla_get_s32_default(tb[TCA_GATE_ENTRY_MAX_OCTETS],
201 					       -1);
202 
203 	return 0;
204 }
205 
parse_gate_entry(struct nlattr * n,struct tcfg_gate_entry * entry,int index,struct netlink_ext_ack * extack)206 static int parse_gate_entry(struct nlattr *n, struct  tcfg_gate_entry *entry,
207 			    int index, struct netlink_ext_ack *extack)
208 {
209 	struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
210 	int err;
211 
212 	err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
213 	if (err < 0) {
214 		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
215 		return -EINVAL;
216 	}
217 
218 	entry->index = index;
219 
220 	return fill_gate_entry(tb, entry, extack);
221 }
222 
release_entry_list(struct list_head * entries)223 static void release_entry_list(struct list_head *entries)
224 {
225 	struct tcfg_gate_entry *entry, *e;
226 
227 	list_for_each_entry_safe(entry, e, entries, list) {
228 		list_del(&entry->list);
229 		kfree(entry);
230 	}
231 }
232 
tcf_gate_copy_entries(struct tcf_gate_params * dst,const struct tcf_gate_params * src,struct netlink_ext_ack * extack)233 static int tcf_gate_copy_entries(struct tcf_gate_params *dst,
234 				 const struct tcf_gate_params *src,
235 				 struct netlink_ext_ack *extack)
236 {
237 	struct tcfg_gate_entry *entry;
238 	int i = 0;
239 
240 	list_for_each_entry(entry, &src->entries, list) {
241 		struct tcfg_gate_entry *new;
242 
243 		new = kzalloc(sizeof(*new), GFP_ATOMIC);
244 		if (!new) {
245 			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
246 			return -ENOMEM;
247 		}
248 
249 		new->index      = entry->index;
250 		new->gate_state = entry->gate_state;
251 		new->interval   = entry->interval;
252 		new->ipv        = entry->ipv;
253 		new->maxoctets  = entry->maxoctets;
254 		list_add_tail(&new->list, &dst->entries);
255 		i++;
256 	}
257 
258 	dst->num_entries = i;
259 	return 0;
260 }
261 
/* Parse TCA_GATE_ENTRY_LIST into @sched->entries.
 *
 * Attributes that are not TCA_GATE_ONE_ENTRY are skipped (with an extack
 * message) rather than treated as fatal. Returns the number of entries
 * parsed (possibly 0) or a negative errno; on error, entries already
 * added are freed before returning.
 */
static int parse_gate_list(struct nlattr *list_attr,
			   struct tcf_gate_params *sched,
			   struct netlink_ext_ack *extack)
{
	struct tcfg_gate_entry *entry;
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list_attr)
		return -EINVAL;

	nla_for_each_nested(n, list_attr, rem) {
		if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
			continue;
		}

		entry = kzalloc_obj(*entry, GFP_ATOMIC);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			err = -ENOMEM;
			goto release_list;
		}

		err = parse_gate_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			goto release_list;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;

release_list:
	release_entry_list(&sched->entries);

	return err;
}
306 
gate_timer_needs_cancel(u64 basetime,u64 old_basetime,enum tk_offsets tko,enum tk_offsets old_tko,s32 clockid,s32 old_clockid)307 static bool gate_timer_needs_cancel(u64 basetime, u64 old_basetime,
308 				    enum tk_offsets tko,
309 				    enum tk_offsets old_tko,
310 				    s32 clockid, s32 old_clockid)
311 {
312 	return basetime != old_basetime ||
313 	       clockid != old_clockid ||
314 	       tko != old_tko;
315 }
316 
/* Map a userspace clockid to the timekeeping offset used internally.
 *
 * CLOCK_MONOTONIC maps to TK_OFFS_MAX, which gate_get_time() treats as
 * "no conversion needed". Returns 0 on success, -EINVAL for any other
 * clock.
 */
static int gate_clock_resolve(s32 clockid, enum tk_offsets *tko,
			      struct netlink_ext_ack *extack)
{
	enum tk_offsets off;

	switch (clockid) {
	case CLOCK_REALTIME:
		off = TK_OFFS_REAL;
		break;
	case CLOCK_MONOTONIC:
		off = TK_OFFS_MAX;
		break;
	case CLOCK_BOOTTIME:
		off = TK_OFFS_BOOT;
		break;
	case CLOCK_TAI:
		off = TK_OFFS_TAI;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
		return -EINVAL;
	}

	*tko = off;
	return 0;
}
338 
/* (Re)initialize the schedule hrtimer for @clockid and publish the new
 * timekeeping offset. The WRITE_ONCE pairs with the READ_ONCE of
 * tk_offset in tcf_gate_init().
 */
static void gate_setup_timer(struct tcf_gate *gact, s32 clockid,
			     enum tk_offsets tko)
{
	WRITE_ONCE(gact->tk_offset, tko);
	hrtimer_setup(&gact->hitimer, gate_timer_func, clockid,
		      HRTIMER_MODE_ABS_SOFT);
}
346 
/* Create or replace a gate action from netlink attributes.
 *
 * The new parameter set is built outside the lock, then swapped in under
 * tcf_lock with rcu_replace_pointer(); the old set is freed after an RCU
 * grace period. The schedule timer is only cancelled and re-initialized
 * when the base time, clock id or timekeeping offset actually changed.
 *
 * Returns ACT_P_CREATED, 0 (replaced), ACT_P_BOUND, or a negative errno.
 */
static int tcf_gate_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
	u64 cycletime = 0, basetime = 0, cycletime_ext = 0;
	struct tcf_gate_params *p = NULL, *old_p = NULL;
	enum tk_offsets old_tk_offset = TK_OFFS_TAI;
	const struct tcf_gate_params *cur_p = NULL;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_GATE_MAX + 1];
	enum tk_offsets tko = TK_OFFS_TAI;
	struct tcf_chain *goto_ch = NULL;
	s32 timer_clockid = CLOCK_TAI;
	bool use_old_entries = false;
	s32 old_clockid = CLOCK_TAI;
	bool need_cancel = false;
	s32 clockid = CLOCK_TAI;
	struct tcf_gate *gact;
	struct tc_gate *parm;
	u64 old_basetime = 0;
	int ret = 0, err;
	u32 gflags = 0;
	s32 prio = -1;
	ktime_t start;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GATE_PARMS])
		return -EINVAL;

	if (tb[TCA_GATE_CLOCKID])
		clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);

	parm = nla_data(tb[TCA_GATE_PARMS]);
	index = parm->index;

	/* Look up or reserve the action index in the per-netns IDR. */
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (err && bind)
		return ACT_P_BOUND;

	if (!err) {
		/* Index was free: create a fresh action instance. */
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_gate_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		/* Exists but replace was not requested. */
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	gact = to_gate(*a);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	/* Build the replacement parameter set outside any lock. */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto chain_put;
	}
	INIT_LIST_HEAD(&p->entries);

	/* No TCA_GATE_ENTRY_LIST attribute (or an empty one, see below)
	 * means "keep the existing schedule".
	 */
	use_old_entries = !tb[TCA_GATE_ENTRY_LIST];
	if (!use_old_entries) {
		err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
		if (err < 0)
			goto err_free;
		/* parse_gate_list() returns the entry count; zero entries
		 * falls back to reusing the old list.
		 */
		use_old_entries = !err;
	}

	/* A brand-new action has no old schedule to fall back on. */
	if (ret == ACT_P_CREATED && use_old_entries) {
		NL_SET_ERR_MSG(extack, "The entry list is empty");
		err = -EINVAL;
		goto err_free;
	}

	if (ret != ACT_P_CREATED) {
		/* Replace: snapshot current parameters under RCU so
		 * unspecified attributes inherit their old values.
		 */
		rcu_read_lock();
		cur_p = rcu_dereference(gact->param);

		old_basetime  = cur_p->tcfg_basetime;
		old_clockid   = cur_p->tcfg_clockid;
		old_tk_offset = READ_ONCE(gact->tk_offset);

		basetime      = old_basetime;
		cycletime_ext = cur_p->tcfg_cycletime_ext;
		prio          = cur_p->tcfg_priority;
		gflags        = cur_p->tcfg_flags;

		if (!tb[TCA_GATE_CLOCKID])
			clockid = old_clockid;

		err = 0;
		if (use_old_entries) {
			err = tcf_gate_copy_entries(p, cur_p, extack);
			if (!err && !tb[TCA_GATE_CYCLE_TIME])
				cycletime = cur_p->tcfg_cycletime;
		}
		rcu_read_unlock();
		if (err)
			goto err_free;
	}

	/* Explicitly supplied attributes override inherited values. */
	if (tb[TCA_GATE_PRIORITY])
		prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

	if (tb[TCA_GATE_BASE_TIME])
		basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

	if (tb[TCA_GATE_FLAGS])
		gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

	if (tb[TCA_GATE_CYCLE_TIME])
		cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

	if (tb[TCA_GATE_CYCLE_TIME_EXT])
		cycletime_ext = nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

	err = gate_clock_resolve(clockid, &tko, extack);
	if (err)
		goto err_free;
	timer_clockid = clockid;

	/* Only disturb a running timer if the timing base changed. */
	need_cancel = ret != ACT_P_CREATED &&
		      gate_timer_needs_cancel(basetime, old_basetime,
					      tko, old_tk_offset,
					      timer_clockid, old_clockid);

	if (need_cancel)
		hrtimer_cancel(&gact->hitimer);

	spin_lock_bh(&gact->tcf_lock);

	/* Cycle time not given anywhere: derive it as the sum of the
	 * entry intervals.
	 */
	if (!cycletime) {
		struct tcfg_gate_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &p->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		cycletime = cycle;
	}
	p->tcfg_cycletime = cycletime;
	p->tcfg_cycletime_ext = cycletime_ext;

	if (need_cancel || ret == ACT_P_CREATED)
		gate_setup_timer(gact, timer_clockid, tko);
	p->tcfg_priority = prio;
	p->tcfg_flags = gflags;
	p->tcfg_basetime = basetime;
	p->tcfg_clockid = timer_clockid;
	gate_get_start_time(gact, p, &start);

	/* Publish the new parameters; the old set is freed via RCU below. */
	old_p = rcu_replace_pointer(gact->param, p,
				    lockdep_is_held(&gact->tcf_lock));

	gact->current_close_time = start;
	gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

	gact->next_entry = list_first_entry(&p->entries,
					    struct tcfg_gate_entry, list);

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

	gate_start_timer(gact, start);

	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (old_p)
		call_rcu(&old_p->rcu, tcf_gate_params_free_rcu);

	return ret;

err_free:
	release_entry_list(&p->entries);
	kfree(p);
chain_put:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	/* action is not inserted in any list: it's safe to init hitimer
	 * without taking tcf_lock.
	 */
	if (ret == ACT_P_CREATED)
		gate_setup_timer(gact, timer_clockid, tko);

	tcf_idr_release(*a, bind);
	return err;
}
554 
/* RCU callback: free a retired parameter set and its schedule entries. */
static void tcf_gate_params_free_rcu(struct rcu_head *head)
{
	struct tcf_gate_params *p = container_of(head, struct tcf_gate_params, rcu);

	release_entry_list(&p->entries);
	kfree(p);
}
562 
/* Action teardown: stop the schedule timer and release the parameter set
 * after an RCU grace period.
 */
static void tcf_gate_cleanup(struct tc_action *a)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_gate_params *p;

	hrtimer_cancel(&gact->hitimer);
	p = rcu_dereference_protected(gact->param, 1);
	if (p)
		call_rcu(&p->rcu, tcf_gate_params_free_rcu);
}
573 
dumping_entry(struct sk_buff * skb,struct tcfg_gate_entry * entry)574 static int dumping_entry(struct sk_buff *skb,
575 			 struct tcfg_gate_entry *entry)
576 {
577 	struct nlattr *item;
578 
579 	item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
580 	if (!item)
581 		return -ENOSPC;
582 
583 	if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
584 		goto nla_put_failure;
585 
586 	if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
587 		goto nla_put_failure;
588 
589 	if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
590 		goto nla_put_failure;
591 
592 	if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
593 		goto nla_put_failure;
594 
595 	if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
596 		goto nla_put_failure;
597 
598 	return nla_nest_end(skb, item);
599 
600 nla_put_failure:
601 	nla_nest_cancel(skb, item);
602 	return -1;
603 }
604 
/* Dump the action's configuration to userspace.
 *
 * The parameter set is read under rcu_read_lock so a concurrent replace
 * cannot free it mid-dump. Returns skb->len on success; on overflow the
 * skb is trimmed back to its pre-dump tail and -1 is returned.
 */
static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gate *gact = to_gate(a);
	struct tc_gate opt = {
		.index    = gact->tcf_index,
		.refcnt   = refcount_read(&gact->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&gact->tcf_bindcnt) - bind,
	};
	struct tcfg_gate_entry *entry;
	struct tcf_gate_params *p;
	struct nlattr *entry_list;
	struct tcf_t t;

	rcu_read_lock();
	opt.action = READ_ONCE(gact->tcf_action);
	p = rcu_dereference(gact->param);

	if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
			      p->tcfg_basetime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
			      p->tcfg_cycletime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
			      p->tcfg_cycletime_ext, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
		goto nla_put_failure;

	/* Entries go inside a TCA_GATE_ENTRY_LIST nest, one sub-nest each. */
	entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
	if (!entry_list)
		goto nla_put_failure;

	list_for_each_entry(entry, &p->entries, list) {
		if (dumping_entry(skb, entry) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, entry_list);

	tcf_tm_dump(&t, &gact->tcf_tm);
	if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
		goto nla_put_failure;
	rcu_read_unlock();

	return skb->len;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_trim(skb, b);
	return -1;
}
671 
/* Offload stats callback: fold driver-reported counters into the action's
 * stats and advance lastuse monotonically.
 */
static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_t *tm = &gact->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
681 
/* Dump-size estimate: accounts only for the fixed tc_gate parms attribute.
 * NOTE(review): entry-list and timing attributes are not included here —
 * presumably treated as an initial estimate by the core; confirm.
 */
static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_gate));
}
686 
/* Destructor for the offload entry array set up in tcf_gate_get_entries(). */
static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}
693 
tcf_gate_get_entries(struct flow_action_entry * entry,const struct tc_action * act)694 static int tcf_gate_get_entries(struct flow_action_entry *entry,
695 				const struct tc_action *act)
696 {
697 	entry->gate.entries = tcf_gate_get_list(act);
698 
699 	if (!entry->gate.entries)
700 		return -EINVAL;
701 
702 	entry->destructor = tcf_gate_entry_destructor;
703 	entry->destructor_priv = entry->gate.entries;
704 
705 	return 0;
706 }
707 
/* Translate the action into its flow-offload representation for drivers.
 *
 * With @bind true, a full flow_action_entry is populated (including a
 * copied entry list owned by the entry's destructor); with @bind false,
 * only the flow_offload_action id is tagged. Returns 0 or a negative
 * errno.
 */
static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	int err;

	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_GATE;
		entry->gate.prio = tcf_gate_prio(act);
		entry->gate.basetime = tcf_gate_basetime(act);
		entry->gate.cycletime = tcf_gate_cycletime(act);
		entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
		entry->gate.num_entries = tcf_gate_num_entries(act);
		err = tcf_gate_get_entries(entry, act);
		if (err)
			return err;
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_GATE;
	}

	return 0;
}
735 
/* tc action ops table; net_id is filled in by pernet registration via
 * gate_net_ops below.
 */
static struct tc_action_ops act_gate_ops = {
	.kind		=	"gate",
	.id		=	TCA_ID_GATE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_gate_act,
	.dump		=	tcf_gate_dump,
	.init		=	tcf_gate_init,
	.cleanup	=	tcf_gate_cleanup,
	.stats_update	=	tcf_gate_stats_update,
	.get_fill_size	=	tcf_gate_get_fill_size,
	.offload_act_setup =	tcf_gate_offload_act_setup,
	.size		=	sizeof(struct tcf_gate),
};
MODULE_ALIAS_NET_ACT("gate");
750 
/* Per-netns init: set up this netns' gate action table. */
static __net_init int gate_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);

	return tc_action_net_init(net, tn, &act_gate_ops);
}
757 
/* Per-netns batched exit: tear down gate actions for the dying netns. */
static void __net_exit gate_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_gate_ops.net_id);
}
762 
/* Pernet registration: allocates act_gate_ops.net_id and a per-netns
 * tc_action_net.
 */
static struct pernet_operations gate_net_ops = {
	.init = gate_init_net,
	.exit_batch = gate_exit_net,
	.id   = &act_gate_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
769 
/* Module entry: register the action ops and pernet hooks. */
static int __init gate_init_module(void)
{
	return tcf_register_action(&act_gate_ops, &gate_net_ops);
}
774 
/* Module exit: unregister the action ops and pernet hooks. */
static void __exit gate_cleanup_module(void)
{
	tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}
779 
/* Standard module boilerplate. */
module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_DESCRIPTION("TC gate action");
MODULE_LICENSE("GPL v2");
784