// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>
#include <net/tc_wrapper.h>

static struct tc_action_ops act_gate_ops;

static ktime_t gate_get_time(struct tcf_gate *gact)
{
	ktime_t mono = ktime_get();

	switch (gact->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, gact->tk_offset);
	}

	return KTIME_MAX;
}

static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
	struct tcf_gate_params *param = &gact->param;
	ktime_t now, base, cycle;
	u64 n;

	base = ns_to_ktime(param->tcfg_basetime);
	now = gate_get_time(gact);

	if (ktime_after(base, now)) {
		*start = base;
		return;
	}

	cycle = param->tcfg_cycletime;

	n = div64_u64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
}

static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
	ktime_t expires;

	expires = hrtimer_get_expires(&gact->hitimer);
	if (expires == 0)
		expires = KTIME_MAX;

	start = min_t(ktime_t, start, expires);

	hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
}

static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
	struct tcf_gate *gact = container_of(timer, struct tcf_gate,
					     hitimer);
	struct tcf_gate_params *p = &gact->param;
	struct tcfg_gate_entry *next;
	ktime_t close_time, now;

	spin_lock(&gact->tcf_lock);

	next = gact->next_entry;

	/* cycle start, clear pending bit, clear total octets */
	gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
	gact->current_entry_octets = 0;
	gact->current_max_octets = next->maxoctets;

	gact->current_close_time = ktime_add_ns(gact->current_close_time,
						next->interval);

	close_time = gact->current_close_time;

	if (list_is_last(&next->list, &p->entries))
		next = list_first_entry(&p->entries,
					struct tcfg_gate_entry, list);
	else
		next = list_next_entry(next, list);

	now = gate_get_time(gact);

	if (ktime_after(now, close_time)) {
		ktime_t cycle, base;
		u64 n;

		cycle = p->tcfg_cycletime;
		base = ns_to_ktime(p->tcfg_basetime);
		n = div64_u64(ktime_sub_ns(now, base), cycle);
		close_time = ktime_add_ns(base, (n + 1) * cycle);
	}

	gact->next_entry = next;

	hrtimer_set_expires(&gact->hitimer, close_time);

	spin_unlock(&gact->tcf_lock);

	return HRTIMER_RESTART;
}

TC_INDIRECT_SCOPE int tcf_gate_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_gate *gact = to_gate(a);
	int action = READ_ONCE(gact->tcf_action);

	tcf_lastuse_update(&gact->tcf_tm);
	tcf_action_update_bstats(&gact->common, skb);

	spin_lock(&gact->tcf_lock);
	if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
		spin_unlock(&gact->tcf_lock);
		return action;
	}

	if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN)) {
		spin_unlock(&gact->tcf_lock);
		goto drop;
	}

	if (gact->current_max_octets >= 0) {
		gact->current_entry_octets += qdisc_pkt_len(skb);
		if (gact->current_entry_octets > gact->current_max_octets) {
			spin_unlock(&gact->tcf_lock);
			goto overlimit;
		}
	}
	spin_unlock(&gact->tcf_lock);

	return action;

overlimit:
	tcf_action_inc_overlimit_qstats(&gact->common);
drop:
	tcf_action_inc_drop_qstats(&gact->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
	[TCA_GATE_ENTRY_INDEX]		= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_GATE]		= { .type = NLA_FLAG },
	[TCA_GATE_ENTRY_INTERVAL]	= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_IPV]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_MAX_OCTETS]	= { .type = NLA_S32 },
};

static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
	[TCA_GATE_PARMS]		=
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
	[TCA_GATE_PRIORITY]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_LIST]		= { .type = NLA_NESTED },
	[TCA_GATE_BASE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME]		= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME_EXT]	= { .type = NLA_U64 },
	[TCA_GATE_FLAGS]		= { .type = NLA_U32 },
	[TCA_GATE_CLOCKID]		= { .type = NLA_S32 },
};

static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
			   struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);

	if (tb[TCA_GATE_ENTRY_INTERVAL])
		interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	entry->ipv = nla_get_s32_default(tb[TCA_GATE_ENTRY_IPV], -1);

	entry->maxoctets = nla_get_s32_default(tb[TCA_GATE_ENTRY_MAX_OCTETS],
					       -1);

	return 0;
}
static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
			    int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_gate_entry(tb, entry, extack);
}

static void release_entry_list(struct list_head *entries)
{
	struct tcfg_gate_entry *entry, *e;

	list_for_each_entry_safe(entry, e, entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

static int parse_gate_list(struct nlattr *list_attr,
			   struct tcf_gate_params *sched,
			   struct netlink_ext_ack *extack)
{
	struct tcfg_gate_entry *entry;
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list_attr)
		return -EINVAL;

	nla_for_each_nested(n, list_attr, rem) {
		if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			err = -ENOMEM;
			goto release_list;
		}

		err = parse_gate_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			goto release_list;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;

release_list:
	release_entry_list(&sched->entries);

	return err;
}

static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
			     enum tk_offsets tko, s32 clockid,
			     bool do_init)
{
	if (!do_init) {
		if (basetime == gact->param.tcfg_basetime &&
		    tko == gact->tk_offset &&
		    clockid == gact->param.tcfg_clockid)
			return;

		spin_unlock_bh(&gact->tcf_lock);
		hrtimer_cancel(&gact->hitimer);
		spin_lock_bh(&gact->tcf_lock);
	}
	gact->param.tcfg_basetime = basetime;
	gact->param.tcfg_clockid = clockid;
	gact->tk_offset = tko;
	hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
	gact->hitimer.function = gate_timer_func;
}

static int tcf_gate_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
	enum tk_offsets tk_offset = TK_OFFS_TAI;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_GATE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	u64 cycletime = 0, basetime = 0;
	struct tcf_gate_params *p;
	s32 clockid = CLOCK_TAI;
	struct tcf_gate *gact;
	struct tc_gate *parm;
	int ret = 0, err;
	u32 gflags = 0;
	s32 prio = -1;
	ktime_t start;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GATE_PARMS])
		return -EINVAL;

	if (tb[TCA_GATE_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			return -EINVAL;
		}
	}

	parm = nla_data(tb[TCA_GATE_PARMS]);
	index = parm->index;

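	/* Added note, not in the original source: tcf_idr_check_alloc()
	 * returns 0 when no action exists at this index (and reserves the
	 * index for us), a positive value when an existing action was found
	 * and its reference taken, or a negative errno. The branches below
	 * map those cases to bind / create / replace.
	 */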
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (err && bind)
		return ACT_P_BOUND;

	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_gate_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	if (tb[TCA_GATE_PRIORITY])
		prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

	if (tb[TCA_GATE_BASE_TIME])
		basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

	if (tb[TCA_GATE_FLAGS])
		gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

	gact = to_gate(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&gact->param.entries);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&gact->tcf_lock);
	p = &gact->param;

	if (tb[TCA_GATE_CYCLE_TIME])
		cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

	if (tb[TCA_GATE_ENTRY_LIST]) {
		err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
		if (err < 0)
			goto chain_put;
	}

	if (!cycletime) {
		struct tcfg_gate_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &p->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		cycletime = cycle;
		if (!cycletime) {
			err = -EINVAL;
			goto chain_put;
		}
	}
	p->tcfg_cycletime = cycletime;

	if (tb[TCA_GATE_CYCLE_TIME_EXT])
		p->tcfg_cycletime_ext =
			nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

	gate_setup_timer(gact, basetime, tk_offset, clockid,
			 ret == ACT_P_CREATED);
	p->tcfg_priority = prio;
	p->tcfg_flags = gflags;
	gate_get_start_time(gact, &start);

	gact->current_close_time = start;
	gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

	gact->next_entry = list_first_entry(&p->entries,
					    struct tcfg_gate_entry, list);

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

	gate_start_timer(gact, start);

	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

chain_put:
	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	/* action is not inserted in any list: it's safe to init hitimer
	 * without taking tcf_lock.
	 */
	if (ret == ACT_P_CREATED)
		gate_setup_timer(gact, gact->param.tcfg_basetime,
				 gact->tk_offset, gact->param.tcfg_clockid,
				 true);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_gate_cleanup(struct tc_action *a)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_gate_params *p;

	p = &gact->param;
	hrtimer_cancel(&gact->hitimer);
	release_entry_list(&p->entries);
}

static int dumping_entry(struct sk_buff *skb,
			 struct tcfg_gate_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
		goto nla_put_failure;

	return nla_nest_end(skb, item);

nla_put_failure:
	nla_nest_cancel(skb, item);
	return -1;
}

static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gate *gact = to_gate(a);
	struct tc_gate opt = {
		.index   = gact->tcf_index,
		.refcnt  = refcount_read(&gact->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&gact->tcf_bindcnt) - bind,
	};
	struct tcfg_gate_entry *entry;
	struct tcf_gate_params *p;
	struct nlattr *entry_list;
	struct tcf_t t;

	spin_lock_bh(&gact->tcf_lock);
	opt.action = gact->tcf_action;

	p = &gact->param;

	if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
			      p->tcfg_basetime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
			      p->tcfg_cycletime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
			      p->tcfg_cycletime_ext, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
		goto nla_put_failure;

	entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
	if (!entry_list)
		goto nla_put_failure;

	list_for_each_entry(entry, &p->entries, list) {
		if (dumping_entry(skb, entry) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, entry_list);

	tcf_tm_dump(&t, &gact->tcf_tm);
	if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&gact->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&gact->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_t *tm = &gact->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_gate));
}

static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
				const struct tc_action *act)
{
	entry->gate.entries = tcf_gate_get_list(act);

	if (!entry->gate.entries)
		return -EINVAL;

	entry->destructor = tcf_gate_entry_destructor;
	entry->destructor_priv = entry->gate.entries;

	return 0;
}

static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	int err;

	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_GATE;
		entry->gate.prio = tcf_gate_prio(act);
		entry->gate.basetime = tcf_gate_basetime(act);
		entry->gate.cycletime = tcf_gate_cycletime(act);
		entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
		entry->gate.num_entries = tcf_gate_num_entries(act);
		err = tcf_gate_get_entries(entry, act);
		if (err)
			return err;
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_GATE;
	}

	return 0;
}

static struct tc_action_ops act_gate_ops = {
	.kind		= "gate",
	.id		= TCA_ID_GATE,
	.owner		= THIS_MODULE,
	.act		= tcf_gate_act,
	.dump		= tcf_gate_dump,
	.init		= tcf_gate_init,
	.cleanup	= tcf_gate_cleanup,
	.stats_update	= tcf_gate_stats_update,
	.get_fill_size	= tcf_gate_get_fill_size,
	.offload_act_setup = tcf_gate_offload_act_setup,
	.size		= sizeof(struct tcf_gate),
};
MODULE_ALIAS_NET_ACT("gate");

static __net_init int gate_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);

	return tc_action_net_init(net, tn, &act_gate_ops);
}

static void __net_exit gate_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_gate_ops.net_id);
}

static struct pernet_operations gate_net_ops = {
	.init = gate_init_net,
	.exit_batch = gate_exit_net,
	.id   = &act_gate_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init gate_init_module(void)
{
	return tcf_register_action(&act_gate_ops, &gate_net_ops);
}

static void __exit gate_cleanup_module(void)
{
	tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}

module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_DESCRIPTION("TC gate action");
MODULE_LICENSE("GPL v2");
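
/* Illustrative usage (added note, not part of the original source): the
 * gate action is configured from user space with the iproute2 tc command.
 * The device name and numeric values below are hypothetical; the attribute
 * syntax follows the "gate" action as documented for iproute2.
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip flower \
 *      action gate index 2 clockid CLOCK_TAI \
 *      base-time 200000000000 \
 *      sched-entry open  200000000 -1 8000000 \
 *      sched-entry close 100000000 -1 -1
 *
 * Each sched-entry maps to one tcfg_gate_entry above: gate state
 * (open/close), interval in nanoseconds, internal priority (ipv, -1 for
 * unchanged), and max-octets (-1 for unlimited).
 */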