// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>
#include <net/tc_wrapper.h>

static struct tc_action_ops act_gate_ops;

static ktime_t gate_get_time(struct tcf_gate *gact)
{
	ktime_t mono = ktime_get();

	switch (gact->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, gact->tk_offset);
	}

	return KTIME_MAX;
}

static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
	struct tcf_gate_params *param = &gact->param;
	ktime_t now, base, cycle;
	u64 n;

	base = ns_to_ktime(param->tcfg_basetime);
	now = gate_get_time(gact);

	if (ktime_after(base, now)) {
		*start = base;
		return;
	}

	cycle = param->tcfg_cycletime;

	n = div64_u64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
}

static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
	ktime_t expires;

	expires = hrtimer_get_expires(&gact->hitimer);
	if (expires == 0)
		expires = KTIME_MAX;

	start = min_t(ktime_t, start, expires);

	hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
}
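/*
 * hrtimer callback, run once per schedule entry: apply the state of the
 * entry that just became current (gate open/closed, octet budget),
 * advance next_entry cyclically through the schedule, and re-arm the
 * timer at the entry's close time. If the timer fired late and the
 * close time already lies in the past, resynchronize to the start of
 * the next full cycle instead of replaying the missed entries.
 */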
static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
	struct tcf_gate *gact = container_of(timer, struct tcf_gate,
					     hitimer);
	struct tcf_gate_params *p = &gact->param;
	struct tcfg_gate_entry *next;
	ktime_t close_time, now;

	spin_lock(&gact->tcf_lock);

	next = gact->next_entry;

	/* cycle start, clear pending bit, clear total octets */
	gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
	gact->current_entry_octets = 0;
	gact->current_max_octets = next->maxoctets;

	gact->current_close_time = ktime_add_ns(gact->current_close_time,
						next->interval);

	close_time = gact->current_close_time;

	if (list_is_last(&next->list, &p->entries))
		next = list_first_entry(&p->entries,
					struct tcfg_gate_entry, list);
	else
		next = list_next_entry(next, list);

	now = gate_get_time(gact);

	if (ktime_after(now, close_time)) {
		ktime_t cycle, base;
		u64 n;

		cycle = p->tcfg_cycletime;
		base = ns_to_ktime(p->tcfg_basetime);
		n = div64_u64(ktime_sub_ns(now, base), cycle);
		close_time = ktime_add_ns(base, (n + 1) * cycle);
	}

	gact->next_entry = next;

	hrtimer_set_expires(&gact->hitimer, close_time);

	spin_unlock(&gact->tcf_lock);

	return HRTIMER_RESTART;
}

TC_INDIRECT_SCOPE int tcf_gate_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_gate *gact = to_gate(a);

	spin_lock(&gact->tcf_lock);

	tcf_lastuse_update(&gact->tcf_tm);
	bstats_update(&gact->tcf_bstats, skb);

	if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
		spin_unlock(&gact->tcf_lock);
		return gact->tcf_action;
	}

	if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN))
		goto drop;

	if (gact->current_max_octets >= 0) {
		gact->current_entry_octets += qdisc_pkt_len(skb);
		if (gact->current_entry_octets > gact->current_max_octets) {
			gact->tcf_qstats.overlimits++;
			goto drop;
		}
	}

	spin_unlock(&gact->tcf_lock);

	return gact->tcf_action;
drop:
	gact->tcf_qstats.drops++;
	spin_unlock(&gact->tcf_lock);

	return TC_ACT_SHOT;
}

static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
	[TCA_GATE_ENTRY_INDEX]		= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_GATE]		= { .type = NLA_FLAG },
	[TCA_GATE_ENTRY_INTERVAL]	= { .type = NLA_U32 },
	[TCA_GATE_ENTRY_IPV]		= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_MAX_OCTETS]	= { .type = NLA_S32 },
};

static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
	[TCA_GATE_PARMS]	=
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
	[TCA_GATE_PRIORITY]	= { .type = NLA_S32 },
	[TCA_GATE_ENTRY_LIST]	= { .type = NLA_NESTED },
	[TCA_GATE_BASE_TIME]	= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME]	= { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME_EXT] = { .type = NLA_U64 },
	[TCA_GATE_FLAGS]	= { .type = NLA_U32 },
	[TCA_GATE_CLOCKID]	= { .type = NLA_S32 },
};

static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
			   struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);

	if (tb[TCA_GATE_ENTRY_INTERVAL])
		interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	if (tb[TCA_GATE_ENTRY_IPV])
		entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
	else
		entry->ipv = -1;

	if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
		entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
	else
		entry->maxoctets = -1;

	return 0;
}
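/*
 * Schedule list parsing: each TCA_GATE_ONE_ENTRY attribute carries one
 * schedule entry (open/closed state, interval in ns, internal priority,
 * octet budget). Entries are appended in order and indexed sequentially;
 * on any failure the partially built list is released as a whole.
 */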
static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
			    int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_gate_entry(tb, entry, extack);
}

static void release_entry_list(struct list_head *entries)
{
	struct tcfg_gate_entry *entry, *e;

	list_for_each_entry_safe(entry, e, entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

static int parse_gate_list(struct nlattr *list_attr,
			   struct tcf_gate_params *sched,
			   struct netlink_ext_ack *extack)
{
	struct tcfg_gate_entry *entry;
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list_attr)
		return -EINVAL;

	nla_for_each_nested(n, list_attr, rem) {
		if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			err = -ENOMEM;
			goto release_list;
		}

		err = parse_gate_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			goto release_list;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;

release_list:
	release_entry_list(&sched->entries);

	return err;
}

static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
			     enum tk_offsets tko, s32 clockid,
			     bool do_init)
{
	if (!do_init) {
		if (basetime == gact->param.tcfg_basetime &&
		    tko == gact->tk_offset &&
		    clockid == gact->param.tcfg_clockid)
			return;

		spin_unlock_bh(&gact->tcf_lock);
		hrtimer_cancel(&gact->hitimer);
		spin_lock_bh(&gact->tcf_lock);
	}
	gact->param.tcfg_basetime = basetime;
	gact->param.tcfg_clockid = clockid;
	gact->tk_offset = tko;
	hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
	gact->hitimer.function = gate_timer_func;
}
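/*
 * Create a new gate action or replace an existing one. The schedule is
 * rebuilt under tcf_lock; if no cycle time is given, it defaults to the
 * sum of all entry intervals. The action starts with GATE_ACT_PENDING
 * set (packets pass unconditionally) until the hrtimer fires at the
 * computed start time and installs the first entry's state.
 */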
static int tcf_gate_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
	enum tk_offsets tk_offset = TK_OFFS_TAI;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_GATE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	u64 cycletime = 0, basetime = 0;
	struct tcf_gate_params *p;
	s32 clockid = CLOCK_TAI;
	struct tcf_gate *gact;
	struct tc_gate *parm;
	int ret = 0, err;
	u32 gflags = 0;
	s32 prio = -1;
	ktime_t start;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GATE_PARMS])
		return -EINVAL;

	if (tb[TCA_GATE_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			return -EINVAL;
		}
	}

	parm = nla_data(tb[TCA_GATE_PARMS]);
	index = parm->index;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (err && bind)
		return 0;

	if (!err) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_gate_ops, bind, false, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	if (tb[TCA_GATE_PRIORITY])
		prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

	if (tb[TCA_GATE_BASE_TIME])
		basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

	if (tb[TCA_GATE_FLAGS])
		gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

	gact = to_gate(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&gact->param.entries);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&gact->tcf_lock);
	p = &gact->param;

	if (tb[TCA_GATE_CYCLE_TIME])
		cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

	if (tb[TCA_GATE_ENTRY_LIST]) {
		err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
		if (err < 0)
			goto chain_put;
	}

	if (!cycletime) {
		struct tcfg_gate_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &p->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		cycletime = cycle;
		if (!cycletime) {
			err = -EINVAL;
			goto chain_put;
		}
	}
	p->tcfg_cycletime = cycletime;

	if (tb[TCA_GATE_CYCLE_TIME_EXT])
		p->tcfg_cycletime_ext =
			nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

	gate_setup_timer(gact, basetime, tk_offset, clockid,
			 ret == ACT_P_CREATED);
	p->tcfg_priority = prio;
	p->tcfg_flags = gflags;
	gate_get_start_time(gact, &start);

	gact->current_close_time = start;
	gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

	gact->next_entry = list_first_entry(&p->entries,
					    struct tcfg_gate_entry, list);

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

	gate_start_timer(gact, start);

	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

chain_put:
	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	/* action is not inserted in any list: it's safe to init hitimer
	 * without taking tcf_lock.
	 */
	if (ret == ACT_P_CREATED)
		gate_setup_timer(gact, gact->param.tcfg_basetime,
				 gact->tk_offset, gact->param.tcfg_clockid,
				 true);
	tcf_idr_release(*a, bind);
	return err;
}
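/* Cancel the running hrtimer before the schedule entries are freed. */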
static void tcf_gate_cleanup(struct tc_action *a)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_gate_params *p;

	p = &gact->param;
	hrtimer_cancel(&gact->hitimer);
	release_entry_list(&p->entries);
}

static int dumping_entry(struct sk_buff *skb,
			 struct tcfg_gate_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
		goto nla_put_failure;

	return nla_nest_end(skb, item);

nla_put_failure:
	nla_nest_cancel(skb, item);
	return -1;
}

static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gate *gact = to_gate(a);
	struct tc_gate opt = {
		.index = gact->tcf_index,
		.refcnt = refcount_read(&gact->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&gact->tcf_bindcnt) - bind,
	};
	struct tcfg_gate_entry *entry;
	struct tcf_gate_params *p;
	struct nlattr *entry_list;
	struct tcf_t t;

	spin_lock_bh(&gact->tcf_lock);
	opt.action = gact->tcf_action;

	p = &gact->param;

	if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
			      p->tcfg_basetime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
			      p->tcfg_cycletime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
			      p->tcfg_cycletime_ext, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
		goto nla_put_failure;

	entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
	if (!entry_list)
		goto nla_put_failure;

	list_for_each_entry(entry, &p->entries, list) {
		if (dumping_entry(skb, entry) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, entry_list);

	tcf_tm_dump(&t, &gact->tcf_tm);
	if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&gact->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&gact->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_t *tm = &gact->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_gate));
}
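/*
 * Hardware offload glue: export the gate parameters and a copy of the
 * schedule as a FLOW_ACTION_GATE entry so drivers can program the
 * schedule into hardware. The entry array returned by tcf_gate_get_list()
 * is freed through the destructor when the flow action is released.
 */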
static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
				const struct tc_action *act)
{
	entry->gate.entries = tcf_gate_get_list(act);

	if (!entry->gate.entries)
		return -EINVAL;

	entry->destructor = tcf_gate_entry_destructor;
	entry->destructor_priv = entry->gate.entries;

	return 0;
}

static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	int err;

	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_GATE;
		entry->gate.prio = tcf_gate_prio(act);
		entry->gate.basetime = tcf_gate_basetime(act);
		entry->gate.cycletime = tcf_gate_cycletime(act);
		entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
		entry->gate.num_entries = tcf_gate_num_entries(act);
		err = tcf_gate_get_entries(entry, act);
		if (err)
			return err;
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_GATE;
	}

	return 0;
}

static struct tc_action_ops act_gate_ops = {
	.kind		= "gate",
	.id		= TCA_ID_GATE,
	.owner		= THIS_MODULE,
	.act		= tcf_gate_act,
	.dump		= tcf_gate_dump,
	.init		= tcf_gate_init,
	.cleanup	= tcf_gate_cleanup,
	.stats_update	= tcf_gate_stats_update,
	.get_fill_size	= tcf_gate_get_fill_size,
	.offload_act_setup = tcf_gate_offload_act_setup,
	.size		= sizeof(struct tcf_gate),
};

static __net_init int gate_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);

	return tc_action_net_init(net, tn, &act_gate_ops);
}

static void __net_exit gate_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_gate_ops.net_id);
}

static struct pernet_operations gate_net_ops = {
	.init = gate_init_net,
	.exit_batch = gate_exit_net,
	.id   = &act_gate_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init gate_init_module(void)
{
	return tcf_register_action(&act_gate_ops, &gate_net_ops);
}

static void __exit gate_cleanup_module(void)
{
	tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}

module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_LICENSE("GPL v2");
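/* Newer kernels warn when a module lacks a description; the wording here
 * is a suggestion, not an upstream string.
 */
MODULE_DESCRIPTION("Frame gate action (IEEE 802.1Qci stream gate control)");

/* Illustrative configuration, assuming an iproute2 build with gate action
 * support; "eth0" and the address are placeholders. A flower classifier
 * hands matching ingress packets to a gate action that repeats a 300 ms
 * cycle: 200 ms open (no internal priority override, 8000000-octet
 * budget) followed by 100 ms closed.
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		dst_ip 192.168.0.1 \
 *		action gate clockid CLOCK_TAI \
 *		sched-entry open 200000000 -1 8000000 \
 *		sched-entry close 100000000 -1 -1
 */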