1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/sched/cls_u32.c Ugly (or Universal) 32bit key Packet Classifier. 4 * 5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 6 * 7 * The filters are packed to hash tables of key nodes 8 * with a set of 32bit key/mask pairs at every node. 9 * Nodes reference next level hash tables etc. 10 * 11 * This scheme is the best universal classifier I managed to 12 * invent; it is not super-fast, but it is not slow (provided you 13 * program it correctly), and general enough. And its relative 14 * speed grows as the number of rules becomes larger. 15 * 16 * It seems that it represents the best middle point between 17 * speed and manageability both by human and by machine. 18 * 19 * It is especially useful for link sharing combined with QoS; 20 * pure RSVP doesn't need such a general approach and can use 21 * much simpler (and faster) schemes, sort of cls_rsvp.c. 22 * 23 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro> 24 */ 25 26 #include <linux/module.h> 27 #include <linux/slab.h> 28 #include <linux/types.h> 29 #include <linux/kernel.h> 30 #include <linux/string.h> 31 #include <linux/errno.h> 32 #include <linux/percpu.h> 33 #include <linux/rtnetlink.h> 34 #include <linux/skbuff.h> 35 #include <linux/bitmap.h> 36 #include <linux/netdevice.h> 37 #include <linux/hash.h> 38 #include <net/netlink.h> 39 #include <net/act_api.h> 40 #include <net/pkt_cls.h> 41 #include <linux/idr.h> 42 #include <net/tc_wrapper.h> 43 44 struct tc_u_knode { 45 struct tc_u_knode __rcu *next; 46 u32 handle; 47 struct tc_u_hnode __rcu *ht_up; 48 struct tcf_exts exts; 49 int ifindex; 50 u8 fshift; 51 struct tcf_result res; 52 struct tc_u_hnode __rcu *ht_down; 53 #ifdef CONFIG_CLS_U32_PERF 54 struct tc_u32_pcnt __percpu *pf; 55 #endif 56 u32 flags; 57 unsigned int in_hw_count; 58 #ifdef CONFIG_CLS_U32_MARK 59 u32 val; 60 u32 mask; 61 u32 __percpu *pcpu_success; 62 #endif 63 struct rcu_work rwork; 64 /* The 'sel' field MUST be the last field in structure to allow for 65 * tc_u32_keys allocated at end of structure. 66 */ 67 struct tc_u32_sel sel; 68 }; 69 70 struct tc_u_hnode { 71 struct tc_u_hnode __rcu *next; 72 u32 handle; 73 u32 prio; 74 refcount_t refcnt; 75 unsigned int divisor; 76 struct idr handle_idr; 77 bool is_root; 78 struct rcu_head rcu; 79 u32 flags; 80 /* The 'ht' field MUST be the last field in structure to allow for 81 * more entries allocated at end of structure. 82 */ 83 struct tc_u_knode __rcu *ht[]; 84 }; 85 86 struct tc_u_common { 87 struct tc_u_hnode __rcu *hlist; 88 void *ptr; 89 refcount_t refcnt; 90 struct idr handle_idr; 91 struct hlist_node hnode; 92 long knodes; 93 }; 94 95 static u32 handle2id(u32 h) 96 { 97 return ((h & 0x80000000) ? ((h >> 20) & 0x7FF) : h); 98 } 99 100 static u32 id2handle(u32 id) 101 { 102 return (id | 0x800U) << 20; 103 } 104 105 static inline unsigned int u32_hash_fold(__be32 key, 106 const struct tc_u32_sel *sel, 107 u8 fshift) 108 { 109 unsigned int h = ntohl(key & sel->hmask) >> fshift; 110 111 return h; 112 } 113 114 TC_INDIRECT_SCOPE int u32_classify(struct sk_buff *skb, 115 const struct tcf_proto *tp, 116 struct tcf_result *res) 117 { 118 struct { 119 struct tc_u_knode *knode; 120 unsigned int off; 121 } stack[TC_U32_MAXDEPTH]; 122 123 struct tc_u_hnode *ht = rcu_dereference_bh(tp->root); 124 unsigned int off = skb_network_offset(skb); 125 struct tc_u_knode *n; 126 int sdepth = 0; 127 int off2 = 0; 128 int sel = 0; 129 #ifdef CONFIG_CLS_U32_PERF 130 int j; 131 #endif 132 int i, r; 133 134 next_ht: 135 n = rcu_dereference_bh(ht->ht[sel]); 136 137 next_knode: 138 if (n) { 139 struct tc_u32_key *key = n->sel.keys; 140 141 #ifdef CONFIG_CLS_U32_PERF 142 __this_cpu_inc(n->pf->rcnt); 143 j = 0; 144 #endif 145 146 if (tc_skip_sw(n->flags)) { 147 n = rcu_dereference_bh(n->next); 148 goto next_knode; 149 } 150 151 #ifdef CONFIG_CLS_U32_MARK 152 if ((skb->mark & n->mask) != n->val) { 153 n = rcu_dereference_bh(n->next); 154 goto next_knode; 155 } else { 156 __this_cpu_inc(*n->pcpu_success); 157 } 158 #endif 159 160 for (i = n->sel.nkeys; i > 0; i--, key++) { 161 int toff = off + key->off + (off2 & key->offmask); 162 __be32 *data, hdata; 163 164 data = skb_header_pointer_careful(skb, toff, 4, 165 &hdata); 166 if (!data) 167 goto out; 168 if ((*data ^ key->val) & key->mask) { 169 n = rcu_dereference_bh(n->next); 170 goto next_knode; 171 } 172 #ifdef CONFIG_CLS_U32_PERF 173 __this_cpu_inc(n->pf->kcnts[j]); 174 j++; 175 #endif 176 } 177 178 ht = rcu_dereference_bh(n->ht_down); 179 if (!ht) { 180 check_terminal: 181 if (n->sel.flags & TC_U32_TERMINAL) { 182 183 *res = n->res; 184 if (!tcf_match_indev(skb, n->ifindex)) { 185 n = rcu_dereference_bh(n->next); 186 goto next_knode; 187 } 188 #ifdef CONFIG_CLS_U32_PERF 189 __this_cpu_inc(n->pf->rhit); 190 #endif 191 r = tcf_exts_exec(skb, &n->exts, res); 192 if (r < 0) { 193 n = rcu_dereference_bh(n->next); 194 goto next_knode; 195 } 196 197 return r; 198 } 199 n = rcu_dereference_bh(n->next); 200 goto next_knode; 201 } 202 203 /* PUSH */ 204 if (sdepth >= TC_U32_MAXDEPTH) 205 goto deadloop; 206 stack[sdepth].knode = n; 207 stack[sdepth].off = off; 208 sdepth++; 209 210 ht = rcu_dereference_bh(n->ht_down); 211 sel = 0; 212 if (ht->divisor) { 213 __be32 *data, hdata; 214 215 data = skb_header_pointer_careful(skb, 216 off + n->sel.hoff, 217 4, &hdata); 218 if (!data) 219 goto out; 220 sel = ht->divisor & u32_hash_fold(*data, &n->sel, 221 n->fshift); 222 } 223 if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT))) 224 goto next_ht; 225 226 if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) { 227 off2 = n->sel.off + 3; 228 if (n->sel.flags & TC_U32_VAROFFSET) { 229 __be16 *data, hdata; 230 231 data = skb_header_pointer_careful(skb, 232 off + n->sel.offoff, 233 2, &hdata); 234 if (!data) 235 goto out; 236 off2 += ntohs(n->sel.offmask & *data) >> 237 n->sel.offshift; 238 } 239 off2 &= ~3; 240 } 241 if (n->sel.flags & TC_U32_EAT) { 242 off += off2; 243 off2 = 0; 244 } 245 246 if (off < skb->len) 247 goto next_ht; 248 } 249 250 /* POP */ 251 if (sdepth--) { 252 n = stack[sdepth].knode; 253 ht = rcu_dereference_bh(n->ht_up); 254 off = stack[sdepth].off; 255 goto check_terminal; 256 } 257 out: 258 return -1; 259 260 deadloop: 261 net_warn_ratelimited("cls_u32: dead loop\n"); 262 return -1; 263 } 264 265 static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) 266 { 267 struct tc_u_hnode *ht; 268 269 for (ht = rtnl_dereference(tp_c->hlist); 270 ht; 271 ht = rtnl_dereference(ht->next)) 272 if (ht->handle == handle) 273 break; 274 275 return ht; 276 } 277 278 static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle) 279 { 280 unsigned int sel; 281 struct tc_u_knode *n = NULL; 282 283 sel = TC_U32_HASH(handle); 284 if (sel > ht->divisor) 285 goto out; 286 287 for (n = rtnl_dereference(ht->ht[sel]); 288 n; 289 n = rtnl_dereference(n->next)) 290 if (n->handle == handle) 291 break; 292 out: 293 return n; 294 } 295 296 297 static void *u32_get(struct tcf_proto *tp, u32 handle) 298 { 299 struct tc_u_hnode *ht; 300 struct tc_u_common *tp_c = tp->data; 301 302 if (TC_U32_HTID(handle) == TC_U32_ROOT) 303 ht = rtnl_dereference(tp->root); 304 else 305 ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle)); 306 307 if (!ht) 308 return NULL; 309 310 if (TC_U32_KEY(handle) == 0) 311 return ht; 312 313 return u32_lookup_key(ht, handle); 314 } 315 316 /* Protected by rtnl lock */ 317 static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr) 318 { 319 int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL); 320 if (id < 0) 321 return 0; 322 return id2handle(id); 323 } 324 325 static struct hlist_head *tc_u_common_hash; 326 327 #define U32_HASH_SHIFT 10 328 #define U32_HASH_SIZE (1 << U32_HASH_SHIFT) 329 330 static void *tc_u_common_ptr(const struct tcf_proto *tp) 331 { 332 struct tcf_block *block = tp->chain->block; 333 334 /* The block sharing is currently supported only 335 * for classless qdiscs. In that case we use block 336 * for tc_u_common identification. In case the 337 * block is not shared, block->q is a valid pointer 338 * and we can use that. That works for classful qdiscs. 339 */ 340 if (tcf_block_shared(block)) 341 return block; 342 else 343 return block->q; 344 } 345 346 static struct hlist_head *tc_u_hash(void *key) 347 { 348 return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT); 349 } 350 351 static struct tc_u_common *tc_u_common_find(void *key) 352 { 353 struct tc_u_common *tc; 354 hlist_for_each_entry(tc, tc_u_hash(key), hnode) { 355 if (tc->ptr == key) 356 return tc; 357 } 358 return NULL; 359 } 360 361 static int u32_init(struct tcf_proto *tp) 362 { 363 struct tc_u_hnode *root_ht; 364 void *key = tc_u_common_ptr(tp); 365 struct tc_u_common *tp_c = tc_u_common_find(key); 366 367 root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL); 368 if (root_ht == NULL) 369 return -ENOBUFS; 370 371 refcount_set(&root_ht->refcnt, 1); 372 root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : id2handle(0); 373 root_ht->prio = tp->prio; 374 root_ht->is_root = true; 375 idr_init(&root_ht->handle_idr); 376 377 if (tp_c == NULL) { 378 tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL); 379 if (tp_c == NULL) { 380 kfree(root_ht); 381 return -ENOBUFS; 382 } 383 refcount_set(&tp_c->refcnt, 1); 384 tp_c->ptr = key; 385 INIT_HLIST_NODE(&tp_c->hnode); 386 idr_init(&tp_c->handle_idr); 387 388 hlist_add_head(&tp_c->hnode, tc_u_hash(key)); 389 } else { 390 refcount_inc(&tp_c->refcnt); 391 } 392 393 RCU_INIT_POINTER(root_ht->next, tp_c->hlist); 394 rcu_assign_pointer(tp_c->hlist, root_ht); 395 396 /* root_ht must be destroyed when tcf_proto is destroyed */ 397 rcu_assign_pointer(tp->root, root_ht); 398 tp->data = tp_c; 399 return 0; 400 } 401 402 static void __u32_destroy_key(struct tc_u_knode *n) 403 { 404 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); 405 406 tcf_exts_destroy(&n->exts); 407 if (ht && refcount_dec_and_test(&ht->refcnt)) 408 kfree(ht); 409 kfree(n); 410 } 411 412 static void u32_destroy_key(struct tc_u_knode *n, bool free_pf) 413 { 414 tcf_exts_put_net(&n->exts); 415 #ifdef CONFIG_CLS_U32_PERF 416 if (free_pf) 417 free_percpu(n->pf); 418 #endif 419 #ifdef CONFIG_CLS_U32_MARK 420 if (free_pf) 421 free_percpu(n->pcpu_success); 422 #endif 423 __u32_destroy_key(n); 424 } 425 426 /* u32_delete_key_rcu should be called when free'ing a copied 427 * version of a tc_u_knode obtained from u32_init_knode(). When 428 * copies are obtained from u32_init_knode() the statistics are 429 * shared between the old and new copies to allow readers to 430 * continue to update the statistics during the copy. To support 431 * this the u32_delete_key_rcu variant does not free the percpu 432 * statistics. 433 */ 434 static void u32_delete_key_work(struct work_struct *work) 435 { 436 struct tc_u_knode *key = container_of(to_rcu_work(work), 437 struct tc_u_knode, 438 rwork); 439 rtnl_lock(); 440 u32_destroy_key(key, false); 441 rtnl_unlock(); 442 } 443 444 /* u32_delete_key_freepf_rcu is the rcu callback variant 445 * that free's the entire structure including the statistics 446 * percpu variables. Only use this if the key is not a copy 447 * returned by u32_init_knode(). See u32_delete_key_rcu() 448 * for the variant that should be used with keys return from 449 * u32_init_knode() 450 */ 451 static void u32_delete_key_freepf_work(struct work_struct *work) 452 { 453 struct tc_u_knode *key = container_of(to_rcu_work(work), 454 struct tc_u_knode, 455 rwork); 456 rtnl_lock(); 457 u32_destroy_key(key, true); 458 rtnl_unlock(); 459 } 460 461 static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) 462 { 463 struct tc_u_common *tp_c = tp->data; 464 struct tc_u_knode __rcu **kp; 465 struct tc_u_knode *pkp; 466 struct tc_u_hnode *ht = rtnl_dereference(key->ht_up); 467 468 if (ht) { 469 kp = &ht->ht[TC_U32_HASH(key->handle)]; 470 for (pkp = rtnl_dereference(*kp); pkp; 471 kp = &pkp->next, pkp = rtnl_dereference(*kp)) { 472 if (pkp == key) { 473 RCU_INIT_POINTER(*kp, key->next); 474 tp_c->knodes--; 475 476 tcf_unbind_filter(tp, &key->res); 477 idr_remove(&ht->handle_idr, key->handle); 478 tcf_exts_get_net(&key->exts); 479 tcf_queue_work(&key->rwork, u32_delete_key_freepf_work); 480 return 0; 481 } 482 } 483 } 484 WARN_ON(1); 485 return 0; 486 } 487 488 static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, 489 struct netlink_ext_ack *extack) 490 { 491 struct tcf_block *block = tp->chain->block; 492 struct tc_cls_u32_offload cls_u32 = {}; 493 494 tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack); 495 cls_u32.command = TC_CLSU32_DELETE_HNODE; 496 cls_u32.hnode.divisor = h->divisor; 497 cls_u32.hnode.handle = h->handle; 498 cls_u32.hnode.prio = h->prio; 499 500 tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true); 501 } 502 503 static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, 504 u32 flags, struct netlink_ext_ack *extack) 505 { 506 struct tcf_block *block = tp->chain->block; 507 struct tc_cls_u32_offload cls_u32 = {}; 508 bool skip_sw = tc_skip_sw(flags); 509 bool offloaded = false; 510 int err; 511 512 tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack); 513 cls_u32.command = TC_CLSU32_NEW_HNODE; 514 cls_u32.hnode.divisor = h->divisor; 515 cls_u32.hnode.handle = h->handle; 516 cls_u32.hnode.prio = h->prio; 517 518 err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true); 519 if (err < 0) { 520 u32_clear_hw_hnode(tp, h, NULL); 521 return err; 522 } else if (err > 0) { 523 offloaded = true; 524 } 525 526 if (skip_sw && !offloaded) 527 return -EINVAL; 528 529 return 0; 530 } 531 532 static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, 533 struct netlink_ext_ack *extack) 534 { 535 struct tcf_block *block = tp->chain->block; 536 struct tc_cls_u32_offload cls_u32 = {}; 537 538 tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack); 539 cls_u32.command = TC_CLSU32_DELETE_KNODE; 540 cls_u32.knode.handle = n->handle; 541 542 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false, 543 &n->flags, &n->in_hw_count, true); 544 } 545 546 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, 547 u32 flags, struct netlink_ext_ack *extack) 548 { 549 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); 550 struct tcf_block *block = tp->chain->block; 551 struct tc_cls_u32_offload cls_u32 = {}; 552 bool skip_sw = tc_skip_sw(flags); 553 int err; 554 555 tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack); 556 cls_u32.command = TC_CLSU32_REPLACE_KNODE; 557 cls_u32.knode.handle = n->handle; 558 cls_u32.knode.fshift = n->fshift; 559 #ifdef CONFIG_CLS_U32_MARK 560 cls_u32.knode.val = n->val; 561 cls_u32.knode.mask = n->mask; 562 #else 563 cls_u32.knode.val = 0; 564 cls_u32.knode.mask = 0; 565 #endif 566 cls_u32.knode.sel = &n->sel; 567 cls_u32.knode.res = &n->res; 568 cls_u32.knode.exts = &n->exts; 569 if (n->ht_down) 570 cls_u32.knode.link_handle = ht->handle; 571 572 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw, 573 &n->flags, &n->in_hw_count, true); 574 if (err) { 575 u32_remove_hw_knode(tp, n, NULL); 576 return err; 577 } 578 579 if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW)) 580 return -EINVAL; 581 582 return 0; 583 } 584 585 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, 586 struct netlink_ext_ack *extack) 587 { 588 struct tc_u_common *tp_c = tp->data; 589 struct tc_u_knode *n; 590 unsigned int h; 591 592 for (h = 0; h <= ht->divisor; h++) { 593 while ((n = rtnl_dereference(ht->ht[h])) != NULL) { 594 RCU_INIT_POINTER(ht->ht[h], 595 rtnl_dereference(n->next)); 596 tp_c->knodes--; 597 tcf_unbind_filter(tp, &n->res); 598 u32_remove_hw_knode(tp, n, extack); 599 idr_remove(&ht->handle_idr, n->handle); 600 if (tcf_exts_get_net(&n->exts)) 601 tcf_queue_work(&n->rwork, u32_delete_key_freepf_work); 602 else 603 u32_destroy_key(n, true); 604 } 605 } 606 } 607 608 static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, 609 struct netlink_ext_ack *extack) 610 { 611 struct tc_u_common *tp_c = tp->data; 612 struct tc_u_hnode __rcu **hn; 613 struct tc_u_hnode *phn; 614 615 u32_clear_hnode(tp, ht, extack); 616 617 hn = &tp_c->hlist; 618 for (phn = rtnl_dereference(*hn); 619 phn; 620 hn = &phn->next, phn = rtnl_dereference(*hn)) { 621 if (phn == ht) { 622 u32_clear_hw_hnode(tp, ht, extack); 623 idr_destroy(&ht->handle_idr); 624 idr_remove(&tp_c->handle_idr, handle2id(ht->handle)); 625 RCU_INIT_POINTER(*hn, ht->next); 626 kfree_rcu(ht, rcu); 627 return 0; 628 } 629 } 630 631 return -ENOENT; 632 } 633 634 static void u32_destroy(struct tcf_proto *tp, bool rtnl_held, 635 struct netlink_ext_ack *extack) 636 { 637 struct tc_u_common *tp_c = tp->data; 638 struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); 639 640 WARN_ON(root_ht == NULL); 641 642 if (root_ht && refcount_dec_and_test(&root_ht->refcnt)) 643 u32_destroy_hnode(tp, root_ht, extack); 644 645 if (refcount_dec_and_test(&tp_c->refcnt)) { 646 struct tc_u_hnode *ht; 647 648 hlist_del(&tp_c->hnode); 649 650 while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) { 651 u32_clear_hnode(tp, ht, extack); 652 RCU_INIT_POINTER(tp_c->hlist, ht->next); 653 654 /* u32_destroy_key() will later free ht for us, if it's 655 * still referenced by some knode 656 */ 657 if (refcount_dec_and_test(&ht->refcnt)) 658 kfree_rcu(ht, rcu); 659 } 660 661 idr_destroy(&tp_c->handle_idr); 662 kfree(tp_c); 663 } 664 665 tp->data = NULL; 666 } 667 668 static int u32_delete(struct tcf_proto *tp, void *arg, bool *last, 669 bool rtnl_held, struct netlink_ext_ack *extack) 670 { 671 struct tc_u_hnode *ht = arg; 672 struct tc_u_common *tp_c = tp->data; 673 int ret = 0; 674 675 if (TC_U32_KEY(ht->handle)) { 676 u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack); 677 ret = u32_delete_key(tp, (struct tc_u_knode *)ht); 678 goto out; 679 } 680 681 if (ht->is_root) { 682 NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node"); 683 return -EINVAL; 684 } 685 686 if (refcount_dec_if_one(&ht->refcnt)) { 687 u32_destroy_hnode(tp, ht, extack); 688 } else { 689 NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter"); 690 return -EBUSY; 691 } 692 693 out: 694 *last = refcount_read(&tp_c->refcnt) == 1 && tp_c->knodes == 0; 695 return ret; 696 } 697 698 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid) 699 { 700 u32 index = htid | 0x800; 701 u32 max = htid | 0xFFF; 702 703 if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) { 704 index = htid + 1; 705 if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, 706 GFP_KERNEL)) 707 index = max; 708 } 709 710 return index; 711 } 712 713 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { 714 [TCA_U32_CLASSID] = { .type = NLA_U32 }, 715 [TCA_U32_HASH] = { .type = NLA_U32 }, 716 [TCA_U32_LINK] = { .type = NLA_U32 }, 717 [TCA_U32_DIVISOR] = { .type = NLA_U32 }, 718 [TCA_U32_SEL] = { .len = sizeof(struct tc_u32_sel) }, 719 [TCA_U32_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ }, 720 [TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) }, 721 [TCA_U32_FLAGS] = { .type = NLA_U32 }, 722 }; 723 724 static void u32_unbind_filter(struct tcf_proto *tp, struct tc_u_knode *n, 725 struct nlattr **tb) 726 { 727 if (tb[TCA_U32_CLASSID]) 728 tcf_unbind_filter(tp, &n->res); 729 } 730 731 static void u32_bind_filter(struct tcf_proto *tp, struct tc_u_knode *n, 732 unsigned long base, struct nlattr **tb) 733 { 734 if (tb[TCA_U32_CLASSID]) { 735 n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]); 736 tcf_bind_filter(tp, &n->res, base); 737 } 738 } 739 740 static int u32_set_parms(struct net *net, struct tcf_proto *tp, 741 struct tc_u_knode *n, struct nlattr **tb, 742 struct nlattr *est, u32 flags, u32 fl_flags, 743 struct netlink_ext_ack *extack) 744 { 745 int err, ifindex = -1; 746 747 err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags, 748 fl_flags, extack); 749 if (err < 0) 750 return err; 751 752 if (tb[TCA_U32_INDEV]) { 753 ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack); 754 if (ifindex < 0) 755 return -EINVAL; 756 } 757 758 if (tb[TCA_U32_LINK]) { 759 u32 handle = nla_get_u32(tb[TCA_U32_LINK]); 760 struct tc_u_hnode *ht_down = NULL, *ht_old; 761 762 if (TC_U32_KEY(handle)) { 763 NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table"); 764 return -EINVAL; 765 } 766 767 if (handle) { 768 ht_down = u32_lookup_ht(tp->data, handle); 769 770 if (!ht_down) { 771 NL_SET_ERR_MSG_MOD(extack, "Link hash table not found"); 772 return -EINVAL; 773 } 774 if (ht_down->is_root) { 775 NL_SET_ERR_MSG_MOD(extack, "Not linking to root node"); 776 return -EINVAL; 777 } 778 refcount_inc(&ht_down->refcnt); 779 } 780 781 ht_old = rtnl_dereference(n->ht_down); 782 rcu_assign_pointer(n->ht_down, ht_down); 783 784 if (ht_old) 785 refcount_dec(&ht_old->refcnt); 786 } 787 788 if (ifindex >= 0) 789 n->ifindex = ifindex; 790 791 return 0; 792 } 793 794 static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c, 795 struct tc_u_knode *n) 796 { 797 struct tc_u_knode __rcu **ins; 798 struct tc_u_knode *pins; 799 struct tc_u_hnode *ht; 800 801 if (TC_U32_HTID(n->handle) == TC_U32_ROOT) 802 ht = rtnl_dereference(tp->root); 803 else 804 ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle)); 805 806 ins = &ht->ht[TC_U32_HASH(n->handle)]; 807 808 /* The node must always exist for it to be replaced if this is not the 809 * case then something went very wrong elsewhere. 810 */ 811 for (pins = rtnl_dereference(*ins); ; 812 ins = &pins->next, pins = rtnl_dereference(*ins)) 813 if (pins->handle == n->handle) 814 break; 815 816 idr_replace(&ht->handle_idr, n, n->handle); 817 RCU_INIT_POINTER(n->next, pins->next); 818 rcu_assign_pointer(*ins, n); 819 } 820 821 static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp, 822 struct tc_u_knode *n) 823 { 824 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); 825 struct tc_u32_sel *s = &n->sel; 826 struct tc_u_knode *new; 827 828 new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL); 829 if (!new) 830 return NULL; 831 832 RCU_INIT_POINTER(new->next, n->next); 833 new->handle = n->handle; 834 RCU_INIT_POINTER(new->ht_up, n->ht_up); 835 836 new->ifindex = n->ifindex; 837 new->fshift = n->fshift; 838 new->flags = n->flags; 839 RCU_INIT_POINTER(new->ht_down, ht); 840 841 #ifdef CONFIG_CLS_U32_PERF 842 /* Statistics may be incremented by readers during update 843 * so we must keep them in tact. When the node is later destroyed 844 * a special destroy call must be made to not free the pf memory. 845 */ 846 new->pf = n->pf; 847 #endif 848 849 #ifdef CONFIG_CLS_U32_MARK 850 new->val = n->val; 851 new->mask = n->mask; 852 /* Similarly success statistics must be moved as pointers */ 853 new->pcpu_success = n->pcpu_success; 854 #endif 855 memcpy(&new->sel, s, struct_size(s, keys, s->nkeys)); 856 857 if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) { 858 kfree(new); 859 return NULL; 860 } 861 862 /* bump reference count as long as we hold pointer to structure */ 863 if (ht) 864 refcount_inc(&ht->refcnt); 865 866 return new; 867 } 868 869 static int u32_change(struct net *net, struct sk_buff *in_skb, 870 struct tcf_proto *tp, unsigned long base, u32 handle, 871 struct nlattr **tca, void **arg, u32 flags, 872 struct netlink_ext_ack *extack) 873 { 874 struct tc_u_common *tp_c = tp->data; 875 struct tc_u_hnode *ht; 876 struct tc_u_knode *n; 877 struct tc_u32_sel *s; 878 struct nlattr *opt = tca[TCA_OPTIONS]; 879 struct nlattr *tb[TCA_U32_MAX + 1]; 880 u32 htid, userflags = 0; 881 size_t sel_size; 882 int err; 883 884 if (!opt) { 885 if (handle) { 886 NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options"); 887 return -EINVAL; 888 } else { 889 return 0; 890 } 891 } 892 893 err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy, 894 extack); 895 if (err < 0) 896 return err; 897 898 if (tb[TCA_U32_FLAGS]) { 899 userflags = nla_get_u32(tb[TCA_U32_FLAGS]); 900 if (!tc_flags_valid(userflags)) { 901 NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags"); 902 return -EINVAL; 903 } 904 } 905 906 n = *arg; 907 if (n) { 908 struct tc_u_knode *new; 909 910 if (TC_U32_KEY(n->handle) == 0) { 911 NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero"); 912 return -EINVAL; 913 } 914 915 if ((n->flags ^ userflags) & 916 ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) { 917 NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags"); 918 return -EINVAL; 919 } 920 921 new = u32_init_knode(net, tp, n); 922 if (!new) 923 return -ENOMEM; 924 925 err = u32_set_parms(net, tp, new, tb, tca[TCA_RATE], 926 flags, new->flags, extack); 927 928 if (err) { 929 __u32_destroy_key(new); 930 return err; 931 } 932 933 u32_bind_filter(tp, new, base, tb); 934 935 err = u32_replace_hw_knode(tp, new, flags, extack); 936 if (err) { 937 u32_unbind_filter(tp, new, tb); 938 939 if (tb[TCA_U32_LINK]) { 940 struct tc_u_hnode *ht_old; 941 942 ht_old = rtnl_dereference(n->ht_down); 943 if (ht_old) 944 refcount_inc(&ht_old->refcnt); 945 } 946 __u32_destroy_key(new); 947 return err; 948 } 949 950 if (!tc_in_hw(new->flags)) 951 new->flags |= TCA_CLS_FLAGS_NOT_IN_HW; 952 953 tcf_proto_update_usesw(tp, new->flags); 954 955 u32_replace_knode(tp, tp_c, new); 956 tcf_unbind_filter(tp, &n->res); 957 tcf_exts_get_net(&n->exts); 958 tcf_queue_work(&n->rwork, u32_delete_key_work); 959 return 0; 960 } 961 962 if (tb[TCA_U32_DIVISOR]) { 963 unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); 964 965 if (!is_power_of_2(divisor)) { 966 NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2"); 967 return -EINVAL; 968 } 969 if (divisor-- > 0x100) { 970 NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets"); 971 return -EINVAL; 972 } 973 if (TC_U32_KEY(handle)) { 974 NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table"); 975 return -EINVAL; 976 } 977 ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL); 978 if (ht == NULL) 979 return -ENOBUFS; 980 if (handle == 0) { 981 handle = gen_new_htid(tp->data, ht); 982 if (handle == 0) { 983 kfree(ht); 984 return -ENOMEM; 985 } 986 } else { 987 err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle, 988 handle, GFP_KERNEL); 989 if (err) { 990 kfree(ht); 991 return err; 992 } 993 } 994 refcount_set(&ht->refcnt, 1); 995 ht->divisor = divisor; 996 ht->handle = handle; 997 ht->prio = tp->prio; 998 idr_init(&ht->handle_idr); 999 ht->flags = userflags; 1000 1001 err = u32_replace_hw_hnode(tp, ht, userflags, extack); 1002 if (err) { 1003 idr_remove(&tp_c->handle_idr, handle2id(handle)); 1004 kfree(ht); 1005 return err; 1006 } 1007 1008 RCU_INIT_POINTER(ht->next, tp_c->hlist); 1009 rcu_assign_pointer(tp_c->hlist, ht); 1010 *arg = ht; 1011 1012 return 0; 1013 } 1014 1015 if (tb[TCA_U32_HASH]) { 1016 htid = nla_get_u32(tb[TCA_U32_HASH]); 1017 if (TC_U32_HTID(htid) == TC_U32_ROOT) { 1018 ht = rtnl_dereference(tp->root); 1019 htid = ht->handle; 1020 } else { 1021 ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid)); 1022 if (!ht) { 1023 NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found"); 1024 return -EINVAL; 1025 } 1026 } 1027 } else { 1028 ht = rtnl_dereference(tp->root); 1029 htid = ht->handle; 1030 } 1031 1032 if (ht->divisor < TC_U32_HASH(htid)) { 1033 NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value"); 1034 return -EINVAL; 1035 } 1036 1037 /* At this point, we need to derive the new handle that will be used to 1038 * uniquely map the identity of this table match entry. The 1039 * identity of the entry that we need to construct is 32 bits made of: 1040 * htid(12b):bucketid(8b):node/entryid(12b) 1041 * 1042 * At this point _we have the table(ht)_ in which we will insert this 1043 * entry. We carry the table's id in variable "htid". 1044 * Note that earlier code picked the ht selection either by a) the user 1045 * providing the htid specified via TCA_U32_HASH attribute or b) when 1046 * no such attribute is passed then the root ht, is default to at ID 1047 * 0x[800][00][000]. Rule: the root table has a single bucket with ID 0. 1048 * If OTOH the user passed us the htid, they may also pass a bucketid of 1049 * choice. 0 is fine. For example a user htid is 0x[600][01][000] it is 1050 * indicating hash bucketid of 1. Rule: the entry/node ID _cannot_ be 1051 * passed via the htid, so even if it was non-zero it will be ignored. 1052 * 1053 * We may also have a handle, if the user passed one. The handle also 1054 * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b). 1055 * Rule: the bucketid on the handle is ignored even if one was passed; 1056 * rather the value on "htid" is always assumed to be the bucketid. 1057 */ 1058 if (handle) { 1059 /* Rule: The htid from handle and tableid from htid must match */ 1060 if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) { 1061 NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch"); 1062 return -EINVAL; 1063 } 1064 /* Ok, so far we have a valid htid(12b):bucketid(8b) but we 1065 * need to finalize the table entry identification with the last 1066 * part - the node/entryid(12b)). Rule: Nodeid _cannot be 0_ for 1067 * entries. Rule: nodeid of 0 is reserved only for tables(see 1068 * earlier code which processes TC_U32_DIVISOR attribute). 1069 * Rule: The nodeid can only be derived from the handle (and not 1070 * htid). 1071 * Rule: if the handle specified zero for the node id example 1072 * 0x60000000, then pick a new nodeid from the pool of IDs 1073 * this hash table has been allocating from. 1074 * If OTOH it is specified (i.e for example the user passed a 1075 * handle such as 0x60000123), then we use it generate our final 1076 * handle which is used to uniquely identify the match entry. 1077 */ 1078 if (!TC_U32_NODE(handle)) { 1079 handle = gen_new_kid(ht, htid); 1080 } else { 1081 handle = htid | TC_U32_NODE(handle); 1082 err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, 1083 handle, GFP_KERNEL); 1084 if (err) 1085 return err; 1086 } 1087 } else { 1088 /* The user did not give us a handle; lets just generate one 1089 * from the table's pool of nodeids. 1090 */ 1091 handle = gen_new_kid(ht, htid); 1092 } 1093 1094 if (tb[TCA_U32_SEL] == NULL) { 1095 NL_SET_ERR_MSG_MOD(extack, "Selector not specified"); 1096 err = -EINVAL; 1097 goto erridr; 1098 } 1099 1100 s = nla_data(tb[TCA_U32_SEL]); 1101 sel_size = struct_size(s, keys, s->nkeys); 1102 if (nla_len(tb[TCA_U32_SEL]) < sel_size) { 1103 err = -EINVAL; 1104 goto erridr; 1105 } 1106 1107 n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL); 1108 if (n == NULL) { 1109 err = -ENOBUFS; 1110 goto erridr; 1111 } 1112 1113 #ifdef CONFIG_CLS_U32_PERF 1114 n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys), 1115 __alignof__(struct tc_u32_pcnt)); 1116 if (!n->pf) { 1117 err = -ENOBUFS; 1118 goto errfree; 1119 } 1120 #endif 1121 1122 unsafe_memcpy(&n->sel, s, sel_size, 1123 /* A composite flex-array structure destination, 1124 * which was correctly sized with struct_size(), 1125 * bounds-checked against nla_len(), and allocated 1126 * above. */); 1127 RCU_INIT_POINTER(n->ht_up, ht); 1128 n->handle = handle; 1129 n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; 1130 n->flags = userflags; 1131 1132 err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE); 1133 if (err < 0) 1134 goto errout; 1135 1136 #ifdef CONFIG_CLS_U32_MARK 1137 n->pcpu_success = alloc_percpu(u32); 1138 if (!n->pcpu_success) { 1139 err = -ENOMEM; 1140 goto errout; 1141 } 1142 1143 if (tb[TCA_U32_MARK]) { 1144 struct tc_u32_mark *mark; 1145 1146 mark = nla_data(tb[TCA_U32_MARK]); 1147 n->val = mark->val; 1148 n->mask = mark->mask; 1149 } 1150 #endif 1151 1152 err = u32_set_parms(net, tp, n, tb, tca[TCA_RATE], 1153 flags, n->flags, extack); 1154 1155 u32_bind_filter(tp, n, base, tb); 1156 1157 if (err == 0) { 1158 struct tc_u_knode __rcu **ins; 1159 struct tc_u_knode *pins; 1160 1161 err = u32_replace_hw_knode(tp, n, flags, extack); 1162 if (err) 1163 goto errunbind; 1164 1165 if (!tc_in_hw(n->flags)) 1166 n->flags |= TCA_CLS_FLAGS_NOT_IN_HW; 1167 1168 tcf_proto_update_usesw(tp, n->flags); 1169 1170 ins = &ht->ht[TC_U32_HASH(handle)]; 1171 for (pins = rtnl_dereference(*ins); pins; 1172 ins = &pins->next, pins = rtnl_dereference(*ins)) 1173 if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle)) 1174 break; 1175 1176 RCU_INIT_POINTER(n->next, pins); 1177 rcu_assign_pointer(*ins, n); 1178 tp_c->knodes++; 1179 *arg = n; 1180 return 0; 1181 } 1182 1183 errunbind: 1184 u32_unbind_filter(tp, n, tb); 1185 1186 #ifdef CONFIG_CLS_U32_MARK 1187 free_percpu(n->pcpu_success); 1188 #endif 1189 1190 errout: 1191 tcf_exts_destroy(&n->exts); 1192 #ifdef CONFIG_CLS_U32_PERF 1193 errfree: 1194 free_percpu(n->pf); 1195 #endif 1196 kfree(n); 1197 erridr: 1198 idr_remove(&ht->handle_idr, handle); 1199 return err; 1200 } 1201 1202 static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg, 1203 bool rtnl_held) 1204 { 1205 struct tc_u_common *tp_c = tp->data; 1206 struct tc_u_hnode *ht; 1207 struct tc_u_knode *n; 1208 unsigned int h; 1209 1210 if (arg->stop) 1211 return; 1212 1213 for (ht = rtnl_dereference(tp_c->hlist); 1214 ht; 1215 ht = rtnl_dereference(ht->next)) { 1216 if (ht->prio != tp->prio) 1217 continue; 1218 1219 if (!tc_cls_stats_dump(tp, arg, ht)) 1220 return; 1221 1222 for (h = 0; h <= ht->divisor; h++) { 1223 for (n = rtnl_dereference(ht->ht[h]); 1224 n; 1225 n = rtnl_dereference(n->next)) { 1226 if (!tc_cls_stats_dump(tp, arg, n)) 1227 return; 1228 } 1229 } 1230 } 1231 } 1232 1233 static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, 1234 bool add, flow_setup_cb_t *cb, void *cb_priv, 1235 struct netlink_ext_ack *extack) 1236 { 1237 struct tc_cls_u32_offload cls_u32 = {}; 1238 int err; 1239 1240 tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack); 1241 cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE; 1242 cls_u32.hnode.divisor = ht->divisor; 1243 cls_u32.hnode.handle = ht->handle; 1244 cls_u32.hnode.prio = ht->prio; 1245 1246 err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv); 1247 if (err && add && tc_skip_sw(ht->flags)) 1248 return err; 1249 1250 return 0; 1251 } 1252 1253 static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, 1254 bool add, flow_setup_cb_t *cb, void *cb_priv, 1255 struct netlink_ext_ack *extack) 1256 { 1257 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); 1258 struct tcf_block *block = tp->chain->block; 1259 struct tc_cls_u32_offload cls_u32 = {}; 1260 1261 tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack); 1262 cls_u32.command = add ? 1263 TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE; 1264 cls_u32.knode.handle = n->handle; 1265 1266 if (add) { 1267 cls_u32.knode.fshift = n->fshift; 1268 #ifdef CONFIG_CLS_U32_MARK 1269 cls_u32.knode.val = n->val; 1270 cls_u32.knode.mask = n->mask; 1271 #else 1272 cls_u32.knode.val = 0; 1273 cls_u32.knode.mask = 0; 1274 #endif 1275 cls_u32.knode.sel = &n->sel; 1276 cls_u32.knode.res = &n->res; 1277 cls_u32.knode.exts = &n->exts; 1278 if (n->ht_down) 1279 cls_u32.knode.link_handle = ht->handle; 1280 } 1281 1282 return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32, 1283 &cls_u32, cb_priv, &n->flags, 1284 &n->in_hw_count); 1285 } 1286 1287 static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 1288 void *cb_priv, struct netlink_ext_ack *extack) 1289 { 1290 struct tc_u_common *tp_c = tp->data; 1291 struct tc_u_hnode *ht; 1292 struct tc_u_knode *n; 1293 unsigned int h; 1294 int err; 1295 1296 for (ht = rtnl_dereference(tp_c->hlist); 1297 ht; 1298 ht = rtnl_dereference(ht->next)) { 1299 if (ht->prio != tp->prio) 1300 continue; 1301 1302 /* When adding filters to a new dev, try to offload the 1303 * hashtable first. When removing, do the filters before the 1304 * hashtable. 1305 */ 1306 if (add && !tc_skip_hw(ht->flags)) { 1307 err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv, 1308 extack); 1309 if (err) 1310 return err; 1311 } 1312 1313 for (h = 0; h <= ht->divisor; h++) { 1314 for (n = rtnl_dereference(ht->ht[h]); 1315 n; 1316 n = rtnl_dereference(n->next)) { 1317 if (tc_skip_hw(n->flags)) 1318 continue; 1319 1320 err = u32_reoffload_knode(tp, n, add, cb, 1321 cb_priv, extack); 1322 if (err) 1323 return err; 1324 } 1325 } 1326 1327 if (!add && !tc_skip_hw(ht->flags)) 1328 u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack); 1329 } 1330 1331 return 0; 1332 } 1333 1334 static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 1335 unsigned long base) 1336 { 1337 struct tc_u_knode *n = fh; 1338 1339 tc_cls_bind_class(classid, cl, q, &n->res, base); 1340 } 1341 1342 static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh, 1343 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 1344 { 1345 struct tc_u_knode *n = fh; 1346 struct tc_u_hnode *ht_up, *ht_down; 1347 struct nlattr *nest; 1348 1349 if (n == NULL) 1350 return skb->len; 1351 1352 t->tcm_handle = n->handle; 1353 1354 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 1355 if (nest == NULL) 1356 goto nla_put_failure; 1357 1358 if (TC_U32_KEY(n->handle) == 0) { 1359 struct tc_u_hnode *ht = fh; 1360 u32 divisor = ht->divisor + 1; 1361 1362 if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor)) 1363 goto nla_put_failure; 1364 } else { 1365 #ifdef CONFIG_CLS_U32_PERF 1366 struct tc_u32_pcnt *gpf; 1367 int cpu; 1368 #endif 1369 1370 if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys), 1371 &n->sel)) 1372 goto nla_put_failure; 1373 1374 ht_up = rtnl_dereference(n->ht_up); 1375 if (ht_up) { 1376 u32 htid = n->handle & 0xFFFFF000; 1377 if (nla_put_u32(skb, TCA_U32_HASH, htid)) 1378 goto nla_put_failure; 1379 } 1380 if (n->res.classid && 1381 nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid)) 1382 goto nla_put_failure; 1383 1384 ht_down = rtnl_dereference(n->ht_down); 1385 if (ht_down && 1386 nla_put_u32(skb, TCA_U32_LINK, ht_down->handle)) 1387 goto nla_put_failure; 1388 1389 if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags)) 1390 goto nla_put_failure; 1391 1392 #ifdef CONFIG_CLS_U32_MARK 1393 if ((n->val || n->mask)) { 1394 struct tc_u32_mark mark = {.val = n->val, 1395 .mask = n->mask, 1396 .success = 0}; 1397 int cpum; 1398 1399 for_each_possible_cpu(cpum) { 1400 __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum); 1401 1402 mark.success += cnt; 1403 } 1404 1405 if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark)) 1406 goto nla_put_failure; 1407 } 1408 #endif 1409 1410 if (tcf_exts_dump(skb, &n->exts) < 0) 1411 goto nla_put_failure; 1412 1413 if (n->ifindex) { 1414 struct net_device *dev; 1415 dev = __dev_get_by_index(net, n->ifindex); 1416 if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name)) 1417 goto nla_put_failure; 1418 } 1419 #ifdef CONFIG_CLS_U32_PERF 1420 gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL); 1421 if (!gpf) 1422 goto nla_put_failure; 1423 1424 for_each_possible_cpu(cpu) { 1425 int i; 1426 struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu); 1427 1428 gpf->rcnt += pf->rcnt; 1429 gpf->rhit += pf->rhit; 1430 for (i = 0; i < n->sel.nkeys; i++) 1431 gpf->kcnts[i] += pf->kcnts[i]; 1432 } 1433 1434 if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys), 1435 gpf, TCA_U32_PAD)) { 1436 kfree(gpf); 1437 goto nla_put_failure; 1438 } 1439 kfree(gpf); 1440 #endif 1441 } 1442 1443 nla_nest_end(skb, nest); 1444 1445 if (TC_U32_KEY(n->handle)) 1446 if (tcf_exts_dump_stats(skb, &n->exts) < 0) 1447 goto nla_put_failure; 1448 return skb->len; 1449 1450 nla_put_failure: 1451 nla_nest_cancel(skb, nest); 1452 return -1; 1453 } 1454 1455 static struct tcf_proto_ops cls_u32_ops __read_mostly = { 1456 .kind = "u32", 1457 .classify = u32_classify, 1458 .init = u32_init, 1459 .destroy = u32_destroy, 1460 .get = u32_get, 1461 .change = u32_change, 1462 .delete = u32_delete, 1463 .walk = u32_walk, 1464 .reoffload = u32_reoffload, 1465 .dump = u32_dump, 1466 .bind_class = u32_bind_class, 1467 .owner = THIS_MODULE, 1468 }; 1469 MODULE_ALIAS_NET_CLS("u32"); 1470 1471 static int __init init_u32(void) 1472 { 1473 int i, ret; 1474 1475 pr_info("u32 classifier\n"); 1476 #ifdef CONFIG_CLS_U32_PERF 1477 pr_info(" Performance counters on\n"); 1478 #endif 1479 pr_info(" input device check on\n"); 1480 #ifdef CONFIG_NET_CLS_ACT 1481 pr_info(" Actions configured\n"); 1482 #endif 1483 tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE, 1484 sizeof(struct hlist_head), 1485 GFP_KERNEL); 1486 if (!tc_u_common_hash) 1487 return -ENOMEM; 1488 1489 for (i = 0; i < U32_HASH_SIZE; i++) 1490 INIT_HLIST_HEAD(&tc_u_common_hash[i]); 1491 1492 ret = register_tcf_proto_ops(&cls_u32_ops); 1493 if (ret) 1494 kvfree(tc_u_common_hash); 1495 return ret; 1496 } 1497 1498 static void __exit exit_u32(void) 1499 { 1500 unregister_tcf_proto_ops(&cls_u32_ops); 1501 kvfree(tc_u_common_hash); 1502 } 1503 1504 module_init(init_u32) 1505 module_exit(exit_u32) 1506 MODULE_DESCRIPTION("Universal 32bit based TC Classifier"); 1507 MODULE_LICENSE("GPL"); 1508