// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
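/* For orientation, a typical userspace invocation that exercises this
 * classifier (illustrative only; device and classid are examples):
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *           match ip dst 192.168.0.0/24 flowid 1:1
 *
 * iproute2 translates the "match" into the 32bit key/mask pairs
 * (struct tc_u32_sel / struct tc_u32_key) that u32_classify() walks below.
 */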
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
	int			ifindex;
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	bool			is_root;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	long			knodes;
};

static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
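/* Worked example (illustrative values): with sel->hmask = htonl(0x00ff0000),
 * u32_change() below computes fshift = ffs(0x00ff0000) - 1 = 16, so a word
 * whose masked field holds 0x12 folds to bucket 0x12, which u32_classify()
 * then clips with the table's divisor.
 */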
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}

static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}
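/* Handle layout, as encoded by the TC_U32_* macros in <linux/pkt_cls.h>:
 * bits 20-31 select the hash table (htid), bits 12-19 the bucket within
 * it and bits 0-11 the key node id; a handle whose low 20 bits are zero
 * names the table itself. The root table is pinned at htid 0x800, so
 * gen_new_htid() above hands out htids from 0x801 upward.
 */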
static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* The block sharing is currently supported only
	 * for classless qdiscs. In that case we use block
	 * for tc_u_common identification. In case the
	 * block is not shared, block->q is a valid pointer
	 * and we can use that. That works for classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

static struct hlist_head *tc_u_hash(void *key)
{
	return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(void *key)
{
	struct tc_u_common *tc;

	hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
		if (tc->ptr == key)
			return tc;
	}
	return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	void *key = tc_u_common_ptr(tp);
	struct tc_u_common *tp_c = tc_u_common_find(key);

	root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	root_ht->is_root = true;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = key;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		hlist_add_head(&tp_c->hnode, tc_u_hash(key));
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);

	root_ht->refcnt++;
	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static void __u32_destroy_key(struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
	kfree(n);
}

static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
	tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	__u32_destroy_key(n);
}

/* u32_delete_key_work should be used when freeing a copied version of
 * a tc_u_knode obtained from u32_init_knode(). When copies are obtained
 * from u32_init_knode() the statistics are shared between the old and
 * new copies to allow readers to continue to update the statistics
 * during the copy. To support this the u32_delete_key_work variant does
 * not free the percpu statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, false);
	rtnl_unlock();
}
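/* The delete workers above and below run via tcf_queue_work(), which
 * only fires after an RCU grace period has elapsed, so readers still
 * walking a bucket in u32_classify() never touch freed memory. The
 * workers retake the rtnl lock since the tcf_exts cleanup in
 * u32_destroy_key() is expected to run under it.
 */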
/* u32_delete_key_freepf_work is the work-queue variant that frees the
 * entire structure including the statistics percpu variables. Only use
 * this if the key is not a copy returned by u32_init_knode(). See
 * u32_delete_key_work() for the variant that should be used with keys
 * returned from u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, true);
	rtnl_unlock();
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);
				tp_c->knodes--;

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}
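/* Note on the offload contract above: with TCA_CLS_FLAGS_SKIP_SW the
 * filter must reside in hardware, so a driver refusing the hnode is a
 * hard failure (-EINVAL); otherwise the software datapath remains a
 * valid fallback and the driver error is not fatal.
 */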
static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
			    &n->flags, &n->in_hw_count, true);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.res = &n->res;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
			      &n->flags, &n->in_hw_count, true);
	if (err) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tp_c->knodes--;
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(--ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 1)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (ht->is_root) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = tp_c->refcnt == 1 && tp_c->knodes == 0;
	return ret;
}
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, u32 flags, u32 fl_flags,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(tp->data, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			if (ht_down->is_root) {
				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
	return 0;
}
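/* Example of the TCA_U32_LINK plumbing handled above (illustrative
 * iproute2 syntax; device, handles and offsets are just examples):
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *           handle 1: u32 divisor 256
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *           match ip protocol 6 0xff link 1: \
 *           hashkey mask 0x00ff0000 at 12
 *
 * "link 1:" arrives here as tb[TCA_U32_LINK] and points n->ht_down at
 * the 256-bucket table; the hashkey mask becomes sel->hmask, which
 * u32_hash_fold() folds into a bucket at classification time.
 */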
static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is
	 * not the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

	new->ifindex = n->ifindex;
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));

	if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

	return new;
}
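/* u32_change() below is the single entry point for all mutations: it
 * first handles replacement of an existing key node (via an RCU copy
 * from u32_init_knode()), then creation of a new hash table when
 * TCA_U32_DIVISOR is present, and finally insertion of a new key node.
 */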
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, u32 flags,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, userflags = 0;
	size_t sel_size;
	int err;

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(userflags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ userflags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(net, tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base, new, tb,
				    tca[TCA_RATE], flags, new->flags,
				    extack);

		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (!is_power_of_2(divisor)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
			return -EINVAL;
		}
		if (divisor-- > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = userflags;

		err = u32_replace_hw_hnode(tp, ht, userflags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
			       __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	memcpy(&n->sel, s, sel_size);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = userflags;

	err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE],
			    flags, n->flags, extack);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		tp_c->knodes++;
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		     bool rtnl_held)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}
static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.res = &n->res;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
				     &cls_u32, cb_priv, &n->flags,
				     &n->in_hw_count);
}

static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			   unsigned long base)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &n->res, base);
		else
			__tcf_unbind_filter(q, &n->res);
	}
}
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.reoffload	= u32_reoffload,
	.dump		= u32_dump,
	.bind_class	= u32_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
	pr_info("    input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");