/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head rcu;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
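
/*
 * Worked example for the masked-range logic above: on a 64-bit build
 * (sizeof(long) == 8), a mask whose first non-zero byte is at offset 13 and
 * whose last non-zero byte is at offset 22 gets range.start = 8 and
 * range.end = 24, so hashing and masked compares touch just two longs
 * instead of the whole fl_flow_key.
 */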
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head,
				       struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&head->ht,
				      fl_key_get_start(mkey, &head->mask),
				      head->ht_params);
}

static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	fl_clear_masked_range(&skb_key, &head->mask);

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = fl_lookup(head, &skb_mkey);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_flower.common, tp);
	cls_flower.command = TC_CLSFLOWER_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			 &cls_flower, false);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(f->flags);
	int err;

	tc_cls_common_offload_init(&cls_flower.common, tp);
	cls_flower.command = TC_CLSFLOWER_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.dissector = dissector;
	cls_flower.mask = mask;
	cls_flower.key = &f->mkey;
	cls_flower.exts = &f->exts;

	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			       &cls_flower, skip_sw);
	if (err < 0) {
		fl_hw_destroy_filter(tp, f);
		return err;
	} else if (err > 0) {
		f->flags |= TCA_CLS_FLAGS_IN_HW;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_flower.common, tp);
	cls_flower.command = TC_CLSFLOWER_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.exts = &f->exts;

	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			 &cls_flower, false);
}
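
/*
 * Offload bookkeeping for the callbacks above: a positive return from
 * tc_setup_cb_call() means at least one device accepted the rule, which
 * sets TCA_CLS_FLAGS_IN_HW for later dumping.  If the user requested
 * skip_sw and no hardware took the rule, fl_hw_replace_filter() fails with
 * -EINVAL, and fl_classify() additionally refuses to match skip_sw filters
 * in software.
 */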
static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);

	idr_remove_ext(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}

static void fl_destroy(struct tcf_proto *tp)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list)
		__fl_delete(tp, f);
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);

	return idr_find_ext(&head->handle_idr, handle);
}
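
/*
 * Teardown above is deliberately two-stage: fl_destroy() waits out an RCU
 * grace period, and the RCU callback then defers to a workqueue because
 * rhashtable_destroy() may sleep.  The __module_get()/module_put() pair
 * keeps the module loaded until that deferred work has run.
 */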
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
	[TCA_FLOWER_INDEV] = { .type = NLA_STRING,
			       .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}
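
/*
 * Note on fl_set_key_val() above: if a value attribute is supplied without
 * its *_MASK counterpart, the mask defaults to all ones, i.e. the field is
 * matched exactly rather than treated as a wildcard.
 */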
static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS,
		       &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK,
		       sizeof(key->tos));

	fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL,
		       &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK,
		       sizeof(key->ttl));
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, &key->ip, &mask->ip);
	}
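
	/* The address family of the key is chosen by which address
	 * attributes are present, not by the ethertype alone; IPv4 and
	 * IPv6 addresses share a union in struct fl_flow_key.
	 */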
	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}
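
	/* The ENC_* attributes below describe the outer tunnel header
	 * (key id, outer addresses, outer UDP ports) taken from the skb's
	 * tunnel metadata, e.g. from a VXLAN or GENEVE device in external
	 * (collect metadata) mode.
	 */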
	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags,
				       &mask->control.flags);

	return ret;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
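
/*
 * The hashtable above is keyed on the masked slice of the key only:
 * key_offset is advanced by range.start and key_len is the rounded mask
 * length, so bytes outside the mask never influence hashing or compares.
 * This is also why one classifier instance can serve only a single mask
 * (see fl_check_assign_mask() below).
 */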
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);

static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
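
/*
 * Only key members with at least one non-zero mask byte are handed to
 * skb_flow_dissector_init(), so per-packet dissection in fl_classify() does
 * no more work than the installed filters require; CONTROL and BASIC are
 * always included as a baseline.
 */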
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* No mask is assigned yet, so assign it and initialize the
	 * hashtable accordingly.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	return 0;
}
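
/*
 * fl_change() below is the configuration entry point: it parses the netlink
 * attributes, allocates a handle from the IDR when the user did not pick
 * one, builds the key and mask, binds the (single) mask to the head, then
 * inserts the filter into the software hashtable unless skip_sw was
 * requested and offloads it unless skip_hw was requested.  Replacing an
 * existing filter swaps the list and IDR entries and frees the old filter
 * after an RCU grace period.
 */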
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct nlattr **tb;
	struct fl_flow_mask mask = {};
	unsigned long idr_index;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return -ENOBUFS;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
			       fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
				    1, 0x80000000, GFP_KERNEL);
		if (err)
			goto errout;
		fnew->handle = idr_index;
	}

	/* user specifies a handle and it doesn't exist */
	if (handle && !fold) {
		err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
				    handle, handle + 1, GFP_KERNEL);
		if (err)
			goto errout;
		fnew->handle = idr_index;
	}

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout_idr;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout_idr;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout_idr;

	if (!tc_skip_sw(fnew->flags)) {
		if (!fold && fl_lookup(head, &fnew->mkey)) {
			err = -EEXIST;
			goto errout_idr;
		}

		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout_idr;
	}

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp,
					   &head->dissector,
					   &mask.key,
					   fnew);
		if (err)
			goto errout_idr;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold);
	}

	*arg = fnew;

	if (fold) {
		fnew->handle = handle;
		idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	kfree(tb);
	return 0;

errout_idr:
	if (fnew->handle)
		idr_remove_ext(&head->handle_idr, fnew->handle);
errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
errout_tb:
	kfree(tb);
	return err;
}

static int fl_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = arg;

	if (!tc_skip_sw(f->flags))
		rhashtable_remove_fast(&head->ht, &f->ht_node,
				       head->ht_params);
	__fl_delete(tp, f);
	*last = list_empty(&head->filters);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	int err;

	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
		return 0;
	if (mpls_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 mpls_key->mpls_ttl);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 mpls_key->mpls_tc);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  mpls_key->mpls_label);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 mpls_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_ip(struct sk_buff *skb,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos,
			    TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl,
			    TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl)))
		return -1;

	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}
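
/*
 * The dump helpers above emit a value/mask attribute pair only when the
 * stored mask has at least one non-zero byte (memchr_inv), so a dump
 * reflects exactly the fields that were configured.
 */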
static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC,
				  &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");
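
/*
 * Typical userspace usage for reference (iproute2 syntax, which may vary by
 * version):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 skip_sw action drop
 *
 * The skip_sw/skip_hw keywords correspond to the TCA_CLS_FLAGS_* bits
 * handled in fl_change() and fl_classify() above.
 */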