/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>

/**
 * flow_rule_alloc - allocate a flow rule with room for @num_actions actions
 * @num_actions: number of entries to reserve in the trailing action array
 *
 * Returns a zeroed rule with action.num_entries already set, or NULL on
 * allocation failure.  The caller owns the returned memory (kfree() when
 * done).
 */
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	/* struct_size() computes the flexible-array allocation size with
	 * integer-overflow checking.
	 */
	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

/* Expands into the complete body of a flow_rule_match_*() helper: resolve
 * the key and mask pointers for dissector key @__type and store them in
 * @__out.  Intentionally NOT wrapped in do { } while (0) -- it declares
 * locals in the enclosing function's scope and is only ever used as a whole
 * function body.
 */
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

/* Per-key accessors: each one extracts a single dissector key/mask pair
 * from @rule into the caller-provided @out structure.  @out points into
 * @rule's dissector storage; no copy is made.
 */
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

/* Inner (customer) VLAN tag; shares struct flow_match_vlan with the outer
 * tag accessor above.
 */
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

/* The _enc_ variants below match on the tunnel (encapsulation) headers
 * rather than the inner packet.
 */
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

/**
 * flow_action_cookie_create - allocate a cookie holding a copy of @data
 * @data: bytes to copy into the cookie
 * @len: number of bytes to copy from @data
 * @gfp: allocation flags
 *
 * Single allocation: header plus trailing byte array.  Returns NULL on
 * allocation failure.  Release with flow_action_cookie_destroy().
 */
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

/* Release a cookie allocated by flow_action_cookie_create(). */
void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

/**
 * flow_block_cb_alloc - allocate a block callback descriptor
 * @cb: setup callback to invoke for offload requests
 * @cb_ident: identity used to find this entry again (see
 *	      flow_block_cb_lookup())
 * @cb_priv: driver-private data passed back through @cb
 * @release: optional destructor for @cb_priv, run by flow_block_cb_free()
 *
 * Returns the new descriptor or ERR_PTR(-ENOMEM).
 */
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

/* Free a descriptor, first running its release hook (if any) on cb_priv. */
void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

/* Find the descriptor on @block's cb_list matching both @cb and @cb_ident,
 * or NULL if none is registered.
 */
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

/* Accessor for the driver-private pointer stashed at alloc time. */
void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

/* Plain (non-atomic) reference counting; NOTE(review): callers appear to be
 * expected to serialize incref/decref externally -- confirm against callers.
 */
void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

/* Drop a reference; returns the new count so callers can free at zero. */
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

/**
 * flow_block_cb_is_busy - test whether @cb/@cb_ident is already registered
 * @cb: setup callback
 * @cb_ident: identity cookie
 * @driver_block_list: the driver's own list of active block callbacks
 *
 * Walks the driver-side list (linked via flow_block_cb::driver_list, not
 * the per-block cb_list).  Used to reject duplicate binds.
 */
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

/**
 * flow_block_cb_setup_simple - boilerplate BIND/UNBIND handling for drivers
 * @f: block offload request from the stack
 * @driver_block_list: driver's list tracking its bound callbacks
 * @cb: driver setup callback
 * @cb_ident: identity cookie for lookup/duplicate detection
 * @cb_priv: driver-private data handed to @cb
 * @ingress_only: if true, reject anything but clsact ingress binds
 *
 * Returns 0 on success, -EBUSY on duplicate bind, -ENOENT on unbind of an
 * unknown callback, -EOPNOTSUPP for unsupported binder types or commands.
 */
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		/* Descriptor lives on both the block's list and the
		 * driver's own list.
		 */
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);

/* Subsystem entries (flow_indr_block_entry) notified on every indirect
 * register/unregister; protected by flow_indr_block_cb_lock.
 */
static LIST_HEAD(block_cb_list);

static struct rhashtable indr_setup_block_ht;

/* One driver callback registered against an indirect device. */
struct flow_indr_block_cb {
	struct list_head list;		/* node in flow_indr_block_dev::cb_list */
	void *cb_priv;
	flow_indr_block_bind_cb_t *cb;
	void *cb_ident;			/* identity for lookup/removal */
};

/* Refcounted per-netdevice anchor for indirect block callbacks; one
 * reference is held per registered callback.
 */
struct flow_indr_block_dev {
	struct rhash_head ht_node;	/* membership in indr_setup_block_ht */
	struct net_device *dev;		/* hash key */
	unsigned int refcnt;
	struct list_head cb_list;	/* flow_indr_block_cb entries */
};

/* The hash key is the net_device pointer value itself (key_len is the size
 * of the pointer, and lookups pass &dev).
 */
static const struct rhashtable_params flow_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct flow_indr_block_dev, dev),
	.head_offset	= offsetof(struct flow_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};

static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      flow_indr_setup_block_ht_params);
}

/* Look up the anchor for @dev, creating and inserting one if absent; takes
 * a reference either way.  Returns NULL on allocation or insertion failure.
 * NOTE(review): refcnt and the lookup/insert pair are not locked here --
 * presumably serialized by RTNL (see flow_indr_block_cb_register); confirm.
 */
static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   flow_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}

/* Drop a reference; on the last one, remove from the table and free. */
static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       flow_indr_setup_block_ht_params);
	kfree(indr_dev);
}

/* Find the callback entry on @indr_dev matching @cb and @cb_ident. */
static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
			  flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;
	return NULL;
}

/* Allocate and link a new callback entry on @indr_dev.  Returns
 * ERR_PTR(-EEXIST) if @cb/@cb_ident is already registered, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
		       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

/* Unlink and free a callback entry added by flow_indr_block_cb_add(). */
static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}

/* Serializes block_cb_list mutation and traversal. */
static DEFINE_MUTEX(flow_indr_block_cb_lock);

/* Notify every registered subsystem entry about a BIND/UNBIND for @dev,
 * holding flow_indr_block_cb_lock across the walk.
 */
static void flow_block_cmd(struct net_device *dev,
			   flow_indr_block_bind_cb_t *cb, void *cb_priv,
			   enum flow_block_command command)
{
	struct flow_indr_block_entry *entry;

	mutex_lock(&flow_indr_block_cb_lock);
	list_for_each_entry(entry, &block_cb_list, list) {
		entry->cb(dev, cb, cb_priv, command);
	}
	mutex_unlock(&flow_indr_block_cb_lock);
}

/**
 * __flow_indr_block_cb_register - register an indirect block callback
 * @dev: device the driver wants indirect offload notifications for
 * @cb_priv: driver-private data
 * @cb: bind callback invoked on block setup for @dev
 * @cb_ident: identity cookie (paired with @cb for later unregister)
 *
 * Lockless variant -- NOTE(review): presumably requires the caller to hold
 * RTNL, as the exported flow_indr_block_cb_register() wrapper does; confirm.
 * Returns 0, -ENOMEM, or -EEXIST for a duplicate registration.
 */
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;
	int err;

	indr_dev = flow_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	/* Replay a BIND to all subsystems so existing blocks get offloaded. */
	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_BIND);

	return 0;

err_dev_put:
	flow_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);

/* RTNL-taking wrapper around __flow_indr_block_cb_register(). */
int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb,
				void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);

/* Reverse of __flow_indr_block_cb_register(): replay UNBIND to all
 * subsystems, then tear down the callback entry and drop the device
 * reference.  Silently does nothing if the registration is not found.
 */
void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
		       FLOW_BLOCK_UNBIND);

	flow_indr_block_cb_del(indr_block_cb);
	flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);

/* RTNL-taking wrapper around __flow_indr_block_cb_unregister(). */
void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident)
{
	rtnl_lock();
	__flow_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);

/**
 * flow_indr_block_call - dispatch a block offload request to indirect users
 * @dev: device the block belongs to
 * @bo: the block offload request to pass along
 * @command: bind or unbind (currently unused here; carried in @bo)
 * @type: tc setup type forwarded to each callback
 *
 * Invokes every driver callback registered against @dev.  No-op if no
 * driver has registered for @dev.
 */
void flow_indr_block_call(struct net_device *dev,
			  struct flow_block_offload *bo,
			  enum flow_block_command command,
			  enum tc_setup_type type)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, type, bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);

/* Register a subsystem entry to be notified of indirect (un)registrations. */
void flow_indr_add_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_add_tail(&entry->list, &block_cb_list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_cb);

/* Remove a subsystem entry added by flow_indr_add_block_cb(). */
void flow_indr_del_block_cb(struct flow_indr_block_entry *entry)
{
	mutex_lock(&flow_indr_block_cb_lock);
	list_del(&entry->list);
	mutex_unlock(&flow_indr_block_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_cb);

/* Set up the indirect-device hashtable early in boot. */
static int __init init_flow_indr_rhashtable(void)
{
	return rhashtable_init(&indr_setup_block_ht,
			       &flow_indr_setup_block_ht_params);
}
subsys_initcall(init_flow_indr_rhashtable);