/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;
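
	/* The control message body is laid out as rule metadata, then the
	 * unmasked key, the mask and finally the actions, matching the
	 * copies below.
	 */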

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);
		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;
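
		/* Only fully-masked tunnel destination addresses and ports
		 * are accepted, so the firmware never sees a partially
		 * specified tunnel match.
		 */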
		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non tunnel matches offloaded to egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other IP proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}
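
	/* Matches on TCP flags outside NFP_FLOWER_SUPPORTED_TCPFLAGS are
	 * rejected rather than silently ignored.
	 */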
	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include an IPv4 key layer if we have
		 * not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	spin_lock_init(&flow_pay->lock);

	flow_pay->ingress_offload = !egress;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure.
 * @egress: NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
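
	/* A rule may be offered through both the ingress and egress
	 * callbacks; a table hit here means the flow has already been
	 * added, e.g. by the other callback.
	 */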
	if (flow_pay) {
		/* Ignore as duplicate if it has been added by a
		 * different callback.
		 */
		if (flow_pay->ingress_offload && egress)
			return 0;
		else
			return -EOPNOTSUPP;
	}

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer, egress);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	flow_pay->ingress_dev = egress ? NULL : netdev;

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay,
					flow_pay->ingress_dev);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
	port->tc_offload_cnt++;

	/* flow_pay is now tracked in the flow table and is only
	 * deallocated when the flower rule is destroyed; the key layout
	 * scratch space is no longer needed.
	 */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure
 * @egress: Netdev is the egress dev.
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return egress ? 0 : -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;
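
	/* Fall through: the flow is unlinked from the host table whether
	 * or not the firmware delete message could be sent.
	 */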
err_free_flow:
	hash_del_rcu(&nfp_flow->link);
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app: Pointer to the APP handle
 * @netdev: Netdev structure.
 * @flow: TC flower classifier offload structure
 * @egress: Netdev is the egress dev.
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return -EINVAL;

	if (nfp_flow->ingress_offload && egress)
		return 0;

	spin_lock_bh(&nfp_flow->lock);
	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
			      nfp_flow->stats.pkts, nfp_flow->stats.used);

	nfp_flow->stats.pkts = 0;
	nfp_flow->stats.bytes = 0;
	spin_unlock_bh(&nfp_flow->lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower, bool egress)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower, egress);
	}

	return -EOPNOTSUPP;
}

int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}