/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

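/**
 * nfp_flower_xmit_flow() - Send a flow rule to the firmware.
 * @netdev: netdev structure, expected to be a repr netdev.
 * @nfp_flow: Flow payload carrying metadata, key, mask and action data.
 * @mtype: Control message type, e.g. NFP_FLOWER_CMSG_TYPE_FLOW_ADD.
 *
 * Packs the rule metadata, unmasked key, mask and action data into a
 * single control message and queues it on the control channel. The
 * metadata length fields are converted to firmware long words for the
 * duration of the copy and restored to bytes before returning.
 *
 * Return: negative value on error, 0 if the message was queued.
 */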
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

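/**
 * nfp_flower_calculate_key_layers() - Validate a flow and size its key.
 * @app: Pointer to the APP handle
 * @ret_key_ls: Key layer structure to populate.
 * @flow: TC flower classifier offload structure.
 * @egress: NFP netdev is the egress.
 * @tun_type: Set to the required tunnel type, if the flow matches one.
 *
 * Checks that the dissector keys used by the rule are supported by the
 * firmware and accumulates the key layer flags and total key size
 * needed to represent the match in hardware.
 *
 * Return: 0 on success, -EOPNOTSUPP if the flow cannot be offloaded.
 */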
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tun dissector is used then the required set must be used. */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);
		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non-tunnel matches offloaded to egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
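		/* e.g. a match on PSH alone cannot be offloaded, while
		 * SYN|PSH can.
		 */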
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include an IPv4 key layer if we have
		 * not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	spin_lock_init(&flow_pay->lock);

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure.
 * @egress: NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	int err;

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
	port->tc_offload_cnt++;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_fl_payload *nfp_flow;
	int err;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

err_free_flow:
	hash_del_rcu(&nfp_flow->link);
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	kfree_rcu(nfp_flow, rcu);
	return err;
}

499 */ 500 static int 501 nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow) 502 { 503 struct nfp_fl_payload *nfp_flow; 504 505 nfp_flow = nfp_flower_search_fl_table(app, flow->cookie); 506 if (!nfp_flow) 507 return -EINVAL; 508 509 spin_lock_bh(&nfp_flow->lock); 510 tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes, 511 nfp_flow->stats.pkts, nfp_flow->stats.used); 512 513 nfp_flow->stats.pkts = 0; 514 nfp_flow->stats.bytes = 0; 515 spin_unlock_bh(&nfp_flow->lock); 516 517 return 0; 518 } 519 520 static int 521 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, 522 struct tc_cls_flower_offload *flower, bool egress) 523 { 524 if (!eth_proto_is_802_3(flower->common.protocol)) 525 return -EOPNOTSUPP; 526 527 switch (flower->command) { 528 case TC_CLSFLOWER_REPLACE: 529 return nfp_flower_add_offload(app, netdev, flower, egress); 530 case TC_CLSFLOWER_DESTROY: 531 return nfp_flower_del_offload(app, netdev, flower); 532 case TC_CLSFLOWER_STATS: 533 return nfp_flower_get_stats(app, flower); 534 } 535 536 return -EOPNOTSUPP; 537 } 538 539 int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, 540 void *cb_priv) 541 { 542 struct nfp_repr *repr = cb_priv; 543 544 if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data)) 545 return -EOPNOTSUPP; 546 547 switch (type) { 548 case TC_SETUP_CLSFLOWER: 549 return nfp_flower_repr_offload(repr->app, repr->netdev, 550 type_data, true); 551 default: 552 return -EOPNOTSUPP; 553 } 554 } 555 556 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, 557 void *type_data, void *cb_priv) 558 { 559 struct nfp_repr *repr = cb_priv; 560 561 if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data)) 562 return -EOPNOTSUPP; 563 564 switch (type) { 565 case TC_SETUP_CLSFLOWER: 566 return nfp_flower_repr_offload(repr->app, repr->netdev, 567 type_data, false); 568 default: 569 return -EOPNOTSUPP; 570 } 571 } 572 573 static int nfp_flower_setup_tc_block(struct net_device *netdev, 574 struct tc_block_offload *f) 575 { 576 struct nfp_repr *repr = netdev_priv(netdev); 577 578 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 579 return -EOPNOTSUPP; 580 581 switch (f->command) { 582 case TC_BLOCK_BIND: 583 return tcf_block_cb_register(f->block, 584 nfp_flower_setup_tc_block_cb, 585 repr, repr); 586 case TC_BLOCK_UNBIND: 587 tcf_block_cb_unregister(f->block, 588 nfp_flower_setup_tc_block_cb, 589 repr); 590 return 0; 591 default: 592 return -EOPNOTSUPP; 593 } 594 } 595 596 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, 597 enum tc_setup_type type, void *type_data) 598 { 599 switch (type) { 600 case TC_SETUP_BLOCK: 601 return nfp_flower_setup_tc_block(netdev, type_data); 602 default: 603 return -EOPNOTSUPP; 604 } 605 } 606