// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};

static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

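/* Helpers used when the ethertype or IP protocol is not one we can offload:
 * they report whether the rule also matches on fields above L2 or above L3,
 * in which case the match cannot be honoured by the firmware.
 */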
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->key->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

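/* Walk the flower dissector keys used by the rule and work out which NFP
 * flower key layers are needed, the total key size and, for encapsulated
 * matches, the tunnel type. Returns -EOPNOTSUPP (with an extack message)
 * for matches the firmware cannot support.
 */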
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL};
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
	}

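	/* Match on tunnel metadata: only IPv4 VXLAN and Geneve tunnels with
	 * an exact-match outer destination address and UDP port can be
	 * offloaded.
	 */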
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}
		if (enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only IPv4 tunnels are supported");
			return -EOPNOTSUPP;
		}

		/* These fields are already verified as used. */
		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
		if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
			return -EOPNOTSUPP;
		}

		flow_rule_match_enc_ports(rule, &enc_ports);
		if (enc_ports.mask->dst != cpu_to_be16(~0)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		switch (enc_ports.key->dst) {
		case htons(IANA_VXLAN_UDP_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op.key) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
				return -EOPNOTSUPP;
			}
			break;
		case htons(GENEVE_UDP_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
				return -EOPNOTSUPP;
			}
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op.key)
				break;
			if (!(priv->flower_ext_feats &
			      NFP_FL_FEATS_GENEVE_OPT)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
				return -EOPNOTSUPP;
			}
			err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
							&key_size, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
			return -EOPNOTSUPP;
		}

		/* Ensure the ingress netdev matches the expected tun type. */
		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: non IPv4/IPv6 offload with L3/L4 matches not supported");
				return -EOPNOTSUPP;
			}
			break;
		}
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_l3(flow)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unknown IP protocol with L4 matches not supported");
				return -EOPNOTSUPP;
			}
			break;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

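/* Allocate a flow payload with key/mask buffers sized for the given key
 * layout and a maximum sized action buffer.
 */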
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

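/* OR the masks of any header fields rewritten by @flow's actions into
 * @merge, so those fields count as known when validating a merge candidate.
 * Also counts output actions in @act_out and records the opcode of the last
 * action in @last_act_id.
 */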
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			memset(&merge->ipv4, 0xff,
			       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}

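/* Copy the mask portions of @flow's match into the merge check structure.
 * If @extra_fields is false, flows matching on layers outside
 * NFP_FLOWER_MERGE_FIELDS are rejected.
 */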
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}

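/* Copy any leading pre-actions (pre-tunnel/pre-LAG) from @act_src to
 * @act_dst; these must stay at the head of the merged action list.
 * Returns the number of bytes copied.
 */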
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			/* fall through */
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

static int nfp_fl_verify_post_tun_acts(char *acts, int len)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];
		if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	return 0;
}

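/* Build the merge flow's action list from the actions of the two sub_flows,
 * dropping the output action that chains sub_flow1 to sub_flow2 and keeping
 * any pre-actions at the head of the list.
 */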
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, sub_flow 2 can only have output actions for a valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
		if (err)
			return err;
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct tc_cls_flower_offload merge_tc_off;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	int err;

	ASSERT_RTNL();

	extack = merge_tc_off.common.extack;
	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
					merge_flow->ingress_dev, extack);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_rhash;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

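/* Remove a merge flow from firmware. If the sub_flow being deleted is not
 * the flow the merge was built on, the original rule is restored in
 * firmware via a flow modify instead.
 */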
static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_fl_payload *origin;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list)
		nfp_flower_unlink_flow(link);

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
	}
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

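/* Entry point for flower classifier commands (replace/destroy/stats)
 * arriving on an offloaded TC block.
 */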
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = tcf_block_shared(f->block);

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

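/* Indirect TC block handling: lets the driver receive TC block callbacks
 * for netdevs it does not own, registered below via
 * __tc_indr_block_cb_register().
 */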
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct tc_cls_flower_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct tc_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	int err;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	      nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		err = tcf_block_cb_register(f->block,
					    nfp_flower_setup_indr_block_cb,
					    cb_priv, cb_priv, f->extack);
		if (err) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
		}

		return err;
	case TC_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_indr_block_cb,
					cb_priv);
		list_del(&cb_priv->list);
		kfree(cb_priv);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}