// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_CT) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_META) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
	(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
	(NFP_FLOWER_LAYER_EXT_META | \
	 NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};
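
/* Send a flow add/mod/del control message for @nfp_flow to the firmware.
 * The message body is the rule metadata followed by the unmasked key, the
 * mask and the action data. Length fields are converted to long words
 * (NFP_FL_LW_SIZ) for the wire and restored to byte units afterwards.
 */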
int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
{
	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
{
	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size, bool ipv6,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
	    (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}
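
/* Map the UDP tunnel destination port to a tunnel type (VXLAN or GENEVE)
 * and account for the extra key layers and key size that tunnel requires,
 * rejecting combinations the loaded firmware cannot offload.
 */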
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
			      struct flow_dissector_key_enc_opts *enc_op,
			      u32 *key_layer_two, u8 *key_layer, int *key_size,
			      struct nfp_flower_priv *priv,
			      enum nfp_flower_tun_type *tun_type, bool ipv6,
			      struct netlink_ext_ack *extack)
{
	int err;

	switch (enc_ports->dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;

		if (ipv6) {
			*key_layer |= NFP_FLOWER_LAYER_EXT_META;
			*key_size += sizeof(struct nfp_flower_ext_meta);
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		if (enc_op) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
			return -EOPNOTSUPP;
		}
		break;
	case htons(GENEVE_UDP_PORT):
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
			return -EOPNOTSUPP;
		}
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_size += sizeof(struct nfp_flower_ext_meta);
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

		if (ipv6) {
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		if (!enc_op)
			break;
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
			return -EOPNOTSUPP;
		}
		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
						ipv6, extack);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
		return -EOPNOTSUPP;
	}

	return 0;
}
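
/**
 * nfp_flower_calculate_key_layers() - Determine key layers needed for a rule.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @ret_key_ls: Key layer structure populated on success
 * @rule: Flow rule to parse
 * @tun_type: Tunnel type detected from the match, if any
 * @extack: Netlink extended ACK report
 *
 * Walks the flow dissector keys used by @rule and works out which NFP key
 * layers and how much key space are required to offload it.
 *
 * Return: negative value on error, 0 on success.
 */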
int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct flow_rule *rule,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL};
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
		if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
		    !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_size += sizeof(struct nfp_flower_vlan);
			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan cvlan;

		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
			return -EOPNOTSUPP;
		}

		flow_rule_match_vlan(rule, &cvlan);
		if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_size += sizeof(struct nfp_flower_vlan);
			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_ipv6_addrs ipv6_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;
		bool ipv6_tun = false;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (flow_rule_has_enc_control_flags(enc_ctl.mask->flags,
						    extack))
			return -EOPNOTSUPP;

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}

		ipv6_tun = enc_ctl.key->addr_type ==
			   FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (ipv6_tun &&
		    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
			return -EOPNOTSUPP;
		}

		if (!ipv6_tun &&
		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
			return -EOPNOTSUPP;
		}

		if (ipv6_tun) {
			flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
			if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
				       sizeof(ipv6_addrs.mask->dst))) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
			if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
				return -EOPNOTSUPP;
			}
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
			/* Check if GRE, which has no enc_ports */
			if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
				return -EOPNOTSUPP;
			}

			*tun_type = NFP_FL_TUNNEL_GRE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GRE;

			if (ipv6_tun) {
				key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
				key_size +=
					sizeof(struct nfp_flower_ipv6_gre_tun);
			} else {
				key_size +=
					sizeof(struct nfp_flower_ipv4_gre_tun);
			}

			if (enc_op.key) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ports(rule, &enc_ports);
			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
				return -EOPNOTSUPP;
			}

			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
							    enc_op.key,
							    &key_layer_two,
							    &key_layer,
							    &key_size, priv,
							    tun_type, ipv6_tun,
							    extack);
			if (err)
				return err;

			/* Ensure the ingress netdev matches the expected
			 * tun type.
			 */
			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
			return -EOPNOTSUPP;
		}
	} else if (nfp_flower_check_higher_than_mac(rule)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
		return -EOPNOTSUPP;
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		}
	}

	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
	    nfp_flower_check_higher_than_l3(rule)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);

		if (!flow_rule_is_supp_control_flags(NFP_FLOWER_SUPPORTED_CTLFLAGS,
						     ctl.mask->flags, extack))
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}
struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->nfp_tun_ipv6 = NULL;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;
	flow_pay->pre_tun_rule.dev = NULL;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	bool ipv6_tun = false;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			if (ipv6_tun)
				memset(&merge->ipv6, 0xff,
				       sizeof(struct nfp_flower_ipv6));
			else
				memset(&merge->ipv4, 0xff,
				       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			pre_tun = (struct nfp_fl_pre_tunnel *)a;
			ipv6_tun = be16_to_cpu(pre_tun->flags) &
					NFP_FL_PRE_TUN_IPV6;
			break;
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}
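
/* Build an nfp_flower_merge_check from the mask data of @flow so that the
 * fields it matches on can be compared bit-wise during merge checking.
 */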
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}
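
/* Copy any leading pre-actions (pre-tunnel/pre-LAG) from @act_src to
 * @act_dst and return the number of bytes copied. If @tunnel_act is
 * provided it is set when a pre-tunnel action is seen.
 */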
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			fallthrough;
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
			*vlan = (struct nfp_fl_push_vlan *)a;
		else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Ensure any VLAN push also has an egress action. */
	if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
		return -EOPNOTSUPP;

	return 0;
}

static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
	struct nfp_fl_set_tun *tun;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
			tun = (struct nfp_fl_set_tun *)a;
			tun->outer_vlan_tpid = vlan->vlan_tpid;
			tun->outer_vlan_tci = vlan->vlan_tci;

			return 0;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Return error if no tunnel action is found. */
	return -EOPNOTSUPP;
}
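
/* Build the action list of @merge_flow from both sub_flows: pre-actions are
 * copied first, sub_flow1's actions (minus its final output) are appended,
 * then sub_flow2's remaining actions. If sub_flow1 pushes a tunnel, a
 * leading VLAN push in sub_flow2 is folded into the tunnel action instead.
 */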
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
	 * valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
						  &post_tun_push_vlan);
		if (err)
			return err;

		if (post_tun_push_vlan) {
			pre_off2 += sizeof(*post_tun_push_vlan);
			sub2_act_len -= sizeof(*post_tun_push_vlan);
		}
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

	if (post_tun_push_vlan) {
		/* Update tunnel action in merge to include VLAN push. */
		err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
						 post_tun_push_vlan);
		if (err)
			return err;

		merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
	}

	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app: Pointer to the APP handle
 * @sub_flow1: Initial flow matched to produce merge hint
 * @sub_flow2: Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial flow
 * from hw and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	struct nfp_merge_info *merge_info;
	u64 parent_ctx = 0;
	int err;

	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	/* Check if the two flows are already merged */
	parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
	parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
	if (rhashtable_lookup_fast(&priv->merge_table,
				   &parent_ctx, merge_table_params)) {
		nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
		return 0;
	}

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
					merge_flow->ingress_dev, NULL);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
	if (!merge_info) {
		err = -ENOMEM;
		goto err_remove_rhash;
	}
	merge_info->parent_ctx = parent_ctx;
	err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
				     merge_table_params);
	if (err)
		goto err_destroy_merge_info;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_merge_info;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_merge_info:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
					    &merge_info->ht_node,
					    merge_table_params));
err_destroy_merge_info:
	kfree(merge_info);
err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_validate_pre_tun_rule() - Verify the flow as a pre-tunnel rule.
 * @app: Pointer to the APP handle
 * @flow: Pointer to NFP flow representation of rule
 * @key_ls: Pointer to NFP key layers structure
 * @extack: Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
				 struct nfp_fl_payload *flow,
				 struct nfp_fl_key_ls *key_ls,
				 struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_meta_tci *meta_tci;
	struct nfp_flower_mac_mpls *mac;
	u8 *ext = flow->unmasked_data;
	struct nfp_fl_act_head *act;
	u8 *mask = flow->mask_data;
	bool vlan = false;
	int act_offset;
	u8 key_layer;

	meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	key_layer = key_ls->key_layer;
	if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
		if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
			u16 vlan_tci = be16_to_cpu(meta_tci->tci);

			vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
			vlan = true;
		} else {
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
		}
	}

	if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
		return -EOPNOTSUPP;
	} else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
		return -EOPNOTSUPP;
	}

	if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
		return -EOPNOTSUPP;
	}

	if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
	    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
		return -EOPNOTSUPP;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6)
		flow->pre_tun_rule.is_ipv6 = true;
	else
		flow->pre_tun_rule.is_ipv6 = false;

	/* Skip fields known to exist. */
	mask += sizeof(struct nfp_flower_meta_tci);
	ext += sizeof(struct nfp_flower_meta_tci);
	if (key_ls->key_layer_two) {
		mask += sizeof(struct nfp_flower_ext_meta);
		ext += sizeof(struct nfp_flower_ext_meta);
	}
	mask += sizeof(struct nfp_flower_in_port);
	ext += sizeof(struct nfp_flower_in_port);

	/* Ensure destination MAC address is fully matched. */
	mac = (struct nfp_flower_mac_mpls *)mask;
	if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
		return -EOPNOTSUPP;
	}

	/* Ensure source MAC address is fully matched. This is only needed
	 * for firmware with the DECAP_V2 feature enabled. Don't do this
	 * for firmware without this feature to keep old behaviour.
	 */
	if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
		mac = (struct nfp_flower_mac_mpls *)mask;
		if (!is_broadcast_ether_addr(&mac->mac_src[0])) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported pre-tunnel rule: source MAC field must not be masked");
			return -EOPNOTSUPP;
		}
	}

	if (mac->mpls_lse) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
		return -EOPNOTSUPP;
	}

	/* Ensure destination MAC address matches pre_tun_dev. */
	mac = (struct nfp_flower_mac_mpls *)ext;
	if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
		return -EOPNOTSUPP;
	}

	/* Save mac addresses in pre_tun_rule entry for later use */
	memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN);
	memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN);

	mask += sizeof(struct nfp_flower_mac_mpls);
	ext += sizeof(struct nfp_flower_mac_mpls);
	if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
	    key_layer & NFP_FLOWER_LAYER_IPV6) {
		/* Flags and proto fields have the same offset in IPv4 and IPv6. */
		int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
		int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
		int size;
		int i;

		size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
		       sizeof(struct nfp_flower_ipv4) :
		       sizeof(struct nfp_flower_ipv6);

		/* Ensure proto and flags are the only IP layer fields. */
		for (i = 0; i < size; i++)
			if (mask[i] && i != ip_flags && i != ip_proto) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
				return -EOPNOTSUPP;
			}
		ext += size;
		mask += size;
	}

	if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
			struct nfp_flower_vlan *vlan_tags;
			u16 vlan_tpid;
			u16 vlan_tci;

			vlan_tags = (struct nfp_flower_vlan *)ext;

			vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
			vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid);

			vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
			flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid);
			vlan = true;
		} else {
			flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
			flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff);
		}
	}

	/* Action must be a single egress or pop_vlan and egress. */
	act_offset = 0;
	act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	if (vlan) {
		if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
			return -EOPNOTSUPP;
		}

		act_offset += act->len_lw << NFP_FL_LW_SIZ;
		act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	}

	if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
		return -EOPNOTSUPP;
	}

	act_offset += act->len_lw << NFP_FL_LW_SIZ;

	/* Ensure there are no more actions after egress. */
	if (act_offset != flow->meta.act_len) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
		return -EOPNOTSUPP;
	}

	return 0;
}

static bool offload_pre_check(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_ct ct;

	if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		/* Allow special case where CT match is all 0 */
		if (memchr_inv(ct.key, 0, sizeof(*ct.key)))
			return false;
	}

	if (flow->common.chain_index)
		return false;

	return true;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	if (is_pre_ct_flow(flow))
		return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack, NULL);

	if (is_post_ct_flow(flow))
		return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);

	if (!offload_pre_check(flow))
		return -EOPNOTSUPP;

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	if (flow_pay->pre_tun_rule.dev) {
		err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
		if (err)
			goto err_destroy_flow;
	}

	err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	if (flow_pay->pre_tun_rule.dev) {
		if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
			struct nfp_predt_entry *predt;

			predt = kzalloc(sizeof(*predt), GFP_KERNEL);
			if (!predt) {
				err = -ENOMEM;
				goto err_remove_rhash;
			}
			predt->flow_pay = flow_pay;
			INIT_LIST_HEAD(&predt->nn_list);
			spin_lock_bh(&priv->predt_lock);
			list_add(&predt->list_head, &priv->predt_list);
			flow_pay->pre_tun_rule.predt = predt;
			nfp_tun_link_and_update_nn_entries(app, predt);
			spin_unlock_bh(&priv->predt_lock);
		} else {
			err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
		}
	} else {
		err = nfp_flower_xmit_flow(app, flow_pay,
					   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	}

	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}
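
/* Tear down a merge flow. If the sub_flow it overwrote still exists it is
 * re-offloaded, otherwise the merge flow is deleted from the firmware.
 * The flow links, merge table entry and flow payload are then freed.
 */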
static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_merge_info *merge_info;
	struct nfp_fl_payload *origin;
	u64 parent_ctx = 0;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list) {
		u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);

		parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
		nfp_flower_unlink_flow(link);
	}

	merge_info = rhashtable_lookup_fast(&priv->merge_table,
					    &parent_ctx,
					    merge_table_params);
	if (merge_info) {
		WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
						    &merge_info->ht_node,
						    merge_table_params));
		kfree(merge_info);
	}

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app: Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow: TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	/* Check ct_map_table */
	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
					    nfp_ct_map_params);
	if (ct_map_ent) {
		err = nfp_fl_ct_del_flow(ct_map_ent);
		return err;
	}

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (nfp_flow->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	if (nfp_flow->pre_tun_rule.dev) {
		if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
			struct nfp_predt_entry *predt;

			predt = nfp_flow->pre_tun_rule.predt;
			if (predt) {
				spin_lock_bh(&priv->predt_lock);
				nfp_tun_unlink_and_update_nn_entries(app, predt);
				list_del(&predt->list_head);
				spin_unlock_bh(&priv->predt_lock);
				kfree(predt);
			}
		} else {
			err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
		}
	} else {
		err = nfp_flower_xmit_flow(app, nfp_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	}
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
	}
}

void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app: Pointer to the APP handle
 * @netdev: Netdev structure.
 * @flow: TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	/* Check ct_map table first */
	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
					    nfp_ct_map_params);
	if (ct_map_ent)
		return nfp_fl_ct_stats(flow, ct_map_ent);

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
			  FLOW_ACTION_HW_STATS_DELAYED);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct flow_cls_offload *flower)
{
	struct nfp_flower_priv *priv = app->priv;
	int ret;

	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	mutex_lock(&priv->nfp_fl_lock);
	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		ret = nfp_flower_add_offload(app, netdev, flower);
		break;
	case FLOW_CLS_DESTROY:
		ret = nfp_flower_del_offload(app, netdev, flower);
		break;
	case FLOW_CLS_STATS:
		ret = nfp_flower_get_stats(app, netdev, flower);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&priv->nfp_fl_lock);

	return ret;
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct flow_cls_common_offload *common = type_data;
	struct nfp_repr *repr = cb_priv;

	if (!tc_can_offload_extack(repr->netdev, common->extack))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(nfp_block_cb_list);

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = f->block_shared;
	f->driver_block_list = &nfp_block_cb_list;
	f->unlocked_driver_cb = true;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
					  &nfp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
					       repr, repr, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_tc_block_cb,
						repr);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};
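
/* Find the indirect block cb_priv previously registered for @netdev, if any. */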
static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	list_del(&priv->list);
	kfree(priv);
}
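
/* Bind or unbind an indirectly offloaded flow block for @netdev. Only
 * ingress blocks are accepted for regular netdevs and only egress blocks
 * for internal ports.
 */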
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
			       struct flow_block_offload *f, void *data,
			       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	struct flow_block_cb *block_cb;

	if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	     !nfp_flower_internal_port_can_offload(app, netdev)) ||
	    (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	     nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	f->unlocked_driver_cb = true;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (cb_priv &&
		    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
					  cb_priv,
					  &nfp_block_cb_list))
			return -EBUSY;

		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
						    cb_priv, cb_priv,
						    nfp_flower_setup_indr_tc_release,
						    f, netdev, sch, data, app, cleanup);
		if (IS_ERR(block_cb)) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_indr_block_cb,
						cb_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
{
	if (!data)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_ACT:
		return nfp_setup_tc_act_offload(app, data);
	default:
		return -EOPNOTSUPP;
	}
}

int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
			    enum tc_setup_type type, void *type_data,
			    void *data,
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	if (!netdev)
		return nfp_setup_tc_no_dev(cb_priv, type, data);

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
						      type_data, data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}