// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = act->vlan.proto;

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
	       struct nfp_fl_payload *nfp_flow, int act_len,
	       struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = act->dev;
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
		return -EOPNOTSUPP;
	}

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct flow_action_entry *act,
	      struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
	      struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = act->dev;
	if (!out_dev) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
		return -EOPNOTSUPP;
	}

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
			return -EOPNOTSUPP;
		}

		if (*tun_out_cnt) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
			return -EOPNOTSUPP;
		}
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
			return gid;
		}
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		if (nfp_netdev_is_nfp_repr(in_dev)) {
			/* Confirm ingress and egress are on same device. */
			if (!netdev_port_same_parent_id(in_dev, out_dev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
				return -EOPNOTSUPP;
			}
		}

		if (!nfp_netdev_is_nfp_repr(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
			return -EOPNOTSUPP;
		}

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
			return -EOPNOTSUPP;
		}
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct flow_action_entry *act)
{
	const struct ip_tunnel_info *tun = act->tunnel;
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(GENEVE_UDP_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct flow_action_entry *act,
			   struct netlink_ext_ack *extack)
{
	struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
			return -EOPNOTSUPP;
		}

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
			return -EOPNOTSUPP;
		}

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
		return -EOPNOTSUPP;
	}

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
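
/* Illustrative note (not in the original source): with tunnel metadata
 * carrying two TLVs [A, B], the second loop above writes push_geneve(B)
 * at the lower action-list offset and push_geneve(A) after it, i.e. the
 * options land in reverse order, which is the layout the firmware expects.
 */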

static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct flow_action_entry *act,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev,
			struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	const struct ip_tunnel_info *ip_tun = act->tunnel;
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	     !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
		return -EOPNOTSUPP;
	}

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if fails then use
		 * default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
		return -EOPNOTSUPP;
	}
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}

static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

struct ipv4_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr,
	       struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
	       struct netlink_ext_ack *extack)
{
	struct ipv4_ttl_word *ttl_word_mask;
	struct ipv4_ttl_word *ttl_word;
	struct iphdr *tos_word_mask;
	struct iphdr *tos_word;
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
		return -EOPNOTSUPP;
	}

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, ttl):
		ttl_word_mask = (struct ipv4_ttl_word *)&mask;
		ttl_word = (struct ipv4_ttl_word *)&exact;

		if (ttl_word_mask->protocol || ttl_word_mask->check) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	case round_down(offsetof(struct iphdr, tos), 4):
		tos_word_mask = (struct iphdr *)&mask;
		tos_word = (struct iphdr *)&exact;

		if (tos_word_mask->version || tos_word_mask->ihl ||
		    tos_word_mask->tot_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
		return -EOPNOTSUPP;
	}

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

struct ipv6_hop_limit_word {
	__be16	payload_len;
	u8	nexthdr;
	u8	hop_limit;
};

static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
				    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
				    struct netlink_ext_ack *extack)
{
	struct ipv6_hop_limit_word *fl_hl_mask;
	struct ipv6_hop_limit_word *fl_hl;

	switch (off) {
	case offsetof(struct ipv6hdr, payload_len):
		fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
		fl_hl = (struct ipv6_hop_limit_word *)&exact;

		if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
					    fl_hl_mask->hop_limit;
		break;
	case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
		if (mask & ~IPV6_FLOW_LABEL_MASK ||
		    exact & ~IPV6_FLOW_LABEL_MASK) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_label_mask |= mask;
		ip_hl_fl->ipv6_label &= ~mask;
		ip_hl_fl->ipv6_label |= exact & mask;
		break;
	}

	ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
	ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src,
	       struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
	       struct netlink_ext_ack *extack)
{
	__be32 exact, mask;
	int err = 0;
	u8 word;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
		return -EOPNOTSUPP;
	}

	if (off < offsetof(struct ipv6hdr, saddr)) {
		err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
							  ip_hl_fl, extack);
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		   sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
		return -EOPNOTSUPP;
	}

	return err;
}

static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode,
		 struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match,
		 * both TCP and UDP will be updated if encountered
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

struct nfp_flower_pedit_acts {
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
};

static int
nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
		     int *a_len, struct nfp_flower_pedit_acts *set_act,
		     u32 *csum_updated)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	size_t act_size = 0;
	u8 ip_proto = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;
	}

	if (set_act->set_eth.head.len_lw) {
		act_size = sizeof(set_act->set_eth);
		memcpy(nfp_action, &set_act->set_eth, act_size);
		*a_len += act_size;
	}

	if (set_act->set_ip_ttl_tos.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_ttl_tos);
		memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				 nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_addr);
		memcpy(nfp_action, &set_act->set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				 nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_tc_hl_fl);
		memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_dst.head.len_lw &&
	    set_act->set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action,
		 * the hardware requires this to be 2 separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
		       &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_act->set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_tport);
		memcpy(nfp_action, &set_act->set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}

static int
nfp_fl_pedit(const struct flow_action_entry *act,
	     struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
	     u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
	     struct netlink_ext_ack *extack)
{
	enum flow_action_mangle_base htype;
	u32 offset;

	htype = act->mangle.htype;
	offset = act->mangle.offset;

	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
				      &set_act->set_ip_ttl_tos, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
				      &set_act->set_ip6_src,
				      &set_act->set_ip6_tc_hl_fl, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_TCP, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_UDP, extack);
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
		return -EOPNOTSUPP;
	}
}
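
/* Worked example (illustrative, not part of the original file): a rule such
 * as "action pedit ex munge ip ttl set 64" should reach this module as a
 * FLOW_ACTION_MANGLE entry with htype TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 and a
 * 32-bit mangle at offsetof(struct iphdr, ttl). nfp_fl_pedit() dispatches it
 * to nfp_fl_set_ip4(), which records ttl 0x40 with a ttl-only mask in
 * set_ip_ttl_tos, and nfp_fl_commit_mangle() later emits the
 * NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS action built from it.
 */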

static int
nfp_flower_output_action(struct nfp_app *app,
			 const struct flow_action_entry *act,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated,
			 struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
		return -EOPNOTSUPP;
	}

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
		return -EOPNOTSUPP;
	}

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt, extack);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns -err or size of prelag action added.
		 * This will be 0 if it is not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
		if (prelag_size < 0) {
			return prelag_size;
		} else if (prelag_size > 0 && (!last || *out_cnt)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
			return -EOPNOTSUPP;
		}

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated,
		       struct nfp_flower_pedit_acts *set_act,
		       struct netlink_ext_ack *extack)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
		break;
	case FLOW_ACTION_REDIRECT:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated, extack);
		if (err)
			return err;
		break;
	case FLOW_ACTION_MIRRED:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated, extack);
		if (err)
			return err;
		break;
	case FLOW_ACTION_VLAN_POP:
		if (*a_len +
		    sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
			return -EOPNOTSUPP;
		}

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
		break;
	case FLOW_ACTION_VLAN_PUSH:
		if (*a_len +
		    sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
			return -EOPNOTSUPP;
		}

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, act);
		*a_len += sizeof(struct nfp_fl_push_vlan);
		break;
	case FLOW_ACTION_TUNNEL_ENCAP: {
		const struct ip_tunnel_info *ip_tun = act->tunnel;

		*tun_type = nfp_fl_get_tun_from_act_l4_port(app, act);
		if (*tun_type == NFP_FL_TUNNEL_NONE) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
			return -EOPNOTSUPP;
		}

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
			return -EOPNOTSUPP;
		}

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
915 */ 916 if (*a_len + sizeof(struct nfp_fl_pre_tunnel) + 917 sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ) { 918 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap"); 919 return -EOPNOTSUPP; 920 } 921 922 pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len); 923 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); 924 *a_len += sizeof(struct nfp_fl_pre_tunnel); 925 926 err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack); 927 if (err) 928 return err; 929 930 set_tun = (void *)&nfp_fl->action_data[*a_len]; 931 err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun, 932 *tun_type, netdev, extack); 933 if (err) 934 return err; 935 *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); 936 } 937 break; 938 case FLOW_ACTION_TUNNEL_DECAP: 939 /* Tunnel decap is handled by default so accept action. */ 940 return 0; 941 case FLOW_ACTION_MANGLE: 942 if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len], 943 a_len, csum_updated, set_act, extack)) 944 return -EOPNOTSUPP; 945 break; 946 case FLOW_ACTION_CSUM: 947 /* csum action requests recalc of something we have not fixed */ 948 if (act->csum_flags & ~*csum_updated) { 949 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list"); 950 return -EOPNOTSUPP; 951 } 952 /* If we will correctly fix the csum we can remove it from the 953 * csum update list. Which will later be used to check support. 954 */ 955 *csum_updated &= ~act->csum_flags; 956 break; 957 default: 958 /* Currently we do not handle any other actions. */ 959 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list"); 960 return -EOPNOTSUPP; 961 } 962 963 return 0; 964 } 965 966 static bool nfp_fl_check_mangle_start(struct flow_action *flow_act, 967 int current_act_idx) 968 { 969 struct flow_action_entry current_act; 970 struct flow_action_entry prev_act; 971 972 current_act = flow_act->entries[current_act_idx]; 973 if (current_act.id != FLOW_ACTION_MANGLE) 974 return false; 975 976 if (current_act_idx == 0) 977 return true; 978 979 prev_act = flow_act->entries[current_act_idx - 1]; 980 981 return prev_act.id != FLOW_ACTION_MANGLE; 982 } 983 984 static bool nfp_fl_check_mangle_end(struct flow_action *flow_act, 985 int current_act_idx) 986 { 987 struct flow_action_entry current_act; 988 struct flow_action_entry next_act; 989 990 current_act = flow_act->entries[current_act_idx]; 991 if (current_act.id != FLOW_ACTION_MANGLE) 992 return false; 993 994 if (current_act_idx == flow_act->num_entries) 995 return true; 996 997 next_act = flow_act->entries[current_act_idx + 1]; 998 999 return next_act.id != FLOW_ACTION_MANGLE; 1000 } 1001 1002 int nfp_flower_compile_action(struct nfp_app *app, 1003 struct tc_cls_flower_offload *flow, 1004 struct net_device *netdev, 1005 struct nfp_fl_payload *nfp_flow, 1006 struct netlink_ext_ack *extack) 1007 { 1008 int act_len, act_cnt, err, tun_out_cnt, out_cnt, i; 1009 struct nfp_flower_pedit_acts set_act; 1010 enum nfp_flower_tun_type tun_type; 1011 struct flow_action_entry *act; 1012 u32 csum_updated = 0; 1013 1014 memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); 1015 nfp_flow->meta.act_len = 0; 1016 tun_type = NFP_FL_TUNNEL_NONE; 1017 act_len = 0; 1018 act_cnt = 0; 1019 tun_out_cnt = 0; 1020 out_cnt = 0; 1021 1022 flow_action_for_each(i, act, &flow->rule->action) { 1023 if (nfp_fl_check_mangle_start(&flow->rule->action, i)) 1024 memset(&set_act, 0, sizeof(set_act)); 1025 err = nfp_flower_loop_action(app, act, 
		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated, &set_act,
					     extack);
		if (err)
			return err;
		act_cnt++;
		if (nfp_fl_check_mangle_end(&flow->rule->action, i))
			nfp_fl_commit_mangle(flow,
					     &nfp_flow->action_data[act_len],
					     &act_len, &set_act, &csum_updated);
	}

	/* We optimise when the action list is small; this can unfortunately
	 * not happen once there is more than one action in the action list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}
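
/* Usage sketch (illustrative, not part of the driver): a rule that exercises
 * the VLAN push and redirect paths compiled above, assuming two nfp
 * representor netdevs named ens1np0 and ens1np1:
 *
 *   tc filter add dev ens1np0 ingress protocol ip flower skip_sw \
 *           action vlan push id 100 \
 *           action mirred egress redirect dev ens1np1
 */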