// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
			    struct nfp_flower_meta_tci *msk,
			    struct flow_rule *rule, u8 key_type)
{
	u16 tmp_tci;

	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

	/* Populate the metadata frame. */
	ext->nfp_flow_key_layer = key_type;
	ext->mask_id = ~0;

	msk->nfp_flow_key_layer = key_type;
	msk->mask_id = ~0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		/* Populate the tci field. */
		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.key->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.key->vlan_id);
		ext->tci = cpu_to_be16(tmp_tci);

		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.mask->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.mask->vlan_id);
		msk->tci = cpu_to_be16(tmp_tci);
	}
}

static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type,
			struct netlink_ext_ack *extack)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		if (!cmsg_port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
			return -EOPNOTSUPP;
		}
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}

static int
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
		       struct netlink_ext_ack *extack)
{
	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		/* Populate mac frame. */
		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
		ether_addr_copy(ext->mac_src, &match.key->src[0]);
		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 t_mpls;

		flow_rule_match_mpls(rule, &match);

		/* Only support matching the first LSE */
		if (match.mask->used_lses != 1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: invalid LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				    match.key->ls[0].mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				    match.key->ls[0].mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				    match.key->ls[0].mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		ext->mpls_lse = cpu_to_be32(t_mpls);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				    match.mask->ls[0].mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				    match.mask->ls[0].mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				    match.mask->ls[0].mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		msk->mpls_lse = cpu_to_be32(t_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
		 * bit, which indicates an mpls ether type but without any
		 * mpls fields.
		 */
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}

	return 0;
}

static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
			 struct nfp_flower_tp_ports *msk,
			 struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		ext->port_src = match.key->src;
		ext->port_dst = match.key->dst;
		msk->port_src = match.mask->src;
		msk->port_dst = match.mask->dst;
	}
}

static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
			  struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ext->proto = match.key->ip_proto;
		msk->proto = match.mask->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		u16 tcp_flags, tcp_flags_mask;
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		tcp_flags = be16_to_cpu(match.key->flags);
		tcp_flags_mask = be16_to_cpu(match.mask->flags);

		if (tcp_flags & TCPHDR_FIN)
			ext->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags_mask & TCPHDR_FIN)
			msk->flags |= NFP_FL_TCP_FLAG_FIN;

		if (tcp_flags & TCPHDR_SYN)
			ext->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags_mask & TCPHDR_SYN)
			msk->flags |= NFP_FL_TCP_FLAG_SYN;

		if (tcp_flags & TCPHDR_RST)
			ext->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags_mask & TCPHDR_RST)
			msk->flags |= NFP_FL_TCP_FLAG_RST;

		if (tcp_flags & TCPHDR_PSH)
			ext->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags_mask & TCPHDR_PSH)
			msk->flags |= NFP_FL_TCP_FLAG_PSH;

		if (tcp_flags & TCPHDR_URG)
			ext->flags |= NFP_FL_TCP_FLAG_URG;
		if (tcp_flags_mask & TCPHDR_URG)
			msk->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
			ext->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
			msk->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
			ext->flags |= NFP_FL_IP_FRAG_FIRST;
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			msk->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}

static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
			struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
	struct flow_match_ipv4_addrs match;

	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(rule, &match);
		ext->ipv4_src = match.key->src;
		ext->ipv4_dst = match.key->dst;
		msk->ipv4_src = match.mask->src;
		msk->ipv4_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}

static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
			struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		ext->ipv6_src = match.key->src;
		ext->ipv6_dst = match.key->dst;
		msk->ipv6_src = match.mask->src;
		msk->ipv6_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}

static int
nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
{
	struct flow_match_enc_opts match;

	flow_rule_match_enc_opts(rule, &match);
	memcpy(ext, match.key->data, match.key->len);
	memcpy(msk, match.mask->data, match.mask->len);

	return 0;
}

static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
				  struct nfp_flower_tun_ipv4 *msk,
				  struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		ext->src = match.key->src;
		ext->dst = match.key->dst;
		msk->src = match.mask->src;
		msk->dst = match.mask->dst;
	}
}

static void
nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
				  struct nfp_flower_tun_ipv6 *msk,
				  struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		ext->src = match.key->src;
		ext->dst = match.key->dst;
		msk->src = match.mask->src;
		msk->dst = match.mask->dst;
	}
}

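/* Tunnel ToS/TTL live in a shared IP extension block that is reused by both
 * the GRE and the UDP (VXLAN/Geneve) tunnel match structures below.
 */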
static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
			      struct nfp_flower_tun_ip_ext *msk,
			      struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}
}

static void
nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
			       struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;
		u32 vni;

		flow_rule_match_enc_keyid(rule, &match);
		/* Shift the VNI into position within the firmware's 32-bit
		 * tunnel ID field.
		 */
		vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
		*key = cpu_to_be32(vni);
		vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
		*key_msk = cpu_to_be32(vni);
	}
}

static void
nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
			       __be16 *flags_msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		*key = match.key->keyid;
		*key_msk = match.mask->keyid;

		*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
		*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
	}
}

static void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
				struct nfp_flower_ipv4_gre_tun *msk,
				struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));

	/* NVGRE is the only supported GRE tunnel type */
	ext->ethertype = cpu_to_be16(ETH_P_TEB);
	msk->ethertype = cpu_to_be16(~0);

	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
				       &ext->tun_flags, &msk->tun_flags, rule);
}

static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
				struct nfp_flower_ipv4_udp_tun *msk,
				struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}

static void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
				struct nfp_flower_ipv6_udp_tun *msk,
				struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));

	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}

static void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
				struct nfp_flower_ipv6_gre_tun *msk,
				struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));

	/* NVGRE is the only supported GRE tunnel type */
	ext->ethertype = cpu_to_be16(ETH_P_TEB);
	msk->ethertype = cpu_to_be16(~0);

	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
				       &ext->tun_flags, &msk->tun_flags, rule);
}

/* Build the match key: each helper below writes the same layer into the
 * unmasked (ext) and mask (msk) buffers in lockstep before both pointers
 * are advanced past that layer.
 */
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct flow_cls_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type,
				  struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	u32 port_id;
	int err;
	u8 *ext;
	u8 *msk;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    rule, key_ls->key_layer);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      port_id, false, tun_type, extack);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, extack);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
					     (struct nfp_flower_mac_mpls *)msk,
					     rule, extack);
		if (err)
			return err;

		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 rule);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_gre_tun *gre_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_gre_tun((void *)ext,
							(void *)msk, rule);
			gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
			dst = &gre_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_gre_tun);
			msk += sizeof(struct nfp_flower_ipv6_gre_tun);

			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_gre_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_gre_tun);
			msk += sizeof(struct nfp_flower_ipv4_gre_tun);

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_udp_tun *udp_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_udp_tun((void *)ext,
							(void *)msk, rule);
			udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
			dst = &udp_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_udp_tun);
			msk += sizeof(struct nfp_flower_ipv6_udp_tun);

			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_udp_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_udp_tun);
			msk += sizeof(struct nfp_flower_ipv4_udp_tun);

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, msk, rule);
			if (err)
				return err;
		}
	}

	return 0;
}