// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"
#include "../nfp_net.h"

#define NFP_FL_MAX_ROUTES		32

#define NFP_TUN_PRE_TUN_RULE_LIMIT	32
#define NFP_TUN_PRE_TUN_RULE_DEL	BIT(0)
#define NFP_TUN_PRE_TUN_IDX_BIT		BIT(3)
#define NFP_TUN_PRE_TUN_IPV6_BIT	BIT(7)

/**
 * struct nfp_tun_pre_tun_rule - rule matched before decap
 * @flags:	options for the rule offload
 * @port_idx:	index of destination MAC address for the rule
 * @vlan_tci:	VLAN info associated with MAC
 * @host_ctx_id: stats context of rule to update
 */
struct nfp_tun_pre_tun_rule {
	__be32 flags;
	__be16 port_idx;
	__be16 vlan_tci;
	__be32 host_ctx_id;
};

/**
 * struct nfp_tun_active_tuns - periodic message of active tunnels
 * @seq:	sequence number of the message
 * @count:	number of tunnels reported in message
 * @flags:	options part of the request
 * @tun_info.ipv4: dest IPv4 address of active route
 * @tun_info.egress_port: port the encapsulated packet egressed
 * @tun_info.extra: reserved for future use
 * @tun_info:	tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info {
		__be32 ipv4;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
 * @seq:	sequence number of the message
 * @count:	number of tunnels reported in message
 * @flags:	options part of the request
 * @tun_info.ipv6: dest IPv6 address of active route
 * @tun_info.egress_port: port the encapsulated packet egressed
 * @tun_info.extra: reserved for future use
 * @tun_info:	tunnels that have sent traffic in reported period
 */
struct nfp_tun_active_tuns_v6 {
	__be32 seq;
	__be32 count;
	__be32 flags;
	struct route_ip_info_v6 {
		struct in6_addr ipv6;
		__be32 egress_port;
		__be32 extra[2];
	} tun_info[];
};

/**
 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv4_addr:		destination ipv4 address for route
 * @reserved:		reserved for future use
 */
struct nfp_tun_req_route_ipv4 {
	__be32 ingress_port;
	__be32 ipv4_addr;
	__be32 reserved[2];
};

/**
 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
 * @ingress_port:	ingress port of packet that signalled request
 * @ipv6_addr:		destination ipv6 address for route
 */
struct nfp_tun_req_route_ipv6 {
	__be32 ingress_port;
	struct in6_addr ipv6_addr;
};

/**
 * struct nfp_offloaded_route - routes that are offloaded to the NFP
 * @list:	list pointer
 * @ip_add:	destination of route - can be IPv4 or IPv6
 */
struct nfp_offloaded_route {
	struct list_head list;
	u8 ip_add[];
};

#define NFP_FL_IPV4_ADDRS_MAX		32

/**
 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv4_addr:	array of IPV4_ADDRS_MAX 32 bit IPv4 addresses
 */
struct nfp_tun_ipv4_addr {
	__be32 count;
	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
};

/**
 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
 * @ipv4_addr:	IP address
 * @ref_count:	number of rules currently using this IP
 * @list:	list pointer
 */
struct nfp_ipv4_addr_entry {
	__be32 ipv4_addr;
	int ref_count;
	struct list_head list;
};

#define NFP_FL_IPV6_ADDRS_MAX		4

/**
 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
 * @count:	number of IPs populated in the array
 * @ipv6_addr:	array of IPV6_ADDRS_MAX 128 bit IPv6 addresses
 */
struct nfp_tun_ipv6_addr {
	__be32 count;
	struct in6_addr ipv6_addr[NFP_FL_IPV6_ADDRS_MAX];
};

#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2

/**
 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
 * @flags:	MAC address offload options
 * @count:	number of MAC addresses in the message (should be 1)
 * @index:	index of MAC address in the lookup table
 * @addr:	interface MAC address
 */
struct nfp_tun_mac_addr_offload {
	__be16 flags;
	__be16 count;
	__be16 index;
	u8 addr[ETH_ALEN];
};

/**
 * struct nfp_neigh_update_work - update neighbour information to nfp
 * @work:	Work queue for writing neigh to the nfp
 * @n:		neighbour entry
 * @app:	Back pointer to app
 */
struct nfp_neigh_update_work {
	struct work_struct work;
	struct neighbour *n;
	struct nfp_app *app;
};

enum nfp_flower_mac_offload_cmd {
	NFP_TUNNEL_MAC_OFFLOAD_ADD =	0,
	NFP_TUNNEL_MAC_OFFLOAD_DEL =	1,
	NFP_TUNNEL_MAC_OFFLOAD_MOD =	2,
};

#define NFP_MAX_MAC_INDEX		0xff

/**
 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
 * @ht_node:		Hashtable entry
 * @addr:		Offloaded MAC address
 * @index:		Offloaded index for given MAC address
 * @ref_count:		Number of devs using this MAC address
 * @repr_list:		List of reprs sharing this MAC address
 * @bridge_count:	Number of bridge/internal devs with MAC
 */
struct nfp_tun_offloaded_mac {
	struct rhash_head ht_node;
	u8 addr[ETH_ALEN];
	u16 index;
	int ref_count;
	struct list_head repr_list;
	int bridge_count;
};

static const struct rhashtable_params offloaded_macs_params = {
	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
	.key_len	= ETH_ALEN,
	.automatic_shrinking	= true,
};
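
/* nfp_tunnel_keep_alive() - handler for the periodic active-tunnels cmsg.
 * For each IPv4 destination the firmware reports as in use, look up the
 * kernel neighbour entry on the egress netdev and poke it so it is not
 * aged out while the firmware is still sending to it.
 */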
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_active_tuns *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	__be32 ipv4_addr;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_MAX_ROUTES) {
		nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv4_addr = payload->tun_info[i].ipv4;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
}

void nfp_tunnel_keep_alive_v6(struct nfp_app *app, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct nfp_tun_active_tuns_v6 *payload;
	struct net_device *netdev;
	int count, i, pay_len;
	struct neighbour *n;
	void *ipv6_add;
	u32 port;

	payload = nfp_flower_cmsg_get_data(skb);
	count = be32_to_cpu(payload->count);
	if (count > NFP_FL_IPV6_ADDRS_MAX) {
		nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n");
		return;
	}

	pay_len = nfp_flower_cmsg_get_data_len(skb);
	if (pay_len != struct_size(payload, tun_info, count)) {
		nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
		return;
	}

	rcu_read_lock();
	for (i = 0; i < count; i++) {
		ipv6_add = &payload->tun_info[i].ipv6;
		port = be32_to_cpu(payload->tun_info[i].egress_port);
		netdev = nfp_app_dev_get(app, port, NULL);
		if (!netdev)
			continue;

		n = neigh_lookup(&nd_tbl, ipv6_add, netdev);
		if (!n)
			continue;

		/* Update the used timestamp of neighbour */
		neigh_event_send(n, NULL);
		neigh_release(n);
	}
	rcu_read_unlock();
#endif
}
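
/* All tunnel config cmsgs are sent through here. Neighbour payloads end
 * with optional ext and lag sections; when the firmware has not
 * advertised NFP_FL_FEATS_DECAP_V2 or NFP_FL_FEATS_TUNNEL_NEIGH_LAG the
 * message is truncated accordingly, so callers always pass the full
 * structure size.
 */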
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
			 gfp_t flag)
{
	struct nfp_flower_priv *priv = app->priv;
	struct sk_buff *skb;
	unsigned char *msg;

	if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) &&
	    (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
	     mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
		plen -= sizeof(struct nfp_tun_neigh_ext);

	if (!(priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) &&
	    (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
	     mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6))
		plen -= sizeof(struct nfp_tun_neigh_lag);

	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));

	nfp_ctrl_tx(app->ctrl, skb);
	return 0;
}
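
/* Link a pre-tunnel rule with a neighbour entry when both sides agree:
 * same IP version and matching local/remote MAC addresses. Once linked,
 * the neighbour's ext section carries the rule's host context and VLAN
 * info for the firmware.
 */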
static void
nfp_tun_mutual_link(struct nfp_predt_entry *predt,
		    struct nfp_neigh_entry *neigh)
{
	struct nfp_fl_payload *flow_pay = predt->flow_pay;
	struct nfp_tun_neigh_ext *ext;
	struct nfp_tun_neigh *common;

	if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6)
		return;

	/* In the case of bonding it is possible that there might already
	 * be a flow linked (as the MAC address gets shared). If a flow
	 * is already linked just return.
	 */
	if (neigh->flow)
		return;

	common = neigh->is_ipv6 ?
		 &((struct nfp_tun_neigh_v6 *)neigh->payload)->common :
		 &((struct nfp_tun_neigh_v4 *)neigh->payload)->common;
	ext = neigh->is_ipv6 ?
	      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
	      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;

	if (memcmp(flow_pay->pre_tun_rule.loc_mac,
		   common->src_addr, ETH_ALEN) ||
	    memcmp(flow_pay->pre_tun_rule.rem_mac,
		   common->dst_addr, ETH_ALEN))
		return;

	list_add(&neigh->list_head, &predt->nn_list);
	neigh->flow = predt;
	ext->host_ctx = flow_pay->meta.host_ctx_id;
	ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci;
	ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid;
}

static void
nfp_tun_link_predt_entries(struct nfp_app *app,
			   struct nfp_neigh_entry *nn_entry)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_predt_entry *predt, *tmp;

	list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) {
		nfp_tun_mutual_link(predt, nn_entry);
	}
}

void nfp_tun_link_and_update_nn_entries(struct nfp_app *app,
					struct nfp_predt_entry *predt)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *nn_entry;
	struct rhashtable_iter iter;
	size_t neigh_size;
	u8 type;

	rhashtable_walk_enter(&priv->neigh_table, &iter);
	rhashtable_walk_start(&iter);
	while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(nn_entry))
			continue;
		nfp_tun_mutual_link(predt, nn_entry);
		neigh_size = nn_entry->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = nn_entry->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					   NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

static void nfp_tun_cleanup_nn_entries(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_neigh_entry *neigh;
	struct nfp_tun_neigh_ext *ext;
	struct rhashtable_iter iter;
	size_t neigh_size;
	u8 type;

	rhashtable_walk_enter(&priv->neigh_table, &iter);
	rhashtable_walk_start(&iter);
	while ((neigh = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(neigh))
			continue;
		ext = neigh->is_ipv6 ?
		      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
		      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);

		neigh_size = neigh->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
					 GFP_ATOMIC);

		rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node,
				       neigh_table_params);
		if (neigh->flow)
			list_del(&neigh->list_head);
		kfree(neigh);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
					  struct nfp_predt_entry *predt)
{
	struct nfp_neigh_entry *neigh, *tmp;
	struct nfp_tun_neigh_ext *ext;
	size_t neigh_size;
	u8 type;

	list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) {
		ext = neigh->is_ipv6 ?
		      &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext :
		      &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext;
		neigh->flow = NULL;
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);
		list_del(&neigh->list_head);
		neigh_size = neigh->is_ipv6 ?
			     sizeof(struct nfp_tun_neigh_v6) :
			     sizeof(struct nfp_tun_neigh_v4);
		type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
					NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload,
					 GFP_ATOMIC);
	}
}
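
/* Central point for pushing neighbour state to the firmware. Three cases
 * are handled: a valid neighbour with no cached entry is added, a cached
 * entry whose neighbour became invalid is deleted (dst-only payload),
 * and a still-valid cached entry is re-sent when the destination MAC
 * changed or an update is forced via @override.
 */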
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
		    void *flow, struct neighbour *neigh, bool is_ipv6,
		    bool override)
{
	bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
	size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
			    sizeof(struct nfp_tun_neigh_v4);
	unsigned long cookie = (unsigned long)neigh;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_neigh_lag lag_info;
	struct nfp_neigh_entry *nn_entry;
	u32 port_id;
	u8 mtype;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
	if (!port_id)
		return;

	if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) {
		memset(&lag_info, 0, sizeof(struct nfp_tun_neigh_lag));
		nfp_flower_lag_get_info_from_netdev(app, netdev, &lag_info);
	}

	spin_lock_bh(&priv->predt_lock);
	nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
					  neigh_table_params);
	if (!nn_entry && !neigh_invalid) {
		struct nfp_tun_neigh_ext *ext;
		struct nfp_tun_neigh_lag *lag;
		struct nfp_tun_neigh *common;

		nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size,
				   GFP_ATOMIC);
		if (!nn_entry)
			goto err;

		nn_entry->payload = (char *)&nn_entry[1];
		nn_entry->neigh_cookie = cookie;
		nn_entry->is_ipv6 = is_ipv6;
		nn_entry->flow = NULL;
		if (is_ipv6) {
			struct flowi6 *flowi6 = (struct flowi6 *)flow;
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			payload->src_ipv6 = flowi6->saddr;
			payload->dst_ipv6 = flowi6->daddr;
			common = &payload->common;
			ext = &payload->ext;
			lag = &payload->lag;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct flowi4 *flowi4 = (struct flowi4 *)flow;
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			payload->src_ipv4 = flowi4->saddr;
			payload->dst_ipv4 = flowi4->daddr;
			common = &payload->common;
			ext = &payload->ext;
			lag = &payload->lag;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}
		ext->host_ctx = cpu_to_be32(U32_MAX);
		ext->vlan_tpid = cpu_to_be16(U16_MAX);
		ext->vlan_tci = cpu_to_be16(U16_MAX);
		ether_addr_copy(common->src_addr, netdev->dev_addr);
		neigh_ha_snapshot(common->dst_addr, neigh, netdev);

		if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
			memcpy(lag, &lag_info, sizeof(struct nfp_tun_neigh_lag));
		common->port_id = cpu_to_be32(port_id);

		if (rhashtable_insert_fast(&priv->neigh_table,
					   &nn_entry->ht_node,
					   neigh_table_params))
			goto err;

		nfp_tun_link_predt_entries(app, nn_entry);
		nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);
	} else if (nn_entry && neigh_invalid) {
		if (is_ipv6) {
			struct flowi6 *flowi6 = (struct flowi6 *)flow;
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			memset(payload, 0, sizeof(struct nfp_tun_neigh_v6));
			payload->dst_ipv6 = flowi6->daddr;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct flowi4 *flowi4 = (struct flowi4 *)flow;
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			memset(payload, 0, sizeof(struct nfp_tun_neigh_v4));
			payload->dst_ipv4 = flowi4->daddr;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}
		/* Trigger ARP to verify invalid neighbour state. */
		neigh_event_send(neigh, NULL);
		rhashtable_remove_fast(&priv->neigh_table,
				       &nn_entry->ht_node,
				       neigh_table_params);

		nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
					 nn_entry->payload,
					 GFP_ATOMIC);

		if (nn_entry->flow)
			list_del(&nn_entry->list_head);
		kfree(nn_entry);
	} else if (nn_entry && !neigh_invalid) {
		struct nfp_tun_neigh *common;
		u8 dst_addr[ETH_ALEN];
		bool is_mac_change;

		if (is_ipv6) {
			struct nfp_tun_neigh_v6 *payload;

			payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload;
			common = &payload->common;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6;
		} else {
			struct nfp_tun_neigh_v4 *payload;

			payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload;
			common = &payload->common;
			mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
		}

		ether_addr_copy(dst_addr, common->dst_addr);
		neigh_ha_snapshot(common->dst_addr, neigh, netdev);
		is_mac_change = !ether_addr_equal(dst_addr, common->dst_addr);
		if (override || is_mac_change) {
			if (is_mac_change && nn_entry->flow) {
				list_del(&nn_entry->list_head);
				nn_entry->flow = NULL;
			}
			nfp_tun_link_predt_entries(app, nn_entry);
			nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
						 nn_entry->payload,
						 GFP_ATOMIC);
		}
	}

	spin_unlock_bh(&priv->predt_lock);
	return;

err:
	kfree(nn_entry);
	spin_unlock_bh(&priv->predt_lock);
	nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
}
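
/* Neighbour events are delivered in atomic notifier context, so the
 * route lookup and cmsg transmit are deferred to a work item which
 * holds a reference on the neighbour until it has run.
 */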
static void
nfp_tun_release_neigh_update_work(struct nfp_neigh_update_work *update_work)
{
	neigh_release(update_work->n);
	kfree(update_work);
}

static void nfp_tun_neigh_update(struct work_struct *work)
{
	struct nfp_neigh_update_work *update_work;
	struct nfp_app *app;
	struct neighbour *n;
	bool neigh_invalid;
	int err;

	update_work = container_of(work, struct nfp_neigh_update_work, work);
	app = update_work->app;
	n = update_work->n;

	if (!nfp_flower_get_port_id_from_netdev(app, n->dev))
		goto out;

#if IS_ENABLED(CONFIG_INET)
	neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
	if (n->tbl->family == AF_INET6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct flowi6 flow6 = {};

		flow6.daddr = *(struct in6_addr *)n->primary_key;
		if (!neigh_invalid) {
			struct dst_entry *dst;
			/* Use ip6_dst_lookup_flow to populate flow6.saddr
			 * and other fields. This information is only needed
			 * for new entries, lookup can be skipped when an entry
			 * gets invalidated - as only the daddr is needed for
			 * deleting.
			 */
			dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
						  &flow6, NULL);
			if (IS_ERR(dst))
				goto out;

			dst_release(dst);
		}
		nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
#endif /* CONFIG_IPV6 */
	} else {
		struct flowi4 flow4 = {};

		flow4.daddr = *(__be32 *)n->primary_key;
		if (!neigh_invalid) {
			struct rtable *rt;
			/* Use ip_route_output_key to populate flow4.saddr and
			 * other fields. This information is only needed for
			 * new entries, lookup can be skipped when an entry
			 * gets invalidated - as only the daddr is needed for
			 * deleting.
			 */
			rt = ip_route_output_key(dev_net(n->dev), &flow4);
			err = PTR_ERR_OR_ZERO(rt);
			if (err)
				goto out;

			ip_rt_put(rt);
		}
		nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
	}
#endif /* CONFIG_INET */
out:
	nfp_tun_release_neigh_update_work(update_work);
}

static struct nfp_neigh_update_work *
nfp_tun_alloc_neigh_update_work(struct nfp_app *app, struct neighbour *n)
{
	struct nfp_neigh_update_work *update_work;

	update_work = kzalloc(sizeof(*update_work), GFP_ATOMIC);
	if (!update_work)
		return NULL;

	INIT_WORK(&update_work->work, nfp_tun_neigh_update);
	neigh_hold(n);
	update_work->n = n;
	update_work->app = app;

	return update_work;
}

static int
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct nfp_neigh_update_work *update_work;
	struct nfp_flower_priv *app_priv;
	struct netevent_redirect *redir;
	struct neighbour *n;
	struct nfp_app *app;

	switch (event) {
	case NETEVENT_REDIRECT:
		redir = (struct netevent_redirect *)ptr;
		n = redir->neigh;
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = (struct neighbour *)ptr;
		break;
	default:
		return NOTIFY_DONE;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
#else
	if (n->tbl != &arp_tbl)
#endif
		return NOTIFY_DONE;

	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
	app = app_priv->app;
	update_work = nfp_tun_alloc_neigh_update_work(app, n);
	if (!update_work)
		return NOTIFY_DONE;

	queue_work(system_highpri_wq, &update_work->work);

	return NOTIFY_DONE;
}
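
/* The firmware requests a route when it must encapsulate to a
 * destination it has no neighbour entry for. Resolve the route in the
 * namespace of the ingress port, look up the neighbour on the resulting
 * dst and push it straight back to the firmware (override = true).
 */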
void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv4 *payload;
	struct net_device *netdev;
	struct flowi4 flow = {};
	struct neighbour *n;
	struct rtable *rt;
	int err;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;
	dev_hold(netdev);

	flow.daddr = payload->ipv4_addr;
	flow.flowi4_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET)
	/* Do a route lookup on same namespace as ingress port. */
	rt = ip_route_output_key(dev_net(netdev), &flow);
	err = PTR_ERR_OR_ZERO(rt);
	if (err)
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	/* Get the neighbour entry for the lookup */
	n = dst_neigh_lookup(&rt->dst, &flow.daddr);
	ip_rt_put(rt);
	if (!n)
		goto fail_rcu_unlock;
	rcu_read_unlock();

	nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
	neigh_release(n);
	dev_put(netdev);
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	dev_put(netdev);
	nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}

void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_tun_req_route_ipv6 *payload;
	struct net_device *netdev;
	struct flowi6 flow = {};
	struct dst_entry *dst;
	struct neighbour *n;

	payload = nfp_flower_cmsg_get_data(skb);

	rcu_read_lock();
	netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
	if (!netdev)
		goto fail_rcu_unlock;
	dev_hold(netdev);

	flow.daddr = payload->ipv6_addr;
	flow.flowi6_proto = IPPROTO_UDP;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow,
					      NULL);
	if (IS_ERR(dst))
		goto fail_rcu_unlock;
#else
	goto fail_rcu_unlock;
#endif

	n = dst_neigh_lookup(dst, &flow.daddr);
	dst_release(dst);
	if (!n)
		goto fail_rcu_unlock;
	rcu_read_unlock();

	nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
	neigh_release(n);
	dev_put(netdev);
	return;

fail_rcu_unlock:
	rcu_read_unlock();
	dev_put(netdev);
	nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
}
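
/* The firmware is given the complete list of offloaded tunnel endpoint
 * addresses in every update, so the whole ref-counted list is replayed
 * each time an address is added or removed.
 */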
static void nfp_tun_write_ipv4_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct nfp_tun_ipv4_addr payload;
	struct list_head *ptr, *storage;
	int count;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
	mutex_lock(&priv->tun.ipv4_off_lock);
	count = 0;
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
			mutex_unlock(&priv->tun.ipv4_off_lock);
			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
			return;
		}
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		payload.ipv4_addr[count++] = entry->ipv4_addr;
	}
	payload.count = cpu_to_be32(count);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
				 sizeof(struct nfp_tun_ipv4_addr),
				 &payload, GFP_KERNEL);
}

void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv4_off_lock);
			return;
		}
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv4_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return;
	}
	entry->ipv4_addr = ipv4;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *entry;
	struct list_head *ptr, *storage;

	mutex_lock(&priv->tun.ipv4_off_lock);
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		if (entry->ipv4_addr == ipv4) {
			entry->ref_count--;
			if (!entry->ref_count) {
				list_del(&entry->list);
				kfree(entry);
			}
			break;
		}
	}
	mutex_unlock(&priv->tun.ipv4_off_lock);

	nfp_tun_write_ipv4_list(app);
}

static void nfp_tun_write_ipv6_list(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;
	struct nfp_tun_ipv6_addr payload;
	int count = 0;

	memset(&payload, 0, sizeof(struct nfp_tun_ipv6_addr));
	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) {
		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
			nfp_flower_cmsg_warn(app, "Too many IPv6 tunnel endpoint addresses, some cannot be offloaded.\n");
			break;
		}
		payload.ipv6_addr[count++] = entry->ipv6_addr;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);
	payload.count = cpu_to_be32(count);

	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6,
				 sizeof(struct nfp_tun_ipv6_addr),
				 &payload, GFP_KERNEL);
}

struct nfp_ipv6_addr_entry *
nfp_tunnel_add_ipv6_off(struct nfp_app *app, struct in6_addr *ipv6)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv6_addr_entry *entry;

	mutex_lock(&priv->tun.ipv6_off_lock);
	list_for_each_entry(entry, &priv->tun.ipv6_off_list, list)
		if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) {
			entry->ref_count++;
			mutex_unlock(&priv->tun.ipv6_off_lock);
			return entry;
		}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		mutex_unlock(&priv->tun.ipv6_off_lock);
		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
		return NULL;
	}
	entry->ipv6_addr = *ipv6;
	entry->ref_count = 1;
	list_add_tail(&entry->list, &priv->tun.ipv6_off_list);
	mutex_unlock(&priv->tun.ipv6_off_lock);

	nfp_tun_write_ipv6_list(app);

	return entry;
}

void
nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
{
	struct nfp_flower_priv *priv = app->priv;
	bool freed = false;

	mutex_lock(&priv->tun.ipv6_off_lock);
	if (!--entry->ref_count) {
		list_del(&entry->list);
		kfree(entry);
		freed = true;
	}
	mutex_unlock(&priv->tun.ipv6_off_lock);

	if (freed)
		nfp_tun_write_ipv6_list(app);
}

static int
__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
	struct nfp_tun_mac_addr_offload payload;

	memset(&payload, 0, sizeof(payload));

	if (del)
		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);

	/* FW supports multiple MACs per cmsg but restrict to single. */
	payload.count = cpu_to_be16(1);
	payload.index = cpu_to_be16(idx);
	ether_addr_copy(payload.addr, mac);

	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
					sizeof(struct nfp_tun_mac_addr_offload),
					&payload, GFP_KERNEL);
}
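
/* Helpers for the offloaded MAC index namespace. The lower byte of an
 * index holds the port type: physical port reprs encode their port id
 * above a PHYS_PORT type, while MACs shared by several devices or owned
 * by non-repr netdevs get a global id from the mac_off_ids IDA above an
 * OTHER_PORT type. NFP_TUN_PRE_TUN_IDX_BIT marks indexes whose matches
 * must go to the pre-tunnel rule table.
 */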
static bool nfp_tunnel_port_is_phy_repr(int port)
{
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
		return true;

	return false;
}

static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
	return nfp_mac_idx >> 8;
}

static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
				      offloaded_macs_params);
}

static void
nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
					   struct net_device *netdev, bool mod)
{
	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;

		/* If modifying MAC, remove repr from old list first. */
		if (mod)
			list_del(&repr_priv->mac_list);

		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
	} else if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count++;
	}

	entry->ref_count++;
}
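
/* Take a reference on, or create, the offloaded MAC entry for a netdev.
 * An existing global index is reused where possible; otherwise an index
 * is chosen (physical port based, or IDA based when the MAC is shared
 * or belongs to a non-repr/bridge device) and pushed to the firmware.
 */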
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  int port, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_tun_offloaded_mac *entry;
	int ida_idx = -1, err;
	u16 nfp_mac_idx = 0;

	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
		if (entry->bridge_count ||
		    !nfp_flower_is_supported_bridge(netdev)) {
			nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
								   netdev, mod);
			return 0;
		}

		/* MAC is global but matches need to go to pre_tun table. */
		nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
	}

	if (!nfp_mac_idx) {
		/* Assign a global index if non-repr or MAC is now shared. */
		if (entry || !port) {
			ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
						NFP_MAX_MAC_INDEX, GFP_KERNEL);
			if (ida_idx < 0)
				return ida_idx;

			nfp_mac_idx =
				nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);

			if (nfp_flower_is_supported_bridge(netdev))
				nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;

		} else {
			nfp_mac_idx =
				nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		}
	}

	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			err = -ENOMEM;
			goto err_free_ida;
		}

		ether_addr_copy(entry->addr, netdev->dev_addr);
		INIT_LIST_HEAD(&entry->repr_list);

		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
					   &entry->ht_node,
					   offloaded_macs_params)) {
			err = -ENOMEM;
			goto err_free_entry;
		}
	}

	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
				       nfp_mac_idx, false);
	if (err) {
		/* If not shared then free. */
		if (!entry->ref_count)
			goto err_remove_hash;
		goto err_free_ida;
	}

	entry->index = nfp_mac_idx;
	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);

	return 0;

err_remove_hash:
	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
			       offloaded_macs_params);
err_free_entry:
	kfree(entry);
err_free_ida:
	if (ida_idx != -1)
		ida_free(&priv->tun.mac_off_ids, ida_idx);

	return err;
}
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
			  const u8 *mac, bool mod)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_tun_offloaded_mac *entry;
	struct nfp_repr *repr;
	u16 nfp_mac_idx;
	int ida_idx;

	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
	if (!entry)
		return 0;

	entry->ref_count--;
	/* If del is part of a mod then mac_list is still in use elsewhere. */
	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
		repr = netdev_priv(netdev);
		repr_priv = repr->app_priv;
		list_del(&repr_priv->mac_list);
	}

	if (nfp_flower_is_supported_bridge(netdev)) {
		entry->bridge_count--;

		if (!entry->bridge_count && entry->ref_count) {
			nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
			if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
						     false)) {
				nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
						     netdev_name(netdev));
				return 0;
			}

			entry->index = nfp_mac_idx;
			return 0;
		}
	}

	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
		int port, err;

		repr_priv = list_first_entry(&entry->repr_list,
					     struct nfp_flower_repr_priv,
					     mac_list);
		repr = repr_priv->nfp_repr;
		port = nfp_repr_get_port_id(repr->netdev);
		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
		if (err) {
			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
					     netdev_name(netdev));
			return 0;
		}

		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_free(&priv->tun.mac_off_ids, ida_idx);
		entry->index = nfp_mac_idx;
		return 0;
	}

	if (entry->ref_count)
		return 0;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
					    &entry->ht_node,
					    offloaded_macs_params));

	if (nfp_flower_is_supported_bridge(netdev))
		nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
	else
		nfp_mac_idx = entry->index;

	/* If MAC has global ID then extract and free the ida entry. */
	if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
		ida_free(&priv->tun.mac_off_ids, ida_idx);
	}

	kfree(entry);

	return __nfp_tunnel_offload_mac(app, mac, 0, true);
}
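
/* Common entry point for MAC offload changes. Works out whether the
 * netdev is a relevant repr or a supported non-repr device, then applies
 * the ADD/DEL/MOD command; a MOD of a MAC that was never offloaded is
 * treated as an ADD.
 */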
static int
nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
		       enum nfp_flower_mac_offload_cmd cmd)
{
	struct nfp_flower_non_repr_priv *nr_priv = NULL;
	bool non_repr = false, *mac_offloaded;
	u8 *off_mac = NULL;
	int err, port = 0;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return 0;

		repr_priv = repr->app_priv;
		if (repr_priv->on_bridge)
			return 0;

		mac_offloaded = &repr_priv->mac_offloaded;
		off_mac = &repr_priv->offloaded_mac_addr[0];
		port = nfp_repr_get_port_id(netdev);
		if (!nfp_tunnel_port_is_phy_repr(port))
			return 0;
	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
		if (!nr_priv)
			return -ENOMEM;

		mac_offloaded = &nr_priv->mac_offloaded;
		off_mac = &nr_priv->offloaded_mac_addr[0];
		non_repr = true;
	} else {
		return 0;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;

	switch (cmd) {
	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
		if (err)
			goto err_put_non_repr_priv;

		if (non_repr)
			__nfp_flower_non_repr_priv_get(nr_priv);

		*mac_offloaded = true;
		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
		/* Only attempt delete if add was successful. */
		if (!*mac_offloaded)
			break;

		if (non_repr)
			__nfp_flower_non_repr_priv_put(nr_priv);

		*mac_offloaded = false;

		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
						false);
		if (err)
			goto err_put_non_repr_priv;

		break;
	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
		/* Ignore if changing to the same address. */
		if (ether_addr_equal(netdev->dev_addr, off_mac))
			break;

		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
		if (err)
			goto err_put_non_repr_priv;

		/* Delete the previous MAC address. */
		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
					     netdev_name(netdev));

		ether_addr_copy(off_mac, netdev->dev_addr);
		break;
	default:
		err = -EINVAL;
		goto err_put_non_repr_priv;
	}

	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return 0;

err_put_non_repr_priv:
	if (non_repr)
		__nfp_flower_non_repr_priv_put(nr_priv);

	return err;
}
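
/* Netdev notifier glue: NETDEV_UP/DOWN map to MAC add/delete,
 * NETDEV_CHANGEADDR to modify, and NETDEV_CHANGEUPPER tracks reprs
 * moving on or off a bridge so that their MACs are withdrawn while
 * bridged.
 */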
int nfp_tunnel_mac_event_handler(struct nfp_app *app,
				 struct net_device *netdev,
				 unsigned long event, void *ptr)
{
	int err;

	if (event == NETDEV_DOWN) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_UP) {
		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEADDR) {
		/* Only offload addr change if netdev is already up. */
		if (!(netdev->flags & IFF_UP))
			return NOTIFY_OK;

		err = nfp_tunnel_offload_mac(app, netdev,
					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
					     netdev_name(netdev));
	} else if (event == NETDEV_CHANGEUPPER) {
		/* If a repr is attached to a bridge then tunnel packets
		 * entering the physical port are directed through the bridge
		 * datapath and cannot be directly detunneled. Therefore,
		 * associated offloaded MACs and indexes should not be used
		 * by fw for detunneling.
		 */
		struct netdev_notifier_changeupper_info *info = ptr;
		struct net_device *upper = info->upper_dev;
		struct nfp_flower_repr_priv *repr_priv;
		struct nfp_repr *repr;

		if (!nfp_netdev_is_nfp_repr(netdev) ||
		    !nfp_flower_is_supported_bridge(upper))
			return NOTIFY_OK;

		repr = netdev_priv(netdev);
		if (repr->app != app)
			return NOTIFY_OK;

		repr_priv = repr->app_priv;

		if (info->linking) {
			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_DEL))
				nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
						     netdev_name(netdev));
			repr_priv->on_bridge = true;
		} else {
			repr_priv->on_bridge = false;

			if (!(netdev->flags & IFF_UP))
				return NOTIFY_OK;

			if (nfp_tunnel_offload_mac(app, netdev,
						   NFP_TUNNEL_MAC_OFFLOAD_ADD))
				nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
						     netdev_name(netdev));
		}
	}
	return NOTIFY_OK;
}
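
/* Offload a pre-tunnel rule: resolve the internal port's MAC to its
 * offloaded index, encode the IP version in the index and send the rule
 * to the firmware. The index and VLAN are cached on the flow so the
 * rule can be deleted even if the device is gone by then.
 */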
int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
				 struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_offloaded_mac *mac_entry;
	struct nfp_flower_meta_tci *key_meta;
	struct nfp_tun_pre_tun_rule payload;
	struct net_device *internal_dev;
	int err;

	if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
		return -ENOSPC;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	internal_dev = flow->pre_tun_rule.dev;
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.host_ctx_id = flow->meta.host_ctx_id;

	/* Lookup MAC index for the pre-tunnel rule egress device.
	 * Note that because the device is always an internal port, it will
	 * have a constant global index so does not need to be tracked.
	 */
	mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
						     internal_dev->dev_addr);
	if (!mac_entry)
		return -ENOENT;

	/* Set/clear IPv6 bit. After the cpu_to_be16() swap it ends up in
	 * the MSB of port_idx.
	 */
	key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
		mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
	else
		mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;

	payload.port_idx = cpu_to_be16(mac_entry->index);

	/* Copy mac id and vlan to flow - dev may not exist at delete time. */
	flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
	flow->pre_tun_rule.port_idx = payload.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt++;

	return 0;
}

int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
				     struct nfp_fl_payload *flow)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_tun_pre_tun_rule payload;
	u32 tmp_flags = 0;
	int err;

	memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));

	tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
	payload.flags = cpu_to_be32(tmp_flags);
	payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
	payload.port_idx = flow->pre_tun_rule.port_idx;

	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
				       sizeof(struct nfp_tun_pre_tun_rule),
				       (unsigned char *)&payload, GFP_KERNEL);
	if (err)
		return err;

	app_priv->pre_tun_rule_cnt--;

	return 0;
}
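
/* Start/stop of tunnel offload state. nfp_tunnel_config_start() pairs
 * with nfp_tunnel_config_stop(); the netevent notifier registered here
 * feeds nfp_tun_neigh_event_handler() above.
 */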
int nfp_tunnel_config_start(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	/* Initialise rhash for MAC offload tracking. */
	err = rhashtable_init(&priv->tun.offloaded_macs,
			      &offloaded_macs_params);
	if (err)
		return err;

	ida_init(&priv->tun.mac_off_ids);

	/* Initialise priv data for IPv4/v6 offloading. */
	mutex_init(&priv->tun.ipv4_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
	mutex_init(&priv->tun.ipv6_off_lock);
	INIT_LIST_HEAD(&priv->tun.ipv6_off_list);

	/* Initialise priv data for neighbour offloading. */
	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;

	err = register_netevent_notifier(&priv->tun.neigh_nb);
	if (err) {
		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
					    nfp_check_rhashtable_empty, NULL);
		return err;
	}

	return 0;
}

void nfp_tunnel_config_stop(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_ipv4_addr_entry *ip_entry;
	struct list_head *ptr, *storage;

	unregister_netevent_notifier(&priv->tun.neigh_nb);

	ida_destroy(&priv->tun.mac_off_ids);

	/* Free any memory that may be occupied by ipv4 list. */
	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
		list_del(&ip_entry->list);
		kfree(ip_entry);
	}

	mutex_destroy(&priv->tun.ipv6_off_lock);

	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
				    nfp_check_rhashtable_empty, NULL);

	nfp_tun_cleanup_nn_entries(app);
}