/*-
 * Copyright (c) 2023 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ipsec.h"

#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <net/pfkeyv2.h>
#include <netipsec/key_var.h>
#include <netipsec/keydb.h>
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#include <netipsec/ipsec_offload.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
#include <dev/mlx5/mlx5_core/fs_core.h>
#include <dev/mlx5/mlx5_core/fs_chains.h>

/*
 * TX tables are organized differently for Ethernet and for RoCE:
 *
 *                        +=========+
 *        Ethernet Tx     | SA KSPI | match
 *  --------------------->|Flowtable|----->+         +
 *                        |         |\     |        / \
 *                        +=========+ |    |       /   \       +=========+     +=========+
 *                             miss   |    |      /     \      | Status  |     |         |
 *                        DROP<-------+    |---->|Encrypt|---->|Flowtable|---->|  TX NS  |
 *                                         |      \     /      |         |     |         |
 *                                         |       \   /       +=========+     +=========+
 *       +=========+      +=========+      |        \ /             |
 *  RoCE |  Policy | match|SA ReqId |match |         +              |
 *   Tx  |Flowtable|----->|Flowtable|----->+                        |
 *  ---->|IP header|      |ReqId+IP |                               |
 *       |         |      | header  |-------------------------------+
 *       +=========+      +=========+  miss                         |
 *            |                                                     |
 *            | miss                                                |
 *            +------------------------------------------------------
 *
 *                                                                      +=========+
 *                                                                      |  RDMA   |
 *                                                                      |Flowtable|
 *                                                                      |         |
 * Rx Tables and rules:                                                 +=========+
 *                                            +                             /
 *       +=========+      +=========+        / \      +=========+          / match
 *       | Policy  |      |   SA    |       /   \     | Status  |   +=========+
 *  ---->|Flowtable| match|Flowtable| match/     \    |Flowtable|-->|  RoCE   |
 *       |IP header|----->|IP header|---->|Decrypt|-->|         |   |Flowtable|
 *       |         |      |+ESP+SPI |      \     /    |         |   | Roce V2 |
 *       +=========+      +=========+       \   /     +=========+   | UDP port|
 *            |                |             \ /                    +=========+
 *            |                |              +                          \ miss
 *            | miss           | miss                                     \
 *            +----------------+------------------------------------->+=========+
 *                                                                    | Ethernet|
 *                                                                    |  RX NS  |
 *                                                                    |         |
 *                                                                    +=========+
 *
 */
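/*
 * Worked example of the diagrams above (descriptive only): an outbound
 * Ethernet packet whose WQE metadata carries an SA's KSPI hits the SA KSPI
 * flowtable, is encrypted, and is accounted in the Status flowtable before
 * leaving through the TX namespace; a packet with no IPsec tag takes the
 * bypass rule straight to Status. On RX, traffic first passes the Policy
 * and SA tables (IP header, ESP and SPI match), is decrypted, and the
 * Status table then forwards it either to the regular Ethernet RX
 * namespace or, for RoCE v2 traffic (UDP port 4791), to the RDMA tables.
 */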
#define NUM_IPSEC_FTE BIT(15)
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40

struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;
	struct mlx5_fc *drop;
};

struct mlx5e_ipsec_ft {
	struct mutex mutex; /* Protect changes to this struct */
	struct mlx5_flow_table *pol;
	struct mlx5_flow_table *sa_kspi;
	struct mlx5_flow_table *sa;
	struct mlx5_flow_table *status;
	u32 refcnt;
};

struct mlx5e_ipsec_tx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_namespace *ns;
};

struct mlx5e_ipsec_miss {
	struct mlx5_flow_group *group;
	struct mlx5_flow_handle *rule;
};

struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss kspi_miss;
	struct mlx5e_ipsec_rule status;
	struct mlx5e_ipsec_rule kspi_bypass_rule; /* rule for IPsec bypass */
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	struct mlx5e_ipsec_tx_roce roce;
};

struct mlx5e_ipsec_rx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ipsec_miss roce_miss;

	struct mlx5_flow_table *ft_rdma;
	struct mlx5_flow_namespace *ns_rdma;
};

struct mlx5e_ipsec_rx_ip_type {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_handle *ipv4_rule;
	struct mlx5_flow_handle *ipv6_rule;
	struct mlx5e_ipsec_miss miss;
};

struct mlx5e_ipsec_rx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	struct mlx5e_ipsec_rx_roce roce;
};

static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
    u16 kspi);
static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec);

static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
{
	/* Non fragmented */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
	/* ESP header */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
	/* SPI number */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

	if (encap) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.inner_esp_spi);
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters.inner_esp_spi, spi);
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
	}
}
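/*
 * Example (illustrative sketch, not driver code): the helpers above are
 * additive and compose one match specification. An inbound ESP match for a
 * hypothetical SPI value could be built as:
 *
 *	struct mlx5_flow_spec *spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *
 *	setup_fte_esp(spec);			// outer ip_protocol == ESP
 *	setup_fte_spi(spec, spi, false);	// misc outer_esp_spi == spi
 *	setup_fte_no_frags(spec);		// non-fragmented packets only
 *
 * rx_add_rule() below follows exactly this pattern before installing the
 * flow entry.
 */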
static void
setup_fte_vid(struct mlx5_flow_spec *spec, u16 vid)
{
	/* virtual lan tag */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value,
	    outer_headers.cvlan_tag, 1);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
	    vid);
}

static void
clear_fte_vid(struct mlx5_flow_spec *spec)
{
	MLX5_SET(fte_match_param, spec->match_criteria,
	    outer_headers.cvlan_tag, 0);
	MLX5_SET(fte_match_param, spec->match_value,
	    outer_headers.cvlan_tag, 0);
	MLX5_SET(fte_match_param, spec->match_criteria,
	    outer_headers.first_vid, 0);
	MLX5_SET(fte_match_param, spec->match_value,
	    outer_headers.first_vid, 0);
}

static void
setup_fte_no_vid(struct mlx5_flow_spec *spec)
{
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value,
	    outer_headers.cvlan_tag, 0);
}

static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
    enum mlx5_flow_namespace_type ns, int base_prio,
    int base_level, struct mlx5_flow_table **root_ft)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	int err;

	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
	    MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.max_grp_num = 2;
	attr.default_ft = miss_ft;
	attr.ns = ns;
	attr.fs_base_prio = base_prio;
	attr.fs_base_level = base_level;
	chains = mlx5_chains_create(mdev, &attr);
	if (IS_ERR(chains))
		return chains;

	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_chains_get;
	}

	*root_ft = ft;
	return chains;

err_chains_get:
	mlx5_chains_destroy(chains);
	return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
	mlx5_chains_put_table(chains, 0, prio + 1, 0);
}

static struct mlx5_flow_table *ipsec_rx_ft_create(struct mlx5_flow_namespace *ns,
    int level, int prio, int max_num_groups)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.autogroup.num_reserved_entries = 1;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
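/*
 * Illustrative note on the chains helpers above: a user-visible policy
 * priority "prio" is looked up as chain 0, prio "prio + 1", level 0, while
 * ipsec_chains_create() holds the connector table at prio 1. A consumer
 * therefore brackets its use in a get/put pair, e.g. (sketch):
 *
 *	ft = ipsec_chains_get_table(chains, attrs->prio);
 *	... install a policy rule in ft ...
 *	ipsec_chains_put_table(chains, attrs->prio);
 *
 * tx_ft_get_policy()/tx_ft_put_policy() below are the real users of this
 * pattern.
 */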
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
    struct mlx5_flow_table *ft,
    struct mlx5e_ipsec_miss *miss,
    struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
		    err);
		goto out;
	}

	if (dest)
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	else
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	/* Create miss rule */
	miss->rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
		    err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
    struct mlx5_flow_act *flow_act)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_modify_hdr *modify_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	switch (dir) {
	case IPSEC_DIR_INBOUND:
		MLX5_SET(set_action_in, action, field,
		    MLX5_ACTION_IN_FIELD_METADATA_REG_B);
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
		break;
	case IPSEC_DIR_OUTBOUND:
		MLX5_SET(set_action_in, action, field,
		    MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
		ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	default:
		return -EINVAL;
	}

	MLX5_SET(set_action_in, action, data, val);
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
	if (IS_ERR(modify_hdr)) {
		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
		    PTR_ERR(modify_hdr));
		return PTR_ERR(modify_hdr);
	}

	flow_act->modify_hdr = modify_hdr;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	return 0;
}
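/*
 * Illustrative sketch of the metadata handshake set up above (not extra
 * driver code): on egress, the policy rule writes the SA reqid into
 * REG_C_0 via this modify header, and the SA table later matches it back
 * with setup_fte_reg_c0(); on ingress, the SA rule writes
 * (kspi | BIT(31)) into REG_B so decrypted packets can be recognized
 * later:
 *
 *	TX: policy rule --set REG_C_0 = reqid------> SA table match(REG_C_0)
 *	RX: SA rule     --set REG_B = kspi|BIT(31)-> consumed past steering
 *
 * See tx_add_policy() and rx_add_rule() below for the two callers.
 */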
static int
setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
    struct mlx5_pkt_reformat_params *reformat_params)
{
	struct udphdr *udphdr;
	size_t bfflen = 16;
	char *reformatbf;
	__be32 spi;
	void *hdr;

	if (attrs->family == AF_INET) {
		if (attrs->encap)
			reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
		else
			reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
	} else {
		if (attrs->encap)
			reformat_params->type =
			    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
		else
			reformat_params->type =
			    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
	}

	if (attrs->encap)
		bfflen += sizeof(*udphdr);
	reformatbf = kzalloc(bfflen, GFP_KERNEL);
	if (!reformatbf)
		return -ENOMEM;

	hdr = reformatbf;
	if (attrs->encap) {
		udphdr = (struct udphdr *)reformatbf;
		udphdr->uh_sport = attrs->sport;
		udphdr->uh_dport = attrs->dport;
		hdr += sizeof(*udphdr);
	}

	/* convert to network format */
	spi = htonl(attrs->spi);
	memcpy(hdr, &spi, 4);

	reformat_params->param_0 = attrs->authsize;
	reformat_params->size = bfflen;
	reformat_params->data = reformatbf;

	return 0;
}

static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
    struct mlx5_accel_esp_xfrm_attrs *attrs,
    struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_pkt_reformat *pkt_reformat;
	int ret;

	if (attrs->dir == IPSEC_DIR_INBOUND) {
		if (attrs->encap)
			reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
		else
			reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
		goto cmd;
	}

	ret = setup_pkt_transport_reformat(attrs, &reformat_params);
	if (ret)
		return ret;
cmd:
	pkt_reformat =
	    mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
	if (reformat_params.data)
		kfree(reformat_params.data);
	if (IS_ERR(pkt_reformat))
		return PTR_ERR(pkt_reformat);

	flow_act->pkt_reformat = pkt_reformat;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	return 0;
}

static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
    __be32 *daddr)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
	    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
	    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
    __be32 *daddr)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
	    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
	memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
	    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
	    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
	memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
	    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
}

static void
setup_fte_ip_version(struct mlx5_flow_spec *spec, u8 family)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
	    family == AF_INET ? 4 : 6);
}
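/*
 * Layout sketch of the reformat buffer built by
 * setup_pkt_transport_reformat() above (transport mode; sizes follow the
 * code, trailing bytes stay zeroed by kzalloc()):
 *
 *	no UDP encap:	[ SPI (4, network order) | zero padding ]  16 bytes
 *	UDP encap:	[ udphdr (8) | SPI (4) | zero padding ]    24 bytes
 *
 * param_0 carries the ESP ICV length (attrs->authsize) to the device.
 */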
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5_fc *counter;
	int err;

	rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (!attrs->drop) {
		err = setup_modify_header(mdev, sa_entry->kspi | BIT(31),
		    IPSEC_DIR_INBOUND, &flow_act);
		if (err)
			goto err_mod_header;
	}

	err = setup_pkt_reformat(mdev, attrs, &flow_act);
	if (err)
		goto err_pkt_reformat;

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.op = MLX5_FLOW_ACT_CRYPTO_OP_DECRYPT;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
	    MLX5_FLOW_CONTEXT_ACTION_COUNT;

	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.status;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	if (!attrs->encap)
		setup_fte_esp(spec);

	setup_fte_spi(spec, attrs->spi, attrs->encap);
	setup_fte_no_frags(spec);

	if (sa_entry->vid != VLAN_NONE)
		setup_fte_vid(spec, sa_entry->vid);
	else
		setup_fte_no_vid(spec);

	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
		goto err_add_flow;
	}
	ipsec_rule->rule = rule;

	/* Add another rule for zero vid */
	if (sa_entry->vid == VLAN_NONE) {
		clear_fte_vid(spec);
		setup_fte_vid(spec, 0);
		rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_err(mdev,
			    "fail to add RX ipsec zero vid rule err=%d\n",
			    err);
			goto err_add_flow;
		}
		ipsec_rule->vid_zero_rule = rule;
	}

	kvfree(spec);
	ipsec_rule->fc = counter;
	ipsec_rule->modify_hdr = flow_act.modify_hdr;
	ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	/* Delete any already-installed rule before destroying its counter. */
	if (ipsec_rule->rule != NULL)
		mlx5_del_flow_rules(&ipsec_rule->rule);
	mlx5_fc_destroy(mdev, counter);
err_add_cnt:
	mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	if (flow_act.modify_hdr != NULL)
		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);

	return err;
}

static struct mlx5_flow_table *ipsec_tx_ft_create(struct mlx5_flow_namespace *ns,
    int level, int prio, int max_num_groups)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *fte;
	int err;

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_COUNT |
	    MLX5_FLOW_CONTEXT_ACTION_ALLOW;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(tx->fc->cnt);
	fte = mlx5_add_flow_rules(tx->ft.status, NULL, &flow_act, &dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
		goto err_rule;
	}

	tx->status.rule = fte;
	return 0;

err_rule:
	return err;
}

static void tx_destroy_roce(struct mlx5e_ipsec_tx *tx)
{
	if (!tx->roce.ft)
		return;

	mlx5_del_flow_rules(&tx->roce.rule);
	mlx5_destroy_flow_group(tx->roce.g);
	mlx5_destroy_flow_table(tx->roce.ft);
	tx->roce.ft = NULL;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec_tx *tx)
{
	tx_destroy_roce(tx);
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(&tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}
	mlx5_destroy_flow_table(tx->ft.sa);
	mlx5_del_flow_rules(&tx->kspi_miss.rule);
	mlx5_destroy_flow_group(tx->kspi_miss.group);
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
	mlx5_destroy_flow_table(tx->ft.sa_kspi);
	mlx5_del_flow_rules(&tx->status.rule);
	mlx5_destroy_flow_table(tx->ft.status);
}

static int ipsec_tx_roce_rule_setup(struct mlx5_core_dev *mdev,
    struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dst = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	int err = 0;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = tx->ft.pol;
	rule = mlx5_add_flow_rules(tx->roce.ft, NULL, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add TX roce ipsec rule err=%d\n",
		    err);
		goto out;
	}
	tx->roce.rule = rule;

out:
	return err;
}

static int ipsec_tx_create_roce(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	int ix = 0;
	int err;
	u32 *in;

	if (!tx->roce.ns)
		return -EOPNOTSUPP;

	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	ft_attr.max_fte = 1;
	ft = mlx5_create_flow_table(tx->roce.ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create ipsec tx roce ft err=%d\n",
		    err);
		goto fail_table;
	}
	tx->roce.ft = ft;

	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += 1;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create ipsec tx roce group err=%d\n",
		    err);
		goto fail_group;
	}
	tx->roce.g = g;

	err = ipsec_tx_roce_rule_setup(mdev, tx);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
		goto fail_rule;
	}

	kvfree(in);
	return 0;

fail_rule:
	mlx5_destroy_flow_group(tx->roce.g);
fail_group:
	mlx5_destroy_flow_table(tx->roce.ft);
	tx->roce.ft = NULL;
fail_table:
	kvfree(in);
	return err;
}
/*
 * Set rules in the KSPI table for values that should bypass IPsec.
 *
 * mdev - mlx5 core device
 * tx - IPsec TX
 * return - 0 on success, errno on failure
 */
static int tx_create_kspi_bypass_rules(struct mlx5_core_dev *mdev,
    struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_act flow_act_kspi = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	dest.ft = tx->ft.status;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	flow_act_kspi.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	setup_fte_reg_a_with_tag(spec, IPSEC_ACCEL_DRV_SPI_BYPASS);
	rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act_kspi,
	    &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n",
		    err);
		goto err_add_kspi_rule;
	}
	tx->kspi_bypass_rule.kspi_rule = rule;

	/* Set the rule for packets without an IPsec tag. */
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	memset(spec, 0, sizeof(*spec));
	setup_fte_reg_a_no_tag(spec);
	rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n", err);
		goto err_add_rule;
	}
	tx->kspi_bypass_rule.rule = rule;

	kvfree(spec);
	return 0;
err_add_rule:
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
err_add_kspi_rule:
	kvfree(spec);
	return err;
}
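/*
 * Illustrative note on how packets are expected to reach these rules: the
 * TX datapath stamps the send WQE metadata with
 * (MLX5_ETH_WQE_FT_META_IPSEC << 23 | kspi), which steering exposes as
 * metadata_reg_a; setup_fte_reg_a_with_tag() below matches exactly that
 * encoding. Untagged traffic (the IPsec bits of metadata_reg_a are zero)
 * takes the second bypass rule installed above straight to the status
 * table.
 */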
static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	int err;

	/*
	 * The TX flow is different for Ethernet traffic than for RoCE
	 * packets. For Ethernet packets we start in the SA KSPI table,
	 * which matches the KSPI of the SA rule against the KSPI in the
	 * packet metadata. For RoCE traffic we start in the policy table,
	 * then move to the SA table, which matches either the reqid of the
	 * SA rule against the reqid reported by the policy table, or the
	 * IP header fields of the SA against the packet IP header fields.
	 * Tables are ordered by their level, so the KSPI table gets level 0
	 * to come first for Ethernet traffic. For RoCE, the RoCE TX table
	 * directs packets to the policy table explicitly.
	 */
	ft = ipsec_tx_ft_create(tx->ns, 0, 0, 4);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
	tx->ft.sa_kspi = ft;

	ft = ipsec_tx_ft_create(tx->ns, 2, 0, 4);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_reqid_ft;
	}
	tx->ft.sa = ft;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		tx->chains = ipsec_chains_create(
		    mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 1,
		    &tx->ft.pol);
		if (IS_ERR(tx->chains)) {
			err = PTR_ERR(tx->chains);
			goto err_pol_ft;
		}
	} else {
		ft = ipsec_tx_ft_create(tx->ns, 1, 0, 2);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			goto err_pol_ft;
		}
		tx->ft.pol = ft;
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = tx->ft.sa;
		err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
		if (err)
			goto err_pol_miss;
	}

	ft = ipsec_tx_ft_create(tx->ns, 2, 0, 1);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_status_ft;
	}
	tx->ft.status = ft;

	/* Set the miss rule for the kspi table with drop action. */
	err = ipsec_miss_create(mdev, tx->ft.sa_kspi, &tx->kspi_miss, NULL);
	if (err)
		goto err_kspi_miss;

	err = tx_create_kspi_bypass_rules(mdev, tx);
	if (err)
		goto err_kspi_rule;

	err = ipsec_counter_rule_tx(mdev, tx);
	if (err)
		goto err_status_rule;

	err = ipsec_tx_create_roce(mdev, tx);
	if (err)
		goto err_counter_rule;

	return 0;

err_counter_rule:
	mlx5_del_flow_rules(&tx->status.rule);
err_status_rule:
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
	mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
err_kspi_rule:
	mlx5_destroy_flow_table(tx->ft.status);
err_status_ft:
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(&tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
	}
err_pol_miss:
	if (!tx->chains)
		mlx5_destroy_flow_table(tx->ft.pol);
err_pol_ft:
	mlx5_del_flow_rules(&tx->kspi_miss.rule);
	mlx5_destroy_flow_group(tx->kspi_miss.group);
err_kspi_miss:
	mlx5_destroy_flow_table(tx->ft.sa);
err_reqid_ft:
	mlx5_destroy_flow_table(tx->ft.sa_kspi);
	return err;
}

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
    struct mlx5e_ipsec_tx *tx)
{
	int err;

	if (tx->ft.refcnt)
		goto skip;

	err = tx_create(mdev, tx);
	if (err)
		return err;

skip:
	tx->ft.refcnt++;
	return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
	if (--tx->ft.refcnt)
		return;

	tx_destroy(tx);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
    struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return tx;
}
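/*
 * Usage sketch (mirrors what tx_add_rule() and
 * mlx5e_accel_ipsec_fs_del_rule() do): every user of the TX tables
 * brackets their lifetime with the refcounted get/put pair, so the tables
 * exist only while at least one SA or policy references them:
 *
 *	tx = tx_ft_get(mdev, ipsec);	// builds the tables on first use
 *	... install rules ...
 *	tx_ft_put(ipsec);		// tears them down on the last put
 */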
static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
    struct mlx5e_ipsec *ipsec, u32 prio)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	if (err)
		goto err_get;

	ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&tx->ft.mutex);
	return ft;

err_get_ft:
	tx_put(ipsec, tx);
err_get:
	mutex_unlock(&tx->ft.mutex);
	return ERR_PTR(err);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;

	mutex_lock(&tx->ft.mutex);
	if (tx->chains)
		ipsec_chains_put_table(tx->chains, prio);

	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_tx *tx = ipsec->tx;

	mutex_lock(&tx->ft.mutex);
	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
    u16 kspi)
{
	/* Add IPsec indicator in metadata_reg_a. */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    misc_parameters_2.metadata_reg_a);
	MLX5_SET(fte_match_param, spec->match_value,
	    misc_parameters_2.metadata_reg_a,
	    MLX5_ETH_WQE_FT_META_IPSEC << 23 | kspi);
}

static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec)
{
	/* Add IPsec indicator in metadata_reg_a. */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, spec->match_criteria,
	    misc_parameters_2.metadata_reg_a,
	    MLX5_ETH_WQE_FT_META_IPSEC << 23);
	MLX5_SET(fte_match_param, spec->match_value,
	    misc_parameters_2.metadata_reg_a,
	    0);
}

static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
{
	/* Pass policy check before choosing this SA */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
	    misc_parameters_2.metadata_reg_c_0);
	MLX5_SET(fte_match_param, spec->match_value,
	    misc_parameters_2.metadata_reg_c_0, reqid);
}

static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
{
	switch (upspec->proto) {
	case IPPROTO_UDP:
		if (upspec->dport) {
			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
			    spec->match_criteria, udp_dport);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
			    udp_dport, upspec->dport);
		}

		if (upspec->sport) {
			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
			    spec->match_criteria, udp_sport);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
			    udp_sport, upspec->sport);
		}
		break;
	case IPPROTO_TCP:
		if (upspec->dport) {
			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
			    spec->match_criteria, tcp_dport);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
			    tcp_dport, upspec->dport);
		}

		if (upspec->sport) {
			MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
			    spec->match_criteria, tcp_sport);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
			    tcp_sport, upspec->sport);
		}
		break;
	default:
		return;
	}

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
}
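/*
 * Example (illustrative): selecting IKE NAT-T traffic with the helper
 * above. A hypothetical policy matching UDP destination port 4500 would
 * fill the upspec as:
 *
 *	struct upspec up = { .proto = IPPROTO_UDP, .dport = 4500 };
 *
 *	setup_fte_upper_proto_match(spec, &up);
 *
 * Zero dport/sport fields add no port match, so the helper can be called
 * unconditionally, as tx_add_policy()/rx_add_policy() below do.
 */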
static int tx_add_kspi_rule(struct mlx5e_ipsec_sa_entry *sa_entry,
    struct mlx5e_ipsec_tx *tx,
    struct mlx5_flow_act *flow_act,
    struct mlx5_flow_destination *dest,
    int num_dest)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	setup_fte_no_frags(spec);
	setup_fte_reg_a_with_tag(spec, sa_entry->kspi);

	rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, flow_act, dest, num_dest);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec kspi rule err=%d\n", err);
		goto err_add_kspi_flow;
	}
	ipsec_rule->kspi_rule = rule;
	kvfree(spec);
	return 0;

err_add_kspi_flow:
	kvfree(spec);
	return err;
}

static int tx_add_reqid_ip_rules(struct mlx5e_ipsec_sa_entry *sa_entry,
    struct mlx5e_ipsec_tx *tx,
    struct mlx5_flow_act *flow_act,
    struct mlx5_flow_destination *dest,
    int num_dest)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (attrs->reqid) {
		setup_fte_no_frags(spec);
		setup_fte_reg_c0(spec, attrs->reqid);
		rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_err(mdev, "fail to add TX ipsec reqid rule err=%d\n", err);
			goto err_add_reqid_rule;
		}
		ipsec_rule->reqid_rule = rule;
		memset(spec, 0, sizeof(*spec));
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
	setup_fte_no_frags(spec);

	rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec ip rule err=%d\n", err);
		goto err_add_ip_rule;
	}
	ipsec_rule->rule = rule;
	kvfree(spec);
	return 0;

err_add_ip_rule:
	mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
err_add_reqid_rule:
	kvfree(spec);
	return err;
}
static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5e_ipsec_tx *tx;
	struct mlx5_fc *counter;
	int err;

	tx = tx_ft_get(mdev, ipsec);
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	err = setup_pkt_reformat(mdev, attrs, &flow_act);
	if (err)
		goto err_pkt_reformat;

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
	    MLX5_FLOW_CONTEXT_ACTION_COUNT;

	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	dest[0].ft = tx->ft.status;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);

	err = tx_add_kspi_rule(sa_entry, tx, &flow_act, dest, 2);
	if (err)
		goto err_add_kspi_rule;

	err = tx_add_reqid_ip_rules(sa_entry, tx, &flow_act, dest, 2);
	if (err)
		goto err_add_reqid_ip_rule;

	ipsec_rule->fc = counter;
	ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_reqid_ip_rule:
	mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
err_add_kspi_rule:
	mlx5_fc_destroy(mdev, counter);
err_add_cnt:
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	tx_ft_put(ipsec);
	return err;
}

static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err, dstn = 0;

	ft = tx_ft_get_policy(mdev, pol_entry->ipsec, attrs->prio);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->action) {
	case IPSEC_POLICY_IPSEC:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		err = setup_modify_header(mdev, attrs->reqid,
		    IPSEC_DIR_OUTBOUND, &flow_act);
		if (err)
			goto err_mod_header;
		break;
	case IPSEC_POLICY_DISCARD:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
		    MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
		dstn++;
		break;
	default:
		err = -EINVAL;
		goto err_mod_header;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	dest[dstn].ft = tx->ft.sa;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dstn++;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	return 0;

err_action:
	if (flow_act.modify_hdr)
		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	tx_ft_put_policy(pol_entry->ipsec, attrs->prio);
	return err;
}
static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5e_ipsec_rx *rx;
	int err, dstn = 0;

	rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;
	ft = rx->chains ? ipsec_chains_get_table(rx->chains, attrs->prio) : rx->ft.pol;
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	switch (attrs->action) {
	case IPSEC_POLICY_IPSEC:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case IPSEC_POLICY_DISCARD:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
		dstn++;
		break;
	default:
		err = -EINVAL;
		goto err_action;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[dstn].ft = rx->ft.sa;
	dstn++;

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);
	if (attrs->vid != VLAN_NONE)
		setup_fte_vid(spec, attrs->vid);
	else
		setup_fte_no_vid(spec);

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
		    "Failed to add RX IPsec policy rule err=%d\n", err);
		goto err_action;
	}
	pol_entry->ipsec_rule.rule = rule;

	/* Add another rule for zero vid */
	if (attrs->vid == VLAN_NONE) {
		clear_fte_vid(spec);
		setup_fte_vid(spec, 0);
		rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_err(mdev,
			    "Failed to add RX IPsec policy rule err=%d\n",
			    err);
			goto err_action;
		}
		pol_entry->ipsec_rule.vid_zero_rule = rule;
	}

	kvfree(spec);
	return 0;

err_action:
	if (pol_entry->ipsec_rule.rule != NULL)
		mlx5_del_flow_rules(&pol_entry->ipsec_rule.rule);
	kvfree(spec);
err_alloc:
	if (rx->chains != NULL)
		ipsec_chains_put_table(rx->chains, attrs->prio);
	return err;
}

static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx *tx = ipsec->tx;

	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
	kfree(rx_ipv4->fc);
	mlx5_fc_destroy(mdev, tx->fc->drop);
	mlx5_fc_destroy(mdev, tx->fc->cnt);
	kfree(tx->fc);
}
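/*
 * Illustrative sketch, assuming the mlx5 flow-counter query API as in the
 * Linux driver (mlx5_fc_query() is an assumption here and is not used
 * elsewhere in this file): the cnt/drop counters allocated below could be
 * read as
 *
 *	u64 packets, bytes;
 *
 *	if (mlx5_fc_query(mdev, tx->fc->cnt, &packets, &bytes) == 0)
 *		... report IPsec TX hit statistics ...
 */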
static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
	struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx *tx = ipsec->tx;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fc *counter;
	int err;

	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	tx->fc = fc;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_tx_fc_alloc;
	}

	fc->cnt = counter;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_tx_fc_cnt;
	}

	fc->drop = counter;

	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
	if (!fc) {
		err = -ENOMEM;
		goto err_tx_fc_drop;
	}

	/* Both IPv4 and IPv6 point to the same flow counters struct. */
	rx_ipv4->fc = fc;
	rx_ipv6->fc = fc;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_rx_fc_alloc;
	}

	fc->cnt = counter;
	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_rx_fc_cnt;
	}

	fc->drop = counter;
	return 0;

err_rx_fc_cnt:
	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
err_rx_fc_alloc:
	kfree(rx_ipv4->fc);
err_tx_fc_drop:
	mlx5_fc_destroy(mdev, tx->fc->drop);
err_tx_fc_cnt:
	mlx5_fc_destroy(mdev, tx->fc->cnt);
err_tx_fc_alloc:
	kfree(tx->fc);
	return err;
}

static int ipsec_status_rule(struct mlx5_core_dev *mdev,
    struct mlx5e_ipsec_rx *rx,
    struct mlx5_flow_destination *dest)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy the 7-bit ipsec_syndrome to regB[24:30] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 24);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
	    1, action);

	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev,
		    "fail to alloc ipsec copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
	    MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.modify_hdr = modify_hdr;

	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
		goto out;
	}

	kvfree(spec);
	rx->status.rule = rule;
	rx->status.modify_hdr = modify_hdr;
	return 0;

out:
	mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kvfree(spec);
	return err;
}
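/*
 * Resulting REG_B layout for accepted RX packets, combining the SA-table
 * modify header (rx_add_rule() writes kspi | BIT(31) over the whole
 * register) with the status-table copy action above, which then overwrites
 * bits 24..30:
 *
 *	bits  0..15	kspi of the matching SA
 *	bits 24..30	ipsec_syndrome copied by this rule
 *	bit      31	set, marking NIC-decrypted traffic
 */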
static void ipsec_fs_rx_roce_rules_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
{
	if (!rx_roce->ns_rdma)
		return;

	mlx5_del_flow_rules(&rx_roce->roce_miss.rule);
	mlx5_del_flow_rules(&rx_roce->rule);
	mlx5_destroy_flow_group(rx_roce->roce_miss.group);
	mlx5_destroy_flow_group(rx_roce->g);
}

static void ipsec_fs_rx_catchall_rules_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
	mutex_lock(&rx->ft.mutex);
	mlx5_del_flow_rules(&rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	if (rx->chains == NULL) {
		mlx5_del_flow_rules(&rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
	}
	mlx5_del_flow_rules(&rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
	ipsec_fs_rx_roce_rules_destroy(&rx->roce);
	mutex_unlock(&rx->ft.mutex);
}

static void ipsec_fs_rx_roce_table_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
{
	if (!rx_roce->ns_rdma)
		return;

	mlx5_destroy_flow_table(rx_roce->ft_rdma);
	mlx5_destroy_flow_table(rx_roce->ft);
}

static void
ipsec_fs_rx_ip_type_catchall_rule_destroy(struct mlx5e_ipsec_rx_ip_type *rx_ip_type)
{
	mlx5_del_flow_rules(&rx_ip_type->ipv4_rule);
	mlx5_del_flow_rules(&rx_ip_type->ipv6_rule);
	mlx5_del_flow_rules(&rx_ip_type->miss.rule);
	mlx5_destroy_flow_group(rx_ip_type->miss.group);
	rx_ip_type->miss.group = NULL;
}

static void ipsec_fs_rx_table_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
	if (rx->chains) {
		ipsec_chains_destroy(rx->chains);
	} else {
		mlx5_del_flow_rules(&rx->pol.rule);
		mlx5_destroy_flow_table(rx->ft.pol);
	}
	mlx5_destroy_flow_table(rx->ft.sa);
	mlx5_destroy_flow_table(rx->ft.status);
	ipsec_fs_rx_roce_table_destroy(&rx->roce);
}

static void ipsec_roce_setup_udp_dport(struct mlx5_flow_spec *spec, u16 dport)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
}
static int ipsec_roce_rx_rule_setup(struct mlx5_flow_destination *default_dst,
    struct mlx5e_ipsec_rx_roce *roce, struct mlx5_core_dev *mdev)
{
	struct mlx5_flow_destination dst = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	ipsec_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = roce->ft_rdma;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX roce ipsec rule err=%d\n",
		    err);
		goto fail_add_rule;
	}

	roce->rule = rule;

	rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, default_dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX roce ipsec miss rule err=%d\n",
		    err);
		goto fail_add_default_rule;
	}

	roce->roce_miss.rule = rule;

	kvfree(spec);
	return 0;

fail_add_default_rule:
	mlx5_del_flow_rules(&roce->rule);
fail_add_rule:
	kvfree(spec);
	return err;
}

static int ipsec_roce_rx_rules(struct mlx5e_ipsec_rx *rx, struct mlx5_flow_destination *defdst,
    struct mlx5_core_dev *mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *outer_headers_c;
	u32 *in;
	int err = 0;
	int ix = 0;
	u8 *mc;

	if (!rx->roce.ns_rdma)
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += 1;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(rx->roce.ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create ipsec rx roce group at nic err=%d\n", err);
		goto fail_group;
	}
	rx->roce.g = g;

	memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += 1;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(rx->roce.ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create ipsec rx roce miss group at nic err=%d\n",
		    err);
		goto fail_mgroup;
	}
	rx->roce.roce_miss.group = g;

	err = ipsec_roce_rx_rule_setup(defdst, &rx->roce, mdev);
	if (err)
		goto fail_setup_rule;

	kvfree(in);
	return 0;

fail_setup_rule:
	mlx5_destroy_flow_group(rx->roce.roce_miss.group);
fail_mgroup:
	mlx5_destroy_flow_group(rx->roce.g);
fail_group:
	kvfree(in);
	return err;
}
static int ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv,
    struct mlx5e_ipsec_rx *rx,
    struct mlx5_flow_destination *defdst)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	int err = 0;

	mutex_lock(&rx->ft.mutex);
	/* IPsec RoCE RX rules */
	err = ipsec_roce_rx_rules(rx, defdst, mdev);
	if (err)
		goto out;

	/* IPsec Rx IP Status table rule */
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	if (rx->roce.ft)
		dest[0].ft = rx->roce.ft;
	else
		dest[0].ft = priv->fts.vlan.t;

	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
	err = ipsec_status_rule(mdev, rx, dest);
	if (err)
		goto err_roce_rules_destroy;

	if (!rx->chains) {
		/* IPsec Rx IP policy default miss rule */
		err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, defdst);
		if (err)
			goto err_status_rule_destroy;
	}

	/*
	 * FIXME: This is a workaround for the current design, which
	 * installs the SA on the first packet, so we need to forward that
	 * packet to the stack. It doesn't work with RoCE and eswitch
	 * traffic.
	 */
	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, defdst);
	if (err)
		goto err_status_sa_rule_destroy;

	mutex_unlock(&rx->ft.mutex);
	return 0;

err_status_sa_rule_destroy:
	if (!rx->chains) {
		mlx5_del_flow_rules(&rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
	}
err_status_rule_destroy:
	mlx5_del_flow_rules(&rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_roce_rules_destroy:
	ipsec_fs_rx_roce_rules_destroy(&rx->roce);
out:
	mutex_unlock(&rx->ft.mutex);
	return err;
}

static int ipsec_fs_rx_roce_tables_create(struct mlx5e_ipsec_rx *rx,
    int rx_init_level, int rdma_init_level)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;
	int err = 0;

	if (!rx->roce.ns_rdma)
		return 0;

	ft_attr.max_fte = 2;
	ft_attr.level = rx_init_level;
	ft = mlx5_create_flow_table(rx->ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		return err;
	}
	rx->roce.ft = ft;

	ft_attr.max_fte = 0;
	ft_attr.level = rdma_init_level;
	ft = mlx5_create_flow_table(rx->roce.ns_rdma, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto out;
	}
	rx->roce.ft_rdma = ft;

	return 0;
out:
	mlx5_destroy_flow_table(rx->roce.ft);
	rx->roce.ft = NULL;
	return err;
}

static int
ipsec_fs_rx_ip_type_catchall_rules_create(struct mlx5e_priv *priv,
    struct mlx5_flow_destination *defdst)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5_flow_destination dst = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	/* Set rule for ipv4 packets */
	dst.ft = ipsec->rx_ipv4->ft.pol;
	setup_fte_ip_version(spec, AF_INET);
	rule = mlx5_add_flow_rules(ipsec->rx_ip_type->ft, spec, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add ipv4 rule to ip_type table err=%d\n",
		    err);
		goto out;
	}
	ipsec->rx_ip_type->ipv4_rule = rule;

	/* Set rule for ipv6 packets */
	dst.ft = ipsec->rx_ipv6->ft.pol;
	setup_fte_ip_version(spec, AF_INET6);
	rule = mlx5_add_flow_rules(ipsec->rx_ip_type->ft, spec, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Failed to add ipv6 rule to ip_type table err=%d\n",
		    err);
		goto fail_add_ipv6_rule;
	}
	ipsec->rx_ip_type->ipv6_rule = rule;

	/* set miss rule */
	err = ipsec_miss_create(mdev, ipsec->rx_ip_type->ft, &ipsec->rx_ip_type->miss, defdst);
	if (err) {
		mlx5_core_err(mdev, "Failed to add miss rule to ip_type table err=%d\n",
		    err);
		goto fail_miss_rule;
	}

	goto out;

fail_miss_rule:
	mlx5_del_flow_rules(&ipsec->rx_ip_type->ipv6_rule);
fail_add_ipv6_rule:
	mlx5_del_flow_rules(&ipsec->rx_ip_type->ipv4_rule);
out:
	kvfree(spec);
	return err;
}
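/*
 * Resulting RX pipeline, for reference (tables built by
 * ipsec_fs_rx_table_create() below and glued together by the catchall
 * rules above):
 *
 *	ip_type ft --(ipv4)--> rx_ipv4 pol -> sa -> status -> roce ft / vlan ft
 *	           --(ipv6)--> rx_ipv6 pol -> sa -> status -> roce ft / vlan ft
 *	           --(miss)--> default destination (the vlan table)
 */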
static int
ipsec_fs_rx_ip_type_table_create(struct mlx5e_priv *priv,
				 int level)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5_flow_table *ft;
	int err = 0;

	/* Create rx ip type table */
	ft = ipsec_rx_ft_create(ipsec->rx_ip_type->ns, level, 0, 1);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto out;
	}
	ipsec->rx_ip_type->ft = ft;

	priv->fts.ipsec_ft = priv->ipsec->rx_ip_type->ft;

out:
	return err;
}

static int ipsec_fs_rx_table_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx,
				    int rx_init_level, int rdma_init_level)
{
	struct mlx5_flow_namespace *ns = rx->ns;
	struct mlx5_flow_table *ft;
	int err = 0;

	mutex_lock(&rx->ft.mutex);

	/* IPsec Rx IP SA table create */
	ft = ipsec_rx_ft_create(ns, rx_init_level + 1, 0, 1);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto out;
	}
	rx->ft.sa = ft;

	/* IPsec Rx IP Status table create */
	ft = ipsec_rx_ft_create(ns, rx_init_level + 2, 0, 1);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_sa_table_destroy;
	}
	rx->ft.status = ft;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
						 MLX5_FLOW_NAMESPACE_KERNEL, 0,
						 rx_init_level, &rx->ft.pol);
		if (IS_ERR(rx->chains)) {
			err = PTR_ERR(rx->chains);
			goto err_status_table_destroy;
		}
	} else {
		ft = ipsec_rx_ft_create(ns, rx_init_level, 0, 1);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			goto err_status_table_destroy;
		}
		rx->ft.pol = ft;
	}

	/* IPsec RoCE RX tables create */
	err = ipsec_fs_rx_roce_tables_create(rx, rx_init_level + 3,
					     rdma_init_level);
	if (err)
		goto err_pol_table_destroy;

	goto out;

err_pol_table_destroy:
	mlx5_destroy_flow_table(rx->ft.pol);
err_status_table_destroy:
	mlx5_destroy_flow_table(rx->ft.status);
err_sa_table_destroy:
	mlx5_destroy_flow_table(rx->ft.sa);
out:
	mutex_unlock(&rx->ft.mutex);
	return err;
}

#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)

static void mlx5e_accel_ipsec_fs_init_roce(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_namespace *ns;

	if ((MLX5_CAP_GEN_2(ipsec->mdev, flow_table_type_2_type) &
	     NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) {
		mlx5_core_dbg(mdev, "Failed to init roce ns, capabilities not supported\n");
		return;
	}

	ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
	if (!ns) {
		mlx5_core_err(mdev, "Failed to init roce rx ns\n");
		return;
	}

	ipsec->rx_ipv4->roce.ns_rdma = ns;
	ipsec->rx_ipv6->roce.ns_rdma = ns;

	ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
	if (!ns) {
		ipsec->rx_ipv4->roce.ns_rdma = NULL;
		ipsec->rx_ipv6->roce.ns_rdma = NULL;
		mlx5_core_err(mdev, "Failed to init roce tx ns\n");
		return;
	}

	ipsec->tx->roce.ns = ns;
}
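/*
 * RoCE offload is all-or-nothing.  A sketch of the gating performed
 * by mlx5e_accel_ipsec_fs_init_roce() above, using the same
 * capability bits:
 *
 *	caps = MLX5_CAP_GEN_2(mdev, flow_table_type_2_type);
 *	if ((caps & NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS)
 *		return;	// plain Ethernet IPsec offload only
 *
 * If the RDMA TX namespace turns out to be missing after the RX
 * namespaces were already stored, both RX pointers are reset to NULL
 * so the RoCE path is never left half-configured.
 */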
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
		return tx_add_rule(sa_entry);

	return rx_add_rule(sa_entry);
}

void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(&ipsec_rule->rule);
	mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
	if (ipsec_rule->vid_zero_rule != NULL)
		mlx5_del_flow_rules(&ipsec_rule->vid_zero_rule);
	if (ipsec_rule->reqid_rule != NULL)
		mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
	mlx5_fc_destroy(mdev, ipsec_rule->fc);
	mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
	if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND) {
		tx_ft_put(sa_entry->ipsec);
		return;
	}

	if (ipsec_rule->modify_hdr != NULL)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
}

int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	if (pol_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
		return tx_add_policy(pol_entry);

	return rx_add_policy(pol_entry);
}

void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

	mlx5_del_flow_rules(&ipsec_rule->rule);
	if (ipsec_rule->vid_zero_rule != NULL)
		mlx5_del_flow_rules(&ipsec_rule->vid_zero_rule);

	if (pol_entry->attrs.dir == IPSEC_DIR_INBOUND) {
		struct mlx5e_ipsec_rx *rx;

		rx = (pol_entry->attrs.family == AF_INET)
		    ? pol_entry->ipsec->rx_ipv4
		    : pol_entry->ipsec->rx_ipv6;
		if (rx->chains)
			ipsec_chains_put_table(rx->chains,
					       pol_entry->attrs.prio);
		return;
	}

	if (ipsec_rule->modify_hdr)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio);
}

void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv)
{
	/* Check if IPsec is supported */
	if (!priv->ipsec)
		return;

	ipsec_fs_rx_ip_type_catchall_rule_destroy(priv->ipsec->rx_ip_type);
	ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv4);
	ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
}

int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5_flow_destination dest = {};
	int err = 0;

	/* Check if IPsec is supported */
	if (!ipsec)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.vlan.t;
	err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv6, &dest);
	if (err)
		goto out;

	err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv4, &dest);
	if (err) {
		/* Bail out here; the original code fell through on this
		 * failure, overwriting err and leaving the ipv6 rules
		 * half torn down.
		 */
		ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
		goto out;
	}

	err = ipsec_fs_rx_ip_type_catchall_rules_create(priv, &dest);
	if (err) {
		ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
		ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv4);
	}

out:
	return err;
}

void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	/* Check if IPsec is supported */
	if (!ipsec)
		return;

	mlx5_destroy_flow_table(ipsec->rx_ip_type->ft);
	ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv6);
	ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv4);
}
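/*
 * RX flow-table level layout implied by the create calls below
 * (levels within the kernel RX namespace; sketch for reference only):
 *
 *	level 0:	rx_ip_type	IP version dispatch
 *	levels 1..3:	rx_ipv4		policy / SA / status
 *	level 4:	rx_ipv4		RoCE (rx_init_level + 3)
 *	levels 5..7:	rx_ipv6		policy / SA / status
 *	level 8:	rx_ipv6		RoCE (rx_init_level + 3)
 *
 * Hence rx_ipv4 is created with rx_init_level = 1 and rx_ipv6 with
 * rx_init_level = 5, so the two families never collide on a level.
 */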
int
mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	int err = 0;

	/* Check if IPsec is supported */
	if (!ipsec)
		return 0;

	err = ipsec_fs_rx_ip_type_table_create(priv, 0);
	if (err)
		return err;

	err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv4, 1, 0);
	if (err)
		goto err_ipv4_table;

	err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv6, 5, 1);
	if (err)
		goto err_ipv6_table;

	return 0;

err_ipv6_table:
	ipsec_fs_rx_table_destroy(priv->mdev, ipsec->rx_ipv4);
err_ipv4_table:
	mlx5_destroy_flow_table(ipsec->rx_ip_type->ft);
	return err;
}

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	WARN_ON(ipsec->tx->ft.refcnt);
	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
	mutex_destroy(&ipsec->tx->ft.mutex);
	ipsec_fs_destroy_counters(ipsec);
	kfree(ipsec->rx_ip_type);
	kfree(ipsec->rx_ipv6);
	kfree(ipsec->rx_ipv4);
	kfree(ipsec->tx);
}

int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_flow_namespace *tns, *rns;
	int err = -ENOMEM;

	tns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!tns)
		return -EOPNOTSUPP;

	rns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (!rns)
		return -EOPNOTSUPP;

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		return -ENOMEM;

	ipsec->rx_ip_type = kzalloc(sizeof(*ipsec->rx_ip_type), GFP_KERNEL);
	if (!ipsec->rx_ip_type)
		goto err_tx;

	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
	if (!ipsec->rx_ipv4)
		goto err_ip_type;

	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
	if (!ipsec->rx_ipv6)
		goto err_rx_ipv4;

	err = ipsec_fs_init_counters(ipsec);
	if (err)
		goto err_rx_ipv6;

	ipsec->tx->ns = tns;
	mutex_init(&ipsec->tx->ft.mutex);
	ipsec->rx_ip_type->ns = rns;
	ipsec->rx_ipv4->ns = rns;
	ipsec->rx_ipv6->ns = rns;
	mutex_init(&ipsec->rx_ipv4->ft.mutex);
	mutex_init(&ipsec->rx_ipv6->ft.mutex);

	mlx5e_accel_ipsec_fs_init_roce(ipsec);

	return 0;

err_rx_ipv6:
	kfree(ipsec->rx_ipv6);
err_rx_ipv4:
	kfree(ipsec->rx_ipv4);
err_ip_type:
	kfree(ipsec->rx_ip_type);
err_tx:
	kfree(ipsec->tx);
	return err;
}

void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
	int err;

	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));

	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
	if (err)
		return;
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}
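/*
 * Expected call order for the entry points above (a sketch of the
 * lifecycle as implied by this file; the actual call sites live
 * elsewhere in the driver):
 *
 *	mlx5e_accel_ipsec_fs_init(ipsec);		// namespaces, counters
 *	mlx5e_accel_ipsec_fs_rx_tables_create(priv);	// ip_type + v4/v6 tables
 *	mlx5e_accel_ipsec_fs_rx_catchall_rules(priv);	// default/miss rules
 *	...
 *	mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(priv);
 *	mlx5e_accel_ipsec_fs_rx_tables_destroy(priv);
 *	mlx5e_accel_ipsec_fs_cleanup(ipsec);
 *
 * SA and policy rules (mlx5e_accel_ipsec_fs_add_rule/_add_pol and
 * their _del_ counterparts) come and go between table creation and
 * teardown.  mlx5e_accel_ipsec_fs_modify() replaces an SA's rules by
 * building a shadow entry first, so the old rules stay in place if
 * the new ones cannot be installed.
 */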