/*-
 * Copyright (c) 2023 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_ipsec.h"

#include <sys/types.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <net/pfkeyv2.h>
#include <netipsec/key_var.h>
#include <netipsec/keydb.h>
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#include <netipsec/ipsec_offload.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/mlx5_accel/ipsec.h>
#include <dev/mlx5/mlx5_core/fs_core.h>
#include <dev/mlx5/mlx5_core/fs_chains.h>

/*
 * TX tables are organized differently for Ethernet and for RoCE:
 *
 *                         +=========+
 *       Ethernet Tx       | SA KSPI | match
 * ----------------------->|Flowtable|------>+         +
 *                         |         |\      |        / \
 *                         +=========+ \     |       /   \    +=========+     +=========+
 *                               miss   \    |      /     \   | Status  |     |         |
 *                               DROP<---+   |---->|Encrypt|->|Flowtable|---->|  TX NS  |
 *                                           |      \     /   |         |     |         |
 *                                           |       \   /    +=========+     +=========+
 *      +=========+        +=========+       |        \ /                          |
 * RoCE | Policy  |   match|SA ReqId | match |         +                           |
 *  Tx  |Flowtable|------->|Flowtable|------>+                                     |
 * ---->|IP header|        |ReqId+IP |                                             |
 *      |         |        | header  |---------------------------------------------+
 *      +=========+        +=========+  miss                                       |
 *          |                                                                      |
 *          | miss                                                                 |
 *          +----------------------------------------------------------------------+
 *
 *                                                                 +=========+
 *                                                                 |  RDMA   |
 *                                                                 |Flowtable|
 *                                                                 |         |
 * Rx Tables and rules:                                            +=========+
 *                                            +                         / match
 *     +=========+      +=========+          / \      +=========+     +=========+
 *     | Policy  |      |   SA    |         /   \     | Status  |     |  RoCE   |
 * --->|Flowtable| match|Flowtable|  match /     \    |Flowtable|---->|Flowtable|
 *     |IP header|----->|IP header|------>|Decrypt|-->|         |     | RoCE V2 |
 *     |         |      |+ESP+SPI |        \     /    |         |     | UDP port|
 *     +=========+      +=========+         \   /     +=========+     +=========+
 *         |                |                \ /                           \
 *         |                |                 +                             \ miss
 *         | miss           | miss                                           \
 *         |                |                                          +=========+
 *         |                |                                          | Ethernet|
 *         +----------------+----------------------------------------->|  RX NS  |
 *                                                                     |         |
 *                                                                     +=========+
 *
 */

#define NUM_IPSEC_FTE BIT(15)
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
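
/*
 * Software state for the steering trees.  struct mlx5e_ipsec holds one
 * mlx5e_ipsec_tx and a pair of mlx5e_ipsec_rx instances (IPv4 and IPv6);
 * each keeps its flow tables, miss groups/rules, flow counters and, when
 * the device supports priorities, a fs_chains object for the policy table.
 */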

struct mlx5e_ipsec_fc {
        struct mlx5_fc *cnt;
        struct mlx5_fc *drop;
};

struct mlx5e_ipsec_ft {
        struct mutex mutex; /* Protect changes to this struct */
        struct mlx5_flow_table *pol;
        struct mlx5_flow_table *sa_kspi;
        struct mlx5_flow_table *sa;
        struct mlx5_flow_table *status;
        u32 refcnt;
};

struct mlx5e_ipsec_tx_roce {
        struct mlx5_flow_group *g;
        struct mlx5_flow_table *ft;
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_namespace *ns;
};

struct mlx5e_ipsec_miss {
        struct mlx5_flow_group *group;
        struct mlx5_flow_handle *rule;
};

struct mlx5e_ipsec_tx {
        struct mlx5e_ipsec_ft ft;
        struct mlx5e_ipsec_miss pol;
        struct mlx5e_ipsec_miss kspi_miss;
        struct mlx5e_ipsec_rule status;
        struct mlx5e_ipsec_rule kspi_bypass_rule; /* rule for IPSEC bypass */
        struct mlx5_flow_namespace *ns;
        struct mlx5e_ipsec_fc *fc;
        struct mlx5_fs_chains *chains;
        struct mlx5e_ipsec_tx_roce roce;
};

struct mlx5e_ipsec_rx_roce {
        struct mlx5_flow_group *g;
        struct mlx5_flow_table *ft;
        struct mlx5_flow_handle *rule;
        struct mlx5e_ipsec_miss roce_miss;

        struct mlx5_flow_table *ft_rdma;
        struct mlx5_flow_namespace *ns_rdma;
};

struct mlx5e_ipsec_rx {
        struct mlx5e_ipsec_ft ft;
        struct mlx5e_ipsec_miss pol;
        struct mlx5e_ipsec_miss sa;
        struct mlx5e_ipsec_rule status;
        struct mlx5_flow_namespace *ns;
        struct mlx5e_ipsec_fc *fc;
        struct mlx5_fs_chains *chains;
        struct mlx5e_ipsec_rx_roce roce;
};

static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
                                     u16 kspi);
static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec);

static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
{
        /* Non fragmented */
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
        /* ESP header */
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
        /* SPI number */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

        if (encap) {
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.inner_esp_spi);
                MLX5_SET(fte_match_param, spec->match_value, misc_parameters.inner_esp_spi, spi);
        } else {
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
                MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
        }
}
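
/*
 * Chains plumbing for priority support: the policy "table" becomes a
 * fs_chains object, and chain 0, prio 1, level 0 is reserved to connect
 * the chains to the previous table in fs_core.  Policy priority N is
 * therefore mapped to chain prio N + 1 by the get/put helpers below.
 */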

static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
                    enum mlx5_flow_namespace_type ns, int base_prio,
                    int base_level, struct mlx5_flow_table **root_ft)
{
        struct mlx5_chains_attr attr = {};
        struct mlx5_fs_chains *chains;
        struct mlx5_flow_table *ft;
        int err;

        attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
                     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
        attr.max_grp_num = 2;
        attr.default_ft = miss_ft;
        attr.ns = ns;
        attr.fs_base_prio = base_prio;
        attr.fs_base_level = base_level;
        chains = mlx5_chains_create(mdev, &attr);
        if (IS_ERR(chains))
                return chains;

        /* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
        ft = mlx5_chains_get_table(chains, 0, 1, 0);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_chains_get;
        }

        *root_ft = ft;
        return chains;

err_chains_get:
        mlx5_chains_destroy(chains);
        return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
        mlx5_chains_put_table(chains, 0, 1, 0);
        mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
        return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
        mlx5_chains_put_table(chains, 0, prio + 1, 0);
}

static struct mlx5_flow_table *ipsec_rx_ft_create(struct mlx5_flow_namespace *ns,
                                                  int level, int prio,
                                                  int max_num_groups)
{
        struct mlx5_flow_table_attr ft_attr = {};

        ft_attr.max_fte = NUM_IPSEC_FTE;
        ft_attr.level = level;
        ft_attr.prio = prio;
        ft_attr.autogroup.max_num_groups = max_num_groups;
        ft_attr.autogroup.num_reserved_entries = 1;

        return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

static int ipsec_miss_create(struct mlx5_core_dev *mdev,
                             struct mlx5_flow_table *ft,
                             struct mlx5e_ipsec_miss *miss,
                             struct mlx5_flow_destination *dest)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_spec *spec;
        u32 *flow_group_in;
        int err = 0;

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!flow_group_in || !spec) {
                err = -ENOMEM;
                goto out;
        }

        /* Create miss_group */
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
        miss->group = mlx5_create_flow_group(ft, flow_group_in);
        if (IS_ERR(miss->group)) {
                err = PTR_ERR(miss->group);
                mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
                              err);
                goto out;
        }

        if (dest)
                flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_DEST;
        else
                flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_DROP;
        /* Create miss rule */
        miss->rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
        if (IS_ERR(miss->rule)) {
                mlx5_destroy_flow_group(miss->group);
                err = PTR_ERR(miss->rule);
                mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
                              err);
                goto out;
        }
out:
        kvfree(flow_group_in);
        kvfree(spec);
        return err;
}
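
/*
 * Metadata register usage: inbound rules SET metadata REG_B with the SA
 * handle (kspi) in the kernel RX namespace, while outbound policy rules
 * SET REG_C_0 with the policy reqid in the egress namespace, where the
 * SA table can match on it.
 */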

static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
                               struct mlx5_flow_act *flow_act)
{
        u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
        enum mlx5_flow_namespace_type ns_type;
        struct mlx5_modify_hdr *modify_hdr;

        MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
        switch (dir) {
        case IPSEC_DIR_INBOUND:
                MLX5_SET(set_action_in, action, field,
                         MLX5_ACTION_IN_FIELD_METADATA_REG_B);
                ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
                break;
        case IPSEC_DIR_OUTBOUND:
                MLX5_SET(set_action_in, action, field,
                         MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
                ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
                break;
        default:
                return -EINVAL;
        }

        MLX5_SET(set_action_in, action, data, val);
        MLX5_SET(set_action_in, action, offset, 0);
        MLX5_SET(set_action_in, action, length, 32);

        modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
        if (IS_ERR(modify_hdr)) {
                mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
                              PTR_ERR(modify_hdr));
                return PTR_ERR(modify_hdr);
        }

        flow_act->modify_hdr = modify_hdr;
        flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
        return 0;
}

static int
setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
                             struct mlx5_pkt_reformat_params *reformat_params)
{
        struct udphdr *udphdr;
        size_t bfflen = 16;
        char *reformatbf;
        __be32 spi;
        void *hdr;

        if (attrs->family == AF_INET) {
                if (attrs->encap)
                        reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
                else
                        reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
        } else {
                reformat_params->type =
                    MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
        }

        if (attrs->encap)
                bfflen += sizeof(*udphdr);
        reformatbf = kzalloc(bfflen, GFP_KERNEL);
        if (!reformatbf)
                return -ENOMEM;

        hdr = reformatbf;
        if (attrs->encap) {
                udphdr = (struct udphdr *)reformatbf;
                udphdr->uh_sport = attrs->sport;
                udphdr->uh_dport = attrs->dport;
                hdr += sizeof(*udphdr);
        }

        /* convert to network format */
        spi = htonl(attrs->spi);
        memcpy(hdr, &spi, 4);

        reformat_params->param_0 = attrs->authsize;
        reformat_params->size = bfflen;
        reformat_params->data = reformatbf;

        return 0;
}

static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
                              struct mlx5_accel_esp_xfrm_attrs *attrs,
                              struct mlx5_flow_act *flow_act)
{
        enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
        struct mlx5_pkt_reformat_params reformat_params = {};
        struct mlx5_pkt_reformat *pkt_reformat;
        int ret;

        if (attrs->dir == IPSEC_DIR_INBOUND) {
                if (attrs->encap)
                        reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
                else
                        reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
                ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
                goto cmd;
        }

        ret = setup_pkt_transport_reformat(attrs, &reformat_params);
        if (ret)
                return ret;
cmd:
        pkt_reformat =
            mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
        if (reformat_params.data)
                kfree(reformat_params.data);
        if (IS_ERR(pkt_reformat))
                return PTR_ERR(pkt_reformat);

        flow_act->pkt_reformat = pkt_reformat;
        flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
        return 0;
}

static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
                            __be32 *daddr)
{
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
                            __be32 *daddr)
{
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
        memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
        memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                            outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
        memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                            outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
}
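
/*
 * RX SA rule: match the outer IP addresses, the SPI (inner SPI for
 * UDP-encapsulated traffic, plus an ESP protocol match otherwise) and
 * non-fragmented packets, then decrypt with the SA's IPsec object, delete
 * the ESP transport header, count, tag REG_B with kspi | BIT(31) and
 * forward to the status table; SAs marked with attrs->drop get a drop
 * action instead.
 */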

static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct mlx5e_ipsec_rx *rx;
        struct mlx5_fc *counter;
        int err;

        rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

        if (!attrs->encap)
                setup_fte_esp(spec);

        setup_fte_spi(spec, attrs->spi, attrs->encap);
        setup_fte_no_frags(spec);

        if (!attrs->drop) {
                err = setup_modify_header(mdev, sa_entry->kspi | BIT(31), IPSEC_DIR_INBOUND,
                                          &flow_act);
                if (err)
                        goto err_mod_header;
        }

        err = setup_pkt_reformat(mdev, attrs, &flow_act);
        if (err)
                goto err_pkt_reformat;

        counter = mlx5_fc_create(mdev, false);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_add_cnt;
        }

        flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
        flow_act.crypto.op = MLX5_FLOW_ACT_CRYPTO_OP_DECRYPT;
        flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
        flow_act.flags |= FLOW_ACT_NO_APPEND;

        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
                           MLX5_FLOW_CONTEXT_ACTION_COUNT;

        if (attrs->drop)
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
        else
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[0].ft = rx->ft.status;
        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[1].counter_id = mlx5_fc_id(counter);

        rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
                goto err_add_flow;
        }

        kvfree(spec);
        ipsec_rule->rule = rule;
        ipsec_rule->fc = counter;
        ipsec_rule->modify_hdr = flow_act.modify_hdr;
        ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
        return 0;

err_add_flow:
        mlx5_fc_destroy(mdev, counter);
err_add_cnt:
        mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
        if (flow_act.modify_hdr)
                mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
        kvfree(spec);

        return err;
}

static struct mlx5_flow_table *ipsec_tx_ft_create(struct mlx5_flow_namespace *ns,
                                                  int level, int prio,
                                                  int max_num_groups)
{
        struct mlx5_flow_table_attr ft_attr = {};

        ft_attr.autogroup.num_reserved_entries = 1;
        ft_attr.autogroup.max_num_groups = max_num_groups;
        ft_attr.max_fte = NUM_IPSEC_FTE;
        ft_attr.level = level;
        ft_attr.prio = prio;

        return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}
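
/*
 * The TX status table terminates the pipeline: its single rule counts
 * every packet that passed the SA tables and then allows it to continue
 * to the TX namespace.
 */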

static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *fte;
        int err;

        /* create fte */
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_COUNT |
                          MLX5_FLOW_CONTEXT_ACTION_ALLOW;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest.counter_id = mlx5_fc_id(tx->fc->cnt);
        fte = mlx5_add_flow_rules(tx->ft.status, NULL, &flow_act, &dest, 1);
        if (IS_ERR_OR_NULL(fte)) {
                err = PTR_ERR(fte);
                mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
                goto err_rule;
        }

        tx->status.rule = fte;
        return 0;

err_rule:
        return err;
}

static void tx_destroy_roce(struct mlx5e_ipsec_tx *tx)
{
        if (!tx->roce.ft)
                return;

        mlx5_del_flow_rules(&tx->roce.rule);
        mlx5_destroy_flow_group(tx->roce.g);
        mlx5_destroy_flow_table(tx->roce.ft);
        tx->roce.ft = NULL;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec_tx *tx)
{
        tx_destroy_roce(tx);
        if (tx->chains) {
                ipsec_chains_destroy(tx->chains);
        } else {
                mlx5_del_flow_rules(&tx->pol.rule);
                mlx5_destroy_flow_group(tx->pol.group);
                mlx5_destroy_flow_table(tx->ft.pol);
        }
        mlx5_destroy_flow_table(tx->ft.sa);
        mlx5_del_flow_rules(&tx->kspi_miss.rule);
        mlx5_destroy_flow_group(tx->kspi_miss.group);
        mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
        mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
        mlx5_destroy_flow_table(tx->ft.sa_kspi);
        mlx5_del_flow_rules(&tx->status.rule);
        mlx5_destroy_flow_table(tx->ft.status);
}

static int ipsec_tx_roce_rule_setup(struct mlx5_core_dev *mdev,
                                    struct mlx5e_ipsec_tx *tx)
{
        struct mlx5_flow_destination dst = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        int err = 0;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
        dst.ft = tx->ft.pol;
        rule = mlx5_add_flow_rules(tx->roce.ft, NULL, &flow_act, &dst, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "Fail to add TX roce ipsec rule err=%d\n",
                              err);
                goto out;
        }
        tx->roce.rule = rule;

out:
        return err;
}
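
/*
 * RDMA TX hookup: a single-entry table in the RDMA TX IPsec namespace
 * whose one catch-all rule forwards RoCE traffic into the Ethernet policy
 * table, so RoCE shares the policy/SA/status pipeline built by
 * tx_create().
 */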

static int ipsec_tx_create_roce(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_table *ft;
        struct mlx5_flow_group *g;
        int ix = 0;
        int err;
        u32 *in;

        if (!tx->roce.ns)
                return -EOPNOTSUPP;

        in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        ft_attr.max_fte = 1;
        ft = mlx5_create_flow_table(tx->roce.ns, &ft_attr);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                mlx5_core_err(mdev, "Fail to create ipsec tx roce ft err=%d\n",
                              err);
                goto fail_table;
        }
        tx->roce.ft = ft;

        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += 1;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        g = mlx5_create_flow_group(ft, in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_err(mdev, "Fail to create ipsec tx roce group err=%d\n",
                              err);
                goto fail_group;
        }
        tx->roce.g = g;

        err = ipsec_tx_roce_rule_setup(mdev, tx);
        if (err) {
                mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
                goto fail_rule;
        }

        kvfree(in);
        return 0;

fail_rule:
        mlx5_destroy_flow_group(tx->roce.g);
fail_group:
        mlx5_destroy_flow_table(tx->roce.ft);
        tx->roce.ft = NULL;
fail_table:
        kvfree(in);
        return err;
}

/*
 * Set a rule in the KSPI table for values that should bypass IPSEC.
 *
 * mdev - mlx5 core device
 * tx - IPSEC TX
 * return - 0 for success, errno for failure
 */
static int tx_create_kspi_bypass_rules(struct mlx5_core_dev *mdev,
                                       struct mlx5e_ipsec_tx *tx)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_act flow_act_kspi = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        dest.ft = tx->ft.status;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        flow_act_kspi.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        setup_fte_reg_a_with_tag(spec, IPSEC_ACCEL_DRV_SPI_BYPASS);
        rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act_kspi,
                                   &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n",
                              err);
                goto err_add_kspi_rule;
        }
        tx->kspi_bypass_rule.kspi_rule = rule;

        /* set the rule for packets without ipsec tag. */
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        memset(spec, 0, sizeof(*spec));
        setup_fte_reg_a_no_tag(spec);
        rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n", err);
                goto err_add_rule;
        }
        tx->kspi_bypass_rule.rule = rule;

        kvfree(spec);
        return 0;
err_add_rule:
        mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
err_add_kspi_rule:
        kvfree(spec);
        return err;
}

static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_table *ft;
        int err;

        /*
         * The Tx flow is different for Ethernet traffic than for RoCE packets.
         * For Ethernet packets we start in the SA KSPI table, which matches
         * the KSPI of the SA rule to the KSPI in the packet metadata.
         * For RoCE traffic we start in the policy table, then move to the SA
         * table, which matches either the reqid of the SA rule to the reqid
         * reported by the policy table, or the IP header fields of the SA to
         * the packet IP header fields.
         * Tables are ordered by their level, so the KSPI table gets level 0
         * to have it first for Ethernet traffic.
         * For RoCE, the RoCE TX table directs the packets to the policy table
         * explicitly.
         */
        ft = ipsec_tx_ft_create(tx->ns, 0, 0, 4);
        if (IS_ERR(ft))
                return PTR_ERR(ft);
        tx->ft.sa_kspi = ft;

        ft = ipsec_tx_ft_create(tx->ns, 2, 0, 4);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_reqid_ft;
        }
        tx->ft.sa = ft;

        if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
                tx->chains = ipsec_chains_create(
                    mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 1,
                    &tx->ft.pol);
                if (IS_ERR(tx->chains)) {
                        err = PTR_ERR(tx->chains);
                        goto err_pol_ft;
                }
        } else {
                ft = ipsec_tx_ft_create(tx->ns, 1, 0, 2);
                if (IS_ERR(ft)) {
                        err = PTR_ERR(ft);
                        goto err_pol_ft;
                }
                tx->ft.pol = ft;
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = tx->ft.sa;
                err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
                if (err)
                        goto err_pol_miss;
        }

        ft = ipsec_tx_ft_create(tx->ns, 2, 0, 1);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_status_ft;
        }
        tx->ft.status = ft;

        /* set miss rule for kspi table with drop action */
        err = ipsec_miss_create(mdev, tx->ft.sa_kspi, &tx->kspi_miss, NULL);
        if (err)
                goto err_kspi_miss;

        err = tx_create_kspi_bypass_rules(mdev, tx);
        if (err)
                goto err_kspi_rule;

        err = ipsec_counter_rule_tx(mdev, tx);
        if (err)
                goto err_status_rule;

        err = ipsec_tx_create_roce(mdev, tx);
        if (err)
                goto err_counter_rule;

        return 0;

err_counter_rule:
        mlx5_del_flow_rules(&tx->status.rule);
err_status_rule:
        mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
        mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
err_kspi_rule:
        mlx5_destroy_flow_table(tx->ft.status);
err_status_ft:
        if (tx->chains) {
                ipsec_chains_destroy(tx->chains);
        } else {
                mlx5_del_flow_rules(&tx->pol.rule);
                mlx5_destroy_flow_group(tx->pol.group);
        }
err_pol_miss:
        if (!tx->chains)
                mlx5_destroy_flow_table(tx->ft.pol);
err_pol_ft:
        mlx5_del_flow_rules(&tx->kspi_miss.rule);
        mlx5_destroy_flow_group(tx->kspi_miss.group);
err_kspi_miss:
        mlx5_destroy_flow_table(tx->ft.sa);
err_reqid_ft:
        mlx5_destroy_flow_table(tx->ft.sa_kspi);
        return err;
}

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
                  struct mlx5e_ipsec_tx *tx)
{
        int err;

        if (tx->ft.refcnt)
                goto skip;

        err = tx_create(mdev, tx);
        if (err)
                return err;

skip:
        tx->ft.refcnt++;
        return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
        if (--tx->ft.refcnt)
                return;

        tx_destroy(tx);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
                                        struct mlx5e_ipsec *ipsec)
{
        struct mlx5e_ipsec_tx *tx = ipsec->tx;
        int err;

        mutex_lock(&tx->ft.mutex);
        err = tx_get(mdev, ipsec, tx);
        mutex_unlock(&tx->ft.mutex);
        if (err)
                return ERR_PTR(err);

        return tx;
}
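
/*
 * tx_ft_get_policy()/tx_ft_put_policy() bracket policy insertion: they
 * take a reference on the shared TX tree and resolve the policy table for
 * the given priority, either a chains level or the fixed tx->ft.pol table.
 */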

static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
                                                struct mlx5e_ipsec *ipsec,
                                                u32 prio)
{
        struct mlx5e_ipsec_tx *tx = ipsec->tx;
        struct mlx5_flow_table *ft;
        int err;

        mutex_lock(&tx->ft.mutex);
        err = tx_get(mdev, ipsec, tx);
        if (err)
                goto err_get;

        ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_get_ft;
        }

        mutex_unlock(&tx->ft.mutex);
        return ft;

err_get_ft:
        tx_put(ipsec, tx);
err_get:
        mutex_unlock(&tx->ft.mutex);
        return ERR_PTR(err);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio)
{
        struct mlx5e_ipsec_tx *tx = ipsec->tx;

        mutex_lock(&tx->ft.mutex);
        if (tx->chains)
                ipsec_chains_put_table(tx->chains, prio);

        tx_put(ipsec, tx);
        mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec)
{
        struct mlx5e_ipsec_tx *tx = ipsec->tx;

        mutex_lock(&tx->ft.mutex);
        tx_put(ipsec, tx);
        mutex_unlock(&tx->ft.mutex);
}

static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
                                     u16 kspi)
{
        /* Add IPsec indicator in metadata_reg_a. */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         misc_parameters_2.metadata_reg_a);
        MLX5_SET(fte_match_param, spec->match_value,
                 misc_parameters_2.metadata_reg_a,
                 MLX5_ETH_WQE_FT_META_IPSEC << 23 | kspi);
}

static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec)
{
        /* Add IPsec indicator in metadata_reg_a. */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

        MLX5_SET(fte_match_param, spec->match_criteria,
                 misc_parameters_2.metadata_reg_a,
                 MLX5_ETH_WQE_FT_META_IPSEC << 23);
        MLX5_SET(fte_match_param, spec->match_value,
                 misc_parameters_2.metadata_reg_a,
                 0);
}

static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
{
        /* Pass policy check before choosing this SA */
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         misc_parameters_2.metadata_reg_c_0);
        MLX5_SET(fte_match_param, spec->match_value,
                 misc_parameters_2.metadata_reg_c_0, reqid);
}

static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
{
        switch (upspec->proto) {
        case IPPROTO_UDP:
                if (upspec->dport) {
                        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
                                         spec->match_criteria, udp_dport);
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
                                 udp_dport, upspec->dport);
                }

                if (upspec->sport) {
                        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
                                         spec->match_criteria, udp_sport);
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
                                 udp_sport, upspec->sport);
                }
                break;
        case IPPROTO_TCP:
                if (upspec->dport) {
                        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
                                         spec->match_criteria, tcp_dport);
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
                                 tcp_dport, upspec->dport);
                }

                if (upspec->sport) {
                        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
                                         spec->match_criteria, tcp_sport);
                        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
                                 tcp_sport, upspec->sport);
                }
                break;
        default:
                return;
        }

        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
}
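
/*
 * A TX SA installs up to three rules sharing one action set: a KSPI rule
 * keyed on the metadata tag stamped by the stack, an optional reqid rule
 * keyed on REG_C_0 written by the policy table, and an IP-address rule as
 * a fallback.
 */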

static int tx_add_kspi_rule(struct mlx5e_ipsec_sa_entry *sa_entry,
                            struct mlx5e_ipsec_tx *tx,
                            struct mlx5_flow_act *flow_act,
                            struct mlx5_flow_destination *dest,
                            int num_dest)
{
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        setup_fte_no_frags(spec);
        setup_fte_reg_a_with_tag(spec, sa_entry->kspi);

        rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, flow_act, dest, num_dest);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add TX ipsec kspi rule err=%d\n", err);
                goto err_add_kspi_flow;
        }
        ipsec_rule->kspi_rule = rule;
        kvfree(spec);
        return 0;

err_add_kspi_flow:
        kvfree(spec);
        return err;
}

static int tx_add_reqid_ip_rules(struct mlx5e_ipsec_sa_entry *sa_entry,
                                 struct mlx5e_ipsec_tx *tx,
                                 struct mlx5_flow_act *flow_act,
                                 struct mlx5_flow_destination *dest,
                                 int num_dest)
{
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

        if (attrs->reqid) {
                setup_fte_no_frags(spec);
                setup_fte_reg_c0(spec, attrs->reqid);
                rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
                if (IS_ERR(rule)) {
                        err = PTR_ERR(rule);
                        mlx5_core_err(mdev, "fail to add TX ipsec reqid rule err=%d\n", err);
                        goto err_add_reqid_rule;
                }
                ipsec_rule->reqid_rule = rule;
                memset(spec, 0, sizeof(*spec));
        }

        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
        setup_fte_no_frags(spec);

        rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add TX ipsec ip rule err=%d\n", err);
                goto err_add_ip_rule;
        }
        ipsec_rule->rule = rule;
        kvfree(spec);
        return 0;

err_add_ip_rule:
        mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
err_add_reqid_rule:
        kvfree(spec);
        return err;
}
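
/*
 * Build the shared TX action set (encrypt with the SA's IPsec object,
 * count, and forward to the status table or drop), then install the KSPI
 * and reqid/IP rules that reference it.
 */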

static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5e_ipsec_tx *tx;
        struct mlx5_fc *counter;
        int err;

        tx = tx_ft_get(mdev, ipsec);
        if (IS_ERR(tx))
                return PTR_ERR(tx);

        err = setup_pkt_reformat(mdev, attrs, &flow_act);
        if (err)
                goto err_pkt_reformat;

        counter = mlx5_fc_create(mdev, false);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_add_cnt;
        }

        flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
        flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
        flow_act.flags |= FLOW_ACT_NO_APPEND;
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
                           MLX5_FLOW_CONTEXT_ACTION_COUNT;

        if (attrs->drop)
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
        else
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        dest[0].ft = tx->ft.status;
        dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[1].counter_id = mlx5_fc_id(counter);

        err = tx_add_kspi_rule(sa_entry, tx, &flow_act, dest, 2);
        if (err)
                goto err_add_kspi_rule;

        err = tx_add_reqid_ip_rules(sa_entry, tx, &flow_act, dest, 2);
        if (err)
                goto err_add_reqid_ip_rule;

        ipsec_rule->fc = counter;
        ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
        return 0;

err_add_reqid_ip_rule:
        mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
err_add_kspi_rule:
        mlx5_fc_destroy(mdev, counter);
err_add_cnt:
        if (flow_act.pkt_reformat)
                mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
        tx_ft_put(ipsec);
        return err;
}

static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
        struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
        struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx;
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_table *ft;
        int err, dstn = 0;

        ft = tx_ft_get_policy(mdev, pol_entry->ipsec, attrs->prio);
        if (IS_ERR(ft))
                return PTR_ERR(ft);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto err_alloc;
        }

        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

        setup_fte_no_frags(spec);
        setup_fte_upper_proto_match(spec, &attrs->upspec);

        switch (attrs->action) {
        case IPSEC_POLICY_IPSEC:
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                /*
                 * if (!attrs->reqid)
                 *         break;
                 */
                err = setup_modify_header(mdev, attrs->reqid,
                                          IPSEC_DIR_OUTBOUND, &flow_act);
                if (err)
                        goto err_mod_header;
                break;
        case IPSEC_POLICY_DISCARD:
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                   MLX5_FLOW_CONTEXT_ACTION_COUNT;
                dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
                dstn++;
                break;
        default:
                err = -EINVAL;
                goto err_mod_header;
        }

        flow_act.flags |= FLOW_ACT_NO_APPEND;
        dest[dstn].ft = tx->ft.sa;
        dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dstn++;
        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
                goto err_action;
        }

        kvfree(spec);
        pol_entry->ipsec_rule.rule = rule;
        pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
        return 0;

err_action:
        if (flow_act.modify_hdr)
                mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
        kvfree(spec);
err_alloc:
        tx_ft_put_policy(pol_entry->ipsec, attrs->prio);
        return err;
}
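
/*
 * RX policy rule: match the policy selectors (addresses, upper-protocol
 * ports, non-fragmented) in the policy table and either forward to the SA
 * table or drop and count, mirroring tx_add_policy() without the reqid
 * modify-header.
 */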

static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
        struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
        struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
        struct mlx5_flow_destination dest[2];
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_table *ft;
        struct mlx5e_ipsec_rx *rx;
        int err, dstn = 0;

        rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;
        ft = rx->chains ? ipsec_chains_get_table(rx->chains, attrs->prio) : rx->ft.pol;
        if (IS_ERR(ft))
                return PTR_ERR(ft);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto err_alloc;
        }

        if (attrs->family == AF_INET)
                setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
        else
                setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

        setup_fte_no_frags(spec);
        setup_fte_upper_proto_match(spec, &attrs->upspec);

        switch (attrs->action) {
        case IPSEC_POLICY_IPSEC:
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                break;
        case IPSEC_POLICY_DISCARD:
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
                dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
                dstn++;
                break;
        default:
                err = -EINVAL;
                goto err_action;
        }

        flow_act.flags |= FLOW_ACT_NO_APPEND;
        dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[dstn].ft = rx->ft.sa;
        dstn++;
        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
                goto err_action;
        }

        kvfree(spec);
        pol_entry->ipsec_rule.rule = rule;
        return 0;

err_action:
        kvfree(spec);
err_alloc:
        if (rx->chains)
                ipsec_chains_put_table(rx->chains, attrs->prio);
        return err;
}

static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
        struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5e_ipsec_tx *tx = ipsec->tx;

        mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
        mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
        kfree(rx_ipv4->fc);
        mlx5_fc_destroy(mdev, tx->fc->drop);
        mlx5_fc_destroy(mdev, tx->fc->cnt);
        kfree(tx->fc);
}
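
/*
 * Counter setup/teardown: TX gets its own cnt/drop pair, while the IPv4
 * and IPv6 RX sides share a single pair, which is why only rx_ipv4's
 * struct is freed above.
 */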

static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
{
        struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
        struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5e_ipsec_tx *tx = ipsec->tx;
        struct mlx5e_ipsec_fc *fc;
        struct mlx5_fc *counter;
        int err;

        fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
        if (!fc)
                return -ENOMEM;

        tx->fc = fc;
        counter = mlx5_fc_create(mdev, false);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_tx_fc_alloc;
        }

        fc->cnt = counter;
        counter = mlx5_fc_create(mdev, false);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_tx_fc_cnt;
        }

        fc->drop = counter;

        fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
        if (!fc) {
                err = -ENOMEM;
                goto err_tx_fc_drop;
        }

        /* Both IPv4 and IPv6 point to same flow counters struct. */
        rx_ipv4->fc = fc;
        rx_ipv6->fc = fc;
        counter = mlx5_fc_create(mdev, false);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_rx_fc_alloc;
        }

        fc->cnt = counter;
        counter = mlx5_fc_create(mdev, false);
        if (IS_ERR(counter)) {
                err = PTR_ERR(counter);
                goto err_rx_fc_cnt;
        }

        fc->drop = counter;
        return 0;

err_rx_fc_cnt:
        mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
err_rx_fc_alloc:
        kfree(rx_ipv4->fc);
err_tx_fc_drop:
        mlx5_fc_destroy(mdev, tx->fc->drop);
err_tx_fc_cnt:
        mlx5_fc_destroy(mdev, tx->fc->cnt);
err_tx_fc_alloc:
        kfree(tx->fc);
        return err;
}
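
/*
 * RX status rule: copy the 7-bit ipsec_syndrome into REG_B[24:30] so the
 * datapath can check the decryption result, then count and forward out of
 * the IPsec tables.
 */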

static int ipsec_status_rule(struct mlx5_core_dev *mdev,
                             struct mlx5e_ipsec_rx *rx,
                             struct mlx5_flow_destination *dest)
{
        u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_modify_hdr *modify_hdr;
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
        MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
        MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
        MLX5_SET(copy_action_in, action, src_offset, 0);
        MLX5_SET(copy_action_in, action, length, 7);
        MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
        MLX5_SET(copy_action_in, action, dst_offset, 24);

        modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
                                              1, action);

        if (IS_ERR(modify_hdr)) {
                err = PTR_ERR(modify_hdr);
                mlx5_core_err(mdev,
                              "fail to alloc ipsec copy modify_header_id err=%d\n", err);
                goto out_spec;
        }

        /* create fte */
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
                          MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
        flow_act.modify_hdr = modify_hdr;

        rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
                goto out;
        }

        kvfree(spec);
        rx->status.rule = rule;
        rx->status.modify_hdr = modify_hdr;
        return 0;

out:
        mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
        kvfree(spec);
        return err;
}

static void ipsec_fs_rx_roce_rules_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
{
        if (!rx_roce->ns_rdma)
                return;

        mlx5_del_flow_rules(&rx_roce->roce_miss.rule);
        mlx5_del_flow_rules(&rx_roce->rule);
        mlx5_destroy_flow_group(rx_roce->roce_miss.group);
        mlx5_destroy_flow_group(rx_roce->g);
}

static void ipsec_fs_rx_catchall_rules_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
        mutex_lock(&rx->ft.mutex);
        mlx5_del_flow_rules(&rx->sa.rule);
        mlx5_destroy_flow_group(rx->sa.group);
        if (rx->chains == NULL) {
                mlx5_del_flow_rules(&rx->pol.rule);
                mlx5_destroy_flow_group(rx->pol.group);
        }
        mlx5_del_flow_rules(&rx->status.rule);
        mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
        ipsec_fs_rx_roce_rules_destroy(&rx->roce);
        mutex_unlock(&rx->ft.mutex);
}

static void ipsec_fs_rx_roce_table_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
{
        if (!rx_roce->ns_rdma)
                return;

        mlx5_destroy_flow_table(rx_roce->ft_rdma);
        mlx5_destroy_flow_table(rx_roce->ft);
}

static void ipsec_fs_rx_table_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
        mutex_lock(&rx->ft.mutex);
        if (rx->chains) {
                ipsec_chains_destroy(rx->chains);
        } else {
                mlx5_del_flow_rules(&rx->pol.rule);
                mlx5_destroy_flow_table(rx->ft.pol);
        }
        mlx5_destroy_flow_table(rx->ft.sa);
        mlx5_destroy_flow_table(rx->ft.status);
        ipsec_fs_rx_roce_table_destroy(&rx->roce);
        mutex_unlock(&rx->ft.mutex);
}

static void ipsec_roce_setup_udp_dport(struct mlx5_flow_spec *spec, u16 dport)
{
        spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
}

static int ipsec_roce_rx_rule_setup(struct mlx5_flow_destination *default_dst,
                                    struct mlx5e_ipsec_rx_roce *roce, struct mlx5_core_dev *mdev)
{
        struct mlx5_flow_destination dst = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        ipsec_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);

        dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
        dst.ft = roce->ft_rdma;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "Fail to add RX roce ipsec rule err=%d\n",
                              err);
                goto fail_add_rule;
        }

        roce->rule = rule;

        rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, default_dst, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                mlx5_core_err(mdev, "Fail to add RX roce ipsec miss rule err=%d\n",
                              err);
                goto fail_add_default_rule;
        }

        roce->roce_miss.rule = rule;

        kvfree(spec);
        return 0;

fail_add_default_rule:
        mlx5_del_flow_rules(&roce->rule);
fail_add_rule:
        kvfree(spec);
        return err;
}
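
/*
 * The NIC RX RoCE table needs exactly two flow groups: one for the
 * RoCE v2 UDP-dport match and one for the catch-all miss entry.
 */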

static int ipsec_roce_rx_rules(struct mlx5e_ipsec_rx *rx, struct mlx5_flow_destination *defdst,
                               struct mlx5_core_dev *mdev)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        void *outer_headers_c;
        u32 *in;
        int err = 0;
        int ix = 0;
        u8 *mc;

        if (!rx->roce.ns_rdma)
                return 0;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);

        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += 1;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        g = mlx5_create_flow_group(rx->roce.ft, in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_err(mdev, "Fail to create ipsec rx roce group at nic err=%d\n", err);
                goto fail_group;
        }
        rx->roce.g = g;

        memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += 1;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        g = mlx5_create_flow_group(rx->roce.ft, in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_err(mdev, "Fail to create ipsec rx roce miss group at nic err=%d\n",
                              err);
                goto fail_mgroup;
        }
        rx->roce.roce_miss.group = g;

        err = ipsec_roce_rx_rule_setup(defdst, &rx->roce, mdev);
        if (err)
                goto fail_setup_rule;

        kvfree(in);
        return 0;

fail_setup_rule:
        mlx5_destroy_flow_group(rx->roce.roce_miss.group);
fail_mgroup:
        mlx5_destroy_flow_group(rx->roce.g);
fail_group:
        kvfree(in);
        return err;
}
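
/*
 * Install the per-family default rules once the tables exist: the RoCE
 * steering rules, the status-table rule, and the policy/SA miss rules
 * pointing at the default (vlan) destination.
 */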

static int ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv,
                                      struct mlx5e_ipsec_rx *rx,
                                      struct mlx5_flow_destination *defdst)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_flow_destination dest[2] = {};
        int err = 0;

        mutex_lock(&rx->ft.mutex);
        /* IPsec RoCE RX rules */
        err = ipsec_roce_rx_rules(rx, defdst, mdev);
        if (err)
                goto out;

        /* IPsec Rx IP Status table rule */
        dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        if (rx->roce.ft)
                dest[0].ft = rx->roce.ft;
        else
                dest[0].ft = priv->fts.vlan.t;

        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
        err = ipsec_status_rule(mdev, rx, dest);
        if (err)
                goto err_roce_rules_destroy;

        if (!rx->chains) {
                /* IPsec Rx IP policy default miss rule */
                err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, defdst);
                if (err)
                        goto err_status_rule_destroy;
        }

        /*
         * FIXME: This is a workaround to the current design, which installs
         * the SA on the first packet, so we need to forward this packet to
         * the stack.  It doesn't work with RoCE and eswitch traffic.
         */
        err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, defdst);
        if (err)
                goto err_status_sa_rule_destroy;

        mutex_unlock(&rx->ft.mutex);
        return 0;

err_status_sa_rule_destroy:
        if (!rx->chains) {
                mlx5_del_flow_rules(&rx->pol.rule);
                mlx5_destroy_flow_group(rx->pol.group);
        }
err_status_rule_destroy:
        mlx5_del_flow_rules(&rx->status.rule);
        mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_roce_rules_destroy:
        ipsec_fs_rx_roce_rules_destroy(&rx->roce);
out:
        mutex_unlock(&rx->ft.mutex);
        return err;
}

static int ipsec_fs_rx_roce_tables_create(struct mlx5e_ipsec_rx *rx,
                                          int rx_init_level, int rdma_init_level)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_table *ft;
        int err = 0;

        if (!rx->roce.ns_rdma)
                return 0;

        ft_attr.max_fte = 2;
        ft_attr.level = rx_init_level;
        ft = mlx5_create_flow_table(rx->ns, &ft_attr);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                return err;
        }
        rx->roce.ft = ft;

        ft_attr.max_fte = 0;
        ft_attr.level = rdma_init_level;
        ft = mlx5_create_flow_table(rx->roce.ns_rdma, &ft_attr);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto out;
        }
        rx->roce.ft_rdma = ft;

        return 0;
out:
        mlx5_destroy_flow_table(rx->roce.ft);
        rx->roce.ft = NULL;
        return err;
}

static int ipsec_fs_rx_table_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx,
                                    int rx_init_level, int rdma_init_level)
{
        struct mlx5_flow_namespace *ns = rx->ns;
        struct mlx5_flow_table *ft;
        int err = 0;

        mutex_lock(&rx->ft.mutex);

        /* IPsec Rx IP SA table create */
        ft = ipsec_rx_ft_create(ns, rx_init_level + 1, 0, 1);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto out;
        }
        rx->ft.sa = ft;

        /* IPsec Rx IP Status table create */
        ft = ipsec_rx_ft_create(ns, rx_init_level + 2, 0, 1);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_sa_table_destroy;
        }
        rx->ft.status = ft;

        if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
                rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
                                                 MLX5_FLOW_NAMESPACE_KERNEL, 0,
                                                 rx_init_level, &rx->ft.pol);
                if (IS_ERR(rx->chains)) {
                        err = PTR_ERR(rx->chains);
                        goto err_status_table_destroy;
                }
        } else {
                ft = ipsec_rx_ft_create(ns, rx_init_level, 0, 1);
                if (IS_ERR(ft)) {
                        err = PTR_ERR(ft);
                        goto err_status_table_destroy;
                }
                rx->ft.pol = ft;
        }

        /* IPsec RoCE RX tables create */
        err = ipsec_fs_rx_roce_tables_create(rx, rx_init_level + 3,
                                             rdma_init_level);
        if (err)
                goto err_pol_table_destroy;

        goto out;

err_pol_table_destroy:
        mlx5_destroy_flow_table(rx->ft.pol);
err_status_table_destroy:
        mlx5_destroy_flow_table(rx->ft.status);
err_sa_table_destroy:
        mlx5_destroy_flow_table(rx->ft.sa);
out:
        mutex_unlock(&rx->ft.mutex);
        return err;
}

#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
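
/*
 * RoCE steering is optional: both flow_table_type_2_type capability bits
 * (NIC RX to RDMA RX and RDMA TX to NIC TX) and the RDMA IPsec
 * namespaces must be present, otherwise ns_rdma/ns stay NULL and the
 * RoCE tables are simply skipped.
 */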

static void mlx5e_accel_ipsec_fs_init_roce(struct mlx5e_ipsec *ipsec)
{
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5_flow_namespace *ns;

        if ((MLX5_CAP_GEN_2(ipsec->mdev, flow_table_type_2_type) &
             NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) {
                mlx5_core_dbg(mdev, "Failed to init roce ns, capabilities not supported\n");
                return;
        }

        ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
        if (!ns) {
                mlx5_core_err(mdev, "Failed to init roce rx ns\n");
                return;
        }

        ipsec->rx_ipv4->roce.ns_rdma = ns;
        ipsec->rx_ipv6->roce.ns_rdma = ns;

        ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
        if (!ns) {
                ipsec->rx_ipv4->roce.ns_rdma = NULL;
                ipsec->rx_ipv6->roce.ns_rdma = NULL;
                mlx5_core_err(mdev, "Failed to init roce tx ns\n");
                return;
        }

        ipsec->tx->roce.ns = ns;
}

int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
                return tx_add_rule(sa_entry);

        return rx_add_rule(sa_entry);
}

void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

        mlx5_del_flow_rules(&ipsec_rule->rule);
        mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
        if (ipsec_rule->reqid_rule)
                mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
        mlx5_fc_destroy(mdev, ipsec_rule->fc);
        mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
        if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND) {
                tx_ft_put(sa_entry->ipsec);
                return;
        }

        if (ipsec_rule->modify_hdr)
                mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
}

int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
        if (pol_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
                return tx_add_policy(pol_entry);

        return rx_add_policy(pol_entry);
}

void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
        struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

        mlx5_del_flow_rules(&ipsec_rule->rule);

        if (pol_entry->attrs.dir == IPSEC_DIR_INBOUND) {
                struct mlx5e_ipsec_rx *rx;

                rx = (pol_entry->attrs.family == AF_INET)
                    ? pol_entry->ipsec->rx_ipv4
                    : pol_entry->ipsec->rx_ipv6;
                if (rx->chains)
                        ipsec_chains_put_table(rx->chains,
                                               pol_entry->attrs.prio);
                return;
        }

        if (ipsec_rule->modify_hdr)
                mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

        tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio);
}

void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv)
{
        /* Check if IPsec supported */
        if (!priv->ipsec)
                return;

        ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv4);
        ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
}

int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv)
{
        struct mlx5e_ipsec *ipsec = priv->ipsec;
        struct mlx5_flow_destination dest = {};
        int err = 0;

        /* Check if IPsec supported */
        if (!ipsec)
                return 0;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fts.vlan.t;
        err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv6, &dest);
        if (err)
                goto out;

        err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv4, &dest);
        if (err)
                ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
out:
        return err;
}

void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_ipsec *ipsec = priv->ipsec;

        /* Check if IPsec supported */
        if (!ipsec)
                return;

        ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv6);
        ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv4);
}

int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv)
{
        struct mlx5e_ipsec *ipsec = priv->ipsec;
        int err = 0;

        /* Check if IPsec supported */
        if (!ipsec)
                return 0;

        err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv4, 0, 0);
        if (err)
                goto out;

        err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv6, 4, 1);
        if (err) {
                ipsec_fs_rx_table_destroy(priv->mdev, ipsec->rx_ipv4);
                goto out;
        }

        priv->fts.ipsec_ft = priv->ipsec->rx_ipv4->ft.pol;
out:
        return err;
}

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
        WARN_ON(ipsec->tx->ft.refcnt);
        mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
        mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
        mutex_destroy(&ipsec->tx->ft.mutex);
        ipsec_fs_destroy_counters(ipsec);
        kfree(ipsec->rx_ipv6);
        kfree(ipsec->rx_ipv4);
        kfree(ipsec->tx);
}
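
/*
 * Top-level init: probe both namespaces first so unsupported devices bail
 * out with -EOPNOTSUPP before anything is allocated, then allocate the
 * TX/RX contexts and counters; the RoCE namespaces are probed last and
 * are allowed to be absent.
 */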

int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
        struct mlx5_flow_namespace *tns, *rns;
        int err = -ENOMEM;

        tns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
        if (!tns)
                return -EOPNOTSUPP;

        rns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
        if (!rns)
                return -EOPNOTSUPP;

        ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
        if (!ipsec->tx)
                return -ENOMEM;

        ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
        if (!ipsec->rx_ipv4)
                goto err_tx;

        ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
        if (!ipsec->rx_ipv6)
                goto err_rx_ipv4;

        err = ipsec_fs_init_counters(ipsec);
        if (err)
                goto err_rx_ipv6;

        ipsec->tx->ns = tns;
        mutex_init(&ipsec->tx->ft.mutex);
        ipsec->rx_ipv4->ns = rns;
        ipsec->rx_ipv6->ns = rns;
        mutex_init(&ipsec->rx_ipv4->ft.mutex);
        mutex_init(&ipsec->rx_ipv6->ft.mutex);

        mlx5e_accel_ipsec_fs_init_roce(ipsec);

        return 0;

err_rx_ipv6:
        kfree(ipsec->rx_ipv6);
err_rx_ipv4:
        kfree(ipsec->rx_ipv4);
err_tx:
        kfree(ipsec->tx);
        return err;
}

void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
        int err;

        memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
        memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));

        err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
        if (err)
                return;
        mlx5e_accel_ipsec_fs_del_rule(sa_entry);
        memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}