// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include <linux/mlx5/device.h>
#include <net/psp.h>
#include <linux/psp.h>
#include "mlx5_core.h"
#include "psp.h"
#include "lib/crypto.h"
#include "en_accel/psp.h"
#include "fs_core.h"

enum accel_fs_psp_type {
	ACCEL_FS_PSP4,
	ACCEL_FS_PSP6,
	ACCEL_FS_PSP_NUM_TYPES,
};

enum accel_psp_syndrome {
	PSP_OK = 0,
	PSP_ICV_FAIL,
	PSP_BAD_TRAILER,
};

struct mlx5e_psp_tx {
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *rule;
	struct mutex mutex; /* Protect PSP TX steering */
	u32 refcnt;
	struct mlx5_fc *tx_counter;
};

struct mlx5e_psp_rx_err {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_handle *auth_fail_rule;
	struct mlx5_flow_handle *err_rule;
	struct mlx5_flow_handle *bad_rule;
	struct mlx5_modify_hdr *copy_modify_hdr;
};

struct mlx5e_accel_fs_psp_prot {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_destination default_dest;
	struct mlx5e_psp_rx_err rx_err;
	u32 refcnt;
	struct mutex prot_mutex; /* protect PSP4/PSP6 protocol */
	struct mlx5_flow_handle *def_rule;
};

struct mlx5e_accel_fs_psp {
	struct mlx5e_accel_fs_psp_prot fs_prot[ACCEL_FS_PSP_NUM_TYPES];
	struct mlx5_fc *rx_counter;
	struct mlx5_fc *rx_auth_fail_counter;
	struct mlx5_fc *rx_err_counter;
	struct mlx5_fc *rx_bad_counter;
};

struct mlx5e_psp_fs {
	struct mlx5_core_dev *mdev;
	struct mlx5e_psp_tx *tx_fs;
	/* Rx manage */
	struct mlx5e_flow_steering *fs;
	struct mlx5e_accel_fs_psp *rx_fs;
};

/* PSP RX flow steering */
static enum mlx5_traffic_types fs_psp2tt(enum accel_fs_psp_type i)
{
	if (i == ACCEL_FS_PSP4)
		return MLX5_TT_IPV4_UDP;

	return MLX5_TT_IPV6_UDP;
}

static void accel_psp_fs_rx_err_del_rules(struct mlx5e_psp_fs *fs,
					  struct mlx5e_psp_rx_err *rx_err)
{
	if (rx_err->bad_rule) {
		mlx5_del_flow_rules(rx_err->bad_rule);
		rx_err->bad_rule = NULL;
	}

	if (rx_err->err_rule) {
		mlx5_del_flow_rules(rx_err->err_rule);
		rx_err->err_rule = NULL;
	}

	if (rx_err->auth_fail_rule) {
		mlx5_del_flow_rules(rx_err->auth_fail_rule);
		rx_err->auth_fail_rule = NULL;
	}

	if (rx_err->rule) {
		mlx5_del_flow_rules(rx_err->rule);
		rx_err->rule = NULL;
	}

	if (rx_err->copy_modify_hdr) {
		mlx5_modify_header_dealloc(fs->mdev, rx_err->copy_modify_hdr);
		rx_err->copy_modify_hdr = NULL;
	}
}

static void accel_psp_fs_rx_err_destroy_ft(struct mlx5e_psp_fs *fs,
					   struct mlx5e_psp_rx_err *rx_err)
{
	accel_psp_fs_rx_err_del_rules(fs, rx_err);

	if (rx_err->ft) {
		mlx5_destroy_flow_table(rx_err->ft);
		rx_err->ft = NULL;
	}
}

static void accel_psp_setup_syndrome_match(struct mlx5_flow_spec *spec,
					   enum accel_psp_syndrome syndrome)
{
	void *misc_params_2;

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
	MLX5_SET_TO_ONES(fte_match_set_misc2, misc_params_2, psp_syndrome);
	misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc_params_2, psp_syndrome, syndrome);
}

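/*
 * Populate the RX error flow table: for PSP_OK packets, copy the PSP syndrome
 * reported by the device into metadata register B and forward them to the
 * default (TTC) destination; packets with an ICV failure, a bad trailer or
 * any other syndrome are counted and dropped.
 */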
static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs,
					struct mlx5e_accel_fs_psp_prot *fs_prot,
					struct mlx5e_psp_rx_err *rx_err)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy 7 bit psp_syndrome to regB[23:29] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_PSP_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 23);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev,
			      "fail to alloc psp copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	accel_psp_setup_syndrome_match(spec, PSP_OK);
	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.modify_hdr = modify_hdr;
	dest[0].type = fs_prot->default_dest.type;
	dest[0].ft = fs_prot->default_dest.ft;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter = fs->rx_fs->rx_counter;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 2);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add psp rx err copy rule err=%d\n", err);
		goto out;
	}
	rx_err->rule = fte;

	/* add auth fail drop rule */
	memset(spec, 0, sizeof(*spec));
	memset(&flow_act, 0, sizeof(flow_act));
	accel_psp_setup_syndrome_match(spec, PSP_ICV_FAIL);
	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[0].counter = fs->rx_fs->rx_auth_fail_counter;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add psp rx auth fail drop rule err=%d\n",
			      err);
		goto out_drop_rule;
	}
	rx_err->auth_fail_rule = fte;

	/* add framing drop rule */
	memset(spec, 0, sizeof(*spec));
	memset(&flow_act, 0, sizeof(flow_act));
	accel_psp_setup_syndrome_match(spec, PSP_BAD_TRAILER);
	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[0].counter = fs->rx_fs->rx_err_counter;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add psp rx framing err drop rule err=%d\n",
			      err);
		goto out_drop_auth_fail_rule;
	}
	rx_err->err_rule = fte;

	/* add misc. errors drop rule */
	memset(spec, 0, sizeof(*spec));
	memset(&flow_act, 0, sizeof(flow_act));
	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[0].counter = fs->rx_fs->rx_bad_counter;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add psp rx misc. err drop rule err=%d\n",
			      err);
		goto out_drop_error_rule;
	}
	rx_err->bad_rule = fte;

	rx_err->copy_modify_hdr = modify_hdr;

	goto out_spec;

out_drop_error_rule:
	mlx5_del_flow_rules(rx_err->err_rule);
	rx_err->err_rule = NULL;
out_drop_auth_fail_rule:
	mlx5_del_flow_rules(rx_err->auth_fail_rule);
	rx_err->auth_fail_rule = NULL;
out_drop_rule:
	mlx5_del_flow_rules(rx_err->rule);
	rx_err->rule = NULL;
out:
	mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kfree(spec);
	return err;
}

static int accel_psp_fs_rx_err_create_ft(struct mlx5e_psp_fs *fs,
					 struct mlx5e_accel_fs_psp_prot *fs_prot,
					 struct mlx5e_psp_rx_err *rx_err)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;
	int err;

	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; /* MLX5E_ACCEL_FS_TCP_FT_LEVEL */
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(fs->mdev, "fail to create psp rx inline ft err=%d\n", err);
		return err;
	}

	rx_err->ft = ft;
	err = accel_psp_fs_rx_err_add_rule(fs, fs_prot, rx_err);
	if (err)
		goto out_err;

	return 0;

out_err:
	mlx5_destroy_flow_table(ft);
	rx_err->ft = NULL;
	return err;
}

static void accel_psp_fs_rx_fs_destroy(struct mlx5e_accel_fs_psp_prot *fs_prot)
{
	if (fs_prot->def_rule) {
		mlx5_del_flow_rules(fs_prot->def_rule);
		fs_prot->def_rule = NULL;
	}

	if (fs_prot->miss_rule) {
		mlx5_del_flow_rules(fs_prot->miss_rule);
		fs_prot->miss_rule = NULL;
	}

	if (fs_prot->miss_group) {
		mlx5_destroy_flow_group(fs_prot->miss_group);
		fs_prot->miss_group = NULL;
	}

	if (fs_prot->ft) {
		mlx5_destroy_flow_table(fs_prot->ft);
		fs_prot->ft = NULL;
	}
}

static void setup_fte_udp_psp(struct mlx5_flow_spec *spec, u16 udp_port)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport, 0xffff);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, udp_port);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, IPPROTO_UDP);
}

static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
				     struct mlx5e_accel_fs_psp_prot *fs_prot)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_modify_hdr *modify_hdr = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_flow_group *miss_group;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create FT */
	ft_attr.max_fte = 2;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "fail to create psp rx ft err=%d\n", err);
		goto out_err;
	}
	fs_prot->ft = ft;

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		mlx5_core_err(mdev, "fail to create psp rx miss_group err=%d\n", err);
		goto out_err;
	}
	fs_prot->miss_group = miss_group;

	/* Create miss rule */
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to create psp rx miss_rule err=%d\n", err);
		goto out_err;
	}
	fs_prot->miss_rule = rule;

	/* Add default Rx psp rule */
	setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
	/* Set bits [31:30] of reg_B as the PSP marker */
	/* Bits [29:23] (psp_syndrome) are set in the error FT */
#define MLX5E_PSP_MARKER_BIT (BIT(30) | BIT(31))
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(set_action_in, action, data, MLX5E_PSP_MARKER_BIT);
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, 1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev, "fail to alloc psp set modify_header_id err=%d\n", err);
		modify_hdr = NULL;
		goto out_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = modify_hdr;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs_prot->rx_err.ft;
	rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "fail to add psp rule Rx decryption, err=%d, flow_act.action = %#04X\n",
			      err, flow_act.action);
		goto out_err;
	}

	fs_prot->def_rule = rule;
	goto out;

out_err:
	accel_psp_fs_rx_fs_destroy(fs_prot);
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int accel_psp_fs_rx_destroy(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
{
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;

	accel_psp = fs->rx_fs;

	/* The netdev unreg already happened, so all offloaded rules are already removed */
	fs_prot = &accel_psp->fs_prot[type];

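	/* Remove the main RX PSP table first, then the RX error table that
	 * its default rule forwards to.
	 */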
	accel_psp_fs_rx_fs_destroy(fs_prot);

	accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);

	return 0;
}

static int accel_psp_fs_rx_create(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;
	int err;

	accel_psp = fs->rx_fs;
	fs_prot = &accel_psp->fs_prot[type];

	fs_prot->default_dest = mlx5_ttc_get_default_dest(ttc, fs_psp2tt(type));

	err = accel_psp_fs_rx_err_create_ft(fs, fs_prot, &fs_prot->rx_err);
	if (err)
		return err;

	err = accel_psp_fs_rx_create_ft(fs, fs_prot);
	if (err)
		accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);

	return err;
}

static int accel_psp_fs_rx_ft_get(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
{
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_psp *accel_psp;
	struct mlx5_ttc_table *ttc;
	int err = 0;

	if (!fs || !fs->rx_fs)
		return -EINVAL;

	ttc = mlx5e_fs_get_ttc(fs->fs, false);
	accel_psp = fs->rx_fs;
	fs_prot = &accel_psp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (fs_prot->refcnt++)
		goto out;

	/* create FT */
	err = accel_psp_fs_rx_create(fs, type);
	if (err) {
		fs_prot->refcnt--;
		goto out;
	}

	/* connect */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs_prot->ft;
	mlx5_ttc_fwd_dest(ttc, fs_psp2tt(type), &dest);

out:
	mutex_unlock(&fs_prot->prot_mutex);
	return err;
}

static void accel_psp_fs_rx_ft_put(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;

	accel_psp = fs->rx_fs;
	fs_prot = &accel_psp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (--fs_prot->refcnt)
		goto out;

	/* disconnect */
	mlx5_ttc_fwd_default_dest(ttc, fs_psp2tt(type));

	/* remove FT */
	accel_psp_fs_rx_destroy(fs, type);

out:
	mutex_unlock(&fs_prot->prot_mutex);
}

static void accel_psp_fs_cleanup_rx(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;
	enum accel_fs_psp_type i;

	if (!fs->rx_fs)
		return;

	accel_psp = fs->rx_fs;
	mlx5_fc_destroy(fs->mdev, accel_psp->rx_bad_counter);
	mlx5_fc_destroy(fs->mdev, accel_psp->rx_err_counter);
	mlx5_fc_destroy(fs->mdev, accel_psp->rx_auth_fail_counter);
	mlx5_fc_destroy(fs->mdev, accel_psp->rx_counter);
	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
		fs_prot = &accel_psp->fs_prot[i];
		mutex_destroy(&fs_prot->prot_mutex);
		WARN_ON(fs_prot->refcnt);
	}
	kfree(fs->rx_fs);
	fs->rx_fs = NULL;
}

static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_accel_fs_psp_prot *fs_prot;
	struct mlx5e_accel_fs_psp *accel_psp;
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_fc *flow_counter;
	enum accel_fs_psp_type i;
	int err;

	accel_psp = kzalloc(sizeof(*accel_psp), GFP_KERNEL);
	if (!accel_psp)
		return -ENOMEM;

	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
		fs_prot = &accel_psp->fs_prot[i];
		mutex_init(&fs_prot->prot_mutex);
	}

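	/* One packet/byte counter per RX disposition: accepted, auth (ICV)
	 * failure, framing error and catch-all drop.
	 */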
	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp rx flow counter err=%pe\n",
			       flow_counter);
		err = PTR_ERR(flow_counter);
		goto out_err;
	}
	accel_psp->rx_counter = flow_counter;

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp rx auth fail flow counter err=%pe\n",
			       flow_counter);
		err = PTR_ERR(flow_counter);
		goto out_counter_err;
	}
	accel_psp->rx_auth_fail_counter = flow_counter;

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp rx error flow counter err=%pe\n",
			       flow_counter);
		err = PTR_ERR(flow_counter);
		goto out_auth_fail_counter_err;
	}
	accel_psp->rx_err_counter = flow_counter;

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp rx bad flow counter err=%pe\n",
			       flow_counter);
		err = PTR_ERR(flow_counter);
		goto out_err_counter_err;
	}
	accel_psp->rx_bad_counter = flow_counter;

	fs->rx_fs = accel_psp;

	return 0;

out_err_counter_err:
	mlx5_fc_destroy(mdev, accel_psp->rx_err_counter);
	accel_psp->rx_err_counter = NULL;
out_auth_fail_counter_err:
	mlx5_fc_destroy(mdev, accel_psp->rx_auth_fail_counter);
	accel_psp->rx_auth_fail_counter = NULL;
out_counter_err:
	mlx5_fc_destroy(mdev, accel_psp->rx_counter);
	accel_psp->rx_counter = NULL;
out_err:
	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
		fs_prot = &accel_psp->fs_prot[i];
		mutex_destroy(&fs_prot->prot_mutex);
	}
	kfree(accel_psp);
	fs->rx_fs = NULL;

	return err;
}

void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv)
{
	int i;

	if (!priv->psp)
		return;

	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++)
		accel_psp_fs_rx_ft_put(priv->psp->fs, i);
}

int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
{
	struct mlx5e_psp_fs *fs;
	int err, i;

	if (!priv->psp)
		return 0;

	fs = priv->psp->fs;
	for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
		err = accel_psp_fs_rx_ft_get(fs, i);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	i--;
	while (i >= 0) {
		accel_psp_fs_rx_ft_put(fs, i);
		--i;
	}

	return err;
}

static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_flow_act flow_act = {};
	u32 *in, *mc, *outer_headers_c;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_psp_tx *tx_fs;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!spec || !in) {
		err = -ENOMEM;
		goto out;
	}

	ft_attr.max_fte = 1;
#define MLX5E_PSP_PRIO 0
	ft_attr.prio = MLX5E_PSP_PRIO;
#define MLX5E_PSP_LEVEL 0
	ft_attr.level = MLX5E_PSP_LEVEL;
	ft_attr.autogroup.max_num_groups = 1;

	tx_fs = fs->tx_fs;
	ft = mlx5_create_flow_table(tx_fs->ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "PSP: fail to add psp tx flow table, err = %d\n", err);
		goto out;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	fg = mlx5_create_flow_group(ft, in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_err(mdev, "PSP: fail to add psp tx flow group, err = %d\n", err);
		goto err_create_fg;
	}

	setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = tx_fs->tx_counter;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "PSP: fail to add psp tx flow rule, err = %d\n", err);
		goto err_add_flow_rule;
	}

	tx_fs->ft = ft;
	tx_fs->fg = fg;
	tx_fs->rule = rule;
	goto out;

err_add_flow_rule:
	mlx5_destroy_flow_group(fg);
err_create_fg:
	mlx5_destroy_flow_table(ft);
out:
	kvfree(in);
	kvfree(spec);
	return err;
}

static void accel_psp_fs_tx_destroy(struct mlx5e_psp_tx *tx_fs)
{
	if (!tx_fs->ft)
		return;

	mlx5_del_flow_rules(tx_fs->rule);
	mlx5_destroy_flow_group(tx_fs->fg);
	mlx5_destroy_flow_table(tx_fs->ft);
}

static int accel_psp_fs_tx_ft_get(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
	int err = 0;

	mutex_lock(&tx_fs->mutex);
	if (tx_fs->refcnt++)
		goto out;

	err = accel_psp_fs_tx_create_ft_table(fs);
	if (err)
		tx_fs->refcnt--;
out:
	mutex_unlock(&tx_fs->mutex);
	return err;
}

static void accel_psp_fs_tx_ft_put(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_psp_tx *tx_fs = fs->tx_fs;

	mutex_lock(&tx_fs->mutex);
	if (--tx_fs->refcnt)
		goto out;

	accel_psp_fs_tx_destroy(tx_fs);
out:
	mutex_unlock(&tx_fs->mutex);
}

static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs)
{
	struct mlx5e_psp_tx *tx_fs = fs->tx_fs;

	if (!tx_fs)
		return;

	mlx5_fc_destroy(fs->mdev, tx_fs->tx_counter);
	mutex_destroy(&tx_fs->mutex);
	WARN_ON(tx_fs->refcnt);
	kfree(tx_fs);
	fs->tx_fs = NULL;
}

static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs)
{
	struct mlx5_core_dev *mdev = fs->mdev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_fc *flow_counter;
	struct mlx5e_psp_tx *tx_fs;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
	if (!tx_fs)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		mlx5_core_warn(mdev,
			       "fail to create psp tx flow counter err=%pe\n",
			       flow_counter);
		kfree(tx_fs);
		return PTR_ERR(flow_counter);
	}
	tx_fs->tx_counter = flow_counter;
	mutex_init(&tx_fs->mutex);
	tx_fs->ns = ns;
	fs->tx_fs = tx_fs;
	return 0;
}

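/* Query the TX and RX hardware flow counters and fill the PSP statistics */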
static void
mlx5e_accel_psp_fs_get_stats_fill(struct mlx5e_priv *priv,
				  struct mlx5e_psp_stats *stats)
{
	struct mlx5e_psp_tx *tx_fs = priv->psp->fs->tx_fs;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_accel_fs_psp *accel_psp;

	accel_psp = (struct mlx5e_accel_fs_psp *)priv->psp->fs->rx_fs;

	if (tx_fs->tx_counter)
		mlx5_fc_query(mdev, tx_fs->tx_counter, &stats->psp_tx_pkts,
			      &stats->psp_tx_bytes);

	if (accel_psp->rx_counter)
		mlx5_fc_query(mdev, accel_psp->rx_counter, &stats->psp_rx_pkts,
			      &stats->psp_rx_bytes);

	if (accel_psp->rx_auth_fail_counter)
		mlx5_fc_query(mdev, accel_psp->rx_auth_fail_counter,
			      &stats->psp_rx_pkts_auth_fail,
			      &stats->psp_rx_bytes_auth_fail);

	if (accel_psp->rx_err_counter)
		mlx5_fc_query(mdev, accel_psp->rx_err_counter,
			      &stats->psp_rx_pkts_frame_err,
			      &stats->psp_rx_bytes_frame_err);

	if (accel_psp->rx_bad_counter)
		mlx5_fc_query(mdev, accel_psp->rx_bad_counter,
			      &stats->psp_rx_pkts_drop,
			      &stats->psp_rx_bytes_drop);
}

void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv)
{
	if (!priv->psp)
		return;

	accel_psp_fs_tx_ft_put(priv->psp->fs);
}

int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
{
	if (!priv->psp)
		return 0;

	return accel_psp_fs_tx_ft_get(priv->psp->fs);
}

static void mlx5e_accel_psp_fs_cleanup(struct mlx5e_psp_fs *fs)
{
	accel_psp_fs_cleanup_rx(fs);
	accel_psp_fs_cleanup_tx(fs);
	kfree(fs);
}

static struct mlx5e_psp_fs *mlx5e_accel_psp_fs_init(struct mlx5e_priv *priv)
{
	struct mlx5e_psp_fs *fs;
	int err = 0;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return ERR_PTR(-ENOMEM);

	fs->mdev = priv->mdev;
	err = accel_psp_fs_init_tx(fs);
	if (err)
		goto err_tx;

	fs->fs = priv->fs;
	err = accel_psp_fs_init_rx(fs);
	if (err)
		goto err_rx;

	return fs;

err_rx:
	accel_psp_fs_cleanup_tx(fs);
err_tx:
	kfree(fs);
	return ERR_PTR(err);
}

static int
mlx5e_psp_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
		     struct netlink_ext_ack *extack)
{
	return 0; /* TODO: this should actually do things to the device */
}

static int
mlx5e_psp_generate_key_spi(struct mlx5_core_dev *mdev,
			   enum mlx5_psp_gen_spi_in_key_size keysz,
			   unsigned int keysz_bytes,
			   struct psp_key_parsed *key)
{
	u32 out[MLX5_ST_SZ_DW(psp_gen_spi_out) + MLX5_ST_SZ_DW(key_spi)] = {};
	u32 in[MLX5_ST_SZ_DW(psp_gen_spi_in)] = {};
	void *outkey;
	int err;

	WARN_ON_ONCE(keysz_bytes > PSP_MAX_KEY);

	MLX5_SET(psp_gen_spi_in, in, opcode, MLX5_CMD_OP_PSP_GEN_SPI);
	MLX5_SET(psp_gen_spi_in, in, key_size, keysz);
	MLX5_SET(psp_gen_spi_in, in, num_of_spi, 1);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	outkey = MLX5_ADDR_OF(psp_gen_spi_out, out, key_spi);
	key->spi = cpu_to_be32(MLX5_GET(key_spi, outkey, spi));
	memcpy(key->key, MLX5_ADDR_OF(key_spi, outkey, key) + 32 - keysz_bytes,
	       keysz_bytes);

	return 0;
}

static int
mlx5e_psp_rx_spi_alloc(struct psp_dev *psd, u32 version,
		       struct psp_key_parsed *assoc,
		       struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
	enum mlx5_psp_gen_spi_in_key_size keysz;
	u8 keysz_bytes;

	switch (version) {
	case PSP_VERSION_HDR0_AES_GCM_128:
		keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128;
		keysz_bytes = 16;
		break;
	case PSP_VERSION_HDR0_AES_GCM_256:
		keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256;
		keysz_bytes = 32;
		break;
	default:
		return -EINVAL;
	}

	return mlx5e_psp_generate_key_spi(priv->mdev, keysz, keysz_bytes, assoc);
}

struct psp_key {
	u32 id;
};

static int mlx5e_psp_assoc_add(struct psp_dev *psd, struct psp_assoc *pas,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct psp_key_parsed *tx = &pas->tx;
	struct mlx5e_psp *psp = priv->psp;
	struct psp_key *nkey;
	int err;

	nkey = (struct psp_key *)pas->drv_data;

	err = mlx5_create_encryption_key(mdev, tx->key,
					 psp_key_size(pas->version),
					 MLX5_ACCEL_OBJ_PSP_KEY,
					 &nkey->id);
	if (err) {
		mlx5_core_err(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	atomic_inc(&psp->tx_key_cnt);
	return 0;
}

static void mlx5e_psp_assoc_del(struct psp_dev *psd, struct psp_assoc *pas)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
	struct mlx5e_psp *psp = priv->psp;
	struct psp_key *nkey;

	nkey = (struct psp_key *)pas->drv_data;
	mlx5_destroy_encryption_key(priv->mdev, nkey->id);
	atomic_dec(&psp->tx_key_cnt);
}

static int mlx5e_psp_rotate_key(struct mlx5_core_dev *mdev)
{
	u32 in[MLX5_ST_SZ_DW(psp_rotate_key_in)] = {};
	u32 out[MLX5_ST_SZ_DW(psp_rotate_key_out)];

	MLX5_SET(psp_rotate_key_in, in, opcode,
		 MLX5_CMD_OP_PSP_ROTATE_KEY);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

static int
mlx5e_psp_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);

	/* no support for protecting against external rotations */
	psd->generation = 0;

	return mlx5e_psp_rotate_key(priv->mdev);
}

static void
mlx5e_psp_get_stats(struct psp_dev *psd, struct psp_dev_stats *stats)
{
	struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
	struct mlx5e_psp_stats nstats;

	mlx5e_accel_psp_fs_get_stats_fill(priv, &nstats);
	stats->rx_packets = nstats.psp_rx_pkts;
	stats->rx_bytes = nstats.psp_rx_bytes;
	stats->rx_auth_fail = nstats.psp_rx_pkts_auth_fail;
	stats->rx_error = nstats.psp_rx_pkts_frame_err;
	stats->rx_bad = nstats.psp_rx_pkts_drop;
	stats->tx_packets = nstats.psp_tx_pkts;
	stats->tx_bytes = nstats.psp_tx_bytes;
	stats->tx_error = atomic_read(&priv->psp->tx_drop);
}

static struct psp_dev_ops mlx5_psp_ops = {
	.set_config = mlx5e_psp_set_config,
	.rx_spi_alloc = mlx5e_psp_rx_spi_alloc,
	.tx_key_add = mlx5e_psp_assoc_add,
	.tx_key_del = mlx5e_psp_assoc_del,
	.key_rotate = mlx5e_psp_key_rotate,
	.get_stats = mlx5e_psp_get_stats,
};

void mlx5e_psp_unregister(struct mlx5e_priv *priv)
{
	if (!priv->psp || !priv->psp->psp)
		return;

	psp_dev_unregister(priv->psp->psp);
}

void mlx5e_psp_register(struct mlx5e_priv *priv)
{
	/* FW Caps missing */
	if (!priv->psp)
		return;

	priv->psp->caps.assoc_drv_spc = sizeof(u32);
	priv->psp->caps.versions = 1 << PSP_VERSION_HDR0_AES_GCM_128;
	if (MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_encrypt) &&
	    MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_decrypt))
		priv->psp->caps.versions |= 1 << PSP_VERSION_HDR0_AES_GCM_256;

	priv->psp->psp = psp_dev_create(priv->netdev, &mlx5_psp_ops,
					&priv->psp->caps, NULL);
	if (IS_ERR(priv->psp->psp))
		mlx5_core_err(priv->mdev, "PSP failed to register due to %pe\n",
			      priv->psp->psp);
}

int mlx5e_psp_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_psp_fs *fs;
	struct mlx5e_psp *psp;
	int err;

	if (!mlx5_is_psp_device(mdev)) {
		mlx5_core_dbg(mdev, "PSP offload not supported\n");
		return 0;
	}

	if (!MLX5_CAP_ETH(mdev, swp)) {
		mlx5_core_dbg(mdev, "SWP not supported\n");
		return 0;
	}

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "SWP checksum not supported\n");
		return 0;
	}

	if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial)) {
		mlx5_core_dbg(mdev, "SWP L4 partial checksum not supported\n");
		return 0;
	}

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "PSP LSO not supported\n");
		return 0;
	}

	psp = kzalloc(sizeof(*psp), GFP_KERNEL);
	if (!psp)
		return -ENOMEM;

	priv->psp = psp;
	fs = mlx5e_accel_psp_fs_init(priv);
	if (IS_ERR(fs)) {
		err = PTR_ERR(fs);
		goto out_err;
	}

	psp->fs = fs;

	mlx5_core_dbg(priv->mdev, "PSP attached to netdevice\n");
	return 0;

out_err:
	priv->psp = NULL;
	kfree(psp);
	return err;
}

void mlx5e_psp_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_psp *psp = priv->psp;

	if (!psp)
		return;

	WARN_ON(atomic_read(&psp->tx_key_cnt));
	mlx5e_accel_psp_fs_cleanup(psp->fs);
	priv->psp = NULL;
	kfree(psp);
}