// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}

#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */

int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}

static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}

struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct mutex lock; /* guards region list updates */
	struct list_head region_list;
	unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
	struct mlxsw_sp_acl_tcam_group group;
	struct list_head vregion_list;
	struct rhashtable vchunk_ht;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
	bool vregion_rehash_enabled;
	unsigned int *p_min_prio;
	unsigned int *p_max_prio;
};
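
/* Progress state of an in-flight vregion rehash. The delayed work may run
 * out of credits and be rescheduled, so the context remembers which vchunk
 * and ventry to resume the migration (or its rollback) from.
 */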
struct mlxsw_sp_acl_tcam_rehash_ctx {
	void *hints_priv;
	bool this_is_rollback;
	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
							  * currently migrated.
							  */
	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
							* migration from in
							* a vchunk being
							* currently migrated.
							*/
	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
						       * migration at
						       * a vchunk being
						       * currently migrated.
						       */
};

struct mlxsw_sp_acl_tcam_vregion {
	struct mutex lock; /* Protects consistency of region, region2 pointers
			    * and vchunk_list.
			    */
	struct mlxsw_sp_acl_tcam_region *region;
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
	struct list_head list; /* Member of a TCAM group */
	struct list_head tlist; /* Member of a TCAM */
	struct list_head vchunk_list; /* List of vchunks under this vregion */
	struct mlxsw_afk_key_info *key_info;
	struct mlxsw_sp_acl_tcam *tcam;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct {
		struct delayed_work dw;
		struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
	} rehash;
	struct mlxsw_sp *mlxsw_sp;
	refcount_t ref_count;
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
	struct list_head list; /* Member of a TCAM vregion */
	struct rhash_head ht_node; /* Member of a chunk HT */
	struct list_head ventry_list;
	unsigned int priority; /* Priority within the vregion and group */
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	refcount_t ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
	struct list_head list; /* Member of a TCAM vchunk */
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_rule_info *rulei;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
	.automatic_shrinking = true,
};
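
/* Rewrite the ordered list of regions bound to the group into the PAGT
 * register. A region is marked "multi" when the next region in the list
 * belongs to the same vregion, which happens while a vregion is being
 * migrated between two regions.
 */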
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list) {
		bool multi = false;

		/* Check if the next entry in the list has the same vregion. */
		if (region->list.next != &group->region_list &&
		    list_next_entry(region, list)->vregion == region->vregion)
			multi = true;
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
					   region->id, multi);
	}
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group)
{
	int err;

	group->tcam = tcam;
	INIT_LIST_HEAD(&group->region_list);

	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	mutex_init(&group->lock);

	return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	mutex_destroy(&group->lock);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam *tcam,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
			     unsigned int patterns_count,
			     struct mlxsw_afk_element_usage *tmplt_elusage,
			     bool vregion_rehash_enabled,
			     unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	int err;

	vgroup->patterns = patterns;
	vgroup->patterns_count = patterns_count;
	vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
	vgroup->p_min_prio = p_min_prio;
	vgroup->p_max_prio = p_max_prio;

	if (tmplt_elusage) {
		vgroup->tmplt_elusage_set = true;
		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
		       sizeof(vgroup->tmplt_elusage));
	}
	INIT_LIST_HEAD(&vgroup->vregion_list);

	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
	if (err)
		return err;

	err = rhashtable_init(&vgroup->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	rhashtable_destroy(&vgroup->vchunk_ht);
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	WARN_ON(!list_empty(&vgroup->vregion_list));
}
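
/* Bind or unbind the ACL group to a port via the PPBT register, selecting
 * the ingress or egress ACL binding point.
 */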
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	/* As a priority of a vregion, return priority of the first vchunk */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
}

static void
mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (list_empty(&vgroup->vregion_list))
		return;
	vregion = list_first_entry(&vgroup->vregion_list,
				   typeof(*vregion), list);
	*vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	vregion = list_last_entry(&vgroup->vregion_list,
				  typeof(*vregion), list);
	*vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int priority,
				      struct mlxsw_sp_acl_tcam_region *next_region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;
	int err;

	mutex_lock(&group->lock);
	if (group->region_count == group->tcam->max_group_size) {
		err = -ENOBUFS;
		goto err_region_count_check;
	}

	if (next_region) {
		/* If the next region is defined, place the new one
		 * before it. The next one is a sibling.
		 */
		pos = &next_region->list;
	} else {
		/* Position the region inside the list according to priority */
		list_for_each(pos, &group->region_list) {
			region2 = list_entry(pos, typeof(*region2), list);
			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
			    priority)
				break;
		}
	}
	list_add_tail(&region->list, pos);
	region->group = group;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	group->region_count++;
	mutex_unlock(&group->lock);
	return 0;

err_group_update:
	list_del(&region->list);
err_region_count_check:
	mutex_unlock(&group->lock);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mutex_lock(&group->lock);
	list_del(&region->list);
	group->region_count--;
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	mutex_unlock(&group->lock);
}

static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_vregion *vregion,
					unsigned int priority)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
	int err;

	/* Position the vregion inside the list according to priority */
	list_for_each(pos, &vgroup->vregion_list) {
		vregion2 = list_entry(pos, typeof(*vregion2), list);
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
			break;
	}
	list_add_tail(&vregion->list, pos);

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
						    vregion->region,
						    priority, NULL);
	if (err)
		goto err_region_attach;

	return 0;

err_region_attach:
	list_del(&vregion->list);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	list_del(&vregion->list);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      unsigned int priority,
				      struct mlxsw_afk_element_usage *elusage,
				      bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &vgroup->vregion_list) {
		vregion = list_entry(pos, typeof(*vregion), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next vregions.
		 */
		if (pos->next != &vgroup->vregion_list) { /* not last */
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected vregion we cannot
		 * use this region, so return NULL to indicate new vregion has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected vregion we cannot
		 * use this vregion. There is still some hope that the next
		 * vregion would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
			continue;

		/* Indicate if the vregion needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found vregion.
		 */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* New vregion has to be created. */
}

static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      struct mlxsw_afk_element_usage *elusage,
				      struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* In case the template is set, we don't have to look up the pattern
	 * and just use the template.
	 */
	if (vgroup->tmplt_elusage_set) {
		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < vgroup->patterns_count; i++) {
		pattern = &vgroup->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
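
/* A region is the device incarnation of a vregion: it gets a region ID,
 * is allocated and enabled via the PTAR and PACL registers, and is then
 * handed to the low-level (per-ASIC) ops for initialization.
 */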
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
	region->vregion = vregion;
	region->key_info = vregion->key_info;

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
			       region, hints_priv);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam *tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
	kfree(region);
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

	if (!interval)
		return;
	mlxsw_core_schedule_dw(&vregion->rehash.dw,
			       msecs_to_jiffies(interval));
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits);
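
/* Periodic rehash work. Each run gets a budget of
 * MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS entries to migrate. If the budget
 * is exhausted, the work reschedules itself immediately to continue;
 * otherwise it re-arms with the configured interval.
 */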
static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash.dw.work);
	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
	if (credits < 0)
		/* Rehash ran out of credits, so it was interrupted.
		 * Schedule the work as soon as possible to continue.
		 */
		mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
	else
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	/* If a rule was added or deleted from vchunk which is currently
	 * under rehash migration, we have to reset the ventry pointers
	 * to make sure all rules are properly migrated.
	 */
	if (vregion->rehash.ctx.current_vchunk == vchunk) {
		vregion->rehash.ctx.start_ventry = NULL;
		vregion->rehash.ctx.stop_ventry = NULL;
	}
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	/* If a chunk was added or deleted from vregion we have to reset
	 * the current chunk pointer to make sure all chunks
	 * are properly migrated.
	 */
	vregion->rehash.ctx.current_vchunk = NULL;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				 unsigned int priority,
				 struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vregion->vchunk_list);
	mutex_init(&vregion->lock);
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;
	vregion->vgroup = vgroup;
	refcount_set(&vregion->ref_count, 1);

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
							  vregion, NULL);
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
						      priority);
	if (err)
		goto err_vgroup_vregion_attach;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash.dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
		mutex_lock(&tcam->lock);
		list_add_tail(&vregion->tlist, &tcam->vregion_list);
		mutex_unlock(&tcam->lock);
	}

	return vregion;

err_vgroup_vregion_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		mutex_lock(&tcam->lock);
		list_del(&vregion->tlist);
		mutex_unlock(&tcam->lock);
		cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
	mutex_destroy(&vregion->lock);
	kfree(vregion);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk_element_usage vregion_elusage;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	bool need_split;

	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
							elusage, &need_split);
	if (vregion) {
		if (need_split) {
			/* According to priority, new vchunk should belong to
			 * an existing vregion. However, this vchunk needs
			 * elements that vregion does not contain. We need
			 * to split the existing vregion into two and create
			 * a new vregion for the new vchunk in between.
			 * This is not supported now.
			 */
			return ERR_PTR(-EOPNOTSUPP);
		}
		refcount_inc(&vregion->ref_count);
		return vregion;
	}

	mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
					      &vregion_elusage);

	return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
						&vregion_elusage);
}

static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	if (!refcount_dec_and_test(&vregion->ref_count))
		return;
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
	chunk->region = region;

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct list_head *pos;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vchunk->ventry_list);
	vchunk->priority = priority;
	vchunk->vgroup = vgroup;
	refcount_set(&vchunk->ref_count, 1);

	vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
						priority, elusage);
	if (IS_ERR(vregion)) {
		err = PTR_ERR(vregion);
		goto err_vregion_get;
	}

	vchunk->vregion = vregion;

	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	mutex_lock(&vregion->lock);
	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);

	/* Position the vchunk inside the list according to priority */
	list_for_each(pos, &vregion->vchunk_list) {
		vchunk2 = list_entry(pos, typeof(*vchunk2), list);
		if (vchunk2->priority > priority)
			break;
	}
	list_add_tail(&vchunk->list, pos);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);

	return vchunk;

err_chunk_create:
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
	kfree(vchunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
	list_del(&vchunk->list);
	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	mutex_unlock(&vregion->lock);
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
	kfree(vchunk);
	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		refcount_inc(&vchunk->ref_count);
		return vchunk;
	}
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
					       priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	if (!refcount_dec_and_test(&vchunk->ref_count))
		return;
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
	entry->chunk = chunk;

	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
	if (err)
		goto err_entry_add;

	return entry;

err_entry_add:
	kfree(entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
	kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_action_replace(mlxsw_sp, region->priv,
					 entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
				       entry->priv, activity);
}

static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
	ventry->rulei = rulei;
	vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
						       vchunk->chunk);
	if (IS_ERR(ventry->entry)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

	list_add_tail(&ventry->list, &vchunk->ventry_list);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	mutex_unlock(&vregion->lock);

	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
						    ventry->entry, activity);
}
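
/* Migrate a single ventry to the given chunk. The new hardware entry is
 * created in the destination chunk before the old one is destroyed; each
 * migrated entry consumes one credit.
 */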
static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_entry *new_entry;

	/* First check if the entry is not already where we want it to be. */
	if (ventry->entry->chunk == chunk)
		return 0;

	if (--(*credits) < 0)
		return 0;

	new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
	if (IS_ERR(new_entry))
		return PTR_ERR(new_entry);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = new_entry;
	return 0;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_chunk *new_chunk;

	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(new_chunk))
		return PTR_ERR(new_chunk);
	vchunk->chunk2 = vchunk->chunk;
	vchunk->chunk = new_chunk;
	ctx->current_vchunk = vchunk;
	ctx->start_ventry = NULL;
	ctx->stop_ventry = NULL;
	return 0;
}

static void
mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;
	ctx->current_vchunk = NULL;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	int err;

	if (vchunk->chunk->region != region) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
							     region, ctx);
		if (err)
			return err;
	} else if (!vchunk->chunk2) {
		/* The chunk is already as it should be, nothing to do. */
		return 0;
	}

	/* If the migration got interrupted, we have the ventry to start from
	 * stored in context.
	 */
	if (ctx->start_ventry)
		ventry = ctx->start_ventry;
	else
		ventry = list_first_entry(&vchunk->ventry_list,
					  typeof(*ventry), list);

	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
		/* During rollback, once we reach the ventry that failed
		 * to migrate, we are done.
		 */
		if (ventry == ctx->stop_ventry)
			break;

		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk, credits);
		if (err) {
			if (ctx->this_is_rollback) {
				/* Save the ventry which we ended with and try
				 * to continue later on.
				 */
				ctx->start_ventry = ventry;
				return err;
			}
			/* Swap the chunk and chunk2 pointers so the follow-up
			 * rollback call will see the original chunk pointer
			 * in vchunk->chunk.
			 */
			swap(vchunk->chunk, vchunk->chunk2);
			/* The rollback has to be done from beginning of the
			 * chunk, that is why we have to null the start_ventry.
			 * However, we know where to stop the rollback,
			 * at the current ventry.
			 */
			ctx->start_ventry = NULL;
			ctx->stop_ventry = ventry;
			return err;
		} else if (*credits < 0) {
			/* We are out of credits, the rest of the ventries
			 * will be migrated later. Save the ventry
			 * which we ended with.
			 */
			ctx->start_ventry = ventry;
			return 0;
		}
	}

	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
	return 0;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	/* If the migration got interrupted, we have the vchunk
	 * we are working on stored in context.
	 */
	if (ctx->current_vchunk)
		vchunk = ctx->current_vchunk;
	else
		vchunk = list_first_entry(&vregion->vchunk_list,
					  typeof(*vchunk), list);

	list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
							   vregion->region,
							   ctx, credits);
		if (err || *credits < 0)
			return err;
	}
	return 0;
}
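
/* Migrate all vchunks of the vregion to vregion->region. On failure the
 * region pointers are swapped back and the same routine is re-run as a
 * rollback, moving already-migrated entries back to the original region.
 */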
static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion,
				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				  int *credits)
{
	int err, err2;

	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
	mutex_lock(&vregion->lock);
	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
						   ctx, credits);
	if (err) {
		/* In case migration was not successful, we need to swap
		 * so the original region pointer is assigned again
		 * to vregion->region.
		 */
		swap(vregion->region, vregion->region2);
		ctx->current_vchunk = NULL;
		ctx->this_is_rollback = true;
		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
							    ctx, credits);
		if (err2) {
			trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
									       vregion);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
			/* Let the rollback be continued later on. */
		}
	}
	mutex_unlock(&vregion->lock);
	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
	return err;
}

static bool
mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	return ctx->hints_priv;
}

static int
mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vregion *vregion,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	struct mlxsw_sp_acl_tcam_region *new_region;
	void *hints_priv;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);

	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
	if (IS_ERR(hints_priv))
		return PTR_ERR(hints_priv);

	new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
						     vregion, hints_priv);
	if (IS_ERR(new_region)) {
		err = PTR_ERR(new_region);
		goto err_region_create;
	}

	/* vregion->region contains the pointer to the new region
	 * we are going to migrate to.
	 */
	vregion->region2 = vregion->region;
	vregion->region = new_region;
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
						    vregion->region2->group,
						    new_region, priority,
						    vregion->region2);
	if (err)
		goto err_group_region_attach;

	ctx->hints_priv = hints_priv;
	ctx->this_is_rollback = false;

	return 0;

err_group_region_attach:
	vregion->region = vregion->region2;
	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
err_region_create:
	ops->region_rehash_hints_put(hints_priv);
	return err;
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
	ops->region_rehash_hints_put(ctx->hints_priv);
	ctx->hints_priv = NULL;
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
	int err;

	/* Check if the previous rehash work was interrupted
	 * which means we have to continue it now.
	 * If not, start a new rehash.
	 */
	if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
		err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
							     vregion, ctx);
		if (err) {
			if (err != -EAGAIN)
				dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
			return;
		}
	}

	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
						ctx, credits);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
	}

	if (*credits >= 0)
		mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}
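
/* Handlers for the "acl_region_rehash_interval" devlink runtime parameter.
 * Values below MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN are rejected,
 * except 0, which disables the periodic rehash altogether.
 */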
static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp_acl_tcam *tcam;
	struct mlxsw_sp *mlxsw_sp;

	mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
	ctx->val.vu32 = tcam->vregion_rehash_intrvl;

	return 0;
}

static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam *tcam;
	struct mlxsw_sp *mlxsw_sp;
	u32 val = ctx->val.vu32;

	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
		return -EINVAL;

	mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
	tcam->vregion_rehash_intrvl = val;
	mutex_lock(&tcam->lock);
	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
		if (val)
			mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
		else
			cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mutex_unlock(&tcam->lock);
	return 0;
}

static const struct devlink_param mlxsw_sp_acl_tcam_rehash_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_acl_tcam_region_rehash_intrvl_get,
			     mlxsw_sp_acl_tcam_region_rehash_intrvl_set,
			     NULL),
};

static int mlxsw_sp_acl_tcam_rehash_params_register(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
		return 0;

	return devl_params_register(devlink, mlxsw_sp_acl_tcam_rehash_params,
				    ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
}

static void
mlxsw_sp_acl_tcam_rehash_params_unregister(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
		return;

	devl_params_unregister(devlink, mlxsw_sp_acl_tcam_rehash_params,
			       ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
}

int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	int err;

	mutex_init(&tcam->lock);
	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

	err = mlxsw_sp_acl_tcam_rehash_params_register(mlxsw_sp);
	if (err)
		goto err_rehash_params_register;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
	if (!tcam->used_regions) {
		err = -ENOMEM;
		goto err_alloc_used_regions;
	}
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);
	tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
				     MLXSW_REG_PAGT_ACL_MAX_NUM);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	bitmap_free(tcam->used_groups);
err_alloc_used_groups:
	bitmap_free(tcam->used_regions);
err_alloc_used_regions:
	mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
err_rehash_params_register:
	mutex_destroy(&tcam->lock);
	return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->fini(mlxsw_sp, tcam->priv);
	bitmap_free(tcam->used_groups);
	bitmap_free(tcam->used_regions);
	mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
	mutex_destroy(&tcam->lock);
}
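
/* Key element patterns tried in order by mlxsw_sp_acl_tcam_vgroup_use_patterns()
 * when no template element usage is set; the first pattern that covers the
 * requested elements determines the key of a newly created vregion.
 */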
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage,
				     unsigned int *p_min_prio,
				     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					    mlxsw_sp_acl_tcam_patterns,
					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					    tmplt_elusage, true,
					    p_min_prio, p_max_prio);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
					    mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
				       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage,
				 unsigned int *p_min_prio,
				 unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

	err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage, false,
					   p_min_prio, p_max_prio);
	if (err)
		return err;

	/* For most of the TCAM clients it would make sense to take a tcam chunk
	 * only when the first rule is written. This is not the case for
	 * multicast router as it is required to bind the multicast router to a
	 * specific ACL Group ID which must exist in HW before multicast router
	 * is initialized.
	 */
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
						       &ruleset->vgroup, 1,
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
	return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing multicast router */
	return 0;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
}

static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
{
	*activity = false;

	return 0;
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
	.rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get,
};
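
/* Per-client profile dispatch table used by mlxsw_sp_acl_tcam_profile_ops()
 * to select the flower or multicast router (MR) ops.
 */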
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};

const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}