// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/red.h>

#include "spectrum.h"
#include "reg.h"

#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
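/* Qdisc bands are numbered top-down (band 0 is the highest-priority band),
 * whereas hardware traffic classes are numbered bottom-up, hence the
 * inversion above. A worked example: with IEEE_8021QAZ_MAX_TCS being 8,
 * band 0 maps to tclass 7 and band 7 to tclass 0. Child class minors are
 * 1-based, so class 1:1 (child 1) likewise maps to tclass 7.
 */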
enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
	MLXSW_SP_QDISC_ETS,
	MLXSW_SP_QDISC_TBF,
};

struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params);
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded without
	 * being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};

struct mlxsw_sp_qdisc {
	u32 handle;
	u8 tclass_num;
	u8 prio_bitmap;
	union {
		struct red_stats red;
	} xstats_base;
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;
	} stats_base;

	struct mlxsw_sp_qdisc_ops *ops;
};

static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
		       enum mlxsw_sp_qdisc_type type)
{
	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	       mlxsw_sp_qdisc->ops->type == type &&
	       mlxsw_sp_qdisc->handle == handle;
}

static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
		    bool root_only)
{
	int tclass, child_index;

	if (parent == TC_H_ROOT)
		return mlxsw_sp_port->root_qdisc;

	if (root_only || !mlxsw_sp_port->root_qdisc ||
	    !mlxsw_sp_port->root_qdisc->ops ||
	    TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
	    TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
		return NULL;

	child_index = TC_H_MIN(parent);
	tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
	return &mlxsw_sp_port->tclass_qdiscs[tclass];
}
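/* A worked example of the lookup above: with a root qdisc whose handle is
 * 1:, a child qdisc created with "parent 1:3" arrives with parent ==
 * 0x10003. TC_H_MAJ() yields 0x10000, which matches the root handle, and
 * TC_H_MIN() yields child index 3, which MLXSW_SP_PRIO_CHILD_TO_TCLASS()
 * maps to tclass 5.
 */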
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
	int i;

	if (mlxsw_sp_port->root_qdisc->handle == handle)
		return mlxsw_sp_port->root_qdisc;

	if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
		return NULL;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
			return &mlxsw_sp_port->tclass_qdiscs[i];

	return NULL;
}

static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int err = 0;

	if (!mlxsw_sp_qdisc)
		return 0;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	return err;
}

static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	int err;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
		/* In case this location contained a different qdisc of the
		 * same type we can override the old qdisc configuration.
		 * Otherwise, we need to remove the old qdisc before setting the
		 * new one.
		 */
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_bad_param;

	err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_config;

	if (mlxsw_sp_qdisc->handle != handle) {
		mlxsw_sp_qdisc->ops = ops;
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

err_bad_param:
err_config:
	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}

static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_stats)
		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
						      mlxsw_sp_qdisc,
						      stats_ptr);

	return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_xstats)
		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
						       mlxsw_sp_qdisc,
						       xstats_ptr);

	return -EOPNOTSUPP;
}

static u64
mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->backlog[tclass_num] +
	       xstats->backlog[tclass_num + 8];
}

static u64
mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->tail_drop[tclass_num] +
	       xstats->tail_drop[tclass_num + 8];
}

static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
				       u8 prio_bitmap, u64 *tx_packets,
				       u64 *tx_bytes)
{
	int i;

	*tx_packets = 0;
	*tx_bytes = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (prio_bitmap & BIT(i)) {
			*tx_packets += xstats->tx_packets[i];
			*tx_bytes += xstats->tx_bytes[i];
		}
	}
}

static void
mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u64 *p_tx_bytes, u64 *p_tx_packets,
				u64 *p_drops, u64 *p_backlog)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	u64 tx_bytes, tx_packets;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &tx_packets, &tx_bytes);

	*p_tx_packets += tx_packets;
	*p_tx_bytes += tx_bytes;
	*p_drops += xstats->wred_drop[tclass_num] +
		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
}

static void
mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    u64 tx_bytes, u64 tx_packets,
			    u64 drops, u64 backlog,
			    struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;

	tx_bytes -= stats_base->tx_bytes;
	tx_packets -= stats_base->tx_packets;
	drops -= stats_base->drops;
	backlog -= stats_base->backlog;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);

	stats_base->backlog += backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
}
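/* The baseline scheme above turns absolute hardware counters into deltas.
 * For example, if the port had already sent 1000 packets on a qdisc's
 * priorities when its stats were last cleaned, stats_base holds 1000; when
 * the hardware counter later reads 1500, mlxsw_sp_qdisc_update_stats()
 * reports 500 new packets to the stack and folds them into the baseline, so
 * subsequent readouts only report traffic seen since this one.
 */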
static void
mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 backlog = 0;
	u64 drops = 0;

	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
					&tx_bytes, &tx_packets,
					&drops, &backlog);
	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
				    stats_ptr);
}

static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   int tclass_num)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}

static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
					mlxsw_sp_qdisc->stats_base.backlog;

	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
						  mlxsw_sp_qdisc->tclass_num);
}

static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;

	if (p->min > p->max) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: min %u is bigger than max %u\n", p->min,
			p->max);
		return -EINVAL;
	}
	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
					GUARANTEED_SHARED_BUFFER)) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: max value %u is too big\n", p->max);
		return -EINVAL;
	}
	if (p->min == 0 || p->max == 0) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: 0 value is illegal for min and max\n");
		return -EINVAL;
	}
	return 0;
}
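/* In mlxsw_sp_qdisc_red_replace() below, p->probability is a fixed-point
 * fraction with 32 fractional bits (the two DIV_ROUND_UP() calls by 1 << 16
 * together divide by 2^32). A worked example: a configured drop probability
 * of 0.1 arrives as approximately 0x1999999A; multiplying by 100 and
 * dividing by 2^32 yields 10, the percentage programmed into the hardware
 * profile.
 */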
static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	u32 min, max;
	u64 prob;

	/* calculate probability in percentage */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
						 max, prob, p->is_ecn);
}

static void
mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct gnet_stats_queue *qstats)
{
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	qstats->backlog -= backlog;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static void
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_red_qopt_offload_params *p = params;

	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
}

static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *xstats_ptr)
{
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	int early_drops, pdrops;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
		 xstats_base->pdrop;

	res->pdrop += pdrops;
	res->prob_drop += early_drops;

	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	return 0;
}

static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	u64 overlimits;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;

	stats_ptr->qstats->overlimits += overlimits;
	stats_base->overlimits += overlimits;

	return 0;
}

#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};

int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_red_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_RED_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_red,
					      &p->set);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_RED))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_RED_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_RED_XSTATS:
		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
						 p->xstats);
	case TC_RED_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void
mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u64 backlog_cells = 0;
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 drops = 0;

	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
					&tx_bytes, &tx_packets,
					&drops, &backlog_cells);

	mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
	mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
	mlxsw_sp_qdisc->stats_base.drops = drops;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
			mlxsw_sp_qdisc->stats_base.backlog;

	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     mlxsw_sp_qdisc->tclass_num, 0,
					     MLXSW_REG_QEEC_MAS_DIS, 0);
}

static int
mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
		      u32 max_size, u8 *p_burst_size)
{
	/* TBF burst size is configured in bytes. The ASIC burst size value is
	 * (2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
	 */
	u32 bs512 = max_size / 64;
	u8 bs = fls(bs512);

	if (!bs)
		return -EINVAL;
	--bs;

	/* Demand a power of two. */
	if ((1 << bs) != bs512)
		return -EINVAL;

	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
		return -EINVAL;

	*p_burst_size = bs;
	return 0;
}
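/* A worked example of the conversion above: a TBF max_size of 4096 bytes is
 * 64 512-bit units, so bs512 = 64 and fls(64) - 1 gives bs = 6. Since
 * 1 << 6 == 64, the value is a power of two and is accepted provided it lies
 * within the shaper's supported range; mlxsw_sp_qdisc_tbf_max_size(6) below
 * maps it back to 4096 bytes.
 */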
static u32
mlxsw_sp_qdisc_tbf_max_size(u8 bs)
{
	return (1U << bs) * 64;
}

static u64
mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
{
	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
	 * Kbits/s.
	 */
	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
}
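/* For example, a TBF rate of 1 Gbit/s arrives as rate_bytes_ps == 125000000,
 * and div_u64(125000000, 1000) * 8 programs 1000000 Kbits/s. Dividing before
 * multiplying rounds down rates that are not a whole multiple of 1000
 * bytes/s.
 */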
static int
mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	u8 burst_size;
	int err;

	if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
		dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
			"spectrum: TBF: rate of %lluKbps must be below %u\n",
			rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
		return -EINVAL;
	}

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (err) {
		u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;

		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
			p->max_size,
			mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
			mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
		return -EINVAL;
	}

	return 0;
}

static int
mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	u8 burst_size;
	int err;

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (WARN_ON_ONCE(err))
		/* check_params above was supposed to reject this value. */
		return -EINVAL;

	/* Configure a subgroup shaper, so that both UC and MC traffic is
	 * subject to shaping. That is unlike RED: UC queue lengths are going
	 * to be different than MC ones due to different pool and quota
	 * configurations, so applying RED to the overall stream is not
	 * applicable. For a shaper, on the other hand, subjecting the overall
	 * stream to the configured rate makes sense. Also note that this is
	 * what we do for ieee_setmaxrate().
	 */
	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     mlxsw_sp_qdisc->tclass_num, 0,
					     rate_kbps, burst_size);
}

static void
mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;

	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
}

static int
mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
				    stats_ptr);
	return 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
	.type = MLXSW_SP_QDISC_TBF,
	.check_params = mlxsw_sp_qdisc_tbf_check_params,
	.replace = mlxsw_sp_qdisc_tbf_replace,
	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
	.destroy = mlxsw_sp_qdisc_tbf_destroy,
	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};

int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_tbf_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_TBF_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_tbf,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_TBF))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_TBF_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_TBF_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static int
__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      i, 0, false, 0);
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &mlxsw_sp_port->tclass_qdiscs[i]);
		mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
	}

	return 0;
}

static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
}

static int
__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
{
	if (nbands > IEEE_8021QAZ_MAX_TCS)
		return -EOPNOTSUPP;

	return 0;
}

static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}
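/* A worked example of the priomap handling in __mlxsw_sp_qdisc_ets_replace()
 * below: with nbands = 2 and priomap {0, 0, 1, 1, 0, 0, 0, 0}, band 0
 * collects priorities 0, 1 and 4-7 into prio_bitmap 0xf3 on tclass 7, and
 * band 1 collects priorities 2 and 3 into prio_bitmap 0x0c on tclass 6. The
 * remaining six bands are destroyed and their ETS elements disabled.
 */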
static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			     unsigned int nbands,
			     const unsigned int *quanta,
			     const unsigned int *weights,
			     const u8 *priomap)
{
	struct mlxsw_sp_qdisc *child_qdisc;
	int tclass, i, band, backlog;
	u8 old_priomap;
	int err;

	for (band = 0; band < nbands; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
		old_priomap = child_qdisc->prio_bitmap;
		child_qdisc->prio_bitmap = 0;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP,
					    tclass, 0, !!quanta[band],
					    weights[band]);
		if (err)
			return err;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (priomap[i] == band) {
				child_qdisc->prio_bitmap |= BIT(i);
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass);
				if (err)
					return err;
			}
		}
		if (old_priomap != child_qdisc->prio_bitmap &&
		    child_qdisc->ops && child_qdisc->ops->clean_stats) {
			backlog = child_qdisc->stats_base.backlog;
			child_qdisc->ops->clean_stats(mlxsw_sp_port,
						      child_qdisc);
			child_qdisc->stats_base.backlog = backlog;
		}
	}
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
		child_qdisc->prio_bitmap = 0;
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      tclass, 0, false, 0);
	}
	return 0;
}

static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
					    zeroes, zeroes, p->priomap);
}

static void
__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       struct gnet_stats_queue *qstats)
{
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	qstats->backlog -= backlog;
}

static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
				       p->qstats);
}

static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc *tc_qdisc;
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 backlog = 0;
	u64 drops = 0;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		tc_qdisc = &mlxsw_sp_port->tclass_qdiscs[i];
		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
						&tx_bytes, &tx_packets,
						&drops, &backlog);
	}

	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
				    stats_ptr);
	return 0;
}

static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	stats_base->tx_packets = stats->tx_packets;
	stats_base->tx_bytes = stats->tx_bytes;

	stats_base->drops = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
		stats_base->drops += xstats->wred_drop[i];
	}

	mlxsw_sp_qdisc->stats_base.backlog = 0;
}
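/* An illustrative command that exercises the PRIO offload below (the exact
 * invocation depends on the iproute2 version):
 *
 *  # tc qdisc replace dev swp1 root handle 1: \
 *	prio bands 3 priomap 2 2 1 1 0 0 0 0 0 0 0 0 0 0 0 0
 *
 * The resulting TC_PRIO_REPLACE notification maps band 0 to tclass 7, band 1
 * to tclass 6 and band 2 to tclass 5.
 */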
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

static int
mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}

static int
mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
					    p->quanta, p->weights, p->priomap);
}

static void
mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
				       p->qstats);
}

static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
	.type = MLXSW_SP_QDISC_ETS,
	.check_params = mlxsw_sp_qdisc_ets_check_params,
	.replace = mlxsw_sp_qdisc_ets_replace,
	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
	.destroy = mlxsw_sp_qdisc_ets_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

/* Linux allows linking of Qdiscs to arbitrary classes (so long as the
 * resulting graph is free of cycles). These operations do not change the
 * parent handle though, which means it can be incomplete (if there is more
 * than one class where the Qdisc in question is grafted) or outright wrong
 * (if the Qdisc was linked to a different class and then removed from the
 * original class).
 *
 * E.g. consider this sequence of operations:
 *
 *  # tc qdisc add dev swp1 root handle 1: prio
 *  # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
 *  RED: set bandwidth to 10Mbit
 *  # tc qdisc link dev swp1 handle 13: parent 1:2
 *
 * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
 * child. But RED will still only claim that 1:3 is its parent. If it's
 * removed from that band, its only parent will be 1:2, but it will continue
 * to claim that it is in fact 1:3.
 *
 * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
 * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
 * notification to offload the child Qdisc, based on its parent handle, and
 * use the graft operation to validate that the class where the child is
 * actually grafted corresponds to the parent handle. If the two don't match,
 * we unoffload the child.
 */
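/* Continuing the example above: the "link" command generates a graft
 * notification for band 1 with child_handle 13:. Band 1's tclass does not
 * have 13: offloaded, so the validation below fails; the RED Qdisc, found by
 * its handle on band 2's tclass, is unoffloaded, and -EOPNOTSUPP is
 * returned.
 */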
static int
__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   u8 band, u32 child_handle)
{
	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
	struct mlxsw_sp_qdisc *old_qdisc;

	if (band < IEEE_8021QAZ_MAX_TCS &&
	    mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle)
		return 0;

	if (!child_handle) {
		/* This is an invisible FIFO replacing the original Qdisc.
		 * Ignore it--the original Qdisc's destroy will follow.
		 */
		return 0;
	}

	/* See if the grafted qdisc is already offloaded on any tclass. If so,
	 * unoffload it.
	 */
	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
						  child_handle);
	if (old_qdisc)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
			       &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  struct tc_prio_qopt_offload_graft_params *p)
{
	return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
					  p->band, p->child_handle);
}

int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct tc_prio_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_PRIO_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_prio,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_PRIO))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_PRIO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_PRIO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_PRIO_GRAFT:
		return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
						 &p->graft_params);
	default:
		return -EOPNOTSUPP;
	}
}
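/* An illustrative ETS counterpart to the PRIO example above (again, exact
 * syntax depends on the iproute2 version):
 *
 *  # tc qdisc replace dev swp1 root handle 1: \
 *	ets bands 8 strict 8 priomap 7 6 5 4 3 2 1 0
 *
 * This is handled as TC_ETS_REPLACE below, with the per-band quanta (zero
 * for strict bands) and weights forwarded to __mlxsw_sp_qdisc_ets_replace().
 */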
int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_ets_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_ETS_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_ets,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_ETS))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_ETS_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_ETS_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_ETS_GRAFT:
		return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
						  p->graft_params.band,
						  p->graft_params.child_handle);
	default:
		return -EOPNOTSUPP;
	}
}

int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
	int i;

	mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
	if (!mlxsw_sp_qdisc)
		goto err_root_qdisc_init;

	mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
	mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
	mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;

	mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
				 sizeof(*mlxsw_sp_qdisc),
				 GFP_KERNEL);
	if (!mlxsw_sp_qdisc)
		goto err_tclass_qdiscs_init;

	mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;

	return 0;

err_tclass_qdiscs_init:
	kfree(mlxsw_sp_port->root_qdisc);
err_root_qdisc_init:
	return -ENOMEM;
}

void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->tclass_qdiscs);
	kfree(mlxsw_sp_port->root_qdisc);
}