// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet QoS submodule
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Quality of service module includes:
 * Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
 * Interspersed Express Traffic (IET - P802.3br/D2.0)
 */

#include <linux/pm_runtime.h>
#include <linux/math.h>
#include <linux/time.h>
#include <linux/units.h>
#include <net/pkt_cls.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
#include "cpsw_ale.h"

#define TO_MBPS(x)      DIV_ROUND_UP((x), BYTES_PER_MBIT)

enum timer_act {
        TACT_PROG,              /* need to program the timer */
        TACT_NEED_STOP,         /* need to stop it first */
        TACT_SKIP_PROG,         /* only the buffer can be updated */
};

static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs);

static u32
am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
{
        u32 ir;

        bus_freq /= 1000000;
        ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);
        return ir;
}

static void am65_cpsw_tx_pn_shaper_reset(struct am65_cpsw_port *port)
{
        int prio;

        for (prio = 0; prio < AM65_CPSW_PN_FIFO_PRIO_NUM; prio++) {
                writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
                writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
        }
}

static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port)
{
        struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
        struct am65_cpsw_common *common = port->common;
        struct tc_mqprio_qopt_offload *mqprio;
        bool enable, shaper_susp = false;
        u32 rate_mbps;
        int tc, prio;

        mqprio = &p_mqprio->mqprio_hw;
        /* takes care of the no-link case as well */
        if (p_mqprio->max_rate_total > port->qos.link_speed)
                shaper_susp = true;

        am65_cpsw_tx_pn_shaper_reset(port);

        enable = p_mqprio->shaper_en && !shaper_susp;
        if (!enable)
                return;

        /* The rate limit is specified per Traffic Class, but for CPSW the
         * rate limit can only be applied per priority at the port FIFO.
         *
         * We have assigned the same priority (TCn) to all queues of a
         * Traffic Class, so they share the same shaper bandwidth.
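         *
         * For illustration only (assuming a 250 MHz bus clock, an example
         * value rather than a datasheet number): a 100 Mbit/s min_rate
         * programs CIR = DIV_ROUND_UP(100 * 32768, 250) = 13108, and a
         * 150 Mbit/s max_rate programs EIR from the 50 Mbit/s excess over
         * min_rate.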
         */
        for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
                prio = tc;

                rate_mbps = TO_MBPS(mqprio->min_rate[tc]);
                rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
                                                       common->bus_freq);
                writel(rate_mbps,
                       port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));

                rate_mbps = 0;

                if (mqprio->max_rate[tc]) {
                        rate_mbps = mqprio->max_rate[tc] - mqprio->min_rate[tc];
                        rate_mbps = TO_MBPS(rate_mbps);
                        rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
                                                               common->bus_freq);
                }

                writel(rate_mbps,
                       port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
        }
}

static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
                                          struct tc_mqprio_qopt_offload *mqprio)
{
        struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
        struct netlink_ext_ack *extack = mqprio->extack;
        u64 min_rate_total = 0, max_rate_total = 0;
        u32 min_rate_msk = 0, max_rate_msk = 0;
        bool has_min_rate, has_max_rate;
        int num_tc, i;

        if (!(mqprio->flags & TC_MQPRIO_F_SHAPER))
                return 0;

        if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
                return 0;

        has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
        has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);

        if (!has_min_rate && has_max_rate) {
                NL_SET_ERR_MSG_MOD(extack, "min_rate is required with max_rate");
                return -EOPNOTSUPP;
        }

        if (!has_min_rate)
                return 0;

        num_tc = mqprio->qopt.num_tc;

        for (i = num_tc - 1; i >= 0; i--) {
                u32 ch_msk;

                if (mqprio->min_rate[i])
                        min_rate_msk |= BIT(i);
                min_rate_total += mqprio->min_rate[i];

                if (has_max_rate) {
                        if (mqprio->max_rate[i])
                                max_rate_msk |= BIT(i);
                        max_rate_total += mqprio->max_rate[i];

                        if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
                                                       "TX tc%d rate max>0 but min=0",
                                                       i);
                                return -EINVAL;
                        }

                        if (mqprio->max_rate[i] &&
                            mqprio->max_rate[i] < mqprio->min_rate[i]) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
                                                       "TX tc%d rate min(%llu)>max(%llu)",
                                                       i, mqprio->min_rate[i],
                                                       mqprio->max_rate[i]);
                                return -EINVAL;
                        }
                }

                ch_msk = GENMASK(num_tc - 1, i);
                if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
                        NL_SET_ERR_MSG_FMT_MOD(extack,
                                               "Min rate must be set sequentially hi->lo tx_rate_msk%x",
                                               min_rate_msk);
                        return -EINVAL;
                }

                if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
                        NL_SET_ERR_MSG_FMT_MOD(extack,
                                               "Max rate must be set sequentially hi->lo tx_rate_msk%x",
                                               max_rate_msk);
                        return -EINVAL;
                }
        }

        min_rate_total = TO_MBPS(min_rate_total);
        max_rate_total = TO_MBPS(max_rate_total);

        p_mqprio->shaper_en = true;
        p_mqprio->max_rate_total = max_t(u64, min_rate_total, max_rate_total);

        return 0;
}

static void am65_cpsw_reset_tc_mqprio(struct net_device *ndev)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;

        p_mqprio->shaper_en = false;
        p_mqprio->max_rate_total = 0;

        am65_cpsw_tx_pn_shaper_reset(port);
        netdev_reset_tc(ndev);

        /* Reset all Queue priorities to 0 */
        writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);

        am65_cpsw_iet_change_preemptible_tcs(port, 0);
}
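
/* Worked example of the TX_PRI_MAP value built below (illustrative queue
 * layout, not a requirement): with num_tc = 2 and queue ranges 2@0 and 2@2,
 * queues 0-1 keep switch priority 0 and queues 2-3 get switch priority 1,
 * i.e. tx_prio_map = 0x1100 (one 4-bit priority field per queue).
 */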
static int am65_cpsw_setup_mqprio(struct net_device *ndev, void *type_data)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
        struct tc_mqprio_qopt_offload *mqprio = type_data;
        struct am65_cpsw_common *common = port->common;
        struct tc_mqprio_qopt *qopt = &mqprio->qopt;
        int i, tc, offset, count, prio, ret;
        u8 num_tc = qopt->num_tc;
        u32 tx_prio_map = 0;

        memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));

        ret = pm_runtime_get_sync(common->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(common->dev);
                return ret;
        }

        if (!num_tc) {
                am65_cpsw_reset_tc_mqprio(ndev);
                ret = 0;
                goto exit_put;
        }

        ret = am65_cpsw_mqprio_verify_shaper(port, mqprio);
        if (ret)
                goto exit_put;

        netdev_set_num_tc(ndev, num_tc);

        /* Multiple Linux priorities can map to a Traffic Class.
         * A Traffic Class can have multiple contiguous Queues,
         * and Queues get mapped to Channels (thread_id).
         * If not VLAN tagged, thread_id is used as packet_priority;
         * if VLAN tagged, the VLAN priority is used as packet_priority.
         * packet_priority gets mapped to header_priority in p0_rx_pri_map,
         * and header_priority gets mapped to switch_priority in pn_tx_pri_map.
         * As p0_rx_pri_map is left at its default (0x76543210), we can
         * assume that Queue_n gets mapped to header_priority_n. We can then
         * set the switch priority in pn_tx_pri_map.
         */

        for (tc = 0; tc < num_tc; tc++) {
                prio = tc;

                /* For simplicity we assign the same priority (TCn) to
                 * all queues of a Traffic Class.
                 */
                for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
                        tx_prio_map |= prio << (4 * i);

                count = qopt->count[tc];
                offset = qopt->offset[tc];
                netdev_set_tc_queue(ndev, tc, count, offset);
        }

        writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);

        am65_cpsw_tx_pn_shaper_apply(port);
        am65_cpsw_iet_change_preemptible_tcs(port, mqprio->preemptible_tcs);

exit_put:
        pm_runtime_put(common->dev);

        return ret;
}

static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
{
        int verify_time_ms = port->qos.iet.verify_time_ms;
        u32 val;

        /* The number of wireside clocks contained in the verify
         * timeout counter. The default is 0x1312d0
         * (10 ms at 125 MHz in 1G mode).
         */
        val = 125 * HZ_PER_MHZ;        /* assuming 125 MHz wireside clock */

        val /= MILLIHZ_PER_HZ;         /* count per ms timeout */
        val *= verify_time_ms;         /* count for timeout ms */

        if (val > AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
                return -EINVAL;

        writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY);

        return 0;
}

static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
{
        u32 ctrl, status;
        int try;

        try = 20;
        do {
                /* Reset the verify state machine by writing 1
                 * to LINKFAIL
                 */
                ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
                ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
                writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

                /* Clear MAC_LINKFAIL bit to start Verify.
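                 * Clearing it (1 -> 0) lets the MAC Merge layer run the
                 * verify exchange with the link partner; the outcome is
                 * polled from the IET_STATUS register below. (Descriptive
                 * note based on this programming sequence, not on a TRM.)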
                 */
                ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
                ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
                writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

                msleep(port->qos.iet.verify_time_ms);

                status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
                if (status & AM65_CPSW_PN_MAC_VERIFIED)
                        return 0;

                if (status & AM65_CPSW_PN_MAC_VERIFY_FAIL) {
                        netdev_dbg(port->ndev,
                                   "MAC Merge verify failed, trying again\n");
                        continue;
                }

                if (status & AM65_CPSW_PN_MAC_RESPOND_ERR) {
                        netdev_dbg(port->ndev, "MAC Merge respond error\n");
                        return -ENODEV;
                }

                if (status & AM65_CPSW_PN_MAC_VERIFY_ERR) {
                        netdev_dbg(port->ndev, "MAC Merge verify error\n");
                        return -ENODEV;
                }
        } while (try-- > 0);

        netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
        return -ETIMEDOUT;
}

static void am65_cpsw_iet_set_preempt_mask(struct am65_cpsw_port *port, u8 preemptible_tcs)
{
        u32 val;

        val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
        val &= ~AM65_CPSW_PN_IET_MAC_PREMPT_MASK;
        val |= AM65_CPSW_PN_IET_MAC_SET_PREEMPT(preemptible_tcs);
        writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
}

/* enable common IET_ENABLE only if at least 1 port has rx IET enabled.
 * UAPI doesn't allow tx enable without rx enable.
 */
void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
{
        struct am65_cpsw_port *port;
        bool rx_enable = false;
        u32 val;
        int i;

        for (i = 0; i < common->port_num; i++) {
                port = &common->ports[i];
                val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
                rx_enable = !!(val & AM65_CPSW_PN_CTL_IET_PORT_EN);
                if (rx_enable)
                        break;
        }

        val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

        if (rx_enable)
                val |= AM65_CPSW_CTL_IET_EN;
        else
                val &= ~AM65_CPSW_CTL_IET_EN;

        writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
        common->iet_enabled = rx_enable;
}

/* CPSW does not have an IRQ to notify changes to the MAC Merge TX status
 * (active/inactive), but the preemptible traffic classes should only be
 * committed to hardware once TX is active. Resort to polling.
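 *
 * Callers serialize this through the per-netdev mm_lock; see
 * am65_cpsw_iet_change_preemptible_tcs() and
 * am65_cpsw_iet_link_state_update() below.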
 */
void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
{
        u8 preemptible_tcs;
        int err;
        u32 val;

        if (port->qos.link_speed == SPEED_UNKNOWN)
                return;

        val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
        if (!(val & AM65_CPSW_PN_CTL_IET_PORT_EN))
                return;

        /* update common IET enable */
        am65_cpsw_iet_common_enable(port->common);

        /* update verify count */
        err = am65_cpsw_iet_set_verify_timeout_count(port);
        if (err) {
                netdev_err(port->ndev, "couldn't set verify count: %d\n", err);
                return;
        }

        val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
        if (!(val & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)) {
                err = am65_cpsw_iet_verify_wait(port);
                if (err)
                        return;
        }

        preemptible_tcs = port->qos.iet.preemptible_tcs;
        am65_cpsw_iet_set_preempt_mask(port, preemptible_tcs);
}

static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs)
{
        struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);

        port->qos.iet.preemptible_tcs = preemptible_tcs;
        mutex_lock(&priv->mm_lock);
        am65_cpsw_iet_commit_preemptible_tcs(port);
        mutex_unlock(&priv->mm_lock);
}

static void am65_cpsw_iet_link_state_update(struct net_device *ndev)
{
        struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

        mutex_lock(&priv->mm_lock);
        am65_cpsw_iet_commit_preemptible_tcs(port);
        mutex_unlock(&priv->mm_lock);
}

static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
        return port->qos.est_oper || port->qos.est_admin;
}

static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
{
        u32 val;

        val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

        if (enable)
                val |= AM65_CPSW_CTL_EST_EN;
        else
                val &= ~AM65_CPSW_CTL_EST_EN;

        writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
        common->est_enabled = enable;
}

static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
{
        u32 val;

        val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
        if (enable)
                val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
        else
                val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;

        writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
}

/* target new EST RAM buffer, actual toggle happens after cycle completion */
static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
                                              int buf_num)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        u32 val;

        val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
        if (buf_num)
                val |= AM65_CPSW_PN_EST_BUFSEL;
        else
                val &= ~AM65_CPSW_PN_EST_BUFSEL;

        writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
}

/* am65_cpsw_port_est_is_swapped() - Indicate whether the h/w has transitioned
 * admin -> oper
 *
 * Return true if the transition has already happened, i.e. oper equals admin
 * and the buffer numbers match (est_oper->buf matches est_admin->buf).
 * Return false if it has not happened yet, i.e. oper does not equal admin
 * (a previous admin command is still waiting to be transitioned to the oper
 * state and est_oper->buf does not match est_admin->buf).
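 *
 * The oper buffer is read back from the EST_BUFACT bit of the port FIFO
 * status register and the admin buffer from the EST_BUFSEL bit of EST_CTL,
 * as done below.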
 */
static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
                                         int *admin)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        u32 val;

        val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
        *oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);

        val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
        *admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);

        return *admin == *oper;
}

/* am65_cpsw_port_est_get_free_buf_num() - Get a free buffer number for
 * Admin to program the new schedule.
 *
 * Logic as follows:
 * If oper is the same as admin, return the other buffer (!oper) as the admin
 * buffer. If oper is not the same, the driver lets the current oper continue,
 * as it is in the process of transitioning from admin -> oper. So keep the
 * oper buffer by writing the same value to the EST_BUFSEL bit in the EST CTL
 * register. In the second iteration they will match and the code returns.
 * The actual buffer to write the command to is selected later, before it is
 * ready to update the schedule.
 */
static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
{
        int oper, admin;
        int roll = 2;

        while (roll--) {
                if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
                        return !oper;

                /* admin is not set, so hinder the transition by targeting the
                 * same oper buf; touching memory in-flight is not allowed.
                 */
                am65_cpsw_port_est_assign_buf_num(ndev, oper);

                dev_info(&ndev->dev,
                         "Prev. EST admin cycle is in transit %d -> %d\n",
                         oper, admin);
        }

        return admin;
}

static void am65_cpsw_admin_to_oper(struct net_device *ndev)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

        devm_kfree(&ndev->dev, port->qos.est_oper);

        port->qos.est_oper = port->qos.est_admin;
        port->qos.est_admin = NULL;
}

static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
                                           struct am65_cpsw_est *est_new)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        u32 val;

        val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
        val &= ~AM65_CPSW_PN_EST_ONEBUF;
        writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);

        est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);

        /* a rolled buf num means the buf changed while configuring */
        if (port->qos.est_oper && port->qos.est_admin &&
            est_new->buf == port->qos.est_oper->buf)
                am65_cpsw_admin_to_oper(ndev);
}

static void am65_cpsw_est_set(struct net_device *ndev, int enable)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct am65_cpsw_common *common = port->common;
        int common_enable = 0;
        int i;

        am65_cpsw_port_est_enable(port, enable);

        for (i = 0; i < common->port_num; i++)
                common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);

        common_enable |= enable;
        am65_cpsw_est_enable(common, common_enable);
}

/* This update is meant to be used in any routine before reading the real
 * state of the admin -> oper transition; in particular it is meant to be
 * used in generic routines that report the real state to the Taprio Qdisc.
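 *
 * If the hardware has already switched to the pending admin schedule,
 * est_admin is promoted to est_oper here so that the software state
 * mirrors the hardware buffer selection.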
 */
static void am65_cpsw_est_update_state(struct net_device *ndev)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        int oper, admin;

        if (!port->qos.est_admin)
                return;

        if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
                return;

        am65_cpsw_admin_to_oper(ndev);
}

/* The fetch command count is the number of bytes in Gigabit mode or nibbles
 * in 10/100Mb mode. So, given the link speed and a time in ns, convert the
 * ns to the number of bytes/nibbles that can be sent while transmitting at
 * that speed.
 */
static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
{
        u64 temp;

        temp = ns * link_speed;
        if (link_speed < SPEED_1000)
                temp <<= 1;

        return DIV_ROUND_UP(temp, 8 * 1000);
}

static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
                                                  int fetch_cnt,
                                                  int fetch_allow)
{
        u32 prio_mask, cmd_fetch_cnt, cmd;

        do {
                if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
                        fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
                        cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
                } else {
                        cmd_fetch_cnt = fetch_cnt;
                        /* fetch count can't be less than 16? */
                        if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
                                cmd_fetch_cnt = 16;

                        fetch_cnt = 0;
                }

                prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
                cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;

                writel(cmd, addr);
                addr += 4;
        } while (fetch_cnt);

        return addr;
}

static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
                                      struct tc_taprio_qopt_offload *taprio,
                                      int link_speed)
{
        int i, cmd_cnt, cmd_sum = 0;
        u32 fetch_cnt;

        for (i = 0; i < taprio->num_entries; i++) {
                if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
                        dev_err(&ndev->dev, "Only SET command is supported");
                        return -EINVAL;
                }

                fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
                                                   link_speed);

                cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
                if (!cmd_cnt)
                        cmd_cnt++;

                cmd_sum += cmd_cnt;

                if (!fetch_cnt)
                        break;
        }

        return cmd_sum;
}

static int am65_cpsw_est_check_scheds(struct net_device *ndev,
                                      struct am65_cpsw_est *est_new)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        int cmd_num;

        cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
                                             port->qos.link_speed);
        if (cmd_num < 0)
                return cmd_num;

        if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
                dev_err(&ndev->dev, "No fetch RAM");
                return -ENOMEM;
        }

        return 0;
}

static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
                                         struct am65_cpsw_est *est_new)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
        void __iomem *ram_addr, *max_ram_addr;
        struct tc_taprio_sched_entry *entry;
        int i, ram_size;

        ram_addr = port->fetch_ram_base;
        ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
        ram_addr += est_new->buf * ram_size;

        max_ram_addr = ram_size + ram_addr;
        for (i = 0; i < est_new->taprio.num_entries; i++) {
                entry = &est_new->taprio.entries[i];

                fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
                                                   port->qos.link_speed);
                fetch_allow = entry->gate_mask;
                if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
                        dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
                                fetch_allow);
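
                /* Worked example: on a 1000 Mbit/s link a 125000 ns gate
                 * interval converts to 125000 * 1000 / 8000 = 15625 byte
                 * times; am65_cpsw_est_set_sched_cmds() splits counts larger
                 * than AM65_CPSW_FETCH_CNT_MAX across several fetch commands.
                 */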
                ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
                                                        fetch_allow);

                if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
                        dev_info(&ndev->dev,
                                 "next scheds after %d have no impact", i + 1);
                        break;
                }

                all_fetch_allow |= fetch_allow;
        }

        /* end cmd, enabling non-timed queues for potential over cycle time */
        if (ram_addr < max_ram_addr)
                writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}

/*
 * Enable ESTf periodic output, set cycle start time and interval.
 */
static int am65_cpsw_timer_set(struct net_device *ndev,
                               struct am65_cpsw_est *est_new)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct am65_cpsw_common *common = port->common;
        struct am65_cpts *cpts = common->cpts;
        struct am65_cpts_estf_cfg cfg;

        cfg.ns_period = est_new->taprio.cycle_time;
        cfg.ns_start = est_new->taprio.base_time;

        return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
}

static void am65_cpsw_timer_stop(struct net_device *ndev)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct am65_cpts *cpts = port->common->cpts;

        am65_cpts_estf_disable(cpts, port->port_id - 1);
}

static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
                                          struct am65_cpsw_est *est_new)
{
        struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct am65_cpts *cpts = port->common->cpts;
        u64 cur_time;
        s64 diff;

        if (!port->qos.est_oper)
                return TACT_PROG;

        taprio_new = &est_new->taprio;
        taprio_oper = &port->qos.est_oper->taprio;

        if (taprio_new->cycle_time != taprio_oper->cycle_time)
                return TACT_NEED_STOP;

        /* in order to avoid a timer reset, get base_time from the oper taprio */
        if (!taprio_new->base_time && taprio_oper)
                taprio_new->base_time = taprio_oper->base_time;

        if (taprio_new->base_time == taprio_oper->base_time)
                return TACT_SKIP_PROG;
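
        /* If the new admin base time is offset from the oper base time by a
         * whole number of cycles, the running ESTF timer phase is unaffected
         * and reprogramming can be skipped; otherwise the timer would have to
         * be stopped first.
         */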
        /* base times are cycle synchronized */
        diff = taprio_new->base_time - taprio_oper->base_time;
        diff = diff < 0 ? -diff : diff;
        if (diff % taprio_new->cycle_time)
                return TACT_NEED_STOP;

        cur_time = am65_cpts_ns_gettime(cpts);
        if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
                return TACT_SKIP_PROG;

        /* TODO: Admin schedule at future time is not currently supported */
        return TACT_NEED_STOP;
}

static void am65_cpsw_stop_est(struct net_device *ndev)
{
        am65_cpsw_est_set(ndev, 0);
        am65_cpsw_timer_stop(ndev);
}

static void am65_cpsw_taprio_destroy(struct net_device *ndev)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

        am65_cpsw_stop_est(ndev);

        devm_kfree(&ndev->dev, port->qos.est_admin);
        devm_kfree(&ndev->dev, port->qos.est_oper);

        port->qos.est_oper = NULL;
        port->qos.est_admin = NULL;

        am65_cpsw_reset_tc_mqprio(ndev);
}

static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
                                struct tc_taprio_qopt_offload *to)
{
        int i;

        *to = *from;
        for (i = 0; i < from->num_entries; i++)
                to->entries[i] = from->entries[i];
}

static int am65_cpsw_taprio_replace(struct net_device *ndev,
                                    struct tc_taprio_qopt_offload *taprio)
{
        struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
        struct netlink_ext_ack *extack = taprio->mqprio.extack;
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct am65_cpts *cpts = common->cpts;
        struct am65_cpsw_est *est_new;
        int ret, tact;

        if (!netif_running(ndev)) {
                NL_SET_ERR_MSG_MOD(extack, "interface is down, link speed unknown");
                return -ENETDOWN;
        }

        if (common->pf_p0_rx_ptype_rrobin) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "p0-rx-ptype-rrobin flag conflicts with taprio qdisc");
                return -EINVAL;
        }

        if (port->qos.link_speed == SPEED_UNKNOWN)
                return -ENOLINK;

        if (taprio->cycle_time_extension) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "cycle time extension not supported");
                return -EOPNOTSUPP;
        }

        est_new = devm_kzalloc(&ndev->dev,
                               struct_size(est_new, taprio.entries, taprio->num_entries),
                               GFP_KERNEL);
        if (!est_new)
                return -ENOMEM;

        ret = am65_cpsw_setup_mqprio(ndev, &taprio->mqprio);
        if (ret)
                return ret;

        am65_cpsw_cp_taprio(taprio, &est_new->taprio);

        am65_cpsw_est_update_state(ndev);

        ret = am65_cpsw_est_check_scheds(ndev, est_new);
        if (ret < 0)
                goto fail;

        tact = am65_cpsw_timer_act(ndev, est_new);
        if (tact == TACT_NEED_STOP) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can't toggle estf timer, stop taprio first");
                ret = -EINVAL;
                goto fail;
        }

        if (tact == TACT_PROG)
                am65_cpsw_timer_stop(ndev);

        if (!est_new->taprio.base_time)
                est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);

        am65_cpsw_port_est_get_buf_num(ndev, est_new);
        am65_cpsw_est_set_sched_list(ndev, est_new);
        am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);

        am65_cpsw_est_set(ndev, 1);

        if (tact == TACT_PROG) {
                ret = am65_cpsw_timer_set(ndev, est_new);
                if (ret) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Failed to set cycle time");
                        goto fail;
                }
        }

        devm_kfree(&ndev->dev, port->qos.est_admin);
        port->qos.est_admin = est_new;
        am65_cpsw_iet_change_preemptible_tcs(port, taprio->mqprio.preemptible_tcs);

        return 0;

fail:
        am65_cpsw_reset_tc_mqprio(ndev);
        devm_kfree(&ndev->dev, est_new);
        return ret;
}
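
/* If the link stayed down for more than a second, the programmed EST
 * schedule is treated as stale and the whole taprio state is torn down
 * (see the link_down_time check below).
 */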
static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        ktime_t cur_time;
        s64 delta;

        if (!am65_cpsw_port_est_enabled(port))
                return;

        if (port->qos.link_down_time) {
                cur_time = ktime_get();
                delta = ktime_us_delta(cur_time, port->qos.link_down_time);
                if (delta > USEC_PER_SEC) {
                        dev_err(&ndev->dev,
                                "Link has been lost too long, stopping TAS");
                        goto purge_est;
                }
        }

        return;

purge_est:
        am65_cpsw_taprio_destroy(ndev);
}

static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
{
        struct tc_taprio_qopt_offload *taprio = type_data;
        int err = 0;

        switch (taprio->cmd) {
        case TAPRIO_CMD_REPLACE:
                err = am65_cpsw_taprio_replace(ndev, taprio);
                break;
        case TAPRIO_CMD_DESTROY:
                am65_cpsw_taprio_destroy(ndev);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
{
        struct tc_query_caps_base *base = type_data;

        switch (base->type) {
        case TC_SETUP_QDISC_MQPRIO: {
                struct tc_mqprio_caps *caps = base->caps;

                caps->validate_queue_counts = true;

                return 0;
        }

        case TC_SETUP_QDISC_TAPRIO: {
                struct tc_taprio_caps *caps = base->caps;

                caps->gate_mask_per_txq = true;

                return 0;
        }
        default:
                return -EOPNOTSUPP;
        }
}

static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
                                               struct netlink_ext_ack *extack,
                                               struct flow_cls_offload *cls,
                                               u64 rate_pkt_ps)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
        struct am65_cpsw_qos *qos = &port->qos;
        struct flow_match_eth_addrs match;
        int ret;

        if (dissector->used_keys &
            ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
              BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Unsupported keys used");
                return -EOPNOTSUPP;
        }

        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
                return -EOPNOTSUPP;
        }

        flow_rule_match_eth_addrs(rule, &match);

        if (!is_zero_ether_addr(match.mask->src)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Matching on source MAC not supported");
                return -EOPNOTSUPP;
        }

        if (is_broadcast_ether_addr(match.key->dst) &&
            is_broadcast_ether_addr(match.mask->dst)) {
                ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
                if (ret)
                        return ret;

                qos->ale_bc_ratelimit.cookie = cls->cookie;
                qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
        } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
                   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
                ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
                if (ret)
                        return ret;

                qos->ale_mc_ratelimit.cookie = cls->cookie;
                qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
        } else {
                NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
                return -EOPNOTSUPP;
        }

        return 0;
}
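
/* Only packet-per-second policing (rate_pkt_ps) is offloaded; byte rate,
 * peak rate, average rate and overhead, as well as any exceed action other
 * than drop, are rejected below.
 */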
static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
                                                    const struct flow_action_entry *act,
                                                    struct netlink_ext_ack *extack)
{
        if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when exceed action is not drop");
                return -EOPNOTSUPP;
        }

        if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
            act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when conform action is not pipe or ok");
                return -EOPNOTSUPP;
        }

        if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
            !flow_action_is_last_entry(action, act)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when conform action is ok, but action is not last");
                return -EOPNOTSUPP;
        }

        if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
            act->police.avrate || act->police.overhead) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
                return -EOPNOTSUPP;
        }

        return 0;
}

static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
                                             struct flow_cls_offload *cls)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct netlink_ext_ack *extack = cls->common.extack;
        const struct flow_action_entry *act;
        int i, ret;

        flow_action_for_each(i, act, &rule->action) {
                switch (act->id) {
                case FLOW_ACTION_POLICE:
                        ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
                        if (ret)
                                return ret;

                        return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
                                                                   act->police.rate_pkt_ps);
                default:
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Action not supported");
                        return -EOPNOTSUPP;
                }
        }
        return -EOPNOTSUPP;
}

static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
{
        struct am65_cpsw_qos *qos = &port->qos;

        if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
                qos->ale_bc_ratelimit.cookie = 0;
                qos->ale_bc_ratelimit.rate_packet_ps = 0;
                cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
        }

        if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
                qos->ale_mc_ratelimit.cookie = 0;
                qos->ale_mc_ratelimit.rate_packet_ps = 0;
                cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
        }

        return 0;
}

static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
                                            struct flow_cls_offload *cls_flower)
{
        switch (cls_flower->command) {
        case FLOW_CLS_REPLACE:
                return am65_cpsw_qos_configure_clsflower(port, cls_flower);
        case FLOW_CLS_DESTROY:
                return am65_cpsw_qos_delete_clsflower(port, cls_flower);
        default:
                return -EOPNOTSUPP;
        }
}

static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
        struct am65_cpsw_port *port = cb_priv;

        if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static LIST_HEAD(am65_cpsw_qos_block_cb_list);

static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

        return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
                                          am65_cpsw_qos_setup_tc_block_cb,
                                          port, port, true);
}

static void
am65_cpsw_qos_tx_p0_rate_apply(struct am65_cpsw_common *common,
                               int tx_ch, u32 rate_mbps)
{
        struct am65_cpsw_host *host = am65_common_get_host(common);
        u32 ch_cir;
        int i;

        ch_cir = am65_cpsw_qos_tx_rate_calc(rate_mbps, common->bus_freq);
        writel(ch_cir, host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));

        /* update the rate for every port's tx queues */
        for (i = 0; i < common->port_num; i++) {
                struct net_device *ndev = common->ports[i].ndev;

                if (!ndev)
                        continue;
                netdev_get_tx_queue(ndev, tx_ch)->tx_maxrate = rate_mbps;
        }
}

int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
                                        int queue, u32 rate_mbps)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct am65_cpsw_common *common = port->common;
        struct am65_cpsw_tx_chn *tx_chn;
        u32 ch_rate, tx_ch_rate_msk_new;
        u32 ch_msk = 0;
        int ret;

        dev_dbg(common->dev, "apply TX%d rate limiting %uMbps tx_rate_msk%x\n",
                queue, rate_mbps, common->tx_ch_rate_msk);

        if (common->pf_p0_rx_ptype_rrobin) {
                dev_err(common->dev, "TX Rate Limiting failed - rrobin mode\n");
                return -EINVAL;
        }

        ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
        if (ch_rate == rate_mbps)
                return 0;

        ret = pm_runtime_get_sync(common->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(common->dev);
                return ret;
        }
        ret = 0;

        tx_ch_rate_msk_new = common->tx_ch_rate_msk;
        if (rate_mbps && !(tx_ch_rate_msk_new & BIT(queue))) {
                tx_ch_rate_msk_new |= BIT(queue);
                ch_msk = GENMASK(common->tx_ch_num - 1, queue);
                ch_msk = tx_ch_rate_msk_new ^ ch_msk;
        } else if (!rate_mbps) {
                tx_ch_rate_msk_new &= ~BIT(queue);
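                /* disabling: all lower-numbered queues must already have had
                 * their rate limit removed, mirroring the hi->lo order
                 * enforced on enable above
                 */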
                ch_msk = queue ? GENMASK(queue - 1, 0) : 0;
                ch_msk = tx_ch_rate_msk_new & ch_msk;
        }

        if (ch_msk) {
                dev_err(common->dev, "TX rate limiting has to be enabled sequentially hi->lo tx_rate_msk:%x tx_rate_msk_new:%x\n",
                        common->tx_ch_rate_msk, tx_ch_rate_msk_new);
                ret = -EINVAL;
                goto exit_put;
        }

        tx_chn = &common->tx_chns[queue];
        tx_chn->rate_mbps = rate_mbps;
        common->tx_ch_rate_msk = tx_ch_rate_msk_new;

        if (!common->usage_count)
                /* will be applied on next netif up */
                goto exit_put;

        am65_cpsw_qos_tx_p0_rate_apply(common, queue, rate_mbps);

exit_put:
        pm_runtime_put(common->dev);
        return ret;
}

void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
{
        struct am65_cpsw_host *host = am65_common_get_host(common);
        int tx_ch;

        for (tx_ch = 0; tx_ch < common->tx_ch_num; tx_ch++) {
                struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[tx_ch];
                u32 ch_cir;

                if (!tx_chn->rate_mbps)
                        continue;

                ch_cir = am65_cpsw_qos_tx_rate_calc(tx_chn->rate_mbps,
                                                    common->bus_freq);
                writel(ch_cir,
                       host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
        }
}

int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
                               void *type_data)
{
        switch (type) {
        case TC_QUERY_CAPS:
                return am65_cpsw_tc_query_caps(ndev, type_data);
        case TC_SETUP_QDISC_TAPRIO:
                return am65_cpsw_setup_taprio(ndev, type_data);
        case TC_SETUP_QDISC_MQPRIO:
                return am65_cpsw_setup_mqprio(ndev, type_data);
        case TC_SETUP_BLOCK:
                return am65_cpsw_qos_setup_tc_block(ndev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

        port->qos.link_speed = link_speed;
        am65_cpsw_tx_pn_shaper_apply(port);
        am65_cpsw_iet_link_state_update(ndev);

        am65_cpsw_est_link_up(ndev, link_speed);
        port->qos.link_down_time = 0;
}

void am65_cpsw_qos_link_down(struct net_device *ndev)
{
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

        port->qos.link_speed = SPEED_UNKNOWN;
        am65_cpsw_tx_pn_shaper_apply(port);
        am65_cpsw_iet_link_state_update(ndev);

        if (!port->qos.link_down_time)
                port->qos.link_down_time = ktime_get();
}