// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet QoS submodule
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Quality of Service module includes:
 * Enhanced Scheduled Traffic (EST - P802.1Qbv/D2.2)
 * Interspersing Express Traffic (IET - P802.3br/D2.0)
 */

#include <linux/pm_runtime.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/units.h>
#include <net/pkt_cls.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
#include "cpsw_ale.h"

#define TO_MBPS(x)	DIV_ROUND_UP((x), BYTES_PER_MBIT)
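/* Note (assuming the <linux/units.h> definition): BYTES_PER_MBIT is
 * 125000, i.e. 1 Mbit/s expressed in bytes/s, so TO_MBPS() above converts
 * a rate in bytes per second to a whole number of Mbit/s, rounding up.
 */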

enum timer_act {
	TACT_PROG,		/* need program timer */
	TACT_NEED_STOP,		/* need stop first */
	TACT_SKIP_PROG,		/* just buffer can be updated */
};

static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs);

static u32
am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
{
	u32 ir;

	bus_freq /= 1000000;
	ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);
	return ir;
}
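
/* Worked example with illustrative numbers: rate_mbps = 1000 on a 500 MHz
 * bus gives ir = DIV_ROUND_UP(1000 * 32768, 500) = 65536, i.e. the value
 * programmed into CIR/EIR is the rate scaled by 32768 / bus_MHz; the exact
 * register encoding is per the device TRM.
 */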

static void am65_cpsw_tx_pn_shaper_reset(struct am65_cpsw_port *port)
{
	int prio;

	for (prio = 0; prio < AM65_CPSW_PN_FIFO_PRIO_NUM; prio++) {
		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
	}
}

static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port)
{
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
	struct am65_cpsw_common *common = port->common;
	struct tc_mqprio_qopt_offload *mqprio;
	bool enable, shaper_susp = false;
	u32 rate_mbps;
	int tc, prio;

	mqprio = &p_mqprio->mqprio_hw;
	/* takes care of no link case as well */
	if (p_mqprio->max_rate_total > port->qos.link_speed)
		shaper_susp = true;

	am65_cpsw_tx_pn_shaper_reset(port);

	enable = p_mqprio->shaper_en && !shaper_susp;
	if (!enable)
		return;

	/* Rate limit is specified per Traffic Class but
	 * for CPSW, rate limit can be applied per priority
	 * at port FIFO.
	 *
	 * We have assigned the same priority (TCn) to all queues
	 * of a Traffic Class so they share the same shaper
	 * bandwidth.
	 */
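	/* Example: min_rate = 100 Mbit/s and max_rate = 300 Mbit/s on a TC
	 * program CIR with the committed 100 Mbit/s and EIR with the
	 * 200 Mbit/s excess (max - min), so CIR + EIR together cap the TC
	 * at max_rate.
	 */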
	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
		prio = tc;

		rate_mbps = TO_MBPS(mqprio->min_rate[tc]);
		rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
						       common->bus_freq);
		writel(rate_mbps,
		       port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));

		rate_mbps = 0;

		if (mqprio->max_rate[tc]) {
			rate_mbps = mqprio->max_rate[tc] - mqprio->min_rate[tc];
			rate_mbps = TO_MBPS(rate_mbps);
			rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
							       common->bus_freq);
		}

		writel(rate_mbps,
		       port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
	}
}

static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
					  struct tc_mqprio_qopt_offload *mqprio)
{
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
	struct netlink_ext_ack *extack = mqprio->extack;
	u64 min_rate_total = 0, max_rate_total = 0;
	u32 min_rate_msk = 0, max_rate_msk = 0;
	bool has_min_rate, has_max_rate;
	int num_tc, i;

	if (!(mqprio->flags & TC_MQPRIO_F_SHAPER))
		return 0;

	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
		return 0;

	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);

	if (!has_min_rate && has_max_rate) {
		NL_SET_ERR_MSG_MOD(extack, "min_rate is required with max_rate");
		return -EOPNOTSUPP;
	}

	if (!has_min_rate)
		return 0;

	num_tc = mqprio->qopt.num_tc;

	for (i = num_tc - 1; i >= 0; i--) {
		u32 ch_msk;

		if (mqprio->min_rate[i])
			min_rate_msk |= BIT(i);
		min_rate_total += mqprio->min_rate[i];

		if (has_max_rate) {
			if (mqprio->max_rate[i])
				max_rate_msk |= BIT(i);
			max_rate_total += mqprio->max_rate[i];

			if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "TX tc%d rate max>0 but min=0",
						       i);
				return -EINVAL;
			}

			if (mqprio->max_rate[i] &&
			    mqprio->max_rate[i] < mqprio->min_rate[i]) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "TX tc%d rate min(%llu)>max(%llu)",
						       i, mqprio->min_rate[i],
						       mqprio->max_rate[i]);
				return -EINVAL;
			}
		}

		ch_msk = GENMASK(num_tc - 1, i);
		if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Min rate must be set sequentially hi->lo tx_rate_msk%x",
					       min_rate_msk);
			return -EINVAL;
		}

		if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Max rate must be set sequentially hi->lo tx_rate_msk%x",
					       max_rate_msk);
			return -EINVAL;
		}
	}
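
	/* Example of the hi->lo rule above: with num_tc = 4, setting
	 * min_rate on tc3 and tc1 but not tc2 fails at i = 1, where
	 * ch_msk = GENMASK(3, 1) = 0xe but min_rate_msk = 0xa, so
	 * min_rate_msk ^ ch_msk is non-zero and -EINVAL is returned.
	 */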

	min_rate_total = TO_MBPS(min_rate_total);
	max_rate_total = TO_MBPS(max_rate_total);

	p_mqprio->shaper_en = true;
	p_mqprio->max_rate_total = max_t(u64, min_rate_total, max_rate_total);

	return 0;
}

static void am65_cpsw_reset_tc_mqprio(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;

	p_mqprio->shaper_en = false;
	p_mqprio->max_rate_total = 0;

	am65_cpsw_tx_pn_shaper_reset(port);
	netdev_reset_tc(ndev);

	/* Reset all Queue priorities to 0 */
	writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);

	am65_cpsw_iet_change_preemptible_tcs(port, 0);
}

static int am65_cpsw_setup_mqprio(struct net_device *ndev, void *type_data)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct am65_cpsw_common *common = port->common;
	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
	int i, tc, offset, count, prio, ret;
	u8 num_tc = qopt->num_tc;
	u32 tx_prio_map = 0;

	memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	if (!num_tc) {
		am65_cpsw_reset_tc_mqprio(ndev);
		ret = 0;
		goto exit_put;
	}

	ret = am65_cpsw_mqprio_verify_shaper(port, mqprio);
	if (ret)
		goto exit_put;

	netdev_set_num_tc(ndev, num_tc);

	/* Multiple Linux priorities can map to a Traffic Class.
	 * A Traffic Class can have multiple contiguous Queues,
	 * Queues get mapped to Channels (thread_id):
	 *	if not VLAN tagged, thread_id is used as packet_priority;
	 *	if VLAN tagged, VLAN priority is used as packet_priority.
	 * packet_priority gets mapped to header_priority in p0_rx_pri_map,
	 * header_priority gets mapped to switch_priority in pn_tx_pri_map.
	 * As p0_rx_pri_map is left at defaults (0x76543210), we can
	 * assume that Queue_n gets mapped to header_priority_n. We can then
	 * set the switch priority in pn_tx_pri_map.
	 */
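	/* Example: num_tc = 2 with count = {2, 2} and offset = {0, 2} maps
	 * queues 0,1 to priority 0 and queues 2,3 to priority 1, so the
	 * loop below computes tx_prio_map = 0x1100 (one nibble per queue).
	 */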

	for (tc = 0; tc < num_tc; tc++) {
		prio = tc;

		/* For simplicity we assign the same priority (TCn) to
		 * all queues of a Traffic Class.
		 */
		for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
			tx_prio_map |= prio << (4 * i);

		count = qopt->count[tc];
		offset = qopt->offset[tc];
		netdev_set_tc_queue(ndev, tc, count, offset);
	}

	writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);

	am65_cpsw_tx_pn_shaper_apply(port);
	am65_cpsw_iet_change_preemptible_tcs(port, mqprio->preemptible_tcs);

exit_put:
	pm_runtime_put(common->dev);

	return ret;
}

static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
{
	int verify_time_ms = port->qos.iet.verify_time_ms;
	u32 val;

	/* The number of wireside clocks contained in the verify
	 * timeout counter. The default is 0x1312d0
	 * (10 ms at 125 MHz in 1G mode).
	 * The frequency of the clock depends on the link speed
	 * and the PHY interface.
	 */
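	/* E.g. at 1 Gbit/s: 125 MHz / 1000 = 125000 wireside clocks per ms,
	 * and with the 10 ms default verify_time_ms this gives
	 * 125000 * 10 = 1250000 = 0x1312d0, the documented reset value.
	 */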
	switch (port->slave.phy_if) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->qos.link_speed == SPEED_1000)
			val = 125 * HZ_PER_MHZ;	/* 125 MHz at 1000 Mbps */
		else if (port->qos.link_speed == SPEED_100)
			val = 25 * HZ_PER_MHZ;	/* 25 MHz at 100 Mbps */
		else
			val = (25 * HZ_PER_MHZ) / 10;	/* 2.5 MHz at 10 Mbps */
		break;

	case PHY_INTERFACE_MODE_QSGMII:
	case PHY_INTERFACE_MODE_SGMII:
		val = 125 * HZ_PER_MHZ;	/* 125 MHz */
		break;

	default:
		netdev_err(port->ndev, "selected PHY interface mode does not support IET\n");
		return -EOPNOTSUPP;
	}

	val /= MILLIHZ_PER_HZ;	/* count per ms timeout */
	val *= verify_time_ms;	/* count for timeout ms */

	if (val > AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
		return -EINVAL;

	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY);

	return 0;
}

static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
{
	u32 ctrl, status;
	int try;

	try = 3;

	/* Reset the verify state machine by writing 1
	 * to LINKFAIL
	 */
	ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
	writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

	/* Clear MAC_LINKFAIL bit to start Verify. */
	ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
	writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

	do {
		msleep(port->qos.iet.verify_time_ms);

		status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
		if (status & AM65_CPSW_PN_MAC_VERIFIED)
			return 0;

		if (status & AM65_CPSW_PN_MAC_VERIFY_FAIL) {
			netdev_dbg(port->ndev,
				   "MAC Merge verify failed, trying again\n");
			continue;
		}

		if (status & AM65_CPSW_PN_MAC_RESPOND_ERR) {
			netdev_dbg(port->ndev, "MAC Merge respond error\n");
			return -ENODEV;
		}

		if (status & AM65_CPSW_PN_MAC_VERIFY_ERR) {
			netdev_dbg(port->ndev, "MAC Merge verify error\n");
			return -ENODEV;
		}
	} while (--try > 0);

	netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
	return -ETIMEDOUT;
}

static void am65_cpsw_iet_set_preempt_mask(struct am65_cpsw_port *port, u8 preemptible_tcs)
{
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	val &= ~AM65_CPSW_PN_IET_MAC_PREMPT_MASK;
	val |= AM65_CPSW_PN_IET_MAC_SET_PREEMPT(preemptible_tcs);
	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
}

/* enable common IET_ENABLE only if at least 1 port has rx IET enabled.
 * UAPI doesn't allow tx enable without rx enable.
 */
void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	bool rx_enable = false;
	u32 val;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
		rx_enable = !!(val & AM65_CPSW_PN_CTL_IET_PORT_EN);
		if (rx_enable)
			break;
	}

	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

	if (rx_enable)
		val |= AM65_CPSW_CTL_IET_EN;
	else
		val &= ~AM65_CPSW_CTL_IET_EN;

	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
	common->iet_enabled = rx_enable;
}

/* CPSW does not have an IRQ to notify changes to the MAC Merge TX status
 * (active/inactive), but the preemptible traffic classes should only be
 * committed to hardware once TX is active. Resort to polling.
 */
void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
{
	u8 preemptible_tcs;
	int err;
	u32 val;

	if (port->qos.link_speed == SPEED_UNKNOWN)
		return;

	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
	if (!(val & AM65_CPSW_PN_CTL_IET_PORT_EN))
		return;

	/* update common IET enable */
	am65_cpsw_iet_common_enable(port->common);

	/* update verify count */
	err = am65_cpsw_iet_set_verify_timeout_count(port);
	if (err) {
		netdev_err(port->ndev, "couldn't set verify count: %d\n", err);
		return;
	}

	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	if (!(val & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)) {
		err = am65_cpsw_iet_verify_wait(port);
		if (err)
			return;
	}

	preemptible_tcs = port->qos.iet.preemptible_tcs;
	am65_cpsw_iet_set_preempt_mask(port, preemptible_tcs);
}

static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs)
{
	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);

	port->qos.iet.preemptible_tcs = preemptible_tcs;
	mutex_lock(&priv->mm_lock);
	am65_cpsw_iet_commit_preemptible_tcs(port);
	mutex_unlock(&priv->mm_lock);
}

static void am65_cpsw_iet_link_state_update(struct net_device *ndev)
{
	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	mutex_lock(&priv->mm_lock);
	am65_cpsw_iet_commit_preemptible_tcs(port);
	mutex_unlock(&priv->mm_lock);
}

static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
	return port->qos.est_oper || port->qos.est_admin;
}

static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
{
	u32 val;

	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

	if (enable)
		val |= AM65_CPSW_CTL_EST_EN;
	else
		val &= ~AM65_CPSW_CTL_EST_EN;

	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
	common->est_enabled = enable;
}

static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
{
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
	if (enable)
		val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
	else
		val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;

	writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
}

/* target new EST RAM buffer, actual toggle happens after cycle completion */
static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
					      int buf_num)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	if (buf_num)
		val |= AM65_CPSW_PN_EST_BUFSEL;
	else
		val &= ~AM65_CPSW_PN_EST_BUFSEL;

	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
}

/* am65_cpsw_port_est_is_swapped() - Indicate if h/w has transitioned
 * admin -> oper or not
 *
 * Return true if already transitioned, i.e. oper is equal to admin and the
 * buf numbers match (est_oper->buf matches est_admin->buf).
 * Return false if before transition, i.e. oper is not equal to admin (a
 * previous admin command is waiting to be transitioned to oper state,
 * and est_oper->buf does not match est_admin->buf).
 */
static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
					 int *admin)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
	*oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	*admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);

	return *admin == *oper;
}

/* am65_cpsw_port_est_get_free_buf_num() - Get free buffer number for
 * Admin to program the new schedule.
 *
 * Logic as follows: if oper is the same as admin, return the other
 * buffer (!oper) as the admin buffer. If oper is not the same, the driver
 * lets the current oper continue, as it is in the process of transitioning
 * from admin -> oper, and keeps it by selecting the same oper buffer via
 * the EST_BUFSEL bit in the EST CTL register. On the second iteration they
 * will match and the code returns. The actual buffer to write the command
 * to is selected later, just before the schedule is updated.
 */
static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
{
	int oper, admin;
	int roll = 2;

	while (roll--) {
		if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
			return !oper;

		/* admin is not set, so hinder the transition by targeting
		 * the same oper buf; it's not allowed to touch memory
		 * in-flight.
		 */
		am65_cpsw_port_est_assign_buf_num(ndev, oper);

		dev_info(&ndev->dev,
			 "Prev. EST admin cycle is in transit %d -> %d\n",
			 oper, admin);
	}

	return admin;
}

static void am65_cpsw_admin_to_oper(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = port->qos.est_admin;
	port->qos.est_admin = NULL;
}

static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
					   struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	val &= ~AM65_CPSW_PN_EST_ONEBUF;
	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);

	est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);

	/* rolled buf num means the buf changed while configuring */
	if (port->qos.est_oper && port->qos.est_admin &&
	    est_new->buf == port->qos.est_oper->buf)
		am65_cpsw_admin_to_oper(ndev);
}

static void am65_cpsw_est_set(struct net_device *ndev, int enable)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	int common_enable = 0;
	int i;

	am65_cpsw_port_est_enable(port, enable);

	for (i = 0; i < common->port_num; i++)
		common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);

	common_enable |= enable;
	am65_cpsw_est_enable(common, common_enable);
}

/* This update is supposed to be used in any routine before getting the real
 * state of the admin -> oper transition, particularly in a generic routine
 * providing the real state to the Taprio Qdisc.
 */
static void am65_cpsw_est_update_state(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int oper, admin;

	if (!port->qos.est_admin)
		return;

	if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
		return;

	am65_cpsw_admin_to_oper(ndev);
}

/* The fetch command count is a number of bytes in Gigabit mode or nibbles in
 * 10/100Mb mode. So, having the link speed and a time in ns, convert the ns
 * to the number of bytes/nibbles that can be sent on the wire at that speed.
 */
static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
{
	u64 temp;

	temp = ns * link_speed;
	if (link_speed < SPEED_1000)
		temp <<= 1;

	return DIV_ROUND_UP(temp, 8 * 1000);
}
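
/* Sanity check with illustrative numbers: at SPEED_1000, 12000 ns gives
 * 12000 * 1000 / 8000 = 1500 byte times (one full-size frame); at
 * SPEED_100, with the doubling, the same 12000 ns gives
 * 12000 * 100 * 2 / 8000 = 300 nibbles (150 bytes of wire time).
 */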

static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
						  int fetch_cnt,
						  int fetch_allow)
{
	u32 prio_mask, cmd_fetch_cnt, cmd;

	do {
		if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
			fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
			cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
		} else {
			cmd_fetch_cnt = fetch_cnt;
			/* fetch count can't be less than 16? */
			if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
				cmd_fetch_cnt = 16;

			fetch_cnt = 0;
		}

		prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
		cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;

		writel(cmd, addr);
		addr += 4;
	} while (fetch_cnt);

	return addr;
}
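
/* E.g. fetch_cnt = 2 * AM65_CPSW_FETCH_CNT_MAX + 100 emits three commands,
 * with counts MAX, MAX and 100 (a non-zero tail smaller than 16 would be
 * clamped up to 16), all carrying the same fetch_allow priority mask.
 */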

static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
				      struct tc_taprio_qopt_offload *taprio,
				      int link_speed)
{
	int i, cmd_cnt, cmd_sum = 0;
	u32 fetch_cnt;

	for (i = 0; i < taprio->num_entries; i++) {
		if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
			dev_err(&ndev->dev, "Only SET command is supported");
			return -EINVAL;
		}

		fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
						   link_speed);

		cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
		if (!cmd_cnt)
			cmd_cnt++;

		cmd_sum += cmd_cnt;

		if (!fetch_cnt)
			break;
	}

	return cmd_sum;
}

static int am65_cpsw_est_check_scheds(struct net_device *ndev,
				      struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int cmd_num;

	cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
					     port->qos.link_speed);
	if (cmd_num < 0)
		return cmd_num;

	if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
		dev_err(&ndev->dev, "No fetch RAM");
		return -ENOMEM;
	}
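
	/* The / 2 above reflects double buffering: each buffer spans
	 * AM65_CPSW_FETCH_RAM_CMD_NUM * 2 bytes of fetch RAM (see
	 * am65_cpsw_est_set_sched_list()) and each command is a 4-byte
	 * word, so one buffer holds AM65_CPSW_FETCH_RAM_CMD_NUM / 2 commands.
	 */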

	return 0;
}

static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
					 struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
	void __iomem *ram_addr, *max_ram_addr;
	struct tc_taprio_sched_entry *entry;
	int i, ram_size;

	ram_addr = port->fetch_ram_base;
	ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
	ram_addr += est_new->buf * ram_size;

	max_ram_addr = ram_size + ram_addr;
	for (i = 0; i < est_new->taprio.num_entries; i++) {
		entry = &est_new->taprio.entries[i];

		fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
						   port->qos.link_speed);
		fetch_allow = entry->gate_mask;
		if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
			dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
				fetch_allow);

		ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
							fetch_allow);

		if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
			dev_info(&ndev->dev,
				 "next scheds after %d have no impact", i + 1);
			break;
		}

		all_fetch_allow |= fetch_allow;
	}

	/* end cmd, enabling non-timed queues for potential over cycle time */
	if (ram_addr < max_ram_addr)
		writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}

/*
 * Enable ESTf periodic output, set cycle start time and interval.
 */
static int am65_cpsw_timer_set(struct net_device *ndev,
			       struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	struct am65_cpts *cpts = common->cpts;
	struct am65_cpts_estf_cfg cfg;

	cfg.ns_period = est_new->taprio.cycle_time;
	cfg.ns_start = est_new->taprio.base_time;

	return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
}

static void am65_cpsw_timer_stop(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;

	am65_cpts_estf_disable(cpts, port->port_id - 1);
}

static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
					  struct am65_cpsw_est *est_new)
{
	struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;
	u64 cur_time;
	s64 diff;

	if (!port->qos.est_oper)
		return TACT_PROG;

	taprio_new = &est_new->taprio;
	taprio_oper = &port->qos.est_oper->taprio;

	if (taprio_new->cycle_time != taprio_oper->cycle_time)
		return TACT_NEED_STOP;

	/* in order to avoid timer reset get base_time from oper taprio */
	if (!taprio_new->base_time && taprio_oper)
		taprio_new->base_time = taprio_oper->base_time;

	if (taprio_new->base_time == taprio_oper->base_time)
		return TACT_SKIP_PROG;

	/* base times are cycle synchronized */
	diff = taprio_new->base_time - taprio_oper->base_time;
	diff = diff < 0 ? -diff : diff;
	if (diff % taprio_new->cycle_time)
		return TACT_NEED_STOP;

	cur_time = am65_cpts_ns_gettime(cpts);
	if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
		return TACT_SKIP_PROG;

	/* TODO: Admin schedule at future time is not currently supported */
	return TACT_NEED_STOP;
}

static void am65_cpsw_stop_est(struct net_device *ndev)
{
	am65_cpsw_est_set(ndev, 0);
	am65_cpsw_timer_stop(ndev);
}

static void am65_cpsw_taprio_destroy(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	am65_cpsw_stop_est(ndev);

	devm_kfree(&ndev->dev, port->qos.est_admin);
	devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = NULL;
	port->qos.est_admin = NULL;

	am65_cpsw_reset_tc_mqprio(ndev);
}

static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
				struct tc_taprio_qopt_offload *to)
{
	int i;

	*to = *from;
	for (i = 0; i < from->num_entries; i++)
		to->entries[i] = from->entries[i];
}

static int am65_cpsw_taprio_replace(struct net_device *ndev,
				    struct tc_taprio_qopt_offload *taprio)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct netlink_ext_ack *extack = taprio->mqprio.extack;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = common->cpts;
	struct am65_cpsw_est *est_new;
	u64 cur_time, n;
	int ret, tact;

	if (!netif_running(ndev)) {
		NL_SET_ERR_MSG_MOD(extack, "interface is down, link speed unknown");
		return -ENETDOWN;
	}

	if (common->pf_p0_rx_ptype_rrobin) {
		NL_SET_ERR_MSG_MOD(extack,
				   "p0-rx-ptype-rrobin flag conflicts with taprio qdisc");
		return -EINVAL;
	}

	if (port->qos.link_speed == SPEED_UNKNOWN)
		return -ENOLINK;

	if (taprio->cycle_time_extension) {
		NL_SET_ERR_MSG_MOD(extack,
				   "cycle time extension not supported");
		return -EOPNOTSUPP;
	}

	est_new = devm_kzalloc(&ndev->dev,
			       struct_size(est_new, taprio.entries, taprio->num_entries),
			       GFP_KERNEL);
	if (!est_new)
		return -ENOMEM;

	ret = am65_cpsw_setup_mqprio(ndev, &taprio->mqprio);
	if (ret)
		return ret;

	am65_cpsw_cp_taprio(taprio, &est_new->taprio);

	am65_cpsw_est_update_state(ndev);

	ret = am65_cpsw_est_check_scheds(ndev, est_new);
	if (ret < 0)
		goto fail;

	tact = am65_cpsw_timer_act(ndev, est_new);
	if (tact == TACT_NEED_STOP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't toggle estf timer, stop taprio first");
		ret = -EINVAL;
		goto fail;
	}

	if (tact == TACT_PROG)
		am65_cpsw_timer_stop(ndev);

	am65_cpsw_port_est_get_buf_num(ndev, est_new);
	am65_cpsw_est_set_sched_list(ndev, est_new);
	am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);

	/* If the base-time is in the past, start schedule from the time:
	 * base_time + (N * cycle_time)
	 * where N is the smallest possible integer such that the above
	 * time is in the future.
	 */
	cur_time = am65_cpts_ns_gettime(cpts);
	if (est_new->taprio.base_time < cur_time) {
		n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time);
		est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time;
	}
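
	/* E.g. base_time = 1000, cur_time = 10500, cycle_time = 3000:
	 * n = (10500 - 1000) / 3000 = 3, so base_time becomes
	 * 1000 + 4 * 3000 = 13000, the first cycle boundary after cur_time.
	 */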

	am65_cpsw_est_set(ndev, 1);

	if (tact == TACT_PROG) {
		ret = am65_cpsw_timer_set(ndev, est_new);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set cycle time");
			goto fail;
		}
	}

	devm_kfree(&ndev->dev, port->qos.est_admin);
	port->qos.est_admin = est_new;
	am65_cpsw_iet_change_preemptible_tcs(port, taprio->mqprio.preemptible_tcs);

	return 0;

fail:
	am65_cpsw_reset_tc_mqprio(ndev);
	devm_kfree(&ndev->dev, est_new);
	return ret;
}

static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	ktime_t cur_time;
	s64 delta;

	if (!am65_cpsw_port_est_enabled(port))
		return;

	if (port->qos.link_down_time) {
		cur_time = ktime_get();
		delta = ktime_us_delta(cur_time, port->qos.link_down_time);
		if (delta > USEC_PER_SEC) {
			dev_err(&ndev->dev,
				"Link has been lost too long, stopping TAS");
			goto purge_est;
		}
	}

	return;

purge_est:
	am65_cpsw_taprio_destroy(ndev);
}

static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
{
	struct tc_taprio_qopt_offload *taprio = type_data;
	int err = 0;

	switch (taprio->cmd) {
	case TAPRIO_CMD_REPLACE:
		err = am65_cpsw_taprio_replace(ndev, taprio);
		break;
	case TAPRIO_CMD_DESTROY:
		am65_cpsw_taprio_destroy(ndev);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
{
	struct tc_query_caps_base *base = type_data;

	switch (base->type) {
	case TC_SETUP_QDISC_MQPRIO: {
		struct tc_mqprio_caps *caps = base->caps;

		caps->validate_queue_counts = true;

		return 0;
	}

	case TC_SETUP_QDISC_TAPRIO: {
		struct tc_taprio_caps *caps = base->caps;

		caps->gate_mask_per_txq = true;

		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
					       struct netlink_ext_ack *extack,
					       struct flow_cls_offload *cls,
					       u64 rate_pkt_ps)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct am65_cpsw_qos *qos = &port->qos;
	struct flow_match_eth_addrs match;
	int ret;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
		return -EOPNOTSUPP;
	}

	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on source MAC not supported");
		return -EOPNOTSUPP;
	}

	if (is_broadcast_ether_addr(match.key->dst) &&
	    is_broadcast_ether_addr(match.mask->dst)) {
		ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
		if (ret)
			return ret;

		qos->ale_bc_ratelimit.cookie = cls->cookie;
		qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
		ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
		if (ret)
			return ret;

		qos->ale_mc_ratelimit.cookie = cls->cookie;
		qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
						    const struct flow_action_entry *act,
						    struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
					     struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	const struct flow_action_entry *act;
	int i, ret;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
			if (ret)
				return ret;

			return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
								   act->police.rate_pkt_ps);
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			return -EOPNOTSUPP;
		}
	}
	return -EOPNOTSUPP;
}

static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
{
	struct am65_cpsw_qos *qos = &port->qos;

	if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
		qos->ale_bc_ratelimit.cookie = 0;
		qos->ale_bc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
	}

	if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
		qos->ale_mc_ratelimit.cookie = 0;
		qos->ale_mc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
	}

	return 0;
}

static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
					    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return am65_cpsw_qos_configure_clsflower(port, cls_flower);
	case FLOW_CLS_DESTROY:
		return am65_cpsw_qos_delete_clsflower(port, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct am65_cpsw_port *port = cb_priv;

	if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(am65_cpsw_qos_block_cb_list);

static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
					  am65_cpsw_qos_setup_tc_block_cb,
					  port, port, true);
}

static void
am65_cpsw_qos_tx_p0_rate_apply(struct am65_cpsw_common *common,
			       int tx_ch, u32 rate_mbps)
{
	struct am65_cpsw_host *host = am65_common_get_host(common);
	u32 ch_cir;
	int i;

	ch_cir = am65_cpsw_qos_tx_rate_calc(rate_mbps, common->bus_freq);
	writel(ch_cir, host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));

	/* update the rate for this tx queue on every port's ndev */
	for (i = 0; i < common->port_num; i++) {
		struct net_device *ndev = common->ports[i].ndev;

		if (!ndev)
			continue;
		netdev_get_tx_queue(ndev, tx_ch)->tx_maxrate = rate_mbps;
	}
}

int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
					int queue, u32 rate_mbps)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	struct am65_cpsw_tx_chn *tx_chn;
	u32 ch_rate, tx_ch_rate_msk_new;
	u32 ch_msk = 0;
	int ret;

	dev_dbg(common->dev, "apply TX%d rate limiting %uMbps tx_rate_msk%x\n",
		queue, rate_mbps, common->tx_ch_rate_msk);

	if (common->pf_p0_rx_ptype_rrobin) {
		dev_err(common->dev, "TX Rate Limiting failed - rrobin mode\n");
		return -EINVAL;
	}

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate_mbps)
		return 0;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}
	ret = 0;

	tx_ch_rate_msk_new = common->tx_ch_rate_msk;
	if (rate_mbps && !(tx_ch_rate_msk_new & BIT(queue))) {
		tx_ch_rate_msk_new |= BIT(queue);
		ch_msk = GENMASK(common->tx_ch_num - 1, queue);
		ch_msk = tx_ch_rate_msk_new ^ ch_msk;
	} else if (!rate_mbps) {
		tx_ch_rate_msk_new &= ~BIT(queue);
		ch_msk = queue ? GENMASK(queue - 1, 0) : 0;
		ch_msk = tx_ch_rate_msk_new & ch_msk;
	}
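
	/* Example of the masks above (tx_ch_num = 4): enabling a rate on
	 * queue 1 while only queue 3 is already limited gives
	 * tx_ch_rate_msk_new = 0xa and ch_msk = 0xa ^ GENMASK(3, 1) = 0x4,
	 * i.e. queue 2 breaks the hi->lo sequence, so the request is
	 * rejected below.
	 */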

	if (ch_msk) {
		dev_err(common->dev, "TX rate limiting has to be enabled sequentially hi->lo tx_rate_msk:%x tx_rate_msk_new:%x\n",
			common->tx_ch_rate_msk, tx_ch_rate_msk_new);
		ret = -EINVAL;
		goto exit_put;
	}

	tx_chn = &common->tx_chns[queue];
	tx_chn->rate_mbps = rate_mbps;
	common->tx_ch_rate_msk = tx_ch_rate_msk_new;

	if (!common->usage_count)
		/* will be applied on next netif up */
		goto exit_put;

	am65_cpsw_qos_tx_p0_rate_apply(common, queue, rate_mbps);

exit_put:
	pm_runtime_put(common->dev);
	return ret;
}

void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host = am65_common_get_host(common);
	int tx_ch;

	for (tx_ch = 0; tx_ch < common->tx_ch_num; tx_ch++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[tx_ch];
		u32 ch_cir;

		if (!tx_chn->rate_mbps)
			continue;

		ch_cir = am65_cpsw_qos_tx_rate_calc(tx_chn->rate_mbps,
						    common->bus_freq);
		writel(ch_cir,
		       host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
	}
}

int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			       void *type_data)
{
	switch (type) {
	case TC_QUERY_CAPS:
		return am65_cpsw_tc_query_caps(ndev, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return am65_cpsw_setup_taprio(ndev, type_data);
	case TC_SETUP_QDISC_MQPRIO:
		return am65_cpsw_setup_mqprio(ndev, type_data);
	case TC_SETUP_BLOCK:
		return am65_cpsw_qos_setup_tc_block(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	port->qos.link_speed = link_speed;
	am65_cpsw_tx_pn_shaper_apply(port);
	am65_cpsw_iet_link_state_update(ndev);

	am65_cpsw_est_link_up(ndev, link_speed);
	port->qos.link_down_time = 0;
}

void am65_cpsw_qos_link_down(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	port->qos.link_speed = SPEED_UNKNOWN;
	am65_cpsw_tx_pn_shaper_apply(port);
	am65_cpsw_iet_link_state_update(ndev);

	if (!port->qos.link_down_time)
		port->qos.link_down_time = ktime_get();
}