// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Intel Corporation */

#include "igc.h"
#include "igc_base.h"
#include "igc_hw.h"
#include "igc_tsn.h"

#define MIN_MULTPLIER_TX_MIN_FRAG	0
#define MAX_MULTPLIER_TX_MIN_FRAG	3
/* Frag size is based on Section 8.12.2 of the SW User Manual */
#define TX_MIN_FRAG_SIZE		64
#define TX_MAX_FRAG_SIZE	(TX_MIN_FRAG_SIZE * \
				 (MAX_MULTPLIER_TX_MIN_FRAG + 1))
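/* With a 64-byte base size and multipliers 0..3, the supported minimum
 * fragment sizes are 64, 128, 192 and 256 bytes (TX_MAX_FRAG_SIZE = 256).
 */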

enum tx_queue {
	TX_QUEUE_0 = 0,
	TX_QUEUE_1,
	TX_QUEUE_2,
	TX_QUEUE_3,
};

DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled);

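/* SMD-V (verify) and SMD-R (response) frames are the mPackets exchanged
 * during the frame preemption verification handshake driven by the
 * ethtool_mmsv state machine. Each one is a zero-filled frame of
 * SMD_FRAME_SIZE bytes transmitted on the Tx ring of the current CPU.
 */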
static int igc_fpe_init_smd_frame(struct igc_ring *ring,
				  struct igc_tx_buffer *buffer,
				  struct sk_buff *skb)
{
	dma_addr_t dma = dma_map_single(ring->dev, skb->data, skb->len,
					DMA_TO_DEVICE);

	if (dma_mapping_error(ring->dev, dma)) {
		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->protocol = 0;
	buffer->bytecount = skb->len;
	buffer->gso_segs = 1;
	buffer->time_stamp = jiffies;
	dma_unmap_len_set(buffer, len, skb->len);
	dma_unmap_addr_set(buffer, dma, dma);

	return 0;
}

static int igc_fpe_init_tx_descriptor(struct igc_ring *ring,
				      struct sk_buff *skb,
				      enum igc_txd_popts_type type)
{
	u32 cmd_type, olinfo_status = 0;
	struct igc_tx_buffer *buffer;
	union igc_adv_tx_desc *desc;
	int err;

	if (!igc_desc_unused(ring))
		return -EBUSY;

	buffer = &ring->tx_buffer_info[ring->next_to_use];
	err = igc_fpe_init_smd_frame(ring, buffer, skb);
	if (err)
		return err;

	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
		   buffer->bytecount;

	olinfo_status |= FIELD_PREP(IGC_ADVTXD_PAYLEN_MASK, buffer->bytecount);

	switch (type) {
	case SMD_V:
	case SMD_R:
		olinfo_status |= FIELD_PREP(IGC_TXD_POPTS_SMD_MASK, type);
		break;
	}

	desc = IGC_TX_DESC(ring, ring->next_to_use);
	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));

	netdev_tx_sent_queue(txring_txq(ring), skb->len);

	buffer->next_to_watch = desc;
	ring->next_to_use = (ring->next_to_use + 1) % ring->count;

	return 0;
}

static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
				  enum igc_txd_popts_type type)
{
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct igc_ring *ring;
	struct sk_buff *skb;
	int err;

	ring = igc_get_tx_ring(adapter, cpu);
	nq = txring_txq(ring);

	skb = alloc_skb(SMD_FRAME_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_put_zero(skb, SMD_FRAME_SIZE);

	__netif_tx_lock(nq, cpu);

	err = igc_fpe_init_tx_descriptor(ring, skb, type);
	igc_flush_tx_descriptors(ring);

	__netif_tx_unlock(nq);

	return err;
}

static void igc_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
{
	struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
	struct igc_adapter *adapter;

	adapter = container_of(fpe, struct igc_adapter, fpe);
	adapter->fpe.tx_enabled = tx_enable;

	/* Update config since tx_enabled affects preemptible queue configuration */
	igc_tsn_offload_apply(adapter);
}

static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
				 enum ethtool_mpacket type)
{
	struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
	struct igc_adapter *adapter;
	int err;

	adapter = container_of(fpe, struct igc_adapter, fpe);

	if (type == ETHTOOL_MPACKET_VERIFY) {
		err = igc_fpe_xmit_smd_frame(adapter, SMD_V);
		if (err && net_ratelimit())
			netdev_err(adapter->netdev, "Error sending SMD-V\n");
	} else if (type == ETHTOOL_MPACKET_RESPONSE) {
		err = igc_fpe_xmit_smd_frame(adapter, SMD_R);
		if (err && net_ratelimit())
			netdev_err(adapter->netdev, "Error sending SMD-R frame\n");
	}
}

static const struct ethtool_mmsv_ops igc_mmsv_ops = {
	.configure_tx = igc_fpe_configure_tx,
	.send_mpacket = igc_fpe_send_mpacket,
};

void igc_fpe_init(struct igc_adapter *adapter)
{
	adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
	adapter->fpe.tx_enabled = false;
	ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
}

void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter)
{
	for (int i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *tx_ring = adapter->tx_ring[i];

		tx_ring->preemptible = false;
	}
}

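/* Translate the preemptible traffic class bitmap from mqprio into a Tx queue
 * bitmap using the netdev's tc_to_txq map. For example, if TC 1 is
 * preemptible and maps to {offset = 2, count = 2}, queues 2 and 3 are
 * selected (GENMASK(3, 2) = 0xC).
 */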
static u32 igc_fpe_map_preempt_tc_to_queue(const struct igc_adapter *adapter,
					   unsigned long preemptible_tcs)
{
	struct net_device *dev = adapter->netdev;
	u32 i, queue = 0;

	for (i = 0; i < dev->num_tc; i++) {
		u32 offset, count;

		if (!(preemptible_tcs & BIT(i)))
			continue;

		offset = dev->tc_to_txq[i].offset;
		count = dev->tc_to_txq[i].count;
		queue |= GENMASK(offset + count - 1, offset);
	}

	return queue;
}

void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
				const struct tc_mqprio_qopt_offload *mqprio)
{
	u32 preemptible_queue = igc_fpe_map_preempt_tc_to_queue(adapter,
								mqprio->preemptible_tcs);

	for (int i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *tx_ring = adapter->tx_ring[i];

		tx_ring->preemptible = !!(preemptible_queue & BIT(i));
	}
}

static bool is_any_launchtime(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		if (ring->launchtime_enable)
			return true;
	}

	return false;
}

static bool is_cbs_enabled(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		if (ring->cbs_enable)
			return true;
	}

	return false;
}

static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
{
	unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;

	if (adapter->taprio_offload_enable || is_any_launchtime(adapter) ||
	    adapter->strict_priority_enable)
		new_flags |= IGC_FLAG_TSN_QBV_ENABLED;

	if (is_cbs_enabled(adapter))
		new_flags |= IGC_FLAG_TSN_QAV_ENABLED;

	if (adapter->fpe.mmsv.pmac_enabled)
		new_flags |= IGC_FLAG_TSN_PREEMPT_ENABLED;

	return new_flags;
}

static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN);
}

void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u16 txoffset;

	if (!igc_tsn_is_tx_mode_in_tsn(adapter))
		return;

	switch (adapter->link_speed) {
	case SPEED_10:
		txoffset = IGC_TXOFFSET_SPEED_10;
		break;
	case SPEED_100:
		txoffset = IGC_TXOFFSET_SPEED_100;
		break;
	case SPEED_1000:
		txoffset = IGC_TXOFFSET_SPEED_1000;
		break;
	case SPEED_2500:
		txoffset = IGC_TXOFFSET_SPEED_2500;
		break;
	default:
		txoffset = 0;
		break;
	}

	wr32(IGC_GTXOFFSET, txoffset);
}

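/* Undo igc_tsn_set_retx_qbvfullthreshold(): keep only the watermark field of
 * RETX_CTL, clearing QBVFULLTH and QBVFULLEN.
 */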
static void igc_tsn_restore_retx_default(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 retxctl;

	retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK;
	wr32(IGC_RETX_CTL, retxctl);
}

bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
	       adapter->taprio_offload_enable;
}

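/* Program the TXARB queue priority fields. By default, priority 0 (the
 * highest) is served by queue 0 and priority 3 by queue 3; with reverse_prio
 * the mapping is flipped so that queue 3 gets the highest priority.
 */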
static void igc_tsn_tx_arb(struct igc_adapter *adapter, bool reverse_prio)
{
	struct igc_hw *hw = &adapter->hw;
	u32 txarb;

	txarb = rd32(IGC_TXARB);

	txarb &= ~(IGC_TXARB_TXQ_PRIO_0_MASK |
		   IGC_TXARB_TXQ_PRIO_1_MASK |
		   IGC_TXARB_TXQ_PRIO_2_MASK |
		   IGC_TXARB_TXQ_PRIO_3_MASK);

	if (reverse_prio) {
		txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_3);
		txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_2);
		txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_1);
		txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_0);
	} else {
		txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_0);
		txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_1);
		txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_2);
		txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_3);
	}

	wr32(IGC_TXARB, txarb);
}

/**
 * igc_tsn_set_rxpbsize - Set the receive packet buffer size
 * @adapter: Pointer to the igc_adapter structure
 * @rxpbs_exp_bmc_be: Value to set the receive packet buffer size, including
 *                    express buffer, BMC buffer, and Best Effort buffer
 *
 * The IGC_RXPBS register value may include allocations for the Express buffer,
 * BMC buffer, Best Effort buffer, and the timestamp descriptor buffer
 * (IGC_RXPBS_CFG_TS_EN).
 */
static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter,
				 u32 rxpbs_exp_bmc_be)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rxpbs = rd32(IGC_RXPBS);

	rxpbs &= ~(IGC_RXPBSIZE_EXP_MASK | IGC_BMC2OSPBSIZE_MASK |
		   IGC_RXPBSIZE_BE_MASK);
	rxpbs |= rxpbs_exp_bmc_be;

	wr32(IGC_RXPBS, rxpbs);
}

/* Returns the TSN specific registers to their default values after
 * the adapter is reset.
 */
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tqavctrl;
	int i;

	wr32(IGC_GTXOFFSET, 0);
	wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);

	igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_DEFAULT);

	if (igc_is_device_id_i226(hw))
		igc_tsn_restore_retx_default(adapter);

	tqavctrl = rd32(IGC_TQAVCTRL);
	tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
		      IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS |
		      IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);

	wr32(IGC_TQAVCTRL, tqavctrl);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		int reg_idx = adapter->tx_ring[i]->reg_idx;
		u32 txdctl;

		wr32(IGC_TXQCTL(i), 0);
		wr32(IGC_STQT(i), 0);
		wr32(IGC_ENDQT(i), NSEC_PER_SEC);

		txdctl = rd32(IGC_TXDCTL(reg_idx));
		txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
		wr32(IGC_TXDCTL(reg_idx), txdctl);
	}

	wr32(IGC_QBVCYCLET_S, 0);
	wr32(IGC_QBVCYCLET, NSEC_PER_SEC);

	/* Restore the default Tx arbitration: priority 0 is the highest and is
	 * assigned to queue 0, priority 1 to queue 1, and so on.
	 */
	igc_tsn_tx_arb(adapter, false);

	adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;

	return 0;
}

/* To partially fix i226 HW errata, reduce MAC internal buffering from 192 Bytes
 * to 88 Bytes by setting RETX_CTL register using the recommendation from:
 * a) Ethernet Controller I225/I226 Specification Update Rev 2.1
 *    Item 9: TSN: Packet Transmission Might Cross the Qbv Window
 * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control
 */
static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 retxctl, watermark;

	retxctl = rd32(IGC_RETX_CTL);
	watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;
	/* Set QBVFULLTH value using watermark and set QBVFULLEN */
	retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
		   IGC_RETX_CTL_QBVFULLEN;
	wr32(IGC_RETX_CTL, retxctl);
}

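/* Convert the configured tx_min_frag_size into the multiplier written to the
 * MIN_FRAG field of TQAVCTRL: 64 bytes -> 0, 128 -> 1, 192 -> 2, 256 -> 3.
 */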
static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe)
{
	u8 mult = (fpe->tx_min_frag_size / TX_MIN_FRAG_SIZE) - 1;

	return clamp_t(u8, mult, MIN_MULTPLIER_TX_MIN_FRAG,
		       MAX_MULTPLIER_TX_MIN_FRAG);
}

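/* Round a requested minimum fragment size up to the nearest value the
 * hardware supports; e.g. a request for 100 bytes returns 128.
 */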
u32 igc_fpe_get_supported_frag_size(u32 frag_size)
{
	static const u32 supported_sizes[] = { 64, 128, 192, 256 };

	/* Find the smallest supported size that is >= frag_size */
	for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) {
		if (frag_size <= supported_sizes[i])
			return supported_sizes[i];
	}

	/* Should not happen */
	return TX_MAX_FRAG_SIZE;
}

static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tqavctrl, baset_l, baset_h;
	u32 sec, nsec, cycle;
	ktime_t base_time, systim;
	u32 frag_size_mult;
	int i;

	wr32(IGC_TSAUXC, 0);
	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
	wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);

	igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_BE_TSN);

	if (igc_is_device_id_i226(hw))
		igc_tsn_set_retx_qbvfullthreshold(adapter);

	if (adapter->strict_priority_enable ||
	    adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)
		igc_tsn_tx_arb(adapter, true);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];
		u32 txdctl = rd32(IGC_TXDCTL(ring->reg_idx));
		u32 txqctl = 0;
		u16 cbs_value;
		u32 tqavcc;

		wr32(IGC_STQT(i), ring->start_time);
		wr32(IGC_ENDQT(i), ring->end_time);

		if (adapter->taprio_offload_enable) {
			/* If taprio_offload_enable is set we are in "taprio"
			 * mode and we need to be strict about the
			 * cycles: only transmit a packet if it can be
			 * completed during that cycle.
			 *
			 * If taprio_offload_enable is NOT true when
			 * enabling TSN offload, the cycle should have
			 * no external effects, but is only used internally
			 * to adapt the base time register after a second
			 * has passed.
			 *
			 * Enabling strict mode in this case would
			 * unnecessarily prevent the transmission of
			 * certain packets (i.e. at the boundary of a
			 * second) and thus interfere with the launchtime
			 * feature that promises transmission at a
			 * certain point in time.
			 */
			txqctl |= IGC_TXQCTL_STRICT_CYCLE |
				  IGC_TXQCTL_STRICT_END;
		}

		if (ring->launchtime_enable)
			txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;

		if (!adapter->fpe.tx_enabled) {
			/* fpe inactive: clear both flags */
			txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
			txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
		} else if (ring->preemptible) {
			/* fpe active + preemptible: enable preemptible queue + set low priority */
			txqctl |= IGC_TXQCTL_PREEMPTIBLE;
			txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
		} else {
			/* fpe active + express: enable express queue + set high priority */
			txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
			txdctl |= IGC_TXDCTL_PRIORITY_HIGH;
		}

		wr32(IGC_TXDCTL(ring->reg_idx), txdctl);

		/* Skip configuring CBS for Q2 and Q3 */
		if (i > 1)
			goto skip_cbs;

		if (ring->cbs_enable) {
			if (i == 0)
				txqctl |= IGC_TXQCTL_QAV_SEL_CBS0;
			else
				txqctl |= IGC_TXQCTL_QAV_SEL_CBS1;

			/* According to i225 datasheet section 7.5.2.7, we
			 * should set the 'idleSlope' field from TQAVCC
			 * register following the equation:
			 *
			 * value = link-speed   0x7736 * BW * 0.2
			 *         ---------- * -----------------          (E1)
			 *          100Mbps            2.5
			 *
			 * Note that 'link-speed' is in Mbps.
			 *
			 * 'BW' is the percentage bandwidth out of full
			 * link speed which can be found with the
			 * following equation. Note that idleSlope here
			 * is the parameter from this function
			 * which is in kbps.
			 *
			 * BW =     idleSlope
			 *      -----------------                          (E2)
			 *      link-speed * 1000
			 *
			 * That said, we can come up with a generic
			 * equation to calculate the value we should set
			 * in the TQAVCC register by replacing 'BW' in E1
			 * by E2. The resulting equation is:
			 *
			 * value = link-speed * 0x7736 * idleSlope * 0.2
			 *         --------------------------------------  (E3)
			 *              100 * 2.5 * link-speed * 1000
			 *
			 * 'link-speed' is present in both sides of the
			 * fraction so it is canceled out. The final
			 * equation is the following:
			 *
			 * value = idleSlope * 61036
			 *         -----------------                       (E4)
			 *              2500000
			 *
			 * NOTE: For i225, given the above, we can see
			 *       that idleslope is represented in
			 *       40.959433 kbps units by the value at
			 *       the TQAVCC register (2.5Gbps / 61036),
			 *       which reduces the granularity for
			 *       idleslope increments.
			 *
			 * In i225 controller, the sendSlope and loCredit
			 * parameters from CBS are not configurable
			 * by software so we don't do any
			 * 'controller configuration' in respect to
			 * these parameters.
			 */
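			/* For example, at the full 2.5 Gbps link rate
			 * (idleSlope = 2,500,000 kbps) E4 gives exactly
			 * 61036, while an idleSlope of 20,000 kbps (20 Mbps)
			 * gives DIV_ROUND_UP(20000 * 61036, 2500000) = 489.
			 */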
			cbs_value = DIV_ROUND_UP_ULL(ring->idleslope
						     * 61036ULL, 2500000);

			tqavcc = rd32(IGC_TQAVCC(i));
			tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK;
			tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS;
			wr32(IGC_TQAVCC(i), tqavcc);

			wr32(IGC_TQAVHC(i),
			     0x80000000 + ring->hicredit * 0x7736);
		} else {
			/* Disable any CBS for the queue */
			txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK);

			/* Set idleSlope to zero. */
			tqavcc = rd32(IGC_TQAVCC(i));
			tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK |
				    IGC_TQAVCC_KEEP_CREDITS);
			wr32(IGC_TQAVCC(i), tqavcc);

			/* Set hiCredit to zero. */
			wr32(IGC_TQAVHC(i), 0);
		}
skip_cbs:
		wr32(IGC_TXQCTL(i), txqctl);
	}

	tqavctrl = rd32(IGC_TQAVCTRL) & ~(IGC_TQAVCTRL_FUTSCDDIS |
		   IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;

	if (adapter->fpe.mmsv.pmac_enabled)
		tqavctrl |= IGC_TQAVCTRL_PREEMPT_ENA;

	frag_size_mult = igc_fpe_get_frag_size_mult(&adapter->fpe);
	tqavctrl |= FIELD_PREP(IGC_TQAVCTRL_MIN_FRAG_MASK, frag_size_mult);

	adapter->qbv_count++;

	cycle = adapter->cycle_time;
	base_time = adapter->base_time;

	nsec = rd32(IGC_SYSTIML);
	sec = rd32(IGC_SYSTIMH);

	systim = ktime_set(sec, nsec);
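	/* If the requested base time is already in the past, advance it by
	 * whole cycles to the first cycle start after the current time.
	 */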
	if (ktime_compare(systim, base_time) > 0) {
		s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);

		base_time = ktime_add_ns(base_time, (n + 1) * cycle);
	} else {
		if (igc_is_device_id_i226(hw)) {
			ktime_t adjust_time, expires_time;

			/* According to datasheet section 7.5.2.9.3.3, FutScdDis bit
			 * has to be configured before the cycle time and base time.
			 * Tx won't hang if a GCL is already running,
			 * so in this case we don't need to set FutScdDis.
			 */
			if (!(rd32(IGC_BASET_H) || rd32(IGC_BASET_L)))
				tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;

			nsec = rd32(IGC_SYSTIML);
			sec = rd32(IGC_SYSTIMH);
			systim = ktime_set(sec, nsec);

			adjust_time = adapter->base_time;
			expires_time = ktime_sub_ns(adjust_time, systim);
			hrtimer_start(&adapter->hrtimer, expires_time, HRTIMER_MODE_REL);
		}
	}

	wr32(IGC_TQAVCTRL, tqavctrl);

	wr32(IGC_QBVCYCLET_S, cycle);
	wr32(IGC_QBVCYCLET, cycle);

	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
	wr32(IGC_BASET_H, baset_h);

	/* In i226, the future base time is only supported when the FutScdDis
	 * bit is enabled and is only honoured on re-configuration.
	 * In this case, initialize the base time with zero to create a
	 * "re-configuration" scenario, and only then set the desired base time.
	 */
	if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS)
		wr32(IGC_BASET_L, 0);
	wr32(IGC_BASET_L, baset_l);

	return 0;
}

int igc_tsn_reset(struct igc_adapter *adapter)
{
	unsigned int new_flags;
	int err = 0;

	if (adapter->fpe.mmsv.pmac_enabled) {
		err = igc_enable_empty_addr_recv(adapter);
		if (err && net_ratelimit())
			netdev_err(adapter->netdev, "Error adding empty address to MAC filter\n");
	} else {
		igc_disable_empty_addr_recv(adapter);
	}

	new_flags = igc_tsn_new_flags(adapter);

	if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED))
		return igc_tsn_disable_offload(adapter);

	err = igc_tsn_enable_offload(adapter);
	if (err < 0)
		return err;

	adapter->flags = new_flags;

	return err;
}

static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter)
{
	bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) &
				  IGC_FLAG_TSN_ANY_ENABLED);

	return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) ||
	       (!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter));
}

int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
	/* Per the I225/6 HW Design Section 7.5.2.1 guideline, if the Tx mode
	 * changes from legacy->tsn or tsn->legacy, the adapter needs to be
	 * reset.
	 */
	if (netif_running(adapter->netdev) &&
	    igc_tsn_will_tx_mode_change(adapter)) {
		schedule_work(&adapter->reset_task);
		return 0;
	}

	igc_tsn_reset(adapter);

	return 0;
}