// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Intel Corporation */

#include "igc.h"
#include "igc_base.h"
#include "igc_hw.h"
#include "igc_tsn.h"

#define MIN_MULTPLIER_TX_MIN_FRAG	0
#define MAX_MULTPLIER_TX_MIN_FRAG	3
/* Frag size is based on Section 8.12.2 of the SW User Manual */
#define TX_MIN_FRAG_SIZE		64
#define TX_MAX_FRAG_SIZE	(TX_MIN_FRAG_SIZE * \
				 (MAX_MULTPLIER_TX_MIN_FRAG + 1))
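
/* Illustrative: with the maximum multiplier of 3, the largest configurable
 * minimum fragment size is TX_MIN_FRAG_SIZE * (3 + 1) = 256 bytes.
 */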

DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled);

static int igc_fpe_init_smd_frame(struct igc_ring *ring,
				  struct igc_tx_buffer *buffer,
				  struct sk_buff *skb)
{
	dma_addr_t dma = dma_map_single(ring->dev, skb->data, skb->len,
					DMA_TO_DEVICE);

	if (dma_mapping_error(ring->dev, dma)) {
		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->protocol = 0;
	buffer->bytecount = skb->len;
	buffer->gso_segs = 1;
	buffer->time_stamp = jiffies;
	dma_unmap_len_set(buffer, len, skb->len);
	dma_unmap_addr_set(buffer, dma, dma);

	return 0;
}

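/* Build and post a single advanced Tx descriptor carrying an SMD (Start
 * mPacket Delimiter) frame; the POPTS field marks it as SMD-V or SMD-R
 * (descriptive note; see the switch on 'type' below).
 */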
static int igc_fpe_init_tx_descriptor(struct igc_ring *ring,
				      struct sk_buff *skb,
				      enum igc_txd_popts_type type)
{
	u32 cmd_type, olinfo_status = 0;
	struct igc_tx_buffer *buffer;
	union igc_adv_tx_desc *desc;
	int err;

	if (!igc_desc_unused(ring))
		return -EBUSY;

	buffer = &ring->tx_buffer_info[ring->next_to_use];
	err = igc_fpe_init_smd_frame(ring, buffer, skb);
	if (err)
		return err;

	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
		   buffer->bytecount;

	olinfo_status |= FIELD_PREP(IGC_ADVTXD_PAYLEN_MASK, buffer->bytecount);

	switch (type) {
	case SMD_V:
	case SMD_R:
		olinfo_status |= FIELD_PREP(IGC_TXD_POPTS_SMD_MASK, type);
		break;
	}

	desc = IGC_TX_DESC(ring, ring->next_to_use);
	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));

	netdev_tx_sent_queue(txring_txq(ring), skb->len);

	buffer->next_to_watch = desc;
	ring->next_to_use = (ring->next_to_use + 1) % ring->count;

	return 0;
}

static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
				  enum igc_txd_popts_type type)
{
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct igc_ring *ring;
	struct sk_buff *skb;
	int err;

	ring = igc_get_tx_ring(adapter, cpu);
	nq = txring_txq(ring);

	skb = alloc_skb(SMD_FRAME_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_put_zero(skb, SMD_FRAME_SIZE);

	__netif_tx_lock(nq, cpu);

	err = igc_fpe_init_tx_descriptor(ring, skb, type);
	if (err) {
		/* The skb was never attached to a descriptor, so Tx
		 * completion will not free it; free it here to avoid a leak.
		 */
		kfree_skb(skb);
	} else {
		igc_flush_tx_descriptors(ring);
	}

	__netif_tx_unlock(nq);

	return err;
}

static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
				 enum ethtool_mpacket type)
{
	struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
	struct igc_adapter *adapter;
	int err;

	adapter = container_of(fpe, struct igc_adapter, fpe);

	if (type == ETHTOOL_MPACKET_VERIFY) {
		err = igc_fpe_xmit_smd_frame(adapter, SMD_V);
		if (err && net_ratelimit())
			netdev_err(adapter->netdev, "Error sending SMD-V frame\n");
	} else if (type == ETHTOOL_MPACKET_RESPONSE) {
		err = igc_fpe_xmit_smd_frame(adapter, SMD_R);
		if (err && net_ratelimit())
			netdev_err(adapter->netdev, "Error sending SMD-R frame\n");
	}
}

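/* Callbacks for the generic ethtool MAC Merge software verification (mmsv)
 * state machine, which invokes .send_mpacket to emit the SMD-V and SMD-R
 * verification frames handled above (descriptive note).
 */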
static const struct ethtool_mmsv_ops igc_mmsv_ops = {
	.send_mpacket = igc_fpe_send_mpacket,
};

void igc_fpe_init(struct igc_adapter *adapter)
{
	adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
	ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
}

static bool is_any_launchtime(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		if (ring->launchtime_enable)
			return true;
	}

	return false;
}

static bool is_cbs_enabled(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		if (ring->cbs_enable)
			return true;
	}

	return false;
}

static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
{
	unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;

	if (adapter->taprio_offload_enable || is_any_launchtime(adapter) ||
	    adapter->strict_priority_enable)
		new_flags |= IGC_FLAG_TSN_QBV_ENABLED;

	if (is_cbs_enabled(adapter))
		new_flags |= IGC_FLAG_TSN_QAV_ENABLED;

	if (adapter->fpe.mmsv.pmac_enabled)
		new_flags |= IGC_FLAG_TSN_PREEMPT_ENABLED;

	return new_flags;
}

static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN);
}

void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u16 txoffset;

	if (!igc_tsn_is_tx_mode_in_tsn(adapter))
		return;

	switch (adapter->link_speed) {
	case SPEED_10:
		txoffset = IGC_TXOFFSET_SPEED_10;
		break;
	case SPEED_100:
		txoffset = IGC_TXOFFSET_SPEED_100;
		break;
	case SPEED_1000:
		txoffset = IGC_TXOFFSET_SPEED_1000;
		break;
	case SPEED_2500:
		txoffset = IGC_TXOFFSET_SPEED_2500;
		break;
	default:
		txoffset = 0;
		break;
	}

	wr32(IGC_GTXOFFSET, txoffset);
}

static void igc_tsn_restore_retx_default(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 retxctl;

	retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK;
	wr32(IGC_RETX_CTL, retxctl);
}

bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
		adapter->taprio_offload_enable;
}

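/* Map traffic classes to Tx queue priority fields. Illustrative, derived
 * from the assignments below: with queue_per_tc = {3, 2, 1, 0} (the reset
 * default), the PRIO_0 field, which has the highest priority, is programmed
 * with queue_per_tc[3] = 0, i.e. queue 0.
 */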
static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
{
	struct igc_hw *hw = &adapter->hw;
	u32 txarb;

	txarb = rd32(IGC_TXARB);

	txarb &= ~(IGC_TXARB_TXQ_PRIO_0_MASK |
		   IGC_TXARB_TXQ_PRIO_1_MASK |
		   IGC_TXARB_TXQ_PRIO_2_MASK |
		   IGC_TXARB_TXQ_PRIO_3_MASK);

	txarb |= IGC_TXARB_TXQ_PRIO_0(queue_per_tc[3]);
	txarb |= IGC_TXARB_TXQ_PRIO_1(queue_per_tc[2]);
	txarb |= IGC_TXARB_TXQ_PRIO_2(queue_per_tc[1]);
	txarb |= IGC_TXARB_TXQ_PRIO_3(queue_per_tc[0]);

	wr32(IGC_TXARB, txarb);
}

/**
 * igc_tsn_set_rxpbsize - Set the receive packet buffer size
 * @adapter: Pointer to the igc_adapter structure
 * @rxpbs_exp_bmc_be: Value to set the receive packet buffer size, including
 *                    express buffer, BMC buffer, and Best Effort buffer
 *
 * The IGC_RXPBS register value may include allocations for the Express buffer,
 * BMC buffer, Best Effort buffer, and the timestamp descriptor buffer
 * (IGC_RXPBS_CFG_TS_EN).
 */
static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter,
				 u32 rxpbs_exp_bmc_be)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rxpbs = rd32(IGC_RXPBS);

	rxpbs &= ~(IGC_RXPBSIZE_EXP_MASK | IGC_BMC2OSPBSIZE_MASK |
		   IGC_RXPBSIZE_BE_MASK);
	rxpbs |= rxpbs_exp_bmc_be;

	wr32(IGC_RXPBS, rxpbs);
}

/* Restores the TSN-specific registers to their default values after
 * the adapter is reset.
 */
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
	u16 queue_per_tc[4] = { 3, 2, 1, 0 };
	struct igc_hw *hw = &adapter->hw;
	u32 tqavctrl;
	int i;

	wr32(IGC_GTXOFFSET, 0);
	wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);

	igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_DEFAULT);

	if (igc_is_device_id_i226(hw))
		igc_tsn_restore_retx_default(adapter);

	tqavctrl = rd32(IGC_TQAVCTRL);
	tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
		      IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS |
		      IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);

	wr32(IGC_TQAVCTRL, tqavctrl);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		wr32(IGC_TXQCTL(i), 0);
		wr32(IGC_STQT(i), 0);
		wr32(IGC_ENDQT(i), NSEC_PER_SEC);
	}

	wr32(IGC_QBVCYCLET_S, 0);
	wr32(IGC_QBVCYCLET, NSEC_PER_SEC);

	/* Restore the default Tx arbitration: priority 0 (the highest) is
	 * assigned to queue 0, priority 1 to queue 1, and so on.
	 */
	igc_tsn_tx_arb(adapter, queue_per_tc);

	adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;

	return 0;
}

/* To partially fix i226 HW errata, reduce MAC internal buffering from 192 Bytes
 * to 88 Bytes by setting RETX_CTL register using the recommendation from:
 * a) Ethernet Controller I225/I226 Specification Update Rev 2.1
 *    Item 9: TSN: Packet Transmission Might Cross the Qbv Window
 * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control
 */
static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 retxctl, watermark;

	retxctl = rd32(IGC_RETX_CTL);
	watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;
	/* Set QBVFULLTH value using watermark and set QBVFULLEN */
	retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
		   IGC_RETX_CTL_QBVFULLEN;
	wr32(IGC_RETX_CTL, retxctl);
}

static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe)
{
	u8 mult = (fpe->tx_min_frag_size / TX_MIN_FRAG_SIZE) - 1;

	return clamp_t(u8, mult, MIN_MULTPLIER_TX_MIN_FRAG,
		       MAX_MULTPLIER_TX_MIN_FRAG);
}
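
/* Example (illustrative): a tx_min_frag_size of 192 bytes gives
 * (192 / 64) - 1 = 2, the multiplier that the MIN_FRAG field of TQAVCTRL
 * encodes for a 192-byte minimum fragment.
 */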

u32 igc_fpe_get_supported_frag_size(u32 frag_size)
{
	const u32 supported_sizes[] = {64, 128, 192, 256};

	/* Find the smallest supported size that is >= frag_size */
	for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) {
		if (frag_size <= supported_sizes[i])
			return supported_sizes[i];
	}

	/* Should not happen */
	return TX_MAX_FRAG_SIZE;
}
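
/* Example (illustrative): igc_fpe_get_supported_frag_size(100) returns 128,
 * the smallest supported size that is not below the requested 100 bytes.
 */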

static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tqavctrl, baset_l, baset_h;
	u32 sec, nsec, cycle;
	ktime_t base_time, systim;
	u32 frag_size_mult;
	int i;

	wr32(IGC_TSAUXC, 0);
	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
	wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);

	igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_BE_TSN);

	if (igc_is_device_id_i226(hw))
		igc_tsn_set_retx_qbvfullthreshold(adapter);

	if (adapter->strict_priority_enable) {
		/* Configure queue priorities according to the user-provided
		 * mapping.
		 */
		igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];
		u32 txqctl = 0;
		u16 cbs_value;
		u32 tqavcc;

		wr32(IGC_STQT(i), ring->start_time);
		wr32(IGC_ENDQT(i), ring->end_time);

		if (adapter->taprio_offload_enable) {
			/* If taprio_offload_enable is set we are in "taprio"
			 * mode and we need to be strict about the
			 * cycles: only transmit a packet if it can be
			 * completed during that cycle.
			 *
			 * If taprio_offload_enable is NOT true when
			 * enabling TSN offload, the cycle should have
			 * no external effects, but is only used internally
			 * to adapt the base time register after a second
			 * has passed.
			 *
			 * Enabling strict mode in this case would
			 * unnecessarily prevent the transmission of
			 * certain packets (i.e. at the boundary of a
			 * second) and thus interfere with the launchtime
			 * feature that promises transmission at a
			 * certain point in time.
			 */
			txqctl |= IGC_TXQCTL_STRICT_CYCLE |
				IGC_TXQCTL_STRICT_END;
		}

		if (ring->launchtime_enable)
			txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;

		/* Skip configuring CBS for Q2 and Q3 */
		if (i > 1)
			goto skip_cbs;

		if (ring->cbs_enable) {
			if (i == 0)
				txqctl |= IGC_TXQCTL_QAV_SEL_CBS0;
			else
				txqctl |= IGC_TXQCTL_QAV_SEL_CBS1;

			/* According to i225 datasheet section 7.5.2.7, we
			 * should set the 'idleSlope' field from TQAVCC
			 * register following the equation:
			 *
			 * value = link-speed   0x7736 * BW * 0.2
			 *         ---------- *  -----------------         (E1)
			 *          100Mbps            2.5
			 *
			 * Note that 'link-speed' is in Mbps.
			 *
			 * 'BW' is the percentage bandwidth out of full
			 * link speed which can be found with the
			 * following equation. Note that idleSlope here
			 * is the parameter from this function
			 * which is in kbps.
			 *
			 *     BW =     idleSlope
			 *          -----------------                      (E2)
			 *          link-speed * 1000
			 *
			 * That said, we can come up with a generic
			 * equation to calculate the value we should set
			 * in the TQAVCC register by replacing 'BW' in E1 by E2.
			 * The resulting equation is:
			 *
			 * value = link-speed * 0x7736 * idleSlope * 0.2
			 *         -------------------------------------   (E3)
			 *             100 * 2.5 * link-speed * 1000
			 *
			 * 'link-speed' is present in both sides of the
			 * fraction so it is canceled out. The final
			 * equation is the following:
			 *
			 *     value = idleSlope * 61036
			 *             -----------------                   (E4)
			 *                  2500000
			 *
			 * NOTE: For i225, given the above, we can see
			 *       that idleslope is represented in
			 *       40.959433 kbps units by the value at
			 *       the TQAVCC register (2.5Gbps / 61036),
			 *       which reduces the granularity for
			 *       idleslope increments.
			 *
			 * In the i225 controller, the sendSlope and loCredit
			 * parameters from CBS are not configurable
			 * by software, so we don't do any
			 * 'controller configuration' with respect to
			 * these parameters.
			 */
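			/* Worked example (illustrative): an idleslope of
			 * 20000 kbps yields DIV_ROUND_UP(20000 * 61036,
			 * 2500000) = 489, i.e. roughly 20 Mbps expressed in
			 * ~40.96 kbps units.
			 */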
			cbs_value = DIV_ROUND_UP_ULL(ring->idleslope
						     * 61036ULL, 2500000);

			tqavcc = rd32(IGC_TQAVCC(i));
			tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK;
			tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS;
			wr32(IGC_TQAVCC(i), tqavcc);

			wr32(IGC_TQAVHC(i),
			     0x80000000 + ring->hicredit * 0x7736);
		} else {
			/* Disable any CBS for the queue */
			txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK);

			/* Set idleSlope to zero. */
			tqavcc = rd32(IGC_TQAVCC(i));
			tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK |
				    IGC_TQAVCC_KEEP_CREDITS);
			wr32(IGC_TQAVCC(i), tqavcc);

			/* Set hiCredit to zero. */
			wr32(IGC_TQAVHC(i), 0);
		}
skip_cbs:
		wr32(IGC_TXQCTL(i), txqctl);
	}

	tqavctrl = rd32(IGC_TQAVCTRL) & ~(IGC_TQAVCTRL_FUTSCDDIS |
		   IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;

	if (adapter->fpe.mmsv.pmac_enabled)
		tqavctrl |= IGC_TQAVCTRL_PREEMPT_ENA;

	frag_size_mult = igc_fpe_get_frag_size_mult(&adapter->fpe);
	tqavctrl |= FIELD_PREP(IGC_TQAVCTRL_MIN_FRAG_MASK, frag_size_mult);

	adapter->qbv_count++;

	cycle = adapter->cycle_time;
	base_time = adapter->base_time;

	nsec = rd32(IGC_SYSTIML);
	sec = rd32(IGC_SYSTIMH);

	systim = ktime_set(sec, nsec);
	if (ktime_compare(systim, base_time) > 0) {
		s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);

		base_time = ktime_add_ns(base_time, (n + 1) * cycle);
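		/* Illustrative: with systim = 10.5s, base_time = 1s and
		 * cycle = 1s, n = 9 and base_time advances to 11s, the
		 * first cycle start strictly after the current time.
		 */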
	} else {
		if (igc_is_device_id_i226(hw)) {
			ktime_t adjust_time, expires_time;

			/* According to datasheet section 7.5.2.9.3.3, FutScdDis bit
			 * has to be configured before the cycle time and base time.
			 * Tx won't hang if a GCL is already running,
			 * so in this case we don't need to set FutScdDis.
			 */
			if (!(rd32(IGC_BASET_H) || rd32(IGC_BASET_L)))
				tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;

			nsec = rd32(IGC_SYSTIML);
			sec = rd32(IGC_SYSTIMH);
			systim = ktime_set(sec, nsec);

			adjust_time = adapter->base_time;
			expires_time = ktime_sub_ns(adjust_time, systim);
			hrtimer_start(&adapter->hrtimer, expires_time, HRTIMER_MODE_REL);
		}
	}

	wr32(IGC_TQAVCTRL, tqavctrl);

	wr32(IGC_QBVCYCLET_S, cycle);
	wr32(IGC_QBVCYCLET, cycle);

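	/* Split base_time into whole seconds (BASET_H) and the nanosecond
	 * remainder (BASET_L). Illustrative: a base_time of 2500000000 ns
	 * is written as BASET_H = 2 and BASET_L = 500000000.
	 */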
	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
	wr32(IGC_BASET_H, baset_h);

	/* In i226, a future base time is only supported when the FutScdDis
	 * bit is enabled and is only acted upon during re-configuration.
	 * In this case, initialize the base time with zero to create a
	 * "re-configuration" scenario, and only then set the desired base
	 * time.
	 */
	if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS)
		wr32(IGC_BASET_L, 0);
	wr32(IGC_BASET_L, baset_l);

	return 0;
}

int igc_tsn_reset(struct igc_adapter *adapter)
{
	unsigned int new_flags;
	int err = 0;

	if (adapter->fpe.mmsv.pmac_enabled) {
		err = igc_enable_empty_addr_recv(adapter);
		if (err && net_ratelimit())
			netdev_err(adapter->netdev, "Error adding empty address to MAC filter\n");
	} else {
		igc_disable_empty_addr_recv(adapter);
	}

	new_flags = igc_tsn_new_flags(adapter);

	if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED))
		return igc_tsn_disable_offload(adapter);

	err = igc_tsn_enable_offload(adapter);
	if (err < 0)
		return err;

	adapter->flags = new_flags;

	return err;
}

static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter)
{
	bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) &
				  IGC_FLAG_TSN_ANY_ENABLED);

	return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) ||
	       (!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter));
}

int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
	/* Per the I225/6 HW Design Section 7.5.2.1 guideline, if the Tx mode
	 * changes from legacy to TSN or from TSN to legacy, the adapter must
	 * be reset.
	 */
	if (netif_running(adapter->netdev) &&
	    igc_tsn_will_tx_mode_change(adapter)) {
		schedule_work(&adapter->reset_task);
		return 0;
	}

	igc_tsn_reset(adapter);

	return 0;
}
633