// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains the functions to handle the descriptors for DesignWare
 * databook 4.xx.
 *
 * Copyright (C) 2015  STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/stmmac.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac4_descs.h"

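/* Parse the write-back TDES3 status of a completed TX descriptor.
 * Returns tx_dma_own while the DMA still owns the descriptor, tx_not_ls
 * for descriptors that are not the last segment of a frame, and
 * tx_done/tx_err (optionally OR-ed with tx_err_bump_tc on underflow)
 * otherwise, updating the extra-stats error counters along the way.
 */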
static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
				       struct dma_desc *p,
				       void __iomem *ioaddr)
{
	unsigned int tdes3;
	int ret = tx_done;

	tdes3 = le32_to_cpu(p->des3);

	/* Get tx owner first */
	if (unlikely(tdes3 & TDES3_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
		return tx_not_ls;

	if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
		ret = tx_err;

		if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
			x->tx_jabber++;
		if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
			x->tx_frame_flushed++;
		if (unlikely(tdes3 & TDES3_LOSS_CARRIER))
			x->tx_losscarrier++;
		if (unlikely(tdes3 & TDES3_NO_CARRIER))
			x->tx_carrier++;
		if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
			     (tdes3 & TDES3_EXCESSIVE_COLLISION)))
			x->tx_collision +=
			    (tdes3 & TDES3_COLLISION_COUNT_MASK)
			    >> TDES3_COLLISION_COUNT_SHIFT;

		if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) {
			x->tx_underflow++;
			ret |= tx_err_bump_tc;
		}

		if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
			x->tx_payload_error++;
	}

	if (unlikely(tdes3 & TDES3_DEFERRED))
		x->tx_deferred++;

	return ret;
}

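/* Parse the write-back RDES1/RDES2/RDES3 words of a received descriptor.
 * Context descriptors and error frames are reported as discard_frame,
 * non-last segments as rx_not_ls, and clean frames as good_frame, while
 * the PTP message type, checksum and filter related counters in
 * stmmac_extra_stats are updated from the status bits.
 */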
static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
				       struct dma_desc *p)
{
	unsigned int rdes1 = le32_to_cpu(p->des1);
	unsigned int rdes2 = le32_to_cpu(p->des2);
	unsigned int rdes3 = le32_to_cpu(p->des3);
	int message_type;
	int ret = good_frame;

	if (unlikely(rdes3 & RDES3_OWN))
		return dma_own;

	if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR))
		return discard_frame;
	if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
		return rx_not_ls;

	if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
		if (unlikely(rdes3 & RDES3_GIANT_PACKET))
			x->rx_length++;
		if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
			x->rx_mii++;

		if (unlikely(rdes3 & RDES3_CRC_ERROR))
			x->rx_crc_errors++;

		if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
			x->dribbling_bit++;

		ret = discard_frame;
	}

	message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;

	if (rdes1 & RDES1_IP_HDR_ERROR)
		x->ip_hdr_err++;
	if (rdes1 & RDES1_IP_CSUM_BYPASSED)
		x->ip_csum_bypassed++;
	if (rdes1 & RDES1_IPV4_HEADER)
		x->ipv4_pkt_rcvd++;
	if (rdes1 & RDES1_IPV6_HEADER)
		x->ipv6_pkt_rcvd++;

	if (message_type == RDES_EXT_NO_PTP)
		x->no_ptp_rx_msg_type_ext++;
	else if (message_type == RDES_EXT_SYNC)
		x->ptp_rx_msg_type_sync++;
	else if (message_type == RDES_EXT_FOLLOW_UP)
		x->ptp_rx_msg_type_follow_up++;
	else if (message_type == RDES_EXT_DELAY_REQ)
		x->ptp_rx_msg_type_delay_req++;
	else if (message_type == RDES_EXT_DELAY_RESP)
		x->ptp_rx_msg_type_delay_resp++;
	else if (message_type == RDES_EXT_PDELAY_REQ)
		x->ptp_rx_msg_type_pdelay_req++;
	else if (message_type == RDES_EXT_PDELAY_RESP)
		x->ptp_rx_msg_type_pdelay_resp++;
	else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
		x->ptp_rx_msg_type_pdelay_follow_up++;
	else if (message_type == RDES_PTP_ANNOUNCE)
		x->ptp_rx_msg_type_announce++;
	else if (message_type == RDES_PTP_MANAGEMENT)
		x->ptp_rx_msg_type_management++;
	else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
		x->ptp_rx_msg_pkt_reserved_type++;

	if (rdes1 & RDES1_PTP_PACKET_TYPE)
		x->ptp_frame_type++;
	if (rdes1 & RDES1_PTP_VER)
		x->ptp_ver++;
	if (rdes1 & RDES1_TIMESTAMP_DROPPED)
		x->timestamp_dropped++;

	if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}

	if (rdes2 & RDES2_L3_FILTER_MATCH)
		x->l3_filter_match++;
	if (rdes2 & RDES2_L4_FILTER_MATCH)
		x->l4_filter_match++;
	if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
	    >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
		x->l3_l4_filter_no_match++;

	return ret;
}

static int dwmac4_rd_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
}

static int dwmac4_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
}

static void dwmac4_set_tx_owner(struct dma_desc *p)
{
	p->des3 |= cpu_to_le32(TDES3_OWN);
}

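/* Hand an RX descriptor back to the DMA: set the OWN bit, mark the buffer 1
 * address as valid and, unless RX interrupts are disabled for this
 * descriptor, request an interrupt on completion.
 */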
static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);

	if (!disable_rx_ic)
		p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
}

static int dwmac4_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
		>> TDES3_LAST_DESCRIPTOR_SHIFT;
}

static u16 dwmac4_wrback_get_rx_vlan_tci(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & RDES0_VLAN_TAG_MASK);
}

static bool dwmac4_wrback_get_rx_vlan_valid(struct dma_desc *p)
{
	return ((le32_to_cpu(p->des3) & RDES3_LAST_DESCRIPTOR) &&
		(le32_to_cpu(p->des3) & RDES3_RDES0_VALID));
}

static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
	return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
}

static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
}

static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{
	/* Context type from W/B descriptor must be zero */
	if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
		return 0;

	/* Tx Timestamp Status is 1, so des0 and des1 hold valid values */
	if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
		return 1;

	return 0;
}

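/* Read the 64-bit timestamp from a descriptor: des0 holds the nanoseconds
 * part and des1 the seconds part, which is converted to nanoseconds before
 * being combined into *ts.
 */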
static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u64 ns;

	ns = le32_to_cpu(p->des0);
	/* convert high/sec time stamp value to nanosecond */
	ns += le32_to_cpu(p->des1) * 1000000000ULL;

	*ts = ns;
}

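/* Check whether the RX context descriptor carries a usable timestamp.
 * Returns 0 when a valid timestamp can be read, 1 when the timestamp is
 * not ready yet, and -EINVAL when the descriptor content is corrupted.
 */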
static int dwmac4_rx_check_timestamp(void *desc)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	unsigned int rdes1 = le32_to_cpu(p->des1);
	unsigned int rdes3 = le32_to_cpu(p->des3);
	u32 own, ctxt;
	int ret = 1;	/* default: timestamp not ready */

	own = rdes3 & RDES3_OWN;
	ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);

	if (likely(!own && ctxt)) {
		if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
			/* Corrupted value */
			ret = -EINVAL;
		else
			/* A valid Timestamp is ready to be read */
			ret = 0;
	}

	return ret;
}

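/* Report whether a received frame has a timestamp available. The status
 * bits of the normal write-back descriptor are checked first, then the
 * following context descriptor is polled (up to 10 times) until its
 * timestamp becomes valid. Returns 1 when a timestamp can be read from
 * the context descriptor, 0 otherwise.
 */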
static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
						 u32 ats)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	int ret = -EINVAL;

	/* Get the status from normal w/b descriptor */
	if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) {
		if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
			int i = 0;

			/* Check if timestamp is OK from context descriptor */
			do {
				ret = dwmac4_rx_check_timestamp(next_desc);
				if (ret < 0)
					goto exit;
				i++;

			} while ((ret == 1) && (i < 10));

			if (i == 10)
				ret = -EBUSY;
		}
	}
exit:
	if (likely(ret == 0))
		return 1;

	return 0;
}

static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				   int mode, int end, int bfsize)
{
	dwmac4_set_rx_owner(p, disable_rx_ic);
}

static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

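/* Fill a normal TX descriptor: buffer 1 length, total packet length,
 * first/last segment flags and checksum insertion control. The OWN bit
 * is set last and, for the first descriptor of a frame, only after a
 * dma_wmb() so the DMA never sees a partially written chain.
 */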
static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				      bool csum_flag, int mode, bool tx_own,
				      bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);

	tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
	if (is_fs)
		tdes3 |= TDES3_FIRST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;

	if (likely(csum_flag))
		tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
	else
		tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit has to be set for the first descriptor of
		 * a frame, all other descriptors of that frame have to be
		 * written before, to avoid a race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}

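/* Fill a TX descriptor for a TSO frame: buffer 1/2 lengths and, on the
 * first descriptor, the TSO enable bit together with the TCP header and
 * payload lengths. As in the non-TSO case, the OWN bit is written last,
 * preceded by a dma_wmb() for the first descriptor of the frame.
 */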
static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
					  int len1, int len2, bool tx_own,
					  bool ls, unsigned int tcphdrlen,
					  unsigned int tcppayloadlen)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	if (len1)
		p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));

	if (len2)
		p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
			    & TDES2_BUFFER2_SIZE_MASK);

	if (is_fs) {
		tdes3 |= TDES3_FIRST_DESCRIPTOR |
			 TDES3_TCP_SEGMENTATION_ENABLE |
			 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
			  TDES3_SLOT_NUMBER_MASK) |
			 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
	} else {
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
	}

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit has to be set for the first descriptor of
		 * a frame, all other descriptors of that frame have to be
		 * written before, to avoid a race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}

static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
}

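/* Dump a descriptor ring to the kernel log, one line per descriptor,
 * selecting the layout (struct dma_desc, dma_extended_desc or dma_edesc)
 * from the descriptor size passed by the caller.
 */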
static void dwmac4_display_ring(void *head, unsigned int size, bool rx,
				dma_addr_t dma_rx_phy, unsigned int desc_size)
{
	dma_addr_t dma_addr;
	int i;

	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");

	if (desc_size == sizeof(struct dma_desc)) {
		struct dma_desc *p = (struct dma_desc *)head;

		for (i = 0; i < size; i++) {
			dma_addr = dma_rx_phy + i * sizeof(*p);
			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				i, &dma_addr,
				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
	} else if (desc_size == sizeof(struct dma_extended_desc)) {
		struct dma_extended_desc *extp = (struct dma_extended_desc *)head;

		for (i = 0; i < size; i++) {
			dma_addr = dma_rx_phy + i * sizeof(*extp);
			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				i, &dma_addr,
				le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1),
				le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3),
				le32_to_cpu(extp->des4), le32_to_cpu(extp->des5),
				le32_to_cpu(extp->des6), le32_to_cpu(extp->des7));
			extp++;
		}
	} else if (desc_size == sizeof(struct dma_edesc)) {
		struct dma_edesc *ep = (struct dma_edesc *)head;

		for (i = 0; i < size; i++) {
			dma_addr = dma_rx_phy + i * sizeof(*ep);
			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				i, &dma_addr,
				le32_to_cpu(ep->des4), le32_to_cpu(ep->des5),
				le32_to_cpu(ep->des6), le32_to_cpu(ep->des7),
				le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1),
				le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3));
			ep++;
		}
	} else {
		pr_err("unsupported descriptor!\n");
	}
}

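/* Program a TX context descriptor carrying the TCP Maximum Segment Size
 * used by the TSO engine (CTXT and TCMSSV bits set, MSS in des2).
 */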
static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = cpu_to_le32(mss);
	p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}

static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des0 = cpu_to_le32(lower_32_bits(addr));
	p->des1 = cpu_to_le32(upper_32_bits(addr));
}

static void dwmac4_clear(struct dma_desc *p)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type)
{
	sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT;

	p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK);
}

static int set_16kib_bfsize(int mtu)
{
	int ret = 0;

	if (unlikely(mtu >= BUF_SIZE_8KiB))
		ret = BUF_SIZE_16KiB;
	return ret;
}

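/* Build a TX context descriptor describing the VLAN tags to insert: the
 * optional inner tag/type goes into des2/des3 and the outer tag is written
 * into des3 together with the VLTV and CTXT bits.
 */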
static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
				u32 inner_type)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;

	/* Inner VLAN */
	if (inner_type) {
		u32 des = inner_tag << TDES2_IVT_SHIFT;

		des &= TDES2_IVT_MASK;
		p->des2 = cpu_to_le32(des);

		des = inner_type << TDES3_IVTIR_SHIFT;
		des &= TDES3_IVTIR_MASK;
		p->des3 = cpu_to_le32(des | TDES3_IVLTV);
	}

	/* Outer VLAN */
	p->des3 |= cpu_to_le32(tag & TDES3_VLAN_TAG);
	p->des3 |= cpu_to_le32(TDES3_VLTV);

	p->des3 |= cpu_to_le32(TDES3_CONTEXT_TYPE);
}

static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
{
	type <<= TDES2_VLAN_TAG_SHIFT;
	p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
}

static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
	*len = le32_to_cpu(p->des2) & RDES2_HL;
}

static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool buf2_valid)
{
	p->des2 = cpu_to_le32(lower_32_bits(addr));
	p->des3 = cpu_to_le32(upper_32_bits(addr));

	if (buf2_valid)
		p->des3 |= cpu_to_le32(RDES3_BUFFER2_VALID_ADDR);
	else
		p->des3 &= cpu_to_le32(~RDES3_BUFFER2_VALID_ADDR);
}

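/* Program the launch time (seconds/nanoseconds) of a TBS enhanced
 * descriptor and mark it valid via the LTV bit.
 */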
static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
{
	p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV);
	p->des5 = cpu_to_le32(nsec & TDES5_LT);
	p->des6 = 0;
	p->des7 = 0;
}

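/* Descriptor callbacks exported to the stmmac core for the DesignWare
 * databook 4.xx descriptor format.
 */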
const struct stmmac_desc_ops dwmac4_desc_ops = {
	.tx_status = dwmac4_wrback_get_tx_status,
	.rx_status = dwmac4_wrback_get_rx_status,
	.get_tx_len = dwmac4_rd_get_tx_len,
	.get_tx_owner = dwmac4_get_tx_owner,
	.set_tx_owner = dwmac4_set_tx_owner,
	.set_rx_owner = dwmac4_set_rx_owner,
	.get_tx_ls = dwmac4_get_tx_ls,
	.get_rx_vlan_tci = dwmac4_wrback_get_rx_vlan_tci,
	.get_rx_vlan_valid = dwmac4_wrback_get_rx_vlan_valid,
	.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
	.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
	.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
	.get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
	.get_timestamp = dwmac4_get_timestamp,
	.set_tx_ic = dwmac4_rd_set_tx_ic,
	.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
	.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
	.release_tx_desc = dwmac4_release_tx_desc,
	.init_rx_desc = dwmac4_rd_init_rx_desc,
	.init_tx_desc = dwmac4_rd_init_tx_desc,
	.display_ring = dwmac4_display_ring,
	.set_mss = dwmac4_set_mss_ctxt,
	.set_addr = dwmac4_set_addr,
	.clear = dwmac4_clear,
	.set_sarc = dwmac4_set_sarc,
	.set_vlan_tag = dwmac4_set_vlan_tag,
	.set_vlan = dwmac4_set_vlan,
	.get_rx_header_len = dwmac4_get_rx_header_len,
	.set_sec_addr = dwmac4_set_sec_addr,
	.set_tbs = dwmac4_set_tbs,
};

const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
	.set_16kib_bfsize = set_16kib_bfsize,
};