/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2014  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

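/* Parse the transmit status in TDES0 for a completed descriptor.
 * Returns tx_dma_own while the DMA still owns the descriptor and
 * tx_not_ls for intermediate segments; only the last segment carries
 * the frame status, which is folded into the error counters before
 * returning tx_done or tx_err.
 */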
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes0 = le32_to_cpu(p->des0);
	int ret = tx_done;

	/* Get tx owner first */
	if (unlikely(tdes0 & ETDES0_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
			x->tx_jabber++;

		if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
			     (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
			stats->collisions +=
				(tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;

		if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = tx_err;
	}

	if (unlikely(tdes0 & ETDES0_DEFERRED))
		x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
	if (tdes0 & ETDES0_VLAN_FRAME)
		x->tx_vlan++;
#endif

	return ret;
}

static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
}

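/* Decode the checksum offload (COE) status bits of RDES0, packed here
 * as type << 2 | ipc_err << 1 | payload_err.  Worked example: an IPv4
 * frame whose payload checksum failed has type = 1, ipc_err = 0 and
 * payload_err = 1, i.e. status = 0x5, so the frame is passed up with
 * csum_none and the stack verifies the checksum in software.
 */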
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 no checksum errors
	 *      1 0 1 | IPv4/6 payload checksum error
	 *      1 1 0 | IPv4/6 IP header checksum error
	 *      1 1 1 | IPv4/6 IP header AND payload checksum errors
	 *      0 0 1 | IPv4/6 unsupported IP payload
	 *      0 1 1 | COE bypassed: not an IPv4/6 frame
	 *      0 1 0 | Reserved
	 */
	switch (status) {
	case 0x0:
		ret = llc_snap;
		break;
	case 0x4:
		ret = good_frame;
		break;
	case 0x5:
	case 0x6:
	case 0x7:
		ret = csum_none;
		break;
	case 0x1:
	case 0x3:
		ret = discard_frame;
		break;
	}
	return ret;
}

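/* Decode the extended status word (RDES4) written by the GMAC when
 * extended descriptors are in use: IP header/payload checksum results,
 * the PTP message type of a received timestamped frame, and the
 * AV/VLAN and L3/L4 filter hits are accumulated in the extra stats.
 */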
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_extended_desc *p)
{
	unsigned int rdes0 = le32_to_cpu(p->basic.des0);
	unsigned int rdes4 = le32_to_cpu(p->des4);

	if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
		int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;

		if (rdes4 & ERDES4_IP_HDR_ERR)
			x->ip_hdr_err++;
		if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
			x->ip_payload_err++;
		if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
			x->ip_csum_bypassed++;
		if (rdes4 & ERDES4_IPV4_PKT_RCVD)
			x->ipv4_pkt_rcvd++;
		if (rdes4 & ERDES4_IPV6_PKT_RCVD)
			x->ipv6_pkt_rcvd++;

		if (message_type == RDES_EXT_NO_PTP)
			x->no_ptp_rx_msg_type_ext++;
		else if (message_type == RDES_EXT_SYNC)
			x->ptp_rx_msg_type_sync++;
		else if (message_type == RDES_EXT_FOLLOW_UP)
			x->ptp_rx_msg_type_follow_up++;
		else if (message_type == RDES_EXT_DELAY_REQ)
			x->ptp_rx_msg_type_delay_req++;
		else if (message_type == RDES_EXT_DELAY_RESP)
			x->ptp_rx_msg_type_delay_resp++;
		else if (message_type == RDES_EXT_PDELAY_REQ)
			x->ptp_rx_msg_type_pdelay_req++;
		else if (message_type == RDES_EXT_PDELAY_RESP)
			x->ptp_rx_msg_type_pdelay_resp++;
		else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
			x->ptp_rx_msg_type_pdelay_follow_up++;
		else if (message_type == RDES_PTP_ANNOUNCE)
			x->ptp_rx_msg_type_announce++;
		else if (message_type == RDES_PTP_MANAGEMENT)
			x->ptp_rx_msg_type_management++;
		else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
			x->ptp_rx_msg_pkt_reserved_type++;

		if (rdes4 & ERDES4_PTP_FRAME_TYPE)
			x->ptp_frame_type++;
		if (rdes4 & ERDES4_PTP_VER)
			x->ptp_ver++;
		if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
			x->timestamp_dropped++;
		if (rdes4 & ERDES4_AV_PKT_RCVD)
			x->av_pkt_rcvd++;
		if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
			x->av_tagged_pkt_rcvd++;
		if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
			x->vlan_tag_priority_val++;
		if (rdes4 & ERDES4_L3_FILTER_MATCH)
			x->l3_filter_match++;
		if (rdes4 & ERDES4_L4_FILTER_MATCH)
			x->l4_filter_match++;
		if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
			x->l3_l4_filter_no_match++;
	}
}

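/* Parse the receive status in RDES0.  Ownership is checked first, a
 * frame that does not end in this descriptor is dropped as a length
 * error, the error-summary bits are then folded into the counters and,
 * for good frames, the COE bits are decoded so the caller knows
 * whether the hardware checksum can be trusted.
 */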
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	int ret = good_frame;

	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
		stats->rx_length_errors++;
		return discard_frame;
	}

	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
			pr_err("\tIPC Csum Error/Giant frame\n");

		if (unlikely(rdes0 & RDES0_COLLISION))
			stats->collisions++;
		if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes0 & RDES0_MII_ERROR))	/* GMII */
			x->rx_mii++;

		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This does not match the information reported in the databook.
	 * At any rate, we need to understand if the hardware checksum
	 * computation is correct and report this to the upper layers.
	 */
	if (likely(ret == good_frame))
		ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
					 !!(rdes0 & RDES0_FRAME_TYPE),
					 !!(rdes0 & ERDES0_RX_MAC_ADDR));

	if (unlikely(rdes0 & RDES0_DRIBBLING))
		x->dribbling_bit++;

	if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (rdes0 & RDES0_VLAN_TAG)
		x->rx_vlan++;
#endif

	return ret;
}

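/* Prepare an RX descriptor for the DMA: hand ownership to the
 * hardware, program the buffer 1 size (capped at 8 KiB), hook the
 * descriptor into the ring or chain, and optionally mask the per
 * descriptor RX interrupt (e.g. when interrupts are coalesced).
 */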
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end, int bfsize)
{
	int bfsize1;

	p->des0 |= cpu_to_le32(RDES0_OWN);

	bfsize1 = min(bfsize, BUF_SIZE_8KiB);
	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);

	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_rx_set_on_chain(p);
	else
		ehn_desc_rx_set_on_ring(p, end, bfsize);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}

static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 &= cpu_to_le32(~ETDES0_OWN);
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, end);
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_OWN);
}

static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
}

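/* Reclaim a TX descriptor after completion: zero the words up to the
 * buffer pointer while re-establishing the end-of-ring or chain
 * linkage, so the descriptor can be reused in place.
 */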
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
	int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, ter);
}

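/* Fill a TX descriptor for transmission.  Note the ordering: when the
 * first segment and the OWN bit are set in the same call, a dma_wmb()
 * is issued before the final write to des0 so the DMA never observes a
 * half-initialized descriptor.
 */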
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     bool csum_flag, int mode, bool tx_own,
				     bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes0 = le32_to_cpu(p->des0);

	if (mode == STMMAC_CHAIN_MODE)
		enh_set_tx_desc_len_on_chain(p, len);
	else
		enh_set_tx_desc_len_on_ring(p, len);

	if (is_fs)
		tdes0 |= ETDES0_FIRST_SEGMENT;
	else
		tdes0 &= ~ETDES0_FIRST_SEGMENT;

	if (likely(csum_flag))
		tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
	else
		tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes0 |= ETDES0_LAST_SEGMENT;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes0 |= ETDES0_OWN;

	if (is_fs && tx_own)
		/* If the own bit has to be set for the first segment, all
		 * other descriptors of the same frame must be set up
		 * beforehand to avoid a race condition.
		 */
		dma_wmb();

	p->des0 = cpu_to_le32(tdes0);
}

static void enh_desc_set_tx_ic(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}

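/* Extract the frame length from RDES0.  Worked example: a type-1 COE
 * engine appends the 2-byte checksum to the frame and counts it in the
 * length field, so a 64-byte frame is reported as 66 bytes and has to
 * be trimmed back by 2.
 */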
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	unsigned int csum = 0;
	/* The type-1 checksum offload engines append the checksum at
	 * the end of frame and the two bytes of checksum are added in
	 * the length.
	 * Adjust for that in the framelen for type-1 checksum offload
	 * engines.
	 */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		csum = 2;

	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
				>> RDES0_FRAME_LEN_SHIFT) - csum);
}

static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}

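/* Read the hardware timestamp from a descriptor and linearize it to
 * nanoseconds: the low word holds nanoseconds, the high word seconds,
 * so ts = seconds * 10^9 + ns.  Extended descriptors carry the stamp
 * in des6/des7; otherwise des2/des3 of the basic descriptor are
 * reused.
 */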
static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	u64 ns;

	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		ns = le32_to_cpu(p->des6);
		/* convert high/sec time stamp value to nanosecond */
		ns += le32_to_cpu(p->des7) * 1000000000ULL;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		ns = le32_to_cpu(p->des2);
		ns += le32_to_cpu(p->des3) * 1000000000ULL;
	}

	*ts = ns;
}

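/* Report whether a valid RX timestamp is available.  With extended
 * descriptors the hardware provides a dedicated status bit (bit 7 of
 * RDES0); without them, an all-ones des2/des3 pair marks a corrupted
 * timestamp that must be discarded.
 */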
static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
					    u32 ats)
{
	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		if ((le32_to_cpu(p->des2) == 0xffffffff) &&
		    (le32_to_cpu(p->des3) == 0xffffffff))
			/* timestamp is corrupted, hence don't store it */
			return 0;
		else
			return 1;
	}
}

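/* Debug helper: dump the first four words of each extended descriptor
 * in the ring.  des0/des1 are fetched as one 64-bit load and printed
 * as two 32-bit halves next to the des2/des3 buffer pointers.
 */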
static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
{
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	int i;

	pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		u64 x;

		x = *(u64 *)ep;
		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
			i, (unsigned int)virt_to_phys(ep),
			(unsigned int)x, (unsigned int)(x >> 32),
			le32_to_cpu(ep->basic.des2),
			le32_to_cpu(ep->basic.des3));
		ep++;
	}
	pr_info("\n");
}

static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
{
	*addr = le32_to_cpu(p->des2);
}

static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des2 = cpu_to_le32(addr);
}

static void enh_desc_clear(struct dma_desc *p)
{
	p->des2 = 0;
}

const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.set_tx_ic = enh_desc_set_tx_ic,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
	.rx_extended_status = enh_desc_get_ext_status,
	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
	.get_timestamp = enh_desc_get_timestamp,
	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
	.display_ring = enh_desc_display_ring,
	.get_addr = enh_desc_get_addr,
	.set_addr = enh_desc_set_addr,
	.clear = enh_desc_clear,
};
489