// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This contains the functions to handle the normal descriptors.

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"
14
/* Read the completion status of a TX descriptor.
 * Returns tx_dma_own while the DMA still owns it, tx_not_ls for a
 * non-final segment, tx_err if the error summary bit is set (after
 * bumping the relevant counters in @x), otherwise tx_done.
 * Fix: dropped the redundant braces around single-statement branches
 * to match the rest of the file (kernel coding style).
 */
static int ndesc_get_tx_status(struct stmmac_extra_stats *x,
			       struct dma_desc *p, void __iomem *ioaddr)
{
	unsigned int tdes0 = le32_to_cpu(p->des0);
	unsigned int tdes1 = le32_to_cpu(p->des1);
	int ret = tx_done;

	/* Get tx owner first */
	if (unlikely(tdes0 & TDES0_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes1 & TDES1_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR))
			x->tx_underflow++;
		if (unlikely(tdes0 & TDES0_NO_CARRIER))
			x->tx_carrier++;
		if (unlikely(tdes0 & TDES0_LOSS_CARRIER))
			x->tx_losscarrier++;
		if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
			     (tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
			     (tdes0 & TDES0_LATE_COLLISION))) {
			unsigned int collisions;

			/* Collision count lives in bits 6:3 of tdes0. */
			collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
			x->tx_collision += collisions;
		}
		ret = tx_err;
	}

	if (tdes0 & TDES0_VLAN_FRAME)
		x->tx_vlan++;

	if (unlikely(tdes0 & TDES0_DEFERRED))
		x->tx_deferred++;

	return ret;
}
59
ndesc_get_tx_len(struct dma_desc * p)60 static int ndesc_get_tx_len(struct dma_desc *p)
61 {
62 return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
63 }
64
65 /* This function verifies if each incoming frame has some errors
66 * and, if required, updates the multicast statistics.
67 * In case of success, it returns good_frame because the GMAC device
68 * is supposed to be able to compute the csum in HW. */
ndesc_get_rx_status(struct stmmac_extra_stats * x,struct dma_desc * p)69 static int ndesc_get_rx_status(struct stmmac_extra_stats *x,
70 struct dma_desc *p)
71 {
72 int ret = good_frame;
73 unsigned int rdes0 = le32_to_cpu(p->des0);
74
75 if (unlikely(rdes0 & RDES0_OWN))
76 return dma_own;
77
78 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
79 x->rx_length++;
80 return discard_frame;
81 }
82
83 if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
84 if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
85 x->rx_desc++;
86 if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
87 x->sa_filter_fail++;
88 if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
89 x->overflow_error++;
90 if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
91 x->ipc_csum_error++;
92 if (unlikely(rdes0 & RDES0_COLLISION)) {
93 x->rx_collision++;
94 }
95 if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
96 x->rx_crc_errors++;
97 }
98 ret = discard_frame;
99 }
100 if (unlikely(rdes0 & RDES0_DRIBBLING))
101 x->dribbling_bit++;
102
103 if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
104 x->rx_length++;
105 ret = discard_frame;
106 }
107 if (unlikely(rdes0 & RDES0_MII_ERROR)) {
108 x->rx_mii++;
109 ret = discard_frame;
110 }
111 #ifdef STMMAC_VLAN_TAG_USED
112 if (rdes0 & RDES0_VLAN_TAG)
113 x->vlan_tag++;
114 #endif
115 return ret;
116 }
117
ndesc_init_rx_desc(struct dma_desc * p,int disable_rx_ic,int mode,int end,int bfsize)118 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
119 int end, int bfsize)
120 {
121 int bfsize1;
122
123 p->des0 |= cpu_to_le32(RDES0_OWN);
124
125 bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
126 p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
127
128 if (mode == STMMAC_CHAIN_MODE)
129 ndesc_rx_set_on_chain(p, end);
130 else
131 ndesc_rx_set_on_ring(p, end, bfsize);
132
133 if (disable_rx_ic)
134 p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
135 }
136
ndesc_init_tx_desc(struct dma_desc * p,int mode,int end)137 static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
138 {
139 p->des0 &= cpu_to_le32(~TDES0_OWN);
140 if (mode == STMMAC_CHAIN_MODE)
141 ndesc_tx_set_on_chain(p);
142 else
143 ndesc_end_tx_desc_on_ring(p, end);
144 }
145
ndesc_get_tx_owner(struct dma_desc * p)146 static int ndesc_get_tx_owner(struct dma_desc *p)
147 {
148 return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
149 }
150
ndesc_set_tx_owner(struct dma_desc * p)151 static void ndesc_set_tx_owner(struct dma_desc *p)
152 {
153 p->des0 |= cpu_to_le32(TDES0_OWN);
154 }
155
ndesc_set_rx_owner(struct dma_desc * p,int disable_rx_ic)156 static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
157 {
158 p->des0 |= cpu_to_le32(RDES0_OWN);
159 }
160
ndesc_get_tx_ls(struct dma_desc * p)161 static int ndesc_get_tx_ls(struct dma_desc *p)
162 {
163 return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
164 }
165
ndesc_release_tx_desc(struct dma_desc * p,int mode)166 static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
167 {
168 int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
169
170 memset(p, 0, offsetof(struct dma_desc, des2));
171 if (mode == STMMAC_CHAIN_MODE)
172 ndesc_tx_set_on_chain(p);
173 else
174 ndesc_end_tx_desc_on_ring(p, ter);
175 }
176
/* Fill in a TX descriptor for transmission: segment flags, checksum
 * insertion control, buffer length, and (optionally, last) the DMA
 * ownership bit. @tot_pkt_len is unused by the normal descriptors.
 */
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				  bool csum_flag, int mode, bool tx_own,
				  bool ls, unsigned int tot_pkt_len)
{
	unsigned int ctrl = le32_to_cpu(p->des1);

	/* First-segment flag mirrors @is_fs exactly. */
	if (is_fs)
		ctrl |= TDES1_FIRST_SEGMENT;
	else
		ctrl &= ~TDES1_FIRST_SEGMENT;

	/* Full checksum insertion in HW when requested. */
	if (likely(csum_flag))
		ctrl |= TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT;
	else
		ctrl &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		ctrl |= TDES1_LAST_SEGMENT;

	p->des1 = cpu_to_le32(ctrl);

	if (mode != STMMAC_CHAIN_MODE)
		norm_set_tx_desc_len_on_ring(p, len);
	else
		norm_set_tx_desc_len_on_chain(p, len);

	/* Ownership is transferred to the DMA only after des1 is set. */
	if (tx_own)
		p->des0 |= cpu_to_le32(TDES0_OWN);
}
206
ndesc_set_tx_ic(struct dma_desc * p)207 static void ndesc_set_tx_ic(struct dma_desc *p)
208 {
209 p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
210 }
211
ndesc_get_rx_frame_len(struct dma_desc * p,int rx_coe_type)212 static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
213 {
214 unsigned int csum = 0;
215
216 /* The type-1 checksum offload engines append the checksum at
217 * the end of frame and the two bytes of checksum are added in
218 * the length.
219 * Adjust for that in the framelen for type-1 checksum offload
220 * engines
221 */
222 if (rx_coe_type == STMMAC_RX_COE_TYPE1)
223 csum = 2;
224
225 return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
226 >> RDES0_FRAME_LEN_SHIFT) -
227 csum);
228
229 }
230
ndesc_enable_tx_timestamp(struct dma_desc * p)231 static void ndesc_enable_tx_timestamp(struct dma_desc *p)
232 {
233 p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
234 }
235
ndesc_get_tx_timestamp_status(struct dma_desc * p)236 static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
237 {
238 return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
239 }
240
/* Combine the two timestamp words of a descriptor into nanoseconds:
 * des3 holds the seconds part, des2 the sub-second nanoseconds.
 * @ats is unused for the normal descriptors.
 */
static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u64 ns = le32_to_cpu(p->des2);

	ns += le32_to_cpu(p->des3) * 1000000000ULL;
	*ts = ns;
}
252
/* Return 1 when the RX timestamp in @desc is usable, 0 when both
 * timestamp words read back as all-ones (corrupted, must be dropped).
 * @next_desc and @ats are unused for the normal descriptors.
 */
static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	int valid = 1;

	if (le32_to_cpu(p->des2) == 0xffffffff &&
	    le32_to_cpu(p->des3) == 0xffffffff)
		valid = 0;

	return valid;
}
264
/* Dump @size descriptors of a ring to the kernel log for debugging.
 * Fixes: use an unsigned loop index (and %03u) so the comparison with
 * the unsigned @size is clean, and byte-swap des2/des3 so the dump is
 * also correct on big-endian hosts.
 */
static void ndesc_display_ring(void *head, unsigned int size, bool rx,
			       dma_addr_t dma_rx_phy, unsigned int desc_size)
{
	struct dma_desc *p = (struct dma_desc *)head;
	dma_addr_t dma_addr;
	unsigned int i;

	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		u64 x;

		dma_addr = dma_rx_phy + i * sizeof(*p);

		/* des0/des1 are read as one 64-bit word. */
		x = *(u64 *)p;
		pr_info("%03u [%pad]: 0x%x 0x%x 0x%x 0x%x",
			i, &dma_addr,
			(unsigned int)x, (unsigned int)(x >> 32),
			le32_to_cpu(p->des2), le32_to_cpu(p->des3));
		p++;
	}
	pr_info("\n");
}
287
/* Program the DMA buffer address into the descriptor (32-bit only on
 * the normal descriptor layout).
 */
static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	__le32 buf = cpu_to_le32(addr);

	p->des2 = buf;
}
292
ndesc_clear(struct dma_desc * p)293 static void ndesc_clear(struct dma_desc *p)
294 {
295 p->des2 = 0;
296 }
297
298 const struct stmmac_desc_ops ndesc_ops = {
299 .tx_status = ndesc_get_tx_status,
300 .rx_status = ndesc_get_rx_status,
301 .get_tx_len = ndesc_get_tx_len,
302 .init_rx_desc = ndesc_init_rx_desc,
303 .init_tx_desc = ndesc_init_tx_desc,
304 .get_tx_owner = ndesc_get_tx_owner,
305 .release_tx_desc = ndesc_release_tx_desc,
306 .prepare_tx_desc = ndesc_prepare_tx_desc,
307 .set_tx_ic = ndesc_set_tx_ic,
308 .get_tx_ls = ndesc_get_tx_ls,
309 .set_tx_owner = ndesc_set_tx_owner,
310 .set_rx_owner = ndesc_set_rx_owner,
311 .get_rx_frame_len = ndesc_get_rx_frame_len,
312 .enable_tx_timestamp = ndesc_enable_tx_timestamp,
313 .get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
314 .get_timestamp = ndesc_get_timestamp,
315 .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
316 .display_ring = ndesc_display_ring,
317 .set_addr = ndesc_set_addr,
318 .clear = ndesc_clear,
319 };
320