/*
 * AMD 10Gb Ethernet driver
 *
 * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc.
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "xgbe.h"
#include "xgbe-common.h"

#include <net/if_dl.h>

static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
	return (if_getmtu(pdata->netdev) + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
}
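
/*
 * Illustrative arithmetic (not driver code): for a standard 1500-byte MTU
 * this works out to 1500 + 14 (Ethernet header) + 4 (FCS) + 4 (VLAN tag)
 * = 1522 bytes, the value used for flow-control sizing further below.
 */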

static unsigned int
xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value.  Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   (usec * (system_clock_hz / 10^6)) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	return (ret);
}
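
/*
 * Example for xgbe_usec_to_riwt() (illustrative, assuming a 125 MHz system
 * clock, i.e. sysclk_rate = 125000000): a 200 usec coalescing interval maps
 * to (200 * 125) / 256 = 97 watchdog units; the integer division truncates.
 */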

static unsigned int
xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value.  Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   (riwt * 256) / (system_clock_hz / 10^6)
	 */
	ret = (riwt * 256) / (rate / 1000000);

	return (ret);
}
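
/*
 * Example for xgbe_riwt_to_usec() (illustrative, same assumed 125 MHz
 * clock): riwt = 97 converts back to (97 * 256) / 125 = 198 usec, so a
 * usec -> riwt -> usec round trip is not exact due to truncation.
 */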

static int
xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
	unsigned int pblx8, pbl;
	unsigned int i;

	pblx8 = DMA_PBL_X8_DISABLE;
	pbl = pdata->pbl;

	if (pdata->pbl > 32) {
		pblx8 = DMA_PBL_X8_ENABLE;
		pbl >>= 3;
	}
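
	/*
	 * Example (assumed configuration): a pdata->pbl of 64 exceeds the
	 * 32-beat register limit, so PBLX8 is enabled and the PBL field is
	 * programmed with 64 >> 3 = 8, for an effective 8 * 8 = 64 beats.
	 */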

	for (i = 0; i < pdata->channel_count; i++) {
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
		    pblx8);

		if (pdata->channel[i]->tx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
			    PBL, pbl);

		if (pdata->channel[i]->rx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
			    PBL, pbl);
	}

	return (0);
}

static int
xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
		    pdata->tx_osp_mode);
	}

	return (0);
}

static int
xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return (0);
}

static int
xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return (0);
}

static int
xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return (0);
}

static int
xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return (0);
}

static int
xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
		    pdata->rx_riwt);
	}

	return (0);
}

static int
xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return (0);
}

static void
xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
		    pdata->rx_buf_size);
	}
}

static void
xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int tso_enabled = (if_getcapenable(pdata->netdev) & IFCAP_TSO);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		axgbe_printf(1, "TSO in channel %d %s\n", i,
		    tso_enabled ? "enabled" : "disabled");
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE,
		    tso_enabled ? 1 : 0);
	}
}

static void
xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int sph_enable_flag = XGMAC_IOREAD_BITS(pdata, MAC_HWF1R, SPHEN);

	axgbe_printf(1, "sph_enable %d sph feature enabled?: %d\n",
	    pdata->sph_enable, sph_enable_flag);

	if (pdata->sph_enable && sph_enable_flag)
		axgbe_printf(0, "SPH Enabled\n");

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		if (pdata->sph_enable && sph_enable_flag) {
			/* Enable split header feature */
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR,
			    SPH, 1);
		} else {
			/* Disable split header feature */
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR,
			    SPH, 0);
		}

		/* Per-channel confirmation of SPH being disabled/enabled */
		int val = XGMAC_DMA_IOREAD_BITS(pdata->channel[i], DMA_CH_CR,
		    SPH);
		axgbe_printf(0, "%s: SPH %s in channel %d\n", __func__,
		    (val ? "enabled" : "disabled"), i);
	}

	if (pdata->sph_enable && sph_enable_flag)
		XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}

static int
xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
    unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mtx_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

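	/* Poll for completion: up to 1000 iterations of DELAY(1000) (1 ms
	 * each), i.e. roughly one second, before giving up with -EBUSY. */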
	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		DELAY(1000);
	}

	ret = -EBUSY;

unlock:
	mtx_unlock(&pdata->rss_mutex);

	return (ret);
}

static int
xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(uint32_t);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
		    key_regs, *key++);
		if (ret)
			return (ret);
	}

	return (0);
}

static int
xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_LOOKUP_TABLE_TYPE, i,
		    pdata->rss_table[i]);
		if (ret)
			return (ret);
	}

	return (0);
}

static int
xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const uint8_t *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return (xgbe_write_rss_hash_key(pdata));
}

static int
xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, const uint32_t *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return (xgbe_write_rss_lookup_table(pdata));
}

static int
xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return (-EOPNOTSUPP);

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return (ret);

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return (ret);

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	axgbe_printf(0, "RSS Enabled\n");

	return (0);
}

static int
xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return (-EOPNOTSUPP);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	axgbe_printf(0, "RSS Disabled\n");

	return (0);
}

static void
xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	/* Check if the interface has RSS capability */
	if (pdata->enable_rss)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		axgbe_error("error configuring RSS, RSS disabled\n");
}

static int
xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return (0);
}

static int
xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		if (pdata->rx_rfd[i]) {
			/* Flow control thresholds are established */
			/* TODO - enable pfc/ets support */
			ehfc = 1;
		}

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		axgbe_printf(1, "flow control %s for RXq%u\n",
		    ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return (0);
}

static int
xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return (0);
}

static int
xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return (0);
}

static int
xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	if (pdata->tx_pause)
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return (0);
}

static int
xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	if (pdata->rx_pause)
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return (0);
}

static void
xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}

static void
xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i, ver;

	/* Set the interrupt mode if supported */
	if (pdata->channel_irq_mode)
		XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
		    pdata->channel_irq_mode);

	ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		/* Clear all the interrupts which are set */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
		    XGMAC_DMA_IOREAD(channel, DMA_CH_SR));

		/* Clear all interrupt enable bits */
		channel->curr_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
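		/* The summary-enable bit positions moved between versions of
		 * the IP; hardware older than 2.1 uses the NIE20/AIE20 field
		 * definitions below. */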
		if (ver < 0x21) {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
		} else {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
		}
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless using
			 *         per channel interrupts in edge triggered
			 *         mode)
			 */
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
				    DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts in edge triggered
			 *          mode)
			 */
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
				    DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
	}
}

static void
xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void
xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);

	/* Enable MDIO single command completion interrupt */
	XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}

static int
xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return (-EINVAL);
	}

	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return (0);
}

static int
xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	axgbe_printf(0, "VLAN Stripping Enabled\n");

	return (0);
}

static int
xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	axgbe_printf(0, "VLAN Stripping Disabled\n");

	return (0);
}

static int
xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	axgbe_printf(0, "VLAN filtering Enabled\n");

	return (0);
}

static int
xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	axgbe_printf(0, "VLAN filtering Disabled\n");

	return (0);
}

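/*
 * Bit-serial, reflected CRC-32 (Ethernet polynomial, CRC32_POLY_LE) over the
 * low 12 bits of the little-endian VID (get_bitmask_order(VLAN_VID_MASK)
 * yields 12), processed LSB first.  Used to derive the VLAN hash bucket.
 */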
static uint32_t
xgbe_vid_crc32_le(__le16 vid_le)
{
	uint32_t crc = ~0;
	uint32_t temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= CRC32_POLY_LE;
	}

	return (crc);
}

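/*
 * The hardware VLAN hash filter is a 16-entry bitmap: each active VID is
 * hashed to one of 16 buckets using the top four bits of the bit-reversed
 * CRC-32 of its little-endian encoding, and the corresponding VLHT bit is
 * set so that matching frames pass the filter.
 */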
static int
xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	uint32_t crc;
	size_t vid;
	uint16_t vlan_hash_table = 0;
	__le16 vid_le = 0;

	axgbe_printf(1, "%s: Before updating VLANHTR 0x%x\n", __func__,
	    XGMAC_IOREAD(pdata, MAC_VLANHTR));

	/* Generate the VLAN Hash Table value */
	bit_foreach(pdata->active_vlans, VLAN_NVID, vid) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
		axgbe_printf(1, "%s: vid 0x%lx vid_le 0x%x crc 0x%x "
		    "vlan_hash_table 0x%x\n", __func__, vid, vid_le, crc,
		    vlan_hash_table);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	axgbe_printf(1, "%s: After updating VLANHTR 0x%x\n", __func__,
	    XGMAC_IOREAD(pdata, MAC_VLANHTR));

	return (0);
}

static int
xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return (0);

	axgbe_printf(1, "%s promiscuous mode\n",
	    enable ? "entering" : "leaving");

	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		axgbe_printf(1, "Disabling rx vlan filtering\n");
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
			axgbe_printf(1, "Enabling rx vlan filtering\n");
			xgbe_enable_rx_vlan_filtering(pdata);
		}
	}

	return (0);
}

static int
xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return (0);

	axgbe_printf(1, "%s allmulti mode\n", enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return (0);
}

static void
xgbe_set_mac_reg(struct xgbe_prv_data *pdata, char *addr, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	uint8_t *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (addr) {
		mac_addr = (uint8_t *)&mac_addr_lo;
		mac_addr[0] = addr[0];
		mac_addr[1] = addr[1];
		mac_addr[2] = addr[2];
		mac_addr[3] = addr[3];
		mac_addr = (uint8_t *)&mac_addr_hi;
		mac_addr[0] = addr[4];
		mac_addr[1] = addr[5];

		axgbe_printf(1, "adding mac address %pM at %#x\n", addr,
		    *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}

static void
xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	xgbe_set_mac_reg(pdata, pdata->mac_addr, &mac_reg);
	addn_macs--;

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static int
xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	/* TODO - add support to set mac hash table */
	xgbe_set_mac_addn_addrs(pdata);

	return (0);
}

static int
xgbe_set_mac_address(struct xgbe_prv_data *pdata, uint8_t *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

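	/* The address registers are packed little-endian: MACA0LR holds
	 * bytes 0-3 and the low half of MACA0HR holds bytes 4-5. */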
	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
	    (addr[1] << 8) | (addr[0] << 0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return (0);
}

static int
xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
	unsigned int pr_mode, am_mode;

	pr_mode = ((if_getflags(pdata->netdev) & IFF_PROMISC) != 0);
	am_mode = ((if_getflags(pdata->netdev) & IFF_ALLMULTI) != 0);

	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);

	xgbe_add_mac_addresses(pdata);

	return (0);
}

static int
xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return (-EINVAL);

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg &= ~(1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return (0);
}

static int
xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return (-EINVAL);

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg |= (1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return (0);
}

static int
xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio.  The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets.  This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values.  All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return (mmd_data);
}

static void
xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
    int mmd_data)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio.  The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets.  This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values.  All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int
xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio.  The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets.  This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values.  All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return (mmd_data);
}

static void
xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
    int mmd_data)
{
	unsigned int mmd_address;
	unsigned long flags;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio.  The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets.  This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values.  All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and writing 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int
xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return (xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg));

	case XGBE_XPCS_ACCESS_V2:
	default:
		return (xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg));
	}
}

static void
xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
    int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return (xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data));

	case XGBE_XPCS_ACCESS_V2:
	default:
		return (xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data));
	}
}

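/*
 * Build the MDIO single-command address word: the register address (RA) in
 * the low 16 bits, the port address (PA), and, for Clause 45 accesses
 * (MII_ADDR_C45 encoded register values), the device address (DA) taken
 * from the upper bits of the register value (reg >> 16).
 */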
static unsigned int
xgbe_create_mdio_sca(int port, int reg)
{
	unsigned int mdio_sca, da;

	da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;

	mdio_sca = 0;
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);

	return (mdio_sca);
}

static int
xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg,
    uint16_t val)
{
	unsigned int mdio_sca, mdio_sccd;

	mtx_lock_spin(&pdata->mdio_mutex);

	mdio_sca = xgbe_create_mdio_sca(addr, reg);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) ==
	    EWOULDBLOCK) {
		axgbe_error("%s: MDIO write error\n", __func__);
		mtx_unlock_spin(&pdata->mdio_mutex);
		return (-ETIMEDOUT);
	}

	mtx_unlock_spin(&pdata->mdio_mutex);
	return (0);
}

static int
xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg)
{
	unsigned int mdio_sca, mdio_sccd;

	mtx_lock_spin(&pdata->mdio_mutex);

	mdio_sca = xgbe_create_mdio_sca(addr, reg);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) ==
	    EWOULDBLOCK) {
		axgbe_error("%s: MDIO read error\n", __func__);
		mtx_unlock_spin(&pdata->mdio_mutex);
		return (-ETIMEDOUT);
	}

	mtx_unlock_spin(&pdata->mdio_mutex);

	return (XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA));
}

static int
xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
    enum xgbe_mdio_mode mode)
{
	unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);

	switch (mode) {
	case XGBE_MDIO_MODE_CL22:
		if (port > XGMAC_MAX_C22_PORT)
			return (-EINVAL);
		reg_val |= (1 << port);
		break;
	case XGBE_MDIO_MODE_CL45:
		break;
	default:
		return (-EINVAL);
	}

	XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return (0);
}

static int
xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return (!XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN));
}

static int
xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	axgbe_printf(0, "Receive checksum offload Disabled\n");
	return (0);
}

static int
xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	axgbe_printf(0, "Receive checksum offload Enabled\n");
	return (0);
}

static void
xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

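	/* Ensure the cleared descriptor (including the OWN bit in desc3) is
	 * visible in memory before any later store hands it back to the
	 * hardware. */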
	wmb();
}

static void
xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
	    upper_32_bits(rdata->rdata_paddr));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
	    lower_32_bits(rdata->rdata_paddr));
}

static void
xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;

	/*
	 * Just set desc_count and the starting address of the desc list
	 * here.  Rest will be done as part of the txrx path.
	 */

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
	    upper_32_bits(rdata->rdata_paddr));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
	    lower_32_bits(rdata->rdata_paddr));
}

static int
xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int err, etlt, l34t = 0;

	axgbe_printf(1, "-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return (1);

	rmb();

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* TODO - Timestamp Context Descriptor */
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    CONTEXT, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    CONTEXT_NEXT, 0);
		return (0);
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

	/* Indicate if a Context Descriptor is next */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    CONTEXT_NEXT, 1);

	/* Get the header length */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    FIRST, 1);
		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
		    RX_NORMAL_DESC2, HL);
		if (rdata->rx.hdr_len)
			pdata->ext_stats.rx_split_header_packets++;
	} else
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    FIRST, 0);

	/* Get the RSS hash */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    RSS_HASH, 1);

		packet->rss_hash = le32_to_cpu(rdesc->desc1);

		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
			packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV4;
			break;
		case RX_DESC3_L34T_IPV4_UDP:
			packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV4;
			break;
		case RX_DESC3_L34T_IPV6_TCP:
			packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV6;
			break;
		case RX_DESC3_L34T_IPV6_UDP:
			packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV6;
			break;
		default:
			packet->rss_hash_type = M_HASHTYPE_OPAQUE;
			break;
		}
	}

	/* Not all the data has been transferred for this packet */
	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
		/* This is not the last of the data for this packet */
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    LAST, 0);
		return (0);
	}

	/* This is the last of the data for this packet */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
	    LAST, 1);

	/* Get the packet length */
	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

	/* Set checksum done indicator as appropriate */
	/* TODO - add tunneling support */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
	    CSUM_DONE, 1);

	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	axgbe_printf(1, "%s: err=%u, etlt=%#x\n", __func__, err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if (etlt == 0x09 &&
		    (if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWTAGGING)) {
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			    VLAN_CTAG, 1);
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
			    RX_NORMAL_DESC0, OVT);
			axgbe_printf(1, "vlan-ctag=%#06x\n", packet->vlan_ctag);
		}
	} else {
		unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, TNP);

		if ((etlt == 0x05) || (etlt == 0x06)) {
			axgbe_printf(1, "%s: err1 l34t %d err 0x%x etlt 0x%x\n",
			    __func__, l34t, err, etlt);
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			    CSUM_DONE, 0);
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			    TNPCSUM_DONE, 0);
			pdata->ext_stats.rx_csum_errors++;
		} else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
			axgbe_printf(1, "%s: err2 l34t %d err 0x%x etlt 0x%x\n",
			    __func__, l34t, err, etlt);
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			    CSUM_DONE, 0);
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			    TNPCSUM_DONE, 0);
			pdata->ext_stats.rx_vxlan_csum_errors++;
		} else {
			axgbe_printf(1, "%s: tnp %d l34t %d err 0x%x etlt 0x%x\n",
			    __func__, tnp, l34t, err, etlt);
			axgbe_printf(1, "%s: Channel: %d SR 0x%x DSR 0x%x\n",
			    __func__, channel->queue_index,
			    XGMAC_DMA_IOREAD(channel, DMA_CH_SR),
			    XGMAC_DMA_IOREAD(channel, DMA_CH_DSR));
			axgbe_printf(1, "%s: ring cur %d dirty %d\n",
			    __func__, ring->cur, ring->dirty);
			axgbe_printf(1, "%s: Desc 0x%08x-0x%08x-0x%08x-0x%08x\n",
			    __func__, rdesc->desc0, rdesc->desc1, rdesc->desc2,
			    rdesc->desc3);
			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
			    FRAME, 1);
		}
	}

	axgbe_printf(1, "<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n",
	    channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur);

	return (0);
}

static int
xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT));
}

static int
xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD));
}

static int
xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	axgbe_printf(1, "enable_int: DMA_CH_IER read - 0x%x\n",
	    channel->curr_ier);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->curr_ier |= channel->saved_ier;
		break;
	default:
		return (-1);
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

	axgbe_printf(1, "enable_int: DMA_CH_IER write - 0x%x\n",
	    channel->curr_ier);

	return (0);
}

static int
xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	axgbe_printf(1, "disable_int: DMA_CH_IER read - 0x%x\n",
	    channel->curr_ier);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->saved_ier = channel->curr_ier;
		channel->curr_ier = 0;
		break;
	default:
		return (-1);
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

	axgbe_printf(1, "disable_int: DMA_CH_IER write - 0x%x\n",
	    channel->curr_ier);

	return (0);
}

static int
__xgbe_exit(struct xgbe_prv_data *pdata)
{
	unsigned int count = 2000;

	/* Issue a software reset */
	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	DELAY(10);

	/* Poll until the reset bit self-clears (up to 2000 * 500 usec) */
	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		DELAY(500);

	if (!count)
		return (-EBUSY);

	return (0);
}

static int
xgbe_exit(struct xgbe_prv_data *pdata)
{
	int ret;

	/* To guard against possible incorrectly generated interrupts,
	 * issue the software reset twice.
	 */
	ret = __xgbe_exit(pdata);
	if (ret) {
		axgbe_error("%s: exit error %d\n", __func__, ret);
		return (ret);
	}

	return (__xgbe_exit(pdata));
}

static int
xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return (0);

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll until the flush bit self-clears for each queue */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
		    MTL_Q_TQOMR, FTQ))
			DELAY(500);

		if (!count)
			return (-EBUSY);
	}

	return (0);
}

static void
xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	unsigned int sbmr;

	sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);

	/* Set enhanced addressing mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);

	XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);

	/* Set descriptor fetching threshold */
	if (pdata->vdata->tx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
		    pdata->vdata->tx_desc_prefetch);

	if (pdata->vdata->rx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
		    pdata->vdata->rx_desc_prefetch);
}

static void
xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
	if (pdata->awarcr)
		XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
}

static void
xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
		    MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static void
xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
    unsigned int queue, unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
	axgbe_printf(1, "%s: queue %d q_fifo_size %d frame_fifo_size 0x%x\n",
	    __func__, queue, q_fifo_size, frame_fifo_size);

	/* TODO - add pfc/ets related support */

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
1753 if (q_fifo_size <= 2048) {
1754 /* rx_rfd to zero to signal no flow control */
1755 pdata->rx_rfa[queue] = 0;
1756 pdata->rx_rfd[queue] = 0;
1757 return;
1758 }
1759
1760 if (q_fifo_size <= 4096) {
1761 /* Between 2048 and 4096 */
1762 pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
1763 pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
1764 return;
1765 }
1766
1767 if (q_fifo_size <= frame_fifo_size) {
1768 /* Between 4096 and max-frame */
1769 pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
1770 pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
1771 return;
1772 }
1773
1774 if (q_fifo_size <= (frame_fifo_size * 3)) {
1775 /* Between max-frame and 3 max-frames,
1776 * trigger if we get just over a frame of data and
1777 * resume when we have just under half a frame left.
1778 */
1779 rfa = q_fifo_size - frame_fifo_size;
1780 rfd = rfa + (frame_fifo_size / 2);
1781 } else {
1782 /* Above 3 max-frames - trigger when just over
1783 * 2 frames of space available
1784 */
1785 rfa = frame_fifo_size * 2;
1786 rfa += XGMAC_FLOW_CONTROL_UNIT;
1787 rfd = rfa + frame_fifo_size;
1788 }
1789
1790 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
1791 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
1792 axgbe_printf(1, "%s: forced queue %d rfa 0x%x rfd 0x%x\n", __func__,
1793 queue, pdata->rx_rfa[queue], pdata->rx_rfd[queue]);
1794 }
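
/*
 * Worked example for the thresholds above (illustrative only, and
 * assuming XGMAC_FLOW_CONTROL_UNIT is 512 bytes as in the sibling
 * Linux xgbe driver): a 9018-byte max frame aligns up to a
 * frame_fifo_size of 9216.  A 32 KB queue fifo exceeds
 * 3 * 9216 = 27648, so the final else path applies:
 *
 *	rfa = (2 * 9216) + 512 = 18944 bytes
 *	rfd = 18944 + 9216     = 28160 bytes
 *
 * XGMAC_FLOW_CONTROL_VALUE() then converts these byte counts into
 * the encoded units expected by the MTL_Q_RQFCR RFA/RFD fields.
 */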

static void
xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
    unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;

		axgbe_printf(1, "%s: fifo[%d] - 0x%x q_fifo_size 0x%x\n",
		    __func__, i, fifo[i], q_fifo_size);
		xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

static void
xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		axgbe_printf(1, "%s: queue %d rfa %d rfd %d\n", __func__, i,
		    pdata->rx_rfa[i], pdata->rx_rfd[i]);

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
		    pdata->rx_rfa[i]);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
		    pdata->rx_rfd[i]);

		axgbe_printf(1, "%s: MTL_Q_RQFCR 0x%x\n", __func__,
		    XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR));
	}
}

static unsigned int
xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return (min_t(unsigned int, pdata->tx_max_fifo_size,
	    pdata->hw_feat.tx_fifo_size));
}

static unsigned int
xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return (min_t(unsigned int, pdata->rx_max_fifo_size,
	    pdata->hw_feat.rx_fifo_size));
}

static void
xgbe_calculate_equal_fifo(unsigned int fifo_size, unsigned int queue_count,
    unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int p_fifo;
	unsigned int i;

	q_fifo_size = fifo_size / queue_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result by 1).
	 */
	p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	/* Distribute the fifo equally amongst the queues */
	for (i = 0; i < queue_count; i++)
		fifo[i] = p_fifo;
}
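
/*
 * Example of the encoding above (illustrative; assumes
 * XGMAC_FIFO_UNIT is 256 bytes): a 64 KB fifo split across 8 queues
 * gives q_fifo_size = 8192, so p_fifo = 8192 / 256 - 1 = 31.  The
 * hardware interprets a programmed value of N as (N + 1) units,
 * matching the (fifo[i] + 1) * XGMAC_FIFO_UNIT calculation used
 * when thresholds are derived from these settings.
 */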

static unsigned int
xgbe_set_nonprio_fifos(unsigned int fifo_size, unsigned int queue_count,
    unsigned int *fifo)
{
	unsigned int i;

	MPASS(powerof2(XGMAC_FIFO_MIN_ALLOC));

	if (queue_count <= IEEE_8021QAZ_MAX_TCS)
		return (fifo_size);

	/* Rx queues 9 and up are for specialized packets,
	 * such as PTP or DCB control packets, etc. and
	 * don't require a large fifo
	 */
	for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
		fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
		fifo_size -= XGMAC_FIFO_MIN_ALLOC;
	}

	return (fifo_size);
}
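
/*
 * Any Rx queue beyond the eight 802.1Qaz traffic classes is pinned
 * to the minimum allocation here; the reduced fifo_size returned is
 * what xgbe_calculate_equal_fifo() subsequently spreads across the
 * priority-capable queues.
 */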

static void
xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int i;

	fifo_size = xgbe_get_tx_fifo_size(pdata);
	axgbe_printf(1, "%s: fifo_size 0x%x\n", __func__, fifo_size);

	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);

	for (i = 0; i < pdata->tx_q_count; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
		axgbe_printf(1, "Tx q %d FIFO Size 0x%x\n", i,
		    XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR));
	}

	axgbe_printf(1, "%d Tx hardware queues, %d byte fifo per queue\n",
	    pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}

static void
xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int prio_queues;
	unsigned int i;

	/* TODO - add pfc/ets related support */

	/* Clear any DCB related fifo/queue information */
	fifo_size = xgbe_get_rx_fifo_size(pdata);
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	axgbe_printf(1, "%s: fifo_size 0x%x rx_q_cnt %d prio %d\n", __func__,
	    fifo_size, pdata->rx_q_count, prio_queues);

	/* Assign a minimum fifo to the non-VLAN priority queues */
	fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);

	xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);

	for (i = 0; i < pdata->rx_q_count; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
		axgbe_printf(1, "Rx q %d FIFO Size 0x%x\n", i,
		    XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR));
	}

	xgbe_calculate_flow_control_threshold(pdata, fifo);
	xgbe_config_flow_control_threshold(pdata);

	axgbe_printf(1, "%u Rx hardware queues, %u byte fifo/queue\n",
	    pdata->rx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}

static void
xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
	unsigned int mask;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
			    Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}

		if (i < qptc_extra) {
			axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
			    Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		if (i < ppq_extra) {
			axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MAC_RQC2_INC;
		reg_val = 0;
	}

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}
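
/*
 * Priority-mapping example (illustrative): with the 8 VLAN
 * priorities spread over 4 priority-capable Rx queues, ppq = 2 and
 * ppq_extra = 0, so priorities {0,1} map to RXq0, {2,3} to RXq1,
 * and so on.  The resulting per-queue priority masks are packed one
 * byte per queue into the MAC_RQC2R register family,
 * MAC_RQC2_Q_PER_REG queues per register.
 */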

static void
xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
	xgbe_set_mac_address(pdata, if_getlladdr(pdata->netdev));

	/*
	 * Promisc mode does not work as intended. Multicast traffic
	 * is triggering the filter, so enable Receive All.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, RA, 1);

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
	}
}

static void
xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
	unsigned int val;

	val = (if_getmtu(pdata->netdev) > XGMAC_STD_PACKET_MTU) ? 1 : 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void
xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
	xgbe_set_speed(pdata, pdata->phy_speed);
}

static void
xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM))
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}

static void
xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
		axgbe_printf(1, "Enabling rx vlan filtering\n");
		xgbe_enable_rx_vlan_filtering(pdata);
	} else {
		axgbe_printf(1, "Disabling rx vlan filtering\n");
		xgbe_disable_rx_vlan_filtering(pdata);
	}

	if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWTAGGING)) {
		axgbe_printf(1, "Enabling rx vlan stripping\n");
		xgbe_enable_rx_vlan_stripping(pdata);
	} else {
		axgbe_printf(1, "Disabling rx vlan stripping\n");
		xgbe_disable_rx_vlan_stripping(pdata);
	}
}

static uint64_t
xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	uint64_t val;

	if (pdata->vdata->mmc_64bit) {
		switch (reg_lo) {
		/* These registers are always 32 bit */
		case MMC_RXRUNTERROR:
		case MMC_RXJABBERERROR:
		case MMC_RXUNDERSIZE_G:
		case MMC_RXOVERSIZE_G:
		case MMC_RXWATCHDOGERROR:
			read_hi = false;
			break;

		default:
			read_hi = true;
		}
	} else {
		switch (reg_lo) {
		/* These registers are always 64 bit */
		case MMC_TXOCTETCOUNT_GB_LO:
		case MMC_TXOCTETCOUNT_G_LO:
		case MMC_RXOCTETCOUNT_GB_LO:
		case MMC_RXOCTETCOUNT_G_LO:
			read_hi = true;
			break;

		default:
			read_hi = false;
		}
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((uint64_t)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return (val);
}
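
/*
 * For a 64-bit counter the low 32 bits are read first and the high
 * word (at reg_lo + 4) is merged in afterwards.  Whether a given
 * counter is 32 or 64 bits wide is a property of the hardware
 * revision, hence the two switch tables keyed off
 * pdata->vdata->mmc_64bit above.
 */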

static void
xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
		    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
		    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
		    xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
		    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
		    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
		    xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
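
/*
 * MMC_TISR flags which Tx counters currently have an interrupt
 * pending, so only those counters are folded into the software
 * stats here; the unconditional sweep of every counter is done in
 * xgbe_read_mmc_stats() below.
 */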

static void
xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
		    xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
		    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
		    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
		    xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
		    xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
		    xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
		    xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
		    xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
		    xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
		    xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
		    xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
		    xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
		    xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
		    xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}

static void
xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
	    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
	    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
	    xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
	    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
	    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
	    xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
	    xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
	    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
	    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
	    xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
	    xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
	    xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
	    xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
	    xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
	    xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
	    xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
	    xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
	    xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
	    xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
	    xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static void
xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
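
/*
 * With ROR (reset-on-read) enabled here, every xgbe_mmc_read()
 * returns and clears a counter in one access, so the "+="
 * accumulation in the handlers above should never double-count.
 * Freezing the counters via MMC_CR.MCF in xgbe_read_mmc_stats()
 * keeps the full sweep internally consistent.
 */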

static void
xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
	 * wait forever though...
	 */
	tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < tx_timeout) {
		tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		DELAY(500);
	}

	if (ticks >= tx_timeout)
		axgbe_printf(1, "timed out waiting for Tx queue %u to empty\n",
		    queue);
}

static void
xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return (xgbe_txq_prepare_tx_stop(pdata, queue));

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
		    DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < tx_timeout) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		DELAY(500);
	}

	if (ticks >= tx_timeout)
		axgbe_printf(1, "timed out waiting for Tx DMA channel %u to stop\n",
		    queue);
}
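
/*
 * Status-register layout used above (restating the arithmetic): the
 * Tx DMA state for the first DMA_DSRX_FIRST_QUEUE channels lives in
 * DMA_DSR0, each channel occupying a DMA_DSR_Q_WIDTH-bit slot.
 * Later channels spill into DMA_DSR1 onward, DMA_DSRX_QPR channels
 * per register, with the register address stepping by DMA_DSRX_INC.
 */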

static void
xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
		    MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void
xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

static void
xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
	 * wait forever though...
	 */
	rx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < rx_timeout) {
		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		DELAY(500);
	}

	if (ticks >= rx_timeout)
		axgbe_printf(1, "timed out waiting for Rx queue %d to empty\n",
		    queue);
}

static void
xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
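
/*
 * MAC_RQC0R carries a 2-bit enable field per Rx queue; the value
 * 0x02 is believed to select the "enabled for DCB/generic traffic"
 * encoding.  For example, with four Rx queues the loop above builds
 * reg_val = 0xAA (binary 10 10 10 10).
 */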

static void
xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xgbe_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}

static void
xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void
xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

static void
xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}
}

static void
xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}

static int
xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret) {
		axgbe_error("error flushing TX queues\n");
		return (ret);
	}

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	/* TODO: Error Packet and undersized good Packet forwarding
	 * enable (FEP and FUP)
	 */
	xgbe_enable_mtl_interrupts(pdata);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_rx_mode(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_mac_speed(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	return (0);
}

void
xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{

	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_mac_address = xgbe_set_mac_address;
	hw_if->config_rx_mode = xgbe_config_rx_mode;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_speed = xgbe_set_speed;

	hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
	hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
	hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;

	hw_if->set_gpio = xgbe_set_gpio;
	hw_if->clr_gpio = xgbe_clr_gpio;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;

	/* For FLOW ctrl */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
}