/******************************************************************************
SPDX-License-Identifier: BSD-3-Clause

Copyright (c) 2001-2020, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/


#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns statistics for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to gather statistics for
 *
 * This function returns the statistics data for each of the Traffic Classes
 * in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
				 struct ixgbe_hw_stats *stats,
				 u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_tc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	/* Statistics pertaining to each traffic class */
	for (tc = 0; tc < tc_count; tc++) {
		/* Transmitted Packets */
		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
		/* Transmitted Bytes (read low first to prevent missed carry) */
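		/*
		 * The per-TC byte counters are wider than 32 bits and are
		 * split across low/high registers, hence the 64-bit
		 * accumulation below.
		 */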
		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
		stats->qbtc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
		/* Received Packets */
		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
		/* Received Bytes (read low first to prevent missed carry) */
		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
		stats->qbrc[tc] +=
			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

		/* Received Dropped Packets */
		stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_get_pfc_stats_82599 - Returns CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to gather statistics for
 *
 * This function returns the CBFC (priority flow control) status data for
 * each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
				  struct ixgbe_hw_stats *stats,
				  u8 tc_count)
{
	int tc;

	DEBUGFUNC("dcb_get_pfc_stats");

	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
		return IXGBE_ERR_PARAM;

	for (tc = 0; tc < tc_count; tc++) {
		/* Priority XOFF Transmitted */
		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
		/* Priority XOFF Received */
		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to TC assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
				      u16 *max, u8 *bwg_id, u8 *tsa,
				      u8 *map)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8 i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/*
	 * Map all user priorities (UPs) to TCs. Each UP owns one field in
	 * RTRUP2TC holding the TC it is assigned to; e.g. if priority 6
	 * maps to TC 3, UP6's field holds 011b.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
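	/*
	 * Worked example (hypothetical mapping, assuming the usual 3-bit
	 * fields, i.e. IXGBE_RTRUP2TC_UP_SHIFT == 3): map = {0, 0, 1, 1,
	 * 2, 2, 3, 3} packs to RTRUP2TC = 0x6D2240
	 * (UP7..UP0 = 011 011 010 010 001 001 000 000b).
	 */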

	/* Configure traffic class credits and priority */
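	/*
	 * Each RTRPT4C register packs the refill credits, the max credit
	 * limit (MCL) and the BWG index for one TC; LSP flags the TC as
	 * link strict priority.
	 */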
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa)
{
	u32 reg, max_credits;
	u8 i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
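	/*
	 * RTTDQSEL selects one of the 128 Tx queues; the RTTDT1C write
	 * that follows programs the selected queue's credit register.
	 */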
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= (u32)(refill[i]);
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTDT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to TC assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa,
					   u8 *map)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/*
	 * Map all user priorities (UPs) to TCs. Each UP owns one field in
	 * RTTUP2TC holding the TC it is assigned to, mirroring the Rx-side
	 * RTRUP2TC mapping above.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTPT2C_GSP;

		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled PFC bitmask, one bit per user priority
 * @map: priority to TC assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
	u32 i, j, fcrtl, reg;
	u8 max_tc = 0;

	/* Enable Transmit Priority Flow Control */
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

	/* Enable Receive Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg |= IXGBE_MFLCN_DPF;

	/*
	 * X540 supports per TC Rx priority flow control. So
	 * clear all TCs and only enable those that should be
	 * enabled.
	 */
	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	if (hw->mac.type >= ixgbe_mac_X540)
		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

	if (pfc_en)
		reg |= IXGBE_MFLCN_RPFCE;

	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
		if (map[i] > max_tc)
			max_tc = map[i];
	}
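	/*
	 * max_tc is now the highest TC referenced by the UP-to-TC map;
	 * TCs above it get their thresholds zeroed further below.
	 */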

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
			if ((map[j] == i) && (pfc_en & (1 << j))) {
				enabled = 1;
				break;
			}
		}

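		/*
		 * The watermarks in hw->fc are assumed to be kept in
		 * kilobytes here (worth verifying against your tree), so
		 * the << 10 below converts them to the byte-based
		 * FCRTH/FCRTL thresholds.
		 */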
		if (enabled) {
			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
		} else {
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB. This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
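	/*
	 * Refreshing at half the pause time re-sends XOFF before the link
	 * partner's pause timer expires, keeping the flow paused.
	 */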

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
				    struct ixgbe_dcb_config *dcb_config)
{
	u32 reg = 0;
	u8 i = 0;
	u8 tc_count = 8;
	bool vt_mode = false;

	if (dcb_config != NULL) {
		tc_count = dcb_config->num_tcs.pg_tcs;
		vt_mode = dcb_config->vt_mode;
	}

	if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
		return IXGBE_ERR_PARAM;

	if (tc_count == 8 && vt_mode == false) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
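		/*
		 * Each byte of an RQSMR register maps one Rx queue to a
		 * stat counter, so e.g. i = 5 yields reg = 0x01010101:
		 * queues 20..23 all feed stat counter 1 (TC 1).
		 */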
		for (i = 0; i < 32; i++) {
			reg = 0x01010101 * (i / 4);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 32, 32, 16, 16, 8, 8, 8, 8.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 8)
				reg = 0x00000000;
			else if (i < 16)
				reg = 0x01010101;
			else if (i < 20)
				reg = 0x02020202;
			else if (i < 24)
				reg = 0x03030303;
			else if (i < 26)
				reg = 0x04040404;
			else if (i < 28)
				reg = 0x05050505;
			else if (i < 30)
				reg = 0x06060606;
			else
				reg = 0x07070707;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == false) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			if (i % 8 > 3)
				/*
				 * In 4 TC mode, odd 16-queue ranges are
				 * not used.
				 */
				continue;
			reg = 0x01010101 * (i / 8);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 64, 32, 16, 16.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 16)
				reg = 0x00000000;
			else if (i < 24)
				reg = 0x01010101;
			else if (i < 28)
				reg = 0x02020202;
			else
				reg = 0x03030303;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == true) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TCs to each
		 * pool. Set all 32 queues of each TC across pools to the same
		 * stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TCs to each
		 * pool. Set all 32 queues of each TC across pools to the same
		 * stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
			   struct ixgbe_dcb_config *dcb_config)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	if (dcb_config->num_tcs.pg_tcs == 8) {
		/* Enable DCB for Rx with 8 TCs */
		switch (reg & IXGBE_MRQC_MRQE_MASK) {
		case 0:
		case IXGBE_MRQC_RT4TCEN:
			/* RSS disabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
			break;
		case IXGBE_MRQC_RSSEN:
		case IXGBE_MRQC_RTRSS4TCEN:
			/* RSS enabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS8TCEN;
			break;
		default:
			/*
			 * Unsupported value, assume stale data,
			 * overwrite no RSS
			 */
			ASSERT(0);
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
		}
	}
	if (dcb_config->num_tcs.pg_tcs == 4) {
		/* We support both VT-on and VT-off with 4 TCs. */
		if (dcb_config->vt_mode)
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_VMDQRT4TCEN;
		else
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS4TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	if (dcb_config->num_tcs.pg_tcs == 8)
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	else {
		/* We support both VT-on and VT-off with 4 TCs. */
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		if (dcb_config->vt_mode)
			reg |= IXGBE_MTQC_VT_ENA;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
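	/*
	 * Each QDE write selects queue q via the index field; since the
	 * value written sets no drop-enable bit, descriptor drop stays
	 * disabled for that queue.
	 */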

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable Security TX Buffer IFG for DCB */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg |= IXGBE_SECTX_DCB;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to TC assignments indexed by priority
 *
 * Configure DCB settings and enable DCB mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
			      u8 *map)
{
	UNREFERENCED_1PARAMETER(link_speed);

	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
					  map);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
					       tsa);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
					       tsa, map);
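	/*
	 * PFC and the per-TC statistics mapping are configured separately
	 * via ixgbe_dcb_config_pfc_82599() and
	 * ixgbe_dcb_config_tc_stats_82599().
	 */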

	return IXGBE_SUCCESS;
}