/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: Number of traffic classes in use
 *
 * This function returns the status data for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
                                 struct ixgbe_hw_stats *stats,
                                 u8 tc_count)
{
        int tc;

        DEBUGFUNC("dcb_get_tc_stats");

        if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
                return IXGBE_ERR_PARAM;

        /* Statistics pertaining to each traffic class */
        for (tc = 0; tc < tc_count; tc++) {
                /* Transmitted Packets */
                stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
                /* Transmitted Bytes (read low first to prevent missed carry) */
                stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
                stats->qbtc[tc] +=
                    (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
                /* Received Packets */
                stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
                /* Received Bytes (read low first to prevent missed carry) */
                stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
                stats->qbrc[tc] +=
                    (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

                /* Received Dropped Packets */
                stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
        }

        return IXGBE_SUCCESS;
}
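
/*
 * Usage sketch (hypothetical caller, not part of the shared code): the per-TC
 * counters above accumulate into the caller-supplied structure, so a caller
 * would typically keep a single ixgbe_hw_stats instance and re-poll it
 * periodically, e.g.:
 *
 *      struct ixgbe_hw_stats stats = { 0 };
 *
 *      ixgbe_dcb_get_tc_stats_82599(hw, &stats, IXGBE_DCB_MAX_TRAFFIC_CLASS);
 *      ixgbe_dcb_get_pfc_stats_82599(hw, &stats, IXGBE_DCB_MAX_TRAFFIC_CLASS);
 */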

/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: Number of traffic classes in use
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
                                  struct ixgbe_hw_stats *stats,
                                  u8 tc_count)
{
        int tc;

        DEBUGFUNC("dcb_get_pfc_stats");

        if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
                return IXGBE_ERR_PARAM;

        for (tc = 0; tc < tc_count; tc++) {
                /* Priority XOFF Transmitted */
                stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
                /* Priority XOFF Received */
                stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                      u16 *max, u8 *bwg_id, u8 *tsa,
                                      u8 *map)
{
        u32 reg = 0;
        u32 credit_refill = 0;
        u32 credit_max = 0;
        u8 i = 0;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; WSP)
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        /*
         * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
         * corresponding bits set for the UPs that need to be mapped to that
         * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
         * up_to_tc_bitmap value for that TC will be 11000000 in binary.
         */
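        /*
         * Worked example (assuming the 3-bit per-UP field width implied by
         * IXGBE_RTRUP2TC_UP_SHIFT): with map = {0, 0, 1, 1, 2, 2, 3, 3},
         * priority i lands in bits [3i+2:3i], so
         * reg = (1 << 6) | (1 << 9) | (2 << 12) | (2 << 15) |
         *       (3 << 18) | (3 << 21) = 0x006D2240.
         */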
        reg = 0;
        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
                reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

        IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

        /* Configure traffic class credits and priority */
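        /*
         * Each RTRPT4C register packs, for its TC, the refill credits in the
         * low bits, the max credit limit at IXGBE_RTRPT4C_MCL_SHIFT, the
         * bandwidth group index at IXGBE_RTRPT4C_BWG_SHIFT, and LSP when the
         * TC uses link strict priority.
         */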
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                credit_refill = refill[i];
                credit_max = max[i];
                reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

                reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTRPT4C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
        }

        /*
         * Configure Rx packet plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                           u16 *max, u8 *bwg_id, u8 *tsa)
{
        u32 reg, max_credits;
        u8 i;

        /* Clear the per-Tx queue credits; we use per-TC instead */
        for (i = 0; i < 128; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
                IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
        }

        /* Configure traffic class credits and priority */
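        /*
         * For CEE, a group strict priority TC sets GSP and a link strict TC
         * sets LSP; TCs using bandwidth allocation (ETS) leave both bits
         * clear and arbitrate on their bandwidth-group credits.
         */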
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                max_credits = max[i];
                reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
                reg |= refill[i];
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTDT2C_GSP;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTDT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
        }

        /*
         * Configure Tx descriptor plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                           u16 *max, u8 *bwg_id, u8 *tsa,
                                           u8 *map)
{
        u32 reg;
        u8 i;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; SP; arb delay)
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
              IXGBE_RTTPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        /*
         * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
         * corresponding bits set for the UPs that need to be mapped to that
         * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
         * up_to_tc_bitmap value for that TC will be 11000000 in binary.
         */
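        /* The per-UP packing here mirrors RTRUP2TC in the Rx arbiter above. */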
        reg = 0;
        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
                reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

        IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                reg = refill[i];
                reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTPT2C_GSP;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTPT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
        }

        /*
         * Configure Tx packet plane (recycle mode; SP; arb delay) and
         * enable arbiter
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
        u32 i, j, fcrtl, reg;
        u8 max_tc = 0;

        /* Enable Transmit Priority Flow Control */
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

        /* Enable Receive Priority Flow Control */
        reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
        reg |= IXGBE_MFLCN_DPF;

        /*
         * X540 supports per TC Rx priority flow control. So
         * clear all TCs and only enable those that should be
         * enabled.
         */
        reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

        if (hw->mac.type >= ixgbe_mac_X540)
                reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

        if (pfc_en)
                reg |= IXGBE_MFLCN_RPFCE;

        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
                if (map[i] > max_tc)
                        max_tc = map[i];
        }

        /* Configure PFC Tx thresholds per TC */
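        /*
         * Note: high_water/low_water are expected here in kilobytes (the
         * << 10 below scales them to the byte-based FCRTH/FCRTL thresholds);
         * FCEN enables the threshold and XONE lets the hardware send XON once
         * the buffer drains back below the low water mark.
         */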
        for (i = 0; i <= max_tc; i++) {
                int enabled = 0;

                for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
                        if ((map[j] == i) && (pfc_en & (1 << j))) {
                                enabled = 1;
                                break;
                        }
                }

                if (enabled) {
                        reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                        fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
                } else {
                        /*
                         * In order to prevent Tx hangs when the internal Tx
                         * switch is enabled we must set the high water mark
                         * to the Rx packet buffer size - 24KB. This allows
                         * the Tx switch to function even under heavy Rx
                         * workloads.
                         */
                        reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                }

                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
        }

        for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
        }

        /* Configure pause time (2 TCs per register) */
        reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
        for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
                IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

        /* Configure flow control refresh threshold value */
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
                                    struct ixgbe_dcb_config *dcb_config)
{
        u32 reg = 0;
        u8 i = 0;
        u8 tc_count = 8;
        bool vt_mode = FALSE;

        if (dcb_config != NULL) {
                tc_count = dcb_config->num_tcs.pg_tcs;
                vt_mode = dcb_config->vt_mode;
        }

        if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
                return IXGBE_ERR_PARAM;

        if (tc_count == 8 && vt_mode == FALSE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Set all 16 queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 */
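                /*
                 * Concretely: registers 0-3 get 0x00000000 (queues 0-15 ->
                 * stat 0), registers 4-7 get 0x01010101 (queues 16-31 ->
                 * stat 1), and so on up to stat 7.
                 */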
                for (i = 0; i < 32; i++) {
                        reg = 0x01010101 * (i / 4);
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                }
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Set all queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 * Tx queues are allocated non-uniformly to TCs:
                 * 32, 32, 16, 16, 8, 8, 8, 8.
                 */
                for (i = 0; i < 32; i++) {
                        if (i < 8)
                                reg = 0x00000000;
                        else if (i < 16)
                                reg = 0x01010101;
                        else if (i < 20)
                                reg = 0x02020202;
                        else if (i < 24)
                                reg = 0x03030303;
                        else if (i < 26)
                                reg = 0x04040404;
                        else if (i < 28)
                                reg = 0x05050505;
                        else if (i < 30)
                                reg = 0x06060606;
                        else
                                reg = 0x07070707;
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
                }
        } else if (tc_count == 4 && vt_mode == FALSE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Set all 16 queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++) {
                        if (i % 8 > 3)
                                /* In 4 TC mode, odd 16-queue ranges are
                                 * not used.
                                 */
                                continue;
                        reg = 0x01010101 * (i / 8);
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                }
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Set all queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 * Tx queues are allocated non-uniformly to TCs:
                 * 64, 32, 16, 16.
                 */
                for (i = 0; i < 32; i++) {
                        if (i < 16)
                                reg = 0x00000000;
                        else if (i < 24)
                                reg = 0x01010101;
                        else if (i < 28)
                                reg = 0x02020202;
                        else
                                reg = 0x03030303;
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
                }
        } else if (tc_count == 4 && vt_mode == TRUE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
                 * pool. Set all 32 queues of each TC across pools to the same
                 * stat with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
                 * pool. Set all 32 queues of each TC across pools to the same
                 * stat with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
                           struct ixgbe_dcb_config *dcb_config)
{
        u32 reg;
        u32 q;

        /* Disable the Tx desc arbiter so that MTQC can be changed */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
        if (dcb_config->num_tcs.pg_tcs == 8) {
                /* Enable DCB for Rx with 8 TCs */
                switch (reg & IXGBE_MRQC_MRQE_MASK) {
                case 0:
                case IXGBE_MRQC_RT4TCEN:
                        /* RSS disabled cases */
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RT8TCEN;
                        break;
                case IXGBE_MRQC_RSSEN:
                case IXGBE_MRQC_RTRSS4TCEN:
                        /* RSS enabled cases */
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RTRSS8TCEN;
                        break;
                default:
                        /*
                         * Unsupported value, assume stale data,
                         * overwrite no RSS
                         */
                        ASSERT(0);
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RT8TCEN;
                }
        }
        if (dcb_config->num_tcs.pg_tcs == 4) {
                /* We support both VT-on and VT-off with 4 TCs. */
                if (dcb_config->vt_mode)
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_VMDQRT4TCEN;
                else
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RTRSS4TCEN;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

        /* Enable DCB for Tx with 8 TCs */
        if (dcb_config->num_tcs.pg_tcs == 8)
                reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
        else {
                /* We support both VT-on and VT-off with 4 TCs. */
                reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
                if (dcb_config->vt_mode)
                        reg |= IXGBE_MTQC_VT_ENA;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

        /* Disable drop for all queues */
        for (q = 0; q < 128; q++)
                IXGBE_WRITE_REG(hw, IXGBE_QDE,
                                (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

        /* Enable the Tx desc arbiter */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        /* Enable Security TX Buffer IFG for DCB */
        reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
        reg |= IXGBE_SECTX_DCB;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
                              u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
                              u8 *map)
{
        UNREFERENCED_1PARAMETER(link_speed);

        ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
                                          map);
        ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
                                               tsa);
        ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
                                               tsa, map);

        return IXGBE_SUCCESS;
}
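
/*
 * Note that this helper only programs the three arbiters; PFC and the per-TC
 * statistics mapping are configured separately via
 * ixgbe_dcb_config_pfc_82599() and ixgbe_dcb_config_tc_stats_82599().
 */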