// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2024 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
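
/* Illustration of the mask arithmetic used above (not additional driver
 * logic): with the 82599 4-queues-per-pool VMDq mask, ~vmdq->mask keeps
 * only the two low bits, so
 *
 *   __ALIGN_MASK(1, ~vmdq->mask)        -> 4, the queues per pool
 *   reg_idx & ~vmdq->mask               -> queue index within a pool
 *   __ALIGN_MASK(reg_idx, ~vmdq->mask)  -> reg_idx rounded up to the
 *                                          next pool boundary
 *
 * so with tcs == 4 the loops above walk register indices pool * 4 + tc.
 */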

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
	case ixgbe_mac_e610:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
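
/* Worked example (illustration only): on 82599/X540-class MACs with
 * num_tcs <= 4 and rss_i == 16, ixgbe_get_first_reg_idx() yields
 *
 *   TC0: Tx   0..15,  Rx  0..15
 *   TC1: Tx  64..79,  Rx 32..47
 *   TC2: Tx  96..111, Rx 64..79
 *   TC3: Tx 112..127, Rx 96..111
 *
 * and ixgbe_cache_ring_dcb() stores those indices linearly into
 * tx_ring[]/rx_ring[] starting at offset tc * rss_i.
 */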

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
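
/* Illustration (assumed example values): in 32-pool VMDq mode with
 * 4 hardware queues per pool and rss->indices == 2, the Rx loop above
 * caches register indices in pairs -- 0..1 for the first PF pool,
 * 4..5 for the next, and so on -- jumping to the next pool boundary
 * via __ALIGN_MASK() once the per-pool RSS count is exhausted.  Only
 * the first pool keeps a netdev pointer; rings in the extra pools are
 * reserved for L2 forwarding (macvlan) offload, so their netdev is
 * left NULL here.
 */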

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	int queues;

	queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
	return adapter->xdp_prog ? queues : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
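
/* Example sizing (illustrative only): with 8 traffic classes the
 * function above selects the 8-queue VMDq mask, so each pool owns
 * 8 queues and vmdq_i is capped at 16 pools.  If, say, 4 pools are in
 * use (offset included), the FCoE pre-computation becomes
 * fcoe_i = 128 / 8 - 4 = 12 queues left over, which can then be
 * claimed as a dedicated FCoE block appended after the pool rings.
 */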

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
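
/* Illustrative example (assuming the adapter was registered with 128
 * netdev Tx queues and an RSS limit of 16): for an X540 with 4 TCs,
 * ixgbe_set_dcb_queues() picks rss_i = min(128 / 4, 16, limit) = 16
 * queues per TC, for 64 Tx and 64 Rx queues total, and registers each
 * TC with netdev_set_tc_queue() as a block of 16 queues starting at
 * 16 * tc.
 */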

/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}
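
/* Illustrative example: 40 VFs plus one PF pool pushes vmdq_i past 32,
 * so the function above falls into 64-pool mode -- 2 queues per pool
 * and the PF's per-pool RSS clamped to 2.  With, say, 16 VFs plus one
 * PF pool it stays in 32-pool mode, where the PF can keep 4, 2, or 1
 * RSS queues per pool at the cost of fewer total pools.
 */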

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}
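
/* Note (illustrative): when more than one RSS queue is usable and ATR
 * sampling is enabled, the Flow Director block above replaces rss_i
 * with the RING_F_FDIR limit -- typically larger than the CPU-bounded
 * RSS limit -- and turns on IXGBE_FLAG_FDIR_HASH_CAPABLE unless perfect
 * filters are configured.  So an 8-CPU box, for example, can still end
 * up publishing the larger Flow Director queue count here.
 */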

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}
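
/* Worked example (illustrative): on a 16-CPU system with 16 Rx and
 * 16 Tx queues and no XDP, the request above amounts to
 * min(max(16, 16), 16) + NON_Q_VECTORS vectors, further capped by
 * hw.mac.max_msix_vectors.  If pci_enable_msix_range() then grants
 * fewer vectors than requested (but at least MIN_MSIX_COUNT),
 * num_q_vectors simply ends up smaller and the queues are spread
 * across whatever vectors were granted.
 */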

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	int node = dev_to_node(&adapter->pdev->dev);
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int cpu = -1;
	int ring_count;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			cpu = cpumask_local_spread(v_idx, node);
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);
		spin_lock_init(&ring->tx_lock);

		/* assign ring to adapter */
		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}
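
	/* Note on the indexing in these loops (illustration): the caller
	 * hands every q_vector the same stride v_count, so Tx and Rx
	 * queue_index values advance by the number of q_vectors.  With
	 * 4 q_vectors, vector 0 therefore owns rings 0, 4, 8, ... while
	 * XDP rings are taken as one contiguous block per vector
	 * (xdp_idx only increments by one).
	 */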

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->fcoe_mtu) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
		else
			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);

	adapter->q_vector[v_idx] = NULL;
	__netif_napi_del(&q_vector->napi);

	/*
	 * after a call to __netif_napi_del() napi may still be used and
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err, i;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (adapter->rx_ring[i])
			adapter->rx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i])
			adapter->tx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		if (adapter->xdp_ring[i])
			adapter->xdp_ring[i]->ring_idx = i;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
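
/* Interrupt fallback ladder (summary of the function below, for
 * illustration): MSI-X with one vector per queue pair is tried first;
 * if that fails, DCB, SR-IOV and RSS are scaled back to a single
 * queue/TC, a lone MSI vector is requested, and if even that fails the
 * driver runs on legacy INTx with a single q_vector.
 */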

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}
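
/* Typical usage (informational, based on callers in ixgbe_main.c):
 * ixgbe_init_interrupt_scheme() runs at probe time and again whenever
 * the queue layout changes (e.g. changing the number of TCs or VFs),
 * with ixgbe_clear_interrupt_scheme() tearing the previous scheme down
 * first while the interface is held down.
 */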

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}