// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2024 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
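
/*
 * Note on the VMDq mask arithmetic used throughout this file:
 * __ALIGN_MASK(x, mask) expands to ((x + mask) & ~(mask)), so
 * __ALIGN_MASK(1, ~vmdq->mask) is the lowest set bit of the mask, i.e.
 * the number of queues per pool (e.g. 8 for IXGBE_82599_VMDQ_8Q_MASK,
 * 0x78).  Likewise (reg_idx & ~vmdq->mask) is the queue offset within
 * the current pool, and __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds
 * reg_idx up to the first queue of the next pool.
 */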

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4  RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
	case ixgbe_mac_e610:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}
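
/*
 * Worked example for the mapping above: in the 8 TC layout, tc = 6 gives
 * *tx = (6 + 8) << 3 = 112 and *rx = 6 << 4 = 96, matching the per-TC
 * tables in the comments.
 */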

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
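
/*
 * Illustration of the pool walk above: in 32 pool mode (4 queues per
 * pool) with rss->indices == 2, the Rx loop uses queues 0-1 of each pool
 * and then skips to the next pool boundary, caching reg_idx values
 * 0, 1, 4, 5, 8, 9, ...
 */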

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	int queues;

	queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
	return adapter->xdp_prog ? queues : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
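
/*
 * Sizing example for the function above: with 8 TCs the device is carved
 * into pools of 8 queues (IXGBE_82599_VMDQ_8Q_MASK), so at most 16 pools
 * fit in the 128 hardware queues, and each pool left over beyond vmdq_i
 * can donate one queue to FCoE at the FCoE TC's offset.
 */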

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
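
/*
 * Example: with 4 TCs on an 82599 and rss_i capped at 16, the netdev is
 * configured with 4 TCs of 16 queues each (offsets 0, 16, 32, 48) and
 * num_tx_queues == num_rx_queues == 64.
 */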

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior. To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}
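
/*
 * Example: a request for 40 pools selects 64 pool mode
 * (IXGBE_82599_VMDQ_2Q_MASK) with 2 queues per pool, while a request for
 * 20 pools selects 32 pool mode with up to 4 RSS queues per pool.
 */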

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}
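
/*
 * Note: when ATR sampling is active, the ring count above is raised to
 * the RING_F_FDIR limit rather than the plain RSS limit, and
 * IXGBE_FLAG_FDIR_HASH_CAPABLE is only set while no perfect filters are
 * in use.
 */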

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}
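
/*
 * Example budget: with 12 paired Rx/Tx queues on a 16 CPU system the
 * request is 12 queue vectors plus NON_Q_VECTORS, and
 * pci_enable_msix_range() may return any count between MIN_MSIX_COUNT
 * and that request.
 */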

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	int node = dev_to_node(&adapter->pdev->dev);
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int cpu = -1;
	int ring_count;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			cpu = cpumask_local_spread(v_idx, node);
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}
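
	/*
	 * XDP rings are Tx-like and join the same q_vector->tx container,
	 * but they take consecutive queue indices rather than being
	 * interleaved across vectors by v_count like the Tx rings above.
	 */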

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);
		spin_lock_init(&ring->tx_lock);

		/* assign ring to adapter */
		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->fcoe_mtu) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
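
/*
 * The rings initialized above live in the q_vector's flexible ring[]
 * array, in allocation order: Tx rings first, then XDP, then Rx.
 */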

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
		else
			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);

	adapter->q_vector[v_idx] = NULL;
	__netif_napi_del(&q_vector->napi);

	/*
	 * after a call to __netif_napi_del() napi may still be used and
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err, i;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (adapter->rx_ring[i])
			adapter->rx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i])
			adapter->tx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		if (adapter->xdp_ring[i])
			adapter->xdp_ring[i]->ring_idx = i;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
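
/*
 * Example split: distributing 10 Rx and 10 Tx queues across 4 q_vectors
 * with the DIV_ROUND_UP loop above yields 3, 3, 2 and 2 rings of each
 * type per vector.
 */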

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT |
		      FIELD_PREP(IXGBE_ADVTXD_DTYP_MASK, IXGBE_ADVTXD_DTYP_CTXT);

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}