Lines Matching +full:tcs +full:- +full:offset

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
9 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
12 * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
20 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_cache_ring_dcb_sriov()
22 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_cache_ring_dcb_sriov()
25 u8 tcs = adapter->hw_tcs; in ixgbe_cache_ring_dcb_sriov() local
28 if (tcs <= 1) in ixgbe_cache_ring_dcb_sriov()
32 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_cache_ring_dcb_sriov()
35 /* start at VMDq register offset for SR-IOV enabled setups */ in ixgbe_cache_ring_dcb_sriov()
36 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
37 for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) { in ixgbe_cache_ring_dcb_sriov()
39 if ((reg_idx & ~vmdq->mask) >= tcs) { in ixgbe_cache_ring_dcb_sriov()
41 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
43 adapter->rx_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_dcb_sriov()
44 adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev; in ixgbe_cache_ring_dcb_sriov()
47 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
48 for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { in ixgbe_cache_ring_dcb_sriov()
50 if ((reg_idx & ~vmdq->mask) >= tcs) in ixgbe_cache_ring_dcb_sriov()
51 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
52 adapter->tx_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_dcb_sriov()
57 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) in ixgbe_cache_ring_dcb_sriov()
61 if (fcoe->offset < tcs) in ixgbe_cache_ring_dcb_sriov()
65 if (fcoe->indices) { in ixgbe_cache_ring_dcb_sriov()
66 u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_cache_ring_dcb_sriov()
69 reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; in ixgbe_cache_ring_dcb_sriov()
70 for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { in ixgbe_cache_ring_dcb_sriov()
71 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; in ixgbe_cache_ring_dcb_sriov()
72 adapter->rx_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_dcb_sriov()
73 adapter->rx_ring[i]->netdev = adapter->netdev; in ixgbe_cache_ring_dcb_sriov()
77 reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; in ixgbe_cache_ring_dcb_sriov()
78 for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { in ixgbe_cache_ring_dcb_sriov()
79 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; in ixgbe_cache_ring_dcb_sriov()
80 adapter->tx_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_dcb_sriov()
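
The __ALIGN_MASK(1, ~vmdq->mask) expression used throughout this SR-IOV mapping recovers the number of hardware queues per VMDq pool from the pool mask (it evaluates to the lowest set bit of vmdq->mask, i.e. the pool stride). A minimal user-space sketch of that arithmetic, assuming the kernel's definition of __ALIGN_MASK and a few illustrative mask values (not taken from this listing):

    #include <stdio.h>

    /* same arithmetic as the kernel's __ALIGN_MASK() */
    #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

    int main(void)
    {
        /* assumed example masks for 8, 4 and 2 queues per pool */
        unsigned int masks[] = { 0x78, 0x7C, 0x7E };

        for (int i = 0; i < 3; i++) {
            unsigned int qpp = __ALIGN_MASK(1, ~masks[i]);

            printf("mask 0x%02X -> %u queues per pool\n", masks[i], qpp);
        }
        return 0;
    }

With that stride, reg_idx advances inside a pool until (reg_idx & ~vmdq->mask) reaches tcs, and the __ALIGN_MASK(reg_idx, ~vmdq->mask) at lines 41 and 51 then rounds it up to the first queue register of the next pool.
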
89 /* ixgbe_get_first_reg_idx - Return first register index associated with ring */
93 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_get_first_reg_idx()
94 u8 num_tcs = adapter->hw_tcs; in ixgbe_get_first_reg_idx()
99 switch (hw->mac.type) { in ixgbe_get_first_reg_idx()
112 * TCs : TC0/1 TC2/3 TC4-7 in ixgbe_get_first_reg_idx()
125 * TCs : TC0 TC1 TC2/3 in ixgbe_get_first_reg_idx()
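
The two surviving comment rows above are the headers of the per-TC register layout tables in ixgbe_get_first_reg_idx(): the first describes the 8-TC layout, the second the 4-TC layout. Assuming the even Rx split those tables imply (an assumption here, since the RxQs/TC rows did not match the search), the first Rx register for traffic class n is n * (128 / num_tcs), i.e. n * 16 with 8 TCs and n * 32 with 4 TCs; the Tx blocks are sized unevenly and are not reproduced in this listing.
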
142 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
150 u8 num_tcs = adapter->hw_tcs; in ixgbe_cache_ring_dcb()
152 int tc, offset, rss_i, i; in ixgbe_cache_ring_dcb() local
158 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_cache_ring_dcb()
160 for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { in ixgbe_cache_ring_dcb()
163 adapter->tx_ring[offset + i]->reg_idx = tx_idx; in ixgbe_cache_ring_dcb()
164 adapter->rx_ring[offset + i]->reg_idx = rx_idx; in ixgbe_cache_ring_dcb()
165 adapter->rx_ring[offset + i]->netdev = adapter->netdev; in ixgbe_cache_ring_dcb()
166 adapter->tx_ring[offset + i]->dcb_tc = tc; in ixgbe_cache_ring_dcb()
167 adapter->rx_ring[offset + i]->dcb_tc = tc; in ixgbe_cache_ring_dcb()
176  * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
179 * SR-IOV doesn't use any descriptor rings but changes the default if
186 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_cache_ring_sriov()
188 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_cache_ring_sriov()
189 struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; in ixgbe_cache_ring_sriov()
194 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) in ixgbe_cache_ring_sriov()
197 /* start at VMDq register offset for SR-IOV enabled setups */ in ixgbe_cache_ring_sriov()
199 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_cache_ring_sriov()
200 for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { in ixgbe_cache_ring_sriov()
203 if (fcoe->offset && (i > fcoe->offset)) in ixgbe_cache_ring_sriov()
207 if ((reg_idx & ~vmdq->mask) >= rss->indices) { in ixgbe_cache_ring_sriov()
209 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); in ixgbe_cache_ring_sriov()
211 adapter->rx_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_sriov()
212 adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev; in ixgbe_cache_ring_sriov()
217 for (; i < adapter->num_rx_queues; i++, reg_idx++) { in ixgbe_cache_ring_sriov()
218 adapter->rx_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_sriov()
219 adapter->rx_ring[i]->netdev = adapter->netdev; in ixgbe_cache_ring_sriov()
223 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); in ixgbe_cache_ring_sriov()
224 for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { in ixgbe_cache_ring_sriov()
227 if (fcoe->offset && (i > fcoe->offset)) in ixgbe_cache_ring_sriov()
231 if ((reg_idx & rss->mask) >= rss->indices) in ixgbe_cache_ring_sriov()
232 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); in ixgbe_cache_ring_sriov()
233 adapter->tx_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_sriov()
238 for (; i < adapter->num_tx_queues; i++, reg_idx++) in ixgbe_cache_ring_sriov()
239 adapter->tx_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_sriov()
247 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
257 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_cache_ring_rss()
258 adapter->rx_ring[i]->reg_idx = i; in ixgbe_cache_ring_rss()
259 adapter->rx_ring[i]->netdev = adapter->netdev; in ixgbe_cache_ring_rss()
261 for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++) in ixgbe_cache_ring_rss()
262 adapter->tx_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_rss()
263 for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++) in ixgbe_cache_ring_rss()
264 adapter->xdp_ring[i]->reg_idx = reg_idx; in ixgbe_cache_ring_rss()
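
For the plain RSS case the mapping is trivial: Rx and Tx rings use their own index as the register index, and the XDP Tx rings at lines 263-264 simply continue the same counter after the ordinary Tx rings. A tiny sketch with illustrative queue counts (not taken from this listing):

    #include <stdio.h>

    int main(void)
    {
        /* illustrative counts: 4 Tx queues followed by 4 XDP queues */
        unsigned int num_tx = 4, num_xdp = 4, reg_idx = 0, i;

        for (i = 0; i < num_tx; i++, reg_idx++)
            printf("tx_ring[%u]  -> reg_idx %u\n", i, reg_idx);
        for (i = 0; i < num_xdp; i++, reg_idx++)
            printf("xdp_ring[%u] -> reg_idx %u\n", i, reg_idx);
        return 0;
    }
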
270 * ixgbe_cache_ring_register - Descriptor ring to register mapping
273 * Once we know the feature-set enabled for the device, we'll cache
274 * the register offset the descriptor ring is assigned to.
283 adapter->rx_ring[0]->reg_idx = 0; in ixgbe_cache_ring_register()
284 adapter->tx_ring[0]->reg_idx = 0; in ixgbe_cache_ring_register()
305 return adapter->xdp_prog ? queues : 0; in ixgbe_xdp_queues()
317 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
320  * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues in ixgbe_set_dcb_sriov_queues()
328 u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; in ixgbe_set_dcb_sriov_queues()
333 u8 tcs = adapter->hw_tcs; in ixgbe_set_dcb_sriov_queues() local
336 if (tcs <= 1) in ixgbe_set_dcb_sriov_queues()
340 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_set_dcb_sriov_queues()
344 vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs); in ixgbe_set_dcb_sriov_queues()
346 /* Add starting offset to total pool count */ in ixgbe_set_dcb_sriov_queues()
347 vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; in ixgbe_set_dcb_sriov_queues()
350 if (tcs > 4) { in ixgbe_set_dcb_sriov_queues()
361 fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; in ixgbe_set_dcb_sriov_queues()
364 /* remove the starting offset from the pool count */ in ixgbe_set_dcb_sriov_queues()
365 vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; in ixgbe_set_dcb_sriov_queues()
368 adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; in ixgbe_set_dcb_sriov_queues()
369 adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; in ixgbe_set_dcb_sriov_queues()
375 adapter->ring_feature[RING_F_RSS].indices = 1; in ixgbe_set_dcb_sriov_queues()
376 adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; in ixgbe_set_dcb_sriov_queues()
379 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; in ixgbe_set_dcb_sriov_queues()
381 adapter->num_rx_pools = vmdq_i; in ixgbe_set_dcb_sriov_queues()
382 adapter->num_rx_queues_per_pool = tcs; in ixgbe_set_dcb_sriov_queues()
384 adapter->num_tx_queues = vmdq_i * tcs; in ixgbe_set_dcb_sriov_queues()
385 adapter->num_xdp_queues = 0; in ixgbe_set_dcb_sriov_queues()
386 adapter->num_rx_queues = vmdq_i * tcs; in ixgbe_set_dcb_sriov_queues()
389 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { in ixgbe_set_dcb_sriov_queues()
392 fcoe = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_set_dcb_sriov_queues()
395 fcoe_i = min_t(u16, fcoe_i, fcoe->limit); in ixgbe_set_dcb_sriov_queues()
399 fcoe->indices = fcoe_i; in ixgbe_set_dcb_sriov_queues()
400 fcoe->offset = vmdq_i * tcs; in ixgbe_set_dcb_sriov_queues()
403 adapter->num_tx_queues += fcoe_i; in ixgbe_set_dcb_sriov_queues()
404 adapter->num_rx_queues += fcoe_i; in ixgbe_set_dcb_sriov_queues()
405 } else if (tcs > 1) { in ixgbe_set_dcb_sriov_queues()
407 fcoe->indices = 1; in ixgbe_set_dcb_sriov_queues()
408 fcoe->offset = ixgbe_fcoe_get_tc(adapter); in ixgbe_set_dcb_sriov_queues()
410 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; in ixgbe_set_dcb_sriov_queues()
412 fcoe->indices = 0; in ixgbe_set_dcb_sriov_queues()
413 fcoe->offset = 0; in ixgbe_set_dcb_sriov_queues()
419 for (i = 0; i < tcs; i++) in ixgbe_set_dcb_sriov_queues()
420 netdev_set_tc_queue(adapter->netdev, i, 1, i); in ixgbe_set_dcb_sriov_queues()
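
The FCoE budget at line 361 is simply "total pools minus the pools already claimed for VMDq". As a worked example with assumed numbers: with 8 queues per pool the device exposes 128 / 8 = 16 pools, so if vmdq_i is 4 (starting offset included) then fcoe_i starts at 16 - 4 = 12 before being clamped to fcoe->limit at line 395.
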
427 struct net_device *dev = adapter->netdev; in ixgbe_set_dcb_queues()
430 int tcs; in ixgbe_set_dcb_queues() local
432 /* Map queue offset and counts onto allocated tx queues */ in ixgbe_set_dcb_queues()
433 tcs = adapter->hw_tcs; in ixgbe_set_dcb_queues()
436 if (tcs <= 1) in ixgbe_set_dcb_queues()
440 rss_i = dev->num_tx_queues / tcs; in ixgbe_set_dcb_queues()
441 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_set_dcb_queues()
445 } else if (tcs > 4) { in ixgbe_set_dcb_queues()
456 f = &adapter->ring_feature[RING_F_RSS]; in ixgbe_set_dcb_queues()
457 rss_i = min_t(int, rss_i, f->limit); in ixgbe_set_dcb_queues()
458 f->indices = rss_i; in ixgbe_set_dcb_queues()
459 f->mask = rss_m; in ixgbe_set_dcb_queues()
461 /* disable ATR as it is not supported when multiple TCs are enabled */ in ixgbe_set_dcb_queues()
462 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; in ixgbe_set_dcb_queues()
466 * by feature specific indices and offset. Here we map FCoE in ixgbe_set_dcb_queues()
470 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { in ixgbe_set_dcb_queues()
473 f = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_set_dcb_queues()
474 f->indices = min_t(u16, rss_i, f->limit); in ixgbe_set_dcb_queues()
475 f->offset = rss_i * tc; in ixgbe_set_dcb_queues()
479 for (i = 0; i < tcs; i++) in ixgbe_set_dcb_queues()
482 adapter->num_tx_queues = rss_i * tcs; in ixgbe_set_dcb_queues()
483 adapter->num_xdp_queues = 0; in ixgbe_set_dcb_queues()
484 adapter->num_rx_queues = rss_i * tcs; in ixgbe_set_dcb_queues()
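
Queue totals in the pure DCB case follow directly from lines 482-484: num_tx_queues and num_rx_queues are both rss_i * tcs, and XDP queues are disabled whenever more than one TC is enabled. For example (illustrative numbers), with 8 traffic classes and rss_i capped at 8 per TC the driver ends up with 64 Tx and 64 Rx queues.
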
491 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
494  * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues in ixgbe_set_sriov_queues()
501 u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; in ixgbe_set_sriov_queues()
503 u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; in ixgbe_set_sriov_queues()
509 /* only proceed if SR-IOV is enabled */ in ixgbe_set_sriov_queues()
510 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_set_sriov_queues()
516 /* Add starting offset to total pool count */ in ixgbe_set_sriov_queues()
517 vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; in ixgbe_set_sriov_queues()
537 fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); in ixgbe_set_sriov_queues()
540 /* remove the starting offset from the pool count */ in ixgbe_set_sriov_queues()
541 vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; in ixgbe_set_sriov_queues()
544 adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; in ixgbe_set_sriov_queues()
545 adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; in ixgbe_set_sriov_queues()
548 adapter->ring_feature[RING_F_RSS].indices = rss_i; in ixgbe_set_sriov_queues()
549 adapter->ring_feature[RING_F_RSS].mask = rss_m; in ixgbe_set_sriov_queues()
551 adapter->num_rx_pools = vmdq_i; in ixgbe_set_sriov_queues()
552 adapter->num_rx_queues_per_pool = rss_i; in ixgbe_set_sriov_queues()
554 adapter->num_rx_queues = vmdq_i * rss_i; in ixgbe_set_sriov_queues()
555 adapter->num_tx_queues = vmdq_i * rss_i; in ixgbe_set_sriov_queues()
556 adapter->num_xdp_queues = 0; in ixgbe_set_sriov_queues()
559 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; in ixgbe_set_sriov_queues()
567 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { in ixgbe_set_sriov_queues()
570 fcoe = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_set_sriov_queues()
573 fcoe_i = min_t(u16, fcoe_i, fcoe->limit); in ixgbe_set_sriov_queues()
577 fcoe->indices = fcoe_i; in ixgbe_set_sriov_queues()
578 fcoe->offset = vmdq_i * rss_i; in ixgbe_set_sriov_queues()
583 /* limit indices to rss_i if MSI-X is disabled */ in ixgbe_set_sriov_queues()
584 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) in ixgbe_set_sriov_queues()
588 fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); in ixgbe_set_sriov_queues()
589 fcoe->offset = fcoe_i - fcoe->indices; in ixgbe_set_sriov_queues()
591 fcoe_i -= rss_i; in ixgbe_set_sriov_queues()
595 adapter->num_tx_queues += fcoe_i; in ixgbe_set_sriov_queues()
596 adapter->num_rx_queues += fcoe_i; in ixgbe_set_sriov_queues()
606 netdev_set_num_tc(adapter->netdev, 1); in ixgbe_set_sriov_queues()
609 netdev_set_tc_queue(adapter->netdev, 0, in ixgbe_set_sriov_queues()
610 adapter->num_rx_queues_per_pool, 0); in ixgbe_set_sriov_queues()
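
Here the FCoE headroom at line 537 is counted in queues rather than pools: fcoe_i = 128 - vmdq_i * queues_per_pool. As an illustration with assumed numbers, 32 VMDq pools at 2 queues per pool consume 64 of the 128 queue registers, leaving fcoe_i = 64 before the fcoe->limit and MSI-X clamps at lines 573-589 are applied.
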
616 * ixgbe_set_rss_queues - Allocate queues for RSS
625 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_rss_queues()
630 f = &adapter->ring_feature[RING_F_RSS]; in ixgbe_set_rss_queues()
631 rss_i = f->limit; in ixgbe_set_rss_queues()
633 f->indices = rss_i; in ixgbe_set_rss_queues()
635 if (hw->mac.type < ixgbe_mac_X550) in ixgbe_set_rss_queues()
636 f->mask = IXGBE_RSS_16Q_MASK; in ixgbe_set_rss_queues()
638 f->mask = IXGBE_RSS_64Q_MASK; in ixgbe_set_rss_queues()
641 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; in ixgbe_set_rss_queues()
648 if (rss_i > 1 && adapter->atr_sample_rate) { in ixgbe_set_rss_queues()
649 f = &adapter->ring_feature[RING_F_FDIR]; in ixgbe_set_rss_queues()
651 rss_i = f->indices = f->limit; in ixgbe_set_rss_queues()
653 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) in ixgbe_set_rss_queues()
654 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; in ixgbe_set_rss_queues()
666 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { in ixgbe_set_rss_queues()
667 struct net_device *dev = adapter->netdev; in ixgbe_set_rss_queues()
670 f = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_set_rss_queues()
673 fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); in ixgbe_set_rss_queues()
674 fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); in ixgbe_set_rss_queues()
676 /* limit indices to rss_i if MSI-X is disabled */ in ixgbe_set_rss_queues()
677 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) in ixgbe_set_rss_queues()
681 f->indices = min_t(u16, fcoe_i, f->limit); in ixgbe_set_rss_queues()
682 f->offset = fcoe_i - f->indices; in ixgbe_set_rss_queues()
687 adapter->num_rx_queues = rss_i; in ixgbe_set_rss_queues()
688 adapter->num_tx_queues = rss_i; in ixgbe_set_rss_queues()
689 adapter->num_xdp_queues = ixgbe_xdp_queues(adapter); in ixgbe_set_rss_queues()
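
Two details worth noting in the RSS sizing: when ATR is active (line 648) rss_i is raised to the Flow Director limit rather than the RSS limit, and when FCoE rides on top of RSS its region is placed so that f->offset at line 682 is fcoe_i - f->indices. With illustrative values fcoe_i = 12 and f->limit = 8, FCoE gets indices = 8 and offset = 4, i.e. it shares queues 4 through 11 with RSS.
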
695 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
708 adapter->num_rx_queues = 1; in ixgbe_set_num_queues()
709 adapter->num_tx_queues = 1; in ixgbe_set_num_queues()
710 adapter->num_xdp_queues = 0; in ixgbe_set_num_queues()
711 adapter->num_rx_pools = 1; in ixgbe_set_num_queues()
712 adapter->num_rx_queues_per_pool = 1; in ixgbe_set_num_queues()
729 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
732 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
733 * return a negative error code if unable to acquire MSI-X vectors for any
738 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_acquire_msix_vectors()
744 vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); in ixgbe_acquire_msix_vectors()
745 vectors = max(vectors, adapter->num_xdp_queues); in ixgbe_acquire_msix_vectors()
747 /* It is easy to be greedy for MSI-X vectors. However, it really in ixgbe_acquire_msix_vectors()
754 /* Some vectors are necessary for non-queue interrupts */ in ixgbe_acquire_msix_vectors()
757 /* Hardware can only support a maximum of hw.mac->max_msix_vectors. in ixgbe_acquire_msix_vectors()
763 vectors = min_t(int, vectors, hw->mac.max_msix_vectors); in ixgbe_acquire_msix_vectors()
765 /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] in ixgbe_acquire_msix_vectors()
770 adapter->msix_entries = kcalloc(vectors, in ixgbe_acquire_msix_vectors()
773 if (!adapter->msix_entries) in ixgbe_acquire_msix_vectors()
774 return -ENOMEM; in ixgbe_acquire_msix_vectors()
777 adapter->msix_entries[i].entry = i; in ixgbe_acquire_msix_vectors()
779 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, in ixgbe_acquire_msix_vectors()
784 * acquiring within the specified range of MSI-X vectors in ixgbe_acquire_msix_vectors()
786 e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", in ixgbe_acquire_msix_vectors()
789 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; in ixgbe_acquire_msix_vectors()
790 kfree(adapter->msix_entries); in ixgbe_acquire_msix_vectors()
791 adapter->msix_entries = NULL; in ixgbe_acquire_msix_vectors()
799 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; in ixgbe_acquire_msix_vectors()
804 vectors -= NON_Q_VECTORS; in ixgbe_acquire_msix_vectors()
805 adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); in ixgbe_acquire_msix_vectors()
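
The vector budget above starts from the largest of the Rx, Tx and XDP ring counts (so each ring pair can own a vector), adds the non-queue vectors, and is capped by what the MAC advertises; only then is pci_enable_msix_range() asked for anything between the driver minimum and that budget. A user-space sketch of the budgeting arithmetic only, with illustrative constants standing in for the driver's NON_Q_VECTORS and hw.mac.max_msix_vectors:

    #include <stdio.h>

    /* illustrative stand-ins, not the driver's definitions */
    #define NON_QUEUE_VECTORS 1
    #define HW_MAX_MSIX       64

    static int msix_budget(int rxq, int txq, int xdpq)
    {
        int vectors = rxq > txq ? rxq : txq;

        if (xdpq > vectors)
            vectors = xdpq;
        vectors += NON_QUEUE_VECTORS;   /* other-cause interrupt */
        if (vectors > HW_MAX_MSIX)
            vectors = HW_MAX_MSIX;      /* hardware ceiling */
        return vectors;
    }

    int main(void)
    {
        printf("%d MSI-X vectors requested\n", msix_budget(16, 16, 8));
        return 0;
    }
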
813 ring->next = head->ring; in ixgbe_add_ring()
814 head->ring = ring; in ixgbe_add_ring()
815 head->count++; in ixgbe_add_ring()
816 head->next_update = jiffies + 1; in ixgbe_add_ring()
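
ixgbe_add_ring() is a plain head insert into the ring container's singly linked list, so ixgbe_for_each_ring() later walks the rings in the reverse of the order they were added; head->next_update is primed to jiffies + 1 for the adaptive ITR bookkeeping.
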
820 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
831 * We allocate one q_vector. If allocation fails we return -ENOMEM.
839 int node = dev_to_node(&adapter->pdev->dev); in ixgbe_alloc_q_vector()
842 int cpu = -1; in ixgbe_alloc_q_vector()
844 u8 tcs = adapter->hw_tcs; in ixgbe_alloc_q_vector() local
849 if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { in ixgbe_alloc_q_vector()
850 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_alloc_q_vector()
851 if (rss_i > 1 && adapter->atr_sample_rate) { in ixgbe_alloc_q_vector()
864 return -ENOMEM; in ixgbe_alloc_q_vector()
867 if (cpu != -1) in ixgbe_alloc_q_vector()
868 cpumask_set_cpu(cpu, &q_vector->affinity_mask); in ixgbe_alloc_q_vector()
869 q_vector->numa_node = node; in ixgbe_alloc_q_vector()
873 q_vector->cpu = -1; in ixgbe_alloc_q_vector()
877 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll); in ixgbe_alloc_q_vector()
880 adapter->q_vector[v_idx] = q_vector; in ixgbe_alloc_q_vector()
881 q_vector->adapter = adapter; in ixgbe_alloc_q_vector()
882 q_vector->v_idx = v_idx; in ixgbe_alloc_q_vector()
885 q_vector->tx.work_limit = adapter->tx_work_limit; in ixgbe_alloc_q_vector()
888 q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS | in ixgbe_alloc_q_vector()
890 q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS | in ixgbe_alloc_q_vector()
896 if (adapter->tx_itr_setting == 1) in ixgbe_alloc_q_vector()
897 q_vector->itr = IXGBE_12K_ITR; in ixgbe_alloc_q_vector()
899 q_vector->itr = adapter->tx_itr_setting; in ixgbe_alloc_q_vector()
902 if (adapter->rx_itr_setting == 1) in ixgbe_alloc_q_vector()
903 q_vector->itr = IXGBE_20K_ITR; in ixgbe_alloc_q_vector()
905 q_vector->itr = adapter->rx_itr_setting; in ixgbe_alloc_q_vector()
909 ring = q_vector->ring; in ixgbe_alloc_q_vector()
913 ring->dev = &adapter->pdev->dev; in ixgbe_alloc_q_vector()
914 ring->netdev = adapter->netdev; in ixgbe_alloc_q_vector()
917 ring->q_vector = q_vector; in ixgbe_alloc_q_vector()
920 ixgbe_add_ring(ring, &q_vector->tx); in ixgbe_alloc_q_vector()
923 ring->count = adapter->tx_ring_count; in ixgbe_alloc_q_vector()
924 ring->queue_index = txr_idx; in ixgbe_alloc_q_vector()
927 WRITE_ONCE(adapter->tx_ring[txr_idx], ring); in ixgbe_alloc_q_vector()
930 txr_count--; in ixgbe_alloc_q_vector()
939 ring->dev = &adapter->pdev->dev; in ixgbe_alloc_q_vector()
940 ring->netdev = adapter->netdev; in ixgbe_alloc_q_vector()
943 ring->q_vector = q_vector; in ixgbe_alloc_q_vector()
946 ixgbe_add_ring(ring, &q_vector->tx); in ixgbe_alloc_q_vector()
949 ring->count = adapter->tx_ring_count; in ixgbe_alloc_q_vector()
950 ring->queue_index = xdp_idx; in ixgbe_alloc_q_vector()
952 spin_lock_init(&ring->tx_lock); in ixgbe_alloc_q_vector()
955 WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); in ixgbe_alloc_q_vector()
958 xdp_count--; in ixgbe_alloc_q_vector()
967 ring->dev = &adapter->pdev->dev; in ixgbe_alloc_q_vector()
968 ring->netdev = adapter->netdev; in ixgbe_alloc_q_vector()
971 ring->q_vector = q_vector; in ixgbe_alloc_q_vector()
974 ixgbe_add_ring(ring, &q_vector->rx); in ixgbe_alloc_q_vector()
980 if (adapter->hw.mac.type == ixgbe_mac_82599EB) in ixgbe_alloc_q_vector()
981 set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); in ixgbe_alloc_q_vector()
984 if (adapter->netdev->fcoe_mtu) { in ixgbe_alloc_q_vector()
986 f = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_alloc_q_vector()
987 if ((rxr_idx >= f->offset) && in ixgbe_alloc_q_vector()
988 (rxr_idx < f->offset + f->indices)) in ixgbe_alloc_q_vector()
989 set_bit(__IXGBE_RX_FCOE, &ring->state); in ixgbe_alloc_q_vector()
994 ring->count = adapter->rx_ring_count; in ixgbe_alloc_q_vector()
995 ring->queue_index = rxr_idx; in ixgbe_alloc_q_vector()
998 WRITE_ONCE(adapter->rx_ring[rxr_idx], ring); in ixgbe_alloc_q_vector()
1001 rxr_count--; in ixgbe_alloc_q_vector()
1012 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
1022 struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; in ixgbe_free_q_vector()
1025 ixgbe_for_each_ring(ring, q_vector->tx) { in ixgbe_free_q_vector()
1027 WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL); in ixgbe_free_q_vector()
1029 WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL); in ixgbe_free_q_vector()
1032 ixgbe_for_each_ring(ring, q_vector->rx) in ixgbe_free_q_vector()
1033 WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL); in ixgbe_free_q_vector()
1035 adapter->q_vector[v_idx] = NULL; in ixgbe_free_q_vector()
1036 __netif_napi_del(&q_vector->napi); in ixgbe_free_q_vector()
1047 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
1051 * return -ENOMEM.
1055 int q_vectors = adapter->num_q_vectors; in ixgbe_alloc_q_vectors()
1056 int rxr_remaining = adapter->num_rx_queues; in ixgbe_alloc_q_vectors()
1057 int txr_remaining = adapter->num_tx_queues; in ixgbe_alloc_q_vectors()
1058 int xdp_remaining = adapter->num_xdp_queues; in ixgbe_alloc_q_vectors()
1062 /* only one q_vector if MSI-X is disabled. */ in ixgbe_alloc_q_vectors()
1063 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) in ixgbe_alloc_q_vectors()
1075 rxr_remaining--; in ixgbe_alloc_q_vectors()
1081 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); in ixgbe_alloc_q_vectors()
1082 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); in ixgbe_alloc_q_vectors()
1083 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx); in ixgbe_alloc_q_vectors()
1094 rxr_remaining -= rqpv; in ixgbe_alloc_q_vectors()
1095 txr_remaining -= tqpv; in ixgbe_alloc_q_vectors()
1096 xdp_remaining -= xqpv; in ixgbe_alloc_q_vectors()
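
The DIV_ROUND_UP() calls spread the remaining rings as evenly as possible over the q_vectors that are still unassigned, recomputing each share after every vector. A small sketch with illustrative counts:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* illustrative counts: 10 Rx rings spread over 4 q_vectors */
        int remaining = 10, q_vectors = 4;

        for (int v_idx = 0; v_idx < q_vectors; v_idx++) {
            int rqpv = DIV_ROUND_UP(remaining, q_vectors - v_idx);

            printf("q_vector %d gets %d Rx rings\n", v_idx, rqpv);
            remaining -= rqpv;  /* prints 3, 3, 2, 2 */
        }
        return 0;
    }
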
1102 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_alloc_q_vectors()
1103 if (adapter->rx_ring[i]) in ixgbe_alloc_q_vectors()
1104 adapter->rx_ring[i]->ring_idx = i; in ixgbe_alloc_q_vectors()
1107 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_alloc_q_vectors()
1108 if (adapter->tx_ring[i]) in ixgbe_alloc_q_vectors()
1109 adapter->tx_ring[i]->ring_idx = i; in ixgbe_alloc_q_vectors()
1112 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbe_alloc_q_vectors()
1113 if (adapter->xdp_ring[i]) in ixgbe_alloc_q_vectors()
1114 adapter->xdp_ring[i]->ring_idx = i; in ixgbe_alloc_q_vectors()
1120 adapter->num_tx_queues = 0; in ixgbe_alloc_q_vectors()
1121 adapter->num_xdp_queues = 0; in ixgbe_alloc_q_vectors()
1122 adapter->num_rx_queues = 0; in ixgbe_alloc_q_vectors()
1123 adapter->num_q_vectors = 0; in ixgbe_alloc_q_vectors()
1125 while (v_idx--) in ixgbe_alloc_q_vectors()
1128 return -ENOMEM; in ixgbe_alloc_q_vectors()
1132 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
1141 int v_idx = adapter->num_q_vectors; in ixgbe_free_q_vectors()
1143 adapter->num_tx_queues = 0; in ixgbe_free_q_vectors()
1144 adapter->num_xdp_queues = 0; in ixgbe_free_q_vectors()
1145 adapter->num_rx_queues = 0; in ixgbe_free_q_vectors()
1146 adapter->num_q_vectors = 0; in ixgbe_free_q_vectors()
1148 while (v_idx--) in ixgbe_free_q_vectors()
1154 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { in ixgbe_reset_interrupt_capability()
1155 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; in ixgbe_reset_interrupt_capability()
1156 pci_disable_msix(adapter->pdev); in ixgbe_reset_interrupt_capability()
1157 kfree(adapter->msix_entries); in ixgbe_reset_interrupt_capability()
1158 adapter->msix_entries = NULL; in ixgbe_reset_interrupt_capability()
1159 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { in ixgbe_reset_interrupt_capability()
1160 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; in ixgbe_reset_interrupt_capability()
1161 pci_disable_msi(adapter->pdev); in ixgbe_reset_interrupt_capability()
1166 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
1176 /* We will try to get MSI-X interrupts first */ in ixgbe_set_interrupt_capability()
1180 /* At this point, we do not have MSI-X capabilities. We need to in ixgbe_set_interrupt_capability()
1181 * reconfigure or disable various features which require MSI-X in ixgbe_set_interrupt_capability()
1186 if (adapter->hw_tcs > 1) { in ixgbe_set_interrupt_capability()
1187 e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n"); in ixgbe_set_interrupt_capability()
1188 netdev_reset_tc(adapter->netdev); in ixgbe_set_interrupt_capability()
1190 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_set_interrupt_capability()
1191 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; in ixgbe_set_interrupt_capability()
1193 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; in ixgbe_set_interrupt_capability()
1194 adapter->temp_dcb_cfg.pfc_mode_enable = false; in ixgbe_set_interrupt_capability()
1195 adapter->dcb_cfg.pfc_mode_enable = false; in ixgbe_set_interrupt_capability()
1198 adapter->hw_tcs = 0; in ixgbe_set_interrupt_capability()
1199 adapter->dcb_cfg.num_tcs.pg_tcs = 1; in ixgbe_set_interrupt_capability()
1200 adapter->dcb_cfg.num_tcs.pfc_tcs = 1; in ixgbe_set_interrupt_capability()
1202 /* Disable SR-IOV support */ in ixgbe_set_interrupt_capability()
1203 e_dev_warn("Disabling SR-IOV support\n"); in ixgbe_set_interrupt_capability()
1208 adapter->ring_feature[RING_F_RSS].limit = 1; in ixgbe_set_interrupt_capability()
1214 adapter->num_q_vectors = 1; in ixgbe_set_interrupt_capability()
1216 err = pci_enable_msi(adapter->pdev); in ixgbe_set_interrupt_capability()
1221 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; in ixgbe_set_interrupt_capability()
1225 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
1229 * - Kernel support (MSI, MSI-X)
1230 * - which can be user-defined (via MODULE_PARAM)
1231 * - Hardware queue count (num_*_queues)
1232 * - defined by miscellaneous hardware support/features (RSS, etc.)
1253 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", in ixgbe_init_interrupt_scheme()
1254 adapter->num_rx_queues, adapter->num_tx_queues, in ixgbe_init_interrupt_scheme()
1255 adapter->num_xdp_queues); in ixgbe_init_interrupt_scheme()
1257 set_bit(__IXGBE_DOWN, &adapter->state); in ixgbe_init_interrupt_scheme()
1267 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
1271 * to pre-load conditions
1275 adapter->num_tx_queues = 0; in ixgbe_clear_interrupt_scheme()
1276 adapter->num_xdp_queues = 0; in ixgbe_clear_interrupt_scheme()
1277 adapter->num_rx_queues = 0; in ixgbe_clear_interrupt_scheme()
1287 u16 i = tx_ring->next_to_use; in ixgbe_tx_ctxtdesc()
1292 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ixgbe_tx_ctxtdesc()
1297 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in ixgbe_tx_ctxtdesc()
1298 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); in ixgbe_tx_ctxtdesc()
1299 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in ixgbe_tx_ctxtdesc()
1300 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in ixgbe_tx_ctxtdesc()
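
The tail of the listing is ixgbe_tx_ctxtdesc(): line 1287 takes the current next_to_use slot, line 1292 advances it with a wrap back to 0 at the end of the ring, and lines 1297-1300 fill the four little-endian words of the advanced Tx context descriptor (VLAN/MAC/IP lengths, the FCoE/IPsec SA index, the type/TUCMD command word, and the MSS/L4-length/index word).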