// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/net/intel/libie/rx.h>

#include "iavf.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_IMPORT_NS(LIBETH);
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;

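/**
 * iavf_status_to_errno - convert iavf status codes to kernel errnos
 * @status: status returned by the shared/admin queue code
 *
 * Returns 0 on success, otherwise the negative errno that best matches
 * the given iavf status code.
 */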
int iavf_status_to_errno(enum iavf_status status)
{
	switch (status) {
	case IAVF_SUCCESS:
		return 0;
	case IAVF_ERR_PARAM:
	case IAVF_ERR_MAC_TYPE:
	case IAVF_ERR_INVALID_MAC_ADDR:
	case IAVF_ERR_INVALID_LINK_SETTINGS:
	case IAVF_ERR_INVALID_PD_ID:
	case IAVF_ERR_INVALID_QP_ID:
	case IAVF_ERR_INVALID_CQ_ID:
	case IAVF_ERR_INVALID_CEQ_ID:
	case IAVF_ERR_INVALID_AEQ_ID:
	case IAVF_ERR_INVALID_SIZE:
	case IAVF_ERR_INVALID_ARP_INDEX:
	case IAVF_ERR_INVALID_FPM_FUNC_ID:
	case IAVF_ERR_QP_INVALID_MSG_SIZE:
	case IAVF_ERR_INVALID_FRAG_COUNT:
	case IAVF_ERR_INVALID_ALIGNMENT:
	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
	case IAVF_ERR_INVALID_VF_ID:
	case IAVF_ERR_INVALID_HMCFN_ID:
	case IAVF_ERR_INVALID_PBLE_INDEX:
	case IAVF_ERR_INVALID_SD_INDEX:
	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
	case IAVF_ERR_INVALID_SD_TYPE:
	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
		return -EINVAL;
	case IAVF_ERR_NVM:
	case IAVF_ERR_NVM_CHECKSUM:
	case IAVF_ERR_PHY:
	case IAVF_ERR_CONFIG:
	case IAVF_ERR_UNKNOWN_PHY:
	case IAVF_ERR_LINK_SETUP:
	case IAVF_ERR_ADAPTER_STOPPED:
	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
	case IAVF_ERR_RESET_FAILED:
	case IAVF_ERR_BAD_PTR:
	case IAVF_ERR_SWFW_SYNC:
	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
	case IAVF_ERR_QUEUE_EMPTY:
	case IAVF_ERR_FLUSHED_QUEUE:
	case IAVF_ERR_OPCODE_MISMATCH:
	case IAVF_ERR_CQP_COMPL_ERROR:
	case IAVF_ERR_BACKING_PAGE_ERROR:
	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
	case IAVF_ERR_MEMCPY_FAILED:
	case IAVF_ERR_SRQ_ENABLED:
	case IAVF_ERR_ADMIN_QUEUE_ERROR:
	case IAVF_ERR_ADMIN_QUEUE_FULL:
	case IAVF_ERR_BAD_RDMA_CQE:
	case IAVF_ERR_NVM_BLANK_MODE:
	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
	case IAVF_ERR_DIAG_TEST_FAILED:
	case IAVF_ERR_FIRMWARE_API_VERSION:
	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return -EIO;
	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
		return -ENODEV;
	case IAVF_ERR_NO_AVAILABLE_VSI:
	case IAVF_ERR_RING_FULL:
		return -ENOSPC;
	case IAVF_ERR_NO_MEMORY:
		return -ENOMEM;
	case IAVF_ERR_TIMEOUT:
	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
		return -ETIMEDOUT;
	case IAVF_ERR_NOT_IMPLEMENTED:
	case IAVF_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
		return -EALREADY;
	case IAVF_ERR_NOT_READY:
		return -EBUSY;
	case IAVF_ERR_BUF_TOO_SHORT:
		return -EMSGSIZE;
	}

	return -EIO;
}

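/**
 * virtchnl_status_to_errno - convert virtchnl status codes to kernel errnos
 * @v_status: status code returned by the PF over the virtchnl interface
 *
 * Returns 0 on success, otherwise the negative errno that best matches
 * the given virtchnl status code.
 */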
int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
	switch (v_status) {
	case VIRTCHNL_STATUS_SUCCESS:
		return 0;
	case VIRTCHNL_STATUS_ERR_PARAM:
	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
		return -EINVAL;
	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
		return -ENOMEM;
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
		return -EIO;
	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	}

	return -EIO;
}

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	return netdev_priv(pci_get_drvdata(pdev));
}

/**
 * iavf_is_reset_in_progress - Check if a reset is in progress
 * @adapter: board private structure
 */
static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
{
	if (adapter->state == __IAVF_RESETTING ||
	    adapter->flags & (IAVF_FLAG_RESET_PENDING |
			      IAVF_FLAG_RESET_NEEDED))
		return true;

	return false;
}

/**
 * iavf_wait_for_reset - Wait for reset to finish.
 * @adapter: board private structure
 *
 * Returns 0 if reset finished successfully, negative on timeout or interrupt.
 */
int iavf_wait_for_reset(struct iavf_adapter *adapter)
{
	int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
						   !iavf_is_reset_in_progress(adapter),
						   msecs_to_jiffies(5000));

	/* If ret < 0 then it means wait was interrupted.
	 * If ret == 0 then it means we got a timeout while waiting
	 * for reset to finish.
	 * If ret > 0 it means reset has finished.
	 */
	if (ret > 0)
		return 0;
	else if (ret < 0)
		return -EINTR;
	else
		return -EBUSY;
}

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem - wrapper for DMA memory freeing
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem - virt memory alloc wrapper
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
					struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem - virt memory free wrapper
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
	kfree(mem->va);
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
{
	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
	    !(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= flags;
		queue_work(adapter->wq, &adapter->reset_task);
	}
}

/**
 * iavf_schedule_aq_request - Set the flags and schedule aq request
 * @adapter: board private structure
 * @flags: requested aq flags
 **/
void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
{
	adapter->aq_required |= flags;
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for all queues
 * @adapter: board private structure
 **/
static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	if (adapter->state != __IAVF_REMOVE)
		/* schedule work on the private workqueue */
		queue_work(adapter->wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%u", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%u", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_update_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	for (u32 i = 0; i < adapter->num_active_queues; i++)
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
				 struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->vlan.vid == vlan.vid &&
		    f->vlan.tpid == vlan.tpid)
			return f;
	}

	return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
				struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->state = IAVF_VLAN_ADD;
		adapter->num_vlan_filters++;
		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->state = IAVF_VLAN_REMOVE;
		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f;

	/* re-add all VLAN filters */
	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_INACTIVE)
			f->state = IAVF_VLAN_ADD;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}

/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
	return adapter->num_vlan_filters;
}

/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
 * do not impose a limit as that maintains current behavior and for
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
 **/
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
	/* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
	 * never been a limit on the VF driver side
	 */
	if (VLAN_ALLOWED(adapter))
		return VLAN_N_VID;
	else if (VLAN_V2_ALLOWED(adapter))
		return adapter->vlan_v2_caps.filtering.max_filters;

	return 0;
}

/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 **/
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
	if (iavf_get_num_vlans_added(adapter) <
	    iavf_get_max_vlans_allowed(adapter))
		return false;

	return true;
}

86892fc5085SBrett Creeley /**
8695ec8b7d1SJesse Brandeburg * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
8705ec8b7d1SJesse Brandeburg * @netdev: network device struct
8715ec8b7d1SJesse Brandeburg * @proto: unused protocol data
8725ec8b7d1SJesse Brandeburg * @vid: VLAN tag
8735ec8b7d1SJesse Brandeburg **/
iavf_vlan_rx_add_vid(struct net_device * netdev,__always_unused __be16 proto,u16 vid)8745ec8b7d1SJesse Brandeburg static int iavf_vlan_rx_add_vid(struct net_device *netdev,
8755ec8b7d1SJesse Brandeburg __always_unused __be16 proto, u16 vid)
8765ec8b7d1SJesse Brandeburg {
8775ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
8785ec8b7d1SJesse Brandeburg
879964290ffSAhmed Zaki /* Do not track VLAN 0 filter, always added by the PF on VF init */
880964290ffSAhmed Zaki if (!vid)
881964290ffSAhmed Zaki return 0;
882964290ffSAhmed Zaki
88348ccc43eSBrett Creeley if (!VLAN_FILTERING_ALLOWED(adapter))
8845ec8b7d1SJesse Brandeburg return -EIO;
88542930142SAkeem G Abodunrin
88692fc5085SBrett Creeley if (iavf_max_vlans_added(adapter)) {
88792fc5085SBrett Creeley netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
88892fc5085SBrett Creeley iavf_get_max_vlans_allowed(adapter));
88992fc5085SBrett Creeley return -EIO;
89092fc5085SBrett Creeley }
89192fc5085SBrett Creeley
89248ccc43eSBrett Creeley if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
8935ec8b7d1SJesse Brandeburg return -ENOMEM;
89442930142SAkeem G Abodunrin
8955ec8b7d1SJesse Brandeburg return 0;
8965ec8b7d1SJesse Brandeburg }
8975ec8b7d1SJesse Brandeburg
8985ec8b7d1SJesse Brandeburg /**
8995ec8b7d1SJesse Brandeburg * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
9005ec8b7d1SJesse Brandeburg * @netdev: network device struct
9015ec8b7d1SJesse Brandeburg * @proto: unused protocol data
9025ec8b7d1SJesse Brandeburg * @vid: VLAN tag
9035ec8b7d1SJesse Brandeburg **/
iavf_vlan_rx_kill_vid(struct net_device * netdev,__always_unused __be16 proto,u16 vid)9045ec8b7d1SJesse Brandeburg static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
9055ec8b7d1SJesse Brandeburg __always_unused __be16 proto, u16 vid)
9065ec8b7d1SJesse Brandeburg {
9075ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
9085ec8b7d1SJesse Brandeburg
909964290ffSAhmed Zaki /* We do not track VLAN 0 filter */
910964290ffSAhmed Zaki if (!vid)
911964290ffSAhmed Zaki return 0;
912964290ffSAhmed Zaki
91348ccc43eSBrett Creeley iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
91442930142SAkeem G Abodunrin return 0;
9155ec8b7d1SJesse Brandeburg }
9165ec8b7d1SJesse Brandeburg
/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->add_handled = false;
		f->is_new_mac = true;
		f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

9775ec8b7d1SJesse Brandeburg /**
97835a2443dSMateusz Palczewski * iavf_replace_primary_mac - Replace current primary address
97935a2443dSMateusz Palczewski * @adapter: board private structure
98035a2443dSMateusz Palczewski * @new_mac: new MAC address to be applied
9815ec8b7d1SJesse Brandeburg *
98235a2443dSMateusz Palczewski * Replace current dev_addr and send request to PF for removal of previous
98335a2443dSMateusz Palczewski * primary MAC address filter and addition of new primary MAC filter.
98435a2443dSMateusz Palczewski * Return 0 for success, -ENOMEM for failure.
98535a2443dSMateusz Palczewski *
98635a2443dSMateusz Palczewski * Do not call this with mac_vlan_list_lock!
9875ec8b7d1SJesse Brandeburg **/
iavf_replace_primary_mac(struct iavf_adapter * adapter,const u8 * new_mac)988a4aadf0fSPrzemek Kitszel static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
98935a2443dSMateusz Palczewski const u8 *new_mac)
9905ec8b7d1SJesse Brandeburg {
991f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
99261f723e6SPrzemek Kitszel struct iavf_mac_filter *new_f;
99361f723e6SPrzemek Kitszel struct iavf_mac_filter *old_f;
9945ec8b7d1SJesse Brandeburg
9955ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->mac_vlan_list_lock);
9965ec8b7d1SJesse Brandeburg
99761f723e6SPrzemek Kitszel new_f = iavf_add_filter(adapter, new_mac);
99861f723e6SPrzemek Kitszel if (!new_f) {
99961f723e6SPrzemek Kitszel spin_unlock_bh(&adapter->mac_vlan_list_lock);
100061f723e6SPrzemek Kitszel return -ENOMEM;
100135a2443dSMateusz Palczewski }
100235a2443dSMateusz Palczewski
100361f723e6SPrzemek Kitszel old_f = iavf_find_filter(adapter, hw->mac.addr);
100461f723e6SPrzemek Kitszel if (old_f) {
100561f723e6SPrzemek Kitszel old_f->is_primary = false;
100661f723e6SPrzemek Kitszel old_f->remove = true;
10075ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
10085ec8b7d1SJesse Brandeburg }
100961f723e6SPrzemek Kitszel /* Always send the request to add if changing primary MAC,
101035a2443dSMateusz Palczewski * even if filter is already present on the list
101135a2443dSMateusz Palczewski */
101261f723e6SPrzemek Kitszel new_f->is_primary = true;
101361f723e6SPrzemek Kitszel new_f->add = true;
101435a2443dSMateusz Palczewski ether_addr_copy(hw->mac.addr, new_mac);
10155ec8b7d1SJesse Brandeburg
10165ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->mac_vlan_list_lock);
10175ec8b7d1SJesse Brandeburg
1018a3e839d5SMateusz Palczewski /* schedule the watchdog task to immediately process the request */
101995260816SPetr Oros iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER);
102035a2443dSMateusz Palczewski return 0;
102135a2443dSMateusz Palczewski }
10225ec8b7d1SJesse Brandeburg
102335a2443dSMateusz Palczewski /**
102435a2443dSMateusz Palczewski * iavf_is_mac_set_handled - check if the PF has handled the set MAC request
102535a2443dSMateusz Palczewski * @netdev: network interface device structure
102635a2443dSMateusz Palczewski * @macaddr: MAC address to set
102735a2443dSMateusz Palczewski *
102835a2443dSMateusz Palczewski * Returns true on success, false on failure
102935a2443dSMateusz Palczewski */
103035a2443dSMateusz Palczewski static bool iavf_is_mac_set_handled(struct net_device *netdev,
103135a2443dSMateusz Palczewski const u8 *macaddr)
103235a2443dSMateusz Palczewski {
103335a2443dSMateusz Palczewski struct iavf_adapter *adapter = netdev_priv(netdev);
103435a2443dSMateusz Palczewski struct iavf_mac_filter *f;
103535a2443dSMateusz Palczewski bool ret = false;
103635a2443dSMateusz Palczewski
103735a2443dSMateusz Palczewski spin_lock_bh(&adapter->mac_vlan_list_lock);
103835a2443dSMateusz Palczewski
103935a2443dSMateusz Palczewski f = iavf_find_filter(adapter, macaddr);
104035a2443dSMateusz Palczewski
104135a2443dSMateusz Palczewski if (!f || (!f->add && f->add_handled))
104235a2443dSMateusz Palczewski ret = true;
104335a2443dSMateusz Palczewski
104435a2443dSMateusz Palczewski spin_unlock_bh(&adapter->mac_vlan_list_lock);
104535a2443dSMateusz Palczewski
104635a2443dSMateusz Palczewski return ret;
104735a2443dSMateusz Palczewski }
104835a2443dSMateusz Palczewski
104935a2443dSMateusz Palczewski /**
105035a2443dSMateusz Palczewski * iavf_set_mac - NDO callback to set port MAC address
105135a2443dSMateusz Palczewski * @netdev: network interface device structure
105235a2443dSMateusz Palczewski * @p: pointer to an address structure
105335a2443dSMateusz Palczewski *
105435a2443dSMateusz Palczewski * Returns 0 on success, negative on failure
105535a2443dSMateusz Palczewski */
105635a2443dSMateusz Palczewski static int iavf_set_mac(struct net_device *netdev, void *p)
105735a2443dSMateusz Palczewski {
105835a2443dSMateusz Palczewski struct iavf_adapter *adapter = netdev_priv(netdev);
105935a2443dSMateusz Palczewski struct sockaddr *addr = p;
106035a2443dSMateusz Palczewski int ret;
106135a2443dSMateusz Palczewski
106235a2443dSMateusz Palczewski if (!is_valid_ether_addr(addr->sa_data))
106335a2443dSMateusz Palczewski return -EADDRNOTAVAIL;
106435a2443dSMateusz Palczewski
106535a2443dSMateusz Palczewski ret = iavf_replace_primary_mac(adapter, addr->sa_data);
106635a2443dSMateusz Palczewski
106735a2443dSMateusz Palczewski if (ret)
106835a2443dSMateusz Palczewski return ret;
106935a2443dSMateusz Palczewski
1070f66b98c8SSylwester Dziedziuch ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
1071f66b98c8SSylwester Dziedziuch iavf_is_mac_set_handled(netdev, addr->sa_data),
1072f66b98c8SSylwester Dziedziuch msecs_to_jiffies(2500));
107335a2443dSMateusz Palczewski
107435a2443dSMateusz Palczewski /* If ret < 0, the wait was interrupted.
107535a2443dSMateusz Palczewski * If ret == 0, we timed out waiting for a response from the PF.
107635a2443dSMateusz Palczewski * Otherwise the PF responded to the set MAC request; check whether the
107735a2443dSMateusz Palczewski * netdev MAC was updated to the requested address. If it was, setting
107835a2443dSMateusz Palczewski * the MAC succeeded, otherwise it failed and we return -EACCES.
107935a2443dSMateusz Palczewski */
108035a2443dSMateusz Palczewski if (ret < 0)
108135a2443dSMateusz Palczewski return ret;
108235a2443dSMateusz Palczewski
108335a2443dSMateusz Palczewski if (!ret)
108435a2443dSMateusz Palczewski return -EAGAIN;
108535a2443dSMateusz Palczewski
108635a2443dSMateusz Palczewski if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
108735a2443dSMateusz Palczewski return -EACCES;
108835a2443dSMateusz Palczewski
108935a2443dSMateusz Palczewski return 0;
10905ec8b7d1SJesse Brandeburg }
10915ec8b7d1SJesse Brandeburg
10925ec8b7d1SJesse Brandeburg /**
10935ec8b7d1SJesse Brandeburg * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
10945ec8b7d1SJesse Brandeburg * @netdev: the netdevice
10955ec8b7d1SJesse Brandeburg * @addr: address to add
10965ec8b7d1SJesse Brandeburg *
10975ec8b7d1SJesse Brandeburg * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
10985ec8b7d1SJesse Brandeburg * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
10995ec8b7d1SJesse Brandeburg */
11005ec8b7d1SJesse Brandeburg static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
11015ec8b7d1SJesse Brandeburg {
11025ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
11035ec8b7d1SJesse Brandeburg
11045ec8b7d1SJesse Brandeburg if (iavf_add_filter(adapter, addr))
11055ec8b7d1SJesse Brandeburg return 0;
11065ec8b7d1SJesse Brandeburg else
11075ec8b7d1SJesse Brandeburg return -ENOMEM;
11085ec8b7d1SJesse Brandeburg }
11095ec8b7d1SJesse Brandeburg
11105ec8b7d1SJesse Brandeburg /**
11115ec8b7d1SJesse Brandeburg * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
11125ec8b7d1SJesse Brandeburg * @netdev: the netdevice
11135ec8b7d1SJesse Brandeburg * @addr: address to remove
11145ec8b7d1SJesse Brandeburg *
11155ec8b7d1SJesse Brandeburg * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
11165ec8b7d1SJesse Brandeburg * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
11175ec8b7d1SJesse Brandeburg */
11185ec8b7d1SJesse Brandeburg static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
11195ec8b7d1SJesse Brandeburg {
11205ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
11215ec8b7d1SJesse Brandeburg struct iavf_mac_filter *f;
11225ec8b7d1SJesse Brandeburg
11235ec8b7d1SJesse Brandeburg /* Under some circumstances, we might receive a request to delete
11245ec8b7d1SJesse Brandeburg * our own device address from our uc list. Because we store the
11255ec8b7d1SJesse Brandeburg * device address in the VSI's MAC/VLAN filter list, we need to ignore
11265ec8b7d1SJesse Brandeburg * such requests and not delete our device address from this list.
11275ec8b7d1SJesse Brandeburg */
11285ec8b7d1SJesse Brandeburg if (ether_addr_equal(addr, netdev->dev_addr))
11295ec8b7d1SJesse Brandeburg return 0;
11305ec8b7d1SJesse Brandeburg
11315ec8b7d1SJesse Brandeburg f = iavf_find_filter(adapter, addr);
11325ec8b7d1SJesse Brandeburg if (f) {
11335ec8b7d1SJesse Brandeburg f->remove = true;
11345ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
11355ec8b7d1SJesse Brandeburg }
11365ec8b7d1SJesse Brandeburg return 0;
11375ec8b7d1SJesse Brandeburg }
11385ec8b7d1SJesse Brandeburg
11395ec8b7d1SJesse Brandeburg /**
1140221465deSBrett Creeley * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
1141221465deSBrett Creeley * @adapter: device specific adapter
1142221465deSBrett Creeley */
1143221465deSBrett Creeley bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
1144221465deSBrett Creeley {
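	/* For example, if the cached flags have IFF_PROMISC set but the
	 * current netdev flags do not, the XOR below leaves IFF_PROMISC set,
	 * so the masked result is non-zero and a promiscuous mode change is
	 * reported.
	 */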
1145221465deSBrett Creeley return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
1146221465deSBrett Creeley (IFF_PROMISC | IFF_ALLMULTI);
1147221465deSBrett Creeley }
1148221465deSBrett Creeley
1149221465deSBrett Creeley /**
11505ec8b7d1SJesse Brandeburg * iavf_set_rx_mode - NDO callback to set the netdev filters
11515ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
11525ec8b7d1SJesse Brandeburg **/
11535ec8b7d1SJesse Brandeburg static void iavf_set_rx_mode(struct net_device *netdev)
11545ec8b7d1SJesse Brandeburg {
11555ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
11565ec8b7d1SJesse Brandeburg
11575ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->mac_vlan_list_lock);
11585ec8b7d1SJesse Brandeburg __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
11595ec8b7d1SJesse Brandeburg __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
11605ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->mac_vlan_list_lock);
11615ec8b7d1SJesse Brandeburg
1162221465deSBrett Creeley spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
1163221465deSBrett Creeley if (iavf_promiscuous_mode_changed(adapter))
1164221465deSBrett Creeley adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
1165221465deSBrett Creeley spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
11665ec8b7d1SJesse Brandeburg }
11675ec8b7d1SJesse Brandeburg
11685ec8b7d1SJesse Brandeburg /**
11695ec8b7d1SJesse Brandeburg * iavf_napi_enable_all - enable NAPI on all queue vectors
11705ec8b7d1SJesse Brandeburg * @adapter: board private structure
11715ec8b7d1SJesse Brandeburg **/
11725ec8b7d1SJesse Brandeburg static void iavf_napi_enable_all(struct iavf_adapter *adapter)
11735ec8b7d1SJesse Brandeburg {
11745ec8b7d1SJesse Brandeburg int q_idx;
117556184e01SJesse Brandeburg struct iavf_q_vector *q_vector;
11765ec8b7d1SJesse Brandeburg int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
11775ec8b7d1SJesse Brandeburg
11785ec8b7d1SJesse Brandeburg for (q_idx = 0; q_idx < q_vectors; q_idx++) {
11795ec8b7d1SJesse Brandeburg struct napi_struct *napi;
11805ec8b7d1SJesse Brandeburg
11815ec8b7d1SJesse Brandeburg q_vector = &adapter->q_vectors[q_idx];
11825ec8b7d1SJesse Brandeburg napi = &q_vector->napi;
11835ec8b7d1SJesse Brandeburg napi_enable(napi);
11845ec8b7d1SJesse Brandeburg }
11855ec8b7d1SJesse Brandeburg }
11865ec8b7d1SJesse Brandeburg
11875ec8b7d1SJesse Brandeburg /**
11885ec8b7d1SJesse Brandeburg * iavf_napi_disable_all - disable NAPI on all queue vectors
11895ec8b7d1SJesse Brandeburg * @adapter: board private structure
11905ec8b7d1SJesse Brandeburg **/
11915ec8b7d1SJesse Brandeburg static void iavf_napi_disable_all(struct iavf_adapter *adapter)
11925ec8b7d1SJesse Brandeburg {
11935ec8b7d1SJesse Brandeburg int q_idx;
119456184e01SJesse Brandeburg struct iavf_q_vector *q_vector;
11955ec8b7d1SJesse Brandeburg int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
11965ec8b7d1SJesse Brandeburg
11975ec8b7d1SJesse Brandeburg for (q_idx = 0; q_idx < q_vectors; q_idx++) {
11985ec8b7d1SJesse Brandeburg q_vector = &adapter->q_vectors[q_idx];
11995ec8b7d1SJesse Brandeburg napi_disable(&q_vector->napi);
12005ec8b7d1SJesse Brandeburg }
12015ec8b7d1SJesse Brandeburg }
12025ec8b7d1SJesse Brandeburg
12035ec8b7d1SJesse Brandeburg /**
12045ec8b7d1SJesse Brandeburg * iavf_configure - set up transmit and receive data structures
12055ec8b7d1SJesse Brandeburg * @adapter: board private structure
12065ec8b7d1SJesse Brandeburg **/
12075ec8b7d1SJesse Brandeburg static void iavf_configure(struct iavf_adapter *adapter)
12085ec8b7d1SJesse Brandeburg {
12095ec8b7d1SJesse Brandeburg struct net_device *netdev = adapter->netdev;
12105ec8b7d1SJesse Brandeburg int i;
12115ec8b7d1SJesse Brandeburg
12125ec8b7d1SJesse Brandeburg iavf_set_rx_mode(netdev);
12135ec8b7d1SJesse Brandeburg
12145ec8b7d1SJesse Brandeburg iavf_configure_tx(adapter);
12155ec8b7d1SJesse Brandeburg iavf_configure_rx(adapter);
12165ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
12175ec8b7d1SJesse Brandeburg
12185ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++) {
121956184e01SJesse Brandeburg struct iavf_ring *ring = &adapter->rx_rings[i];
12205ec8b7d1SJesse Brandeburg
122156184e01SJesse Brandeburg iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
12225ec8b7d1SJesse Brandeburg }
12235ec8b7d1SJesse Brandeburg }
12245ec8b7d1SJesse Brandeburg
12255ec8b7d1SJesse Brandeburg /**
12265ec8b7d1SJesse Brandeburg * iavf_up_complete - Finish the last steps of bringing up a connection
12275ec8b7d1SJesse Brandeburg * @adapter: board private structure
12285ec8b7d1SJesse Brandeburg *
122977361cb9SMichal Schmidt * Expects to be called while holding crit_lock.
12305ec8b7d1SJesse Brandeburg **/
12315ec8b7d1SJesse Brandeburg static void iavf_up_complete(struct iavf_adapter *adapter)
12325ec8b7d1SJesse Brandeburg {
123345eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_RUNNING);
123456184e01SJesse Brandeburg clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
12355ec8b7d1SJesse Brandeburg
12365ec8b7d1SJesse Brandeburg iavf_napi_enable_all(adapter);
12375ec8b7d1SJesse Brandeburg
123895260816SPetr Oros iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
12395ec8b7d1SJesse Brandeburg }
12405ec8b7d1SJesse Brandeburg
12415ec8b7d1SJesse Brandeburg /**
124211c12adcSMichal Jaron * iavf_clear_mac_vlan_filters - Remove MAC and VLAN filters not yet sent to
124311c12adcSMichal Jaron * the PF and mark the others for removal.
124411c12adcSMichal Jaron * @adapter: board private structure
124511c12adcSMichal Jaron **/
124611c12adcSMichal Jaron static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
124711c12adcSMichal Jaron {
124811c12adcSMichal Jaron struct iavf_vlan_filter *vlf, *vlftmp;
124911c12adcSMichal Jaron struct iavf_mac_filter *f, *ftmp;
125011c12adcSMichal Jaron
125111c12adcSMichal Jaron spin_lock_bh(&adapter->mac_vlan_list_lock);
125211c12adcSMichal Jaron /* clear the sync flag on all filters */
125311c12adcSMichal Jaron __dev_uc_unsync(adapter->netdev, NULL);
125411c12adcSMichal Jaron __dev_mc_unsync(adapter->netdev, NULL);
125511c12adcSMichal Jaron
125611c12adcSMichal Jaron /* remove all MAC filters */
125711c12adcSMichal Jaron list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
125811c12adcSMichal Jaron list) {
125911c12adcSMichal Jaron if (f->add) {
126011c12adcSMichal Jaron list_del(&f->list);
126111c12adcSMichal Jaron kfree(f);
126211c12adcSMichal Jaron } else {
126311c12adcSMichal Jaron f->remove = true;
126411c12adcSMichal Jaron }
126511c12adcSMichal Jaron }
126611c12adcSMichal Jaron
12679c85b7faSAhmed Zaki /* disable all VLAN filters */
126811c12adcSMichal Jaron list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
12699c85b7faSAhmed Zaki list)
12709c85b7faSAhmed Zaki vlf->state = IAVF_VLAN_DISABLE;
12719c85b7faSAhmed Zaki
127211c12adcSMichal Jaron spin_unlock_bh(&adapter->mac_vlan_list_lock);
127311c12adcSMichal Jaron }
127411c12adcSMichal Jaron
127511c12adcSMichal Jaron /**
127611c12adcSMichal Jaron * iavf_clear_cloud_filters - Remove cloud filters not yet sent to the PF and
127711c12adcSMichal Jaron * mark the others for removal.
127811c12adcSMichal Jaron * @adapter: board private structure
127911c12adcSMichal Jaron **/
128011c12adcSMichal Jaron static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
128111c12adcSMichal Jaron {
128211c12adcSMichal Jaron struct iavf_cloud_filter *cf, *cftmp;
128311c12adcSMichal Jaron
128411c12adcSMichal Jaron /* remove all cloud filters */
128511c12adcSMichal Jaron spin_lock_bh(&adapter->cloud_filter_list_lock);
128611c12adcSMichal Jaron list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
128711c12adcSMichal Jaron list) {
128811c12adcSMichal Jaron if (cf->add) {
128911c12adcSMichal Jaron list_del(&cf->list);
129011c12adcSMichal Jaron kfree(cf);
129111c12adcSMichal Jaron adapter->num_cloud_filters--;
129211c12adcSMichal Jaron } else {
129311c12adcSMichal Jaron cf->del = true;
129411c12adcSMichal Jaron }
129511c12adcSMichal Jaron }
129611c12adcSMichal Jaron spin_unlock_bh(&adapter->cloud_filter_list_lock);
129711c12adcSMichal Jaron }
129811c12adcSMichal Jaron
129911c12adcSMichal Jaron /**
130011c12adcSMichal Jaron * iavf_clear_fdir_filters - Remove fdir filters not yet sent to the PF and
130111c12adcSMichal Jaron * mark the others for removal.
130211c12adcSMichal Jaron * @adapter: board private structure
130311c12adcSMichal Jaron **/
130411c12adcSMichal Jaron static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
130511c12adcSMichal Jaron {
13063a0b5a29SPiotr Gardocki struct iavf_fdir_fltr *fdir;
130711c12adcSMichal Jaron
130811c12adcSMichal Jaron /* remove all Flow Director filters */
130911c12adcSMichal Jaron spin_lock_bh(&adapter->fdir_fltr_lock);
13103a0b5a29SPiotr Gardocki list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
131111c12adcSMichal Jaron if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
13123a0b5a29SPiotr Gardocki /* Cancel a request, keep filter as inactive */
13133a0b5a29SPiotr Gardocki fdir->state = IAVF_FDIR_FLTR_INACTIVE;
13143a0b5a29SPiotr Gardocki } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
13153a0b5a29SPiotr Gardocki fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
13163a0b5a29SPiotr Gardocki /* Disable filters which are active or have a pending
13173a0b5a29SPiotr Gardocki * request to PF to be added
13183a0b5a29SPiotr Gardocki */
13193a0b5a29SPiotr Gardocki fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
132011c12adcSMichal Jaron }
132111c12adcSMichal Jaron }
132211c12adcSMichal Jaron spin_unlock_bh(&adapter->fdir_fltr_lock);
132311c12adcSMichal Jaron }
132411c12adcSMichal Jaron
132511c12adcSMichal Jaron /**
132611c12adcSMichal Jaron * iavf_clear_adv_rss_conf - Remove advanced RSS configuration not yet sent to
132711c12adcSMichal Jaron * the PF and mark the rest for removal.
132811c12adcSMichal Jaron * @adapter: board private structure
132911c12adcSMichal Jaron **/
133011c12adcSMichal Jaron static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
133111c12adcSMichal Jaron {
133211c12adcSMichal Jaron struct iavf_adv_rss *rss, *rsstmp;
133311c12adcSMichal Jaron
133411c12adcSMichal Jaron /* remove all advance RSS configuration */
133511c12adcSMichal Jaron spin_lock_bh(&adapter->adv_rss_lock);
133611c12adcSMichal Jaron list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
133711c12adcSMichal Jaron list) {
133811c12adcSMichal Jaron if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
133911c12adcSMichal Jaron list_del(&rss->list);
134011c12adcSMichal Jaron kfree(rss);
134111c12adcSMichal Jaron } else {
134211c12adcSMichal Jaron rss->state = IAVF_ADV_RSS_DEL_REQUEST;
134311c12adcSMichal Jaron }
134411c12adcSMichal Jaron }
134511c12adcSMichal Jaron spin_unlock_bh(&adapter->adv_rss_lock);
134611c12adcSMichal Jaron }
134711c12adcSMichal Jaron
134811c12adcSMichal Jaron /**
134956184e01SJesse Brandeburg * iavf_down - Shutdown the connection processing
13505ec8b7d1SJesse Brandeburg * @adapter: board private structure
13515ec8b7d1SJesse Brandeburg *
135277361cb9SMichal Schmidt * Expects to be called while holding crit_lock.
13535ec8b7d1SJesse Brandeburg **/
13545ec8b7d1SJesse Brandeburg void iavf_down(struct iavf_adapter *adapter)
13555ec8b7d1SJesse Brandeburg {
13565ec8b7d1SJesse Brandeburg struct net_device *netdev = adapter->netdev;
13575ec8b7d1SJesse Brandeburg
13585ec8b7d1SJesse Brandeburg if (adapter->state <= __IAVF_DOWN_PENDING)
13595ec8b7d1SJesse Brandeburg return;
13605ec8b7d1SJesse Brandeburg
13615ec8b7d1SJesse Brandeburg netif_carrier_off(netdev);
13625ec8b7d1SJesse Brandeburg netif_tx_disable(netdev);
13635ec8b7d1SJesse Brandeburg adapter->link_up = false;
13645ec8b7d1SJesse Brandeburg iavf_napi_disable_all(adapter);
13655ec8b7d1SJesse Brandeburg iavf_irq_disable(adapter);
13665ec8b7d1SJesse Brandeburg
136711c12adcSMichal Jaron iavf_clear_mac_vlan_filters(adapter);
136811c12adcSMichal Jaron iavf_clear_cloud_filters(adapter);
136911c12adcSMichal Jaron iavf_clear_fdir_filters(adapter);
137011c12adcSMichal Jaron iavf_clear_adv_rss_conf(adapter);
13710aaeb4fbSHaiyue Wang
13726a0d989dSMichal Schmidt if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
13736a0d989dSMichal Schmidt return;
13746a0d989dSMichal Schmidt
13756a0d989dSMichal Schmidt if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
13765ec8b7d1SJesse Brandeburg /* cancel any current operation */
13775ec8b7d1SJesse Brandeburg adapter->current_op = VIRTCHNL_OP_UNKNOWN;
13785ec8b7d1SJesse Brandeburg /* Schedule operations to close down the HW. Don't wait
13795ec8b7d1SJesse Brandeburg * here for this to complete. The watchdog is still running
13805ec8b7d1SJesse Brandeburg * and it will take care of this.
13815ec8b7d1SJesse Brandeburg */
138211c12adcSMichal Jaron if (!list_empty(&adapter->mac_filter_list))
138311c12adcSMichal Jaron adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
138411c12adcSMichal Jaron if (!list_empty(&adapter->vlan_filter_list))
13855ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
138611c12adcSMichal Jaron if (!list_empty(&adapter->cloud_filter_list))
13875ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
138811c12adcSMichal Jaron if (!list_empty(&adapter->fdir_list_head))
13890dbfbabbSHaiyue Wang adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
139011c12adcSMichal Jaron if (!list_empty(&adapter->adv_rss_list_head))
13910aaeb4fbSHaiyue Wang adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
13925ec8b7d1SJesse Brandeburg }
13935ec8b7d1SJesse Brandeburg
139495260816SPetr Oros iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES);
13955ec8b7d1SJesse Brandeburg }
13965ec8b7d1SJesse Brandeburg
13975ec8b7d1SJesse Brandeburg /**
13985ec8b7d1SJesse Brandeburg * iavf_acquire_msix_vectors - Setup the MSIX capability
13995ec8b7d1SJesse Brandeburg * @adapter: board private structure
14005ec8b7d1SJesse Brandeburg * @vectors: number of vectors to request
14015ec8b7d1SJesse Brandeburg *
14025ec8b7d1SJesse Brandeburg * Work with the OS to set up the MSIX vectors needed.
14035ec8b7d1SJesse Brandeburg *
14045ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
14055ec8b7d1SJesse Brandeburg **/
14065ec8b7d1SJesse Brandeburg static int
14075ec8b7d1SJesse Brandeburg iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
14085ec8b7d1SJesse Brandeburg {
14095ec8b7d1SJesse Brandeburg int err, vector_threshold;
14105ec8b7d1SJesse Brandeburg
14115ec8b7d1SJesse Brandeburg /* We'll want at least 3 (vector_threshold):
14125ec8b7d1SJesse Brandeburg * 0) Other (Admin Queue and link, mostly)
14135ec8b7d1SJesse Brandeburg * 1) TxQ[0] Cleanup
14145ec8b7d1SJesse Brandeburg * 2) RxQ[0] Cleanup
14155ec8b7d1SJesse Brandeburg */
14165ec8b7d1SJesse Brandeburg vector_threshold = MIN_MSIX_COUNT;
14175ec8b7d1SJesse Brandeburg
14185ec8b7d1SJesse Brandeburg /* The more we get, the more we will assign to Tx/Rx Cleanup
14195ec8b7d1SJesse Brandeburg * for the separate queues...where Rx Cleanup >= Tx Cleanup.
14205ec8b7d1SJesse Brandeburg * Right now, we simply care about how many we'll get; we'll
14215ec8b7d1SJesse Brandeburg * set them up later while requesting irq's.
14225ec8b7d1SJesse Brandeburg */
14235ec8b7d1SJesse Brandeburg err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
14245ec8b7d1SJesse Brandeburg vector_threshold, vectors);
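	/* pci_enable_msix_range() returns the number of vectors actually
	 * allocated, anywhere between vector_threshold and vectors, or a
	 * negative errno if even the minimum could not be allocated.
	 */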
14255ec8b7d1SJesse Brandeburg if (err < 0) {
14265ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
14275ec8b7d1SJesse Brandeburg kfree(adapter->msix_entries);
14285ec8b7d1SJesse Brandeburg adapter->msix_entries = NULL;
14295ec8b7d1SJesse Brandeburg return err;
14305ec8b7d1SJesse Brandeburg }
14315ec8b7d1SJesse Brandeburg
14325ec8b7d1SJesse Brandeburg /* Adjust for only the vectors we'll use, which is minimum
14335ec8b7d1SJesse Brandeburg * of max_msix_q_vectors + NONQ_VECS, or the number of
14345ec8b7d1SJesse Brandeburg * vectors we were allocated.
14355ec8b7d1SJesse Brandeburg */
14365ec8b7d1SJesse Brandeburg adapter->num_msix_vectors = err;
14375ec8b7d1SJesse Brandeburg return 0;
14385ec8b7d1SJesse Brandeburg }
14395ec8b7d1SJesse Brandeburg
14405ec8b7d1SJesse Brandeburg /**
14415ec8b7d1SJesse Brandeburg * iavf_free_queues - Free memory for all rings
14425ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
14435ec8b7d1SJesse Brandeburg *
14445ec8b7d1SJesse Brandeburg * Free all of the memory associated with queue pairs.
14455ec8b7d1SJesse Brandeburg **/
14465ec8b7d1SJesse Brandeburg static void iavf_free_queues(struct iavf_adapter *adapter)
14475ec8b7d1SJesse Brandeburg {
14485ec8b7d1SJesse Brandeburg if (!adapter->vsi_res)
14495ec8b7d1SJesse Brandeburg return;
14505ec8b7d1SJesse Brandeburg adapter->num_active_queues = 0;
14515ec8b7d1SJesse Brandeburg kfree(adapter->tx_rings);
14525ec8b7d1SJesse Brandeburg adapter->tx_rings = NULL;
14535ec8b7d1SJesse Brandeburg kfree(adapter->rx_rings);
14545ec8b7d1SJesse Brandeburg adapter->rx_rings = NULL;
14555ec8b7d1SJesse Brandeburg }
14565ec8b7d1SJesse Brandeburg
14575ec8b7d1SJesse Brandeburg /**
1458ccd219d2SBrett Creeley * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
1459ccd219d2SBrett Creeley * @adapter: board private structure
1460ccd219d2SBrett Creeley *
1461ccd219d2SBrett Creeley * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
1462ccd219d2SBrett Creeley * stripped in certain descriptor fields. Instead of checking the offload
1463ccd219d2SBrett Creeley * capability bits in the hot path, cache the tag location in the
1464ccd219d2SBrett Creeley * ring-specific flags.
1465ccd219d2SBrett Creeley */
1466ccd219d2SBrett Creeley void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
1467ccd219d2SBrett Creeley {
1468ccd219d2SBrett Creeley int i;
1469ccd219d2SBrett Creeley
1470ccd219d2SBrett Creeley for (i = 0; i < adapter->num_active_queues; i++) {
1471ccd219d2SBrett Creeley struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1472ccd219d2SBrett Creeley struct iavf_ring *rx_ring = &adapter->rx_rings[i];
1473ccd219d2SBrett Creeley
1474ccd219d2SBrett Creeley /* prevent multiple L2TAG bits being set after VFR */
1475ccd219d2SBrett Creeley tx_ring->flags &=
1476ccd219d2SBrett Creeley ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1477ccd219d2SBrett Creeley IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
1478ccd219d2SBrett Creeley rx_ring->flags &=
1479ccd219d2SBrett Creeley ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1480ccd219d2SBrett Creeley IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
1481ccd219d2SBrett Creeley
1482ccd219d2SBrett Creeley if (VLAN_ALLOWED(adapter)) {
1483ccd219d2SBrett Creeley tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1484ccd219d2SBrett Creeley rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1485ccd219d2SBrett Creeley } else if (VLAN_V2_ALLOWED(adapter)) {
1486ccd219d2SBrett Creeley struct virtchnl_vlan_supported_caps *stripping_support;
1487ccd219d2SBrett Creeley struct virtchnl_vlan_supported_caps *insertion_support;
1488ccd219d2SBrett Creeley
1489ccd219d2SBrett Creeley stripping_support =
1490ccd219d2SBrett Creeley &adapter->vlan_v2_caps.offloads.stripping_support;
1491ccd219d2SBrett Creeley insertion_support =
1492ccd219d2SBrett Creeley &adapter->vlan_v2_caps.offloads.insertion_support;
1493ccd219d2SBrett Creeley
1494ccd219d2SBrett Creeley if (stripping_support->outer) {
1495ccd219d2SBrett Creeley if (stripping_support->outer &
1496ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1497ccd219d2SBrett Creeley rx_ring->flags |=
1498ccd219d2SBrett Creeley IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1499ccd219d2SBrett Creeley else if (stripping_support->outer &
1500ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1501ccd219d2SBrett Creeley rx_ring->flags |=
1502ccd219d2SBrett Creeley IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1503ccd219d2SBrett Creeley } else if (stripping_support->inner) {
1504ccd219d2SBrett Creeley if (stripping_support->inner &
1505ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1506ccd219d2SBrett Creeley rx_ring->flags |=
1507ccd219d2SBrett Creeley IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1508ccd219d2SBrett Creeley else if (stripping_support->inner &
1509ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1510ccd219d2SBrett Creeley rx_ring->flags |=
1511ccd219d2SBrett Creeley IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1512ccd219d2SBrett Creeley }
1513ccd219d2SBrett Creeley
1514ccd219d2SBrett Creeley if (insertion_support->outer) {
1515ccd219d2SBrett Creeley if (insertion_support->outer &
1516ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1517ccd219d2SBrett Creeley tx_ring->flags |=
1518ccd219d2SBrett Creeley IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1519ccd219d2SBrett Creeley else if (insertion_support->outer &
1520ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1521ccd219d2SBrett Creeley tx_ring->flags |=
1522ccd219d2SBrett Creeley IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1523ccd219d2SBrett Creeley } else if (insertion_support->inner) {
1524ccd219d2SBrett Creeley if (insertion_support->inner &
1525ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1526ccd219d2SBrett Creeley tx_ring->flags |=
1527ccd219d2SBrett Creeley IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1528ccd219d2SBrett Creeley else if (insertion_support->inner &
1529ccd219d2SBrett Creeley VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1530ccd219d2SBrett Creeley tx_ring->flags |=
1531ccd219d2SBrett Creeley IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1532ccd219d2SBrett Creeley }
1533ccd219d2SBrett Creeley }
1534ccd219d2SBrett Creeley }
1535ccd219d2SBrett Creeley }
1536ccd219d2SBrett Creeley
1537ccd219d2SBrett Creeley /**
15385ec8b7d1SJesse Brandeburg * iavf_alloc_queues - Allocate memory for all rings
15395ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
15405ec8b7d1SJesse Brandeburg *
15415ec8b7d1SJesse Brandeburg * We allocate one ring per queue at run-time since we don't know the
15425ec8b7d1SJesse Brandeburg * number of queues at compile-time. The polling_netdev array is
15435ec8b7d1SJesse Brandeburg * intended for Multiqueue, but should work fine with a single queue.
15445ec8b7d1SJesse Brandeburg **/
15455ec8b7d1SJesse Brandeburg static int iavf_alloc_queues(struct iavf_adapter *adapter)
15465ec8b7d1SJesse Brandeburg {
15475ec8b7d1SJesse Brandeburg int i, num_active_queues;
15485ec8b7d1SJesse Brandeburg
15495ec8b7d1SJesse Brandeburg /* If we're reallocating queues during a reset, we don't yet know for
15505ec8b7d1SJesse Brandeburg * certain that the PF gave us the number of queues we asked for, but
15515ec8b7d1SJesse Brandeburg * we'll assume it did. Once the basic reset is finished, we'll confirm
15525ec8b7d1SJesse Brandeburg * this when we start negotiating the config with the PF.
15535ec8b7d1SJesse Brandeburg */
15545ec8b7d1SJesse Brandeburg if (adapter->num_req_queues)
15555ec8b7d1SJesse Brandeburg num_active_queues = adapter->num_req_queues;
15565ec8b7d1SJesse Brandeburg else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
15575ec8b7d1SJesse Brandeburg adapter->num_tc)
15585ec8b7d1SJesse Brandeburg num_active_queues = adapter->ch_config.total_qps;
15595ec8b7d1SJesse Brandeburg else
15605ec8b7d1SJesse Brandeburg num_active_queues = min_t(int,
15615ec8b7d1SJesse Brandeburg adapter->vsi_res->num_queue_pairs,
15625ec8b7d1SJesse Brandeburg (int)(num_online_cpus()));
15635ec8b7d1SJesse Brandeburg
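	/* For example, with no explicit queue request and ADq disabled, a VF
	 * granted 16 queue pairs on a host with 8 online CPUs ends up with
	 * num_active_queues == 8.
	 */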
15645ec8b7d1SJesse Brandeburg
15655ec8b7d1SJesse Brandeburg adapter->tx_rings = kcalloc(num_active_queues,
156656184e01SJesse Brandeburg sizeof(struct iavf_ring), GFP_KERNEL);
15675ec8b7d1SJesse Brandeburg if (!adapter->tx_rings)
15685ec8b7d1SJesse Brandeburg goto err_out;
15695ec8b7d1SJesse Brandeburg adapter->rx_rings = kcalloc(num_active_queues,
157056184e01SJesse Brandeburg sizeof(struct iavf_ring), GFP_KERNEL);
15715ec8b7d1SJesse Brandeburg if (!adapter->rx_rings)
15725ec8b7d1SJesse Brandeburg goto err_out;
15735ec8b7d1SJesse Brandeburg
15745ec8b7d1SJesse Brandeburg for (i = 0; i < num_active_queues; i++) {
157556184e01SJesse Brandeburg struct iavf_ring *tx_ring;
157656184e01SJesse Brandeburg struct iavf_ring *rx_ring;
15775ec8b7d1SJesse Brandeburg
15785ec8b7d1SJesse Brandeburg tx_ring = &adapter->tx_rings[i];
15795ec8b7d1SJesse Brandeburg
15805ec8b7d1SJesse Brandeburg tx_ring->queue_index = i;
15815ec8b7d1SJesse Brandeburg tx_ring->netdev = adapter->netdev;
15825ec8b7d1SJesse Brandeburg tx_ring->dev = &adapter->pdev->dev;
15835ec8b7d1SJesse Brandeburg tx_ring->count = adapter->tx_desc_count;
158456184e01SJesse Brandeburg tx_ring->itr_setting = IAVF_ITR_TX_DEF;
15855ec8b7d1SJesse Brandeburg if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
158656184e01SJesse Brandeburg tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
15875ec8b7d1SJesse Brandeburg
15885ec8b7d1SJesse Brandeburg rx_ring = &adapter->rx_rings[i];
15895ec8b7d1SJesse Brandeburg rx_ring->queue_index = i;
15905ec8b7d1SJesse Brandeburg rx_ring->netdev = adapter->netdev;
15915ec8b7d1SJesse Brandeburg rx_ring->count = adapter->rx_desc_count;
159256184e01SJesse Brandeburg rx_ring->itr_setting = IAVF_ITR_RX_DEF;
15935ec8b7d1SJesse Brandeburg }
15945ec8b7d1SJesse Brandeburg
15955ec8b7d1SJesse Brandeburg adapter->num_active_queues = num_active_queues;
15965ec8b7d1SJesse Brandeburg
1597ccd219d2SBrett Creeley iavf_set_queue_vlan_tag_loc(adapter);
1598ccd219d2SBrett Creeley
15995ec8b7d1SJesse Brandeburg return 0;
16005ec8b7d1SJesse Brandeburg
16015ec8b7d1SJesse Brandeburg err_out:
16025ec8b7d1SJesse Brandeburg iavf_free_queues(adapter);
16035ec8b7d1SJesse Brandeburg return -ENOMEM;
16045ec8b7d1SJesse Brandeburg }
16055ec8b7d1SJesse Brandeburg
16065ec8b7d1SJesse Brandeburg /**
16075ec8b7d1SJesse Brandeburg * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
16085ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
16095ec8b7d1SJesse Brandeburg *
16105ec8b7d1SJesse Brandeburg * Attempt to configure the interrupts using the best available
16115ec8b7d1SJesse Brandeburg * capabilities of the hardware and the kernel.
16125ec8b7d1SJesse Brandeburg **/
16135ec8b7d1SJesse Brandeburg static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
16145ec8b7d1SJesse Brandeburg {
16155ec8b7d1SJesse Brandeburg int vector, v_budget;
16165ec8b7d1SJesse Brandeburg int pairs = 0;
16175ec8b7d1SJesse Brandeburg int err = 0;
16185ec8b7d1SJesse Brandeburg
16195ec8b7d1SJesse Brandeburg if (!adapter->vsi_res) {
16205ec8b7d1SJesse Brandeburg err = -EIO;
16215ec8b7d1SJesse Brandeburg goto out;
16225ec8b7d1SJesse Brandeburg }
16235ec8b7d1SJesse Brandeburg pairs = adapter->num_active_queues;
16245ec8b7d1SJesse Brandeburg
16255ec8b7d1SJesse Brandeburg /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
16265ec8b7d1SJesse Brandeburg * us much good if we have more vectors than CPUs. However, we already
16275ec8b7d1SJesse Brandeburg * limit the total number of queues by the number of CPUs so we do not
16285ec8b7d1SJesse Brandeburg * need any further limiting here.
16295ec8b7d1SJesse Brandeburg */
16305ec8b7d1SJesse Brandeburg v_budget = min_t(int, pairs + NONQ_VECS,
16315ec8b7d1SJesse Brandeburg (int)adapter->vf_res->max_vectors);
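	/* For example, with 8 queue pairs the budget is 8 + NONQ_VECS, capped
	 * at the max_vectors advertised by the PF; the OS may still grant
	 * fewer vectors when iavf_acquire_msix_vectors() runs below.
	 */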
16325ec8b7d1SJesse Brandeburg
16335ec8b7d1SJesse Brandeburg adapter->msix_entries = kcalloc(v_budget,
16345ec8b7d1SJesse Brandeburg sizeof(struct msix_entry), GFP_KERNEL);
16355ec8b7d1SJesse Brandeburg if (!adapter->msix_entries) {
16365ec8b7d1SJesse Brandeburg err = -ENOMEM;
16375ec8b7d1SJesse Brandeburg goto out;
16385ec8b7d1SJesse Brandeburg }
16395ec8b7d1SJesse Brandeburg
16405ec8b7d1SJesse Brandeburg for (vector = 0; vector < v_budget; vector++)
16415ec8b7d1SJesse Brandeburg adapter->msix_entries[vector].entry = vector;
16425ec8b7d1SJesse Brandeburg
16435ec8b7d1SJesse Brandeburg err = iavf_acquire_msix_vectors(adapter, v_budget);
1644d1639a17SAhmed Zaki if (!err)
1645d1639a17SAhmed Zaki iavf_schedule_finish_config(adapter);
16465ec8b7d1SJesse Brandeburg
16475ec8b7d1SJesse Brandeburg out:
16485ec8b7d1SJesse Brandeburg return err;
16495ec8b7d1SJesse Brandeburg }
16505ec8b7d1SJesse Brandeburg
16515ec8b7d1SJesse Brandeburg /**
165256184e01SJesse Brandeburg * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
16535ec8b7d1SJesse Brandeburg * @adapter: board private structure
16545ec8b7d1SJesse Brandeburg *
16555ec8b7d1SJesse Brandeburg * Return 0 on success, negative on failure
16565ec8b7d1SJesse Brandeburg **/
16575ec8b7d1SJesse Brandeburg static int iavf_config_rss_aq(struct iavf_adapter *adapter)
16585ec8b7d1SJesse Brandeburg {
16597af36e32SAlice Michael struct iavf_aqc_get_set_rss_key_data *rss_key =
16607af36e32SAlice Michael (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1661f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
1662bae569d0SMateusz Palczewski enum iavf_status status;
16635ec8b7d1SJesse Brandeburg
16645ec8b7d1SJesse Brandeburg if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
16655ec8b7d1SJesse Brandeburg /* bail because we already have a command pending */
16665ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
16675ec8b7d1SJesse Brandeburg adapter->current_op);
16685ec8b7d1SJesse Brandeburg return -EBUSY;
16695ec8b7d1SJesse Brandeburg }
16705ec8b7d1SJesse Brandeburg
1671bae569d0SMateusz Palczewski status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1672bae569d0SMateusz Palczewski if (status) {
16735ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1674bae569d0SMateusz Palczewski iavf_stat_str(hw, status),
16755ec8b7d1SJesse Brandeburg iavf_aq_str(hw, hw->aq.asq_last_status));
1676bae569d0SMateusz Palczewski return iavf_status_to_errno(status);
16775ec8b7d1SJesse Brandeburg
16785ec8b7d1SJesse Brandeburg }
16795ec8b7d1SJesse Brandeburg
1680bae569d0SMateusz Palczewski status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
16815ec8b7d1SJesse Brandeburg adapter->rss_lut, adapter->rss_lut_size);
1682bae569d0SMateusz Palczewski if (status) {
16835ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1684bae569d0SMateusz Palczewski iavf_stat_str(hw, status),
16855ec8b7d1SJesse Brandeburg iavf_aq_str(hw, hw->aq.asq_last_status));
1686bae569d0SMateusz Palczewski return iavf_status_to_errno(status);
16875ec8b7d1SJesse Brandeburg }
16885ec8b7d1SJesse Brandeburg
1689bae569d0SMateusz Palczewski return 0;
16905ec8b7d1SJesse Brandeburg
16915ec8b7d1SJesse Brandeburg }
16925ec8b7d1SJesse Brandeburg
16935ec8b7d1SJesse Brandeburg /**
16945ec8b7d1SJesse Brandeburg * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
16955ec8b7d1SJesse Brandeburg * @adapter: board private structure
16965ec8b7d1SJesse Brandeburg *
16975ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
16985ec8b7d1SJesse Brandeburg **/
16995ec8b7d1SJesse Brandeburg static int iavf_config_rss_reg(struct iavf_adapter *adapter)
17005ec8b7d1SJesse Brandeburg {
1701f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
17025ec8b7d1SJesse Brandeburg u32 *dw;
17035ec8b7d1SJesse Brandeburg u16 i;
17045ec8b7d1SJesse Brandeburg
17055ec8b7d1SJesse Brandeburg dw = (u32 *)adapter->rss_key;
17065ec8b7d1SJesse Brandeburg for (i = 0; i <= adapter->rss_key_size / 4; i++)
1707f1cad2ceSJesse Brandeburg wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
17085ec8b7d1SJesse Brandeburg
17095ec8b7d1SJesse Brandeburg dw = (u32 *)adapter->rss_lut;
17105ec8b7d1SJesse Brandeburg for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1711f1cad2ceSJesse Brandeburg wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
17125ec8b7d1SJesse Brandeburg
1713f1cad2ceSJesse Brandeburg iavf_flush(hw);
17145ec8b7d1SJesse Brandeburg
17155ec8b7d1SJesse Brandeburg return 0;
17165ec8b7d1SJesse Brandeburg }
17175ec8b7d1SJesse Brandeburg
17185ec8b7d1SJesse Brandeburg /**
17195ec8b7d1SJesse Brandeburg * iavf_config_rss - Configure RSS keys and lut
17205ec8b7d1SJesse Brandeburg * @adapter: board private structure
17215ec8b7d1SJesse Brandeburg *
17225ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
17235ec8b7d1SJesse Brandeburg **/
17245ec8b7d1SJesse Brandeburg int iavf_config_rss(struct iavf_adapter *adapter)
17255ec8b7d1SJesse Brandeburg {
17265ec8b7d1SJesse Brandeburg
17275ec8b7d1SJesse Brandeburg if (RSS_PF(adapter)) {
17285ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
17295ec8b7d1SJesse Brandeburg IAVF_FLAG_AQ_SET_RSS_KEY;
17305ec8b7d1SJesse Brandeburg return 0;
17315ec8b7d1SJesse Brandeburg } else if (RSS_AQ(adapter)) {
17325ec8b7d1SJesse Brandeburg return iavf_config_rss_aq(adapter);
17335ec8b7d1SJesse Brandeburg } else {
17345ec8b7d1SJesse Brandeburg return iavf_config_rss_reg(adapter);
17355ec8b7d1SJesse Brandeburg }
17365ec8b7d1SJesse Brandeburg }
17375ec8b7d1SJesse Brandeburg
17385ec8b7d1SJesse Brandeburg /**
17395ec8b7d1SJesse Brandeburg * iavf_fill_rss_lut - Fill the lut with default values
17405ec8b7d1SJesse Brandeburg * @adapter: board private structure
17415ec8b7d1SJesse Brandeburg **/
17425ec8b7d1SJesse Brandeburg static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
17435ec8b7d1SJesse Brandeburg {
17445ec8b7d1SJesse Brandeburg u16 i;
17455ec8b7d1SJesse Brandeburg
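	/* For example, with 4 active queues a 64-entry LUT becomes the
	 * repeating pattern 0, 1, 2, 3, 0, 1, 2, 3, ... which spreads RSS
	 * hash buckets round-robin across the enabled queues.
	 */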
17465ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->rss_lut_size; i++)
17475ec8b7d1SJesse Brandeburg adapter->rss_lut[i] = i % adapter->num_active_queues;
17485ec8b7d1SJesse Brandeburg }
17495ec8b7d1SJesse Brandeburg
17505ec8b7d1SJesse Brandeburg /**
17515ec8b7d1SJesse Brandeburg * iavf_init_rss - Prepare for RSS
17525ec8b7d1SJesse Brandeburg * @adapter: board private structure
17535ec8b7d1SJesse Brandeburg *
17545ec8b7d1SJesse Brandeburg * Return 0 on success, negative on failure
17555ec8b7d1SJesse Brandeburg **/
17565ec8b7d1SJesse Brandeburg static int iavf_init_rss(struct iavf_adapter *adapter)
17575ec8b7d1SJesse Brandeburg {
1758f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
17595ec8b7d1SJesse Brandeburg
17605ec8b7d1SJesse Brandeburg if (!RSS_PF(adapter)) {
17615ec8b7d1SJesse Brandeburg /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
17625ec8b7d1SJesse Brandeburg if (adapter->vf_res->vf_cap_flags &
17635ec8b7d1SJesse Brandeburg VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
176456184e01SJesse Brandeburg adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
17655ec8b7d1SJesse Brandeburg else
176656184e01SJesse Brandeburg adapter->hena = IAVF_DEFAULT_RSS_HENA;
17675ec8b7d1SJesse Brandeburg
1768f1cad2ceSJesse Brandeburg wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1769f1cad2ceSJesse Brandeburg wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
17705ec8b7d1SJesse Brandeburg }
17715ec8b7d1SJesse Brandeburg
17725ec8b7d1SJesse Brandeburg iavf_fill_rss_lut(adapter);
17735ec8b7d1SJesse Brandeburg netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
17745ec8b7d1SJesse Brandeburg
1775c3fec56eSMinghao Chi return iavf_config_rss(adapter);
17765ec8b7d1SJesse Brandeburg }
17775ec8b7d1SJesse Brandeburg
17785ec8b7d1SJesse Brandeburg /**
17795ec8b7d1SJesse Brandeburg * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
17805ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
17815ec8b7d1SJesse Brandeburg *
17825ec8b7d1SJesse Brandeburg * We allocate one q_vector per queue interrupt. If allocation fails we
17835ec8b7d1SJesse Brandeburg * return -ENOMEM.
17845ec8b7d1SJesse Brandeburg **/
17855ec8b7d1SJesse Brandeburg static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
17865ec8b7d1SJesse Brandeburg {
17875ec8b7d1SJesse Brandeburg int q_idx = 0, num_q_vectors;
178856184e01SJesse Brandeburg struct iavf_q_vector *q_vector;
17895ec8b7d1SJesse Brandeburg
17905ec8b7d1SJesse Brandeburg num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
17915ec8b7d1SJesse Brandeburg adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
17925ec8b7d1SJesse Brandeburg GFP_KERNEL);
17935ec8b7d1SJesse Brandeburg if (!adapter->q_vectors)
17945ec8b7d1SJesse Brandeburg return -ENOMEM;
17955ec8b7d1SJesse Brandeburg
17965ec8b7d1SJesse Brandeburg for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
17975ec8b7d1SJesse Brandeburg q_vector = &adapter->q_vectors[q_idx];
17985ec8b7d1SJesse Brandeburg q_vector->adapter = adapter;
17995ec8b7d1SJesse Brandeburg q_vector->vsi = &adapter->vsi;
18005ec8b7d1SJesse Brandeburg q_vector->v_idx = q_idx;
18015ec8b7d1SJesse Brandeburg q_vector->reg_idx = q_idx;
18025ec8b7d1SJesse Brandeburg cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
18035ec8b7d1SJesse Brandeburg netif_napi_add(adapter->netdev, &q_vector->napi,
1804b48b89f9SJakub Kicinski iavf_napi_poll);
18055ec8b7d1SJesse Brandeburg }
18065ec8b7d1SJesse Brandeburg
18075ec8b7d1SJesse Brandeburg return 0;
18085ec8b7d1SJesse Brandeburg }
18095ec8b7d1SJesse Brandeburg
18105ec8b7d1SJesse Brandeburg /**
18115ec8b7d1SJesse Brandeburg * iavf_free_q_vectors - Free memory allocated for interrupt vectors
18125ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
18135ec8b7d1SJesse Brandeburg *
18145ec8b7d1SJesse Brandeburg * This function frees the memory allocated to the q_vectors. In addition if
18155ec8b7d1SJesse Brandeburg * NAPI is enabled it will delete any references to the NAPI struct prior
18165ec8b7d1SJesse Brandeburg * to freeing the q_vector.
18175ec8b7d1SJesse Brandeburg **/
18185ec8b7d1SJesse Brandeburg static void iavf_free_q_vectors(struct iavf_adapter *adapter)
18195ec8b7d1SJesse Brandeburg {
18205ec8b7d1SJesse Brandeburg int q_idx, num_q_vectors;
18215ec8b7d1SJesse Brandeburg
18225ec8b7d1SJesse Brandeburg if (!adapter->q_vectors)
18235ec8b7d1SJesse Brandeburg return;
18245ec8b7d1SJesse Brandeburg
18255ec8b7d1SJesse Brandeburg num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
18265ec8b7d1SJesse Brandeburg
18275ec8b7d1SJesse Brandeburg for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
182856184e01SJesse Brandeburg struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
18295ec8b7d1SJesse Brandeburg
18305ec8b7d1SJesse Brandeburg netif_napi_del(&q_vector->napi);
18315ec8b7d1SJesse Brandeburg }
18325ec8b7d1SJesse Brandeburg kfree(adapter->q_vectors);
18335ec8b7d1SJesse Brandeburg adapter->q_vectors = NULL;
18345ec8b7d1SJesse Brandeburg }
18355ec8b7d1SJesse Brandeburg
18365ec8b7d1SJesse Brandeburg /**
18375ec8b7d1SJesse Brandeburg * iavf_reset_interrupt_capability - Reset MSIX setup
18385ec8b7d1SJesse Brandeburg * @adapter: board private structure
18395ec8b7d1SJesse Brandeburg *
18405ec8b7d1SJesse Brandeburg **/
1841a4aadf0fSPrzemek Kitszel static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
18425ec8b7d1SJesse Brandeburg {
18435ec8b7d1SJesse Brandeburg if (!adapter->msix_entries)
18445ec8b7d1SJesse Brandeburg return;
18455ec8b7d1SJesse Brandeburg
18465ec8b7d1SJesse Brandeburg pci_disable_msix(adapter->pdev);
18475ec8b7d1SJesse Brandeburg kfree(adapter->msix_entries);
18485ec8b7d1SJesse Brandeburg adapter->msix_entries = NULL;
18495ec8b7d1SJesse Brandeburg }
18505ec8b7d1SJesse Brandeburg
18515ec8b7d1SJesse Brandeburg /**
18525ec8b7d1SJesse Brandeburg * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
18535ec8b7d1SJesse Brandeburg * @adapter: board private structure to initialize
18545ec8b7d1SJesse Brandeburg *
18555ec8b7d1SJesse Brandeburg **/
1856a4aadf0fSPrzemek Kitszel static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
18575ec8b7d1SJesse Brandeburg {
18585ec8b7d1SJesse Brandeburg int err;
18595ec8b7d1SJesse Brandeburg
18605ec8b7d1SJesse Brandeburg err = iavf_alloc_queues(adapter);
18615ec8b7d1SJesse Brandeburg if (err) {
18625ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
18635ec8b7d1SJesse Brandeburg "Unable to allocate memory for queues\n");
18645ec8b7d1SJesse Brandeburg goto err_alloc_queues;
18655ec8b7d1SJesse Brandeburg }
18665ec8b7d1SJesse Brandeburg
18675ec8b7d1SJesse Brandeburg err = iavf_set_interrupt_capability(adapter);
18685ec8b7d1SJesse Brandeburg if (err) {
18695ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
18705ec8b7d1SJesse Brandeburg "Unable to setup interrupt capabilities\n");
18715ec8b7d1SJesse Brandeburg goto err_set_interrupt;
18725ec8b7d1SJesse Brandeburg }
18735ec8b7d1SJesse Brandeburg
18745ec8b7d1SJesse Brandeburg err = iavf_alloc_q_vectors(adapter);
18755ec8b7d1SJesse Brandeburg if (err) {
18765ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
18775ec8b7d1SJesse Brandeburg "Unable to allocate memory for queue vectors\n");
18785ec8b7d1SJesse Brandeburg goto err_alloc_q_vectors;
18795ec8b7d1SJesse Brandeburg }
18805ec8b7d1SJesse Brandeburg
18815ec8b7d1SJesse Brandeburg /* If we've made it this far with the ADq flag on, then we haven't
18825ec8b7d1SJesse Brandeburg * bailed out anywhere along the way, and ADq isn't merely enabled:
18835ec8b7d1SJesse Brandeburg * the actual resources have been allocated in the reset path.
18845ec8b7d1SJesse Brandeburg * Now we can truly claim that ADq is enabled.
18855ec8b7d1SJesse Brandeburg */
18865ec8b7d1SJesse Brandeburg if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
18875ec8b7d1SJesse Brandeburg adapter->num_tc)
18885ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
18895ec8b7d1SJesse Brandeburg adapter->num_tc);
18905ec8b7d1SJesse Brandeburg
18915ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
18925ec8b7d1SJesse Brandeburg (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
18935ec8b7d1SJesse Brandeburg adapter->num_active_queues);
18945ec8b7d1SJesse Brandeburg
18955ec8b7d1SJesse Brandeburg return 0;
18965ec8b7d1SJesse Brandeburg err_alloc_q_vectors:
18975ec8b7d1SJesse Brandeburg iavf_reset_interrupt_capability(adapter);
18985ec8b7d1SJesse Brandeburg err_set_interrupt:
18995ec8b7d1SJesse Brandeburg iavf_free_queues(adapter);
19005ec8b7d1SJesse Brandeburg err_alloc_queues:
19015ec8b7d1SJesse Brandeburg return err;
19025ec8b7d1SJesse Brandeburg }
19035ec8b7d1SJesse Brandeburg
19045ec8b7d1SJesse Brandeburg /**
1905b5b219a1SMichal Schmidt * iavf_free_interrupt_scheme - Undo what iavf_init_interrupt_scheme does
1906b5b219a1SMichal Schmidt * @adapter: board private structure
1907b5b219a1SMichal Schmidt **/
1908b5b219a1SMichal Schmidt static void iavf_free_interrupt_scheme(struct iavf_adapter *adapter)
1909b5b219a1SMichal Schmidt {
1910b5b219a1SMichal Schmidt iavf_free_q_vectors(adapter);
1911b5b219a1SMichal Schmidt iavf_reset_interrupt_capability(adapter);
1912b5b219a1SMichal Schmidt iavf_free_queues(adapter);
1913b5b219a1SMichal Schmidt }
1914b5b219a1SMichal Schmidt
1915b5b219a1SMichal Schmidt /**
19165ec8b7d1SJesse Brandeburg * iavf_free_rss - Free memory used by RSS structs
19175ec8b7d1SJesse Brandeburg * @adapter: board private structure
19185ec8b7d1SJesse Brandeburg **/
iavf_free_rss(struct iavf_adapter * adapter)19195ec8b7d1SJesse Brandeburg static void iavf_free_rss(struct iavf_adapter *adapter)
19205ec8b7d1SJesse Brandeburg {
19215ec8b7d1SJesse Brandeburg kfree(adapter->rss_key);
19225ec8b7d1SJesse Brandeburg adapter->rss_key = NULL;
19235ec8b7d1SJesse Brandeburg
19245ec8b7d1SJesse Brandeburg kfree(adapter->rss_lut);
19255ec8b7d1SJesse Brandeburg adapter->rss_lut = NULL;
19265ec8b7d1SJesse Brandeburg }
19275ec8b7d1SJesse Brandeburg
19285ec8b7d1SJesse Brandeburg /**
19295ec8b7d1SJesse Brandeburg * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
19305ec8b7d1SJesse Brandeburg * @adapter: board private structure
1931a77ed5c5SAhmed Zaki * @running: true if adapter->state == __IAVF_RUNNING
19325ec8b7d1SJesse Brandeburg *
19335ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
19345ec8b7d1SJesse Brandeburg **/
1935a77ed5c5SAhmed Zaki static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
19365ec8b7d1SJesse Brandeburg {
19375ec8b7d1SJesse Brandeburg struct net_device *netdev = adapter->netdev;
19385ec8b7d1SJesse Brandeburg int err;
19395ec8b7d1SJesse Brandeburg
1940a77ed5c5SAhmed Zaki if (running)
19415ec8b7d1SJesse Brandeburg iavf_free_traffic_irqs(adapter);
19425ec8b7d1SJesse Brandeburg iavf_free_misc_irq(adapter);
1943b5b219a1SMichal Schmidt iavf_free_interrupt_scheme(adapter);
19445ec8b7d1SJesse Brandeburg
19455ec8b7d1SJesse Brandeburg err = iavf_init_interrupt_scheme(adapter);
19465ec8b7d1SJesse Brandeburg if (err)
19475ec8b7d1SJesse Brandeburg goto err;
19485ec8b7d1SJesse Brandeburg
19495ec8b7d1SJesse Brandeburg netif_tx_stop_all_queues(netdev);
19505ec8b7d1SJesse Brandeburg
19515ec8b7d1SJesse Brandeburg err = iavf_request_misc_irq(adapter);
19525ec8b7d1SJesse Brandeburg if (err)
19535ec8b7d1SJesse Brandeburg goto err;
19545ec8b7d1SJesse Brandeburg
195556184e01SJesse Brandeburg set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
19565ec8b7d1SJesse Brandeburg
19575ec8b7d1SJesse Brandeburg iavf_map_rings_to_vectors(adapter);
19585ec8b7d1SJesse Brandeburg err:
19595ec8b7d1SJesse Brandeburg return err;
19605ec8b7d1SJesse Brandeburg }
19615ec8b7d1SJesse Brandeburg
19625ec8b7d1SJesse Brandeburg /**
1963d1639a17SAhmed Zaki * iavf_finish_config - do all netdev work that needs RTNL
1964d1639a17SAhmed Zaki * @work: our work_struct
1965d1639a17SAhmed Zaki *
1966d1639a17SAhmed Zaki * Do work that needs both RTNL and crit_lock.
1967d1639a17SAhmed Zaki **/
1968d1639a17SAhmed Zaki static void iavf_finish_config(struct work_struct *work)
1969d1639a17SAhmed Zaki {
1970d1639a17SAhmed Zaki struct iavf_adapter *adapter;
1971d1639a17SAhmed Zaki int pairs, err;
1972d1639a17SAhmed Zaki
1973d1639a17SAhmed Zaki adapter = container_of(work, struct iavf_adapter, finish_config);
1974d1639a17SAhmed Zaki
1975d1639a17SAhmed Zaki /* Always take RTNL first to prevent circular lock dependency */
1976d1639a17SAhmed Zaki rtnl_lock();
1977d1639a17SAhmed Zaki mutex_lock(&adapter->crit_lock);
1978d1639a17SAhmed Zaki
1979d1639a17SAhmed Zaki if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
198034ad34bfSMichal Schmidt adapter->netdev->reg_state == NETREG_REGISTERED &&
1981d1639a17SAhmed Zaki !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
1982d1639a17SAhmed Zaki netdev_update_features(adapter->netdev);
1983d1639a17SAhmed Zaki adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
1984d1639a17SAhmed Zaki }
1985d1639a17SAhmed Zaki
1986d1639a17SAhmed Zaki switch (adapter->state) {
1987d1639a17SAhmed Zaki case __IAVF_DOWN:
198834ad34bfSMichal Schmidt if (adapter->netdev->reg_state != NETREG_REGISTERED) {
1989d1639a17SAhmed Zaki err = register_netdevice(adapter->netdev);
1990d1639a17SAhmed Zaki if (err) {
1991d1639a17SAhmed Zaki dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
1992d1639a17SAhmed Zaki err);
1993d1639a17SAhmed Zaki
1994d1639a17SAhmed Zaki /* go back and try again.*/
1995d1639a17SAhmed Zaki iavf_free_rss(adapter);
1996d1639a17SAhmed Zaki iavf_free_misc_irq(adapter);
1997d1639a17SAhmed Zaki iavf_reset_interrupt_capability(adapter);
1998d1639a17SAhmed Zaki iavf_change_state(adapter,
1999d1639a17SAhmed Zaki __IAVF_INIT_CONFIG_ADAPTER);
2000d1639a17SAhmed Zaki goto out;
2001d1639a17SAhmed Zaki }
2002d1639a17SAhmed Zaki }
2003d1639a17SAhmed Zaki
2004d1639a17SAhmed Zaki /* Set the real number of queues when reset occurs while
2005d1639a17SAhmed Zaki * state == __IAVF_DOWN
2006d1639a17SAhmed Zaki */
2007d1639a17SAhmed Zaki fallthrough;
2008d1639a17SAhmed Zaki case __IAVF_RUNNING:
2009d1639a17SAhmed Zaki pairs = adapter->num_active_queues;
2010d1639a17SAhmed Zaki netif_set_real_num_rx_queues(adapter->netdev, pairs);
2011d1639a17SAhmed Zaki netif_set_real_num_tx_queues(adapter->netdev, pairs);
2012d1639a17SAhmed Zaki break;
2013d1639a17SAhmed Zaki
2014d1639a17SAhmed Zaki default:
2015d1639a17SAhmed Zaki break;
2016d1639a17SAhmed Zaki }
2017d1639a17SAhmed Zaki
2018d1639a17SAhmed Zaki out:
2019d1639a17SAhmed Zaki mutex_unlock(&adapter->crit_lock);
2020d1639a17SAhmed Zaki rtnl_unlock();
2021d1639a17SAhmed Zaki }
2022d1639a17SAhmed Zaki
2023d1639a17SAhmed Zaki /**
2024d1639a17SAhmed Zaki * iavf_schedule_finish_config - Set the flags and schedule a reset event
2025d1639a17SAhmed Zaki * @adapter: board private structure
2026d1639a17SAhmed Zaki **/
2027d1639a17SAhmed Zaki void iavf_schedule_finish_config(struct iavf_adapter *adapter)
2028d1639a17SAhmed Zaki {
2029d1639a17SAhmed Zaki if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
2030d1639a17SAhmed Zaki queue_work(adapter->wq, &adapter->finish_config);
2031d1639a17SAhmed Zaki }
2032d1639a17SAhmed Zaki
2033d1639a17SAhmed Zaki /**
2034b476b003SJakub Pawlak * iavf_process_aq_command - process aq_required flags
2035b476b003SJakub Pawlak * and send the corresponding aq command
2036b476b003SJakub Pawlak * @adapter: pointer to iavf adapter structure
2037b476b003SJakub Pawlak *
2038b476b003SJakub Pawlak * Returns 0 on success.
2039b476b003SJakub Pawlak * Returns an error code if no command was sent
2040b476b003SJakub Pawlak * or if the command failed.
2041b476b003SJakub Pawlak **/
2042b476b003SJakub Pawlak static int iavf_process_aq_command(struct iavf_adapter *adapter)
2043b476b003SJakub Pawlak {
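/* Service at most one pending aq_required request per call, checking the
 * flags in the priority order below; return -EAGAIN when nothing is
 * pending so the caller can use the slot for e.g. a stats request.
 */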
2044b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
2045b476b003SJakub Pawlak return iavf_send_vf_config_msg(adapter);
2046209f2f9cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
2047209f2f9cSBrett Creeley return iavf_send_vf_offload_vlan_v2_msg(adapter);
2048b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
2049b476b003SJakub Pawlak iavf_disable_queues(adapter);
2050b476b003SJakub Pawlak return 0;
2051b476b003SJakub Pawlak }
2052b476b003SJakub Pawlak
2053b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2054b476b003SJakub Pawlak iavf_map_queues(adapter);
2055b476b003SJakub Pawlak return 0;
2056b476b003SJakub Pawlak }
2057b476b003SJakub Pawlak
2058b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2059b476b003SJakub Pawlak iavf_add_ether_addrs(adapter);
2060b476b003SJakub Pawlak return 0;
2061b476b003SJakub Pawlak }
2062b476b003SJakub Pawlak
2063b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2064b476b003SJakub Pawlak iavf_add_vlans(adapter);
2065b476b003SJakub Pawlak return 0;
2066b476b003SJakub Pawlak }
2067b476b003SJakub Pawlak
2068b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2069b476b003SJakub Pawlak iavf_del_ether_addrs(adapter);
2070b476b003SJakub Pawlak return 0;
2071b476b003SJakub Pawlak }
2072b476b003SJakub Pawlak
2073b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2074b476b003SJakub Pawlak iavf_del_vlans(adapter);
2075b476b003SJakub Pawlak return 0;
2076b476b003SJakub Pawlak }
2077b476b003SJakub Pawlak
2078b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2079b476b003SJakub Pawlak iavf_enable_vlan_stripping(adapter);
2080b476b003SJakub Pawlak return 0;
2081b476b003SJakub Pawlak }
2082b476b003SJakub Pawlak
2083b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2084b476b003SJakub Pawlak iavf_disable_vlan_stripping(adapter);
2085b476b003SJakub Pawlak return 0;
2086b476b003SJakub Pawlak }
2087b476b003SJakub Pawlak
2088b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2089b476b003SJakub Pawlak iavf_configure_queues(adapter);
2090b476b003SJakub Pawlak return 0;
2091b476b003SJakub Pawlak }
2092b476b003SJakub Pawlak
2093b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2094b476b003SJakub Pawlak iavf_enable_queues(adapter);
2095b476b003SJakub Pawlak return 0;
2096b476b003SJakub Pawlak }
2097b476b003SJakub Pawlak
2098b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2099b476b003SJakub Pawlak /* This message goes straight to the firmware, not the
2100b476b003SJakub Pawlak * PF, so we don't have to set current_op as we will
2101b476b003SJakub Pawlak * not get a response through the ARQ.
2102b476b003SJakub Pawlak */
2103b476b003SJakub Pawlak adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2104b476b003SJakub Pawlak return 0;
2105b476b003SJakub Pawlak }
2106b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
2107b476b003SJakub Pawlak iavf_get_hena(adapter);
2108b476b003SJakub Pawlak return 0;
2109b476b003SJakub Pawlak }
2110b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
2111b476b003SJakub Pawlak iavf_set_hena(adapter);
2112b476b003SJakub Pawlak return 0;
2113b476b003SJakub Pawlak }
2114b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2115b476b003SJakub Pawlak iavf_set_rss_key(adapter);
2116b476b003SJakub Pawlak return 0;
2117b476b003SJakub Pawlak }
2118b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2119b476b003SJakub Pawlak iavf_set_rss_lut(adapter);
2120b476b003SJakub Pawlak return 0;
2121b476b003SJakub Pawlak }
21224a3de3fbSAhmed Zaki if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) {
21234a3de3fbSAhmed Zaki iavf_set_rss_hfunc(adapter);
21244a3de3fbSAhmed Zaki return 0;
21254a3de3fbSAhmed Zaki }
2126b476b003SJakub Pawlak
2127221465deSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
2128221465deSBrett Creeley iavf_set_promiscuous(adapter);
2129b476b003SJakub Pawlak return 0;
2130b476b003SJakub Pawlak }
2131b476b003SJakub Pawlak
2132b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2133b476b003SJakub Pawlak iavf_enable_channels(adapter);
2134b476b003SJakub Pawlak return 0;
2135b476b003SJakub Pawlak }
2136b476b003SJakub Pawlak
2137b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2138b476b003SJakub Pawlak iavf_disable_channels(adapter);
2139b476b003SJakub Pawlak return 0;
2140b476b003SJakub Pawlak }
2141b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2142b476b003SJakub Pawlak iavf_add_cloud_filter(adapter);
2143b476b003SJakub Pawlak return 0;
2144b476b003SJakub Pawlak }
2145b476b003SJakub Pawlak if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2146b476b003SJakub Pawlak iavf_del_cloud_filter(adapter);
2147b476b003SJakub Pawlak return 0;
2148b476b003SJakub Pawlak }
21490dbfbabbSHaiyue Wang if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
21500dbfbabbSHaiyue Wang iavf_add_fdir_filter(adapter);
21510dbfbabbSHaiyue Wang return IAVF_SUCCESS;
21520dbfbabbSHaiyue Wang }
21530dbfbabbSHaiyue Wang if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
21540dbfbabbSHaiyue Wang iavf_del_fdir_filter(adapter);
21550dbfbabbSHaiyue Wang return IAVF_SUCCESS;
21560dbfbabbSHaiyue Wang }
21570aaeb4fbSHaiyue Wang if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
21580aaeb4fbSHaiyue Wang iavf_add_adv_rss_cfg(adapter);
21590aaeb4fbSHaiyue Wang return 0;
21600aaeb4fbSHaiyue Wang }
21610aaeb4fbSHaiyue Wang if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
21620aaeb4fbSHaiyue Wang iavf_del_adv_rss_cfg(adapter);
21630aaeb4fbSHaiyue Wang return 0;
21640aaeb4fbSHaiyue Wang }
21658afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
21668afadd1cSBrett Creeley iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
21678afadd1cSBrett Creeley return 0;
21688afadd1cSBrett Creeley }
21698afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
21708afadd1cSBrett Creeley iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
21718afadd1cSBrett Creeley return 0;
21728afadd1cSBrett Creeley }
21738afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
21748afadd1cSBrett Creeley iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
21758afadd1cSBrett Creeley return 0;
21768afadd1cSBrett Creeley }
21778afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
21788afadd1cSBrett Creeley iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
21798afadd1cSBrett Creeley return 0;
21808afadd1cSBrett Creeley }
21818afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
21828afadd1cSBrett Creeley iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
21838afadd1cSBrett Creeley return 0;
21848afadd1cSBrett Creeley }
21858afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
21868afadd1cSBrett Creeley iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
21878afadd1cSBrett Creeley return 0;
21888afadd1cSBrett Creeley }
21898afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
21908afadd1cSBrett Creeley iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
21918afadd1cSBrett Creeley return 0;
21928afadd1cSBrett Creeley }
21938afadd1cSBrett Creeley if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
21948afadd1cSBrett Creeley iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
21958afadd1cSBrett Creeley return 0;
21968afadd1cSBrett Creeley }
21978afadd1cSBrett Creeley
21983b5bdd18SJedrzej Jagielski if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
21993b5bdd18SJedrzej Jagielski iavf_request_stats(adapter);
22003b5bdd18SJedrzej Jagielski return 0;
22013b5bdd18SJedrzej Jagielski }
22023b5bdd18SJedrzej Jagielski
2203b476b003SJakub Pawlak return -EAGAIN;
2204b476b003SJakub Pawlak }
2205b476b003SJakub Pawlak
2206b476b003SJakub Pawlak /**
22078afadd1cSBrett Creeley * iavf_set_vlan_offload_features - set VLAN offload configuration
22088afadd1cSBrett Creeley * @adapter: board private structure
22098afadd1cSBrett Creeley * @prev_features: previous features used for comparison
22108afadd1cSBrett Creeley * @features: updated features used for configuration
22118afadd1cSBrett Creeley *
22128afadd1cSBrett Creeley * Set the aq_required bit(s) based on the requested features passed in to
22138afadd1cSBrett Creeley * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
22148afadd1cSBrett Creeley * the watchdog if any changes are requested to expedite the request via
22158afadd1cSBrett Creeley * virtchnl.
22168afadd1cSBrett Creeley **/
2217a4aadf0fSPrzemek Kitszel static void
22188afadd1cSBrett Creeley iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
22198afadd1cSBrett Creeley netdev_features_t prev_features,
22208afadd1cSBrett Creeley netdev_features_t features)
22218afadd1cSBrett Creeley {
22228afadd1cSBrett Creeley bool enable_stripping = true, enable_insertion = true;
22238afadd1cSBrett Creeley u16 vlan_ethertype = 0;
22248afadd1cSBrett Creeley u64 aq_required = 0;
22258afadd1cSBrett Creeley
22268afadd1cSBrett Creeley /* keep cases separate because one ethertype for offloads can be
22278afadd1cSBrett Creeley * enabled at the same time as another is being disabled, so check for an
22288afadd1cSBrett Creeley * enabled ethertype first, then check for disabled. Default to
22298afadd1cSBrett Creeley * ETH_P_8021Q so an ethertype is specified if disabling insertion and
22308afadd1cSBrett Creeley * stripping.
22318afadd1cSBrett Creeley */
22328afadd1cSBrett Creeley if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
22338afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021AD;
22348afadd1cSBrett Creeley else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
22358afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021Q;
22368afadd1cSBrett Creeley else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
22378afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021AD;
22388afadd1cSBrett Creeley else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
22398afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021Q;
22408afadd1cSBrett Creeley else
22418afadd1cSBrett Creeley vlan_ethertype = ETH_P_8021Q;
22428afadd1cSBrett Creeley
22438afadd1cSBrett Creeley if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
22448afadd1cSBrett Creeley enable_stripping = false;
22458afadd1cSBrett Creeley if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
22468afadd1cSBrett Creeley enable_insertion = false;
22478afadd1cSBrett Creeley
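/* With the target ethertype and the desired stripping/insertion state
 * known, translate them into aq_required flags for whichever VLAN offload
 * interface the PF negotiated (legacy VLAN or VLAN_V2).
 */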
22488afadd1cSBrett Creeley if (VLAN_ALLOWED(adapter)) {
22498afadd1cSBrett Creeley /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
22508afadd1cSBrett Creeley * stripping via virtchnl. VLAN insertion can be toggled on the
22518afadd1cSBrett Creeley * netdev, but it doesn't require a virtchnl message
22528afadd1cSBrett Creeley */
22538afadd1cSBrett Creeley if (enable_stripping)
22548afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
22558afadd1cSBrett Creeley else
22568afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
22578afadd1cSBrett Creeley
22588afadd1cSBrett Creeley } else if (VLAN_V2_ALLOWED(adapter)) {
22598afadd1cSBrett Creeley switch (vlan_ethertype) {
22608afadd1cSBrett Creeley case ETH_P_8021Q:
22618afadd1cSBrett Creeley if (enable_stripping)
22628afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
22638afadd1cSBrett Creeley else
22648afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
22658afadd1cSBrett Creeley
22668afadd1cSBrett Creeley if (enable_insertion)
22678afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
22688afadd1cSBrett Creeley else
22698afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
22708afadd1cSBrett Creeley break;
22718afadd1cSBrett Creeley case ETH_P_8021AD:
22728afadd1cSBrett Creeley if (enable_stripping)
22738afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
22748afadd1cSBrett Creeley else
22758afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
22768afadd1cSBrett Creeley
22778afadd1cSBrett Creeley if (enable_insertion)
22788afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
22798afadd1cSBrett Creeley else
22808afadd1cSBrett Creeley aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
22818afadd1cSBrett Creeley break;
22828afadd1cSBrett Creeley }
22838afadd1cSBrett Creeley }
22848afadd1cSBrett Creeley
228595260816SPetr Oros if (aq_required)
228695260816SPetr Oros iavf_schedule_aq_request(adapter, aq_required);
22878afadd1cSBrett Creeley }
22888afadd1cSBrett Creeley
22898afadd1cSBrett Creeley /**
2290b66c7bc1SJakub Pawlak * iavf_startup - first step of driver startup
2291b66c7bc1SJakub Pawlak * @adapter: board private structure
2292b66c7bc1SJakub Pawlak *
2293b66c7bc1SJakub Pawlak * Function processes the __IAVF_STARTUP driver state.
2294b66c7bc1SJakub Pawlak * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
229559756ad6SMateusz Palczewski * on failure the state is changed to __IAVF_INIT_FAILED.
2296b66c7bc1SJakub Pawlak **/
229759756ad6SMateusz Palczewski static void iavf_startup(struct iavf_adapter *adapter)
2298b66c7bc1SJakub Pawlak {
2299b66c7bc1SJakub Pawlak struct pci_dev *pdev = adapter->pdev;
2300b66c7bc1SJakub Pawlak struct iavf_hw *hw = &adapter->hw;
2301bae569d0SMateusz Palczewski enum iavf_status status;
2302bae569d0SMateusz Palczewski int ret;
2303b66c7bc1SJakub Pawlak
2304b66c7bc1SJakub Pawlak WARN_ON(adapter->state != __IAVF_STARTUP);
2305b66c7bc1SJakub Pawlak
2306b66c7bc1SJakub Pawlak /* driver loaded, probe complete */
2307b66c7bc1SJakub Pawlak adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2308b66c7bc1SJakub Pawlak adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2309b66c7bc1SJakub Pawlak
2310bae569d0SMateusz Palczewski ret = iavf_check_reset_complete(hw);
2311bae569d0SMateusz Palczewski if (ret) {
2312b66c7bc1SJakub Pawlak dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2313bae569d0SMateusz Palczewski ret);
2314b66c7bc1SJakub Pawlak goto err;
2315b66c7bc1SJakub Pawlak }
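/* Admin queue ring lengths and buffer sizes must be programmed before
 * iavf_init_adminq() allocates and configures the send/receive queues.
 */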
2316b66c7bc1SJakub Pawlak hw->aq.num_arq_entries = IAVF_AQ_LEN;
2317b66c7bc1SJakub Pawlak hw->aq.num_asq_entries = IAVF_AQ_LEN;
2318b66c7bc1SJakub Pawlak hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2319b66c7bc1SJakub Pawlak hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2320b66c7bc1SJakub Pawlak
2321bae569d0SMateusz Palczewski status = iavf_init_adminq(hw);
2322bae569d0SMateusz Palczewski if (status) {
2323bae569d0SMateusz Palczewski dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2324bae569d0SMateusz Palczewski status);
2325b66c7bc1SJakub Pawlak goto err;
2326b66c7bc1SJakub Pawlak }
2327bae569d0SMateusz Palczewski ret = iavf_send_api_ver(adapter);
2328bae569d0SMateusz Palczewski if (ret) {
2329bae569d0SMateusz Palczewski dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2330b66c7bc1SJakub Pawlak iavf_shutdown_adminq(hw);
2331b66c7bc1SJakub Pawlak goto err;
2332b66c7bc1SJakub Pawlak }
233345eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
233459756ad6SMateusz Palczewski return;
2335b66c7bc1SJakub Pawlak err:
233659756ad6SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_FAILED);
2337b66c7bc1SJakub Pawlak }
2338b66c7bc1SJakub Pawlak
2339b66c7bc1SJakub Pawlak /**
2340b66c7bc1SJakub Pawlak * iavf_init_version_check - second step of driver startup
2341b66c7bc1SJakub Pawlak * @adapter: board private structure
2342b66c7bc1SJakub Pawlak *
2343b66c7bc1SJakub Pawlak * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
2344b66c7bc1SJakub Pawlak * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
234559756ad6SMateusz Palczewski * on failure the state is changed to __IAVF_INIT_FAILED.
2346b66c7bc1SJakub Pawlak **/
234759756ad6SMateusz Palczewski static void iavf_init_version_check(struct iavf_adapter *adapter)
2348b66c7bc1SJakub Pawlak {
2349b66c7bc1SJakub Pawlak struct pci_dev *pdev = adapter->pdev;
2350b66c7bc1SJakub Pawlak struct iavf_hw *hw = &adapter->hw;
2351b66c7bc1SJakub Pawlak int err = -EAGAIN;
2352b66c7bc1SJakub Pawlak
2353b66c7bc1SJakub Pawlak WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2354b66c7bc1SJakub Pawlak
2355b66c7bc1SJakub Pawlak if (!iavf_asq_done(hw)) {
2356b66c7bc1SJakub Pawlak dev_err(&pdev->dev, "Admin queue command never completed\n");
2357b66c7bc1SJakub Pawlak iavf_shutdown_adminq(hw);
235845eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_STARTUP);
2359b66c7bc1SJakub Pawlak goto err;
2360b66c7bc1SJakub Pawlak }
2361b66c7bc1SJakub Pawlak
2362b66c7bc1SJakub Pawlak /* aq msg sent, awaiting reply */
2363b66c7bc1SJakub Pawlak err = iavf_verify_api_ver(adapter);
2364b66c7bc1SJakub Pawlak if (err) {
2365bae569d0SMateusz Palczewski if (err == -EALREADY)
2366b66c7bc1SJakub Pawlak err = iavf_send_api_ver(adapter);
2367b66c7bc1SJakub Pawlak else
2368b66c7bc1SJakub Pawlak dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2369b66c7bc1SJakub Pawlak adapter->pf_version.major,
2370b66c7bc1SJakub Pawlak adapter->pf_version.minor,
2371b66c7bc1SJakub Pawlak VIRTCHNL_VERSION_MAJOR,
2372b66c7bc1SJakub Pawlak VIRTCHNL_VERSION_MINOR);
2373b66c7bc1SJakub Pawlak goto err;
2374b66c7bc1SJakub Pawlak }
2375b66c7bc1SJakub Pawlak err = iavf_send_vf_config_msg(adapter);
2376b66c7bc1SJakub Pawlak if (err) {
2377b66c7bc1SJakub Pawlak dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2378b66c7bc1SJakub Pawlak err);
2379b66c7bc1SJakub Pawlak goto err;
2380b66c7bc1SJakub Pawlak }
238145eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
238259756ad6SMateusz Palczewski return;
2383b66c7bc1SJakub Pawlak err:
238459756ad6SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_FAILED);
2385b66c7bc1SJakub Pawlak }
2386b66c7bc1SJakub Pawlak
2387b66c7bc1SJakub Pawlak /**
2388209f2f9cSBrett Creeley * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2389209f2f9cSBrett Creeley * @adapter: board private structure
2390209f2f9cSBrett Creeley */
2391209f2f9cSBrett Creeley int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2392209f2f9cSBrett Creeley {
2393209f2f9cSBrett Creeley int i, num_req_queues = adapter->num_req_queues;
2394209f2f9cSBrett Creeley struct iavf_vsi *vsi = &adapter->vsi;
2395209f2f9cSBrett Creeley
2396209f2f9cSBrett Creeley for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2397209f2f9cSBrett Creeley if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2398209f2f9cSBrett Creeley adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2399209f2f9cSBrett Creeley }
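/* Only the SR-IOV LAN VSI is used; any other VSI types reported by the
 * PF are ignored here.
 */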
2400209f2f9cSBrett Creeley if (!adapter->vsi_res) {
2401209f2f9cSBrett Creeley dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2402209f2f9cSBrett Creeley return -ENODEV;
2403209f2f9cSBrett Creeley }
2404209f2f9cSBrett Creeley
2405209f2f9cSBrett Creeley if (num_req_queues &&
2406209f2f9cSBrett Creeley num_req_queues > adapter->vsi_res->num_queue_pairs) {
2407209f2f9cSBrett Creeley /* Problem. The PF gave us fewer queues than what we had
2408209f2f9cSBrett Creeley * negotiated in our request. Need a reset to see if we can't
2409209f2f9cSBrett Creeley * get back to a working state.
2410209f2f9cSBrett Creeley */
2411209f2f9cSBrett Creeley dev_err(&adapter->pdev->dev,
2412209f2f9cSBrett Creeley "Requested %d queues, but PF only gave us %d.\n",
2413209f2f9cSBrett Creeley num_req_queues,
2414209f2f9cSBrett Creeley adapter->vsi_res->num_queue_pairs);
241557d03f56SMichal Maloszewski adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2416209f2f9cSBrett Creeley adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2417c34743daSAhmed Zaki iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
2418209f2f9cSBrett Creeley
2419209f2f9cSBrett Creeley return -EAGAIN;
2420209f2f9cSBrett Creeley }
2421209f2f9cSBrett Creeley adapter->num_req_queues = 0;
2422209f2f9cSBrett Creeley adapter->vsi.id = adapter->vsi_res->vsi_id;
2423209f2f9cSBrett Creeley
2424209f2f9cSBrett Creeley adapter->vsi.back = adapter;
2425209f2f9cSBrett Creeley adapter->vsi.base_vector = 1;
2426209f2f9cSBrett Creeley vsi->netdev = adapter->netdev;
2427209f2f9cSBrett Creeley vsi->qs_handle = adapter->vsi_res->qset_handle;
2428209f2f9cSBrett Creeley if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2429209f2f9cSBrett Creeley adapter->rss_key_size = adapter->vf_res->rss_key_size;
2430209f2f9cSBrett Creeley adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2431209f2f9cSBrett Creeley } else {
2432209f2f9cSBrett Creeley adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2433209f2f9cSBrett Creeley adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2434209f2f9cSBrett Creeley }
2435209f2f9cSBrett Creeley
2436209f2f9cSBrett Creeley return 0;
2437209f2f9cSBrett Creeley }
2438209f2f9cSBrett Creeley
2439209f2f9cSBrett Creeley /**
2440b66c7bc1SJakub Pawlak * iavf_init_get_resources - third step of driver startup
2441b66c7bc1SJakub Pawlak * @adapter: board private structure
2442b66c7bc1SJakub Pawlak *
2443b66c7bc1SJakub Pawlak * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
2444b66c7bc1SJakub Pawlak * continues the driver initialization procedure.
2445b66c7bc1SJakub Pawlak * On success the state is changed to __IAVF_INIT_EXTENDED_CAPS;
244659756ad6SMateusz Palczewski * on failure the state is changed to __IAVF_INIT_FAILED.
2447b66c7bc1SJakub Pawlak **/
244859756ad6SMateusz Palczewski static void iavf_init_get_resources(struct iavf_adapter *adapter)
2449b66c7bc1SJakub Pawlak {
2450b66c7bc1SJakub Pawlak struct pci_dev *pdev = adapter->pdev;
2451b66c7bc1SJakub Pawlak struct iavf_hw *hw = &adapter->hw;
2452e0ef26fbSBrett Creeley int err;
2453b66c7bc1SJakub Pawlak
2454b66c7bc1SJakub Pawlak WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2455b66c7bc1SJakub Pawlak /* aq msg sent, awaiting reply */
2456b66c7bc1SJakub Pawlak if (!adapter->vf_res) {
2457e0ef26fbSBrett Creeley adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2458e0ef26fbSBrett Creeley GFP_KERNEL);
2459e0ef26fbSBrett Creeley if (!adapter->vf_res) {
2460e0ef26fbSBrett Creeley err = -ENOMEM;
2461b66c7bc1SJakub Pawlak goto err;
2462b66c7bc1SJakub Pawlak }
2463e0ef26fbSBrett Creeley }
2464b66c7bc1SJakub Pawlak err = iavf_get_vf_config(adapter);
2465bae569d0SMateusz Palczewski if (err == -EALREADY) {
2466b66c7bc1SJakub Pawlak err = iavf_send_vf_config_msg(adapter);
2467541a1af4SPrzemyslaw Patynowski goto err;
2468bae569d0SMateusz Palczewski } else if (err == -EINVAL) {
2469bae569d0SMateusz Palczewski /* We only get -EINVAL if the device is in a very bad
2470b66c7bc1SJakub Pawlak * state or if we've been disabled for previous bad
2471b66c7bc1SJakub Pawlak * behavior. Either way, we're done now.
2472b66c7bc1SJakub Pawlak */
2473b66c7bc1SJakub Pawlak iavf_shutdown_adminq(hw);
2474b66c7bc1SJakub Pawlak dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
247559756ad6SMateusz Palczewski return;
2476b66c7bc1SJakub Pawlak }
2477b66c7bc1SJakub Pawlak if (err) {
2478b66c7bc1SJakub Pawlak dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2479b66c7bc1SJakub Pawlak goto err_alloc;
2480b66c7bc1SJakub Pawlak }
2481b66c7bc1SJakub Pawlak
2482209f2f9cSBrett Creeley err = iavf_parse_vf_resource_msg(adapter);
248387dba256SMateusz Palczewski if (err) {
248487dba256SMateusz Palczewski dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2485209f2f9cSBrett Creeley err);
2486209f2f9cSBrett Creeley goto err_alloc;
2487209f2f9cSBrett Creeley }
248887dba256SMateusz Palczewski /* Some features require additional messages to negotiate extended
248987dba256SMateusz Palczewski * capabilities. These are processed in sequence by the
249087dba256SMateusz Palczewski * __IAVF_INIT_EXTENDED_CAPS driver state.
2491209f2f9cSBrett Creeley */
249287dba256SMateusz Palczewski adapter->extended_caps = IAVF_EXTENDED_CAPS;
249387dba256SMateusz Palczewski
249487dba256SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2495209f2f9cSBrett Creeley return;
2496209f2f9cSBrett Creeley
2497209f2f9cSBrett Creeley err_alloc:
2498209f2f9cSBrett Creeley kfree(adapter->vf_res);
2499209f2f9cSBrett Creeley adapter->vf_res = NULL;
2500209f2f9cSBrett Creeley err:
2501209f2f9cSBrett Creeley iavf_change_state(adapter, __IAVF_INIT_FAILED);
2502209f2f9cSBrett Creeley }
2503209f2f9cSBrett Creeley
2504209f2f9cSBrett Creeley /**
250587dba256SMateusz Palczewski * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2506209f2f9cSBrett Creeley * @adapter: board private structure
2507209f2f9cSBrett Creeley *
250887dba256SMateusz Palczewski * Function processes send of the extended VLAN V2 capability message to the
250987dba256SMateusz Palczewski * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
251087dba256SMateusz Palczewski * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
251187dba256SMateusz Palczewski */
251287dba256SMateusz Palczewski static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2513209f2f9cSBrett Creeley {
2514209f2f9cSBrett Creeley int ret;
2515209f2f9cSBrett Creeley
251687dba256SMateusz Palczewski WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
251787dba256SMateusz Palczewski
251887dba256SMateusz Palczewski ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
251987dba256SMateusz Palczewski if (ret == -EOPNOTSUPP) {
252087dba256SMateusz Palczewski /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
252187dba256SMateusz Palczewski * we did not send the capability exchange message and do not
252287dba256SMateusz Palczewski * expect a response.
252387dba256SMateusz Palczewski */
252487dba256SMateusz Palczewski adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
252587dba256SMateusz Palczewski }
252687dba256SMateusz Palczewski
252787dba256SMateusz Palczewski /* We sent the message, so move on to the next step */
252887dba256SMateusz Palczewski adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
252987dba256SMateusz Palczewski }
253087dba256SMateusz Palczewski
253187dba256SMateusz Palczewski /**
253287dba256SMateusz Palczewski * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
253387dba256SMateusz Palczewski * @adapter: board private structure
253487dba256SMateusz Palczewski *
253587dba256SMateusz Palczewski * Function processes receipt of the extended VLAN V2 capability message from
253687dba256SMateusz Palczewski * the PF.
253787dba256SMateusz Palczewski **/
253887dba256SMateusz Palczewski static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
253987dba256SMateusz Palczewski {
254087dba256SMateusz Palczewski int ret;
254187dba256SMateusz Palczewski
254287dba256SMateusz Palczewski WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2543209f2f9cSBrett Creeley
2544209f2f9cSBrett Creeley memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2545209f2f9cSBrett Creeley
2546209f2f9cSBrett Creeley ret = iavf_get_vf_vlan_v2_caps(adapter);
254787dba256SMateusz Palczewski if (ret)
2548209f2f9cSBrett Creeley goto err;
2549209f2f9cSBrett Creeley
255087dba256SMateusz Palczewski /* We've processed receipt of the VLAN V2 caps message */
255187dba256SMateusz Palczewski adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2552209f2f9cSBrett Creeley return;
2553209f2f9cSBrett Creeley err:
255487dba256SMateusz Palczewski /* We didn't receive a reply. Make sure we try sending again when
255587dba256SMateusz Palczewski * __IAVF_INIT_FAILED attempts to recover.
255687dba256SMateusz Palczewski */
255787dba256SMateusz Palczewski adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2558209f2f9cSBrett Creeley iavf_change_state(adapter, __IAVF_INIT_FAILED);
2559209f2f9cSBrett Creeley }
2560209f2f9cSBrett Creeley
2561209f2f9cSBrett Creeley /**
256287dba256SMateusz Palczewski * iavf_init_process_extended_caps - Part of driver startup
256387dba256SMateusz Palczewski * @adapter: board private structure
256487dba256SMateusz Palczewski *
256587dba256SMateusz Palczewski * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
256687dba256SMateusz Palczewski * handles negotiating capabilities for features which require an additional
256787dba256SMateusz Palczewski * message.
256887dba256SMateusz Palczewski *
256987dba256SMateusz Palczewski * Once all extended capabilities exchanges are finished, the driver will
257087dba256SMateusz Palczewski * transition into __IAVF_INIT_CONFIG_ADAPTER.
257187dba256SMateusz Palczewski */
257287dba256SMateusz Palczewski static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
257387dba256SMateusz Palczewski {
257487dba256SMateusz Palczewski WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
257587dba256SMateusz Palczewski
257687dba256SMateusz Palczewski /* Process capability exchange for VLAN V2 */
257787dba256SMateusz Palczewski if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
257887dba256SMateusz Palczewski iavf_init_send_offload_vlan_v2_caps(adapter);
257987dba256SMateusz Palczewski return;
258087dba256SMateusz Palczewski } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
258187dba256SMateusz Palczewski iavf_init_recv_offload_vlan_v2_caps(adapter);
258287dba256SMateusz Palczewski return;
258387dba256SMateusz Palczewski }
258487dba256SMateusz Palczewski
258587dba256SMateusz Palczewski /* When we reach here, no further extended capabilities exchanges are
258687dba256SMateusz Palczewski * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
258787dba256SMateusz Palczewski */
258887dba256SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
258987dba256SMateusz Palczewski }
259087dba256SMateusz Palczewski
259187dba256SMateusz Palczewski /**
2592209f2f9cSBrett Creeley * iavf_init_config_adapter - last part of driver startup
2593209f2f9cSBrett Creeley * @adapter: board private structure
2594209f2f9cSBrett Creeley *
2595209f2f9cSBrett Creeley * After all the supported capabilities are negotiated, then the
2596209f2f9cSBrett Creeley * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2597209f2f9cSBrett Creeley */
2598209f2f9cSBrett Creeley static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2599209f2f9cSBrett Creeley {
2600209f2f9cSBrett Creeley struct net_device *netdev = adapter->netdev;
2601209f2f9cSBrett Creeley struct pci_dev *pdev = adapter->pdev;
2602209f2f9cSBrett Creeley int err;
2603209f2f9cSBrett Creeley
2604209f2f9cSBrett Creeley WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2605209f2f9cSBrett Creeley
2606209f2f9cSBrett Creeley if (iavf_process_config(adapter))
2607209f2f9cSBrett Creeley goto err;
2608209f2f9cSBrett Creeley
2609b66c7bc1SJakub Pawlak adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2610b66c7bc1SJakub Pawlak
2611b66c7bc1SJakub Pawlak adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2612b66c7bc1SJakub Pawlak
2613b66c7bc1SJakub Pawlak netdev->netdev_ops = &iavf_netdev_ops;
2614b66c7bc1SJakub Pawlak iavf_set_ethtool_ops(netdev);
2615b66c7bc1SJakub Pawlak netdev->watchdog_timeo = 5 * HZ;
2616b66c7bc1SJakub Pawlak
2617b66c7bc1SJakub Pawlak netdev->min_mtu = ETH_MIN_MTU;
26185fa4caffSAlexander Lobakin netdev->max_mtu = LIBIE_MAX_MTU;
2619b66c7bc1SJakub Pawlak
2620b66c7bc1SJakub Pawlak if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2621b66c7bc1SJakub Pawlak dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2622b66c7bc1SJakub Pawlak adapter->hw.mac.addr);
2623b66c7bc1SJakub Pawlak eth_hw_addr_random(netdev);
2624b66c7bc1SJakub Pawlak ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2625b66c7bc1SJakub Pawlak } else {
2626f3956ebbSJakub Kicinski eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2627b66c7bc1SJakub Pawlak ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2628b66c7bc1SJakub Pawlak }
2629b66c7bc1SJakub Pawlak
2630b66c7bc1SJakub Pawlak adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2631b66c7bc1SJakub Pawlak adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2632b66c7bc1SJakub Pawlak err = iavf_init_interrupt_scheme(adapter);
2633b66c7bc1SJakub Pawlak if (err)
2634b66c7bc1SJakub Pawlak goto err_sw_init;
2635b66c7bc1SJakub Pawlak iavf_map_rings_to_vectors(adapter);
2636b66c7bc1SJakub Pawlak if (adapter->vf_res->vf_cap_flags &
2637b66c7bc1SJakub Pawlak VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2638b66c7bc1SJakub Pawlak adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2639b66c7bc1SJakub Pawlak
2640b66c7bc1SJakub Pawlak err = iavf_request_misc_irq(adapter);
2641b66c7bc1SJakub Pawlak if (err)
2642b66c7bc1SJakub Pawlak goto err_sw_init;
2643b66c7bc1SJakub Pawlak
2644b66c7bc1SJakub Pawlak netif_carrier_off(netdev);
2645b66c7bc1SJakub Pawlak adapter->link_up = false;
2646b66c7bc1SJakub Pawlak netif_tx_stop_all_queues(netdev);
2647d1639a17SAhmed Zaki
2648b66c7bc1SJakub Pawlak dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2649b66c7bc1SJakub Pawlak if (netdev->features & NETIF_F_GRO)
2650b66c7bc1SJakub Pawlak dev_info(&pdev->dev, "GRO is enabled\n");
2651b66c7bc1SJakub Pawlak
265245eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_DOWN);
2653b66c7bc1SJakub Pawlak set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2654b66c7bc1SJakub Pawlak
2655b66c7bc1SJakub Pawlak iavf_misc_irq_enable(adapter);
2656b66c7bc1SJakub Pawlak wake_up(&adapter->down_waitqueue);
2657b66c7bc1SJakub Pawlak
2658b66c7bc1SJakub Pawlak adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2659b66c7bc1SJakub Pawlak adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2660753f3884SWei Yongjun if (!adapter->rss_key || !adapter->rss_lut) {
2661753f3884SWei Yongjun err = -ENOMEM;
2662b66c7bc1SJakub Pawlak goto err_mem;
2663753f3884SWei Yongjun }
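/* Configure RSS: if RSS over the admin queue was negotiated (RSS_AQ()),
 * defer it to the watchdog via IAVF_FLAG_AQ_CONFIGURE_RSS; otherwise
 * program it directly now.
 */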
2664b66c7bc1SJakub Pawlak if (RSS_AQ(adapter))
2665b66c7bc1SJakub Pawlak adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2666b66c7bc1SJakub Pawlak else
2667b66c7bc1SJakub Pawlak iavf_init_rss(adapter);
2668b66c7bc1SJakub Pawlak
26698afadd1cSBrett Creeley if (VLAN_V2_ALLOWED(adapter))
26708afadd1cSBrett Creeley /* request initial VLAN offload settings */
26718afadd1cSBrett Creeley iavf_set_vlan_offload_features(adapter, 0, netdev->features);
26728afadd1cSBrett Creeley
2673d1639a17SAhmed Zaki iavf_schedule_finish_config(adapter);
267459756ad6SMateusz Palczewski return;
2675d1639a17SAhmed Zaki
2676b66c7bc1SJakub Pawlak err_mem:
2677b66c7bc1SJakub Pawlak iavf_free_rss(adapter);
2678b66c7bc1SJakub Pawlak iavf_free_misc_irq(adapter);
2679b66c7bc1SJakub Pawlak err_sw_init:
2680b66c7bc1SJakub Pawlak iavf_reset_interrupt_capability(adapter);
2681b66c7bc1SJakub Pawlak err:
268259756ad6SMateusz Palczewski iavf_change_state(adapter, __IAVF_INIT_FAILED);
2683b66c7bc1SJakub Pawlak }
2684b66c7bc1SJakub Pawlak
2685b66c7bc1SJakub Pawlak /**
26865ec8b7d1SJesse Brandeburg * iavf_watchdog_task - Periodic call-back task
26875ec8b7d1SJesse Brandeburg * @work: pointer to work_struct
26885ec8b7d1SJesse Brandeburg **/
26895ec8b7d1SJesse Brandeburg static void iavf_watchdog_task(struct work_struct *work)
26905ec8b7d1SJesse Brandeburg {
26915ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = container_of(work,
26925ec8b7d1SJesse Brandeburg struct iavf_adapter,
2693fdd4044fSJakub Pawlak watchdog_task.work);
2694f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
26955ec8b7d1SJesse Brandeburg u32 reg_val;
26965ec8b7d1SJesse Brandeburg
2697fc2e6b3bSSlawomir Laba if (!mutex_trylock(&adapter->crit_lock)) {
2698fc2e6b3bSSlawomir Laba if (adapter->state == __IAVF_REMOVE)
2699fc2e6b3bSSlawomir Laba return;
2700fc2e6b3bSSlawomir Laba
27015ec8b7d1SJesse Brandeburg goto restart_watchdog;
2702fc2e6b3bSSlawomir Laba }
27035ec8b7d1SJesse Brandeburg
2704bac84861SJan Sokolowski if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
270545eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_COMM_FAILED);
2706bac84861SJan Sokolowski
2707bac84861SJan Sokolowski switch (adapter->state) {
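/* During init the watchdog drives the state machine: each handler below
 * runs one step, drops crit_lock and reschedules this task until the
 * adapter reaches __IAVF_DOWN (or retries from __IAVF_INIT_FAILED).
 */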
2708898ef1cbSMateusz Palczewski case __IAVF_STARTUP:
2709898ef1cbSMateusz Palczewski iavf_startup(adapter);
2710898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
27114411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2712898ef1cbSMateusz Palczewski msecs_to_jiffies(30));
2713898ef1cbSMateusz Palczewski return;
2714898ef1cbSMateusz Palczewski case __IAVF_INIT_VERSION_CHECK:
2715898ef1cbSMateusz Palczewski iavf_init_version_check(adapter);
2716898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
27174411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2718898ef1cbSMateusz Palczewski msecs_to_jiffies(30));
2719898ef1cbSMateusz Palczewski return;
2720898ef1cbSMateusz Palczewski case __IAVF_INIT_GET_RESOURCES:
2721898ef1cbSMateusz Palczewski iavf_init_get_resources(adapter);
2722898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
27234411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2724898ef1cbSMateusz Palczewski msecs_to_jiffies(1));
2725898ef1cbSMateusz Palczewski return;
272687dba256SMateusz Palczewski case __IAVF_INIT_EXTENDED_CAPS:
272787dba256SMateusz Palczewski iavf_init_process_extended_caps(adapter);
2728209f2f9cSBrett Creeley mutex_unlock(&adapter->crit_lock);
27294411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2730209f2f9cSBrett Creeley msecs_to_jiffies(1));
2731209f2f9cSBrett Creeley return;
2732209f2f9cSBrett Creeley case __IAVF_INIT_CONFIG_ADAPTER:
2733209f2f9cSBrett Creeley iavf_init_config_adapter(adapter);
2734209f2f9cSBrett Creeley mutex_unlock(&adapter->crit_lock);
27354411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2736209f2f9cSBrett Creeley msecs_to_jiffies(1));
2737209f2f9cSBrett Creeley return;
2738898ef1cbSMateusz Palczewski case __IAVF_INIT_FAILED:
27393ccd54efSSlawomir Laba if (test_bit(__IAVF_IN_REMOVE_TASK,
27403ccd54efSSlawomir Laba &adapter->crit_section)) {
27413ccd54efSSlawomir Laba /* Do not update the state and do not reschedule
27423ccd54efSSlawomir Laba * watchdog task, iavf_remove should handle this state
27433ccd54efSSlawomir Laba * as it can loop forever
27443ccd54efSSlawomir Laba */
27453ccd54efSSlawomir Laba mutex_unlock(&adapter->crit_lock);
27463ccd54efSSlawomir Laba return;
27473ccd54efSSlawomir Laba }
2748898ef1cbSMateusz Palczewski if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2749898ef1cbSMateusz Palczewski dev_err(&adapter->pdev->dev,
2750898ef1cbSMateusz Palczewski "Failed to communicate with PF; waiting before retry\n");
2751898ef1cbSMateusz Palczewski adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2752898ef1cbSMateusz Palczewski iavf_shutdown_adminq(hw);
2753898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
27544411a608SMichal Schmidt queue_delayed_work(adapter->wq,
2755898ef1cbSMateusz Palczewski &adapter->watchdog_task, (5 * HZ));
2756898ef1cbSMateusz Palczewski return;
2757898ef1cbSMateusz Palczewski }
2758898ef1cbSMateusz Palczewski /* Try again from failed step */
2759898ef1cbSMateusz Palczewski iavf_change_state(adapter, adapter->last_state);
2760898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
27614411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
2762898ef1cbSMateusz Palczewski return;
2763bac84861SJan Sokolowski case __IAVF_COMM_FAILED:
27643ccd54efSSlawomir Laba if (test_bit(__IAVF_IN_REMOVE_TASK,
27653ccd54efSSlawomir Laba &adapter->crit_section)) {
27663ccd54efSSlawomir Laba /* Set state to __IAVF_INIT_FAILED and perform remove
27673ccd54efSSlawomir Laba * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
27683ccd54efSSlawomir Laba * doesn't bring the state back to __IAVF_COMM_FAILED.
27693ccd54efSSlawomir Laba */
27703ccd54efSSlawomir Laba iavf_change_state(adapter, __IAVF_INIT_FAILED);
27713ccd54efSSlawomir Laba adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
27723ccd54efSSlawomir Laba mutex_unlock(&adapter->crit_lock);
27733ccd54efSSlawomir Laba return;
27743ccd54efSSlawomir Laba }
2775f1cad2ceSJesse Brandeburg reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2776f1cad2ceSJesse Brandeburg IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2777b476b003SJakub Pawlak if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2778b476b003SJakub Pawlak reg_val == VIRTCHNL_VFR_COMPLETED) {
27795ec8b7d1SJesse Brandeburg /* A chance for redemption! */
2780bac84861SJan Sokolowski dev_err(&adapter->pdev->dev,
2781bac84861SJan Sokolowski "Hardware came out of reset. Attempting reinit.\n");
2782898ef1cbSMateusz Palczewski /* When init task contacts the PF and
27835ec8b7d1SJesse Brandeburg * gets everything set up again, it'll restart the
27845ec8b7d1SJesse Brandeburg * watchdog for us. Down, boy. Sit. Stay. Woof.
27855ec8b7d1SJesse Brandeburg */
2786898ef1cbSMateusz Palczewski iavf_change_state(adapter, __IAVF_STARTUP);
2787898ef1cbSMateusz Palczewski adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
27885ec8b7d1SJesse Brandeburg }
27895ec8b7d1SJesse Brandeburg adapter->aq_required = 0;
27905ec8b7d1SJesse Brandeburg adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2791bc2f39a6SDan Carpenter mutex_unlock(&adapter->crit_lock);
27924411a608SMichal Schmidt queue_delayed_work(adapter->wq,
2793bac84861SJan Sokolowski &adapter->watchdog_task,
2794bac84861SJan Sokolowski msecs_to_jiffies(10));
2795898ef1cbSMateusz Palczewski return;
2796bac84861SJan Sokolowski case __IAVF_RESETTING:
27975ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
27984411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
27994411a608SMichal Schmidt HZ * 2);
2800bac84861SJan Sokolowski return;
2801bac84861SJan Sokolowski case __IAVF_DOWN:
2802bac84861SJan Sokolowski case __IAVF_DOWN_PENDING:
2803bac84861SJan Sokolowski case __IAVF_TESTING:
2804bac84861SJan Sokolowski case __IAVF_RUNNING:
28055ec8b7d1SJesse Brandeburg if (adapter->current_op) {
28065ec8b7d1SJesse Brandeburg if (!iavf_asq_done(hw)) {
2807bac84861SJan Sokolowski dev_dbg(&adapter->pdev->dev,
2808bac84861SJan Sokolowski "Admin queue timeout\n");
28095ec8b7d1SJesse Brandeburg iavf_send_api_ver(adapter);
28105ec8b7d1SJesse Brandeburg }
2811bac84861SJan Sokolowski } else {
2812209f2f9cSBrett Creeley int ret = iavf_process_aq_command(adapter);
2813209f2f9cSBrett Creeley
281493580766STony Nguyen /* An error will be returned if no commands were
281593580766STony Nguyen * processed; use this opportunity to update stats
2816209f2f9cSBrett Creeley * if the error isn't -EOPNOTSUPP
281793580766STony Nguyen */
2818209f2f9cSBrett Creeley if (ret && ret != -EOPNOTSUPP &&
2819bac84861SJan Sokolowski adapter->state == __IAVF_RUNNING)
2820b476b003SJakub Pawlak iavf_request_stats(adapter);
28215ec8b7d1SJesse Brandeburg }
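/* While the adapter is running, also detect and try to recover hung
 * transmit queues.
 */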
2822898ef1cbSMateusz Palczewski if (adapter->state == __IAVF_RUNNING)
2823898ef1cbSMateusz Palczewski iavf_detect_recover_hung(&adapter->vsi);
2824bac84861SJan Sokolowski break;
2825bac84861SJan Sokolowski case __IAVF_REMOVE:
2826bac84861SJan Sokolowski default:
2827bc2f39a6SDan Carpenter mutex_unlock(&adapter->crit_lock);
2828898ef1cbSMateusz Palczewski return;
2829bac84861SJan Sokolowski }
2830bac84861SJan Sokolowski
2831bac84861SJan Sokolowski /* check for hw reset */
2832bac84861SJan Sokolowski reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2833bac84861SJan Sokolowski if (!reg_val) {
2834bac84861SJan Sokolowski adapter->aq_required = 0;
2835bac84861SJan Sokolowski adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2836bac84861SJan Sokolowski dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2837c34743daSAhmed Zaki iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
2838898ef1cbSMateusz Palczewski mutex_unlock(&adapter->crit_lock);
28394411a608SMichal Schmidt queue_delayed_work(adapter->wq,
2840898ef1cbSMateusz Palczewski &adapter->watchdog_task, HZ * 2);
2841898ef1cbSMateusz Palczewski return;
2842bac84861SJan Sokolowski }
28435ec8b7d1SJesse Brandeburg
28445ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
28455ec8b7d1SJesse Brandeburg restart_watchdog:
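/* Rescheduling policy: kick the adminq task once the adapter is at least
 * in the __IAVF_DOWN state, poll again in 20 ms while AQ requests are
 * pending, and otherwise poll every two seconds.
 */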
2846a472eb5cSSlawomir Laba if (adapter->state >= __IAVF_DOWN)
28474411a608SMichal Schmidt queue_work(adapter->wq, &adapter->adminq_task);
28485ec8b7d1SJesse Brandeburg if (adapter->aq_required)
28494411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2850fdd4044fSJakub Pawlak msecs_to_jiffies(20));
28515ec8b7d1SJesse Brandeburg else
28524411a608SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
28534411a608SMichal Schmidt HZ * 2);
28545ec8b7d1SJesse Brandeburg }
28555ec8b7d1SJesse Brandeburg
285616b2dd8cSPrzemyslaw Patynowski /**
285716b2dd8cSPrzemyslaw Patynowski * iavf_disable_vf - disable VF
285816b2dd8cSPrzemyslaw Patynowski * @adapter: board private structure
285916b2dd8cSPrzemyslaw Patynowski *
286016b2dd8cSPrzemyslaw Patynowski * Set communication failed flag and free all resources.
286116b2dd8cSPrzemyslaw Patynowski * NOTE: This function is expected to be called with crit_lock being held.
286216b2dd8cSPrzemyslaw Patynowski **/
28635ec8b7d1SJesse Brandeburg static void iavf_disable_vf(struct iavf_adapter *adapter)
28645ec8b7d1SJesse Brandeburg {
28655ec8b7d1SJesse Brandeburg struct iavf_mac_filter *f, *ftmp;
28665ec8b7d1SJesse Brandeburg struct iavf_vlan_filter *fv, *fvtmp;
28675ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *cf, *cftmp;
28685ec8b7d1SJesse Brandeburg
28695ec8b7d1SJesse Brandeburg adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
28705ec8b7d1SJesse Brandeburg
28715ec8b7d1SJesse Brandeburg /* We don't use netif_running() because it may be true prior to
28725ec8b7d1SJesse Brandeburg * ndo_open() returning, so we can't assume it means all our open
28735ec8b7d1SJesse Brandeburg * tasks have finished, since we're not holding the rtnl_lock here.
28745ec8b7d1SJesse Brandeburg */
28755ec8b7d1SJesse Brandeburg if (adapter->state == __IAVF_RUNNING) {
287656184e01SJesse Brandeburg set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
28775ec8b7d1SJesse Brandeburg netif_carrier_off(adapter->netdev);
28785ec8b7d1SJesse Brandeburg netif_tx_disable(adapter->netdev);
28795ec8b7d1SJesse Brandeburg adapter->link_up = false;
28805ec8b7d1SJesse Brandeburg iavf_napi_disable_all(adapter);
28815ec8b7d1SJesse Brandeburg iavf_irq_disable(adapter);
28825ec8b7d1SJesse Brandeburg iavf_free_traffic_irqs(adapter);
28835ec8b7d1SJesse Brandeburg iavf_free_all_tx_resources(adapter);
28845ec8b7d1SJesse Brandeburg iavf_free_all_rx_resources(adapter);
28855ec8b7d1SJesse Brandeburg }
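/* With the data path quiesced, drop all software filter state so that a
 * later re-initialization starts from a clean slate.
 */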
28865ec8b7d1SJesse Brandeburg
28875ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->mac_vlan_list_lock);
28885ec8b7d1SJesse Brandeburg
28895ec8b7d1SJesse Brandeburg /* Delete all of the filters */
28905ec8b7d1SJesse Brandeburg list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
28915ec8b7d1SJesse Brandeburg list_del(&f->list);
28925ec8b7d1SJesse Brandeburg kfree(f);
28935ec8b7d1SJesse Brandeburg }
28945ec8b7d1SJesse Brandeburg
28955ec8b7d1SJesse Brandeburg list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
28965ec8b7d1SJesse Brandeburg list_del(&fv->list);
28975ec8b7d1SJesse Brandeburg kfree(fv);
28985ec8b7d1SJesse Brandeburg }
28999c85b7faSAhmed Zaki adapter->num_vlan_filters = 0;
29005ec8b7d1SJesse Brandeburg
29015ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->mac_vlan_list_lock);
29025ec8b7d1SJesse Brandeburg
29035ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->cloud_filter_list_lock);
29045ec8b7d1SJesse Brandeburg list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
29055ec8b7d1SJesse Brandeburg list_del(&cf->list);
29065ec8b7d1SJesse Brandeburg kfree(cf);
29075ec8b7d1SJesse Brandeburg adapter->num_cloud_filters--;
29085ec8b7d1SJesse Brandeburg }
29095ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->cloud_filter_list_lock);
29105ec8b7d1SJesse Brandeburg
29115ec8b7d1SJesse Brandeburg iavf_free_misc_irq(adapter);
2912b5b219a1SMichal Schmidt iavf_free_interrupt_scheme(adapter);
2913e0ef26fbSBrett Creeley memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
29145ec8b7d1SJesse Brandeburg iavf_shutdown_adminq(&adapter->hw);
29155ec8b7d1SJesse Brandeburg adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
291645eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_DOWN);
29175ec8b7d1SJesse Brandeburg wake_up(&adapter->down_waitqueue);
29185ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
29195ec8b7d1SJesse Brandeburg }
29205ec8b7d1SJesse Brandeburg
29215ec8b7d1SJesse Brandeburg /**
29225ec8b7d1SJesse Brandeburg * iavf_reset_task - Call-back task to handle hardware reset
29235ec8b7d1SJesse Brandeburg * @work: pointer to work_struct
29245ec8b7d1SJesse Brandeburg *
29255ec8b7d1SJesse Brandeburg * During reset we need to shut down and reinitialize the admin queue
29265ec8b7d1SJesse Brandeburg * before we can use it to communicate with the PF again. We also clear
29275ec8b7d1SJesse Brandeburg * and reinit the rings because that context is lost as well.
29285ec8b7d1SJesse Brandeburg **/
29295ec8b7d1SJesse Brandeburg static void iavf_reset_task(struct work_struct *work)
29305ec8b7d1SJesse Brandeburg {
29315ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = container_of(work,
29325ec8b7d1SJesse Brandeburg struct iavf_adapter,
29335ec8b7d1SJesse Brandeburg reset_task);
29345ec8b7d1SJesse Brandeburg struct virtchnl_vf_resource *vfres = adapter->vf_res;
29355ec8b7d1SJesse Brandeburg struct net_device *netdev = adapter->netdev;
2936f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
29379e052291SStefan Assmann struct iavf_mac_filter *f, *ftmp;
29385ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *cf;
2939bae569d0SMateusz Palczewski enum iavf_status status;
29405ec8b7d1SJesse Brandeburg u32 reg_val;
29415ec8b7d1SJesse Brandeburg int i = 0, err;
29425ec8b7d1SJesse Brandeburg bool running;
29435ec8b7d1SJesse Brandeburg
29445ec8b7d1SJesse Brandeburg /* When device is being removed it doesn't make sense to run the reset
29455ec8b7d1SJesse Brandeburg * task, just return in such a case.
29465ec8b7d1SJesse Brandeburg */
2947fc2e6b3bSSlawomir Laba if (!mutex_trylock(&adapter->crit_lock)) {
2948fc2e6b3bSSlawomir Laba if (adapter->state != __IAVF_REMOVE)
29494411a608SMichal Schmidt queue_work(adapter->wq, &adapter->reset_task);
29505ec8b7d1SJesse Brandeburg
2951d2806d96SMarcin Szycik return;
2952226d5285SStefan Assmann }
2953fc2e6b3bSSlawomir Laba
29545ec8b7d1SJesse Brandeburg iavf_misc_irq_disable(adapter);
29555ec8b7d1SJesse Brandeburg if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
29565ec8b7d1SJesse Brandeburg adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
29575ec8b7d1SJesse Brandeburg /* Restart the AQ here. If we have been reset but didn't
29585ec8b7d1SJesse Brandeburg * detect it, or if the PF had to reinit, our AQ will be hosed.
29595ec8b7d1SJesse Brandeburg */
29605ec8b7d1SJesse Brandeburg iavf_shutdown_adminq(hw);
29615ec8b7d1SJesse Brandeburg iavf_init_adminq(hw);
29625ec8b7d1SJesse Brandeburg iavf_request_reset(adapter);
29635ec8b7d1SJesse Brandeburg }
29645ec8b7d1SJesse Brandeburg adapter->flags |= IAVF_FLAG_RESET_PENDING;
29655ec8b7d1SJesse Brandeburg
29665ec8b7d1SJesse Brandeburg /* poll until we see the reset actually happen */
29678e3e4b9dSPaul Greenwalt for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2968f1cad2ceSJesse Brandeburg reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2969f1cad2ceSJesse Brandeburg IAVF_VF_ARQLEN1_ARQENABLE_MASK;
29705ec8b7d1SJesse Brandeburg if (!reg_val)
29715ec8b7d1SJesse Brandeburg break;
29725ec8b7d1SJesse Brandeburg usleep_range(5000, 10000);
29735ec8b7d1SJesse Brandeburg }
29748e3e4b9dSPaul Greenwalt if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
29755ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "Never saw reset\n");
29765ec8b7d1SJesse Brandeburg goto continue_reset; /* act like the reset happened */
29775ec8b7d1SJesse Brandeburg }
29785ec8b7d1SJesse Brandeburg
29795ec8b7d1SJesse Brandeburg /* wait until the reset is complete and the PF is responding to us */
29808e3e4b9dSPaul Greenwalt for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
29815ec8b7d1SJesse Brandeburg /* sleep first to make sure a minimum wait time is met */
29825ec8b7d1SJesse Brandeburg msleep(IAVF_RESET_WAIT_MS);
29835ec8b7d1SJesse Brandeburg
2984f1cad2ceSJesse Brandeburg reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2985f1cad2ceSJesse Brandeburg IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
29865ec8b7d1SJesse Brandeburg if (reg_val == VIRTCHNL_VFR_VFACTIVE)
29875ec8b7d1SJesse Brandeburg break;
29885ec8b7d1SJesse Brandeburg }
29895ec8b7d1SJesse Brandeburg
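/* Bus mastering and MSI configuration may have been cleared by the VF
 * reset, so restore them before talking to the device again.
 */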
29905ec8b7d1SJesse Brandeburg pci_set_master(adapter->pdev);
29917e4dcc13SMitch Williams pci_restore_msi_state(adapter->pdev);
29925ec8b7d1SJesse Brandeburg
29938e3e4b9dSPaul Greenwalt if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
29945ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
29955ec8b7d1SJesse Brandeburg reg_val);
29965ec8b7d1SJesse Brandeburg iavf_disable_vf(adapter);
2997e85ff9c6SSlawomir Laba mutex_unlock(&adapter->crit_lock);
29985ec8b7d1SJesse Brandeburg return; /* Do not attempt to reinit. It's dead, Jim. */
29995ec8b7d1SJesse Brandeburg }
30005ec8b7d1SJesse Brandeburg
30015ec8b7d1SJesse Brandeburg continue_reset:
30025ec8b7d1SJesse Brandeburg /* We don't use netif_running() because it may be true prior to
30035ec8b7d1SJesse Brandeburg * ndo_open() returning, so we can't assume it means all our open
30045ec8b7d1SJesse Brandeburg * tasks have finished, since we're not holding the rtnl_lock here.
30055ec8b7d1SJesse Brandeburg */
300614756b2aSSlawomir Laba running = adapter->state == __IAVF_RUNNING;
30075ec8b7d1SJesse Brandeburg
30085ec8b7d1SJesse Brandeburg if (running) {
30095ec8b7d1SJesse Brandeburg netif_carrier_off(netdev);
3010c678669dSIvan Vecera netif_tx_stop_all_queues(netdev);
30115ec8b7d1SJesse Brandeburg adapter->link_up = false;
30125ec8b7d1SJesse Brandeburg iavf_napi_disable_all(adapter);
30135ec8b7d1SJesse Brandeburg }
30145ec8b7d1SJesse Brandeburg iavf_irq_disable(adapter);
30155ec8b7d1SJesse Brandeburg
301645eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_RESETTING);
30175ec8b7d1SJesse Brandeburg adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
30185ec8b7d1SJesse Brandeburg
30195ec8b7d1SJesse Brandeburg 	/* free the Tx/Rx rings and descriptors; it might be better to just
30205ec8b7d1SJesse Brandeburg 	 * reuse them sometime in the future
30215ec8b7d1SJesse Brandeburg */
30225ec8b7d1SJesse Brandeburg iavf_free_all_rx_resources(adapter);
30235ec8b7d1SJesse Brandeburg iavf_free_all_tx_resources(adapter);
30245ec8b7d1SJesse Brandeburg
30255ec8b7d1SJesse Brandeburg adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
30265ec8b7d1SJesse Brandeburg /* kill and reinit the admin queue */
30275ec8b7d1SJesse Brandeburg iavf_shutdown_adminq(hw);
30285ec8b7d1SJesse Brandeburg adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3029bae569d0SMateusz Palczewski status = iavf_init_adminq(hw);
3030bae569d0SMateusz Palczewski if (status) {
30315ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3032bae569d0SMateusz Palczewski status);
3033bae569d0SMateusz Palczewski goto reset_err;
3034bae569d0SMateusz Palczewski }
30355ec8b7d1SJesse Brandeburg adapter->aq_required = 0;
30365ec8b7d1SJesse Brandeburg
303757d03f56SMichal Maloszewski if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
303857d03f56SMichal Maloszewski (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3039a77ed5c5SAhmed Zaki err = iavf_reinit_interrupt_scheme(adapter, running);
30405ec8b7d1SJesse Brandeburg if (err)
30415ec8b7d1SJesse Brandeburg goto reset_err;
30425ec8b7d1SJesse Brandeburg }
30435ec8b7d1SJesse Brandeburg
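	/* Re-establish RSS after the reset: when RSS configuration over the
	 * admin queue is supported, request it via aq_required; otherwise
	 * initialize RSS directly here.
	 */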
3044a7550f8bSMd Fahad Iqbal Polash if (RSS_AQ(adapter)) {
3045a7550f8bSMd Fahad Iqbal Polash adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3046a7550f8bSMd Fahad Iqbal Polash } else {
3047a7550f8bSMd Fahad Iqbal Polash err = iavf_init_rss(adapter);
3048a7550f8bSMd Fahad Iqbal Polash if (err)
3049a7550f8bSMd Fahad Iqbal Polash goto reset_err;
3050a7550f8bSMd Fahad Iqbal Polash }
3051a7550f8bSMd Fahad Iqbal Polash
30525ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3053209f2f9cSBrett Creeley /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
3054209f2f9cSBrett Creeley 	 * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here;
3055209f2f9cSBrett Creeley * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
3056209f2f9cSBrett Creeley * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
3057209f2f9cSBrett Creeley * been successfully sent and negotiated
3058209f2f9cSBrett Creeley */
3059209f2f9cSBrett Creeley adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
30605ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
30615ec8b7d1SJesse Brandeburg
30625ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->mac_vlan_list_lock);
30635ec8b7d1SJesse Brandeburg
30649e052291SStefan Assmann 	/* Delete the filter for the current MAC address; it could have
30659e052291SStefan Assmann 	 * been changed by the PF via an administratively set MAC.
30669e052291SStefan Assmann * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
30679e052291SStefan Assmann */
30689e052291SStefan Assmann list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
30699e052291SStefan Assmann if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
30709e052291SStefan Assmann list_del(&f->list);
30719e052291SStefan Assmann kfree(f);
30729e052291SStefan Assmann }
30739e052291SStefan Assmann }
30745ec8b7d1SJesse Brandeburg /* re-add all MAC filters */
30755ec8b7d1SJesse Brandeburg list_for_each_entry(f, &adapter->mac_filter_list, list) {
30765ec8b7d1SJesse Brandeburg f->add = true;
30775ec8b7d1SJesse Brandeburg }
30785ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->mac_vlan_list_lock);
30795ec8b7d1SJesse Brandeburg
30805ec8b7d1SJesse Brandeburg /* check if TCs are running and re-add all cloud filters */
30815ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->cloud_filter_list_lock);
30825ec8b7d1SJesse Brandeburg if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
30835ec8b7d1SJesse Brandeburg adapter->num_tc) {
30845ec8b7d1SJesse Brandeburg list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
30855ec8b7d1SJesse Brandeburg cf->add = true;
30865ec8b7d1SJesse Brandeburg }
30875ec8b7d1SJesse Brandeburg }
30885ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->cloud_filter_list_lock);
30895ec8b7d1SJesse Brandeburg
30905ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
30915ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
30925ec8b7d1SJesse Brandeburg iavf_misc_irq_enable(adapter);
30935ec8b7d1SJesse Brandeburg
30944411a608SMichal Schmidt mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
30955ec8b7d1SJesse Brandeburg
30965ec8b7d1SJesse Brandeburg /* We were running when the reset started, so we need to restore some
30975ec8b7d1SJesse Brandeburg * state here.
30985ec8b7d1SJesse Brandeburg */
30995ec8b7d1SJesse Brandeburg if (running) {
31005ec8b7d1SJesse Brandeburg /* allocate transmit descriptors */
31015ec8b7d1SJesse Brandeburg err = iavf_setup_all_tx_resources(adapter);
31025ec8b7d1SJesse Brandeburg if (err)
31035ec8b7d1SJesse Brandeburg goto reset_err;
31045ec8b7d1SJesse Brandeburg
31055ec8b7d1SJesse Brandeburg /* allocate receive descriptors */
31065ec8b7d1SJesse Brandeburg err = iavf_setup_all_rx_resources(adapter);
31075ec8b7d1SJesse Brandeburg if (err)
31085ec8b7d1SJesse Brandeburg goto reset_err;
31095ec8b7d1SJesse Brandeburg
311057d03f56SMichal Maloszewski if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
311157d03f56SMichal Maloszewski (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
31125ec8b7d1SJesse Brandeburg err = iavf_request_traffic_irqs(adapter, netdev->name);
31135ec8b7d1SJesse Brandeburg if (err)
31145ec8b7d1SJesse Brandeburg goto reset_err;
31155ec8b7d1SJesse Brandeburg
311657d03f56SMichal Maloszewski adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
31175ec8b7d1SJesse Brandeburg }
31185ec8b7d1SJesse Brandeburg
31195ec8b7d1SJesse Brandeburg iavf_configure(adapter);
31205ec8b7d1SJesse Brandeburg
312145eebd62SMateusz Palczewski /* iavf_up_complete() will switch device back
312245eebd62SMateusz Palczewski * to __IAVF_RUNNING
312345eebd62SMateusz Palczewski */
31245ec8b7d1SJesse Brandeburg iavf_up_complete(adapter);
31257d59706dSMateusz Palczewski
31265ec8b7d1SJesse Brandeburg iavf_irq_enable(adapter, true);
31275ec8b7d1SJesse Brandeburg } else {
312845eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_DOWN);
31295ec8b7d1SJesse Brandeburg wake_up(&adapter->down_waitqueue);
31305ec8b7d1SJesse Brandeburg }
313157d03f56SMichal Maloszewski
313257d03f56SMichal Maloszewski adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
313357d03f56SMichal Maloszewski
3134c2ed2403SMarcin Szycik wake_up(&adapter->reset_waitqueue);
31355ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
31365ec8b7d1SJesse Brandeburg
3137d2806d96SMarcin Szycik return;
31385ec8b7d1SJesse Brandeburg reset_err:
313931071173SPrzemyslaw Patynowski if (running) {
314031071173SPrzemyslaw Patynowski set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
314131071173SPrzemyslaw Patynowski iavf_free_traffic_irqs(adapter);
314231071173SPrzemyslaw Patynowski }
314331071173SPrzemyslaw Patynowski iavf_disable_vf(adapter);
314431071173SPrzemyslaw Patynowski
31455ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
31465ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
31475ec8b7d1SJesse Brandeburg }
31485ec8b7d1SJesse Brandeburg
31495ec8b7d1SJesse Brandeburg /**
31505ec8b7d1SJesse Brandeburg * iavf_adminq_task - worker thread to clean the admin queue
31515ec8b7d1SJesse Brandeburg * @work: pointer to work_struct containing our data
31525ec8b7d1SJesse Brandeburg **/
31535ec8b7d1SJesse Brandeburg static void iavf_adminq_task(struct work_struct *work)
31545ec8b7d1SJesse Brandeburg {
31555ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter =
31565ec8b7d1SJesse Brandeburg container_of(work, struct iavf_adapter, adminq_task);
3157f349daa5SJesse Brandeburg struct iavf_hw *hw = &adapter->hw;
31587af36e32SAlice Michael struct iavf_arq_event_info event;
31595ec8b7d1SJesse Brandeburg enum virtchnl_ops v_op;
316080754bbcSSergey Nemov enum iavf_status ret, v_ret;
31615ec8b7d1SJesse Brandeburg u32 val, oldval;
31625ec8b7d1SJesse Brandeburg u16 pending;
31635ec8b7d1SJesse Brandeburg
3164fc2e6b3bSSlawomir Laba if (!mutex_trylock(&adapter->crit_lock)) {
3165fc2e6b3bSSlawomir Laba if (adapter->state == __IAVF_REMOVE)
3166fc2e6b3bSSlawomir Laba return;
3167fc2e6b3bSSlawomir Laba
31684411a608SMichal Schmidt queue_work(adapter->wq, &adapter->adminq_task);
3169fc2e6b3bSSlawomir Laba goto out;
3170fc2e6b3bSSlawomir Laba }
3171fc2e6b3bSSlawomir Laba
317291896c8aSJacob Keller if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
317391896c8aSJacob Keller goto unlock;
317491896c8aSJacob Keller
31755ec8b7d1SJesse Brandeburg event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
31765ec8b7d1SJesse Brandeburg event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
31775ec8b7d1SJesse Brandeburg if (!event.msg_buf)
3178a2f054c1SJacob Keller goto unlock;
31795ec8b7d1SJesse Brandeburg
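	/* Drain the admin receive queue: pull one event per iteration, decode
	 * the virtchnl opcode and status from the descriptor cookies, and pass
	 * the message to the completion handler. Clear the buffer between
	 * messages so stale data is never re-parsed.
	 */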
31805ec8b7d1SJesse Brandeburg do {
31815ec8b7d1SJesse Brandeburg ret = iavf_clean_arq_element(hw, &event, &pending);
31825ec8b7d1SJesse Brandeburg v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
318380754bbcSSergey Nemov v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
31845ec8b7d1SJesse Brandeburg
31855ec8b7d1SJesse Brandeburg if (ret || !v_op)
31865ec8b7d1SJesse Brandeburg break; /* No event to process or error cleaning ARQ */
31875ec8b7d1SJesse Brandeburg
31885ec8b7d1SJesse Brandeburg iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
31895ec8b7d1SJesse Brandeburg event.msg_len);
31905ec8b7d1SJesse Brandeburg if (pending != 0)
31915ec8b7d1SJesse Brandeburg memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
31925ec8b7d1SJesse Brandeburg } while (pending);
31935ec8b7d1SJesse Brandeburg
3194c34743daSAhmed Zaki if (iavf_is_reset_in_progress(adapter))
31955ec8b7d1SJesse Brandeburg goto freedom;
31965ec8b7d1SJesse Brandeburg
31975ec8b7d1SJesse Brandeburg /* check for error indications */
31983d66f215SIvan Vecera val = rd32(hw, IAVF_VF_ARQLEN1);
3199321421b5SSurabhi Boob if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
32005ec8b7d1SJesse Brandeburg goto freedom;
32015ec8b7d1SJesse Brandeburg oldval = val;
3202f1cad2ceSJesse Brandeburg if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
32035ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3204f1cad2ceSJesse Brandeburg val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
32055ec8b7d1SJesse Brandeburg }
3206f1cad2ceSJesse Brandeburg if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
32075ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3208f1cad2ceSJesse Brandeburg val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
32095ec8b7d1SJesse Brandeburg }
3210f1cad2ceSJesse Brandeburg if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
32115ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3212f1cad2ceSJesse Brandeburg val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
32135ec8b7d1SJesse Brandeburg }
32145ec8b7d1SJesse Brandeburg if (oldval != val)
32153d66f215SIvan Vecera wr32(hw, IAVF_VF_ARQLEN1, val);
32165ec8b7d1SJesse Brandeburg
32173d66f215SIvan Vecera val = rd32(hw, IAVF_VF_ATQLEN1);
32185ec8b7d1SJesse Brandeburg oldval = val;
3219f1cad2ceSJesse Brandeburg if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
32205ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3221f1cad2ceSJesse Brandeburg val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
32225ec8b7d1SJesse Brandeburg }
3223f1cad2ceSJesse Brandeburg if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
32245ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3225f1cad2ceSJesse Brandeburg val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
32265ec8b7d1SJesse Brandeburg }
3227f1cad2ceSJesse Brandeburg if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
32285ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3229f1cad2ceSJesse Brandeburg val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
32305ec8b7d1SJesse Brandeburg }
32315ec8b7d1SJesse Brandeburg if (oldval != val)
32323d66f215SIvan Vecera wr32(hw, IAVF_VF_ATQLEN1, val);
32335ec8b7d1SJesse Brandeburg
32345ec8b7d1SJesse Brandeburg freedom:
32355ec8b7d1SJesse Brandeburg kfree(event.msg_buf);
3236a2f054c1SJacob Keller unlock:
3237a2f054c1SJacob Keller mutex_unlock(&adapter->crit_lock);
32385ec8b7d1SJesse Brandeburg out:
32395ec8b7d1SJesse Brandeburg /* re-enable Admin queue interrupt cause */
32405ec8b7d1SJesse Brandeburg iavf_misc_irq_enable(adapter);
32415ec8b7d1SJesse Brandeburg }
32425ec8b7d1SJesse Brandeburg
32435ec8b7d1SJesse Brandeburg /**
32445ec8b7d1SJesse Brandeburg * iavf_free_all_tx_resources - Free Tx Resources for All Queues
32455ec8b7d1SJesse Brandeburg * @adapter: board private structure
32465ec8b7d1SJesse Brandeburg *
32475ec8b7d1SJesse Brandeburg * Free all transmit software resources
32485ec8b7d1SJesse Brandeburg **/
32495ec8b7d1SJesse Brandeburg void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
32505ec8b7d1SJesse Brandeburg {
32515ec8b7d1SJesse Brandeburg int i;
32525ec8b7d1SJesse Brandeburg
32535ec8b7d1SJesse Brandeburg if (!adapter->tx_rings)
32545ec8b7d1SJesse Brandeburg return;
32555ec8b7d1SJesse Brandeburg
32565ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++)
32575ec8b7d1SJesse Brandeburg if (adapter->tx_rings[i].desc)
32585ec8b7d1SJesse Brandeburg iavf_free_tx_resources(&adapter->tx_rings[i]);
32595ec8b7d1SJesse Brandeburg }
32605ec8b7d1SJesse Brandeburg
32615ec8b7d1SJesse Brandeburg /**
32625ec8b7d1SJesse Brandeburg * iavf_setup_all_tx_resources - allocate all queues Tx resources
32635ec8b7d1SJesse Brandeburg * @adapter: board private structure
32645ec8b7d1SJesse Brandeburg *
32655ec8b7d1SJesse Brandeburg * If this function returns with an error, then it's possible one or
32665ec8b7d1SJesse Brandeburg * more of the rings is populated (while the rest are not). It is the
32675ec8b7d1SJesse Brandeburg  * caller's duty to clean those orphaned rings.
32685ec8b7d1SJesse Brandeburg *
32695ec8b7d1SJesse Brandeburg * Return 0 on success, negative on failure
32705ec8b7d1SJesse Brandeburg **/
32715ec8b7d1SJesse Brandeburg static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
32725ec8b7d1SJesse Brandeburg {
32735ec8b7d1SJesse Brandeburg int i, err = 0;
32745ec8b7d1SJesse Brandeburg
32755ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++) {
32765ec8b7d1SJesse Brandeburg adapter->tx_rings[i].count = adapter->tx_desc_count;
32775ec8b7d1SJesse Brandeburg err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
32785ec8b7d1SJesse Brandeburg if (!err)
32795ec8b7d1SJesse Brandeburg continue;
32805ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
32815ec8b7d1SJesse Brandeburg "Allocation for Tx Queue %u failed\n", i);
32825ec8b7d1SJesse Brandeburg break;
32835ec8b7d1SJesse Brandeburg }
32845ec8b7d1SJesse Brandeburg
32855ec8b7d1SJesse Brandeburg return err;
32865ec8b7d1SJesse Brandeburg }
32875ec8b7d1SJesse Brandeburg
32885ec8b7d1SJesse Brandeburg /**
32895ec8b7d1SJesse Brandeburg * iavf_setup_all_rx_resources - allocate all queues Rx resources
32905ec8b7d1SJesse Brandeburg * @adapter: board private structure
32915ec8b7d1SJesse Brandeburg *
32925ec8b7d1SJesse Brandeburg * If this function returns with an error, then it's possible one or
32935ec8b7d1SJesse Brandeburg * more of the rings is populated (while the rest are not). It is the
32945ec8b7d1SJesse Brandeburg  * caller's duty to clean those orphaned rings.
32955ec8b7d1SJesse Brandeburg *
32965ec8b7d1SJesse Brandeburg * Return 0 on success, negative on failure
32975ec8b7d1SJesse Brandeburg **/
32985ec8b7d1SJesse Brandeburg static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
32995ec8b7d1SJesse Brandeburg {
33005ec8b7d1SJesse Brandeburg int i, err = 0;
33015ec8b7d1SJesse Brandeburg
33025ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++) {
33035ec8b7d1SJesse Brandeburg adapter->rx_rings[i].count = adapter->rx_desc_count;
33045ec8b7d1SJesse Brandeburg err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
33055ec8b7d1SJesse Brandeburg if (!err)
33065ec8b7d1SJesse Brandeburg continue;
33075ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
33085ec8b7d1SJesse Brandeburg "Allocation for Rx Queue %u failed\n", i);
33095ec8b7d1SJesse Brandeburg break;
33105ec8b7d1SJesse Brandeburg }
33115ec8b7d1SJesse Brandeburg return err;
33125ec8b7d1SJesse Brandeburg }
33135ec8b7d1SJesse Brandeburg
33145ec8b7d1SJesse Brandeburg /**
33155ec8b7d1SJesse Brandeburg * iavf_free_all_rx_resources - Free Rx Resources for All Queues
33165ec8b7d1SJesse Brandeburg * @adapter: board private structure
33175ec8b7d1SJesse Brandeburg *
33185ec8b7d1SJesse Brandeburg * Free all receive software resources
33195ec8b7d1SJesse Brandeburg **/
33205ec8b7d1SJesse Brandeburg void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
33215ec8b7d1SJesse Brandeburg {
33225ec8b7d1SJesse Brandeburg int i;
33235ec8b7d1SJesse Brandeburg
33245ec8b7d1SJesse Brandeburg if (!adapter->rx_rings)
33255ec8b7d1SJesse Brandeburg return;
33265ec8b7d1SJesse Brandeburg
33275ec8b7d1SJesse Brandeburg for (i = 0; i < adapter->num_active_queues; i++)
33285ec8b7d1SJesse Brandeburg if (adapter->rx_rings[i].desc)
33295ec8b7d1SJesse Brandeburg iavf_free_rx_resources(&adapter->rx_rings[i]);
33305ec8b7d1SJesse Brandeburg }
33315ec8b7d1SJesse Brandeburg
33325ec8b7d1SJesse Brandeburg /**
33335ec8b7d1SJesse Brandeburg * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
33345ec8b7d1SJesse Brandeburg * @adapter: board private structure
33355ec8b7d1SJesse Brandeburg * @max_tx_rate: max Tx bw for a tc
33365ec8b7d1SJesse Brandeburg **/
33375ec8b7d1SJesse Brandeburg static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
33385ec8b7d1SJesse Brandeburg u64 max_tx_rate)
33395ec8b7d1SJesse Brandeburg {
33405ec8b7d1SJesse Brandeburg int speed = 0, ret = 0;
33415ec8b7d1SJesse Brandeburg
3342e0ef26fbSBrett Creeley if (ADV_LINK_SUPPORT(adapter)) {
3343e0ef26fbSBrett Creeley if (adapter->link_speed_mbps < U32_MAX) {
3344e0ef26fbSBrett Creeley speed = adapter->link_speed_mbps;
3345e0ef26fbSBrett Creeley goto validate_bw;
3346e0ef26fbSBrett Creeley } else {
3347e0ef26fbSBrett Creeley dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3348e0ef26fbSBrett Creeley return -EINVAL;
3349e0ef26fbSBrett Creeley }
3350e0ef26fbSBrett Creeley }
3351e0ef26fbSBrett Creeley
33525ec8b7d1SJesse Brandeburg switch (adapter->link_speed) {
33535071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_40GB:
335418c012d9SBrett Creeley speed = SPEED_40000;
33555ec8b7d1SJesse Brandeburg break;
33565071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_25GB:
335718c012d9SBrett Creeley speed = SPEED_25000;
33585ec8b7d1SJesse Brandeburg break;
33595071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_20GB:
336018c012d9SBrett Creeley speed = SPEED_20000;
33615ec8b7d1SJesse Brandeburg break;
33625071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_10GB:
336318c012d9SBrett Creeley speed = SPEED_10000;
336418c012d9SBrett Creeley break;
336518c012d9SBrett Creeley case VIRTCHNL_LINK_SPEED_5GB:
336618c012d9SBrett Creeley speed = SPEED_5000;
336718c012d9SBrett Creeley break;
336818c012d9SBrett Creeley case VIRTCHNL_LINK_SPEED_2_5GB:
336918c012d9SBrett Creeley speed = SPEED_2500;
33705ec8b7d1SJesse Brandeburg break;
33715071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_1GB:
337218c012d9SBrett Creeley speed = SPEED_1000;
33735ec8b7d1SJesse Brandeburg break;
33745071bda2SAleksandr Loktionov case VIRTCHNL_LINK_SPEED_100MB:
337518c012d9SBrett Creeley speed = SPEED_100;
33765ec8b7d1SJesse Brandeburg break;
33775ec8b7d1SJesse Brandeburg default:
33785ec8b7d1SJesse Brandeburg break;
33795ec8b7d1SJesse Brandeburg }
33805ec8b7d1SJesse Brandeburg
3381e0ef26fbSBrett Creeley validate_bw:
33825ec8b7d1SJesse Brandeburg if (max_tx_rate > speed) {
33835ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
33845ec8b7d1SJesse Brandeburg "Invalid tx rate specified\n");
33855ec8b7d1SJesse Brandeburg ret = -EINVAL;
33865ec8b7d1SJesse Brandeburg }
33875ec8b7d1SJesse Brandeburg
33885ec8b7d1SJesse Brandeburg return ret;
33895ec8b7d1SJesse Brandeburg }
33905ec8b7d1SJesse Brandeburg
33915ec8b7d1SJesse Brandeburg /**
3392262de08fSJesse Brandeburg * iavf_validate_ch_config - validate queue mapping info
33935ec8b7d1SJesse Brandeburg * @adapter: board private structure
33945ec8b7d1SJesse Brandeburg * @mqprio_qopt: queue parameters
33955ec8b7d1SJesse Brandeburg *
33965ec8b7d1SJesse Brandeburg  * This function checks whether the config provided by the user to
33975ec8b7d1SJesse Brandeburg  * configure queue channels is valid. Returns 0 on a valid
33985ec8b7d1SJesse Brandeburg  * config.
33995ec8b7d1SJesse Brandeburg **/
34005ec8b7d1SJesse Brandeburg static int iavf_validate_ch_config(struct iavf_adapter *adapter,
34015ec8b7d1SJesse Brandeburg struct tc_mqprio_qopt_offload *mqprio_qopt)
34025ec8b7d1SJesse Brandeburg {
34035ec8b7d1SJesse Brandeburg u64 total_max_rate = 0;
3404ec60d54cSPrzemyslaw Patynowski u32 tx_rate_rem = 0;
34055ec8b7d1SJesse Brandeburg int i, num_qps = 0;
34065ec8b7d1SJesse Brandeburg u64 tx_rate = 0;
34075ec8b7d1SJesse Brandeburg int ret = 0;
34085ec8b7d1SJesse Brandeburg
34095ec8b7d1SJesse Brandeburg if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
34105ec8b7d1SJesse Brandeburg mqprio_qopt->qopt.num_tc < 1)
34115ec8b7d1SJesse Brandeburg return -EINVAL;
34125ec8b7d1SJesse Brandeburg
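	/* Each TC must map to a non-empty, contiguous queue range: the offset
	 * of every TC has to equal the number of queues consumed by the
	 * preceding TCs.
	 */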
34135ec8b7d1SJesse Brandeburg for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
34145ec8b7d1SJesse Brandeburg if (!mqprio_qopt->qopt.count[i] ||
34155ec8b7d1SJesse Brandeburg mqprio_qopt->qopt.offset[i] != num_qps)
34165ec8b7d1SJesse Brandeburg return -EINVAL;
34175ec8b7d1SJesse Brandeburg if (mqprio_qopt->min_rate[i]) {
34185ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
3419ec60d54cSPrzemyslaw Patynowski "Invalid min tx rate (greater than 0) specified for TC%d\n",
3420ec60d54cSPrzemyslaw Patynowski i);
34215ec8b7d1SJesse Brandeburg return -EINVAL;
34225ec8b7d1SJesse Brandeburg }
3423ec60d54cSPrzemyslaw Patynowski
34245ec8b7d1SJesse Brandeburg /* convert to Mbps */
34255ec8b7d1SJesse Brandeburg tx_rate = div_u64(mqprio_qopt->max_rate[i],
34265ec8b7d1SJesse Brandeburg IAVF_MBPS_DIVISOR);
3427ec60d54cSPrzemyslaw Patynowski
3428ec60d54cSPrzemyslaw Patynowski if (mqprio_qopt->max_rate[i] &&
3429ec60d54cSPrzemyslaw Patynowski tx_rate < IAVF_MBPS_QUANTA) {
3430ec60d54cSPrzemyslaw Patynowski dev_err(&adapter->pdev->dev,
3431ec60d54cSPrzemyslaw Patynowski "Invalid max tx rate for TC%d, minimum %dMbps\n",
3432ec60d54cSPrzemyslaw Patynowski i, IAVF_MBPS_QUANTA);
3433ec60d54cSPrzemyslaw Patynowski return -EINVAL;
3434ec60d54cSPrzemyslaw Patynowski }
3435ec60d54cSPrzemyslaw Patynowski
3436ec60d54cSPrzemyslaw Patynowski (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3437ec60d54cSPrzemyslaw Patynowski
3438ec60d54cSPrzemyslaw Patynowski if (tx_rate_rem != 0) {
3439ec60d54cSPrzemyslaw Patynowski dev_err(&adapter->pdev->dev,
3440ec60d54cSPrzemyslaw Patynowski "Invalid max tx rate for TC%d, not divisible by %d\n",
3441ec60d54cSPrzemyslaw Patynowski i, IAVF_MBPS_QUANTA);
3442ec60d54cSPrzemyslaw Patynowski return -EINVAL;
3443ec60d54cSPrzemyslaw Patynowski }
3444ec60d54cSPrzemyslaw Patynowski
34455ec8b7d1SJesse Brandeburg total_max_rate += tx_rate;
34465ec8b7d1SJesse Brandeburg num_qps += mqprio_qopt->qopt.count[i];
34475ec8b7d1SJesse Brandeburg }
3448b712941cSKaren Sornek if (num_qps > adapter->num_active_queues) {
3449b712941cSKaren Sornek dev_err(&adapter->pdev->dev,
3450b712941cSKaren Sornek "Cannot support requested number of queues\n");
34515ec8b7d1SJesse Brandeburg return -EINVAL;
3452b712941cSKaren Sornek }
34535ec8b7d1SJesse Brandeburg
34545ec8b7d1SJesse Brandeburg ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
34555ec8b7d1SJesse Brandeburg return ret;
34565ec8b7d1SJesse Brandeburg }
34575ec8b7d1SJesse Brandeburg
34585ec8b7d1SJesse Brandeburg /**
3459b50f7bcaSJesse Brandeburg * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3460b50f7bcaSJesse Brandeburg * @adapter: board private structure
34615ec8b7d1SJesse Brandeburg **/
34625ec8b7d1SJesse Brandeburg static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
34635ec8b7d1SJesse Brandeburg {
34645ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *cf, *cftmp;
34655ec8b7d1SJesse Brandeburg
34665ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->cloud_filter_list_lock);
34675ec8b7d1SJesse Brandeburg list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
34685ec8b7d1SJesse Brandeburg list) {
34695ec8b7d1SJesse Brandeburg list_del(&cf->list);
34705ec8b7d1SJesse Brandeburg kfree(cf);
34715ec8b7d1SJesse Brandeburg adapter->num_cloud_filters--;
34725ec8b7d1SJesse Brandeburg }
34735ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->cloud_filter_list_lock);
34745ec8b7d1SJesse Brandeburg }
34755ec8b7d1SJesse Brandeburg
34765ec8b7d1SJesse Brandeburg /**
347754976cf5SSudheer Mogilappagari * iavf_is_tc_config_same - Compare the mqprio TC config with the
347854976cf5SSudheer Mogilappagari * TC config already configured on this adapter.
347954976cf5SSudheer Mogilappagari * @adapter: board private structure
348054976cf5SSudheer Mogilappagari * @mqprio_qopt: TC config received from kernel.
348154976cf5SSudheer Mogilappagari *
348254976cf5SSudheer Mogilappagari * This function compares the TC config received from the kernel
348354976cf5SSudheer Mogilappagari * with the config already configured on the adapter.
348454976cf5SSudheer Mogilappagari *
348554976cf5SSudheer Mogilappagari * Return: True if configuration is same, false otherwise.
348654976cf5SSudheer Mogilappagari **/
348754976cf5SSudheer Mogilappagari static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
348854976cf5SSudheer Mogilappagari struct tc_mqprio_qopt *mqprio_qopt)
348954976cf5SSudheer Mogilappagari {
349054976cf5SSudheer Mogilappagari struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
349154976cf5SSudheer Mogilappagari int i;
349254976cf5SSudheer Mogilappagari
349354976cf5SSudheer Mogilappagari if (adapter->num_tc != mqprio_qopt->num_tc)
349454976cf5SSudheer Mogilappagari return false;
349554976cf5SSudheer Mogilappagari
349654976cf5SSudheer Mogilappagari for (i = 0; i < adapter->num_tc; i++) {
349754976cf5SSudheer Mogilappagari if (ch[i].count != mqprio_qopt->count[i] ||
349854976cf5SSudheer Mogilappagari ch[i].offset != mqprio_qopt->offset[i])
349954976cf5SSudheer Mogilappagari return false;
350054976cf5SSudheer Mogilappagari }
350154976cf5SSudheer Mogilappagari return true;
350254976cf5SSudheer Mogilappagari }
350354976cf5SSudheer Mogilappagari
350454976cf5SSudheer Mogilappagari /**
35055ec8b7d1SJesse Brandeburg * __iavf_setup_tc - configure multiple traffic classes
35065ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
3507b50f7bcaSJesse Brandeburg * @type_data: tc offload data
35085ec8b7d1SJesse Brandeburg *
35095ec8b7d1SJesse Brandeburg * This function processes the config information provided by the
35105ec8b7d1SJesse Brandeburg * user to configure traffic classes/queue channels and packages the
35115ec8b7d1SJesse Brandeburg * information to request the PF to setup traffic classes.
35125ec8b7d1SJesse Brandeburg *
35135ec8b7d1SJesse Brandeburg * Returns 0 on success.
35145ec8b7d1SJesse Brandeburg **/
35155ec8b7d1SJesse Brandeburg static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
35165ec8b7d1SJesse Brandeburg {
35175ec8b7d1SJesse Brandeburg struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
35185ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
35195ec8b7d1SJesse Brandeburg struct virtchnl_vf_resource *vfres = adapter->vf_res;
35205ec8b7d1SJesse Brandeburg u8 num_tc = 0, total_qps = 0;
35215ec8b7d1SJesse Brandeburg int ret = 0, netdev_tc = 0;
35225ec8b7d1SJesse Brandeburg u64 max_tx_rate;
35235ec8b7d1SJesse Brandeburg u16 mode;
35245ec8b7d1SJesse Brandeburg int i;
35255ec8b7d1SJesse Brandeburg
35265ec8b7d1SJesse Brandeburg num_tc = mqprio_qopt->qopt.num_tc;
35275ec8b7d1SJesse Brandeburg mode = mqprio_qopt->mode;
35285ec8b7d1SJesse Brandeburg
35295ec8b7d1SJesse Brandeburg /* delete queue_channel */
35305ec8b7d1SJesse Brandeburg if (!mqprio_qopt->qopt.hw) {
35315ec8b7d1SJesse Brandeburg if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
35325ec8b7d1SJesse Brandeburg /* reset the tc configuration */
35335ec8b7d1SJesse Brandeburg netdev_reset_tc(netdev);
35345ec8b7d1SJesse Brandeburg adapter->num_tc = 0;
35355ec8b7d1SJesse Brandeburg netif_tx_stop_all_queues(netdev);
35365ec8b7d1SJesse Brandeburg netif_tx_disable(netdev);
35375ec8b7d1SJesse Brandeburg iavf_del_all_cloud_filters(adapter);
35385ec8b7d1SJesse Brandeburg adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
353993cb804eSPrzemyslaw Patynowski total_qps = adapter->orig_num_active_queues;
35405ec8b7d1SJesse Brandeburg goto exit;
35415ec8b7d1SJesse Brandeburg } else {
35425ec8b7d1SJesse Brandeburg return -EINVAL;
35435ec8b7d1SJesse Brandeburg }
35445ec8b7d1SJesse Brandeburg }
35455ec8b7d1SJesse Brandeburg
35465ec8b7d1SJesse Brandeburg /* add queue channel */
35475ec8b7d1SJesse Brandeburg if (mode == TC_MQPRIO_MODE_CHANNEL) {
35485ec8b7d1SJesse Brandeburg if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
35495ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "ADq not supported\n");
35505ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
35515ec8b7d1SJesse Brandeburg }
35525ec8b7d1SJesse Brandeburg if (adapter->ch_config.state != __IAVF_TC_INVALID) {
35535ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
35545ec8b7d1SJesse Brandeburg return -EINVAL;
35555ec8b7d1SJesse Brandeburg }
35565ec8b7d1SJesse Brandeburg
35575ec8b7d1SJesse Brandeburg ret = iavf_validate_ch_config(adapter, mqprio_qopt);
35585ec8b7d1SJesse Brandeburg if (ret)
35595ec8b7d1SJesse Brandeburg return ret;
35605ec8b7d1SJesse Brandeburg /* Return if same TC config is requested */
356154976cf5SSudheer Mogilappagari if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
35625ec8b7d1SJesse Brandeburg return 0;
35635ec8b7d1SJesse Brandeburg adapter->num_tc = num_tc;
35645ec8b7d1SJesse Brandeburg
35655ec8b7d1SJesse Brandeburg for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
35665ec8b7d1SJesse Brandeburg if (i < num_tc) {
35675ec8b7d1SJesse Brandeburg adapter->ch_config.ch_info[i].count =
35685ec8b7d1SJesse Brandeburg mqprio_qopt->qopt.count[i];
35695ec8b7d1SJesse Brandeburg adapter->ch_config.ch_info[i].offset =
35705ec8b7d1SJesse Brandeburg mqprio_qopt->qopt.offset[i];
35715ec8b7d1SJesse Brandeburg total_qps += mqprio_qopt->qopt.count[i];
35725ec8b7d1SJesse Brandeburg max_tx_rate = mqprio_qopt->max_rate[i];
35735ec8b7d1SJesse Brandeburg /* convert to Mbps */
35745ec8b7d1SJesse Brandeburg max_tx_rate = div_u64(max_tx_rate,
35755ec8b7d1SJesse Brandeburg IAVF_MBPS_DIVISOR);
35765ec8b7d1SJesse Brandeburg adapter->ch_config.ch_info[i].max_tx_rate =
35775ec8b7d1SJesse Brandeburg max_tx_rate;
35785ec8b7d1SJesse Brandeburg } else {
35795ec8b7d1SJesse Brandeburg adapter->ch_config.ch_info[i].count = 1;
35805ec8b7d1SJesse Brandeburg adapter->ch_config.ch_info[i].offset = 0;
35815ec8b7d1SJesse Brandeburg }
35825ec8b7d1SJesse Brandeburg }
358393cb804eSPrzemyslaw Patynowski
358493cb804eSPrzemyslaw Patynowski 	/* Take a snapshot of the original config, such as "num_active_queues".
358593cb804eSPrzemyslaw Patynowski 	 * It is used later when the delete-ADQ flow is exercised, so that
358693cb804eSPrzemyslaw Patynowski 	 * once that flow completes the VF goes back to its
358793cb804eSPrzemyslaw Patynowski 	 * original queue configuration.
358893cb804eSPrzemyslaw Patynowski */
358993cb804eSPrzemyslaw Patynowski
359093cb804eSPrzemyslaw Patynowski adapter->orig_num_active_queues = adapter->num_active_queues;
359193cb804eSPrzemyslaw Patynowski
359293cb804eSPrzemyslaw Patynowski 	/* Store queue info based on TC so that the VF gets configured
359393cb804eSPrzemyslaw Patynowski 	 * with the correct number of queues when it completes the ADQ
359493cb804eSPrzemyslaw Patynowski 	 * config flow
359593cb804eSPrzemyslaw Patynowski */
35965ec8b7d1SJesse Brandeburg adapter->ch_config.total_qps = total_qps;
359793cb804eSPrzemyslaw Patynowski
35985ec8b7d1SJesse Brandeburg netif_tx_stop_all_queues(netdev);
35995ec8b7d1SJesse Brandeburg netif_tx_disable(netdev);
36005ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
36015ec8b7d1SJesse Brandeburg netdev_reset_tc(netdev);
36025ec8b7d1SJesse Brandeburg /* Report the tc mapping up the stack */
36035ec8b7d1SJesse Brandeburg netdev_set_num_tc(adapter->netdev, num_tc);
36045ec8b7d1SJesse Brandeburg for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
36055ec8b7d1SJesse Brandeburg u16 qcount = mqprio_qopt->qopt.count[i];
36065ec8b7d1SJesse Brandeburg u16 qoffset = mqprio_qopt->qopt.offset[i];
36075ec8b7d1SJesse Brandeburg
36085ec8b7d1SJesse Brandeburg if (i < num_tc)
36095ec8b7d1SJesse Brandeburg netdev_set_tc_queue(netdev, netdev_tc++, qcount,
36105ec8b7d1SJesse Brandeburg qoffset);
36115ec8b7d1SJesse Brandeburg }
36125ec8b7d1SJesse Brandeburg }
36135ec8b7d1SJesse Brandeburg exit:
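	/* Skip the queue count update while the driver is being removed;
	 * otherwise reflect the (possibly restored) total queue count to the
	 * stack.
	 */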
361493cb804eSPrzemyslaw Patynowski if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
361593cb804eSPrzemyslaw Patynowski return 0;
361693cb804eSPrzemyslaw Patynowski
361793cb804eSPrzemyslaw Patynowski netif_set_real_num_rx_queues(netdev, total_qps);
361893cb804eSPrzemyslaw Patynowski netif_set_real_num_tx_queues(netdev, total_qps);
361993cb804eSPrzemyslaw Patynowski
36205ec8b7d1SJesse Brandeburg return ret;
36215ec8b7d1SJesse Brandeburg }
36225ec8b7d1SJesse Brandeburg
36235ec8b7d1SJesse Brandeburg /**
36245ec8b7d1SJesse Brandeburg * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
36255ec8b7d1SJesse Brandeburg * @adapter: board private structure
3626b50f7bcaSJesse Brandeburg * @f: pointer to struct flow_cls_offload
36275ec8b7d1SJesse Brandeburg * @filter: pointer to cloud filter structure
36285ec8b7d1SJesse Brandeburg */
36295ec8b7d1SJesse Brandeburg static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3630f9e30088SPablo Neira Ayuso struct flow_cls_offload *f,
36315ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *filter)
36325ec8b7d1SJesse Brandeburg {
3633f9e30088SPablo Neira Ayuso struct flow_rule *rule = flow_cls_offload_flow_rule(f);
36348f256622SPablo Neira Ayuso struct flow_dissector *dissector = rule->match.dissector;
36355ec8b7d1SJesse Brandeburg u16 n_proto_mask = 0;
36365ec8b7d1SJesse Brandeburg u16 n_proto_key = 0;
36375ec8b7d1SJesse Brandeburg u8 field_flags = 0;
36385ec8b7d1SJesse Brandeburg u16 addr_type = 0;
36395ec8b7d1SJesse Brandeburg u16 n_proto = 0;
36405ec8b7d1SJesse Brandeburg int i = 0;
36415ec8b7d1SJesse Brandeburg struct virtchnl_filter *vf = &filter->f;
36425ec8b7d1SJesse Brandeburg
36438f256622SPablo Neira Ayuso if (dissector->used_keys &
36442b3082c6SRatheesh Kannoth ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
36452b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
36462b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
36472b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
36482b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
36492b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
36502b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
36512b3082c6SRatheesh Kannoth BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
36522b3082c6SRatheesh Kannoth dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%llx\n",
36538f256622SPablo Neira Ayuso dissector->used_keys);
36545ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
36555ec8b7d1SJesse Brandeburg }
36565ec8b7d1SJesse Brandeburg
36578f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
36588f256622SPablo Neira Ayuso struct flow_match_enc_keyid match;
36595ec8b7d1SJesse Brandeburg
36608f256622SPablo Neira Ayuso flow_rule_match_enc_keyid(rule, &match);
36618f256622SPablo Neira Ayuso if (match.mask->keyid != 0)
36625ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
36635ec8b7d1SJesse Brandeburg }
36645ec8b7d1SJesse Brandeburg
36658f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
36668f256622SPablo Neira Ayuso struct flow_match_basic match;
36675ec8b7d1SJesse Brandeburg
36688f256622SPablo Neira Ayuso flow_rule_match_basic(rule, &match);
36698f256622SPablo Neira Ayuso n_proto_key = ntohs(match.key->n_proto);
36708f256622SPablo Neira Ayuso n_proto_mask = ntohs(match.mask->n_proto);
36715ec8b7d1SJesse Brandeburg
36725ec8b7d1SJesse Brandeburg if (n_proto_key == ETH_P_ALL) {
36735ec8b7d1SJesse Brandeburg n_proto_key = 0;
36745ec8b7d1SJesse Brandeburg n_proto_mask = 0;
36755ec8b7d1SJesse Brandeburg }
36765ec8b7d1SJesse Brandeburg n_proto = n_proto_key & n_proto_mask;
36775ec8b7d1SJesse Brandeburg if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
36785ec8b7d1SJesse Brandeburg return -EINVAL;
36795ec8b7d1SJesse Brandeburg if (n_proto == ETH_P_IPV6) {
36805ec8b7d1SJesse Brandeburg /* specify flow type as TCP IPv6 */
36815ec8b7d1SJesse Brandeburg vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
36825ec8b7d1SJesse Brandeburg }
36835ec8b7d1SJesse Brandeburg
36848f256622SPablo Neira Ayuso if (match.key->ip_proto != IPPROTO_TCP) {
36855ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
36865ec8b7d1SJesse Brandeburg return -EINVAL;
36875ec8b7d1SJesse Brandeburg }
36885ec8b7d1SJesse Brandeburg }
36895ec8b7d1SJesse Brandeburg
36908f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
36918f256622SPablo Neira Ayuso struct flow_match_eth_addrs match;
36925ec8b7d1SJesse Brandeburg
36938f256622SPablo Neira Ayuso flow_rule_match_eth_addrs(rule, &match);
36948f256622SPablo Neira Ayuso
36955ec8b7d1SJesse Brandeburg 		/* use is_broadcast and is_zero to check for an all-0xff or all-zero mask */
36968f256622SPablo Neira Ayuso if (!is_zero_ether_addr(match.mask->dst)) {
36978f256622SPablo Neira Ayuso if (is_broadcast_ether_addr(match.mask->dst)) {
36985ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_OMAC;
36995ec8b7d1SJesse Brandeburg } else {
37005ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
37018f256622SPablo Neira Ayuso match.mask->dst);
37029f4651eaSJacob Keller return -EINVAL;
37035ec8b7d1SJesse Brandeburg }
37045ec8b7d1SJesse Brandeburg }
37055ec8b7d1SJesse Brandeburg
37068f256622SPablo Neira Ayuso if (!is_zero_ether_addr(match.mask->src)) {
37078f256622SPablo Neira Ayuso if (is_broadcast_ether_addr(match.mask->src)) {
37085ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IMAC;
37095ec8b7d1SJesse Brandeburg } else {
37105ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
37118f256622SPablo Neira Ayuso match.mask->src);
37129f4651eaSJacob Keller return -EINVAL;
37135ec8b7d1SJesse Brandeburg }
37145ec8b7d1SJesse Brandeburg }
37155ec8b7d1SJesse Brandeburg
37168f256622SPablo Neira Ayuso if (!is_zero_ether_addr(match.key->dst))
37178f256622SPablo Neira Ayuso if (is_valid_ether_addr(match.key->dst) ||
37188f256622SPablo Neira Ayuso is_multicast_ether_addr(match.key->dst)) {
37195ec8b7d1SJesse Brandeburg /* set the mask if a valid dst_mac address */
37205ec8b7d1SJesse Brandeburg for (i = 0; i < ETH_ALEN; i++)
37215ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.dst_mac[i] |= 0xff;
37225ec8b7d1SJesse Brandeburg ether_addr_copy(vf->data.tcp_spec.dst_mac,
37238f256622SPablo Neira Ayuso match.key->dst);
37245ec8b7d1SJesse Brandeburg }
37255ec8b7d1SJesse Brandeburg
37268f256622SPablo Neira Ayuso if (!is_zero_ether_addr(match.key->src))
37278f256622SPablo Neira Ayuso if (is_valid_ether_addr(match.key->src) ||
37288f256622SPablo Neira Ayuso is_multicast_ether_addr(match.key->src)) {
37295ec8b7d1SJesse Brandeburg 			/* set the mask if a valid src_mac address */
37305ec8b7d1SJesse Brandeburg for (i = 0; i < ETH_ALEN; i++)
37315ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.src_mac[i] |= 0xff;
37325ec8b7d1SJesse Brandeburg ether_addr_copy(vf->data.tcp_spec.src_mac,
37338f256622SPablo Neira Ayuso match.key->src);
37345ec8b7d1SJesse Brandeburg }
37355ec8b7d1SJesse Brandeburg }
37365ec8b7d1SJesse Brandeburg
37378f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
37388f256622SPablo Neira Ayuso struct flow_match_vlan match;
37395ec8b7d1SJesse Brandeburg
37408f256622SPablo Neira Ayuso flow_rule_match_vlan(rule, &match);
37418f256622SPablo Neira Ayuso if (match.mask->vlan_id) {
37428f256622SPablo Neira Ayuso if (match.mask->vlan_id == VLAN_VID_MASK) {
37435ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IVLAN;
37445ec8b7d1SJesse Brandeburg } else {
37455ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
37468f256622SPablo Neira Ayuso match.mask->vlan_id);
37479f4651eaSJacob Keller return -EINVAL;
37485ec8b7d1SJesse Brandeburg }
37495ec8b7d1SJesse Brandeburg }
37505ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
37518f256622SPablo Neira Ayuso vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
37525ec8b7d1SJesse Brandeburg }
37535ec8b7d1SJesse Brandeburg
37548f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
37558f256622SPablo Neira Ayuso struct flow_match_control match;
37565ec8b7d1SJesse Brandeburg
37578f256622SPablo Neira Ayuso flow_rule_match_control(rule, &match);
37588f256622SPablo Neira Ayuso addr_type = match.key->addr_type;
3759c7b9c494SAsbjørn Sloth Tønnesen
3760c7b9c494SAsbjørn Sloth Tønnesen if (flow_rule_has_control_flags(match.mask->flags,
3761c7b9c494SAsbjørn Sloth Tønnesen f->common.extack))
3762c7b9c494SAsbjørn Sloth Tønnesen return -EOPNOTSUPP;
37635ec8b7d1SJesse Brandeburg }
37645ec8b7d1SJesse Brandeburg
37655ec8b7d1SJesse Brandeburg if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
37668f256622SPablo Neira Ayuso struct flow_match_ipv4_addrs match;
37675ec8b7d1SJesse Brandeburg
37688f256622SPablo Neira Ayuso flow_rule_match_ipv4_addrs(rule, &match);
37698f256622SPablo Neira Ayuso if (match.mask->dst) {
37708f256622SPablo Neira Ayuso if (match.mask->dst == cpu_to_be32(0xffffffff)) {
37715ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
37725ec8b7d1SJesse Brandeburg } else {
37735ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
37748f256622SPablo Neira Ayuso be32_to_cpu(match.mask->dst));
37759f4651eaSJacob Keller return -EINVAL;
37765ec8b7d1SJesse Brandeburg }
37775ec8b7d1SJesse Brandeburg }
37785ec8b7d1SJesse Brandeburg
37798f256622SPablo Neira Ayuso if (match.mask->src) {
37808f256622SPablo Neira Ayuso if (match.mask->src == cpu_to_be32(0xffffffff)) {
37815ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
37825ec8b7d1SJesse Brandeburg } else {
37835ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
37846650c8e9SDaniil Tatianin be32_to_cpu(match.mask->src));
37859f4651eaSJacob Keller return -EINVAL;
37865ec8b7d1SJesse Brandeburg }
37875ec8b7d1SJesse Brandeburg }
37885ec8b7d1SJesse Brandeburg
37895ec8b7d1SJesse Brandeburg if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
37905ec8b7d1SJesse Brandeburg dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
37919f4651eaSJacob Keller return -EINVAL;
37925ec8b7d1SJesse Brandeburg }
37938f256622SPablo Neira Ayuso if (match.key->dst) {
37945ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
37958f256622SPablo Neira Ayuso vf->data.tcp_spec.dst_ip[0] = match.key->dst;
37965ec8b7d1SJesse Brandeburg }
37978f256622SPablo Neira Ayuso if (match.key->src) {
37985ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
37998f256622SPablo Neira Ayuso vf->data.tcp_spec.src_ip[0] = match.key->src;
38005ec8b7d1SJesse Brandeburg }
38015ec8b7d1SJesse Brandeburg }
38025ec8b7d1SJesse Brandeburg
38035ec8b7d1SJesse Brandeburg if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
38048f256622SPablo Neira Ayuso struct flow_match_ipv6_addrs match;
38058f256622SPablo Neira Ayuso
38068f256622SPablo Neira Ayuso flow_rule_match_ipv6_addrs(rule, &match);
38075ec8b7d1SJesse Brandeburg
38085ec8b7d1SJesse Brandeburg /* validate mask, make sure it is not IPV6_ADDR_ANY */
38098f256622SPablo Neira Ayuso if (ipv6_addr_any(&match.mask->dst)) {
38105ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
38115ec8b7d1SJesse Brandeburg IPV6_ADDR_ANY);
38129f4651eaSJacob Keller return -EINVAL;
38135ec8b7d1SJesse Brandeburg }
38145ec8b7d1SJesse Brandeburg
38155ec8b7d1SJesse Brandeburg /* src and dest IPv6 address should not be LOOPBACK
38165ec8b7d1SJesse Brandeburg * (0:0:0:0:0:0:0:1) which can be represented as ::1
38175ec8b7d1SJesse Brandeburg */
38188f256622SPablo Neira Ayuso if (ipv6_addr_loopback(&match.key->dst) ||
38198f256622SPablo Neira Ayuso ipv6_addr_loopback(&match.key->src)) {
38205ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
38215ec8b7d1SJesse Brandeburg "ipv6 addr should not be loopback\n");
38229f4651eaSJacob Keller return -EINVAL;
38235ec8b7d1SJesse Brandeburg }
38248f256622SPablo Neira Ayuso if (!ipv6_addr_any(&match.mask->dst) ||
38258f256622SPablo Neira Ayuso !ipv6_addr_any(&match.mask->src))
38265ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
38275ec8b7d1SJesse Brandeburg
38285ec8b7d1SJesse Brandeburg for (i = 0; i < 4; i++)
38295ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
38308f256622SPablo Neira Ayuso memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
38315ec8b7d1SJesse Brandeburg sizeof(vf->data.tcp_spec.dst_ip));
38325ec8b7d1SJesse Brandeburg for (i = 0; i < 4; i++)
38335ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
38348f256622SPablo Neira Ayuso memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
38355ec8b7d1SJesse Brandeburg sizeof(vf->data.tcp_spec.src_ip));
38365ec8b7d1SJesse Brandeburg }
38378f256622SPablo Neira Ayuso if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
38388f256622SPablo Neira Ayuso struct flow_match_ports match;
38395ec8b7d1SJesse Brandeburg
38408f256622SPablo Neira Ayuso flow_rule_match_ports(rule, &match);
38418f256622SPablo Neira Ayuso if (match.mask->src) {
38428f256622SPablo Neira Ayuso if (match.mask->src == cpu_to_be16(0xffff)) {
38435ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
38445ec8b7d1SJesse Brandeburg } else {
38455ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
38468f256622SPablo Neira Ayuso be16_to_cpu(match.mask->src));
38479f4651eaSJacob Keller return -EINVAL;
38485ec8b7d1SJesse Brandeburg }
38495ec8b7d1SJesse Brandeburg }
38505ec8b7d1SJesse Brandeburg
38518f256622SPablo Neira Ayuso if (match.mask->dst) {
38528f256622SPablo Neira Ayuso if (match.mask->dst == cpu_to_be16(0xffff)) {
38535ec8b7d1SJesse Brandeburg field_flags |= IAVF_CLOUD_FIELD_IIP;
38545ec8b7d1SJesse Brandeburg } else {
38555ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
38568f256622SPablo Neira Ayuso be16_to_cpu(match.mask->dst));
38579f4651eaSJacob Keller return -EINVAL;
38585ec8b7d1SJesse Brandeburg }
38595ec8b7d1SJesse Brandeburg }
38608f256622SPablo Neira Ayuso if (match.key->dst) {
38615ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
38628f256622SPablo Neira Ayuso vf->data.tcp_spec.dst_port = match.key->dst;
38635ec8b7d1SJesse Brandeburg }
38645ec8b7d1SJesse Brandeburg
38658f256622SPablo Neira Ayuso if (match.key->src) {
38665ec8b7d1SJesse Brandeburg vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
38678f256622SPablo Neira Ayuso vf->data.tcp_spec.src_port = match.key->src;
38685ec8b7d1SJesse Brandeburg }
38695ec8b7d1SJesse Brandeburg }
38705ec8b7d1SJesse Brandeburg vf->field_flags = field_flags;
38715ec8b7d1SJesse Brandeburg
38725ec8b7d1SJesse Brandeburg return 0;
38735ec8b7d1SJesse Brandeburg }
38745ec8b7d1SJesse Brandeburg
38755ec8b7d1SJesse Brandeburg /**
38765ec8b7d1SJesse Brandeburg * iavf_handle_tclass - Forward to a traffic class on the device
38775ec8b7d1SJesse Brandeburg * @adapter: board private structure
38785ec8b7d1SJesse Brandeburg * @tc: traffic class index on the device
38795ec8b7d1SJesse Brandeburg * @filter: pointer to cloud filter structure
38805ec8b7d1SJesse Brandeburg */
38815ec8b7d1SJesse Brandeburg static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
38825ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *filter)
38835ec8b7d1SJesse Brandeburg {
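	/* TC0 is the default traffic class; no redirect action is needed when
	 * the filter targets it.
	 */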
38845ec8b7d1SJesse Brandeburg if (tc == 0)
38855ec8b7d1SJesse Brandeburg return 0;
38865ec8b7d1SJesse Brandeburg if (tc < adapter->num_tc) {
38875ec8b7d1SJesse Brandeburg if (!filter->f.data.tcp_spec.dst_port) {
38885ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev,
38895ec8b7d1SJesse Brandeburg "Specify destination port to redirect to traffic class other than TC0\n");
38905ec8b7d1SJesse Brandeburg return -EINVAL;
38915ec8b7d1SJesse Brandeburg }
38925ec8b7d1SJesse Brandeburg }
38935ec8b7d1SJesse Brandeburg /* redirect to a traffic class on the same device */
38945ec8b7d1SJesse Brandeburg filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
38955ec8b7d1SJesse Brandeburg filter->f.action_meta = tc;
38965ec8b7d1SJesse Brandeburg return 0;
38975ec8b7d1SJesse Brandeburg }
38985ec8b7d1SJesse Brandeburg
38995ec8b7d1SJesse Brandeburg /**
390040e589baSAvinash Dayanand * iavf_find_cf - Find the cloud filter in the list
390140e589baSAvinash Dayanand * @adapter: Board private structure
390240e589baSAvinash Dayanand * @cookie: filter specific cookie
390340e589baSAvinash Dayanand *
390440e589baSAvinash Dayanand * Returns ptr to the filter object or NULL. Must be called while holding the
390540e589baSAvinash Dayanand * cloud_filter_list_lock.
390640e589baSAvinash Dayanand */
390740e589baSAvinash Dayanand static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
390840e589baSAvinash Dayanand unsigned long *cookie)
390940e589baSAvinash Dayanand {
391040e589baSAvinash Dayanand struct iavf_cloud_filter *filter = NULL;
391140e589baSAvinash Dayanand
391240e589baSAvinash Dayanand if (!cookie)
391340e589baSAvinash Dayanand return NULL;
391440e589baSAvinash Dayanand
391540e589baSAvinash Dayanand list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
391640e589baSAvinash Dayanand if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
391740e589baSAvinash Dayanand return filter;
391840e589baSAvinash Dayanand }
391940e589baSAvinash Dayanand return NULL;
392040e589baSAvinash Dayanand }
392140e589baSAvinash Dayanand
392240e589baSAvinash Dayanand /**
39235ec8b7d1SJesse Brandeburg * iavf_configure_clsflower - Add tc flower filters
39245ec8b7d1SJesse Brandeburg * @adapter: board private structure
3925f9e30088SPablo Neira Ayuso * @cls_flower: Pointer to struct flow_cls_offload
39265ec8b7d1SJesse Brandeburg */
39275ec8b7d1SJesse Brandeburg static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3928f9e30088SPablo Neira Ayuso struct flow_cls_offload *cls_flower)
39295ec8b7d1SJesse Brandeburg {
39305ec8b7d1SJesse Brandeburg int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
39315ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *filter = NULL;
39325ec8b7d1SJesse Brandeburg int err = -EINVAL, count = 50;
39335ec8b7d1SJesse Brandeburg
39345ec8b7d1SJesse Brandeburg if (tc < 0) {
39355ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
39365ec8b7d1SJesse Brandeburg return -EINVAL;
39375ec8b7d1SJesse Brandeburg }
39385ec8b7d1SJesse Brandeburg
39395ec8b7d1SJesse Brandeburg filter = kzalloc(sizeof(*filter), GFP_KERNEL);
39405ec8b7d1SJesse Brandeburg if (!filter)
39415ec8b7d1SJesse Brandeburg return -ENOMEM;
39425ec8b7d1SJesse Brandeburg
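	/* Poll briefly for the critical lock; bail out after 50 attempts so
	 * the tc offload callback does not spin forever.
	 */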
39435ac49f3cSStefan Assmann while (!mutex_trylock(&adapter->crit_lock)) {
39442135a8d5SNicholas Nunley if (--count == 0) {
39452135a8d5SNicholas Nunley kfree(filter);
39462135a8d5SNicholas Nunley return err;
39472135a8d5SNicholas Nunley }
39485ec8b7d1SJesse Brandeburg udelay(1);
39495ec8b7d1SJesse Brandeburg }
39505ec8b7d1SJesse Brandeburg
39515ec8b7d1SJesse Brandeburg filter->cookie = cls_flower->cookie;
39525ec8b7d1SJesse Brandeburg
395340e589baSAvinash Dayanand /* bail out here if filter already exists */
395440e589baSAvinash Dayanand spin_lock_bh(&adapter->cloud_filter_list_lock);
395540e589baSAvinash Dayanand if (iavf_find_cf(adapter, &cls_flower->cookie)) {
395640e589baSAvinash Dayanand dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
395740e589baSAvinash Dayanand err = -EEXIST;
395840e589baSAvinash Dayanand goto spin_unlock;
395940e589baSAvinash Dayanand }
396040e589baSAvinash Dayanand spin_unlock_bh(&adapter->cloud_filter_list_lock);
396140e589baSAvinash Dayanand
39625ec8b7d1SJesse Brandeburg /* set the mask to all zeroes to begin with */
39635ec8b7d1SJesse Brandeburg memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
39645ec8b7d1SJesse Brandeburg 	/* start out with flow type and eth type IPv4 */
39655ec8b7d1SJesse Brandeburg filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
39665ec8b7d1SJesse Brandeburg err = iavf_parse_cls_flower(adapter, cls_flower, filter);
39674f040080SJacob Keller if (err)
39685ec8b7d1SJesse Brandeburg goto err;
39695ec8b7d1SJesse Brandeburg
39705ec8b7d1SJesse Brandeburg err = iavf_handle_tclass(adapter, tc, filter);
39714f040080SJacob Keller if (err)
39725ec8b7d1SJesse Brandeburg goto err;
39735ec8b7d1SJesse Brandeburg
39745ec8b7d1SJesse Brandeburg /* add filter to the list */
39755ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->cloud_filter_list_lock);
39765ec8b7d1SJesse Brandeburg list_add_tail(&filter->list, &adapter->cloud_filter_list);
39775ec8b7d1SJesse Brandeburg adapter->num_cloud_filters++;
39785ec8b7d1SJesse Brandeburg filter->add = true;
39795ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
398040e589baSAvinash Dayanand spin_unlock:
39815ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->cloud_filter_list_lock);
39825ec8b7d1SJesse Brandeburg err:
39835ec8b7d1SJesse Brandeburg if (err)
39845ec8b7d1SJesse Brandeburg kfree(filter);
39855ec8b7d1SJesse Brandeburg
39865ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
39875ec8b7d1SJesse Brandeburg return err;
39885ec8b7d1SJesse Brandeburg }
39895ec8b7d1SJesse Brandeburg
39905ec8b7d1SJesse Brandeburg /**
39915ec8b7d1SJesse Brandeburg * iavf_delete_clsflower - Remove tc flower filters
39925ec8b7d1SJesse Brandeburg * @adapter: board private structure
3993f9e30088SPablo Neira Ayuso * @cls_flower: Pointer to struct flow_cls_offload
39945ec8b7d1SJesse Brandeburg */
39955ec8b7d1SJesse Brandeburg static int iavf_delete_clsflower(struct iavf_adapter *adapter,
3996f9e30088SPablo Neira Ayuso struct flow_cls_offload *cls_flower)
39975ec8b7d1SJesse Brandeburg {
39985ec8b7d1SJesse Brandeburg struct iavf_cloud_filter *filter = NULL;
39995ec8b7d1SJesse Brandeburg int err = 0;
40005ec8b7d1SJesse Brandeburg
40015ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->cloud_filter_list_lock);
40025ec8b7d1SJesse Brandeburg filter = iavf_find_cf(adapter, &cls_flower->cookie);
40035ec8b7d1SJesse Brandeburg if (filter) {
40045ec8b7d1SJesse Brandeburg filter->del = true;
40055ec8b7d1SJesse Brandeburg adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
40065ec8b7d1SJesse Brandeburg } else {
40075ec8b7d1SJesse Brandeburg err = -EINVAL;
40085ec8b7d1SJesse Brandeburg }
40095ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->cloud_filter_list_lock);
40105ec8b7d1SJesse Brandeburg
40115ec8b7d1SJesse Brandeburg return err;
40125ec8b7d1SJesse Brandeburg }
40135ec8b7d1SJesse Brandeburg
40145ec8b7d1SJesse Brandeburg /**
40155ec8b7d1SJesse Brandeburg * iavf_setup_tc_cls_flower - flower classifier offloads
4016*623122acSAhmed Zaki * @adapter: pointer to iavf adapter structure
4017b50f7bcaSJesse Brandeburg * @cls_flower: pointer to flow_cls_offload struct with flow info
40185ec8b7d1SJesse Brandeburg */
40195ec8b7d1SJesse Brandeburg static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4020f9e30088SPablo Neira Ayuso struct flow_cls_offload *cls_flower)
40215ec8b7d1SJesse Brandeburg {
40225ec8b7d1SJesse Brandeburg switch (cls_flower->command) {
4023f9e30088SPablo Neira Ayuso case FLOW_CLS_REPLACE:
40245ec8b7d1SJesse Brandeburg return iavf_configure_clsflower(adapter, cls_flower);
4025f9e30088SPablo Neira Ayuso case FLOW_CLS_DESTROY:
40265ec8b7d1SJesse Brandeburg return iavf_delete_clsflower(adapter, cls_flower);
4027f9e30088SPablo Neira Ayuso case FLOW_CLS_STATS:
40285ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
40295ec8b7d1SJesse Brandeburg default:
40305ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
40315ec8b7d1SJesse Brandeburg }
40325ec8b7d1SJesse Brandeburg }
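/* Usage sketch (illustrative commands only; the device name, addresses and
 * traffic class are examples, not taken from this file): a flower filter
 * steering a flow to an ADQ traffic class reaches this dispatcher as
 * FLOW_CLS_REPLACE, e.g.
 *
 *   tc qdisc add dev <vf> ingress
 *   tc filter add dev <vf> ingress protocol ip flower skip_sw \
 *           dst_ip 192.168.1.10 ip_proto tcp dst_port 80 hw_tc 1
 *
 * Deleting the same filter arrives as FLOW_CLS_DESTROY.
 */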
40335ec8b7d1SJesse Brandeburg
40345ec8b7d1SJesse Brandeburg /**
4035*623122acSAhmed Zaki * iavf_add_cls_u32 - Add U32 classifier offloads
4036*623122acSAhmed Zaki * @adapter: pointer to iavf adapter structure
4037*623122acSAhmed Zaki * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
4038*623122acSAhmed Zaki *
4039*623122acSAhmed Zaki * Return: 0 on success or negative errno on failure.
4040*623122acSAhmed Zaki */
4041*623122acSAhmed Zaki      static int iavf_add_cls_u32(struct iavf_adapter *adapter,
4042*623122acSAhmed Zaki struct tc_cls_u32_offload *cls_u32)
4043*623122acSAhmed Zaki {
4044*623122acSAhmed Zaki struct netlink_ext_ack *extack = cls_u32->common.extack;
4045*623122acSAhmed Zaki struct virtchnl_fdir_rule *rule_cfg;
4046*623122acSAhmed Zaki struct virtchnl_filter_action *vact;
4047*623122acSAhmed Zaki struct virtchnl_proto_hdrs *hdrs;
4048*623122acSAhmed Zaki struct ethhdr *spec_h, *mask_h;
4049*623122acSAhmed Zaki const struct tc_action *act;
4050*623122acSAhmed Zaki struct iavf_fdir_fltr *fltr;
4051*623122acSAhmed Zaki struct tcf_exts *exts;
4052*623122acSAhmed Zaki unsigned int q_index;
4053*623122acSAhmed Zaki int i, status = 0;
4054*623122acSAhmed Zaki int off_base = 0;
4055*623122acSAhmed Zaki
4056*623122acSAhmed Zaki if (cls_u32->knode.link_handle) {
4057*623122acSAhmed Zaki NL_SET_ERR_MSG_MOD(extack, "Linking not supported");
4058*623122acSAhmed Zaki return -EOPNOTSUPP;
4059*623122acSAhmed Zaki }
4060*623122acSAhmed Zaki
4061*623122acSAhmed Zaki fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
4062*623122acSAhmed Zaki if (!fltr)
4063*623122acSAhmed Zaki return -ENOMEM;
4064*623122acSAhmed Zaki
4065*623122acSAhmed Zaki rule_cfg = &fltr->vc_add_msg.rule_cfg;
4066*623122acSAhmed Zaki hdrs = &rule_cfg->proto_hdrs;
4067*623122acSAhmed Zaki hdrs->count = 0;
4068*623122acSAhmed Zaki
4069*623122acSAhmed Zaki 	/* The parser lib at the PF expects the packet to start with the MAC hdr */
4070*623122acSAhmed Zaki switch (ntohs(cls_u32->common.protocol)) {
4071*623122acSAhmed Zaki case ETH_P_802_3:
4072*623122acSAhmed Zaki break;
4073*623122acSAhmed Zaki case ETH_P_IP:
4074*623122acSAhmed Zaki spec_h = (struct ethhdr *)hdrs->raw.spec;
4075*623122acSAhmed Zaki mask_h = (struct ethhdr *)hdrs->raw.mask;
4076*623122acSAhmed Zaki spec_h->h_proto = htons(ETH_P_IP);
4077*623122acSAhmed Zaki mask_h->h_proto = htons(0xFFFF);
4078*623122acSAhmed Zaki off_base += ETH_HLEN;
4079*623122acSAhmed Zaki break;
4080*623122acSAhmed Zaki default:
4081*623122acSAhmed Zaki NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported");
4082*623122acSAhmed Zaki status = -EOPNOTSUPP;
4083*623122acSAhmed Zaki goto free_alloc;
4084*623122acSAhmed Zaki }
4085*623122acSAhmed Zaki
4086*623122acSAhmed Zaki for (i = 0; i < cls_u32->knode.sel->nkeys; i++) {
4087*623122acSAhmed Zaki __be32 val, mask;
4088*623122acSAhmed Zaki int off;
4089*623122acSAhmed Zaki
4090*623122acSAhmed Zaki off = off_base + cls_u32->knode.sel->keys[i].off;
4091*623122acSAhmed Zaki val = cls_u32->knode.sel->keys[i].val;
4092*623122acSAhmed Zaki mask = cls_u32->knode.sel->keys[i].mask;
4093*623122acSAhmed Zaki
4094*623122acSAhmed Zaki if (off >= sizeof(hdrs->raw.spec)) {
4095*623122acSAhmed Zaki NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed.");
4096*623122acSAhmed Zaki status = -EINVAL;
4097*623122acSAhmed Zaki goto free_alloc;
4098*623122acSAhmed Zaki }
4099*623122acSAhmed Zaki
4100*623122acSAhmed Zaki memcpy(&hdrs->raw.spec[off], &val, sizeof(val));
4101*623122acSAhmed Zaki memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
4102*623122acSAhmed Zaki hdrs->raw.pkt_len = off + sizeof(val);
4103*623122acSAhmed Zaki }
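	/* Each u32 key above is a 32-bit value/mask pair copied at its byte
	 * offset into the raw spec/mask images; pkt_len tracks the highest
	 * offset written so the PF parser knows how much of the raw header
	 * image is valid.
	 */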
4104*623122acSAhmed Zaki
4105*623122acSAhmed Zaki /* Only one action is allowed */
4106*623122acSAhmed Zaki rule_cfg->action_set.count = 1;
4107*623122acSAhmed Zaki vact = &rule_cfg->action_set.actions[0];
4108*623122acSAhmed Zaki exts = cls_u32->knode.exts;
4109*623122acSAhmed Zaki
4110*623122acSAhmed Zaki tcf_exts_for_each_action(i, act, exts) {
4111*623122acSAhmed Zaki /* FDIR queue */
4112*623122acSAhmed Zaki if (is_tcf_skbedit_rx_queue_mapping(act)) {
4113*623122acSAhmed Zaki q_index = tcf_skbedit_rx_queue_mapping(act);
4114*623122acSAhmed Zaki if (q_index >= adapter->num_active_queues) {
4115*623122acSAhmed Zaki status = -EINVAL;
4116*623122acSAhmed Zaki goto free_alloc;
4117*623122acSAhmed Zaki }
4118*623122acSAhmed Zaki
4119*623122acSAhmed Zaki vact->type = VIRTCHNL_ACTION_QUEUE;
4120*623122acSAhmed Zaki vact->act_conf.queue.index = q_index;
4121*623122acSAhmed Zaki break;
4122*623122acSAhmed Zaki }
4123*623122acSAhmed Zaki
4124*623122acSAhmed Zaki /* Drop */
4125*623122acSAhmed Zaki if (is_tcf_gact_shot(act)) {
4126*623122acSAhmed Zaki vact->type = VIRTCHNL_ACTION_DROP;
4127*623122acSAhmed Zaki break;
4128*623122acSAhmed Zaki }
4129*623122acSAhmed Zaki
4130*623122acSAhmed Zaki /* Unsupported action */
4131*623122acSAhmed Zaki NL_SET_ERR_MSG_MOD(extack, "Unsupported action.");
4132*623122acSAhmed Zaki status = -EOPNOTSUPP;
4133*623122acSAhmed Zaki goto free_alloc;
4134*623122acSAhmed Zaki }
4135*623122acSAhmed Zaki
4136*623122acSAhmed Zaki fltr->vc_add_msg.vsi_id = adapter->vsi.id;
4137*623122acSAhmed Zaki fltr->cls_u32_handle = cls_u32->knode.handle;
4138*623122acSAhmed Zaki return iavf_fdir_add_fltr(adapter, fltr);
4139*623122acSAhmed Zaki
4140*623122acSAhmed Zaki free_alloc:
4141*623122acSAhmed Zaki kfree(fltr);
4142*623122acSAhmed Zaki return status;
4143*623122acSAhmed Zaki }
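/* Usage sketch (illustrative commands only; device name, address, queue and
 * exact flag placement are examples, not taken from this file): a u32 filter
 * this path can offload typically redirects an ingress flow to a queue or
 * drops it, e.g.
 *
 *   tc qdisc add dev <vf> ingress
 *   tc filter add dev <vf> ingress protocol ip u32 \
 *           match ip dst 192.168.1.10/32 \
 *           action skbedit queue_mapping 3
 *
 * The 32-bit match keys land in rule_cfg->proto_hdrs.raw and the skbedit
 * action is translated into a VIRTCHNL_ACTION_QUEUE action for the PF.
 */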
4144*623122acSAhmed Zaki
4145*623122acSAhmed Zaki /**
4146*623122acSAhmed Zaki * iavf_del_cls_u32 - Delete U32 classifier offloads
4147*623122acSAhmed Zaki * @adapter: pointer to iavf adapter structure
4148*623122acSAhmed Zaki * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
4149*623122acSAhmed Zaki *
4150*623122acSAhmed Zaki * Return: 0 on success or negative errno on failure.
4151*623122acSAhmed Zaki */
4152*623122acSAhmed Zaki      static int iavf_del_cls_u32(struct iavf_adapter *adapter,
4153*623122acSAhmed Zaki struct tc_cls_u32_offload *cls_u32)
4154*623122acSAhmed Zaki {
4155*623122acSAhmed Zaki return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle);
4156*623122acSAhmed Zaki }
4157*623122acSAhmed Zaki
4158*623122acSAhmed Zaki /**
4159*623122acSAhmed Zaki * iavf_setup_tc_cls_u32 - U32 filter offloads
4160*623122acSAhmed Zaki * @adapter: pointer to iavf adapter structure
4161*623122acSAhmed Zaki * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
4162*623122acSAhmed Zaki *
4163*623122acSAhmed Zaki * Return: 0 on success or negative errno on failure.
4164*623122acSAhmed Zaki */
4165*623122acSAhmed Zaki      static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter,
4166*623122acSAhmed Zaki struct tc_cls_u32_offload *cls_u32)
4167*623122acSAhmed Zaki {
4168*623122acSAhmed Zaki if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter))
4169*623122acSAhmed Zaki return -EOPNOTSUPP;
4170*623122acSAhmed Zaki
4171*623122acSAhmed Zaki switch (cls_u32->command) {
4172*623122acSAhmed Zaki case TC_CLSU32_NEW_KNODE:
4173*623122acSAhmed Zaki case TC_CLSU32_REPLACE_KNODE:
4174*623122acSAhmed Zaki return iavf_add_cls_u32(adapter, cls_u32);
4175*623122acSAhmed Zaki case TC_CLSU32_DELETE_KNODE:
4176*623122acSAhmed Zaki return iavf_del_cls_u32(adapter, cls_u32);
4177*623122acSAhmed Zaki default:
4178*623122acSAhmed Zaki return -EOPNOTSUPP;
4179*623122acSAhmed Zaki }
4180*623122acSAhmed Zaki }
4181*623122acSAhmed Zaki
4182*623122acSAhmed Zaki /**
41835ec8b7d1SJesse Brandeburg * iavf_setup_tc_block_cb - block callback for tc
41845ec8b7d1SJesse Brandeburg * @type: type of offload
41855ec8b7d1SJesse Brandeburg * @type_data: offload data
41865ec8b7d1SJesse Brandeburg  * @cb_priv: adapter private data, a struct iavf_adapter pointer
41875ec8b7d1SJesse Brandeburg *
41885ec8b7d1SJesse Brandeburg * This function is the block callback for traffic classes
41895ec8b7d1SJesse Brandeburg **/
41905ec8b7d1SJesse Brandeburg static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
41915ec8b7d1SJesse Brandeburg void *cb_priv)
41925ec8b7d1SJesse Brandeburg {
4193bb0858d8SJiri Pirko struct iavf_adapter *adapter = cb_priv;
4194bb0858d8SJiri Pirko
4195bb0858d8SJiri Pirko if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4196bb0858d8SJiri Pirko return -EOPNOTSUPP;
4197bb0858d8SJiri Pirko
41985ec8b7d1SJesse Brandeburg switch (type) {
41995ec8b7d1SJesse Brandeburg case TC_SETUP_CLSFLOWER:
42005ec8b7d1SJesse Brandeburg return iavf_setup_tc_cls_flower(cb_priv, type_data);
4201*623122acSAhmed Zaki case TC_SETUP_CLSU32:
4202*623122acSAhmed Zaki return iavf_setup_tc_cls_u32(cb_priv, type_data);
42035ec8b7d1SJesse Brandeburg default:
42045ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
42055ec8b7d1SJesse Brandeburg }
42065ec8b7d1SJesse Brandeburg }
42075ec8b7d1SJesse Brandeburg
4208955bcb6eSPablo Neira Ayuso static LIST_HEAD(iavf_block_cb_list);
4209955bcb6eSPablo Neira Ayuso
42105ec8b7d1SJesse Brandeburg /**
42115ec8b7d1SJesse Brandeburg * iavf_setup_tc - configure multiple traffic classes
42125ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
42135ec8b7d1SJesse Brandeburg * @type: type of offload
4214b50f7bcaSJesse Brandeburg * @type_data: tc offload data
42155ec8b7d1SJesse Brandeburg *
42165ec8b7d1SJesse Brandeburg * This function is the callback to ndo_setup_tc in the
42175ec8b7d1SJesse Brandeburg * netdev_ops.
42185ec8b7d1SJesse Brandeburg *
42195ec8b7d1SJesse Brandeburg  * Returns 0 on success, negative value on failure
42205ec8b7d1SJesse Brandeburg **/
42215ec8b7d1SJesse Brandeburg static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
42225ec8b7d1SJesse Brandeburg void *type_data)
42235ec8b7d1SJesse Brandeburg {
42244e95bc26SPablo Neira Ayuso struct iavf_adapter *adapter = netdev_priv(netdev);
42254e95bc26SPablo Neira Ayuso
42265ec8b7d1SJesse Brandeburg switch (type) {
42275ec8b7d1SJesse Brandeburg case TC_SETUP_QDISC_MQPRIO:
42285ec8b7d1SJesse Brandeburg return __iavf_setup_tc(netdev, type_data);
42295ec8b7d1SJesse Brandeburg case TC_SETUP_BLOCK:
4230955bcb6eSPablo Neira Ayuso return flow_block_cb_setup_simple(type_data,
4231955bcb6eSPablo Neira Ayuso &iavf_block_cb_list,
42324e95bc26SPablo Neira Ayuso iavf_setup_tc_block_cb,
42334e95bc26SPablo Neira Ayuso adapter, adapter, true);
42345ec8b7d1SJesse Brandeburg default:
42355ec8b7d1SJesse Brandeburg return -EOPNOTSUPP;
42365ec8b7d1SJesse Brandeburg }
42375ec8b7d1SJesse Brandeburg }
42385ec8b7d1SJesse Brandeburg
42395ec8b7d1SJesse Brandeburg /**
42403a0b5a29SPiotr Gardocki * iavf_restore_fdir_filters
42413a0b5a29SPiotr Gardocki * @adapter: board private structure
42423a0b5a29SPiotr Gardocki *
42433a0b5a29SPiotr Gardocki * Restore existing FDIR filters when VF netdev comes back up.
42443a0b5a29SPiotr Gardocki **/
42453a0b5a29SPiotr Gardocki static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
42463a0b5a29SPiotr Gardocki {
42473a0b5a29SPiotr Gardocki struct iavf_fdir_fltr *f;
42483a0b5a29SPiotr Gardocki
42493a0b5a29SPiotr Gardocki spin_lock_bh(&adapter->fdir_fltr_lock);
42503a0b5a29SPiotr Gardocki list_for_each_entry(f, &adapter->fdir_list_head, list) {
42513a0b5a29SPiotr Gardocki if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
42523a0b5a29SPiotr Gardocki /* Cancel a request, keep filter as active */
42533a0b5a29SPiotr Gardocki f->state = IAVF_FDIR_FLTR_ACTIVE;
42543a0b5a29SPiotr Gardocki } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
42553a0b5a29SPiotr Gardocki f->state == IAVF_FDIR_FLTR_INACTIVE) {
42563a0b5a29SPiotr Gardocki 			/* Re-add filters which are inactive or still have a
42573a0b5a29SPiotr Gardocki 			 * delete request pending at the PF
42583a0b5a29SPiotr Gardocki 			 */
42593a0b5a29SPiotr Gardocki f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
42603a0b5a29SPiotr Gardocki adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
42613a0b5a29SPiotr Gardocki }
42623a0b5a29SPiotr Gardocki }
42633a0b5a29SPiotr Gardocki spin_unlock_bh(&adapter->fdir_fltr_lock);
42643a0b5a29SPiotr Gardocki }
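/* In short: filters parked while the interface was down are either re-armed
 * in place (a pending disable request is simply cancelled) or queued to be
 * re-added to the PF, so the user-visible set of Flow Director rules
 * survives an ifdown/ifup cycle.
 */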
42653a0b5a29SPiotr Gardocki
42663a0b5a29SPiotr Gardocki /**
42675ec8b7d1SJesse Brandeburg * iavf_open - Called when a network interface is made active
42685ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
42695ec8b7d1SJesse Brandeburg *
42705ec8b7d1SJesse Brandeburg * Returns 0 on success, negative value on failure
42715ec8b7d1SJesse Brandeburg *
42725ec8b7d1SJesse Brandeburg * The open entry point is called when a network interface is made
42735ec8b7d1SJesse Brandeburg * active by the system (IFF_UP). At this point all resources needed
42745ec8b7d1SJesse Brandeburg * for transmit and receive operations are allocated, the interrupt
4275fdd4044fSJakub Pawlak * handler is registered with the OS, the watchdog is started,
42765ec8b7d1SJesse Brandeburg * and the stack is notified that the interface is ready.
42775ec8b7d1SJesse Brandeburg **/
42785ec8b7d1SJesse Brandeburg static int iavf_open(struct net_device *netdev)
42795ec8b7d1SJesse Brandeburg {
42805ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
42815ec8b7d1SJesse Brandeburg int err;
42825ec8b7d1SJesse Brandeburg
42835ec8b7d1SJesse Brandeburg if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
42845ec8b7d1SJesse Brandeburg dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
42855ec8b7d1SJesse Brandeburg return -EIO;
42865ec8b7d1SJesse Brandeburg }
42875ec8b7d1SJesse Brandeburg
4288cbe9e511SIvan Vecera while (!mutex_trylock(&adapter->crit_lock)) {
4289cbe9e511SIvan Vecera /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4290cbe9e511SIvan Vecera * is already taken and iavf_open is called from an upper
4291cbe9e511SIvan Vecera * device's notifier reacting on NETDEV_REGISTER event.
4292cbe9e511SIvan Vecera 		 * We have to leave here to avoid a deadlock.
4293cbe9e511SIvan Vecera */
4294cbe9e511SIvan Vecera if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4295cbe9e511SIvan Vecera return -EBUSY;
4296cbe9e511SIvan Vecera
42975ec8b7d1SJesse Brandeburg usleep_range(500, 1000);
4298cbe9e511SIvan Vecera }
42995ec8b7d1SJesse Brandeburg
43005ec8b7d1SJesse Brandeburg if (adapter->state != __IAVF_DOWN) {
43015ec8b7d1SJesse Brandeburg err = -EBUSY;
43025ec8b7d1SJesse Brandeburg goto err_unlock;
43035ec8b7d1SJesse Brandeburg }
43045ec8b7d1SJesse Brandeburg
4305605ca7c5SPrzemyslaw Patynowski if (adapter->state == __IAVF_RUNNING &&
4306605ca7c5SPrzemyslaw Patynowski !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4307605ca7c5SPrzemyslaw Patynowski dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4308605ca7c5SPrzemyslaw Patynowski err = 0;
4309605ca7c5SPrzemyslaw Patynowski goto err_unlock;
4310605ca7c5SPrzemyslaw Patynowski }
4311605ca7c5SPrzemyslaw Patynowski
43125ec8b7d1SJesse Brandeburg /* allocate transmit descriptors */
43135ec8b7d1SJesse Brandeburg err = iavf_setup_all_tx_resources(adapter);
43145ec8b7d1SJesse Brandeburg if (err)
43155ec8b7d1SJesse Brandeburg goto err_setup_tx;
43165ec8b7d1SJesse Brandeburg
43175ec8b7d1SJesse Brandeburg /* allocate receive descriptors */
43185ec8b7d1SJesse Brandeburg err = iavf_setup_all_rx_resources(adapter);
43195ec8b7d1SJesse Brandeburg if (err)
43205ec8b7d1SJesse Brandeburg goto err_setup_rx;
43215ec8b7d1SJesse Brandeburg
43225ec8b7d1SJesse Brandeburg /* clear any pending interrupts, may auto mask */
43235ec8b7d1SJesse Brandeburg err = iavf_request_traffic_irqs(adapter, netdev->name);
43245ec8b7d1SJesse Brandeburg if (err)
43255ec8b7d1SJesse Brandeburg goto err_req_irq;
43265ec8b7d1SJesse Brandeburg
43275ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->mac_vlan_list_lock);
43285ec8b7d1SJesse Brandeburg
43295ec8b7d1SJesse Brandeburg iavf_add_filter(adapter, adapter->hw.mac.addr);
43305ec8b7d1SJesse Brandeburg
43315ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->mac_vlan_list_lock);
43325ec8b7d1SJesse Brandeburg
43333a0b5a29SPiotr Gardocki /* Restore filters that were removed with IFF_DOWN */
433442930142SAkeem G Abodunrin iavf_restore_filters(adapter);
43353a0b5a29SPiotr Gardocki iavf_restore_fdir_filters(adapter);
433642930142SAkeem G Abodunrin
43375ec8b7d1SJesse Brandeburg iavf_configure(adapter);
43385ec8b7d1SJesse Brandeburg
43395ec8b7d1SJesse Brandeburg iavf_up_complete(adapter);
43405ec8b7d1SJesse Brandeburg
43415ec8b7d1SJesse Brandeburg iavf_irq_enable(adapter, true);
43425ec8b7d1SJesse Brandeburg
43435ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
43445ec8b7d1SJesse Brandeburg
43455ec8b7d1SJesse Brandeburg return 0;
43465ec8b7d1SJesse Brandeburg
43475ec8b7d1SJesse Brandeburg err_req_irq:
43485ec8b7d1SJesse Brandeburg iavf_down(adapter);
43495ec8b7d1SJesse Brandeburg iavf_free_traffic_irqs(adapter);
43505ec8b7d1SJesse Brandeburg err_setup_rx:
43515ec8b7d1SJesse Brandeburg iavf_free_all_rx_resources(adapter);
43525ec8b7d1SJesse Brandeburg err_setup_tx:
43535ec8b7d1SJesse Brandeburg iavf_free_all_tx_resources(adapter);
43545ec8b7d1SJesse Brandeburg err_unlock:
43555ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
43565ec8b7d1SJesse Brandeburg
43575ec8b7d1SJesse Brandeburg return err;
43585ec8b7d1SJesse Brandeburg }
43595ec8b7d1SJesse Brandeburg
43605ec8b7d1SJesse Brandeburg /**
43615ec8b7d1SJesse Brandeburg * iavf_close - Disables a network interface
43625ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
43635ec8b7d1SJesse Brandeburg *
43645ec8b7d1SJesse Brandeburg * Returns 0, this is not allowed to fail
43655ec8b7d1SJesse Brandeburg *
43665ec8b7d1SJesse Brandeburg * The close entry point is called when an interface is de-activated
43675ec8b7d1SJesse Brandeburg  * by the OS. The hardware is still under the driver's control, but
43685ec8b7d1SJesse Brandeburg * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
43695ec8b7d1SJesse Brandeburg * are freed, along with all transmit and receive resources.
43705ec8b7d1SJesse Brandeburg **/
43715ec8b7d1SJesse Brandeburg static int iavf_close(struct net_device *netdev)
43725ec8b7d1SJesse Brandeburg {
43735ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
437411c12adcSMichal Jaron u64 aq_to_restore;
43755ec8b7d1SJesse Brandeburg int status;
43765ec8b7d1SJesse Brandeburg
4377fc2e6b3bSSlawomir Laba mutex_lock(&adapter->crit_lock);
43785ec8b7d1SJesse Brandeburg
4379fc2e6b3bSSlawomir Laba if (adapter->state <= __IAVF_DOWN_PENDING) {
4380fc2e6b3bSSlawomir Laba mutex_unlock(&adapter->crit_lock);
4381fc2e6b3bSSlawomir Laba return 0;
4382fc2e6b3bSSlawomir Laba }
43835ec8b7d1SJesse Brandeburg
438456184e01SJesse Brandeburg set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
438511c12adcSMichal Jaron 	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
438611c12adcSMichal Jaron 	 * IAVF_FLAG_AQ_DISABLE_QUEUES because in such a case there is an rtnl
438711c12adcSMichal Jaron 	 * deadlock with adminq_task() until iavf_close() times out. We must
438811c12adcSMichal Jaron 	 * send IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES so
438911c12adcSMichal Jaron 	 * that the VF can actually disable its queues. Give only the necessary
439011c12adcSMichal Jaron 	 * flags to iavf_down() and save the others to be restored right before
439111c12adcSMichal Jaron 	 * iavf_close() returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has already
439211c12adcSMichal Jaron 	 * been sent and iavf is in the DOWN state.
439311c12adcSMichal Jaron 	 */
439411c12adcSMichal Jaron aq_to_restore = adapter->aq_required;
439511c12adcSMichal Jaron adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
439611c12adcSMichal Jaron
439711c12adcSMichal Jaron 	/* Remove flags which we do not want to send after close, or which
439811c12adcSMichal Jaron 	 * we want to send before disabling the queues.
439911c12adcSMichal Jaron 	 */
440011c12adcSMichal Jaron aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
440111c12adcSMichal Jaron IAVF_FLAG_AQ_ENABLE_QUEUES |
440211c12adcSMichal Jaron IAVF_FLAG_AQ_CONFIGURE_QUEUES |
440311c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_VLAN_FILTER |
440411c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_MAC_FILTER |
440511c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
440611c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_FDIR_FILTER |
440711c12adcSMichal Jaron IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
44085ec8b7d1SJesse Brandeburg
44095ec8b7d1SJesse Brandeburg iavf_down(adapter);
441045eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_DOWN_PENDING);
44115ec8b7d1SJesse Brandeburg iavf_free_traffic_irqs(adapter);
44125ec8b7d1SJesse Brandeburg
44135ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
44145ec8b7d1SJesse Brandeburg
44155ec8b7d1SJesse Brandeburg /* We explicitly don't free resources here because the hardware is
44165ec8b7d1SJesse Brandeburg * still active and can DMA into memory. Resources are cleared in
44175ec8b7d1SJesse Brandeburg * iavf_virtchnl_completion() after we get confirmation from the PF
44185ec8b7d1SJesse Brandeburg * driver that the rings have been stopped.
44195ec8b7d1SJesse Brandeburg *
44205ec8b7d1SJesse Brandeburg * Also, we wait for state to transition to __IAVF_DOWN before
44215ec8b7d1SJesse Brandeburg * returning. State change occurs in iavf_virtchnl_completion() after
44225ec8b7d1SJesse Brandeburg * VF resources are released (which occurs after PF driver processes and
44235ec8b7d1SJesse Brandeburg * responds to admin queue commands).
44245ec8b7d1SJesse Brandeburg */
44255ec8b7d1SJesse Brandeburg
44265ec8b7d1SJesse Brandeburg status = wait_event_timeout(adapter->down_waitqueue,
44275ec8b7d1SJesse Brandeburg adapter->state == __IAVF_DOWN,
442888ec7308SMitch Williams msecs_to_jiffies(500));
44295ec8b7d1SJesse Brandeburg if (!status)
44305ec8b7d1SJesse Brandeburg netdev_warn(netdev, "Device resources not yet released\n");
443111c12adcSMichal Jaron
443211c12adcSMichal Jaron mutex_lock(&adapter->crit_lock);
443311c12adcSMichal Jaron adapter->aq_required |= aq_to_restore;
443411c12adcSMichal Jaron mutex_unlock(&adapter->crit_lock);
44355ec8b7d1SJesse Brandeburg return 0;
44365ec8b7d1SJesse Brandeburg }
44375ec8b7d1SJesse Brandeburg
44385ec8b7d1SJesse Brandeburg /**
44395ec8b7d1SJesse Brandeburg * iavf_change_mtu - Change the Maximum Transfer Unit
44405ec8b7d1SJesse Brandeburg * @netdev: network interface device structure
44415ec8b7d1SJesse Brandeburg * @new_mtu: new value for maximum frame size
44425ec8b7d1SJesse Brandeburg *
44435ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
44445ec8b7d1SJesse Brandeburg **/
44455ec8b7d1SJesse Brandeburg static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
44465ec8b7d1SJesse Brandeburg {
44475ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
4448c2ed2403SMarcin Szycik int ret = 0;
44495ec8b7d1SJesse Brandeburg
4450aeb5d11fSPatryk Małek netdev_dbg(netdev, "changing MTU from %d to %d\n",
4451aeb5d11fSPatryk Małek netdev->mtu, new_mtu);
44521eb2cdedSEric Dumazet WRITE_ONCE(netdev->mtu, new_mtu);
4453d2c0f45fSSlawomir Laba
4454d2c0f45fSSlawomir Laba if (netif_running(netdev)) {
4455c34743daSAhmed Zaki iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
4456c2ed2403SMarcin Szycik ret = iavf_wait_for_reset(adapter);
4457c2ed2403SMarcin Szycik if (ret < 0)
4458c2ed2403SMarcin Szycik netdev_warn(netdev, "MTU change interrupted waiting for reset");
4459c2ed2403SMarcin Szycik else if (ret)
4460c2ed2403SMarcin Szycik netdev_warn(netdev, "MTU change timed out waiting for reset");
4461d2c0f45fSSlawomir Laba }
44625ec8b7d1SJesse Brandeburg
4463c2ed2403SMarcin Szycik return ret;
44645ec8b7d1SJesse Brandeburg }
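/* Usage sketch (illustrative command): "ip link set dev <vf> mtu 9000" lands
 * here. When the interface is running, the new MTU only takes effect once the
 * VF reset scheduled above has completed, which is why the handler waits for
 * the reset before returning.
 */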
44655ec8b7d1SJesse Brandeburg
446609d23b89SPiotr Gardocki /**
446709d23b89SPiotr Gardocki * iavf_disable_fdir - disable Flow Director and clear existing filters
446809d23b89SPiotr Gardocki * @adapter: board private structure
446909d23b89SPiotr Gardocki **/
447009d23b89SPiotr Gardocki static void iavf_disable_fdir(struct iavf_adapter *adapter)
447109d23b89SPiotr Gardocki {
447209d23b89SPiotr Gardocki struct iavf_fdir_fltr *fdir, *fdirtmp;
447309d23b89SPiotr Gardocki bool del_filters = false;
447409d23b89SPiotr Gardocki
447509d23b89SPiotr Gardocki adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
447609d23b89SPiotr Gardocki
447709d23b89SPiotr Gardocki /* remove all Flow Director filters */
447809d23b89SPiotr Gardocki spin_lock_bh(&adapter->fdir_fltr_lock);
447909d23b89SPiotr Gardocki list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
448009d23b89SPiotr Gardocki list) {
448109d23b89SPiotr Gardocki if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
448209d23b89SPiotr Gardocki fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
448309d23b89SPiotr Gardocki /* Delete filters not registered in PF */
448409d23b89SPiotr Gardocki list_del(&fdir->list);
4485*623122acSAhmed Zaki iavf_dec_fdir_active_fltr(adapter, fdir);
448609d23b89SPiotr Gardocki kfree(fdir);
448709d23b89SPiotr Gardocki } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
448809d23b89SPiotr Gardocki fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
448909d23b89SPiotr Gardocki fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
449009d23b89SPiotr Gardocki /* Filters registered in PF, schedule their deletion */
449109d23b89SPiotr Gardocki fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
449209d23b89SPiotr Gardocki del_filters = true;
449309d23b89SPiotr Gardocki } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
449409d23b89SPiotr Gardocki 			/* A request to delete the filter was already sent to
449509d23b89SPiotr Gardocki 			 * the PF; change state to DEL_PENDING so it is deleted
449609d23b89SPiotr Gardocki 			 * after the PF's response instead of going INACTIVE
449709d23b89SPiotr Gardocki */
449809d23b89SPiotr Gardocki fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
449909d23b89SPiotr Gardocki }
450009d23b89SPiotr Gardocki }
450109d23b89SPiotr Gardocki spin_unlock_bh(&adapter->fdir_fltr_lock);
450209d23b89SPiotr Gardocki
450309d23b89SPiotr Gardocki if (del_filters) {
450409d23b89SPiotr Gardocki adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
450509d23b89SPiotr Gardocki mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
450609d23b89SPiotr Gardocki }
450709d23b89SPiotr Gardocki }
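/* This path is taken when the ntuple feature is switched off, e.g. via
 * "ethtool -K <vf> ntuple off" (illustrative command): all Flow Director
 * state is dropped locally and any filters known to the PF are queued for
 * deletion through the admin queue.
 */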
450809d23b89SPiotr Gardocki
45098afadd1cSBrett Creeley #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
45108afadd1cSBrett Creeley NETIF_F_HW_VLAN_CTAG_TX | \
45118afadd1cSBrett Creeley NETIF_F_HW_VLAN_STAG_RX | \
45128afadd1cSBrett Creeley NETIF_F_HW_VLAN_STAG_TX)
45138afadd1cSBrett Creeley
45145ec8b7d1SJesse Brandeburg /**
451556184e01SJesse Brandeburg * iavf_set_features - set the netdev feature flags
45165ec8b7d1SJesse Brandeburg * @netdev: ptr to the netdev being adjusted
45175ec8b7d1SJesse Brandeburg * @features: the feature set that the stack is suggesting
45185ec8b7d1SJesse Brandeburg * Note: expects to be called while under rtnl_lock()
45195ec8b7d1SJesse Brandeburg **/
45205ec8b7d1SJesse Brandeburg static int iavf_set_features(struct net_device *netdev,
45215ec8b7d1SJesse Brandeburg netdev_features_t features)
45225ec8b7d1SJesse Brandeburg {
45235ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
45245ec8b7d1SJesse Brandeburg
45258afadd1cSBrett Creeley /* trigger update on any VLAN feature change */
45268afadd1cSBrett Creeley if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
45278afadd1cSBrett Creeley (features & NETIF_VLAN_OFFLOAD_FEATURES))
45288afadd1cSBrett Creeley iavf_set_vlan_offload_features(adapter, netdev->features,
45298afadd1cSBrett Creeley features);
45307559d672SNorbert Zulinski if (CRC_OFFLOAD_ALLOWED(adapter) &&
45317559d672SNorbert Zulinski ((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
45327559d672SNorbert Zulinski iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
45335ec8b7d1SJesse Brandeburg
453409d23b89SPiotr Gardocki if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
453509d23b89SPiotr Gardocki if (features & NETIF_F_NTUPLE)
453609d23b89SPiotr Gardocki adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
453709d23b89SPiotr Gardocki else
453809d23b89SPiotr Gardocki iavf_disable_fdir(adapter);
453909d23b89SPiotr Gardocki }
454009d23b89SPiotr Gardocki
45415ec8b7d1SJesse Brandeburg return 0;
45425ec8b7d1SJesse Brandeburg }
45435ec8b7d1SJesse Brandeburg
45445ec8b7d1SJesse Brandeburg /**
45455ec8b7d1SJesse Brandeburg * iavf_features_check - Validate encapsulated packet conforms to limits
45465ec8b7d1SJesse Brandeburg * @skb: skb buff
45475ec8b7d1SJesse Brandeburg * @dev: This physical port's netdev
45485ec8b7d1SJesse Brandeburg * @features: Offload features that the stack believes apply
45495ec8b7d1SJesse Brandeburg **/
45505ec8b7d1SJesse Brandeburg static netdev_features_t iavf_features_check(struct sk_buff *skb,
45515ec8b7d1SJesse Brandeburg struct net_device *dev,
45525ec8b7d1SJesse Brandeburg netdev_features_t features)
45535ec8b7d1SJesse Brandeburg {
45545ec8b7d1SJesse Brandeburg size_t len;
45555ec8b7d1SJesse Brandeburg
45565ec8b7d1SJesse Brandeburg /* No point in doing any of this if neither checksum nor GSO are
45575ec8b7d1SJesse Brandeburg * being requested for this frame. We can rule out both by just
45585ec8b7d1SJesse Brandeburg * checking for CHECKSUM_PARTIAL
45595ec8b7d1SJesse Brandeburg */
45605ec8b7d1SJesse Brandeburg if (skb->ip_summed != CHECKSUM_PARTIAL)
45615ec8b7d1SJesse Brandeburg return features;
45625ec8b7d1SJesse Brandeburg
45635ec8b7d1SJesse Brandeburg /* We cannot support GSO if the MSS is going to be less than
45645ec8b7d1SJesse Brandeburg * 64 bytes. If it is then we need to drop support for GSO.
45655ec8b7d1SJesse Brandeburg */
45665ec8b7d1SJesse Brandeburg if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
45675ec8b7d1SJesse Brandeburg features &= ~NETIF_F_GSO_MASK;
45685ec8b7d1SJesse Brandeburg
45695ec8b7d1SJesse Brandeburg /* MACLEN can support at most 63 words */
457080bfab79SEric Dumazet len = skb_network_offset(skb);
45715ec8b7d1SJesse Brandeburg if (len & ~(63 * 2))
45725ec8b7d1SJesse Brandeburg goto out_err;
45735ec8b7d1SJesse Brandeburg
45745ec8b7d1SJesse Brandeburg /* IPLEN and EIPLEN can support at most 127 dwords */
4575cc15bd10SEric Dumazet len = skb_network_header_len(skb);
45765ec8b7d1SJesse Brandeburg if (len & ~(127 * 4))
45775ec8b7d1SJesse Brandeburg goto out_err;
45785ec8b7d1SJesse Brandeburg
45795ec8b7d1SJesse Brandeburg if (skb->encapsulation) {
45805ec8b7d1SJesse Brandeburg /* L4TUNLEN can support 127 words */
45815ec8b7d1SJesse Brandeburg len = skb_inner_network_header(skb) - skb_transport_header(skb);
45825ec8b7d1SJesse Brandeburg if (len & ~(127 * 2))
45835ec8b7d1SJesse Brandeburg goto out_err;
45845ec8b7d1SJesse Brandeburg
45855ec8b7d1SJesse Brandeburg /* IPLEN can support at most 127 dwords */
45865ec8b7d1SJesse Brandeburg len = skb_inner_transport_header(skb) -
45875ec8b7d1SJesse Brandeburg skb_inner_network_header(skb);
45885ec8b7d1SJesse Brandeburg if (len & ~(127 * 4))
45895ec8b7d1SJesse Brandeburg goto out_err;
45905ec8b7d1SJesse Brandeburg }
45915ec8b7d1SJesse Brandeburg
45925ec8b7d1SJesse Brandeburg 	/* No need to validate L4LEN as TCP is the only protocol with a
4593afdc8a54SJilin Yuan 	 * flexible value, and we support every value TCP allows, which
45945ec8b7d1SJesse Brandeburg 	 * is at most 15 dwords
45955ec8b7d1SJesse Brandeburg */
45965ec8b7d1SJesse Brandeburg
45975ec8b7d1SJesse Brandeburg return features;
45985ec8b7d1SJesse Brandeburg out_err:
45995ec8b7d1SJesse Brandeburg return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
46005ec8b7d1SJesse Brandeburg }
46015ec8b7d1SJesse Brandeburg
46025ec8b7d1SJesse Brandeburg /**
460348ccc43eSBrett Creeley * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
460448ccc43eSBrett Creeley * @adapter: board private structure
460548ccc43eSBrett Creeley *
460648ccc43eSBrett Creeley  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
460748ccc43eSBrett Creeley  * was negotiated, determine the VLAN features that can be toggled on and off.
460848ccc43eSBrett Creeley **/
460948ccc43eSBrett Creeley static netdev_features_t
461048ccc43eSBrett Creeley iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
461148ccc43eSBrett Creeley {
461248ccc43eSBrett Creeley netdev_features_t hw_features = 0;
461348ccc43eSBrett Creeley
461448ccc43eSBrett Creeley if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
461548ccc43eSBrett Creeley return hw_features;
461648ccc43eSBrett Creeley
461748ccc43eSBrett Creeley /* Enable VLAN features if supported */
461848ccc43eSBrett Creeley if (VLAN_ALLOWED(adapter)) {
461948ccc43eSBrett Creeley hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
462048ccc43eSBrett Creeley NETIF_F_HW_VLAN_CTAG_RX);
462148ccc43eSBrett Creeley } else if (VLAN_V2_ALLOWED(adapter)) {
462248ccc43eSBrett Creeley struct virtchnl_vlan_caps *vlan_v2_caps =
462348ccc43eSBrett Creeley &adapter->vlan_v2_caps;
462448ccc43eSBrett Creeley struct virtchnl_vlan_supported_caps *stripping_support =
462548ccc43eSBrett Creeley &vlan_v2_caps->offloads.stripping_support;
462648ccc43eSBrett Creeley struct virtchnl_vlan_supported_caps *insertion_support =
462748ccc43eSBrett Creeley &vlan_v2_caps->offloads.insertion_support;
462848ccc43eSBrett Creeley
462948ccc43eSBrett Creeley if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
463048ccc43eSBrett Creeley stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
463148ccc43eSBrett Creeley if (stripping_support->outer &
463248ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100)
463348ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
463448ccc43eSBrett Creeley if (stripping_support->outer &
463548ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_88A8)
463648ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_STAG_RX;
463748ccc43eSBrett Creeley } else if (stripping_support->inner !=
463848ccc43eSBrett Creeley VIRTCHNL_VLAN_UNSUPPORTED &&
463948ccc43eSBrett Creeley stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
464048ccc43eSBrett Creeley if (stripping_support->inner &
464148ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100)
464248ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
464348ccc43eSBrett Creeley }
464448ccc43eSBrett Creeley
464548ccc43eSBrett Creeley if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
464648ccc43eSBrett Creeley insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
464748ccc43eSBrett Creeley if (insertion_support->outer &
464848ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100)
464948ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
465048ccc43eSBrett Creeley if (insertion_support->outer &
465148ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_88A8)
465248ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_STAG_TX;
465348ccc43eSBrett Creeley } else if (insertion_support->inner &&
465448ccc43eSBrett Creeley insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
465548ccc43eSBrett Creeley if (insertion_support->inner &
465648ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100)
465748ccc43eSBrett Creeley hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
465848ccc43eSBrett Creeley }
465948ccc43eSBrett Creeley }
466048ccc43eSBrett Creeley
46617559d672SNorbert Zulinski if (CRC_OFFLOAD_ALLOWED(adapter))
46627559d672SNorbert Zulinski hw_features |= NETIF_F_RXFCS;
46637559d672SNorbert Zulinski
466448ccc43eSBrett Creeley return hw_features;
466548ccc43eSBrett Creeley }
466648ccc43eSBrett Creeley
466748ccc43eSBrett Creeley /**
466848ccc43eSBrett Creeley  * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
466948ccc43eSBrett Creeley * @adapter: board private structure
467048ccc43eSBrett Creeley *
467148ccc43eSBrett Creeley  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
467248ccc43eSBrett Creeley  * was negotiated, determine the VLAN features that are enabled by default.
467348ccc43eSBrett Creeley **/
467448ccc43eSBrett Creeley static netdev_features_t
467548ccc43eSBrett Creeley iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
467648ccc43eSBrett Creeley {
467748ccc43eSBrett Creeley netdev_features_t features = 0;
467848ccc43eSBrett Creeley
467948ccc43eSBrett Creeley if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
468048ccc43eSBrett Creeley return features;
468148ccc43eSBrett Creeley
468248ccc43eSBrett Creeley if (VLAN_ALLOWED(adapter)) {
468348ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_CTAG_FILTER |
468448ccc43eSBrett Creeley NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
468548ccc43eSBrett Creeley } else if (VLAN_V2_ALLOWED(adapter)) {
468648ccc43eSBrett Creeley struct virtchnl_vlan_caps *vlan_v2_caps =
468748ccc43eSBrett Creeley &adapter->vlan_v2_caps;
468848ccc43eSBrett Creeley struct virtchnl_vlan_supported_caps *filtering_support =
468948ccc43eSBrett Creeley &vlan_v2_caps->filtering.filtering_support;
469048ccc43eSBrett Creeley struct virtchnl_vlan_supported_caps *stripping_support =
469148ccc43eSBrett Creeley &vlan_v2_caps->offloads.stripping_support;
469248ccc43eSBrett Creeley struct virtchnl_vlan_supported_caps *insertion_support =
469348ccc43eSBrett Creeley &vlan_v2_caps->offloads.insertion_support;
469448ccc43eSBrett Creeley u32 ethertype_init;
469548ccc43eSBrett Creeley
469648ccc43eSBrett Creeley /* give priority to outer stripping and don't support both outer
469748ccc43eSBrett Creeley * and inner stripping
469848ccc43eSBrett Creeley */
469948ccc43eSBrett Creeley ethertype_init = vlan_v2_caps->offloads.ethertype_init;
470048ccc43eSBrett Creeley if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
470148ccc43eSBrett Creeley if (stripping_support->outer &
470248ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100 &&
470348ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
470448ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_CTAG_RX;
470548ccc43eSBrett Creeley else if (stripping_support->outer &
470648ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
470748ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
470848ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_STAG_RX;
470948ccc43eSBrett Creeley } else if (stripping_support->inner !=
471048ccc43eSBrett Creeley VIRTCHNL_VLAN_UNSUPPORTED) {
471148ccc43eSBrett Creeley if (stripping_support->inner &
471248ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100 &&
471348ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
471448ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_CTAG_RX;
471548ccc43eSBrett Creeley }
471648ccc43eSBrett Creeley
471748ccc43eSBrett Creeley /* give priority to outer insertion and don't support both outer
471848ccc43eSBrett Creeley * and inner insertion
471948ccc43eSBrett Creeley */
472048ccc43eSBrett Creeley if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
472148ccc43eSBrett Creeley if (insertion_support->outer &
472248ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100 &&
472348ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
472448ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_CTAG_TX;
472548ccc43eSBrett Creeley else if (insertion_support->outer &
472648ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
472748ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
472848ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_STAG_TX;
472948ccc43eSBrett Creeley } else if (insertion_support->inner !=
473048ccc43eSBrett Creeley VIRTCHNL_VLAN_UNSUPPORTED) {
473148ccc43eSBrett Creeley if (insertion_support->inner &
473248ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100 &&
473348ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
473448ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_CTAG_TX;
473548ccc43eSBrett Creeley }
473648ccc43eSBrett Creeley
473748ccc43eSBrett Creeley /* give priority to outer filtering and don't bother if both
473848ccc43eSBrett Creeley * outer and inner filtering are enabled
473948ccc43eSBrett Creeley */
474048ccc43eSBrett Creeley ethertype_init = vlan_v2_caps->filtering.ethertype_init;
474148ccc43eSBrett Creeley if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
474248ccc43eSBrett Creeley if (filtering_support->outer &
474348ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100 &&
474448ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
474548ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_CTAG_FILTER;
474648ccc43eSBrett Creeley if (filtering_support->outer &
474748ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
474848ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
474948ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_STAG_FILTER;
475048ccc43eSBrett Creeley } else if (filtering_support->inner !=
475148ccc43eSBrett Creeley VIRTCHNL_VLAN_UNSUPPORTED) {
475248ccc43eSBrett Creeley if (filtering_support->inner &
475348ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_8100 &&
475448ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
475548ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_CTAG_FILTER;
475648ccc43eSBrett Creeley if (filtering_support->inner &
475748ccc43eSBrett Creeley VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
475848ccc43eSBrett Creeley ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
475948ccc43eSBrett Creeley features |= NETIF_F_HW_VLAN_STAG_FILTER;
476048ccc43eSBrett Creeley }
476148ccc43eSBrett Creeley }
476248ccc43eSBrett Creeley
476348ccc43eSBrett Creeley return features;
476448ccc43eSBrett Creeley }
476548ccc43eSBrett Creeley
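/* Helper for iavf_fix_netdev_vlan_features(): evaluates to false only when
 * @requested asks for @feature_bit while @allowed does not contain it, i.e.
 * the stack wants something the negotiated VLAN caps cannot provide.
 */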
476648ccc43eSBrett Creeley #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
476748ccc43eSBrett Creeley (!(((requested) & (feature_bit)) && \
476848ccc43eSBrett Creeley !((allowed) & (feature_bit))))
476948ccc43eSBrett Creeley
477048ccc43eSBrett Creeley /**
477148ccc43eSBrett Creeley * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
477248ccc43eSBrett Creeley * @adapter: board private structure
477348ccc43eSBrett Creeley * @requested_features: stack requested NETDEV features
477448ccc43eSBrett Creeley **/
477548ccc43eSBrett Creeley static netdev_features_t
477648ccc43eSBrett Creeley iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
477748ccc43eSBrett Creeley netdev_features_t requested_features)
477848ccc43eSBrett Creeley {
477948ccc43eSBrett Creeley netdev_features_t allowed_features;
478048ccc43eSBrett Creeley
478148ccc43eSBrett Creeley allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
478248ccc43eSBrett Creeley iavf_get_netdev_vlan_features(adapter);
478348ccc43eSBrett Creeley
478448ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
478548ccc43eSBrett Creeley allowed_features,
478648ccc43eSBrett Creeley NETIF_F_HW_VLAN_CTAG_TX))
478748ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
478848ccc43eSBrett Creeley
478948ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
479048ccc43eSBrett Creeley allowed_features,
479148ccc43eSBrett Creeley NETIF_F_HW_VLAN_CTAG_RX))
479248ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
479348ccc43eSBrett Creeley
479448ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
479548ccc43eSBrett Creeley allowed_features,
479648ccc43eSBrett Creeley NETIF_F_HW_VLAN_STAG_TX))
479748ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
479848ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
479948ccc43eSBrett Creeley allowed_features,
480048ccc43eSBrett Creeley NETIF_F_HW_VLAN_STAG_RX))
480148ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
480248ccc43eSBrett Creeley
480348ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
480448ccc43eSBrett Creeley allowed_features,
480548ccc43eSBrett Creeley NETIF_F_HW_VLAN_CTAG_FILTER))
480648ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
480748ccc43eSBrett Creeley
480848ccc43eSBrett Creeley if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
480948ccc43eSBrett Creeley allowed_features,
481048ccc43eSBrett Creeley NETIF_F_HW_VLAN_STAG_FILTER))
481148ccc43eSBrett Creeley requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
481248ccc43eSBrett Creeley
481348ccc43eSBrett Creeley if ((requested_features &
481448ccc43eSBrett Creeley (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
481548ccc43eSBrett Creeley (requested_features &
481648ccc43eSBrett Creeley (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
481748ccc43eSBrett Creeley adapter->vlan_v2_caps.offloads.ethertype_match ==
481848ccc43eSBrett Creeley VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
481948ccc43eSBrett Creeley netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
482048ccc43eSBrett Creeley requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
482148ccc43eSBrett Creeley NETIF_F_HW_VLAN_STAG_TX);
482248ccc43eSBrett Creeley }
482348ccc43eSBrett Creeley
482448ccc43eSBrett Creeley return requested_features;
482548ccc43eSBrett Creeley }
482648ccc43eSBrett Creeley
482748ccc43eSBrett Creeley /**
48287559d672SNorbert Zulinski * iavf_fix_strip_features - fix NETDEV CRC and VLAN strip features
48297559d672SNorbert Zulinski * @adapter: board private structure
48307559d672SNorbert Zulinski * @requested_features: stack requested NETDEV features
48317559d672SNorbert Zulinski *
48327559d672SNorbert Zulinski * Returns fixed-up features bits
48337559d672SNorbert Zulinski **/
48347559d672SNorbert Zulinski static netdev_features_t
48357559d672SNorbert Zulinski iavf_fix_strip_features(struct iavf_adapter *adapter,
48367559d672SNorbert Zulinski netdev_features_t requested_features)
48377559d672SNorbert Zulinski {
48387559d672SNorbert Zulinski struct net_device *netdev = adapter->netdev;
48397559d672SNorbert Zulinski bool crc_offload_req, is_vlan_strip;
48407559d672SNorbert Zulinski netdev_features_t vlan_strip;
48417559d672SNorbert Zulinski int num_non_zero_vlan;
48427559d672SNorbert Zulinski
48437559d672SNorbert Zulinski crc_offload_req = CRC_OFFLOAD_ALLOWED(adapter) &&
48447559d672SNorbert Zulinski (requested_features & NETIF_F_RXFCS);
48457559d672SNorbert Zulinski num_non_zero_vlan = iavf_get_num_vlans_added(adapter);
48467559d672SNorbert Zulinski vlan_strip = (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX);
48477559d672SNorbert Zulinski is_vlan_strip = requested_features & vlan_strip;
48487559d672SNorbert Zulinski
48497559d672SNorbert Zulinski if (!crc_offload_req)
48507559d672SNorbert Zulinski return requested_features;
48517559d672SNorbert Zulinski
48527559d672SNorbert Zulinski if (!num_non_zero_vlan && (netdev->features & vlan_strip) &&
48537559d672SNorbert Zulinski !(netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
48547559d672SNorbert Zulinski requested_features &= ~vlan_strip;
48557559d672SNorbert Zulinski netdev_info(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
48567559d672SNorbert Zulinski return requested_features;
48577559d672SNorbert Zulinski }
48587559d672SNorbert Zulinski
48597559d672SNorbert Zulinski if ((netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
48607559d672SNorbert Zulinski requested_features &= ~vlan_strip;
48617559d672SNorbert Zulinski if (!(netdev->features & vlan_strip))
48627559d672SNorbert Zulinski netdev_info(netdev, "To enable VLAN stripping, first need to enable FCS/CRC stripping");
48637559d672SNorbert Zulinski
48647559d672SNorbert Zulinski return requested_features;
48657559d672SNorbert Zulinski }
48667559d672SNorbert Zulinski
48677559d672SNorbert Zulinski if (num_non_zero_vlan && is_vlan_strip &&
48687559d672SNorbert Zulinski !(netdev->features & NETIF_F_RXFCS)) {
48697559d672SNorbert Zulinski requested_features &= ~NETIF_F_RXFCS;
48707559d672SNorbert Zulinski netdev_info(netdev, "To disable FCS/CRC stripping, first need to disable VLAN stripping");
48717559d672SNorbert Zulinski }
48727559d672SNorbert Zulinski
48737559d672SNorbert Zulinski return requested_features;
48747559d672SNorbert Zulinski }
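/* Net effect of the checks above: NETIF_F_RXFCS (keep the FCS on received
 * frames) and the VLAN RX-stripping features are never both enabled; when a
 * request would combine them, one of the two is dropped from the requested
 * feature set and a message explains which.
 */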
48757559d672SNorbert Zulinski
48767559d672SNorbert Zulinski /**
48775ec8b7d1SJesse Brandeburg * iavf_fix_features - fix up the netdev feature bits
48785ec8b7d1SJesse Brandeburg * @netdev: our net device
48795ec8b7d1SJesse Brandeburg * @features: desired feature bits
48805ec8b7d1SJesse Brandeburg *
48815ec8b7d1SJesse Brandeburg * Returns fixed-up features bits
48825ec8b7d1SJesse Brandeburg **/
48835ec8b7d1SJesse Brandeburg static netdev_features_t iavf_fix_features(struct net_device *netdev,
48845ec8b7d1SJesse Brandeburg netdev_features_t features)
48855ec8b7d1SJesse Brandeburg {
48865ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
48875ec8b7d1SJesse Brandeburg
48887559d672SNorbert Zulinski features = iavf_fix_netdev_vlan_features(adapter, features);
48897559d672SNorbert Zulinski
489009d23b89SPiotr Gardocki if (!FDIR_FLTR_SUPPORT(adapter))
489109d23b89SPiotr Gardocki features &= ~NETIF_F_NTUPLE;
489209d23b89SPiotr Gardocki
48937559d672SNorbert Zulinski return iavf_fix_strip_features(adapter, features);
48945ec8b7d1SJesse Brandeburg }
48955ec8b7d1SJesse Brandeburg
48965ec8b7d1SJesse Brandeburg static const struct net_device_ops iavf_netdev_ops = {
48975ec8b7d1SJesse Brandeburg .ndo_open = iavf_open,
48985ec8b7d1SJesse Brandeburg .ndo_stop = iavf_close,
48995ec8b7d1SJesse Brandeburg .ndo_start_xmit = iavf_xmit_frame,
49005ec8b7d1SJesse Brandeburg .ndo_set_rx_mode = iavf_set_rx_mode,
49015ec8b7d1SJesse Brandeburg .ndo_validate_addr = eth_validate_addr,
49025ec8b7d1SJesse Brandeburg .ndo_set_mac_address = iavf_set_mac,
49035ec8b7d1SJesse Brandeburg .ndo_change_mtu = iavf_change_mtu,
49045ec8b7d1SJesse Brandeburg .ndo_tx_timeout = iavf_tx_timeout,
49055ec8b7d1SJesse Brandeburg .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
49065ec8b7d1SJesse Brandeburg .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
49075ec8b7d1SJesse Brandeburg .ndo_features_check = iavf_features_check,
49085ec8b7d1SJesse Brandeburg .ndo_fix_features = iavf_fix_features,
49095ec8b7d1SJesse Brandeburg .ndo_set_features = iavf_set_features,
49105ec8b7d1SJesse Brandeburg .ndo_setup_tc = iavf_setup_tc,
49115ec8b7d1SJesse Brandeburg };
49125ec8b7d1SJesse Brandeburg
49135ec8b7d1SJesse Brandeburg /**
49145ec8b7d1SJesse Brandeburg * iavf_check_reset_complete - check that VF reset is complete
49155ec8b7d1SJesse Brandeburg * @hw: pointer to hw struct
49165ec8b7d1SJesse Brandeburg *
49175ec8b7d1SJesse Brandeburg * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
49185ec8b7d1SJesse Brandeburg **/
4919f349daa5SJesse Brandeburg static int iavf_check_reset_complete(struct iavf_hw *hw)
49205ec8b7d1SJesse Brandeburg {
49215ec8b7d1SJesse Brandeburg u32 rstat;
49225ec8b7d1SJesse Brandeburg int i;
49235ec8b7d1SJesse Brandeburg
49248e3e4b9dSPaul Greenwalt for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4925f1cad2ceSJesse Brandeburg rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4926f1cad2ceSJesse Brandeburg IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
49275ec8b7d1SJesse Brandeburg if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
49285ec8b7d1SJesse Brandeburg (rstat == VIRTCHNL_VFR_COMPLETED))
49295ec8b7d1SJesse Brandeburg return 0;
493054584b17SMichal Schmidt msleep(IAVF_RESET_WAIT_MS);
49315ec8b7d1SJesse Brandeburg }
49325ec8b7d1SJesse Brandeburg return -EBUSY;
49335ec8b7d1SJesse Brandeburg }
49345ec8b7d1SJesse Brandeburg
49355ec8b7d1SJesse Brandeburg /**
49365ec8b7d1SJesse Brandeburg * iavf_process_config - Process the config information we got from the PF
49375ec8b7d1SJesse Brandeburg * @adapter: board private structure
49385ec8b7d1SJesse Brandeburg *
49395ec8b7d1SJesse Brandeburg * Verify that we have a valid config struct, and set up our netdev features
49405ec8b7d1SJesse Brandeburg * and our VSI struct.
49415ec8b7d1SJesse Brandeburg **/
49425ec8b7d1SJesse Brandeburg int iavf_process_config(struct iavf_adapter *adapter)
49435ec8b7d1SJesse Brandeburg {
49445ec8b7d1SJesse Brandeburg struct virtchnl_vf_resource *vfres = adapter->vf_res;
494548ccc43eSBrett Creeley netdev_features_t hw_vlan_features, vlan_features;
49465ec8b7d1SJesse Brandeburg struct net_device *netdev = adapter->netdev;
49475ec8b7d1SJesse Brandeburg netdev_features_t hw_enc_features;
49485ec8b7d1SJesse Brandeburg netdev_features_t hw_features;
49495ec8b7d1SJesse Brandeburg
49505ec8b7d1SJesse Brandeburg hw_enc_features = NETIF_F_SG |
49515ec8b7d1SJesse Brandeburg NETIF_F_IP_CSUM |
49525ec8b7d1SJesse Brandeburg NETIF_F_IPV6_CSUM |
49535ec8b7d1SJesse Brandeburg NETIF_F_HIGHDMA |
49545ec8b7d1SJesse Brandeburg NETIF_F_SOFT_FEATURES |
49555ec8b7d1SJesse Brandeburg NETIF_F_TSO |
49565ec8b7d1SJesse Brandeburg NETIF_F_TSO_ECN |
49575ec8b7d1SJesse Brandeburg NETIF_F_TSO6 |
49585ec8b7d1SJesse Brandeburg NETIF_F_SCTP_CRC |
49595ec8b7d1SJesse Brandeburg NETIF_F_RXHASH |
49605ec8b7d1SJesse Brandeburg NETIF_F_RXCSUM |
49615ec8b7d1SJesse Brandeburg 0;
49625ec8b7d1SJesse Brandeburg
49635ec8b7d1SJesse Brandeburg 	/* advertise to stack only if offloads for encapsulated packets are
49645ec8b7d1SJesse Brandeburg 	 * supported
49655ec8b7d1SJesse Brandeburg */
49665ec8b7d1SJesse Brandeburg if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
49675ec8b7d1SJesse Brandeburg hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
49685ec8b7d1SJesse Brandeburg NETIF_F_GSO_GRE |
49695ec8b7d1SJesse Brandeburg NETIF_F_GSO_GRE_CSUM |
49705ec8b7d1SJesse Brandeburg NETIF_F_GSO_IPXIP4 |
49715ec8b7d1SJesse Brandeburg NETIF_F_GSO_IPXIP6 |
49725ec8b7d1SJesse Brandeburg NETIF_F_GSO_UDP_TUNNEL_CSUM |
49735ec8b7d1SJesse Brandeburg NETIF_F_GSO_PARTIAL |
49745ec8b7d1SJesse Brandeburg 0;
49755ec8b7d1SJesse Brandeburg
49765ec8b7d1SJesse Brandeburg if (!(vfres->vf_cap_flags &
49775ec8b7d1SJesse Brandeburg VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
49785ec8b7d1SJesse Brandeburg netdev->gso_partial_features |=
49795ec8b7d1SJesse Brandeburg NETIF_F_GSO_UDP_TUNNEL_CSUM;
49805ec8b7d1SJesse Brandeburg
49815ec8b7d1SJesse Brandeburg netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
49825ec8b7d1SJesse Brandeburg netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
49835ec8b7d1SJesse Brandeburg netdev->hw_enc_features |= hw_enc_features;
49845ec8b7d1SJesse Brandeburg }
49855ec8b7d1SJesse Brandeburg /* record features VLANs can make use of */
49865ec8b7d1SJesse Brandeburg netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
49875ec8b7d1SJesse Brandeburg
49885ec8b7d1SJesse Brandeburg /* Write features and hw_features separately to avoid polluting
49895ec8b7d1SJesse Brandeburg * with, or dropping, features that are set when we registered.
49905ec8b7d1SJesse Brandeburg */
49915ec8b7d1SJesse Brandeburg hw_features = hw_enc_features;
49925ec8b7d1SJesse Brandeburg
499348ccc43eSBrett Creeley /* get HW VLAN features that can be toggled */
499448ccc43eSBrett Creeley hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
499548ccc43eSBrett Creeley
4996*623122acSAhmed Zaki /* Enable HW TC offload if ADQ or tc U32 is supported */
4997*623122acSAhmed Zaki if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ||
4998*623122acSAhmed Zaki TC_U32_SUPPORT(adapter))
49995ec8b7d1SJesse Brandeburg hw_features |= NETIF_F_HW_TC;
5000*623122acSAhmed Zaki
5001c91a4f9fSBrett Creeley if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
5002c91a4f9fSBrett Creeley hw_features |= NETIF_F_GSO_UDP_L4;
50035ec8b7d1SJesse Brandeburg
500448ccc43eSBrett Creeley netdev->hw_features |= hw_features | hw_vlan_features;
500548ccc43eSBrett Creeley vlan_features = iavf_get_netdev_vlan_features(adapter);
50065ec8b7d1SJesse Brandeburg
500748ccc43eSBrett Creeley netdev->features |= hw_features | vlan_features;
50085ec8b7d1SJesse Brandeburg
50095ec8b7d1SJesse Brandeburg if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
50105ec8b7d1SJesse Brandeburg netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
50115ec8b7d1SJesse Brandeburg
501209d23b89SPiotr Gardocki if (FDIR_FLTR_SUPPORT(adapter)) {
501309d23b89SPiotr Gardocki netdev->hw_features |= NETIF_F_NTUPLE;
501409d23b89SPiotr Gardocki netdev->features |= NETIF_F_NTUPLE;
501509d23b89SPiotr Gardocki adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
501609d23b89SPiotr Gardocki }
501709d23b89SPiotr Gardocki
50185ec8b7d1SJesse Brandeburg netdev->priv_flags |= IFF_UNICAST_FLT;
50195ec8b7d1SJesse Brandeburg
50205ec8b7d1SJesse Brandeburg /* Do not turn on offloads when they are requested to be turned off.
50215ec8b7d1SJesse Brandeburg * TSO needs an MTU of at least 576 bytes to work correctly.
50225ec8b7d1SJesse Brandeburg */
50235ec8b7d1SJesse Brandeburg if (netdev->wanted_features) {
50245ec8b7d1SJesse Brandeburg if (!(netdev->wanted_features & NETIF_F_TSO) ||
50255ec8b7d1SJesse Brandeburg netdev->mtu < 576)
50265ec8b7d1SJesse Brandeburg netdev->features &= ~NETIF_F_TSO;
50275ec8b7d1SJesse Brandeburg if (!(netdev->wanted_features & NETIF_F_TSO6) ||
50285ec8b7d1SJesse Brandeburg netdev->mtu < 576)
50295ec8b7d1SJesse Brandeburg netdev->features &= ~NETIF_F_TSO6;
50305ec8b7d1SJesse Brandeburg if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
50315ec8b7d1SJesse Brandeburg netdev->features &= ~NETIF_F_TSO_ECN;
50325ec8b7d1SJesse Brandeburg if (!(netdev->wanted_features & NETIF_F_GRO))
50335ec8b7d1SJesse Brandeburg netdev->features &= ~NETIF_F_GRO;
50345ec8b7d1SJesse Brandeburg if (!(netdev->wanted_features & NETIF_F_GSO))
50355ec8b7d1SJesse Brandeburg netdev->features &= ~NETIF_F_GSO;
50365ec8b7d1SJesse Brandeburg }
50375ec8b7d1SJesse Brandeburg
50385ec8b7d1SJesse Brandeburg return 0;
50395ec8b7d1SJesse Brandeburg }
50405ec8b7d1SJesse Brandeburg
50415ec8b7d1SJesse Brandeburg /**
50425ec8b7d1SJesse Brandeburg * iavf_probe - Device Initialization Routine
50435ec8b7d1SJesse Brandeburg * @pdev: PCI device information struct
50445ec8b7d1SJesse Brandeburg * @ent: entry in iavf_pci_tbl
50455ec8b7d1SJesse Brandeburg *
50465ec8b7d1SJesse Brandeburg * Returns 0 on success, negative on failure
50475ec8b7d1SJesse Brandeburg *
50485ec8b7d1SJesse Brandeburg * iavf_probe initializes an adapter identified by a pci_dev structure.
50495ec8b7d1SJesse Brandeburg * The OS initialization, configuring of the adapter private structure,
50505ec8b7d1SJesse Brandeburg * and a hardware reset occur.
50515ec8b7d1SJesse Brandeburg **/
50525ec8b7d1SJesse Brandeburg static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50535ec8b7d1SJesse Brandeburg {
50545ec8b7d1SJesse Brandeburg struct net_device *netdev;
50555ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = NULL;
5056f349daa5SJesse Brandeburg struct iavf_hw *hw = NULL;
50575ec8b7d1SJesse Brandeburg int err;
50585ec8b7d1SJesse Brandeburg
50595ec8b7d1SJesse Brandeburg err = pci_enable_device(pdev);
50605ec8b7d1SJesse Brandeburg if (err)
50615ec8b7d1SJesse Brandeburg return err;
50625ec8b7d1SJesse Brandeburg
50635ec8b7d1SJesse Brandeburg err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
50645ec8b7d1SJesse Brandeburg if (err) {
50655ec8b7d1SJesse Brandeburg dev_err(&pdev->dev,
50665ec8b7d1SJesse Brandeburg "DMA configuration failed: 0x%x\n", err);
50675ec8b7d1SJesse Brandeburg goto err_dma;
50685ec8b7d1SJesse Brandeburg }
50695ec8b7d1SJesse Brandeburg
50705ec8b7d1SJesse Brandeburg err = pci_request_regions(pdev, iavf_driver_name);
50715ec8b7d1SJesse Brandeburg if (err) {
50725ec8b7d1SJesse Brandeburg dev_err(&pdev->dev,
50735ec8b7d1SJesse Brandeburg "pci_request_regions failed 0x%x\n", err);
50745ec8b7d1SJesse Brandeburg goto err_pci_reg;
50755ec8b7d1SJesse Brandeburg }
50765ec8b7d1SJesse Brandeburg
50775ec8b7d1SJesse Brandeburg pci_set_master(pdev);
50785ec8b7d1SJesse Brandeburg
50795ec8b7d1SJesse Brandeburg netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
50805ec8b7d1SJesse Brandeburg IAVF_MAX_REQ_QUEUES);
50815ec8b7d1SJesse Brandeburg if (!netdev) {
50825ec8b7d1SJesse Brandeburg err = -ENOMEM;
50835ec8b7d1SJesse Brandeburg goto err_alloc_etherdev;
50845ec8b7d1SJesse Brandeburg }
50855ec8b7d1SJesse Brandeburg
50865ec8b7d1SJesse Brandeburg SET_NETDEV_DEV(netdev, &pdev->dev);
50875ec8b7d1SJesse Brandeburg
50885ec8b7d1SJesse Brandeburg pci_set_drvdata(pdev, netdev);
50895ec8b7d1SJesse Brandeburg adapter = netdev_priv(netdev);
50905ec8b7d1SJesse Brandeburg
50915ec8b7d1SJesse Brandeburg adapter->netdev = netdev;
50925ec8b7d1SJesse Brandeburg adapter->pdev = pdev;
50935ec8b7d1SJesse Brandeburg
50945ec8b7d1SJesse Brandeburg hw = &adapter->hw;
50955ec8b7d1SJesse Brandeburg hw->back = adapter;
50965ec8b7d1SJesse Brandeburg
50974411a608SMichal Schmidt adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
50984411a608SMichal Schmidt iavf_driver_name);
50994411a608SMichal Schmidt if (!adapter->wq) {
51004411a608SMichal Schmidt err = -ENOMEM;
51014411a608SMichal Schmidt goto err_alloc_wq;
51024411a608SMichal Schmidt }
51034411a608SMichal Schmidt
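	/* Default message level: enable every netif message type below
	 * DEFAULT_DEBUG_LEVEL_SHIFT.
	 */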
51045ec8b7d1SJesse Brandeburg adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
510545eebd62SMateusz Palczewski iavf_change_state(adapter, __IAVF_STARTUP);
51065ec8b7d1SJesse Brandeburg
51075ec8b7d1SJesse Brandeburg /* Call save state here because it relies on the adapter struct. */
51085ec8b7d1SJesse Brandeburg pci_save_state(pdev);
51095ec8b7d1SJesse Brandeburg
51105ec8b7d1SJesse Brandeburg hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
51115ec8b7d1SJesse Brandeburg pci_resource_len(pdev, 0));
51125ec8b7d1SJesse Brandeburg if (!hw->hw_addr) {
51135ec8b7d1SJesse Brandeburg err = -EIO;
51145ec8b7d1SJesse Brandeburg goto err_ioremap;
51155ec8b7d1SJesse Brandeburg }
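	/* Record the PCI identity and bus location of this VF in the hw struct */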
51165ec8b7d1SJesse Brandeburg hw->vendor_id = pdev->vendor;
51175ec8b7d1SJesse Brandeburg hw->device_id = pdev->device;
51185ec8b7d1SJesse Brandeburg pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
51195ec8b7d1SJesse Brandeburg hw->subsystem_vendor_id = pdev->subsystem_vendor;
51205ec8b7d1SJesse Brandeburg hw->subsystem_device_id = pdev->subsystem_device;
51215ec8b7d1SJesse Brandeburg hw->bus.device = PCI_SLOT(pdev->devfn);
51225ec8b7d1SJesse Brandeburg hw->bus.func = PCI_FUNC(pdev->devfn);
51235ec8b7d1SJesse Brandeburg hw->bus.bus_id = pdev->bus->number;
51245ec8b7d1SJesse Brandeburg
51255ec8b7d1SJesse Brandeburg /* set up the locks for the AQ, do this only once in probe
51265ec8b7d1SJesse Brandeburg * and destroy them only once in remove
51275ec8b7d1SJesse Brandeburg */
51285ac49f3cSStefan Assmann mutex_init(&adapter->crit_lock);
51295ec8b7d1SJesse Brandeburg mutex_init(&hw->aq.asq_mutex);
51305ec8b7d1SJesse Brandeburg mutex_init(&hw->aq.arq_mutex);
51315ec8b7d1SJesse Brandeburg
51325ec8b7d1SJesse Brandeburg spin_lock_init(&adapter->mac_vlan_list_lock);
51335ec8b7d1SJesse Brandeburg spin_lock_init(&adapter->cloud_filter_list_lock);
51340dbfbabbSHaiyue Wang spin_lock_init(&adapter->fdir_fltr_lock);
51350aaeb4fbSHaiyue Wang spin_lock_init(&adapter->adv_rss_lock);
5136221465deSBrett Creeley spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
51375ec8b7d1SJesse Brandeburg
51385ec8b7d1SJesse Brandeburg INIT_LIST_HEAD(&adapter->mac_filter_list);
51395ec8b7d1SJesse Brandeburg INIT_LIST_HEAD(&adapter->vlan_filter_list);
51405ec8b7d1SJesse Brandeburg INIT_LIST_HEAD(&adapter->cloud_filter_list);
51410dbfbabbSHaiyue Wang INIT_LIST_HEAD(&adapter->fdir_list_head);
51420aaeb4fbSHaiyue Wang INIT_LIST_HEAD(&adapter->adv_rss_list_head);
51435ec8b7d1SJesse Brandeburg
51445ec8b7d1SJesse Brandeburg INIT_WORK(&adapter->reset_task, iavf_reset_task);
51455ec8b7d1SJesse Brandeburg INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
5146d1639a17SAhmed Zaki INIT_WORK(&adapter->finish_config, iavf_finish_config);
5147fdd4044fSJakub Pawlak INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
51485ec8b7d1SJesse Brandeburg
51495ec8b7d1SJesse Brandeburg /* Set up the wait queue for indicating transition to down status */
51505ec8b7d1SJesse Brandeburg init_waitqueue_head(&adapter->down_waitqueue);
51515ec8b7d1SJesse Brandeburg
5152c2ed2403SMarcin Szycik /* Set up the wait queue for indicating transition to running state */
5153c2ed2403SMarcin Szycik init_waitqueue_head(&adapter->reset_waitqueue);
5154c2ed2403SMarcin Szycik
515535a2443dSMateusz Palczewski /* Set up the wait queue for indicating virtchannel events */
515635a2443dSMateusz Palczewski init_waitqueue_head(&adapter->vc_waitqueue);
515735a2443dSMateusz Palczewski
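	/* Schedule the first watchdog run; the small per-function delay
	 * staggers startup when several VFs are probed at the same time.
	 */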
51587db31110SMichal Schmidt queue_delayed_work(adapter->wq, &adapter->watchdog_task,
51597db31110SMichal Schmidt msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
51607db31110SMichal Schmidt /* Initialization goes on in the work. Do not add more of it below. */
51615ec8b7d1SJesse Brandeburg return 0;
51625ec8b7d1SJesse Brandeburg
51635ec8b7d1SJesse Brandeburg err_ioremap:
51644411a608SMichal Schmidt destroy_workqueue(adapter->wq);
51654411a608SMichal Schmidt err_alloc_wq:
51665ec8b7d1SJesse Brandeburg free_netdev(netdev);
51675ec8b7d1SJesse Brandeburg err_alloc_etherdev:
51685ec8b7d1SJesse Brandeburg pci_release_regions(pdev);
51695ec8b7d1SJesse Brandeburg err_pci_reg:
51705ec8b7d1SJesse Brandeburg err_dma:
51715ec8b7d1SJesse Brandeburg pci_disable_device(pdev);
51725ec8b7d1SJesse Brandeburg return err;
51735ec8b7d1SJesse Brandeburg }
51745ec8b7d1SJesse Brandeburg
51755ec8b7d1SJesse Brandeburg /**
51765ec8b7d1SJesse Brandeburg * iavf_suspend - Power management suspend routine
5177b50f7bcaSJesse Brandeburg * @dev_d: device info pointer
51785ec8b7d1SJesse Brandeburg *
51795ec8b7d1SJesse Brandeburg * Called when the system (VM) is entering sleep/suspend.
51805ec8b7d1SJesse Brandeburg **/
518175a3f93bSJesse Brandeburg static int iavf_suspend(struct device *dev_d)
51825ec8b7d1SJesse Brandeburg {
5183bc5cbd73SVaibhav Gupta struct net_device *netdev = dev_get_drvdata(dev_d);
51845ec8b7d1SJesse Brandeburg struct iavf_adapter *adapter = netdev_priv(netdev);
51855ec8b7d1SJesse Brandeburg
51865ec8b7d1SJesse Brandeburg netif_device_detach(netdev);
51875ec8b7d1SJesse Brandeburg
51885902ee6dSMichal Schmidt mutex_lock(&adapter->crit_lock);
51895ec8b7d1SJesse Brandeburg
51905ec8b7d1SJesse Brandeburg if (netif_running(netdev)) {
51915ec8b7d1SJesse Brandeburg rtnl_lock();
51925ec8b7d1SJesse Brandeburg iavf_down(adapter);
51935ec8b7d1SJesse Brandeburg rtnl_unlock();
51945ec8b7d1SJesse Brandeburg }
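	/* Release the misc vector and MSI-X configuration while suspended;
	 * they are re-acquired in iavf_resume().
	 */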
51955ec8b7d1SJesse Brandeburg iavf_free_misc_irq(adapter);
51965ec8b7d1SJesse Brandeburg iavf_reset_interrupt_capability(adapter);
51975ec8b7d1SJesse Brandeburg
51985ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
51995ec8b7d1SJesse Brandeburg
52005ec8b7d1SJesse Brandeburg return 0;
52015ec8b7d1SJesse Brandeburg }
52025ec8b7d1SJesse Brandeburg
52035ec8b7d1SJesse Brandeburg /**
52045ec8b7d1SJesse Brandeburg * iavf_resume - Power management resume routine
5205b50f7bcaSJesse Brandeburg * @dev_d: device info pointer
52065ec8b7d1SJesse Brandeburg *
52075ec8b7d1SJesse Brandeburg * Called when the system (VM) is resumed from sleep/suspend.
52085ec8b7d1SJesse Brandeburg **/
520975a3f93bSJesse Brandeburg static int iavf_resume(struct device *dev_d)
52105ec8b7d1SJesse Brandeburg {
5211bc5cbd73SVaibhav Gupta struct pci_dev *pdev = to_pci_dev(dev_d);
5212247aa001SKaren Sornek struct iavf_adapter *adapter;
52135ec8b7d1SJesse Brandeburg int err;
52145ec8b7d1SJesse Brandeburg
5215247aa001SKaren Sornek adapter = iavf_pdev_to_adapter(pdev);
5216247aa001SKaren Sornek
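	/* Ensure bus mastering is enabled again now that the device is powered up */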
52175ec8b7d1SJesse Brandeburg pci_set_master(pdev);
52185ec8b7d1SJesse Brandeburg
52195ec8b7d1SJesse Brandeburg rtnl_lock();
52205ec8b7d1SJesse Brandeburg err = iavf_set_interrupt_capability(adapter);
52215ec8b7d1SJesse Brandeburg if (err) {
52225ec8b7d1SJesse Brandeburg rtnl_unlock();
52235ec8b7d1SJesse Brandeburg dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
52245ec8b7d1SJesse Brandeburg return err;
52255ec8b7d1SJesse Brandeburg }
52265ec8b7d1SJesse Brandeburg err = iavf_request_misc_irq(adapter);
52275ec8b7d1SJesse Brandeburg rtnl_unlock();
52285ec8b7d1SJesse Brandeburg if (err) {
52295ec8b7d1SJesse Brandeburg dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
52305ec8b7d1SJesse Brandeburg return err;
52315ec8b7d1SJesse Brandeburg }
52325ec8b7d1SJesse Brandeburg
52334411a608SMichal Schmidt queue_work(adapter->wq, &adapter->reset_task);
52345ec8b7d1SJesse Brandeburg
5235247aa001SKaren Sornek netif_device_attach(adapter->netdev);
52365ec8b7d1SJesse Brandeburg
52375ec8b7d1SJesse Brandeburg return err;
52385ec8b7d1SJesse Brandeburg }
52395ec8b7d1SJesse Brandeburg
52405ec8b7d1SJesse Brandeburg /**
52415ec8b7d1SJesse Brandeburg * iavf_remove - Device Removal Routine
52425ec8b7d1SJesse Brandeburg * @pdev: PCI device information struct
52435ec8b7d1SJesse Brandeburg *
52445ec8b7d1SJesse Brandeburg * iavf_remove is called by the PCI subsystem to alert the driver
52455ec8b7d1SJesse Brandeburg * that it should release a PCI device. This could be caused by a
52465ec8b7d1SJesse Brandeburg * Hot-Plug event, or because the driver is going to be removed from
52475ec8b7d1SJesse Brandeburg * memory.
52485ec8b7d1SJesse Brandeburg **/
52495ec8b7d1SJesse Brandeburg static void iavf_remove(struct pci_dev *pdev)
52505ec8b7d1SJesse Brandeburg {
52510dbfbabbSHaiyue Wang struct iavf_fdir_fltr *fdir, *fdirtmp;
52525ec8b7d1SJesse Brandeburg struct iavf_vlan_filter *vlf, *vlftmp;
5253a8417330SSlawomir Laba struct iavf_cloud_filter *cf, *cftmp;
52540aaeb4fbSHaiyue Wang struct iavf_adv_rss *rss, *rsstmp;
52555ec8b7d1SJesse Brandeburg struct iavf_mac_filter *f, *ftmp;
52567ae42ef3SSlawomir Laba struct iavf_adapter *adapter;
5257a8417330SSlawomir Laba struct net_device *netdev;
5258a8417330SSlawomir Laba struct iavf_hw *hw;
5259fc2e6b3bSSlawomir Laba
52607ae42ef3SSlawomir Laba /* Don't proceed with remove if netdev is already freed */
52617ae42ef3SSlawomir Laba netdev = pci_get_drvdata(pdev);
52627ae42ef3SSlawomir Laba if (!netdev)
52637ae42ef3SSlawomir Laba return;
52647ae42ef3SSlawomir Laba
52657ae42ef3SSlawomir Laba adapter = iavf_pdev_to_adapter(pdev);
5266a8417330SSlawomir Laba hw = &adapter->hw;
5267a8417330SSlawomir Laba
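	/* Only one context may run the remove path; bail out if it is
	 * already in progress.
	 */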
5268a8417330SSlawomir Laba if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
5269b04683ffSIvan Vecera return;
5270b04683ffSIvan Vecera
527197457801SSlawomir Laba /* Wait until port initialization is complete.
527297457801SSlawomir Laba * There are flows where register/unregister netdev may race.
527397457801SSlawomir Laba */
527497457801SSlawomir Laba while (1) {
527597457801SSlawomir Laba mutex_lock(&adapter->crit_lock);
527697457801SSlawomir Laba if (adapter->state == __IAVF_RUNNING ||
52773ccd54efSSlawomir Laba adapter->state == __IAVF_DOWN ||
52783ccd54efSSlawomir Laba adapter->state == __IAVF_INIT_FAILED) {
527997457801SSlawomir Laba mutex_unlock(&adapter->crit_lock);
528097457801SSlawomir Laba break;
528197457801SSlawomir Laba }
52824e264be9SStefan Assmann /* Simply return if we already went through iavf_shutdown */
52834e264be9SStefan Assmann if (adapter->state == __IAVF_REMOVE) {
52844e264be9SStefan Assmann mutex_unlock(&adapter->crit_lock);
52854e264be9SStefan Assmann return;
52864e264be9SStefan Assmann }
528797457801SSlawomir Laba
528897457801SSlawomir Laba mutex_unlock(&adapter->crit_lock);
528997457801SSlawomir Laba usleep_range(500, 1000);
529097457801SSlawomir Laba }
5291898ef1cbSMateusz Palczewski cancel_delayed_work_sync(&adapter->watchdog_task);
5292d1639a17SAhmed Zaki cancel_work_sync(&adapter->finish_config);
529397457801SSlawomir Laba
529434ad34bfSMichal Schmidt if (netdev->reg_state == NETREG_REGISTERED)
52955c4e1d18SMichal Schmidt unregister_netdev(netdev);
5296d1639a17SAhmed Zaki
5297fc2e6b3bSSlawomir Laba mutex_lock(&adapter->crit_lock);
529869b95744SBartosz Staszewski dev_info(&adapter->pdev->dev, "Removing device\n");
5299fc2e6b3bSSlawomir Laba iavf_change_state(adapter, __IAVF_REMOVE);
5300fc2e6b3bSSlawomir Laba
53015ec8b7d1SJesse Brandeburg iavf_request_reset(adapter);
53025ec8b7d1SJesse Brandeburg msleep(50);
53035ec8b7d1SJesse Brandeburg /* If the FW isn't responding, kick it once, but only once. */
53045ec8b7d1SJesse Brandeburg if (!iavf_asq_done(hw)) {
53055ec8b7d1SJesse Brandeburg iavf_request_reset(adapter);
53065ec8b7d1SJesse Brandeburg msleep(50);
53075ec8b7d1SJesse Brandeburg }
5308226d5285SStefan Assmann
5309fc2e6b3bSSlawomir Laba iavf_misc_irq_disable(adapter);
5310226d5285SStefan Assmann /* Shut down all the garbage mashers on the detention level */
5311fc2e6b3bSSlawomir Laba cancel_work_sync(&adapter->reset_task);
5312fc2e6b3bSSlawomir Laba cancel_delayed_work_sync(&adapter->watchdog_task);
5313fc2e6b3bSSlawomir Laba cancel_work_sync(&adapter->adminq_task);
5314fc2e6b3bSSlawomir Laba
5315226d5285SStefan Assmann adapter->aq_required = 0;
5316226d5285SStefan Assmann adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
5317605ca7c5SPrzemyslaw Patynowski
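	/* Release data path and interrupt resources before shutting down the
	 * admin queue below.
	 */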
53185ec8b7d1SJesse Brandeburg iavf_free_all_tx_resources(adapter);
53195ec8b7d1SJesse Brandeburg iavf_free_all_rx_resources(adapter);
53205ec8b7d1SJesse Brandeburg iavf_free_misc_irq(adapter);
5321b5b219a1SMichal Schmidt iavf_free_interrupt_scheme(adapter);
53225ec8b7d1SJesse Brandeburg
53235ec8b7d1SJesse Brandeburg iavf_free_rss(adapter);
53245ec8b7d1SJesse Brandeburg
53255ec8b7d1SJesse Brandeburg if (hw->aq.asq.count)
53265ec8b7d1SJesse Brandeburg iavf_shutdown_adminq(hw);
53275ec8b7d1SJesse Brandeburg
53285ec8b7d1SJesse Brandeburg /* destroy the locks only once, here */
53295ec8b7d1SJesse Brandeburg mutex_destroy(&hw->aq.arq_mutex);
53305ec8b7d1SJesse Brandeburg mutex_destroy(&hw->aq.asq_mutex);
53315ac49f3cSStefan Assmann mutex_unlock(&adapter->crit_lock);
53325ac49f3cSStefan Assmann mutex_destroy(&adapter->crit_lock);
53335ec8b7d1SJesse Brandeburg
53345ec8b7d1SJesse Brandeburg iounmap(hw->hw_addr);
53355ec8b7d1SJesse Brandeburg pci_release_regions(pdev);
53365ec8b7d1SJesse Brandeburg kfree(adapter->vf_res);
53375ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->mac_vlan_list_lock);
53385ec8b7d1SJesse Brandeburg /* If we got removed before an up/down sequence, we've got a filter
53395ec8b7d1SJesse Brandeburg * hanging out there that we need to get rid of.
53405ec8b7d1SJesse Brandeburg */
53415ec8b7d1SJesse Brandeburg list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
53425ec8b7d1SJesse Brandeburg list_del(&f->list);
53435ec8b7d1SJesse Brandeburg kfree(f);
53445ec8b7d1SJesse Brandeburg }
53455ec8b7d1SJesse Brandeburg list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
53465ec8b7d1SJesse Brandeburg list) {
53475ec8b7d1SJesse Brandeburg list_del(&vlf->list);
53485ec8b7d1SJesse Brandeburg kfree(vlf);
53495ec8b7d1SJesse Brandeburg }
53505ec8b7d1SJesse Brandeburg
53515ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->mac_vlan_list_lock);
53525ec8b7d1SJesse Brandeburg
53535ec8b7d1SJesse Brandeburg spin_lock_bh(&adapter->cloud_filter_list_lock);
53545ec8b7d1SJesse Brandeburg list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
53555ec8b7d1SJesse Brandeburg list_del(&cf->list);
53565ec8b7d1SJesse Brandeburg kfree(cf);
53575ec8b7d1SJesse Brandeburg }
53585ec8b7d1SJesse Brandeburg spin_unlock_bh(&adapter->cloud_filter_list_lock);
53595ec8b7d1SJesse Brandeburg
53600dbfbabbSHaiyue Wang spin_lock_bh(&adapter->fdir_fltr_lock);
53610dbfbabbSHaiyue Wang list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
53620dbfbabbSHaiyue Wang list_del(&fdir->list);
53630dbfbabbSHaiyue Wang kfree(fdir);
53640dbfbabbSHaiyue Wang }
53650dbfbabbSHaiyue Wang spin_unlock_bh(&adapter->fdir_fltr_lock);
53660dbfbabbSHaiyue Wang
53670aaeb4fbSHaiyue Wang spin_lock_bh(&adapter->adv_rss_lock);
53680aaeb4fbSHaiyue Wang list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
53690aaeb4fbSHaiyue Wang list) {
53700aaeb4fbSHaiyue Wang list_del(&rss->list);
53710aaeb4fbSHaiyue Wang kfree(rss);
53720aaeb4fbSHaiyue Wang }
53730aaeb4fbSHaiyue Wang spin_unlock_bh(&adapter->adv_rss_lock);
53740aaeb4fbSHaiyue Wang
53754411a608SMichal Schmidt destroy_workqueue(adapter->wq);
53764411a608SMichal Schmidt
53777ae42ef3SSlawomir Laba pci_set_drvdata(pdev, NULL);
53787ae42ef3SSlawomir Laba
53795ec8b7d1SJesse Brandeburg free_netdev(netdev);
53805ec8b7d1SJesse Brandeburg
53815ec8b7d1SJesse Brandeburg pci_disable_device(pdev);
53825ec8b7d1SJesse Brandeburg }
53835ec8b7d1SJesse Brandeburg
53847ae42ef3SSlawomir Laba /**
53857ae42ef3SSlawomir Laba * iavf_shutdown - Shutdown the device in preparation for a reboot
53867ae42ef3SSlawomir Laba * @pdev: pci device structure
53877ae42ef3SSlawomir Laba **/
53887ae42ef3SSlawomir Laba static void iavf_shutdown(struct pci_dev *pdev)
53897ae42ef3SSlawomir Laba {
53907ae42ef3SSlawomir Laba iavf_remove(pdev);
53917ae42ef3SSlawomir Laba
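	/* If the system is powering off, leave the device in D3hot to
	 * minimize power draw.
	 */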
53927ae42ef3SSlawomir Laba if (system_state == SYSTEM_POWER_OFF)
53937ae42ef3SSlawomir Laba pci_set_power_state(pdev, PCI_D3hot);
53947ae42ef3SSlawomir Laba }
53957ae42ef3SSlawomir Laba
539675a3f93bSJesse Brandeburg static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
5397bc5cbd73SVaibhav Gupta
53985ec8b7d1SJesse Brandeburg static struct pci_driver iavf_driver = {
53995ec8b7d1SJesse Brandeburg .name = iavf_driver_name,
54005ec8b7d1SJesse Brandeburg .id_table = iavf_pci_tbl,
54015ec8b7d1SJesse Brandeburg .probe = iavf_probe,
54025ec8b7d1SJesse Brandeburg .remove = iavf_remove,
540375a3f93bSJesse Brandeburg .driver.pm = pm_sleep_ptr(&iavf_pm_ops),
54045ec8b7d1SJesse Brandeburg .shutdown = iavf_shutdown,
54055ec8b7d1SJesse Brandeburg };
54065ec8b7d1SJesse Brandeburg
54075ec8b7d1SJesse Brandeburg /**
540856184e01SJesse Brandeburg * iavf_init_module - Driver Registration Routine
54095ec8b7d1SJesse Brandeburg *
541056184e01SJesse Brandeburg * iavf_init_module is the first routine called when the driver is
54115ec8b7d1SJesse Brandeburg * loaded. All it does is register with the PCI subsystem.
54125ec8b7d1SJesse Brandeburg **/
54135ec8b7d1SJesse Brandeburg static int __init iavf_init_module(void)
54145ec8b7d1SJesse Brandeburg {
541534a2a3b8SJeff Kirsher pr_info("iavf: %s\n", iavf_driver_string);
54165ec8b7d1SJesse Brandeburg
54175ec8b7d1SJesse Brandeburg pr_info("%s\n", iavf_copyright);
54185ec8b7d1SJesse Brandeburg
54194411a608SMichal Schmidt return pci_register_driver(&iavf_driver);
54205ec8b7d1SJesse Brandeburg }
54215ec8b7d1SJesse Brandeburg
54225ec8b7d1SJesse Brandeburg module_init(iavf_init_module);
54235ec8b7d1SJesse Brandeburg
54245ec8b7d1SJesse Brandeburg /**
542556184e01SJesse Brandeburg * iavf_exit_module - Driver Exit Cleanup Routine
54265ec8b7d1SJesse Brandeburg *
542756184e01SJesse Brandeburg * iavf_exit_module is called just before the driver is removed
54285ec8b7d1SJesse Brandeburg * from memory.
54295ec8b7d1SJesse Brandeburg **/
54305ec8b7d1SJesse Brandeburg static void __exit iavf_exit_module(void)
54315ec8b7d1SJesse Brandeburg {
54325ec8b7d1SJesse Brandeburg pci_unregister_driver(&iavf_driver);
54335ec8b7d1SJesse Brandeburg }
54345ec8b7d1SJesse Brandeburg
54355ec8b7d1SJesse Brandeburg module_exit(iavf_exit_module);
54365ec8b7d1SJesse Brandeburg
54375ec8b7d1SJesse Brandeburg /* iavf_main.c */