xref: /linux/drivers/net/ethernet/intel/iavf/iavf_main.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/net/intel/libie/rx.h>
5 #include <net/netdev_lock.h>
6 
7 #include "iavf.h"
8 #include "iavf_ptp.h"
9 #include "iavf_prototype.h"
10 /* All iavf tracepoints are defined by the include below, which must
11  * be included exactly once across the whole kernel with
12  * CREATE_TRACE_POINTS defined
13  */
14 #define CREATE_TRACE_POINTS
15 #include "iavf_trace.h"
16 
17 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
18 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
19 static int iavf_close(struct net_device *netdev);
20 static void iavf_init_get_resources(struct iavf_adapter *adapter);
21 static int iavf_check_reset_complete(struct iavf_hw *hw);
22 
23 char iavf_driver_name[] = "iavf";
24 static const char iavf_driver_string[] =
25 	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";
26 
27 static const char iavf_copyright[] =
28 	"Copyright (c) 2013 - 2018 Intel Corporation.";
29 
30 /* iavf_pci_tbl - PCI Device ID Table
31  *
32  * Wildcard entries (PCI_ANY_ID) should come last
33  * Last entry must be all 0s
34  *
35  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36  *   Class, Class Mask, private data (not used) }
37  */
38 static const struct pci_device_id iavf_pci_tbl[] = {
39 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
40 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
41 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
42 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
43 	/* required last entry */
44 	{0, }
45 };
46 
47 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
48 
49 MODULE_ALIAS("i40evf");
50 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
51 MODULE_IMPORT_NS("LIBETH");
52 MODULE_IMPORT_NS("LIBIE");
53 MODULE_IMPORT_NS("LIBIE_ADMINQ");
54 MODULE_LICENSE("GPL v2");
55 
56 static const struct net_device_ops iavf_netdev_ops;
57 
iavf_status_to_errno(enum iavf_status status)58 int iavf_status_to_errno(enum iavf_status status)
59 {
60 	switch (status) {
61 	case IAVF_SUCCESS:
62 		return 0;
63 	case IAVF_ERR_PARAM:
64 	case IAVF_ERR_MAC_TYPE:
65 	case IAVF_ERR_INVALID_MAC_ADDR:
66 	case IAVF_ERR_INVALID_LINK_SETTINGS:
67 	case IAVF_ERR_INVALID_PD_ID:
68 	case IAVF_ERR_INVALID_QP_ID:
69 	case IAVF_ERR_INVALID_CQ_ID:
70 	case IAVF_ERR_INVALID_CEQ_ID:
71 	case IAVF_ERR_INVALID_AEQ_ID:
72 	case IAVF_ERR_INVALID_SIZE:
73 	case IAVF_ERR_INVALID_ARP_INDEX:
74 	case IAVF_ERR_INVALID_FPM_FUNC_ID:
75 	case IAVF_ERR_QP_INVALID_MSG_SIZE:
76 	case IAVF_ERR_INVALID_FRAG_COUNT:
77 	case IAVF_ERR_INVALID_ALIGNMENT:
78 	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
79 	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
80 	case IAVF_ERR_INVALID_VF_ID:
81 	case IAVF_ERR_INVALID_HMCFN_ID:
82 	case IAVF_ERR_INVALID_PBLE_INDEX:
83 	case IAVF_ERR_INVALID_SD_INDEX:
84 	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
85 	case IAVF_ERR_INVALID_SD_TYPE:
86 	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
87 	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
88 	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
89 		return -EINVAL;
90 	case IAVF_ERR_NVM:
91 	case IAVF_ERR_NVM_CHECKSUM:
92 	case IAVF_ERR_PHY:
93 	case IAVF_ERR_CONFIG:
94 	case IAVF_ERR_UNKNOWN_PHY:
95 	case IAVF_ERR_LINK_SETUP:
96 	case IAVF_ERR_ADAPTER_STOPPED:
97 	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
98 	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
99 	case IAVF_ERR_RESET_FAILED:
100 	case IAVF_ERR_BAD_PTR:
101 	case IAVF_ERR_SWFW_SYNC:
102 	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
103 	case IAVF_ERR_QUEUE_EMPTY:
104 	case IAVF_ERR_FLUSHED_QUEUE:
105 	case IAVF_ERR_OPCODE_MISMATCH:
106 	case IAVF_ERR_CQP_COMPL_ERROR:
107 	case IAVF_ERR_BACKING_PAGE_ERROR:
108 	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
109 	case IAVF_ERR_MEMCPY_FAILED:
110 	case IAVF_ERR_SRQ_ENABLED:
111 	case IAVF_ERR_ADMIN_QUEUE_ERROR:
112 	case IAVF_ERR_ADMIN_QUEUE_FULL:
113 	case IAVF_ERR_BAD_RDMA_CQE:
114 	case IAVF_ERR_NVM_BLANK_MODE:
115 	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
116 	case IAVF_ERR_DIAG_TEST_FAILED:
117 	case IAVF_ERR_FIRMWARE_API_VERSION:
118 	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
119 		return -EIO;
120 	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
121 		return -ENODEV;
122 	case IAVF_ERR_NO_AVAILABLE_VSI:
123 	case IAVF_ERR_RING_FULL:
124 		return -ENOSPC;
125 	case IAVF_ERR_NO_MEMORY:
126 		return -ENOMEM;
127 	case IAVF_ERR_TIMEOUT:
128 	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
129 		return -ETIMEDOUT;
130 	case IAVF_ERR_NOT_IMPLEMENTED:
131 	case IAVF_NOT_SUPPORTED:
132 		return -EOPNOTSUPP;
133 	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
134 		return -EALREADY;
135 	case IAVF_ERR_NOT_READY:
136 		return -EBUSY;
137 	case IAVF_ERR_BUF_TOO_SHORT:
138 		return -EMSGSIZE;
139 	}
140 
141 	return -EIO;
142 }
143 
virtchnl_status_to_errno(enum virtchnl_status_code v_status)144 int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
145 {
146 	switch (v_status) {
147 	case VIRTCHNL_STATUS_SUCCESS:
148 		return 0;
149 	case VIRTCHNL_STATUS_ERR_PARAM:
150 	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
151 		return -EINVAL;
152 	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
153 		return -ENOMEM;
154 	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
155 	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
156 	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
157 		return -EIO;
158 	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
159 		return -EOPNOTSUPP;
160 	}
161 
162 	return -EIO;
163 }
164 
165 /**
166  * iavf_pdev_to_adapter - go from pci_dev to adapter
167  * @pdev: pci_dev pointer
168  */
iavf_pdev_to_adapter(struct pci_dev * pdev)169 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
170 {
171 	return netdev_priv(pci_get_drvdata(pdev));
172 }
173 
174 /**
175  * iavf_is_reset_in_progress - Check if a reset is in progress
176  * @adapter: board private structure
177  */
iavf_is_reset_in_progress(struct iavf_adapter * adapter)178 static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
179 {
180 	if (adapter->state == __IAVF_RESETTING ||
181 	    adapter->flags & (IAVF_FLAG_RESET_PENDING |
182 			      IAVF_FLAG_RESET_NEEDED))
183 		return true;
184 
185 	return false;
186 }
187 
188 /**
189  * iavf_wait_for_reset - Wait for reset to finish.
190  * @adapter: board private structure
191  *
192  * Returns 0 if reset finished successfully, negative on timeout or interrupt.
193  */
iavf_wait_for_reset(struct iavf_adapter * adapter)194 int iavf_wait_for_reset(struct iavf_adapter *adapter)
195 {
196 	int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
197 					!iavf_is_reset_in_progress(adapter),
198 					msecs_to_jiffies(5000));
199 
200 	/* If ret < 0 then it means wait was interrupted.
201 	 * If ret == 0 then it means we got a timeout while waiting
202 	 * for reset to finish.
203 	 * If ret > 0 it means reset has finished.
204 	 */
205 	if (ret > 0)
206 		return 0;
207 	else if (ret < 0)
208 		return -EINTR;
209 	else
210 		return -EBUSY;
211 }
212 
213 /**
214  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
215  * @hw:   pointer to the HW structure
216  * @mem:  ptr to mem struct to fill out
217  * @size: size of memory requested
218  * @alignment: what to align the allocation to
219  **/
iavf_allocate_dma_mem_d(struct iavf_hw * hw,struct iavf_dma_mem * mem,u64 size,u32 alignment)220 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
221 					 struct iavf_dma_mem *mem,
222 					 u64 size, u32 alignment)
223 {
224 	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
225 
226 	if (!mem)
227 		return IAVF_ERR_PARAM;
228 
229 	mem->size = ALIGN(size, alignment);
230 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
231 				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
232 	if (mem->va)
233 		return 0;
234 	else
235 		return IAVF_ERR_NO_MEMORY;
236 }
237 
238 /**
239  * iavf_free_dma_mem - wrapper for DMA memory freeing
240  * @hw:   pointer to the HW structure
241  * @mem:  ptr to mem struct to free
242  **/
iavf_free_dma_mem(struct iavf_hw * hw,struct iavf_dma_mem * mem)243 enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem)
244 {
245 	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
246 
247 	if (!mem || !mem->va)
248 		return IAVF_ERR_PARAM;
249 	dma_free_coherent(&adapter->pdev->dev, mem->size,
250 			  mem->va, (dma_addr_t)mem->pa);
251 	return 0;
252 }
253 
254 /**
255  * iavf_allocate_virt_mem - virt memory alloc wrapper
256  * @hw:   pointer to the HW structure
257  * @mem:  ptr to mem struct to fill out
258  * @size: size of memory requested
259  **/
iavf_allocate_virt_mem(struct iavf_hw * hw,struct iavf_virt_mem * mem,u32 size)260 enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
261 					struct iavf_virt_mem *mem, u32 size)
262 {
263 	if (!mem)
264 		return IAVF_ERR_PARAM;
265 
266 	mem->size = size;
267 	mem->va = kzalloc(size, GFP_KERNEL);
268 
269 	if (mem->va)
270 		return 0;
271 	else
272 		return IAVF_ERR_NO_MEMORY;
273 }
274 
275 /**
276  * iavf_free_virt_mem - virt memory free wrapper
277  * @hw:   pointer to the HW structure
278  * @mem:  ptr to mem struct to free
279  **/
iavf_free_virt_mem(struct iavf_hw * hw,struct iavf_virt_mem * mem)280 void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
281 {
282 	kfree(mem->va);
283 }
284 
285 /**
286  * iavf_schedule_reset - Set the flags and schedule a reset event
287  * @adapter: board private structure
288  * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
289  **/
iavf_schedule_reset(struct iavf_adapter * adapter,u64 flags)290 void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
291 {
292 	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
293 	    !(adapter->flags &
294 	    (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
295 		adapter->flags |= flags;
296 		queue_work(adapter->wq, &adapter->reset_task);
297 	}
298 }
299 
300 /**
301  * iavf_schedule_aq_request - Set the flags and schedule aq request
302  * @adapter: board private structure
303  * @flags: requested aq flags
304  **/
iavf_schedule_aq_request(struct iavf_adapter * adapter,u64 flags)305 void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
306 {
307 	adapter->aq_required |= flags;
308 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
309 }
310 
311 /**
312  * iavf_tx_timeout - Respond to a Tx Hang
313  * @netdev: network interface device structure
314  * @txqueue: queue number that is timing out
315  **/
iavf_tx_timeout(struct net_device * netdev,unsigned int txqueue)316 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
317 {
318 	struct iavf_adapter *adapter = netdev_priv(netdev);
319 
320 	adapter->tx_timeout_count++;
321 	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
322 }
323 
324 /**
325  * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
326  * @adapter: board private structure
327  **/
iavf_misc_irq_disable(struct iavf_adapter * adapter)328 static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
329 {
330 	struct iavf_hw *hw = &adapter->hw;
331 
332 	if (!adapter->msix_entries)
333 		return;
334 
335 	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
336 
337 	iavf_flush(hw);
338 
339 	synchronize_irq(adapter->msix_entries[0].vector);
340 }
341 
342 /**
343  * iavf_misc_irq_enable - Enable default interrupt generation settings
344  * @adapter: board private structure
345  **/
iavf_misc_irq_enable(struct iavf_adapter * adapter)346 static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
347 {
348 	struct iavf_hw *hw = &adapter->hw;
349 
350 	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
351 				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
352 	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
353 
354 	iavf_flush(hw);
355 }
356 
357 /**
358  * iavf_irq_disable - Mask off interrupt generation on the NIC
359  * @adapter: board private structure
360  **/
iavf_irq_disable(struct iavf_adapter * adapter)361 static void iavf_irq_disable(struct iavf_adapter *adapter)
362 {
363 	int i;
364 	struct iavf_hw *hw = &adapter->hw;
365 
366 	if (!adapter->msix_entries)
367 		return;
368 
369 	for (i = 1; i < adapter->num_msix_vectors; i++) {
370 		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
371 		synchronize_irq(adapter->msix_entries[i].vector);
372 	}
373 	iavf_flush(hw);
374 }
375 
376 /**
377  * iavf_irq_enable_queues - Enable interrupt for all queues
378  * @adapter: board private structure
379  **/
iavf_irq_enable_queues(struct iavf_adapter * adapter)380 static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
381 {
382 	struct iavf_hw *hw = &adapter->hw;
383 	int i;
384 
385 	for (i = 1; i < adapter->num_msix_vectors; i++) {
386 		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
387 		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
388 		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
389 	}
390 }
391 
392 /**
393  * iavf_irq_enable - Enable default interrupt generation settings
394  * @adapter: board private structure
395  * @flush: boolean value whether to run rd32()
396  **/
iavf_irq_enable(struct iavf_adapter * adapter,bool flush)397 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
398 {
399 	struct iavf_hw *hw = &adapter->hw;
400 
401 	iavf_misc_irq_enable(adapter);
402 	iavf_irq_enable_queues(adapter);
403 
404 	if (flush)
405 		iavf_flush(hw);
406 }
407 
408 /**
409  * iavf_msix_aq - Interrupt handler for vector 0
410  * @irq: interrupt number
411  * @data: pointer to netdev
412  **/
iavf_msix_aq(int irq,void * data)413 static irqreturn_t iavf_msix_aq(int irq, void *data)
414 {
415 	struct net_device *netdev = data;
416 	struct iavf_adapter *adapter = netdev_priv(netdev);
417 	struct iavf_hw *hw = &adapter->hw;
418 
419 	/* handle non-queue interrupts, these reads clear the registers */
420 	rd32(hw, IAVF_VFINT_ICR01);
421 	rd32(hw, IAVF_VFINT_ICR0_ENA1);
422 
423 	if (adapter->state != __IAVF_REMOVE)
424 		/* schedule work on the private workqueue */
425 		queue_work(adapter->wq, &adapter->adminq_task);
426 
427 	return IRQ_HANDLED;
428 }
429 
430 /**
431  * iavf_msix_clean_rings - MSIX mode Interrupt Handler
432  * @irq: interrupt number
433  * @data: pointer to a q_vector
434  **/
iavf_msix_clean_rings(int irq,void * data)435 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
436 {
437 	struct iavf_q_vector *q_vector = data;
438 
439 	if (!q_vector->tx.ring && !q_vector->rx.ring)
440 		return IRQ_HANDLED;
441 
442 	napi_schedule_irqoff(&q_vector->napi);
443 
444 	return IRQ_HANDLED;
445 }
446 
447 /**
448  * iavf_map_vector_to_rxq - associate irqs with rx queues
449  * @adapter: board private structure
450  * @v_idx: interrupt number
451  * @r_idx: queue number
452  **/
453 static void
iavf_map_vector_to_rxq(struct iavf_adapter * adapter,int v_idx,int r_idx)454 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
455 {
456 	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
457 	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
458 	struct iavf_hw *hw = &adapter->hw;
459 
460 	rx_ring->q_vector = q_vector;
461 	rx_ring->next = q_vector->rx.ring;
462 	rx_ring->vsi = &adapter->vsi;
463 	q_vector->rx.ring = rx_ring;
464 	q_vector->rx.count++;
465 	q_vector->rx.next_update = jiffies + 1;
466 	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
467 	q_vector->ring_mask |= BIT(r_idx);
468 	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
469 	     q_vector->rx.current_itr >> 1);
470 	q_vector->rx.current_itr = q_vector->rx.target_itr;
471 }
472 
473 /**
474  * iavf_map_vector_to_txq - associate irqs with tx queues
475  * @adapter: board private structure
476  * @v_idx: interrupt number
477  * @t_idx: queue number
478  **/
479 static void
iavf_map_vector_to_txq(struct iavf_adapter * adapter,int v_idx,int t_idx)480 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
481 {
482 	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
483 	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
484 	struct iavf_hw *hw = &adapter->hw;
485 
486 	tx_ring->q_vector = q_vector;
487 	tx_ring->next = q_vector->tx.ring;
488 	tx_ring->vsi = &adapter->vsi;
489 	q_vector->tx.ring = tx_ring;
490 	q_vector->tx.count++;
491 	q_vector->tx.next_update = jiffies + 1;
492 	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
493 	q_vector->num_ringpairs++;
494 	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
495 	     q_vector->tx.target_itr >> 1);
496 	q_vector->tx.current_itr = q_vector->tx.target_itr;
497 }
498 
499 /**
500  * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
501  * @adapter: board private structure to initialize
502  *
503  * This function maps descriptor rings to the queue-specific vectors
504  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
505  * one vector per ring/queue, but on a constrained vector budget, we
506  * group the rings as "efficiently" as possible.  You would add new
507  * mapping configurations in here.
508  **/
iavf_map_rings_to_vectors(struct iavf_adapter * adapter)509 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
510 {
511 	int rings_remaining = adapter->num_active_queues;
512 	int ridx = 0, vidx = 0;
513 	int q_vectors;
514 
515 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
516 
517 	for (; ridx < rings_remaining; ridx++) {
518 		iavf_map_vector_to_rxq(adapter, vidx, ridx);
519 		iavf_map_vector_to_txq(adapter, vidx, ridx);
520 
521 		/* In the case where we have more queues than vectors, continue
522 		 * round-robin on vectors until all queues are mapped.
523 		 */
524 		if (++vidx >= q_vectors)
525 			vidx = 0;
526 	}
527 
528 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
529 }
530 
531 /**
532  * iavf_request_traffic_irqs - Initialize MSI-X interrupts
533  * @adapter: board private structure
534  * @basename: device basename
535  *
536  * Allocates MSI-X vectors for tx and rx handling, and requests
537  * interrupts from the kernel.
538  **/
539 static int
iavf_request_traffic_irqs(struct iavf_adapter * adapter,char * basename)540 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
541 {
542 	unsigned int vector, q_vectors;
543 	unsigned int rx_int_idx = 0, tx_int_idx = 0;
544 	int irq_num, err;
545 
546 	iavf_irq_disable(adapter);
547 	/* Decrement for Other and TCP Timer vectors */
548 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
549 
550 	for (vector = 0; vector < q_vectors; vector++) {
551 		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
552 
553 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
554 
555 		if (q_vector->tx.ring && q_vector->rx.ring) {
556 			snprintf(q_vector->name, sizeof(q_vector->name),
557 				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
558 			tx_int_idx++;
559 		} else if (q_vector->rx.ring) {
560 			snprintf(q_vector->name, sizeof(q_vector->name),
561 				 "iavf-%s-rx-%u", basename, rx_int_idx++);
562 		} else if (q_vector->tx.ring) {
563 			snprintf(q_vector->name, sizeof(q_vector->name),
564 				 "iavf-%s-tx-%u", basename, tx_int_idx++);
565 		} else {
566 			/* skip this unused q_vector */
567 			continue;
568 		}
569 		err = request_irq(irq_num,
570 				  iavf_msix_clean_rings,
571 				  0,
572 				  q_vector->name,
573 				  q_vector);
574 		if (err) {
575 			dev_info(&adapter->pdev->dev,
576 				 "Request_irq failed, error: %d\n", err);
577 			goto free_queue_irqs;
578 		}
579 	}
580 
581 	return 0;
582 
583 free_queue_irqs:
584 	while (vector) {
585 		vector--;
586 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
587 		free_irq(irq_num, &adapter->q_vectors[vector]);
588 	}
589 	return err;
590 }
591 
592 /**
593  * iavf_request_misc_irq - Initialize MSI-X interrupts
594  * @adapter: board private structure
595  *
596  * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
597  * vector is only for the admin queue, and stays active even when the netdev
598  * is closed.
599  **/
iavf_request_misc_irq(struct iavf_adapter * adapter)600 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
601 {
602 	struct net_device *netdev = adapter->netdev;
603 	int err;
604 
605 	snprintf(adapter->misc_vector_name,
606 		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
607 		 dev_name(&adapter->pdev->dev));
608 	err = request_irq(adapter->msix_entries[0].vector,
609 			  &iavf_msix_aq, 0,
610 			  adapter->misc_vector_name, netdev);
611 	if (err) {
612 		dev_err(&adapter->pdev->dev,
613 			"request_irq for %s failed: %d\n",
614 			adapter->misc_vector_name, err);
615 		free_irq(adapter->msix_entries[0].vector, netdev);
616 	}
617 	return err;
618 }
619 
620 /**
621  * iavf_free_traffic_irqs - Free MSI-X interrupts
622  * @adapter: board private structure
623  *
624  * Frees all MSI-X vectors other than 0.
625  **/
iavf_free_traffic_irqs(struct iavf_adapter * adapter)626 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
627 {
628 	struct iavf_q_vector *q_vector;
629 	int vector, irq_num, q_vectors;
630 
631 	if (!adapter->msix_entries)
632 		return;
633 
634 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
635 
636 	for (vector = 0; vector < q_vectors; vector++) {
637 		q_vector = &adapter->q_vectors[vector];
638 		netif_napi_set_irq_locked(&q_vector->napi, -1);
639 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
640 		free_irq(irq_num, q_vector);
641 	}
642 }
643 
644 /**
645  * iavf_free_misc_irq - Free MSI-X miscellaneous vector
646  * @adapter: board private structure
647  *
648  * Frees MSI-X vector 0.
649  **/
iavf_free_misc_irq(struct iavf_adapter * adapter)650 static void iavf_free_misc_irq(struct iavf_adapter *adapter)
651 {
652 	struct net_device *netdev = adapter->netdev;
653 
654 	if (!adapter->msix_entries)
655 		return;
656 
657 	free_irq(adapter->msix_entries[0].vector, netdev);
658 }
659 
660 /**
661  * iavf_configure_tx - Configure Transmit Unit after Reset
662  * @adapter: board private structure
663  *
664  * Configure the Tx unit of the MAC after a reset.
665  **/
iavf_configure_tx(struct iavf_adapter * adapter)666 static void iavf_configure_tx(struct iavf_adapter *adapter)
667 {
668 	struct iavf_hw *hw = &adapter->hw;
669 	int i;
670 
671 	for (i = 0; i < adapter->num_active_queues; i++)
672 		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
673 }
674 
675 /**
676  * iavf_select_rx_desc_format - Select Rx descriptor format
677  * @adapter: adapter private structure
678  *
679  * Select what Rx descriptor format based on availability and enabled
680  * features.
681  *
682  * Return: the desired RXDID to select for a given Rx queue, as defined by
683  *         enum virtchnl_rxdid_format.
684  */
iavf_select_rx_desc_format(const struct iavf_adapter * adapter)685 static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter)
686 {
687 	u64 rxdids = adapter->supp_rxdids;
688 
689 	/* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must
690 	 * stick with the default value of the legacy 32 byte format.
691 	 */
692 	if (!IAVF_RXDID_ALLOWED(adapter))
693 		return VIRTCHNL_RXDID_1_32B_BASE;
694 
695 	/* Rx timestamping requires the use of flexible NIC descriptors */
696 	if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
697 		if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
698 			return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
699 
700 		pci_warn(adapter->pdev,
701 			 "Unable to negotiate flexible descriptor format\n");
702 	}
703 
704 	/* Warn if the PF does not list support for the default legacy
705 	 * descriptor format. This shouldn't happen, as this is the format
706 	 * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is
707 	 * likely caused by a bug in the PF implementation failing to indicate
708 	 * support for the format.
709 	 */
710 	if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
711 		netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");
712 
713 	return VIRTCHNL_RXDID_1_32B_BASE;
714 }
715 
716 /**
717  * iavf_configure_rx - Configure Receive Unit after Reset
718  * @adapter: board private structure
719  *
720  * Configure the Rx unit of the MAC after a reset.
721  **/
iavf_configure_rx(struct iavf_adapter * adapter)722 static void iavf_configure_rx(struct iavf_adapter *adapter)
723 {
724 	struct iavf_hw *hw = &adapter->hw;
725 
726 	adapter->rxdid = iavf_select_rx_desc_format(adapter);
727 
728 	for (u32 i = 0; i < adapter->num_active_queues; i++) {
729 		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
730 		adapter->rx_rings[i].rxdid = adapter->rxdid;
731 	}
732 }
733 
734 /**
735  * iavf_find_vlan - Search filter list for specific vlan filter
736  * @adapter: board private structure
737  * @vlan: vlan tag
738  *
739  * Returns ptr to the filter object or NULL. Must be called while holding the
740  * mac_vlan_list_lock.
741  **/
742 static struct
iavf_find_vlan(struct iavf_adapter * adapter,struct iavf_vlan vlan)743 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
744 				 struct iavf_vlan vlan)
745 {
746 	struct iavf_vlan_filter *f;
747 
748 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
749 		if (f->vlan.vid == vlan.vid &&
750 		    f->vlan.tpid == vlan.tpid)
751 			return f;
752 	}
753 
754 	return NULL;
755 }
756 
757 /**
758  * iavf_add_vlan - Add a vlan filter to the list
759  * @adapter: board private structure
760  * @vlan: VLAN tag
761  *
762  * Returns ptr to the filter object or NULL when no memory available.
763  **/
764 static struct
iavf_add_vlan(struct iavf_adapter * adapter,struct iavf_vlan vlan)765 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
766 				struct iavf_vlan vlan)
767 {
768 	struct iavf_vlan_filter *f = NULL;
769 
770 	spin_lock_bh(&adapter->mac_vlan_list_lock);
771 
772 	f = iavf_find_vlan(adapter, vlan);
773 	if (!f) {
774 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
775 		if (!f)
776 			goto clearout;
777 
778 		f->vlan = vlan;
779 
780 		list_add_tail(&f->list, &adapter->vlan_filter_list);
781 		f->state = IAVF_VLAN_ADD;
782 		adapter->num_vlan_filters++;
783 		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
784 	} else if (f->state == IAVF_VLAN_REMOVE) {
785 		/* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed.
786 		 * We can safely only change the state here.
787 		 */
788 		f->state = IAVF_VLAN_ACTIVE;
789 	}
790 
791 clearout:
792 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
793 	return f;
794 }
795 
796 /**
797  * iavf_del_vlan - Remove a vlan filter from the list
798  * @adapter: board private structure
799  * @vlan: VLAN tag
800  **/
iavf_del_vlan(struct iavf_adapter * adapter,struct iavf_vlan vlan)801 static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
802 {
803 	struct iavf_vlan_filter *f;
804 
805 	spin_lock_bh(&adapter->mac_vlan_list_lock);
806 
807 	f = iavf_find_vlan(adapter, vlan);
808 	if (f) {
809 		/* IAVF_ADD_VLAN means that VLAN wasn't even added yet.
810 		 * Remove it from the list.
811 		 */
812 		if (f->state == IAVF_VLAN_ADD) {
813 			list_del(&f->list);
814 			kfree(f);
815 			adapter->num_vlan_filters--;
816 		} else {
817 			f->state = IAVF_VLAN_REMOVE;
818 			iavf_schedule_aq_request(adapter,
819 						 IAVF_FLAG_AQ_DEL_VLAN_FILTER);
820 		}
821 	}
822 
823 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
824 }
825 
826 /**
827  * iavf_restore_filters
828  * @adapter: board private structure
829  *
830  * Restore existing non MAC filters when VF netdev comes back up
831  **/
iavf_restore_filters(struct iavf_adapter * adapter)832 static void iavf_restore_filters(struct iavf_adapter *adapter)
833 {
834 	struct iavf_vlan_filter *f;
835 
836 	/* re-add all VLAN filters */
837 	spin_lock_bh(&adapter->mac_vlan_list_lock);
838 
839 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
840 		if (f->state == IAVF_VLAN_INACTIVE)
841 			f->state = IAVF_VLAN_ADD;
842 	}
843 
844 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
845 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
846 }
847 
848 /**
849  * iavf_get_num_vlans_added - get number of VLANs added
850  * @adapter: board private structure
851  */
iavf_get_num_vlans_added(struct iavf_adapter * adapter)852 u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
853 {
854 	return adapter->num_vlan_filters;
855 }
856 
857 /**
858  * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
859  * @adapter: board private structure
860  *
861  * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
862  * do not impose a limit as that maintains current behavior and for
863  * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
864  **/
iavf_get_max_vlans_allowed(struct iavf_adapter * adapter)865 static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
866 {
867 	/* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
868 	 * never been a limit on the VF driver side
869 	 */
870 	if (VLAN_ALLOWED(adapter))
871 		return VLAN_N_VID;
872 	else if (VLAN_V2_ALLOWED(adapter))
873 		return adapter->vlan_v2_caps.filtering.max_filters;
874 
875 	return 0;
876 }
877 
878 /**
879  * iavf_max_vlans_added - check if maximum VLANs allowed already exist
880  * @adapter: board private structure
881  **/
iavf_max_vlans_added(struct iavf_adapter * adapter)882 static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
883 {
884 	if (iavf_get_num_vlans_added(adapter) <
885 	    iavf_get_max_vlans_allowed(adapter))
886 		return false;
887 
888 	return true;
889 }
890 
891 /**
892  * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
893  * @netdev: network device struct
894  * @proto: unused protocol data
895  * @vid: VLAN tag
896  **/
iavf_vlan_rx_add_vid(struct net_device * netdev,__always_unused __be16 proto,u16 vid)897 static int iavf_vlan_rx_add_vid(struct net_device *netdev,
898 				__always_unused __be16 proto, u16 vid)
899 {
900 	struct iavf_adapter *adapter = netdev_priv(netdev);
901 
902 	/* Do not track VLAN 0 filter, always added by the PF on VF init */
903 	if (!vid)
904 		return 0;
905 
906 	if (!VLAN_FILTERING_ALLOWED(adapter))
907 		return -EIO;
908 
909 	if (iavf_max_vlans_added(adapter)) {
910 		netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
911 			   iavf_get_max_vlans_allowed(adapter));
912 		return -EIO;
913 	}
914 
915 	if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
916 		return -ENOMEM;
917 
918 	return 0;
919 }
920 
921 /**
922  * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
923  * @netdev: network device struct
924  * @proto: unused protocol data
925  * @vid: VLAN tag
926  **/
iavf_vlan_rx_kill_vid(struct net_device * netdev,__always_unused __be16 proto,u16 vid)927 static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
928 				 __always_unused __be16 proto, u16 vid)
929 {
930 	struct iavf_adapter *adapter = netdev_priv(netdev);
931 
932 	/* We do not track VLAN 0 filter */
933 	if (!vid)
934 		return 0;
935 
936 	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
937 	return 0;
938 }
939 
940 /**
941  * iavf_find_filter - Search filter list for specific mac filter
942  * @adapter: board private structure
943  * @macaddr: the MAC address
944  *
945  * Returns ptr to the filter object or NULL. Must be called while holding the
946  * mac_vlan_list_lock.
947  **/
948 static struct
iavf_find_filter(struct iavf_adapter * adapter,const u8 * macaddr)949 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
950 				  const u8 *macaddr)
951 {
952 	struct iavf_mac_filter *f;
953 
954 	if (!macaddr)
955 		return NULL;
956 
957 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
958 		if (ether_addr_equal(macaddr, f->macaddr))
959 			return f;
960 	}
961 	return NULL;
962 }
963 
964 /**
965  * iavf_add_filter - Add a mac filter to the filter list
966  * @adapter: board private structure
967  * @macaddr: the MAC address
968  *
969  * Returns ptr to the filter object or NULL when no memory available.
970  **/
iavf_add_filter(struct iavf_adapter * adapter,const u8 * macaddr)971 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
972 					const u8 *macaddr)
973 {
974 	struct iavf_mac_filter *f;
975 
976 	if (!macaddr)
977 		return NULL;
978 
979 	f = iavf_find_filter(adapter, macaddr);
980 	if (!f) {
981 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
982 		if (!f)
983 			return f;
984 
985 		ether_addr_copy(f->macaddr, macaddr);
986 
987 		list_add_tail(&f->list, &adapter->mac_filter_list);
988 		f->add = true;
989 		f->add_handled = false;
990 		f->is_new_mac = true;
991 		f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
992 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
993 	} else {
994 		f->remove = false;
995 	}
996 
997 	return f;
998 }
999 
1000 /**
1001  * iavf_replace_primary_mac - Replace current primary address
1002  * @adapter: board private structure
1003  * @new_mac: new MAC address to be applied
1004  *
1005  * Replace current dev_addr and send request to PF for removal of previous
1006  * primary MAC address filter and addition of new primary MAC filter.
1007  * Return 0 for success, -ENOMEM for failure.
1008  *
1009  * Do not call this with mac_vlan_list_lock!
1010  **/
iavf_replace_primary_mac(struct iavf_adapter * adapter,const u8 * new_mac)1011 static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
1012 				    const u8 *new_mac)
1013 {
1014 	struct iavf_hw *hw = &adapter->hw;
1015 	struct iavf_mac_filter *new_f;
1016 	struct iavf_mac_filter *old_f;
1017 
1018 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1019 
1020 	new_f = iavf_add_filter(adapter, new_mac);
1021 	if (!new_f) {
1022 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
1023 		return -ENOMEM;
1024 	}
1025 
1026 	old_f = iavf_find_filter(adapter, hw->mac.addr);
1027 	if (old_f) {
1028 		old_f->is_primary = false;
1029 		old_f->remove = true;
1030 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1031 	}
1032 	/* Always send the request to add if changing primary MAC,
1033 	 * even if filter is already present on the list
1034 	 */
1035 	new_f->is_primary = true;
1036 	new_f->add = true;
1037 	ether_addr_copy(hw->mac.addr, new_mac);
1038 
1039 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1040 
1041 	/* schedule the watchdog task to immediately process the request */
1042 	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER);
1043 	return 0;
1044 }
1045 
1046 /**
1047  * iavf_is_mac_set_handled - wait for a response to set MAC from PF
1048  * @netdev: network interface device structure
1049  * @macaddr: MAC address to set
1050  *
1051  * Returns true on success, false on failure
1052  */
iavf_is_mac_set_handled(struct net_device * netdev,const u8 * macaddr)1053 static bool iavf_is_mac_set_handled(struct net_device *netdev,
1054 				    const u8 *macaddr)
1055 {
1056 	struct iavf_adapter *adapter = netdev_priv(netdev);
1057 	struct iavf_mac_filter *f;
1058 	bool ret = false;
1059 
1060 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1061 
1062 	f = iavf_find_filter(adapter, macaddr);
1063 
1064 	if (!f || (!f->add && f->add_handled))
1065 		ret = true;
1066 
1067 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1068 
1069 	return ret;
1070 }
1071 
1072 /**
1073  * iavf_set_mac - NDO callback to set port MAC address
1074  * @netdev: network interface device structure
1075  * @p: pointer to an address structure
1076  *
1077  * Returns 0 on success, negative on failure
1078  */
iavf_set_mac(struct net_device * netdev,void * p)1079 static int iavf_set_mac(struct net_device *netdev, void *p)
1080 {
1081 	struct iavf_adapter *adapter = netdev_priv(netdev);
1082 	struct sockaddr *addr = p;
1083 	int ret;
1084 
1085 	if (!is_valid_ether_addr(addr->sa_data))
1086 		return -EADDRNOTAVAIL;
1087 
1088 	ret = iavf_replace_primary_mac(adapter, addr->sa_data);
1089 
1090 	if (ret)
1091 		return ret;
1092 
1093 	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
1094 					       iavf_is_mac_set_handled(netdev, addr->sa_data),
1095 					       msecs_to_jiffies(2500));
1096 
1097 	/* If ret < 0 then it means wait was interrupted.
1098 	 * If ret == 0 then it means we got a timeout.
1099 	 * else it means we got response for set MAC from PF,
1100 	 * check if netdev MAC was updated to requested MAC,
1101 	 * if yes then set MAC succeeded otherwise it failed return -EACCES
1102 	 */
1103 	if (ret < 0)
1104 		return ret;
1105 
1106 	if (!ret)
1107 		return -EAGAIN;
1108 
1109 	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
1110 		return -EACCES;
1111 
1112 	return 0;
1113 }
1114 
1115 /**
1116  * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1117  * @netdev: the netdevice
1118  * @addr: address to add
1119  *
1120  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1121  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1122  */
iavf_addr_sync(struct net_device * netdev,const u8 * addr)1123 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
1124 {
1125 	struct iavf_adapter *adapter = netdev_priv(netdev);
1126 
1127 	if (iavf_add_filter(adapter, addr))
1128 		return 0;
1129 	else
1130 		return -ENOMEM;
1131 }
1132 
1133 /**
1134  * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1135  * @netdev: the netdevice
1136  * @addr: address to add
1137  *
1138  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1139  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1140  */
iavf_addr_unsync(struct net_device * netdev,const u8 * addr)1141 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
1142 {
1143 	struct iavf_adapter *adapter = netdev_priv(netdev);
1144 	struct iavf_mac_filter *f;
1145 
1146 	/* Under some circumstances, we might receive a request to delete
1147 	 * our own device address from our uc list. Because we store the
1148 	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1149 	 * such requests and not delete our device address from this list.
1150 	 */
1151 	if (ether_addr_equal(addr, netdev->dev_addr))
1152 		return 0;
1153 
1154 	f = iavf_find_filter(adapter, addr);
1155 	if (f) {
1156 		f->remove = true;
1157 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1158 	}
1159 	return 0;
1160 }
1161 
1162 /**
1163  * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
1164  * @adapter: device specific adapter
1165  */
iavf_promiscuous_mode_changed(struct iavf_adapter * adapter)1166 bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
1167 {
1168 	return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
1169 		(IFF_PROMISC | IFF_ALLMULTI);
1170 }
1171 
1172 /**
1173  * iavf_set_rx_mode - NDO callback to set the netdev filters
1174  * @netdev: network interface device structure
1175  **/
iavf_set_rx_mode(struct net_device * netdev)1176 static void iavf_set_rx_mode(struct net_device *netdev)
1177 {
1178 	struct iavf_adapter *adapter = netdev_priv(netdev);
1179 
1180 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1181 	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1182 	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1183 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1184 
1185 	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
1186 	if (iavf_promiscuous_mode_changed(adapter))
1187 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
1188 	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
1189 }
1190 
1191 /**
1192  * iavf_napi_enable_all - enable NAPI on all queue vectors
1193  * @adapter: board private structure
1194  **/
iavf_napi_enable_all(struct iavf_adapter * adapter)1195 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
1196 {
1197 	int q_idx;
1198 	struct iavf_q_vector *q_vector;
1199 	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1200 
1201 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1202 		struct napi_struct *napi;
1203 
1204 		q_vector = &adapter->q_vectors[q_idx];
1205 		napi = &q_vector->napi;
1206 		napi_enable_locked(napi);
1207 	}
1208 }
1209 
1210 /**
1211  * iavf_napi_disable_all - disable NAPI on all queue vectors
1212  * @adapter: board private structure
1213  **/
iavf_napi_disable_all(struct iavf_adapter * adapter)1214 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
1215 {
1216 	int q_idx;
1217 	struct iavf_q_vector *q_vector;
1218 	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1219 
1220 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1221 		q_vector = &adapter->q_vectors[q_idx];
1222 		napi_disable_locked(&q_vector->napi);
1223 	}
1224 }
1225 
1226 /**
1227  * iavf_configure - set up transmit and receive data structures
1228  * @adapter: board private structure
1229  **/
iavf_configure(struct iavf_adapter * adapter)1230 static void iavf_configure(struct iavf_adapter *adapter)
1231 {
1232 	struct net_device *netdev = adapter->netdev;
1233 	int i;
1234 
1235 	iavf_set_rx_mode(netdev);
1236 
1237 	iavf_configure_tx(adapter);
1238 	iavf_configure_rx(adapter);
1239 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
1240 
1241 	for (i = 0; i < adapter->num_active_queues; i++) {
1242 		struct iavf_ring *ring = &adapter->rx_rings[i];
1243 
1244 		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
1245 	}
1246 }
1247 
1248 /**
1249  * iavf_up_complete - Finish the last steps of bringing up a connection
1250  * @adapter: board private structure
1251  */
iavf_up_complete(struct iavf_adapter * adapter)1252 static void iavf_up_complete(struct iavf_adapter *adapter)
1253 {
1254 	netdev_assert_locked(adapter->netdev);
1255 
1256 	iavf_change_state(adapter, __IAVF_RUNNING);
1257 	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1258 
1259 	iavf_napi_enable_all(adapter);
1260 
1261 	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
1262 }
1263 
1264 /**
1265  * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
1266  * yet and mark other to be removed.
1267  * @adapter: board private structure
1268  **/
iavf_clear_mac_vlan_filters(struct iavf_adapter * adapter)1269 static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
1270 {
1271 	struct iavf_vlan_filter *vlf, *vlftmp;
1272 	struct iavf_mac_filter *f, *ftmp;
1273 
1274 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1275 	/* clear the sync flag on all filters */
1276 	__dev_uc_unsync(adapter->netdev, NULL);
1277 	__dev_mc_unsync(adapter->netdev, NULL);
1278 
1279 	/* remove all MAC filters */
1280 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1281 				 list) {
1282 		if (f->add) {
1283 			list_del(&f->list);
1284 			kfree(f);
1285 		} else {
1286 			f->remove = true;
1287 		}
1288 	}
1289 
1290 	/* disable all VLAN filters */
1291 	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
1292 				 list)
1293 		vlf->state = IAVF_VLAN_DISABLE;
1294 
1295 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1296 }
1297 
1298 /**
1299  * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
1300  * mark other to be removed.
1301  * @adapter: board private structure
1302  **/
iavf_clear_cloud_filters(struct iavf_adapter * adapter)1303 static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
1304 {
1305 	struct iavf_cloud_filter *cf, *cftmp;
1306 
1307 	/* remove all cloud filters */
1308 	spin_lock_bh(&adapter->cloud_filter_list_lock);
1309 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1310 				 list) {
1311 		if (cf->add) {
1312 			list_del(&cf->list);
1313 			kfree(cf);
1314 			adapter->num_cloud_filters--;
1315 		} else {
1316 			cf->del = true;
1317 		}
1318 	}
1319 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
1320 }
1321 
1322 /**
1323  * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
1324  * other to be removed.
1325  * @adapter: board private structure
1326  **/
iavf_clear_fdir_filters(struct iavf_adapter * adapter)1327 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
1328 {
1329 	struct iavf_fdir_fltr *fdir;
1330 
1331 	/* remove all Flow Director filters */
1332 	spin_lock_bh(&adapter->fdir_fltr_lock);
1333 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1334 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1335 			/* Cancel a request, keep filter as inactive */
1336 			fdir->state = IAVF_FDIR_FLTR_INACTIVE;
1337 		} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
1338 			 fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
1339 			/* Disable filters which are active or have a pending
1340 			 * request to PF to be added
1341 			 */
1342 			fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
1343 		}
1344 	}
1345 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1346 }
1347 
1348 /**
1349  * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
1350  * other to be removed.
1351  * @adapter: board private structure
1352  **/
iavf_clear_adv_rss_conf(struct iavf_adapter * adapter)1353 static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
1354 {
1355 	struct iavf_adv_rss *rss, *rsstmp;
1356 
1357 	/* remove all advance RSS configuration */
1358 	spin_lock_bh(&adapter->adv_rss_lock);
1359 	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
1360 				 list) {
1361 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1362 			list_del(&rss->list);
1363 			kfree(rss);
1364 		} else {
1365 			rss->state = IAVF_ADV_RSS_DEL_REQUEST;
1366 		}
1367 	}
1368 	spin_unlock_bh(&adapter->adv_rss_lock);
1369 }
1370 
1371 /**
1372  * iavf_down - Shutdown the connection processing
1373  * @adapter: board private structure
1374  */
iavf_down(struct iavf_adapter * adapter)1375 void iavf_down(struct iavf_adapter *adapter)
1376 {
1377 	struct net_device *netdev = adapter->netdev;
1378 
1379 	netdev_assert_locked(netdev);
1380 
1381 	if (adapter->state <= __IAVF_DOWN_PENDING)
1382 		return;
1383 
1384 	netif_carrier_off(netdev);
1385 	netif_tx_disable(netdev);
1386 	adapter->link_up = false;
1387 	iavf_napi_disable_all(adapter);
1388 	iavf_irq_disable(adapter);
1389 
1390 	iavf_clear_mac_vlan_filters(adapter);
1391 	iavf_clear_cloud_filters(adapter);
1392 	iavf_clear_fdir_filters(adapter);
1393 	iavf_clear_adv_rss_conf(adapter);
1394 
1395 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
1396 		return;
1397 
1398 	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
1399 		/* cancel any current operation */
1400 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1401 		/* Schedule operations to close down the HW. Don't wait
1402 		 * here for this to complete. The watchdog is still running
1403 		 * and it will take care of this.
1404 		 */
1405 		if (!list_empty(&adapter->mac_filter_list))
1406 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1407 		if (!list_empty(&adapter->vlan_filter_list))
1408 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1409 		if (!list_empty(&adapter->cloud_filter_list))
1410 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1411 		if (!list_empty(&adapter->fdir_list_head))
1412 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1413 		if (!list_empty(&adapter->adv_rss_list_head))
1414 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1415 	}
1416 
1417 	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES);
1418 }
1419 
1420 /**
1421  * iavf_acquire_msix_vectors - Setup the MSIX capability
1422  * @adapter: board private structure
1423  * @vectors: number of vectors to request
1424  *
1425  * Work with the OS to set up the MSIX vectors needed.
1426  *
1427  * Returns 0 on success, negative on failure
1428  **/
1429 static int
iavf_acquire_msix_vectors(struct iavf_adapter * adapter,int vectors)1430 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1431 {
1432 	int err, vector_threshold;
1433 
1434 	/* We'll want at least 3 (vector_threshold):
1435 	 * 0) Other (Admin Queue and link, mostly)
1436 	 * 1) TxQ[0] Cleanup
1437 	 * 2) RxQ[0] Cleanup
1438 	 */
1439 	vector_threshold = MIN_MSIX_COUNT;
1440 
1441 	/* The more we get, the more we will assign to Tx/Rx Cleanup
1442 	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1443 	 * Right now, we simply care about how many we'll get; we'll
1444 	 * set them up later while requesting irq's.
1445 	 */
1446 	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1447 				    vector_threshold, vectors);
1448 	if (err < 0) {
1449 		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1450 		kfree(adapter->msix_entries);
1451 		adapter->msix_entries = NULL;
1452 		return err;
1453 	}
1454 
1455 	/* Adjust for only the vectors we'll use, which is minimum
1456 	 * of max_msix_q_vectors + NONQ_VECS, or the number of
1457 	 * vectors we were allocated.
1458 	 */
1459 	adapter->num_msix_vectors = err;
1460 	return 0;
1461 }
1462 
1463 /**
1464  * iavf_free_queues - Free memory for all rings
1465  * @adapter: board private structure to initialize
1466  *
1467  * Free all of the memory associated with queue pairs.
1468  **/
iavf_free_queues(struct iavf_adapter * adapter)1469 static void iavf_free_queues(struct iavf_adapter *adapter)
1470 {
1471 	if (!adapter->vsi_res)
1472 		return;
1473 	adapter->num_active_queues = 0;
1474 	kfree(adapter->tx_rings);
1475 	adapter->tx_rings = NULL;
1476 	kfree(adapter->rx_rings);
1477 	adapter->rx_rings = NULL;
1478 }
1479 
1480 /**
1481  * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
1482  * @adapter: board private structure
1483  *
1484  * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
1485  * stripped in certain descriptor fields. Instead of checking the offload
1486  * capability bits in the hot path, cache the location the ring specific
1487  * flags.
1488  */
iavf_set_queue_vlan_tag_loc(struct iavf_adapter * adapter)1489 void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
1490 {
1491 	int i;
1492 
1493 	for (i = 0; i < adapter->num_active_queues; i++) {
1494 		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1495 		struct iavf_ring *rx_ring = &adapter->rx_rings[i];
1496 
1497 		/* prevent multiple L2TAG bits being set after VFR */
1498 		tx_ring->flags &=
1499 			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1500 			  IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
1501 		rx_ring->flags &=
1502 			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1503 			  IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
1504 
1505 		if (VLAN_ALLOWED(adapter)) {
1506 			tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1507 			rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1508 		} else if (VLAN_V2_ALLOWED(adapter)) {
1509 			struct virtchnl_vlan_supported_caps *stripping_support;
1510 			struct virtchnl_vlan_supported_caps *insertion_support;
1511 
1512 			stripping_support =
1513 				&adapter->vlan_v2_caps.offloads.stripping_support;
1514 			insertion_support =
1515 				&adapter->vlan_v2_caps.offloads.insertion_support;
1516 
1517 			if (stripping_support->outer) {
1518 				if (stripping_support->outer &
1519 				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1520 					rx_ring->flags |=
1521 						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1522 				else if (stripping_support->outer &
1523 					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1524 					rx_ring->flags |=
1525 						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1526 			} else if (stripping_support->inner) {
1527 				if (stripping_support->inner &
1528 				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1529 					rx_ring->flags |=
1530 						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1531 				else if (stripping_support->inner &
1532 					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1533 					rx_ring->flags |=
1534 						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1535 			}
1536 
1537 			if (insertion_support->outer) {
1538 				if (insertion_support->outer &
1539 				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1540 					tx_ring->flags |=
1541 						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1542 				else if (insertion_support->outer &
1543 					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1544 					tx_ring->flags |=
1545 						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1546 			} else if (insertion_support->inner) {
1547 				if (insertion_support->inner &
1548 				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1549 					tx_ring->flags |=
1550 						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1551 				else if (insertion_support->inner &
1552 					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1553 					tx_ring->flags |=
1554 						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1555 			}
1556 		}
1557 	}
1558 }
1559 
1560 /**
1561  * iavf_alloc_queues - Allocate memory for all rings
1562  * @adapter: board private structure to initialize
1563  *
1564  * We allocate one ring per queue at run-time since we don't know the
1565  * number of queues at compile-time.  The polling_netdev array is
1566  * intended for Multiqueue, but should work fine with a single queue.
1567  **/
iavf_alloc_queues(struct iavf_adapter * adapter)1568 static int iavf_alloc_queues(struct iavf_adapter *adapter)
1569 {
1570 	int i, num_active_queues;
1571 
1572 	/* If we're in reset reallocating queues we don't actually know yet for
1573 	 * certain the PF gave us the number of queues we asked for but we'll
1574 	 * assume it did.  Once basic reset is finished we'll confirm once we
1575 	 * start negotiating config with PF.
1576 	 */
1577 	if (adapter->num_req_queues)
1578 		num_active_queues = adapter->num_req_queues;
1579 	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1580 		 adapter->num_tc)
1581 		num_active_queues = adapter->ch_config.total_qps;
1582 	else
1583 		num_active_queues = min_t(int,
1584 					  adapter->vsi_res->num_queue_pairs,
1585 					  (int)(num_online_cpus()));
1586 
1587 
1588 	adapter->tx_rings = kcalloc(num_active_queues,
1589 				    sizeof(struct iavf_ring), GFP_KERNEL);
1590 	if (!adapter->tx_rings)
1591 		goto err_out;
1592 	adapter->rx_rings = kcalloc(num_active_queues,
1593 				    sizeof(struct iavf_ring), GFP_KERNEL);
1594 	if (!adapter->rx_rings)
1595 		goto err_out;
1596 
1597 	for (i = 0; i < num_active_queues; i++) {
1598 		struct iavf_ring *tx_ring;
1599 		struct iavf_ring *rx_ring;
1600 
1601 		tx_ring = &adapter->tx_rings[i];
1602 
1603 		tx_ring->queue_index = i;
1604 		tx_ring->netdev = adapter->netdev;
1605 		tx_ring->dev = &adapter->pdev->dev;
1606 		tx_ring->count = adapter->tx_desc_count;
1607 		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1608 		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1609 			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1610 
1611 		rx_ring = &adapter->rx_rings[i];
1612 		rx_ring->queue_index = i;
1613 		rx_ring->netdev = adapter->netdev;
1614 		rx_ring->count = adapter->rx_desc_count;
1615 		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1616 	}
1617 
1618 	adapter->num_active_queues = num_active_queues;
1619 
1620 	iavf_set_queue_vlan_tag_loc(adapter);
1621 
1622 	return 0;
1623 
1624 err_out:
1625 	iavf_free_queues(adapter);
1626 	return -ENOMEM;
1627 }
1628 
1629 /**
1630  * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1631  * @adapter: board private structure to initialize
1632  *
1633  * Attempt to configure the interrupts using the best available
1634  * capabilities of the hardware and the kernel.
1635  **/
iavf_set_interrupt_capability(struct iavf_adapter * adapter)1636 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1637 {
1638 	int vector, v_budget;
1639 	int pairs = 0;
1640 	int err = 0;
1641 
1642 	if (!adapter->vsi_res) {
1643 		err = -EIO;
1644 		goto out;
1645 	}
1646 	pairs = adapter->num_active_queues;
1647 
1648 	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1649 	 * us much good if we have more vectors than CPUs. However, we already
1650 	 * limit the total number of queues by the number of CPUs so we do not
1651 	 * need any further limiting here.
1652 	 */
1653 	v_budget = min_t(int, pairs + NONQ_VECS,
1654 			 (int)adapter->vf_res->max_vectors);
1655 
1656 	adapter->msix_entries = kcalloc(v_budget,
1657 					sizeof(struct msix_entry), GFP_KERNEL);
1658 	if (!adapter->msix_entries) {
1659 		err = -ENOMEM;
1660 		goto out;
1661 	}
1662 
1663 	for (vector = 0; vector < v_budget; vector++)
1664 		adapter->msix_entries[vector].entry = vector;
1665 
1666 	err = iavf_acquire_msix_vectors(adapter, v_budget);
1667 	if (!err)
1668 		iavf_schedule_finish_config(adapter);
1669 
1670 out:
1671 	return err;
1672 }
1673 
1674 /**
1675  * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1676  * @adapter: board private structure
1677  *
1678  * Return 0 on success, negative on failure
1679  **/
iavf_config_rss_aq(struct iavf_adapter * adapter)1680 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1681 {
1682 	struct iavf_aqc_get_set_rss_key_data *rss_key =
1683 		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1684 	struct iavf_hw *hw = &adapter->hw;
1685 	enum iavf_status status;
1686 
1687 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1688 		/* bail because we already have a command pending */
1689 		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1690 			adapter->current_op);
1691 		return -EBUSY;
1692 	}
1693 
1694 	status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1695 	if (status) {
1696 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1697 			iavf_stat_str(hw, status),
1698 			libie_aq_str(hw->aq.asq_last_status));
1699 		return iavf_status_to_errno(status);
1700 
1701 	}
1702 
1703 	status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1704 				     adapter->rss_lut, adapter->rss_lut_size);
1705 	if (status) {
1706 		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1707 			iavf_stat_str(hw, status),
1708 			libie_aq_str(hw->aq.asq_last_status));
1709 		return iavf_status_to_errno(status);
1710 	}
1711 
1712 	return 0;
1713 
1714 }
1715 
1716 /**
1717  * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1718  * @adapter: board private structure
1719  *
1720  * Returns 0 on success, negative on failure
1721  **/
iavf_config_rss_reg(struct iavf_adapter * adapter)1722 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1723 {
1724 	struct iavf_hw *hw = &adapter->hw;
1725 	u32 *dw;
1726 	u16 i;
1727 
1728 	dw = (u32 *)adapter->rss_key;
1729 	for (i = 0; i <= adapter->rss_key_size / 4; i++)
1730 		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1731 
1732 	dw = (u32 *)adapter->rss_lut;
1733 	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1734 		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1735 
1736 	iavf_flush(hw);
1737 
1738 	return 0;
1739 }
1740 
1741 /**
1742  * iavf_config_rss - Configure RSS keys and lut
1743  * @adapter: board private structure
1744  *
1745  * Returns 0 on success, negative on failure
1746  **/
iavf_config_rss(struct iavf_adapter * adapter)1747 int iavf_config_rss(struct iavf_adapter *adapter)
1748 {
1749 
1750 	if (RSS_PF(adapter)) {
1751 		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1752 					IAVF_FLAG_AQ_SET_RSS_KEY;
1753 		return 0;
1754 	} else if (RSS_AQ(adapter)) {
1755 		return iavf_config_rss_aq(adapter);
1756 	} else {
1757 		return iavf_config_rss_reg(adapter);
1758 	}
1759 }
1760 
1761 /**
1762  * iavf_fill_rss_lut - Fill the lut with default values
1763  * @adapter: board private structure
1764  **/
iavf_fill_rss_lut(struct iavf_adapter * adapter)1765 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1766 {
1767 	u16 i;
1768 
1769 	for (i = 0; i < adapter->rss_lut_size; i++)
1770 		adapter->rss_lut[i] = i % adapter->num_active_queues;
1771 }
1772 
1773 /**
1774  * iavf_init_rss - Prepare for RSS
1775  * @adapter: board private structure
1776  *
1777  * Return 0 on success, negative on failure
1778  **/
iavf_init_rss(struct iavf_adapter * adapter)1779 static int iavf_init_rss(struct iavf_adapter *adapter)
1780 {
1781 	struct iavf_hw *hw = &adapter->hw;
1782 
1783 	if (!RSS_PF(adapter)) {
1784 		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1785 		if (adapter->vf_res->vf_cap_flags &
1786 		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1787 			adapter->rss_hashcfg =
1788 				IAVF_DEFAULT_RSS_HASHCFG_EXPANDED;
1789 		else
1790 			adapter->rss_hashcfg = IAVF_DEFAULT_RSS_HASHCFG;
1791 
1792 		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->rss_hashcfg);
1793 		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->rss_hashcfg >> 32));
1794 	}
1795 
1796 	iavf_fill_rss_lut(adapter);
1797 	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1798 
1799 	return iavf_config_rss(adapter);
1800 }
1801 
1802 /**
1803  * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1804  * @adapter: board private structure to initialize
1805  *
1806  * We allocate one q_vector per queue interrupt.  If allocation fails we
1807  * return -ENOMEM.
1808  **/
iavf_alloc_q_vectors(struct iavf_adapter * adapter)1809 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1810 {
1811 	int q_idx = 0, num_q_vectors, irq_num;
1812 	struct iavf_q_vector *q_vector;
1813 
1814 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1815 	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1816 				     GFP_KERNEL);
1817 	if (!adapter->q_vectors)
1818 		return -ENOMEM;
1819 
1820 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1821 		irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector;
1822 		q_vector = &adapter->q_vectors[q_idx];
1823 		q_vector->adapter = adapter;
1824 		q_vector->vsi = &adapter->vsi;
1825 		q_vector->v_idx = q_idx;
1826 		q_vector->reg_idx = q_idx;
1827 		netif_napi_add_config_locked(adapter->netdev, &q_vector->napi,
1828 					     iavf_napi_poll, q_idx);
1829 		netif_napi_set_irq_locked(&q_vector->napi, irq_num);
1830 	}
1831 
1832 	return 0;
1833 }
1834 
1835 /**
1836  * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1837  * @adapter: board private structure to initialize
1838  *
1839  * This function frees the memory allocated to the q_vectors.  In addition if
1840  * NAPI is enabled it will delete any references to the NAPI struct prior
1841  * to freeing the q_vector.
1842  **/
iavf_free_q_vectors(struct iavf_adapter * adapter)1843 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1844 {
1845 	int q_idx, num_q_vectors;
1846 
1847 	if (!adapter->q_vectors)
1848 		return;
1849 
1850 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1851 
1852 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1853 		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1854 
1855 		netif_napi_del_locked(&q_vector->napi);
1856 	}
1857 	kfree(adapter->q_vectors);
1858 	adapter->q_vectors = NULL;
1859 }
1860 
1861 /**
1862  * iavf_reset_interrupt_capability - Reset MSIX setup
1863  * @adapter: board private structure
1864  *
1865  **/
iavf_reset_interrupt_capability(struct iavf_adapter * adapter)1866 static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1867 {
1868 	if (!adapter->msix_entries)
1869 		return;
1870 
1871 	pci_disable_msix(adapter->pdev);
1872 	kfree(adapter->msix_entries);
1873 	adapter->msix_entries = NULL;
1874 }
1875 
1876 /**
1877  * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1878  * @adapter: board private structure to initialize
1879  *
1880  **/
iavf_init_interrupt_scheme(struct iavf_adapter * adapter)1881 static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1882 {
1883 	int err;
1884 
1885 	err = iavf_alloc_queues(adapter);
1886 	if (err) {
1887 		dev_err(&adapter->pdev->dev,
1888 			"Unable to allocate memory for queues\n");
1889 		goto err_alloc_queues;
1890 	}
1891 
1892 	err = iavf_set_interrupt_capability(adapter);
1893 	if (err) {
1894 		dev_err(&adapter->pdev->dev,
1895 			"Unable to setup interrupt capabilities\n");
1896 		goto err_set_interrupt;
1897 	}
1898 
1899 	err = iavf_alloc_q_vectors(adapter);
1900 	if (err) {
1901 		dev_err(&adapter->pdev->dev,
1902 			"Unable to allocate memory for queue vectors\n");
1903 		goto err_alloc_q_vectors;
1904 	}
1905 
1906 	/* If we've made it so far while ADq flag being ON, then we haven't
1907 	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
1908 	 * resources have been allocated in the reset path.
1909 	 * Now we can truly claim that ADq is enabled.
1910 	 */
1911 	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1912 	    adapter->num_tc)
1913 		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1914 			 adapter->num_tc);
1915 
1916 	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1917 		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1918 		 adapter->num_active_queues);
1919 
1920 	return 0;
1921 err_alloc_q_vectors:
1922 	iavf_reset_interrupt_capability(adapter);
1923 err_set_interrupt:
1924 	iavf_free_queues(adapter);
1925 err_alloc_queues:
1926 	return err;
1927 }
1928 
1929 /**
1930  * iavf_free_interrupt_scheme - Undo what iavf_init_interrupt_scheme does
1931  * @adapter: board private structure
1932  **/
iavf_free_interrupt_scheme(struct iavf_adapter * adapter)1933 static void iavf_free_interrupt_scheme(struct iavf_adapter *adapter)
1934 {
1935 	iavf_free_q_vectors(adapter);
1936 	iavf_reset_interrupt_capability(adapter);
1937 	iavf_free_queues(adapter);
1938 }
1939 
1940 /**
1941  * iavf_free_rss - Free memory used by RSS structs
1942  * @adapter: board private structure
1943  **/
iavf_free_rss(struct iavf_adapter * adapter)1944 static void iavf_free_rss(struct iavf_adapter *adapter)
1945 {
1946 	kfree(adapter->rss_key);
1947 	adapter->rss_key = NULL;
1948 
1949 	kfree(adapter->rss_lut);
1950 	adapter->rss_lut = NULL;
1951 }
1952 
1953 /**
1954  * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1955  * @adapter: board private structure
1956  * @running: true if adapter->state == __IAVF_RUNNING
1957  *
1958  * Returns 0 on success, negative on failure
1959  **/
iavf_reinit_interrupt_scheme(struct iavf_adapter * adapter,bool running)1960 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
1961 {
1962 	struct net_device *netdev = adapter->netdev;
1963 	int err;
1964 
1965 	if (running)
1966 		iavf_free_traffic_irqs(adapter);
1967 	iavf_free_misc_irq(adapter);
1968 	iavf_free_interrupt_scheme(adapter);
1969 
1970 	err = iavf_init_interrupt_scheme(adapter);
1971 	if (err)
1972 		goto err;
1973 
1974 	netif_tx_stop_all_queues(netdev);
1975 
1976 	err = iavf_request_misc_irq(adapter);
1977 	if (err)
1978 		goto err;
1979 
1980 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1981 
1982 	iavf_map_rings_to_vectors(adapter);
1983 err:
1984 	return err;
1985 }
1986 
1987 /**
1988  * iavf_finish_config - do all netdev work that needs RTNL
1989  * @work: our work_struct
1990  *
1991  * Do work that needs RTNL.
1992  */
iavf_finish_config(struct work_struct * work)1993 static void iavf_finish_config(struct work_struct *work)
1994 {
1995 	struct iavf_adapter *adapter;
1996 	bool netdev_released = false;
1997 	int pairs, err;
1998 
1999 	adapter = container_of(work, struct iavf_adapter, finish_config);
2000 
2001 	/* Always take RTNL first to prevent circular lock dependency;
2002 	 * the dev->lock (== netdev lock) is needed to update the queue number.
2003 	 */
2004 	rtnl_lock();
2005 	netdev_lock(adapter->netdev);
2006 
2007 	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
2008 	    adapter->netdev->reg_state == NETREG_REGISTERED &&
2009 	    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
2010 		netdev_update_features(adapter->netdev);
2011 		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
2012 	}
2013 
2014 	switch (adapter->state) {
2015 	case __IAVF_DOWN:
2016 		/* Set the real number of queues when reset occurs while
2017 		 * state == __IAVF_DOWN
2018 		 */
2019 		pairs = adapter->num_active_queues;
2020 		netif_set_real_num_rx_queues(adapter->netdev, pairs);
2021 		netif_set_real_num_tx_queues(adapter->netdev, pairs);
2022 
2023 		if (adapter->netdev->reg_state != NETREG_REGISTERED) {
2024 			netdev_unlock(adapter->netdev);
2025 			netdev_released = true;
2026 			err = register_netdevice(adapter->netdev);
2027 			if (err) {
2028 				dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
2029 					err);
2030 
2031 				/* go back and try again.*/
2032 				netdev_lock(adapter->netdev);
2033 				iavf_free_rss(adapter);
2034 				iavf_free_misc_irq(adapter);
2035 				iavf_reset_interrupt_capability(adapter);
2036 				iavf_change_state(adapter,
2037 						  __IAVF_INIT_CONFIG_ADAPTER);
2038 				netdev_unlock(adapter->netdev);
2039 				goto out;
2040 			}
2041 		}
2042 		break;
2043 	case __IAVF_RUNNING:
2044 		pairs = adapter->num_active_queues;
2045 		netif_set_real_num_rx_queues(adapter->netdev, pairs);
2046 		netif_set_real_num_tx_queues(adapter->netdev, pairs);
2047 		break;
2048 
2049 	default:
2050 		break;
2051 	}
2052 
2053 out:
2054 	if (!netdev_released)
2055 		netdev_unlock(adapter->netdev);
2056 	rtnl_unlock();
2057 }
2058 
2059 /**
2060  * iavf_schedule_finish_config - Set the flags and schedule a reset event
2061  * @adapter: board private structure
2062  **/
iavf_schedule_finish_config(struct iavf_adapter * adapter)2063 void iavf_schedule_finish_config(struct iavf_adapter *adapter)
2064 {
2065 	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
2066 		queue_work(adapter->wq, &adapter->finish_config);
2067 }
2068 
2069 /**
2070  * iavf_process_aq_command - process aq_required flags
2071  * and sends aq command
2072  * @adapter: pointer to iavf adapter structure
2073  *
2074  * Returns 0 on success
2075  * Returns error code if no command was sent
2076  * or error code if the command failed.
2077  **/
iavf_process_aq_command(struct iavf_adapter * adapter)2078 static int iavf_process_aq_command(struct iavf_adapter *adapter)
2079 {
2080 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
2081 		return iavf_send_vf_config_msg(adapter);
2082 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
2083 		return iavf_send_vf_offload_vlan_v2_msg(adapter);
2084 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS)
2085 		return iavf_send_vf_supported_rxdids_msg(adapter);
2086 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS)
2087 		return iavf_send_vf_ptp_caps_msg(adapter);
2088 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
2089 		iavf_disable_queues(adapter);
2090 		return 0;
2091 	}
2092 
2093 	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2094 		iavf_map_queues(adapter);
2095 		return 0;
2096 	}
2097 
2098 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2099 		iavf_add_ether_addrs(adapter);
2100 		return 0;
2101 	}
2102 
2103 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2104 		iavf_add_vlans(adapter);
2105 		return 0;
2106 	}
2107 
2108 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2109 		iavf_del_ether_addrs(adapter);
2110 		return 0;
2111 	}
2112 
2113 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2114 		iavf_del_vlans(adapter);
2115 		return 0;
2116 	}
2117 
2118 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2119 		iavf_enable_vlan_stripping(adapter);
2120 		return 0;
2121 	}
2122 
2123 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2124 		iavf_disable_vlan_stripping(adapter);
2125 		return 0;
2126 	}
2127 
2128 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW) {
2129 		iavf_cfg_queues_bw(adapter);
2130 		return 0;
2131 	}
2132 
2133 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_QOS_CAPS) {
2134 		iavf_get_qos_caps(adapter);
2135 		return 0;
2136 	}
2137 
2138 	if (adapter->aq_required & IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE) {
2139 		iavf_cfg_queues_quanta_size(adapter);
2140 		return 0;
2141 	}
2142 
2143 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2144 		iavf_configure_queues(adapter);
2145 		return 0;
2146 	}
2147 
2148 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2149 		iavf_enable_queues(adapter);
2150 		return 0;
2151 	}
2152 
2153 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2154 		/* This message goes straight to the firmware, not the
2155 		 * PF, so we don't have to set current_op as we will
2156 		 * not get a response through the ARQ.
2157 		 */
2158 		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2159 		return 0;
2160 	}
2161 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_RSS_HASHCFG) {
2162 		iavf_get_rss_hashcfg(adapter);
2163 		return 0;
2164 	}
2165 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HASHCFG) {
2166 		iavf_set_rss_hashcfg(adapter);
2167 		return 0;
2168 	}
2169 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2170 		iavf_set_rss_key(adapter);
2171 		return 0;
2172 	}
2173 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2174 		iavf_set_rss_lut(adapter);
2175 		return 0;
2176 	}
2177 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) {
2178 		iavf_set_rss_hfunc(adapter);
2179 		return 0;
2180 	}
2181 
2182 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
2183 		iavf_set_promiscuous(adapter);
2184 		return 0;
2185 	}
2186 
2187 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2188 		iavf_enable_channels(adapter);
2189 		return 0;
2190 	}
2191 
2192 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2193 		iavf_disable_channels(adapter);
2194 		return 0;
2195 	}
2196 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2197 		iavf_add_cloud_filter(adapter);
2198 		return 0;
2199 	}
2200 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2201 		iavf_del_cloud_filter(adapter);
2202 		return 0;
2203 	}
2204 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2205 		iavf_add_fdir_filter(adapter);
2206 		return IAVF_SUCCESS;
2207 	}
2208 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2209 		iavf_del_fdir_filter(adapter);
2210 		return IAVF_SUCCESS;
2211 	}
2212 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2213 		iavf_add_adv_rss_cfg(adapter);
2214 		return 0;
2215 	}
2216 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2217 		iavf_del_adv_rss_cfg(adapter);
2218 		return 0;
2219 	}
2220 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2221 		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2222 		return 0;
2223 	}
2224 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2225 		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2226 		return 0;
2227 	}
2228 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2229 		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2230 		return 0;
2231 	}
2232 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2233 		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2234 		return 0;
2235 	}
2236 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2237 		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2238 		return 0;
2239 	}
2240 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2241 		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2242 		return 0;
2243 	}
2244 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2245 		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2246 		return 0;
2247 	}
2248 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2249 		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2250 		return 0;
2251 	}
2252 	if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
2253 		iavf_virtchnl_send_ptp_cmd(adapter);
2254 		return IAVF_SUCCESS;
2255 	}
2256 	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2257 		iavf_request_stats(adapter);
2258 		return 0;
2259 	}
2260 
2261 	return -EAGAIN;
2262 }
2263 
2264 /**
2265  * iavf_set_vlan_offload_features - set VLAN offload configuration
2266  * @adapter: board private structure
2267  * @prev_features: previous features used for comparison
2268  * @features: updated features used for configuration
2269  *
2270  * Set the aq_required bit(s) based on the requested features passed in to
2271  * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2272  * the watchdog if any changes are requested to expedite the request via
2273  * virtchnl.
2274  **/
2275 static void
iavf_set_vlan_offload_features(struct iavf_adapter * adapter,netdev_features_t prev_features,netdev_features_t features)2276 iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2277 			       netdev_features_t prev_features,
2278 			       netdev_features_t features)
2279 {
2280 	bool enable_stripping = true, enable_insertion = true;
2281 	u16 vlan_ethertype = 0;
2282 	u64 aq_required = 0;
2283 
2284 	/* keep cases separate because one ethertype for offloads can be
2285 	 * disabled at the same time as another is disabled, so check for an
2286 	 * enabled ethertype first, then check for disabled. Default to
2287 	 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2288 	 * stripping.
2289 	 */
2290 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2291 		vlan_ethertype = ETH_P_8021AD;
2292 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2293 		vlan_ethertype = ETH_P_8021Q;
2294 	else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2295 		vlan_ethertype = ETH_P_8021AD;
2296 	else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2297 		vlan_ethertype = ETH_P_8021Q;
2298 	else
2299 		vlan_ethertype = ETH_P_8021Q;
2300 
2301 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2302 		enable_stripping = false;
2303 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2304 		enable_insertion = false;
2305 
2306 	if (VLAN_ALLOWED(adapter)) {
2307 		/* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2308 		 * stripping via virtchnl. VLAN insertion can be toggled on the
2309 		 * netdev, but it doesn't require a virtchnl message
2310 		 */
2311 		if (enable_stripping)
2312 			aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2313 		else
2314 			aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2315 
2316 	} else if (VLAN_V2_ALLOWED(adapter)) {
2317 		switch (vlan_ethertype) {
2318 		case ETH_P_8021Q:
2319 			if (enable_stripping)
2320 				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2321 			else
2322 				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2323 
2324 			if (enable_insertion)
2325 				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2326 			else
2327 				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2328 			break;
2329 		case ETH_P_8021AD:
2330 			if (enable_stripping)
2331 				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2332 			else
2333 				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2334 
2335 			if (enable_insertion)
2336 				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2337 			else
2338 				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2339 			break;
2340 		}
2341 	}
2342 
2343 	if (aq_required)
2344 		iavf_schedule_aq_request(adapter, aq_required);
2345 }
2346 
2347 /**
2348  * iavf_startup - first step of driver startup
2349  * @adapter: board private structure
2350  *
2351  * Function process __IAVF_STARTUP driver state.
2352  * When success the state is changed to __IAVF_INIT_VERSION_CHECK
2353  * when fails the state is changed to __IAVF_INIT_FAILED
2354  **/
iavf_startup(struct iavf_adapter * adapter)2355 static void iavf_startup(struct iavf_adapter *adapter)
2356 {
2357 	struct pci_dev *pdev = adapter->pdev;
2358 	struct iavf_hw *hw = &adapter->hw;
2359 	enum iavf_status status;
2360 	int ret;
2361 
2362 	WARN_ON(adapter->state != __IAVF_STARTUP);
2363 
2364 	/* driver loaded, probe complete */
2365 	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2366 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2367 
2368 	ret = iavf_check_reset_complete(hw);
2369 	if (ret) {
2370 		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2371 			 ret);
2372 		goto err;
2373 	}
2374 	hw->aq.num_arq_entries = IAVF_AQ_LEN;
2375 	hw->aq.num_asq_entries = IAVF_AQ_LEN;
2376 	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2377 	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2378 
2379 	status = iavf_init_adminq(hw);
2380 	if (status) {
2381 		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2382 			status);
2383 		goto err;
2384 	}
2385 	ret = iavf_send_api_ver(adapter);
2386 	if (ret) {
2387 		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2388 		iavf_shutdown_adminq(hw);
2389 		goto err;
2390 	}
2391 	iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2392 	return;
2393 err:
2394 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2395 }
2396 
2397 /**
2398  * iavf_init_version_check - second step of driver startup
2399  * @adapter: board private structure
2400  *
2401  * Function process __IAVF_INIT_VERSION_CHECK driver state.
2402  * When success the state is changed to __IAVF_INIT_GET_RESOURCES
2403  * when fails the state is changed to __IAVF_INIT_FAILED
2404  **/
iavf_init_version_check(struct iavf_adapter * adapter)2405 static void iavf_init_version_check(struct iavf_adapter *adapter)
2406 {
2407 	struct pci_dev *pdev = adapter->pdev;
2408 	struct iavf_hw *hw = &adapter->hw;
2409 	int err = -EAGAIN;
2410 
2411 	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2412 
2413 	if (!iavf_asq_done(hw)) {
2414 		dev_err(&pdev->dev, "Admin queue command never completed\n");
2415 		iavf_shutdown_adminq(hw);
2416 		iavf_change_state(adapter, __IAVF_STARTUP);
2417 		goto err;
2418 	}
2419 
2420 	/* aq msg sent, awaiting reply */
2421 	err = iavf_verify_api_ver(adapter);
2422 	if (err) {
2423 		if (err == -EALREADY)
2424 			err = iavf_send_api_ver(adapter);
2425 		else
2426 			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2427 				adapter->pf_version.major,
2428 				adapter->pf_version.minor,
2429 				VIRTCHNL_VERSION_MAJOR,
2430 				VIRTCHNL_VERSION_MINOR);
2431 		goto err;
2432 	}
2433 	err = iavf_send_vf_config_msg(adapter);
2434 	if (err) {
2435 		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2436 			err);
2437 		goto err;
2438 	}
2439 	iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2440 	return;
2441 err:
2442 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2443 }
2444 
2445 /**
2446  * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2447  * @adapter: board private structure
2448  */
iavf_parse_vf_resource_msg(struct iavf_adapter * adapter)2449 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2450 {
2451 	int i, num_req_queues = adapter->num_req_queues;
2452 	struct iavf_vsi *vsi = &adapter->vsi;
2453 
2454 	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2455 		if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2456 			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2457 	}
2458 	if (!adapter->vsi_res) {
2459 		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2460 		return -ENODEV;
2461 	}
2462 
2463 	if (num_req_queues &&
2464 	    num_req_queues > adapter->vsi_res->num_queue_pairs) {
2465 		/* Problem.  The PF gave us fewer queues than what we had
2466 		 * negotiated in our request.  Need a reset to see if we can't
2467 		 * get back to a working state.
2468 		 */
2469 		dev_err(&adapter->pdev->dev,
2470 			"Requested %d queues, but PF only gave us %d.\n",
2471 			num_req_queues,
2472 			adapter->vsi_res->num_queue_pairs);
2473 		adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2474 		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2475 		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
2476 
2477 		return -EAGAIN;
2478 	}
2479 	adapter->num_req_queues = 0;
2480 	adapter->vsi.id = adapter->vsi_res->vsi_id;
2481 
2482 	adapter->vsi.back = adapter;
2483 	adapter->vsi.base_vector = 1;
2484 	vsi->netdev = adapter->netdev;
2485 	vsi->qs_handle = adapter->vsi_res->qset_handle;
2486 	if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2487 		adapter->rss_key_size = adapter->vf_res->rss_key_size;
2488 		adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2489 	} else {
2490 		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2491 		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2492 	}
2493 
2494 	return 0;
2495 }
2496 
2497 /**
2498  * iavf_init_get_resources - third step of driver startup
2499  * @adapter: board private structure
2500  *
2501  * Function process __IAVF_INIT_GET_RESOURCES driver state and
2502  * finishes driver initialization procedure.
2503  * When success the state is changed to __IAVF_DOWN
2504  * when fails the state is changed to __IAVF_INIT_FAILED
2505  **/
iavf_init_get_resources(struct iavf_adapter * adapter)2506 static void iavf_init_get_resources(struct iavf_adapter *adapter)
2507 {
2508 	struct pci_dev *pdev = adapter->pdev;
2509 	struct iavf_hw *hw = &adapter->hw;
2510 	int err;
2511 
2512 	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2513 	/* aq msg sent, awaiting reply */
2514 	if (!adapter->vf_res) {
2515 		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2516 					  GFP_KERNEL);
2517 		if (!adapter->vf_res) {
2518 			err = -ENOMEM;
2519 			goto err;
2520 		}
2521 	}
2522 	err = iavf_get_vf_config(adapter);
2523 	if (err == -EALREADY) {
2524 		err = iavf_send_vf_config_msg(adapter);
2525 		goto err;
2526 	} else if (err == -EINVAL) {
2527 		/* We only get -EINVAL if the device is in a very bad
2528 		 * state or if we've been disabled for previous bad
2529 		 * behavior. Either way, we're done now.
2530 		 */
2531 		iavf_shutdown_adminq(hw);
2532 		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2533 		return;
2534 	}
2535 	if (err) {
2536 		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2537 		goto err_alloc;
2538 	}
2539 
2540 	err = iavf_parse_vf_resource_msg(adapter);
2541 	if (err) {
2542 		dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2543 			err);
2544 		goto err_alloc;
2545 	}
2546 	/* Some features require additional messages to negotiate extended
2547 	 * capabilities. These are processed in sequence by the
2548 	 * __IAVF_INIT_EXTENDED_CAPS driver state.
2549 	 */
2550 	adapter->extended_caps = IAVF_EXTENDED_CAPS;
2551 
2552 	iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2553 	return;
2554 
2555 err_alloc:
2556 	kfree(adapter->vf_res);
2557 	adapter->vf_res = NULL;
2558 err:
2559 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2560 }
2561 
2562 /**
2563  * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2564  * @adapter: board private structure
2565  *
2566  * Function processes send of the extended VLAN V2 capability message to the
2567  * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2568  * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2569  */
iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter * adapter)2570 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2571 {
2572 	int ret;
2573 
2574 	WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2575 
2576 	ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2577 	if (ret && ret == -EOPNOTSUPP) {
2578 		/* PF does not support VIRTCHNL_VF_OFFLOAD_V2. In this case,
2579 		 * we did not send the capability exchange message and do not
2580 		 * expect a response.
2581 		 */
2582 		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2583 	}
2584 
2585 	/* We sent the message, so move on to the next step */
2586 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2587 }
2588 
2589 /**
2590  * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2591  * @adapter: board private structure
2592  *
2593  * Function processes receipt of the extended VLAN V2 capability message from
2594  * the PF.
2595  **/
iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter * adapter)2596 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2597 {
2598 	int ret;
2599 
2600 	WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2601 
2602 	memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2603 
2604 	ret = iavf_get_vf_vlan_v2_caps(adapter);
2605 	if (ret)
2606 		goto err;
2607 
2608 	/* We've processed receipt of the VLAN V2 caps message */
2609 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2610 	return;
2611 err:
2612 	/* We didn't receive a reply. Make sure we try sending again when
2613 	 * __IAVF_INIT_FAILED attempts to recover.
2614 	 */
2615 	adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2616 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2617 }
2618 
2619 /**
2620  * iavf_init_send_supported_rxdids - part of querying for supported RXDID
2621  * formats
2622  * @adapter: board private structure
2623  *
2624  * Function processes send of the request for supported RXDIDs to the PF.
2625  * Must clear IAVF_EXTENDED_CAP_RECV_RXDID if the message is not sent, e.g.
2626  * due to the PF not negotiating VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC.
2627  */
iavf_init_send_supported_rxdids(struct iavf_adapter * adapter)2628 static void iavf_init_send_supported_rxdids(struct iavf_adapter *adapter)
2629 {
2630 	int ret;
2631 
2632 	ret = iavf_send_vf_supported_rxdids_msg(adapter);
2633 	if (ret == -EOPNOTSUPP) {
2634 		/* PF does not support VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. In this
2635 		 * case, we did not send the capability exchange message and
2636 		 * do not expect a response.
2637 		 */
2638 		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
2639 	}
2640 
2641 	/* We sent the message, so move on to the next step */
2642 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID;
2643 }
2644 
2645 /**
2646  * iavf_init_recv_supported_rxdids - part of querying for supported RXDID
2647  * formats
2648  * @adapter: board private structure
2649  *
2650  * Function processes receipt of the supported RXDIDs message from the PF.
2651  **/
iavf_init_recv_supported_rxdids(struct iavf_adapter * adapter)2652 static void iavf_init_recv_supported_rxdids(struct iavf_adapter *adapter)
2653 {
2654 	int ret;
2655 
2656 	memset(&adapter->supp_rxdids, 0, sizeof(adapter->supp_rxdids));
2657 
2658 	ret = iavf_get_vf_supported_rxdids(adapter);
2659 	if (ret)
2660 		goto err;
2661 
2662 	/* We've processed the PF response to the
2663 	 * VIRTCHNL_OP_GET_SUPPORTED_RXDIDS message we sent previously.
2664 	 */
2665 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
2666 	return;
2667 
2668 err:
2669 	/* We didn't receive a reply. Make sure we try sending again when
2670 	 * __IAVF_INIT_FAILED attempts to recover.
2671 	 */
2672 	adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_RXDID;
2673 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2674 }
2675 
2676 /**
2677  * iavf_init_send_ptp_caps - part of querying for extended PTP capabilities
2678  * @adapter: board private structure
2679  *
2680  * Function processes send of the request for 1588 PTP capabilities to the PF.
2681  * Must clear IAVF_EXTENDED_CAP_SEND_PTP if the message is not sent, e.g.
2682  * due to the PF not negotiating VIRTCHNL_VF_PTP_CAP
2683  */
iavf_init_send_ptp_caps(struct iavf_adapter * adapter)2684 static void iavf_init_send_ptp_caps(struct iavf_adapter *adapter)
2685 {
2686 	if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) {
2687 		/* PF does not support VIRTCHNL_VF_PTP_CAP. In this case, we
2688 		 * did not send the capability exchange message and do not
2689 		 * expect a response.
2690 		 */
2691 		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
2692 	}
2693 
2694 	/* We sent the message, so move on to the next step */
2695 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_PTP;
2696 }
2697 
2698 /**
2699  * iavf_init_recv_ptp_caps - part of querying for supported PTP capabilities
2700  * @adapter: board private structure
2701  *
2702  * Function processes receipt of the PTP capabilities supported on this VF.
2703  **/
iavf_init_recv_ptp_caps(struct iavf_adapter * adapter)2704 static void iavf_init_recv_ptp_caps(struct iavf_adapter *adapter)
2705 {
2706 	memset(&adapter->ptp.hw_caps, 0, sizeof(adapter->ptp.hw_caps));
2707 
2708 	if (iavf_get_vf_ptp_caps(adapter))
2709 		goto err;
2710 
2711 	/* We've processed the PF response to the VIRTCHNL_OP_1588_PTP_GET_CAPS
2712 	 * message we sent previously.
2713 	 */
2714 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
2715 	return;
2716 
2717 err:
2718 	/* We didn't receive a reply. Make sure we try sending again when
2719 	 * __IAVF_INIT_FAILED attempts to recover.
2720 	 */
2721 	adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_PTP;
2722 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2723 }
2724 
2725 /**
2726  * iavf_init_process_extended_caps - Part of driver startup
2727  * @adapter: board private structure
2728  *
2729  * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2730  * handles negotiating capabilities for features which require an additional
2731  * message.
2732  *
2733  * Once all extended capabilities exchanges are finished, the driver will
2734  * transition into __IAVF_INIT_CONFIG_ADAPTER.
2735  */
iavf_init_process_extended_caps(struct iavf_adapter * adapter)2736 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2737 {
2738 	WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2739 
2740 	/* Process capability exchange for VLAN V2 */
2741 	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2742 		iavf_init_send_offload_vlan_v2_caps(adapter);
2743 		return;
2744 	} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2745 		iavf_init_recv_offload_vlan_v2_caps(adapter);
2746 		return;
2747 	}
2748 
2749 	/* Process capability exchange for RXDID formats */
2750 	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) {
2751 		iavf_init_send_supported_rxdids(adapter);
2752 		return;
2753 	} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) {
2754 		iavf_init_recv_supported_rxdids(adapter);
2755 		return;
2756 	}
2757 
2758 	/* Process capability exchange for PTP features */
2759 	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_PTP) {
2760 		iavf_init_send_ptp_caps(adapter);
2761 		return;
2762 	} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_PTP) {
2763 		iavf_init_recv_ptp_caps(adapter);
2764 		return;
2765 	}
2766 
2767 	/* When we reach here, no further extended capabilities exchanges are
2768 	 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2769 	 */
2770 	iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2771 }
2772 
2773 /**
2774  * iavf_init_config_adapter - last part of driver startup
2775  * @adapter: board private structure
2776  *
2777  * After all the supported capabilities are negotiated, then the
2778  * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2779  */
iavf_init_config_adapter(struct iavf_adapter * adapter)2780 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2781 {
2782 	struct net_device *netdev = adapter->netdev;
2783 	struct pci_dev *pdev = adapter->pdev;
2784 	int err;
2785 
2786 	WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2787 
2788 	if (iavf_process_config(adapter))
2789 		goto err;
2790 
2791 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2792 
2793 	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2794 
2795 	netdev->netdev_ops = &iavf_netdev_ops;
2796 	iavf_set_ethtool_ops(netdev);
2797 	netdev->watchdog_timeo = 5 * HZ;
2798 
2799 	netdev->min_mtu = ETH_MIN_MTU;
2800 	netdev->max_mtu = LIBIE_MAX_MTU;
2801 
2802 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2803 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2804 			 adapter->hw.mac.addr);
2805 		eth_hw_addr_random(netdev);
2806 		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2807 	} else {
2808 		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2809 		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2810 	}
2811 
2812 	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2813 	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2814 	err = iavf_init_interrupt_scheme(adapter);
2815 	if (err)
2816 		goto err_sw_init;
2817 	iavf_map_rings_to_vectors(adapter);
2818 	if (adapter->vf_res->vf_cap_flags &
2819 		VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2820 		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2821 
2822 	err = iavf_request_misc_irq(adapter);
2823 	if (err)
2824 		goto err_sw_init;
2825 
2826 	netif_carrier_off(netdev);
2827 	adapter->link_up = false;
2828 	netif_tx_stop_all_queues(netdev);
2829 
2830 	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2831 	if (netdev->features & NETIF_F_GRO)
2832 		dev_info(&pdev->dev, "GRO is enabled\n");
2833 
2834 	iavf_change_state(adapter, __IAVF_DOWN);
2835 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2836 
2837 	iavf_misc_irq_enable(adapter);
2838 	wake_up(&adapter->down_waitqueue);
2839 
2840 	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2841 	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2842 	if (!adapter->rss_key || !adapter->rss_lut) {
2843 		err = -ENOMEM;
2844 		goto err_mem;
2845 	}
2846 	if (RSS_AQ(adapter))
2847 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2848 	else
2849 		iavf_init_rss(adapter);
2850 
2851 	if (VLAN_V2_ALLOWED(adapter))
2852 		/* request initial VLAN offload settings */
2853 		iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2854 
2855 	if (QOS_ALLOWED(adapter))
2856 		adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
2857 
2858 	/* Setup initial PTP configuration */
2859 	iavf_ptp_init(adapter);
2860 
2861 	iavf_schedule_finish_config(adapter);
2862 	return;
2863 
2864 err_mem:
2865 	iavf_free_rss(adapter);
2866 	iavf_free_misc_irq(adapter);
2867 err_sw_init:
2868 	iavf_reset_interrupt_capability(adapter);
2869 err:
2870 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2871 }
2872 
2873 static const int IAVF_NO_RESCHED = -1;
2874 
2875 /* return: msec delay for requeueing itself */
iavf_watchdog_step(struct iavf_adapter * adapter)2876 static int iavf_watchdog_step(struct iavf_adapter *adapter)
2877 {
2878 	struct iavf_hw *hw = &adapter->hw;
2879 	u32 reg_val;
2880 
2881 	netdev_assert_locked(adapter->netdev);
2882 
2883 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2884 		iavf_change_state(adapter, __IAVF_COMM_FAILED);
2885 
2886 	switch (adapter->state) {
2887 	case __IAVF_STARTUP:
2888 		iavf_startup(adapter);
2889 		return 30;
2890 	case __IAVF_INIT_VERSION_CHECK:
2891 		iavf_init_version_check(adapter);
2892 		return 30;
2893 	case __IAVF_INIT_GET_RESOURCES:
2894 		iavf_init_get_resources(adapter);
2895 		return 1;
2896 	case __IAVF_INIT_EXTENDED_CAPS:
2897 		iavf_init_process_extended_caps(adapter);
2898 		return 1;
2899 	case __IAVF_INIT_CONFIG_ADAPTER:
2900 		iavf_init_config_adapter(adapter);
2901 		return 1;
2902 	case __IAVF_INIT_FAILED:
2903 		if (test_bit(__IAVF_IN_REMOVE_TASK,
2904 			     &adapter->crit_section)) {
2905 			/* Do not update the state and do not reschedule
2906 			 * watchdog task, iavf_remove should handle this state
2907 			 * as it can loop forever
2908 			 */
2909 			return IAVF_NO_RESCHED;
2910 		}
2911 		if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2912 			dev_err(&adapter->pdev->dev,
2913 				"Failed to communicate with PF; waiting before retry\n");
2914 			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2915 			iavf_shutdown_adminq(hw);
2916 			return 5000;
2917 		}
2918 		/* Try again from failed step*/
2919 		iavf_change_state(adapter, adapter->last_state);
2920 		return 1000;
2921 	case __IAVF_COMM_FAILED:
2922 		if (test_bit(__IAVF_IN_REMOVE_TASK,
2923 			     &adapter->crit_section)) {
2924 			/* Set state to __IAVF_INIT_FAILED and perform remove
2925 			 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2926 			 * doesn't bring the state back to __IAVF_COMM_FAILED.
2927 			 */
2928 			iavf_change_state(adapter, __IAVF_INIT_FAILED);
2929 			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2930 			return IAVF_NO_RESCHED;
2931 		}
2932 		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2933 			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2934 		if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2935 		    reg_val == VIRTCHNL_VFR_COMPLETED) {
2936 			/* A chance for redemption! */
2937 			dev_err(&adapter->pdev->dev,
2938 				"Hardware came out of reset. Attempting reinit.\n");
2939 			/* When init task contacts the PF and
2940 			 * gets everything set up again, it'll restart the
2941 			 * watchdog for us. Down, boy. Sit. Stay. Woof.
2942 			 */
2943 			iavf_change_state(adapter, __IAVF_STARTUP);
2944 			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2945 		}
2946 		adapter->aq_required = 0;
2947 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2948 		return 10;
2949 	case __IAVF_RESETTING:
2950 		return 2000;
2951 	case __IAVF_DOWN:
2952 	case __IAVF_DOWN_PENDING:
2953 	case __IAVF_TESTING:
2954 	case __IAVF_RUNNING:
2955 		if (adapter->current_op) {
2956 			if (!iavf_asq_done(hw)) {
2957 				dev_dbg(&adapter->pdev->dev,
2958 					"Admin queue timeout\n");
2959 				iavf_send_api_ver(adapter);
2960 			}
2961 		} else {
2962 			int ret = iavf_process_aq_command(adapter);
2963 
2964 			/* An error will be returned if no commands were
2965 			 * processed; use this opportunity to update stats
2966 			 * if the error isn't -ENOTSUPP
2967 			 */
2968 			if (ret && ret != -EOPNOTSUPP &&
2969 			    adapter->state == __IAVF_RUNNING)
2970 				iavf_request_stats(adapter);
2971 		}
2972 		if (adapter->state == __IAVF_RUNNING)
2973 			iavf_detect_recover_hung(&adapter->vsi);
2974 		break;
2975 	case __IAVF_REMOVE:
2976 	default:
2977 		return IAVF_NO_RESCHED;
2978 	}
2979 
2980 	/* check for hw reset */
2981 	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2982 	if (!reg_val) {
2983 		adapter->aq_required = 0;
2984 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2985 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2986 		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
2987 	}
2988 
2989 	return adapter->aq_required ? 20 : 2000;
2990 }
2991 
iavf_watchdog_task(struct work_struct * work)2992 static void iavf_watchdog_task(struct work_struct *work)
2993 {
2994 	struct iavf_adapter *adapter = container_of(work,
2995 						    struct iavf_adapter,
2996 						    watchdog_task.work);
2997 	struct net_device *netdev = adapter->netdev;
2998 	int msec_delay;
2999 
3000 	netdev_lock(netdev);
3001 	msec_delay = iavf_watchdog_step(adapter);
3002 	/* note that we schedule a different task */
3003 	if (adapter->state >= __IAVF_DOWN)
3004 		queue_work(adapter->wq, &adapter->adminq_task);
3005 
3006 	if (msec_delay != IAVF_NO_RESCHED)
3007 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
3008 				   msecs_to_jiffies(msec_delay));
3009 	netdev_unlock(netdev);
3010 }
3011 
3012 /**
3013  * iavf_disable_vf - disable VF
3014  * @adapter: board private structure
3015  *
3016  * Set communication failed flag and free all resources.
3017  */
iavf_disable_vf(struct iavf_adapter * adapter)3018 static void iavf_disable_vf(struct iavf_adapter *adapter)
3019 {
3020 	struct iavf_mac_filter *f, *ftmp;
3021 	struct iavf_vlan_filter *fv, *fvtmp;
3022 	struct iavf_cloud_filter *cf, *cftmp;
3023 
3024 	netdev_assert_locked(adapter->netdev);
3025 
3026 	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
3027 
3028 	/* We don't use netif_running() because it may be true prior to
3029 	 * ndo_open() returning, so we can't assume it means all our open
3030 	 * tasks have finished, since we're not holding the rtnl_lock here.
3031 	 */
3032 	if (adapter->state == __IAVF_RUNNING) {
3033 		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3034 		netif_carrier_off(adapter->netdev);
3035 		netif_tx_disable(adapter->netdev);
3036 		adapter->link_up = false;
3037 		iavf_napi_disable_all(adapter);
3038 		iavf_irq_disable(adapter);
3039 		iavf_free_traffic_irqs(adapter);
3040 		iavf_free_all_tx_resources(adapter);
3041 		iavf_free_all_rx_resources(adapter);
3042 	}
3043 
3044 	spin_lock_bh(&adapter->mac_vlan_list_lock);
3045 
3046 	/* Delete all of the filters */
3047 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3048 		list_del(&f->list);
3049 		kfree(f);
3050 	}
3051 
3052 	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
3053 		list_del(&fv->list);
3054 		kfree(fv);
3055 	}
3056 	adapter->num_vlan_filters = 0;
3057 
3058 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
3059 
3060 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3061 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3062 		list_del(&cf->list);
3063 		kfree(cf);
3064 		adapter->num_cloud_filters--;
3065 	}
3066 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3067 
3068 	iavf_free_misc_irq(adapter);
3069 	iavf_free_interrupt_scheme(adapter);
3070 	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
3071 	iavf_shutdown_adminq(&adapter->hw);
3072 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3073 	iavf_change_state(adapter, __IAVF_DOWN);
3074 	wake_up(&adapter->down_waitqueue);
3075 	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
3076 }
3077 
3078 /**
3079  * iavf_reconfig_qs_bw - Call-back task to handle hardware reset
3080  * @adapter: board private structure
3081  *
3082  * After a reset, the shaper parameters of queues need to be replayed again.
3083  * Since the net_shaper object inside TX rings persists across reset,
3084  * set the update flag for all queues so that the virtchnl message is triggered
3085  * for all queues.
3086  **/
iavf_reconfig_qs_bw(struct iavf_adapter * adapter)3087 static void iavf_reconfig_qs_bw(struct iavf_adapter *adapter)
3088 {
3089 	int i, num = 0;
3090 
3091 	for (i = 0; i < adapter->num_active_queues; i++)
3092 		if (adapter->tx_rings[i].q_shaper.bw_min ||
3093 		    adapter->tx_rings[i].q_shaper.bw_max) {
3094 			adapter->tx_rings[i].q_shaper_update = true;
3095 			num++;
3096 		}
3097 
3098 	if (num)
3099 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
3100 }
3101 
3102 /**
3103  * iavf_reset_task - Call-back task to handle hardware reset
3104  * @work: pointer to work_struct
3105  *
3106  * During reset we need to shut down and reinitialize the admin queue
3107  * before we can use it to communicate with the PF again. We also clear
3108  * and reinit the rings because that context is lost as well.
3109  **/
iavf_reset_task(struct work_struct * work)3110 static void iavf_reset_task(struct work_struct *work)
3111 {
3112 	struct iavf_adapter *adapter = container_of(work,
3113 						      struct iavf_adapter,
3114 						      reset_task);
3115 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
3116 	struct net_device *netdev = adapter->netdev;
3117 	struct iavf_hw *hw = &adapter->hw;
3118 	struct iavf_mac_filter *f, *ftmp;
3119 	struct iavf_cloud_filter *cf;
3120 	enum iavf_status status;
3121 	u32 reg_val;
3122 	int i = 0, err;
3123 	bool running;
3124 
3125 	netdev_lock(netdev);
3126 
3127 	iavf_misc_irq_disable(adapter);
3128 	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
3129 		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
3130 		/* Restart the AQ here. If we have been reset but didn't
3131 		 * detect it, or if the PF had to reinit, our AQ will be hosed.
3132 		 */
3133 		iavf_shutdown_adminq(hw);
3134 		iavf_init_adminq(hw);
3135 		iavf_request_reset(adapter);
3136 	}
3137 	adapter->flags |= IAVF_FLAG_RESET_PENDING;
3138 
3139 	/* poll until we see the reset actually happen */
3140 	for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
3141 		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
3142 			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
3143 		if (!reg_val)
3144 			break;
3145 		usleep_range(5000, 10000);
3146 	}
3147 	if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
3148 		dev_info(&adapter->pdev->dev, "Never saw reset\n");
3149 		goto continue_reset; /* act like the reset happened */
3150 	}
3151 
3152 	/* wait until the reset is complete and the PF is responding to us */
3153 	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3154 		/* sleep first to make sure a minimum wait time is met */
3155 		msleep(IAVF_RESET_WAIT_MS);
3156 
3157 		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
3158 			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3159 		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
3160 			break;
3161 	}
3162 
3163 	pci_set_master(adapter->pdev);
3164 	pci_restore_msi_state(adapter->pdev);
3165 
3166 	if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3167 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3168 			reg_val);
3169 		iavf_disable_vf(adapter);
3170 		netdev_unlock(netdev);
3171 		return; /* Do not attempt to reinit. It's dead, Jim. */
3172 	}
3173 
3174 continue_reset:
3175 	/* If we are still early in the state machine, just restart. */
3176 	if (adapter->state <= __IAVF_INIT_FAILED) {
3177 		iavf_shutdown_adminq(hw);
3178 		iavf_change_state(adapter, __IAVF_STARTUP);
3179 		iavf_startup(adapter);
3180 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
3181 				   msecs_to_jiffies(30));
3182 		netdev_unlock(netdev);
3183 		return;
3184 	}
3185 
3186 	/* We don't use netif_running() because it may be true prior to
3187 	 * ndo_open() returning, so we can't assume it means all our open
3188 	 * tasks have finished, since we're not holding the rtnl_lock here.
3189 	 */
3190 	running = adapter->state == __IAVF_RUNNING;
3191 
3192 	if (running) {
3193 		netif_carrier_off(netdev);
3194 		netif_tx_stop_all_queues(netdev);
3195 		adapter->link_up = false;
3196 		iavf_napi_disable_all(adapter);
3197 	}
3198 	iavf_irq_disable(adapter);
3199 
3200 	iavf_change_state(adapter, __IAVF_RESETTING);
3201 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3202 
3203 	/* free the Tx/Rx rings and descriptors, might be better to just
3204 	 * re-use them sometime in the future
3205 	 */
3206 	iavf_free_all_rx_resources(adapter);
3207 	iavf_free_all_tx_resources(adapter);
3208 
3209 	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3210 	/* kill and reinit the admin queue */
3211 	iavf_shutdown_adminq(hw);
3212 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3213 	status = iavf_init_adminq(hw);
3214 	if (status) {
3215 		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3216 			 status);
3217 		goto reset_err;
3218 	}
3219 	adapter->aq_required = 0;
3220 
3221 	if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3222 	    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3223 		err = iavf_reinit_interrupt_scheme(adapter, running);
3224 		if (err)
3225 			goto reset_err;
3226 	}
3227 
3228 	if (RSS_AQ(adapter)) {
3229 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3230 	} else {
3231 		err = iavf_init_rss(adapter);
3232 		if (err)
3233 			goto reset_err;
3234 	}
3235 
3236 	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3237 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3238 
3239 	/* Certain capabilities require an extended negotiation process using
3240 	 * extra messages that must be processed after getting the VF
3241 	 * configuration. The related checks such as VLAN_V2_ALLOWED() are not
3242 	 * reliable here, since the configuration has not yet been negotiated.
3243 	 *
3244 	 * Always set these flags, since them related VIRTCHNL messages won't
3245 	 * be sent until after VIRTCHNL_OP_GET_VF_RESOURCES.
3246 	 */
3247 	adapter->aq_required |= IAVF_FLAG_AQ_EXTENDED_CAPS;
3248 
3249 	spin_lock_bh(&adapter->mac_vlan_list_lock);
3250 
3251 	/* Delete filter for the current MAC address, it could have
3252 	 * been changed by the PF via administratively set MAC.
3253 	 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3254 	 */
3255 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3256 		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3257 			list_del(&f->list);
3258 			kfree(f);
3259 		}
3260 	}
3261 	/* re-add all MAC filters */
3262 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
3263 		f->add = true;
3264 	}
3265 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
3266 
3267 	/* check if TCs are running and re-add all cloud filters */
3268 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3269 	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3270 	    adapter->num_tc) {
3271 		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3272 			cf->add = true;
3273 		}
3274 	}
3275 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3276 
3277 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3278 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3279 	iavf_misc_irq_enable(adapter);
3280 
3281 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
3282 
3283 	/* We were running when the reset started, so we need to restore some
3284 	 * state here.
3285 	 */
3286 	if (running) {
3287 		/* allocate transmit descriptors */
3288 		err = iavf_setup_all_tx_resources(adapter);
3289 		if (err)
3290 			goto reset_err;
3291 
3292 		/* allocate receive descriptors */
3293 		err = iavf_setup_all_rx_resources(adapter);
3294 		if (err)
3295 			goto reset_err;
3296 
3297 		if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3298 		    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3299 			err = iavf_request_traffic_irqs(adapter, netdev->name);
3300 			if (err)
3301 				goto reset_err;
3302 
3303 			adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3304 		}
3305 
3306 		iavf_configure(adapter);
3307 
3308 		/* iavf_up_complete() will switch device back
3309 		 * to __IAVF_RUNNING
3310 		 */
3311 		iavf_up_complete(adapter);
3312 
3313 		iavf_irq_enable(adapter, true);
3314 
3315 		iavf_reconfig_qs_bw(adapter);
3316 	} else {
3317 		iavf_change_state(adapter, __IAVF_DOWN);
3318 		wake_up(&adapter->down_waitqueue);
3319 	}
3320 
3321 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3322 
3323 	wake_up(&adapter->reset_waitqueue);
3324 	netdev_unlock(netdev);
3325 
3326 	return;
3327 reset_err:
3328 	if (running) {
3329 		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3330 		iavf_free_traffic_irqs(adapter);
3331 	}
3332 	iavf_disable_vf(adapter);
3333 
3334 	netdev_unlock(netdev);
3335 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3336 }
3337 
3338 /**
3339  * iavf_adminq_task - worker thread to clean the admin queue
3340  * @work: pointer to work_struct containing our data
3341  **/
iavf_adminq_task(struct work_struct * work)3342 static void iavf_adminq_task(struct work_struct *work)
3343 {
3344 	struct iavf_adapter *adapter =
3345 		container_of(work, struct iavf_adapter, adminq_task);
3346 	struct net_device *netdev = adapter->netdev;
3347 	struct iavf_hw *hw = &adapter->hw;
3348 	struct iavf_arq_event_info event;
3349 	enum virtchnl_ops v_op;
3350 	enum iavf_status ret, v_ret;
3351 	u32 val, oldval;
3352 	u16 pending;
3353 
3354 	netdev_lock(netdev);
3355 
3356 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3357 		goto unlock;
3358 
3359 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3360 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3361 	if (!event.msg_buf)
3362 		goto unlock;
3363 
3364 	do {
3365 		ret = iavf_clean_arq_element(hw, &event, &pending);
3366 		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3367 		v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3368 
3369 		if (ret || !v_op)
3370 			break; /* No event to process or error cleaning ARQ */
3371 
3372 		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3373 					 event.msg_len);
3374 		if (pending != 0)
3375 			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3376 	} while (pending);
3377 
3378 	if (iavf_is_reset_in_progress(adapter))
3379 		goto freedom;
3380 
3381 	/* check for error indications */
3382 	val = rd32(hw, IAVF_VF_ARQLEN1);
3383 	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3384 		goto freedom;
3385 	oldval = val;
3386 	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3387 		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3388 		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3389 	}
3390 	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3391 		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3392 		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3393 	}
3394 	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3395 		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3396 		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3397 	}
3398 	if (oldval != val)
3399 		wr32(hw, IAVF_VF_ARQLEN1, val);
3400 
3401 	val = rd32(hw, IAVF_VF_ATQLEN1);
3402 	oldval = val;
3403 	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3404 		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3405 		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3406 	}
3407 	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3408 		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3409 		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3410 	}
3411 	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3412 		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3413 		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3414 	}
3415 	if (oldval != val)
3416 		wr32(hw, IAVF_VF_ATQLEN1, val);
3417 
3418 freedom:
3419 	kfree(event.msg_buf);
3420 unlock:
3421 	netdev_unlock(netdev);
3422 	/* re-enable Admin queue interrupt cause */
3423 	iavf_misc_irq_enable(adapter);
3424 }
3425 
3426 /**
3427  * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3428  * @adapter: board private structure
3429  *
3430  * Free all transmit software resources
3431  **/
iavf_free_all_tx_resources(struct iavf_adapter * adapter)3432 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3433 {
3434 	int i;
3435 
3436 	if (!adapter->tx_rings)
3437 		return;
3438 
3439 	for (i = 0; i < adapter->num_active_queues; i++)
3440 		if (adapter->tx_rings[i].desc)
3441 			iavf_free_tx_resources(&adapter->tx_rings[i]);
3442 }
3443 
3444 /**
3445  * iavf_setup_all_tx_resources - allocate all queues Tx resources
3446  * @adapter: board private structure
3447  *
3448  * If this function returns with an error, then it's possible one or
3449  * more of the rings is populated (while the rest are not).  It is the
3450  * callers duty to clean those orphaned rings.
3451  *
3452  * Return 0 on success, negative on failure
3453  **/
iavf_setup_all_tx_resources(struct iavf_adapter * adapter)3454 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3455 {
3456 	int i, err = 0;
3457 
3458 	for (i = 0; i < adapter->num_active_queues; i++) {
3459 		adapter->tx_rings[i].count = adapter->tx_desc_count;
3460 		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3461 		if (!err)
3462 			continue;
3463 		dev_err(&adapter->pdev->dev,
3464 			"Allocation for Tx Queue %u failed\n", i);
3465 		break;
3466 	}
3467 
3468 	return err;
3469 }
3470 
3471 /**
3472  * iavf_setup_all_rx_resources - allocate all queues Rx resources
3473  * @adapter: board private structure
3474  *
3475  * If this function returns with an error, then it's possible one or
3476  * more of the rings is populated (while the rest are not).  It is the
3477  * callers duty to clean those orphaned rings.
3478  *
3479  * Return 0 on success, negative on failure
3480  **/
iavf_setup_all_rx_resources(struct iavf_adapter * adapter)3481 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3482 {
3483 	int i, err = 0;
3484 
3485 	for (i = 0; i < adapter->num_active_queues; i++) {
3486 		adapter->rx_rings[i].count = adapter->rx_desc_count;
3487 		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3488 		if (!err)
3489 			continue;
3490 		dev_err(&adapter->pdev->dev,
3491 			"Allocation for Rx Queue %u failed\n", i);
3492 		break;
3493 	}
3494 	return err;
3495 }
3496 
3497 /**
3498  * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3499  * @adapter: board private structure
3500  *
3501  * Free all receive software resources
3502  **/
iavf_free_all_rx_resources(struct iavf_adapter * adapter)3503 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3504 {
3505 	int i;
3506 
3507 	if (!adapter->rx_rings)
3508 		return;
3509 
3510 	for (i = 0; i < adapter->num_active_queues; i++)
3511 		if (adapter->rx_rings[i].desc)
3512 			iavf_free_rx_resources(&adapter->rx_rings[i]);
3513 }
3514 
3515 /**
3516  * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3517  * @adapter: board private structure
3518  * @max_tx_rate: max Tx bw for a tc
3519  **/
iavf_validate_tx_bandwidth(struct iavf_adapter * adapter,u64 max_tx_rate)3520 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3521 				      u64 max_tx_rate)
3522 {
3523 	int speed = 0, ret = 0;
3524 
3525 	if (ADV_LINK_SUPPORT(adapter)) {
3526 		if (adapter->link_speed_mbps < U32_MAX) {
3527 			speed = adapter->link_speed_mbps;
3528 			goto validate_bw;
3529 		} else {
3530 			dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3531 			return -EINVAL;
3532 		}
3533 	}
3534 
3535 	switch (adapter->link_speed) {
3536 	case VIRTCHNL_LINK_SPEED_40GB:
3537 		speed = SPEED_40000;
3538 		break;
3539 	case VIRTCHNL_LINK_SPEED_25GB:
3540 		speed = SPEED_25000;
3541 		break;
3542 	case VIRTCHNL_LINK_SPEED_20GB:
3543 		speed = SPEED_20000;
3544 		break;
3545 	case VIRTCHNL_LINK_SPEED_10GB:
3546 		speed = SPEED_10000;
3547 		break;
3548 	case VIRTCHNL_LINK_SPEED_5GB:
3549 		speed = SPEED_5000;
3550 		break;
3551 	case VIRTCHNL_LINK_SPEED_2_5GB:
3552 		speed = SPEED_2500;
3553 		break;
3554 	case VIRTCHNL_LINK_SPEED_1GB:
3555 		speed = SPEED_1000;
3556 		break;
3557 	case VIRTCHNL_LINK_SPEED_100MB:
3558 		speed = SPEED_100;
3559 		break;
3560 	default:
3561 		break;
3562 	}
3563 
3564 validate_bw:
3565 	if (max_tx_rate > speed) {
3566 		dev_err(&adapter->pdev->dev,
3567 			"Invalid tx rate specified\n");
3568 		ret = -EINVAL;
3569 	}
3570 
3571 	return ret;
3572 }
3573 
3574 /**
3575  * iavf_validate_ch_config - validate queue mapping info
3576  * @adapter: board private structure
3577  * @mqprio_qopt: queue parameters
3578  *
3579  * This function validates if the config provided by the user to
3580  * configure queue channels is valid or not. Returns 0 on a valid
3581  * config.
3582  **/
iavf_validate_ch_config(struct iavf_adapter * adapter,struct tc_mqprio_qopt_offload * mqprio_qopt)3583 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3584 				   struct tc_mqprio_qopt_offload *mqprio_qopt)
3585 {
3586 	u64 total_max_rate = 0;
3587 	u32 tx_rate_rem = 0;
3588 	int i, num_qps = 0;
3589 	u64 tx_rate = 0;
3590 	int ret = 0;
3591 
3592 	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3593 	    mqprio_qopt->qopt.num_tc < 1)
3594 		return -EINVAL;
3595 
3596 	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
3597 		if (!mqprio_qopt->qopt.count[i] ||
3598 		    mqprio_qopt->qopt.offset[i] != num_qps)
3599 			return -EINVAL;
3600 		if (mqprio_qopt->min_rate[i]) {
3601 			dev_err(&adapter->pdev->dev,
3602 				"Invalid min tx rate (greater than 0) specified for TC%d\n",
3603 				i);
3604 			return -EINVAL;
3605 		}
3606 
3607 		/* convert to Mbps */
3608 		tx_rate = div_u64(mqprio_qopt->max_rate[i],
3609 				  IAVF_MBPS_DIVISOR);
3610 
3611 		if (mqprio_qopt->max_rate[i] &&
3612 		    tx_rate < IAVF_MBPS_QUANTA) {
3613 			dev_err(&adapter->pdev->dev,
3614 				"Invalid max tx rate for TC%d, minimum %dMbps\n",
3615 				i, IAVF_MBPS_QUANTA);
3616 			return -EINVAL;
3617 		}
3618 
3619 		(void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3620 
3621 		if (tx_rate_rem != 0) {
3622 			dev_err(&adapter->pdev->dev,
3623 				"Invalid max tx rate for TC%d, not divisible by %d\n",
3624 				i, IAVF_MBPS_QUANTA);
3625 			return -EINVAL;
3626 		}
3627 
3628 		total_max_rate += tx_rate;
3629 		num_qps += mqprio_qopt->qopt.count[i];
3630 	}
3631 	if (num_qps > adapter->num_active_queues) {
3632 		dev_err(&adapter->pdev->dev,
3633 			"Cannot support requested number of queues\n");
3634 		return -EINVAL;
3635 	}
3636 
3637 	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3638 	return ret;
3639 }
3640 
3641 /**
3642  * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3643  * @adapter: board private structure
3644  **/
iavf_del_all_cloud_filters(struct iavf_adapter * adapter)3645 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3646 {
3647 	struct iavf_cloud_filter *cf, *cftmp;
3648 
3649 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3650 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3651 				 list) {
3652 		list_del(&cf->list);
3653 		kfree(cf);
3654 		adapter->num_cloud_filters--;
3655 	}
3656 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3657 }
3658 
3659 /**
3660  * iavf_is_tc_config_same - Compare the mqprio TC config with the
3661  * TC config already configured on this adapter.
3662  * @adapter: board private structure
3663  * @mqprio_qopt: TC config received from kernel.
3664  *
3665  * This function compares the TC config received from the kernel
3666  * with the config already configured on the adapter.
3667  *
3668  * Return: True if configuration is same, false otherwise.
3669  **/
iavf_is_tc_config_same(struct iavf_adapter * adapter,struct tc_mqprio_qopt * mqprio_qopt)3670 static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
3671 				   struct tc_mqprio_qopt *mqprio_qopt)
3672 {
3673 	struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
3674 	int i;
3675 
3676 	if (adapter->num_tc != mqprio_qopt->num_tc)
3677 		return false;
3678 
3679 	for (i = 0; i < adapter->num_tc; i++) {
3680 		if (ch[i].count != mqprio_qopt->count[i] ||
3681 		    ch[i].offset != mqprio_qopt->offset[i])
3682 			return false;
3683 	}
3684 	return true;
3685 }
3686 
3687 /**
3688  * __iavf_setup_tc - configure multiple traffic classes
3689  * @netdev: network interface device structure
3690  * @type_data: tc offload data
3691  *
3692  * This function processes the config information provided by the
3693  * user to configure traffic classes/queue channels and packages the
3694  * information to request the PF to setup traffic classes.
3695  *
3696  * Returns 0 on success.
3697  **/
__iavf_setup_tc(struct net_device * netdev,void * type_data)3698 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3699 {
3700 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3701 	struct iavf_adapter *adapter = netdev_priv(netdev);
3702 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
3703 	u8 num_tc = 0, total_qps = 0;
3704 	int ret = 0, netdev_tc = 0;
3705 	u64 max_tx_rate;
3706 	u16 mode;
3707 	int i;
3708 
3709 	num_tc = mqprio_qopt->qopt.num_tc;
3710 	mode = mqprio_qopt->mode;
3711 
3712 	/* delete queue_channel */
3713 	if (!mqprio_qopt->qopt.hw) {
3714 		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3715 			/* reset the tc configuration */
3716 			netdev_reset_tc(netdev);
3717 			adapter->num_tc = 0;
3718 			netif_tx_stop_all_queues(netdev);
3719 			netif_tx_disable(netdev);
3720 			iavf_del_all_cloud_filters(adapter);
3721 			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3722 			total_qps = adapter->orig_num_active_queues;
3723 			goto exit;
3724 		} else {
3725 			return -EINVAL;
3726 		}
3727 	}
3728 
3729 	/* add queue channel */
3730 	if (mode == TC_MQPRIO_MODE_CHANNEL) {
3731 		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3732 			dev_err(&adapter->pdev->dev, "ADq not supported\n");
3733 			return -EOPNOTSUPP;
3734 		}
3735 		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3736 			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3737 			return -EINVAL;
3738 		}
3739 
3740 		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3741 		if (ret)
3742 			return ret;
3743 		/* Return if same TC config is requested */
3744 		if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
3745 			return 0;
3746 		adapter->num_tc = num_tc;
3747 
3748 		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3749 			if (i < num_tc) {
3750 				adapter->ch_config.ch_info[i].count =
3751 					mqprio_qopt->qopt.count[i];
3752 				adapter->ch_config.ch_info[i].offset =
3753 					mqprio_qopt->qopt.offset[i];
3754 				total_qps += mqprio_qopt->qopt.count[i];
3755 				max_tx_rate = mqprio_qopt->max_rate[i];
3756 				/* convert to Mbps */
3757 				max_tx_rate = div_u64(max_tx_rate,
3758 						      IAVF_MBPS_DIVISOR);
3759 				adapter->ch_config.ch_info[i].max_tx_rate =
3760 					max_tx_rate;
3761 			} else {
3762 				adapter->ch_config.ch_info[i].count = 1;
3763 				adapter->ch_config.ch_info[i].offset = 0;
3764 			}
3765 		}
3766 
3767 		/* Take snapshot of original config such as "num_active_queues"
3768 		 * It is used later when delete ADQ flow is exercised, so that
3769 		 * once delete ADQ flow completes, VF shall go back to its
3770 		 * original queue configuration
3771 		 */
3772 
3773 		adapter->orig_num_active_queues = adapter->num_active_queues;
3774 
3775 		/* Store queue info based on TC so that VF gets configured
3776 		 * with correct number of queues when VF completes ADQ config
3777 		 * flow
3778 		 */
3779 		adapter->ch_config.total_qps = total_qps;
3780 
3781 		netif_tx_stop_all_queues(netdev);
3782 		netif_tx_disable(netdev);
3783 		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3784 		netdev_reset_tc(netdev);
3785 		/* Report the tc mapping up the stack */
3786 		netdev_set_num_tc(adapter->netdev, num_tc);
3787 		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3788 			u16 qcount = mqprio_qopt->qopt.count[i];
3789 			u16 qoffset = mqprio_qopt->qopt.offset[i];
3790 
3791 			if (i < num_tc)
3792 				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3793 						    qoffset);
3794 		}
3795 	}
3796 exit:
3797 	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3798 		return 0;
3799 
3800 	netif_set_real_num_rx_queues(netdev, total_qps);
3801 	netif_set_real_num_tx_queues(netdev, total_qps);
3802 
3803 	return ret;
3804 }
3805 
3806 /**
3807  * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3808  * @adapter: board private structure
3809  * @f: pointer to struct flow_cls_offload
3810  * @filter: pointer to cloud filter structure
3811  */
iavf_parse_cls_flower(struct iavf_adapter * adapter,struct flow_cls_offload * f,struct iavf_cloud_filter * filter)3812 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3813 				 struct flow_cls_offload *f,
3814 				 struct iavf_cloud_filter *filter)
3815 {
3816 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3817 	struct flow_dissector *dissector = rule->match.dissector;
3818 	u16 n_proto_mask = 0;
3819 	u16 n_proto_key = 0;
3820 	u8 field_flags = 0;
3821 	u16 addr_type = 0;
3822 	u16 n_proto = 0;
3823 	int i = 0;
3824 	struct virtchnl_filter *vf = &filter->f;
3825 
3826 	if (dissector->used_keys &
3827 	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
3828 	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
3829 	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3830 	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
3831 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3832 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3833 	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
3834 	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3835 		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%llx\n",
3836 			dissector->used_keys);
3837 		return -EOPNOTSUPP;
3838 	}
3839 
3840 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3841 		struct flow_match_enc_keyid match;
3842 
3843 		flow_rule_match_enc_keyid(rule, &match);
3844 		if (match.mask->keyid != 0)
3845 			field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3846 	}
3847 
3848 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3849 		struct flow_match_basic match;
3850 
3851 		flow_rule_match_basic(rule, &match);
3852 		n_proto_key = ntohs(match.key->n_proto);
3853 		n_proto_mask = ntohs(match.mask->n_proto);
3854 
3855 		if (n_proto_key == ETH_P_ALL) {
3856 			n_proto_key = 0;
3857 			n_proto_mask = 0;
3858 		}
3859 		n_proto = n_proto_key & n_proto_mask;
3860 		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3861 			return -EINVAL;
3862 		if (n_proto == ETH_P_IPV6) {
3863 			/* specify flow type as TCP IPv6 */
3864 			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3865 		}
3866 
3867 		if (match.key->ip_proto != IPPROTO_TCP) {
3868 			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3869 			return -EINVAL;
3870 		}
3871 	}
3872 
3873 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3874 		struct flow_match_eth_addrs match;
3875 
3876 		flow_rule_match_eth_addrs(rule, &match);
3877 
3878 		/* use is_broadcast and is_zero to check for all 0xf or 0 */
3879 		if (!is_zero_ether_addr(match.mask->dst)) {
3880 			if (is_broadcast_ether_addr(match.mask->dst)) {
3881 				field_flags |= IAVF_CLOUD_FIELD_OMAC;
3882 			} else {
3883 				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3884 					match.mask->dst);
3885 				return -EINVAL;
3886 			}
3887 		}
3888 
3889 		if (!is_zero_ether_addr(match.mask->src)) {
3890 			if (is_broadcast_ether_addr(match.mask->src)) {
3891 				field_flags |= IAVF_CLOUD_FIELD_IMAC;
3892 			} else {
3893 				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3894 					match.mask->src);
3895 				return -EINVAL;
3896 			}
3897 		}
3898 
3899 		if (!is_zero_ether_addr(match.key->dst))
3900 			if (is_valid_ether_addr(match.key->dst) ||
3901 			    is_multicast_ether_addr(match.key->dst)) {
3902 				/* set the mask if a valid dst_mac address */
3903 				for (i = 0; i < ETH_ALEN; i++)
3904 					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3905 				ether_addr_copy(vf->data.tcp_spec.dst_mac,
3906 						match.key->dst);
3907 			}
3908 
3909 		if (!is_zero_ether_addr(match.key->src))
3910 			if (is_valid_ether_addr(match.key->src) ||
3911 			    is_multicast_ether_addr(match.key->src)) {
3912 				/* set the mask if a valid dst_mac address */
3913 				for (i = 0; i < ETH_ALEN; i++)
3914 					vf->mask.tcp_spec.src_mac[i] |= 0xff;
3915 				ether_addr_copy(vf->data.tcp_spec.src_mac,
3916 						match.key->src);
3917 		}
3918 	}
3919 
3920 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3921 		struct flow_match_vlan match;
3922 
3923 		flow_rule_match_vlan(rule, &match);
3924 		if (match.mask->vlan_id) {
3925 			if (match.mask->vlan_id == VLAN_VID_MASK) {
3926 				field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3927 			} else {
3928 				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3929 					match.mask->vlan_id);
3930 				return -EINVAL;
3931 			}
3932 		}
3933 		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3934 		vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3935 	}
3936 
3937 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3938 		struct flow_match_control match;
3939 
3940 		flow_rule_match_control(rule, &match);
3941 		addr_type = match.key->addr_type;
3942 
3943 		if (flow_rule_has_control_flags(match.mask->flags,
3944 						f->common.extack))
3945 			return -EOPNOTSUPP;
3946 	}
3947 
3948 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3949 		struct flow_match_ipv4_addrs match;
3950 
3951 		flow_rule_match_ipv4_addrs(rule, &match);
3952 		if (match.mask->dst) {
3953 			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3954 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3955 			} else {
3956 				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3957 					be32_to_cpu(match.mask->dst));
3958 				return -EINVAL;
3959 			}
3960 		}
3961 
3962 		if (match.mask->src) {
3963 			if (match.mask->src == cpu_to_be32(0xffffffff)) {
3964 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3965 			} else {
3966 				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3967 					be32_to_cpu(match.mask->src));
3968 				return -EINVAL;
3969 			}
3970 		}
3971 
3972 		if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3973 			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3974 			return -EINVAL;
3975 		}
3976 		if (match.key->dst) {
3977 			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3978 			vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3979 		}
3980 		if (match.key->src) {
3981 			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3982 			vf->data.tcp_spec.src_ip[0] = match.key->src;
3983 		}
3984 	}
3985 
3986 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3987 		struct flow_match_ipv6_addrs match;
3988 
3989 		flow_rule_match_ipv6_addrs(rule, &match);
3990 
3991 		/* validate mask, make sure it is not IPV6_ADDR_ANY */
3992 		if (ipv6_addr_any(&match.mask->dst)) {
3993 			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3994 				IPV6_ADDR_ANY);
3995 			return -EINVAL;
3996 		}
3997 
3998 		/* src and dest IPv6 address should not be LOOPBACK
3999 		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
4000 		 */
4001 		if (ipv6_addr_loopback(&match.key->dst) ||
4002 		    ipv6_addr_loopback(&match.key->src)) {
4003 			dev_err(&adapter->pdev->dev,
4004 				"ipv6 addr should not be loopback\n");
4005 			return -EINVAL;
4006 		}
4007 		if (!ipv6_addr_any(&match.mask->dst) ||
4008 		    !ipv6_addr_any(&match.mask->src))
4009 			field_flags |= IAVF_CLOUD_FIELD_IIP;
4010 
4011 		for (i = 0; i < 4; i++)
4012 			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
4013 		memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
4014 		       sizeof(vf->data.tcp_spec.dst_ip));
4015 		for (i = 0; i < 4; i++)
4016 			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
4017 		memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
4018 		       sizeof(vf->data.tcp_spec.src_ip));
4019 	}
4020 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
4021 		struct flow_match_ports match;
4022 
4023 		flow_rule_match_ports(rule, &match);
4024 		if (match.mask->src) {
4025 			if (match.mask->src == cpu_to_be16(0xffff)) {
4026 				field_flags |= IAVF_CLOUD_FIELD_IIP;
4027 			} else {
4028 				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
4029 					be16_to_cpu(match.mask->src));
4030 				return -EINVAL;
4031 			}
4032 		}
4033 
4034 		if (match.mask->dst) {
4035 			if (match.mask->dst == cpu_to_be16(0xffff)) {
4036 				field_flags |= IAVF_CLOUD_FIELD_IIP;
4037 			} else {
4038 				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
4039 					be16_to_cpu(match.mask->dst));
4040 				return -EINVAL;
4041 			}
4042 		}
4043 		if (match.key->dst) {
4044 			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
4045 			vf->data.tcp_spec.dst_port = match.key->dst;
4046 		}
4047 
4048 		if (match.key->src) {
4049 			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
4050 			vf->data.tcp_spec.src_port = match.key->src;
4051 		}
4052 	}
4053 	vf->field_flags = field_flags;
4054 
4055 	return 0;
4056 }
4057 
4058 /**
4059  * iavf_handle_tclass - Forward to a traffic class on the device
4060  * @adapter: board private structure
4061  * @tc: traffic class index on the device
4062  * @filter: pointer to cloud filter structure
4063  */
iavf_handle_tclass(struct iavf_adapter * adapter,u32 tc,struct iavf_cloud_filter * filter)4064 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
4065 			      struct iavf_cloud_filter *filter)
4066 {
4067 	if (tc == 0)
4068 		return 0;
4069 	if (tc < adapter->num_tc) {
4070 		if (!filter->f.data.tcp_spec.dst_port) {
4071 			dev_err(&adapter->pdev->dev,
4072 				"Specify destination port to redirect to traffic class other than TC0\n");
4073 			return -EINVAL;
4074 		}
4075 	}
4076 	/* redirect to a traffic class on the same device */
4077 	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
4078 	filter->f.action_meta = tc;
4079 	return 0;
4080 }
4081 
4082 /**
4083  * iavf_find_cf - Find the cloud filter in the list
4084  * @adapter: Board private structure
4085  * @cookie: filter specific cookie
4086  *
4087  * Returns ptr to the filter object or NULL. Must be called while holding the
4088  * cloud_filter_list_lock.
4089  */
iavf_find_cf(struct iavf_adapter * adapter,unsigned long * cookie)4090 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
4091 					      unsigned long *cookie)
4092 {
4093 	struct iavf_cloud_filter *filter = NULL;
4094 
4095 	if (!cookie)
4096 		return NULL;
4097 
4098 	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
4099 		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
4100 			return filter;
4101 	}
4102 	return NULL;
4103 }
4104 
4105 /**
4106  * iavf_configure_clsflower - Add tc flower filters
4107  * @adapter: board private structure
4108  * @cls_flower: Pointer to struct flow_cls_offload
4109  */
iavf_configure_clsflower(struct iavf_adapter * adapter,struct flow_cls_offload * cls_flower)4110 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
4111 				    struct flow_cls_offload *cls_flower)
4112 {
4113 	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
4114 	struct iavf_cloud_filter *filter;
4115 	int err;
4116 
4117 	if (tc < 0) {
4118 		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
4119 		return -EINVAL;
4120 	}
4121 
4122 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
4123 	if (!filter)
4124 		return -ENOMEM;
4125 	filter->cookie = cls_flower->cookie;
4126 
4127 	netdev_lock(adapter->netdev);
4128 
4129 	/* bail out here if filter already exists */
4130 	spin_lock_bh(&adapter->cloud_filter_list_lock);
4131 	if (iavf_find_cf(adapter, &cls_flower->cookie)) {
4132 		dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
4133 		err = -EEXIST;
4134 		goto spin_unlock;
4135 	}
4136 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
4137 
4138 	/* set the mask to all zeroes to begin with */
4139 	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
4140 	/* start out with flow type and eth type IPv4 to begin with */
4141 	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
4142 	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
4143 	if (err)
4144 		goto err;
4145 
4146 	err = iavf_handle_tclass(adapter, tc, filter);
4147 	if (err)
4148 		goto err;
4149 
4150 	/* add filter to the list */
4151 	spin_lock_bh(&adapter->cloud_filter_list_lock);
4152 	list_add_tail(&filter->list, &adapter->cloud_filter_list);
4153 	adapter->num_cloud_filters++;
4154 	filter->add = true;
4155 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
4156 spin_unlock:
4157 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
4158 err:
4159 	if (err)
4160 		kfree(filter);
4161 
4162 	netdev_unlock(adapter->netdev);
4163 	return err;
4164 }
4165 
4166 /**
4167  * iavf_delete_clsflower - Remove tc flower filters
4168  * @adapter: board private structure
4169  * @cls_flower: Pointer to struct flow_cls_offload
4170  */
iavf_delete_clsflower(struct iavf_adapter * adapter,struct flow_cls_offload * cls_flower)4171 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4172 				 struct flow_cls_offload *cls_flower)
4173 {
4174 	struct iavf_cloud_filter *filter = NULL;
4175 	int err = 0;
4176 
4177 	spin_lock_bh(&adapter->cloud_filter_list_lock);
4178 	filter = iavf_find_cf(adapter, &cls_flower->cookie);
4179 	if (filter) {
4180 		filter->del = true;
4181 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
4182 	} else {
4183 		err = -EINVAL;
4184 	}
4185 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
4186 
4187 	return err;
4188 }
4189 
4190 /**
4191  * iavf_setup_tc_cls_flower - flower classifier offloads
4192  * @adapter: pointer to iavf adapter structure
4193  * @cls_flower: pointer to flow_cls_offload struct with flow info
4194  */
iavf_setup_tc_cls_flower(struct iavf_adapter * adapter,struct flow_cls_offload * cls_flower)4195 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4196 				    struct flow_cls_offload *cls_flower)
4197 {
4198 	switch (cls_flower->command) {
4199 	case FLOW_CLS_REPLACE:
4200 		return iavf_configure_clsflower(adapter, cls_flower);
4201 	case FLOW_CLS_DESTROY:
4202 		return iavf_delete_clsflower(adapter, cls_flower);
4203 	case FLOW_CLS_STATS:
4204 		return -EOPNOTSUPP;
4205 	default:
4206 		return -EOPNOTSUPP;
4207 	}
4208 }
4209 
4210 /**
4211  * iavf_add_cls_u32 - Add U32 classifier offloads
4212  * @adapter: pointer to iavf adapter structure
4213  * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
4214  *
4215  * Return: 0 on success or negative errno on failure.
4216  */
iavf_add_cls_u32(struct iavf_adapter * adapter,struct tc_cls_u32_offload * cls_u32)4217 static int iavf_add_cls_u32(struct iavf_adapter *adapter,
4218 			    struct tc_cls_u32_offload *cls_u32)
4219 {
4220 	struct netlink_ext_ack *extack = cls_u32->common.extack;
4221 	struct virtchnl_fdir_rule *rule_cfg;
4222 	struct virtchnl_filter_action *vact;
4223 	struct virtchnl_proto_hdrs *hdrs;
4224 	struct ethhdr *spec_h, *mask_h;
4225 	const struct tc_action *act;
4226 	struct iavf_fdir_fltr *fltr;
4227 	struct tcf_exts *exts;
4228 	unsigned int q_index;
4229 	int i, status = 0;
4230 	int off_base = 0;
4231 
4232 	if (cls_u32->knode.link_handle) {
4233 		NL_SET_ERR_MSG_MOD(extack, "Linking not supported");
4234 		return -EOPNOTSUPP;
4235 	}
4236 
4237 	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
4238 	if (!fltr)
4239 		return -ENOMEM;
4240 
4241 	rule_cfg = &fltr->vc_add_msg.rule_cfg;
4242 	hdrs = &rule_cfg->proto_hdrs;
4243 	hdrs->count = 0;
4244 
4245 	/* The parser lib at the PF expects the packet starting with MAC hdr */
4246 	switch (ntohs(cls_u32->common.protocol)) {
4247 	case ETH_P_802_3:
4248 		break;
4249 	case ETH_P_IP:
4250 		spec_h = (struct ethhdr *)hdrs->raw.spec;
4251 		mask_h = (struct ethhdr *)hdrs->raw.mask;
4252 		spec_h->h_proto = htons(ETH_P_IP);
4253 		mask_h->h_proto = htons(0xFFFF);
4254 		off_base += ETH_HLEN;
4255 		break;
4256 	default:
4257 		NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported");
4258 		status = -EOPNOTSUPP;
4259 		goto free_alloc;
4260 	}
4261 
4262 	for (i = 0; i < cls_u32->knode.sel->nkeys; i++) {
4263 		__be32 val, mask;
4264 		int off;
4265 
4266 		off = off_base + cls_u32->knode.sel->keys[i].off;
4267 		val = cls_u32->knode.sel->keys[i].val;
4268 		mask = cls_u32->knode.sel->keys[i].mask;
4269 
4270 		if (off >= sizeof(hdrs->raw.spec)) {
4271 			NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed.");
4272 			status = -EINVAL;
4273 			goto free_alloc;
4274 		}
4275 
4276 		memcpy(&hdrs->raw.spec[off], &val, sizeof(val));
4277 		memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
4278 		hdrs->raw.pkt_len = off + sizeof(val);
4279 	}
4280 
4281 	/* Only one action is allowed */
4282 	rule_cfg->action_set.count = 1;
4283 	vact = &rule_cfg->action_set.actions[0];
4284 	exts = cls_u32->knode.exts;
4285 
4286 	tcf_exts_for_each_action(i, act, exts) {
4287 		/* FDIR queue */
4288 		if (is_tcf_skbedit_rx_queue_mapping(act)) {
4289 			q_index = tcf_skbedit_rx_queue_mapping(act);
4290 			if (q_index >= adapter->num_active_queues) {
4291 				status = -EINVAL;
4292 				goto free_alloc;
4293 			}
4294 
4295 			vact->type = VIRTCHNL_ACTION_QUEUE;
4296 			vact->act_conf.queue.index = q_index;
4297 			break;
4298 		}
4299 
4300 		/* Drop */
4301 		if (is_tcf_gact_shot(act)) {
4302 			vact->type = VIRTCHNL_ACTION_DROP;
4303 			break;
4304 		}
4305 
4306 		/* Unsupported action */
4307 		NL_SET_ERR_MSG_MOD(extack, "Unsupported action.");
4308 		status = -EOPNOTSUPP;
4309 		goto free_alloc;
4310 	}
4311 
4312 	fltr->vc_add_msg.vsi_id = adapter->vsi.id;
4313 	fltr->cls_u32_handle = cls_u32->knode.handle;
4314 	return iavf_fdir_add_fltr(adapter, fltr);
4315 
4316 free_alloc:
4317 	kfree(fltr);
4318 	return status;
4319 }
4320 
4321 /**
4322  * iavf_del_cls_u32 - Delete U32 classifier offloads
4323  * @adapter: pointer to iavf adapter structure
4324  * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
4325  *
4326  * Return: 0 on success or negative errno on failure.
4327  */
iavf_del_cls_u32(struct iavf_adapter * adapter,struct tc_cls_u32_offload * cls_u32)4328 static int iavf_del_cls_u32(struct iavf_adapter *adapter,
4329 			    struct tc_cls_u32_offload *cls_u32)
4330 {
4331 	return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle);
4332 }
4333 
4334 /**
4335  * iavf_setup_tc_cls_u32 - U32 filter offloads
4336  * @adapter: pointer to iavf adapter structure
4337  * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
4338  *
4339  * Return: 0 on success or negative errno on failure.
4340  */
iavf_setup_tc_cls_u32(struct iavf_adapter * adapter,struct tc_cls_u32_offload * cls_u32)4341 static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter,
4342 				 struct tc_cls_u32_offload *cls_u32)
4343 {
4344 	if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter))
4345 		return -EOPNOTSUPP;
4346 
4347 	switch (cls_u32->command) {
4348 	case TC_CLSU32_NEW_KNODE:
4349 	case TC_CLSU32_REPLACE_KNODE:
4350 		return iavf_add_cls_u32(adapter, cls_u32);
4351 	case TC_CLSU32_DELETE_KNODE:
4352 		return iavf_del_cls_u32(adapter, cls_u32);
4353 	default:
4354 		return -EOPNOTSUPP;
4355 	}
4356 }
4357 
4358 /**
4359  * iavf_setup_tc_block_cb - block callback for tc
4360  * @type: type of offload
4361  * @type_data: offload data
4362  * @cb_priv:
4363  *
4364  * This function is the block callback for traffic classes
4365  **/
iavf_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)4366 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4367 				  void *cb_priv)
4368 {
4369 	struct iavf_adapter *adapter = cb_priv;
4370 
4371 	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4372 		return -EOPNOTSUPP;
4373 
4374 	switch (type) {
4375 	case TC_SETUP_CLSFLOWER:
4376 		return iavf_setup_tc_cls_flower(cb_priv, type_data);
4377 	case TC_SETUP_CLSU32:
4378 		return iavf_setup_tc_cls_u32(cb_priv, type_data);
4379 	default:
4380 		return -EOPNOTSUPP;
4381 	}
4382 }
4383 
4384 static LIST_HEAD(iavf_block_cb_list);
4385 
4386 /**
4387  * iavf_setup_tc - configure multiple traffic classes
4388  * @netdev: network interface device structure
4389  * @type: type of offload
4390  * @type_data: tc offload data
4391  *
4392  * This function is the callback to ndo_setup_tc in the
4393  * netdev_ops.
4394  *
4395  * Returns 0 on success
4396  **/
iavf_setup_tc(struct net_device * netdev,enum tc_setup_type type,void * type_data)4397 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4398 			 void *type_data)
4399 {
4400 	struct iavf_adapter *adapter = netdev_priv(netdev);
4401 
4402 	switch (type) {
4403 	case TC_SETUP_QDISC_MQPRIO:
4404 		return __iavf_setup_tc(netdev, type_data);
4405 	case TC_SETUP_BLOCK:
4406 		return flow_block_cb_setup_simple(type_data,
4407 						  &iavf_block_cb_list,
4408 						  iavf_setup_tc_block_cb,
4409 						  adapter, adapter, true);
4410 	default:
4411 		return -EOPNOTSUPP;
4412 	}
4413 }
4414 
4415 /**
4416  * iavf_restore_fdir_filters
4417  * @adapter: board private structure
4418  *
4419  * Restore existing FDIR filters when VF netdev comes back up.
4420  **/
iavf_restore_fdir_filters(struct iavf_adapter * adapter)4421 static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
4422 {
4423 	struct iavf_fdir_fltr *f;
4424 
4425 	spin_lock_bh(&adapter->fdir_fltr_lock);
4426 	list_for_each_entry(f, &adapter->fdir_list_head, list) {
4427 		if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
4428 			/* Cancel a request, keep filter as active */
4429 			f->state = IAVF_FDIR_FLTR_ACTIVE;
4430 		} else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
4431 			   f->state == IAVF_FDIR_FLTR_INACTIVE) {
4432 			/* Add filters which are inactive or have a pending
4433 			 * request to PF to be deleted
4434 			 */
4435 			f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
4436 			adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
4437 		}
4438 	}
4439 	spin_unlock_bh(&adapter->fdir_fltr_lock);
4440 }
4441 
4442 /**
4443  * iavf_open - Called when a network interface is made active
4444  * @netdev: network interface device structure
4445  *
4446  * Returns 0 on success, negative value on failure
4447  *
4448  * The open entry point is called when a network interface is made
4449  * active by the system (IFF_UP).  At this point all resources needed
4450  * for transmit and receive operations are allocated, the interrupt
4451  * handler is registered with the OS, the watchdog is started,
4452  * and the stack is notified that the interface is ready.
4453  **/
iavf_open(struct net_device * netdev)4454 static int iavf_open(struct net_device *netdev)
4455 {
4456 	struct iavf_adapter *adapter = netdev_priv(netdev);
4457 	int err;
4458 
4459 	netdev_assert_locked(netdev);
4460 
4461 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4462 		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4463 		return -EIO;
4464 	}
4465 
4466 	if (adapter->state != __IAVF_DOWN)
4467 		return -EBUSY;
4468 
4469 	if (adapter->state == __IAVF_RUNNING &&
4470 	    !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4471 		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4472 		return 0;
4473 	}
4474 
4475 	/* allocate transmit descriptors */
4476 	err = iavf_setup_all_tx_resources(adapter);
4477 	if (err)
4478 		goto err_setup_tx;
4479 
4480 	/* allocate receive descriptors */
4481 	err = iavf_setup_all_rx_resources(adapter);
4482 	if (err)
4483 		goto err_setup_rx;
4484 
4485 	/* clear any pending interrupts, may auto mask */
4486 	err = iavf_request_traffic_irqs(adapter, netdev->name);
4487 	if (err)
4488 		goto err_req_irq;
4489 
4490 	spin_lock_bh(&adapter->mac_vlan_list_lock);
4491 	iavf_add_filter(adapter, adapter->hw.mac.addr);
4492 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
4493 
4494 	/* Restore filters that were removed with IFF_DOWN */
4495 	iavf_restore_filters(adapter);
4496 	iavf_restore_fdir_filters(adapter);
4497 
4498 	iavf_configure(adapter);
4499 
4500 	iavf_up_complete(adapter);
4501 
4502 	iavf_irq_enable(adapter, true);
4503 
4504 	return 0;
4505 
4506 err_req_irq:
4507 	iavf_down(adapter);
4508 	iavf_free_traffic_irqs(adapter);
4509 err_setup_rx:
4510 	iavf_free_all_rx_resources(adapter);
4511 err_setup_tx:
4512 	iavf_free_all_tx_resources(adapter);
4513 
4514 	return err;
4515 }
4516 
4517 /**
4518  * iavf_close - Disables a network interface
4519  * @netdev: network interface device structure
4520  *
4521  * Returns 0, this is not allowed to fail
4522  *
4523  * The close entry point is called when an interface is de-activated
4524  * by the OS.  The hardware is still under the drivers control, but
4525  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
4526  * are freed, along with all transmit and receive resources.
4527  **/
iavf_close(struct net_device * netdev)4528 static int iavf_close(struct net_device *netdev)
4529 {
4530 	struct iavf_adapter *adapter = netdev_priv(netdev);
4531 	u64 aq_to_restore;
4532 	int status;
4533 
4534 	netdev_assert_locked(netdev);
4535 
4536 	if (adapter->state <= __IAVF_DOWN_PENDING)
4537 		return 0;
4538 
4539 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
4540 	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
4541 	 * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
4542 	 * deadlock with adminq_task() until iavf_close timeouts. We must send
4543 	 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
4544 	 * disable queues possible for vf. Give only necessary flags to
4545 	 * iavf_down and save other to set them right before iavf_close()
4546 	 * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and
4547 	 * iavf will be in DOWN state.
4548 	 */
4549 	aq_to_restore = adapter->aq_required;
4550 	adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
4551 
4552 	/* Remove flags which we do not want to send after close or we want to
4553 	 * send before disable queues.
4554 	 */
4555 	aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG		|
4556 			   IAVF_FLAG_AQ_ENABLE_QUEUES		|
4557 			   IAVF_FLAG_AQ_CONFIGURE_QUEUES	|
4558 			   IAVF_FLAG_AQ_ADD_VLAN_FILTER		|
4559 			   IAVF_FLAG_AQ_ADD_MAC_FILTER		|
4560 			   IAVF_FLAG_AQ_ADD_CLOUD_FILTER	|
4561 			   IAVF_FLAG_AQ_ADD_FDIR_FILTER		|
4562 			   IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
4563 
4564 	iavf_down(adapter);
4565 	iavf_change_state(adapter, __IAVF_DOWN_PENDING);
4566 	iavf_free_traffic_irqs(adapter);
4567 
4568 	netdev_unlock(netdev);
4569 
4570 	/* We explicitly don't free resources here because the hardware is
4571 	 * still active and can DMA into memory. Resources are cleared in
4572 	 * iavf_virtchnl_completion() after we get confirmation from the PF
4573 	 * driver that the rings have been stopped.
4574 	 *
4575 	 * Also, we wait for state to transition to __IAVF_DOWN before
4576 	 * returning. State change occurs in iavf_virtchnl_completion() after
4577 	 * VF resources are released (which occurs after PF driver processes and
4578 	 * responds to admin queue commands).
4579 	 */
4580 
4581 	status = wait_event_timeout(adapter->down_waitqueue,
4582 				    adapter->state == __IAVF_DOWN,
4583 				    msecs_to_jiffies(500));
4584 	if (!status)
4585 		netdev_warn(netdev, "Device resources not yet released\n");
4586 	netdev_lock(netdev);
4587 
4588 	adapter->aq_required |= aq_to_restore;
4589 
4590 	return 0;
4591 }
4592 
4593 /**
4594  * iavf_change_mtu - Change the Maximum Transfer Unit
4595  * @netdev: network interface device structure
4596  * @new_mtu: new value for maximum frame size
4597  *
4598  * Returns 0 on success, negative on failure
4599  **/
iavf_change_mtu(struct net_device * netdev,int new_mtu)4600 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4601 {
4602 	struct iavf_adapter *adapter = netdev_priv(netdev);
4603 	int ret = 0;
4604 
4605 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
4606 		   netdev->mtu, new_mtu);
4607 	WRITE_ONCE(netdev->mtu, new_mtu);
4608 
4609 	if (netif_running(netdev)) {
4610 		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
4611 		ret = iavf_wait_for_reset(adapter);
4612 		if (ret < 0)
4613 			netdev_warn(netdev, "MTU change interrupted waiting for reset");
4614 		else if (ret)
4615 			netdev_warn(netdev, "MTU change timed out waiting for reset");
4616 	}
4617 
4618 	return ret;
4619 }
4620 
4621 /**
4622  * iavf_disable_fdir - disable Flow Director and clear existing filters
4623  * @adapter: board private structure
4624  **/
iavf_disable_fdir(struct iavf_adapter * adapter)4625 static void iavf_disable_fdir(struct iavf_adapter *adapter)
4626 {
4627 	struct iavf_fdir_fltr *fdir, *fdirtmp;
4628 	bool del_filters = false;
4629 
4630 	adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
4631 
4632 	/* remove all Flow Director filters */
4633 	spin_lock_bh(&adapter->fdir_fltr_lock);
4634 	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
4635 				 list) {
4636 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
4637 		    fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
4638 			/* Delete filters not registered in PF */
4639 			list_del(&fdir->list);
4640 			iavf_dec_fdir_active_fltr(adapter, fdir);
4641 			kfree(fdir);
4642 		} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
4643 			   fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
4644 			   fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
4645 			/* Filters registered in PF, schedule their deletion */
4646 			fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
4647 			del_filters = true;
4648 		} else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
4649 			/* Request to delete filter already sent to PF, change
4650 			 * state to DEL_PENDING to delete filter after PF's
4651 			 * response, not set as INACTIVE
4652 			 */
4653 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
4654 		}
4655 	}
4656 	spin_unlock_bh(&adapter->fdir_fltr_lock);
4657 
4658 	if (del_filters) {
4659 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
4660 		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4661 	}
4662 }
4663 
4664 #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
4665 					 NETIF_F_HW_VLAN_CTAG_TX | \
4666 					 NETIF_F_HW_VLAN_STAG_RX | \
4667 					 NETIF_F_HW_VLAN_STAG_TX)
4668 
4669 /**
4670  * iavf_set_features - set the netdev feature flags
4671  * @netdev: ptr to the netdev being adjusted
4672  * @features: the feature set that the stack is suggesting
4673  * Note: expects to be called while under rtnl_lock()
4674  **/
iavf_set_features(struct net_device * netdev,netdev_features_t features)4675 static int iavf_set_features(struct net_device *netdev,
4676 			     netdev_features_t features)
4677 {
4678 	struct iavf_adapter *adapter = netdev_priv(netdev);
4679 
4680 	/* trigger update on any VLAN feature change */
4681 	if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4682 	    (features & NETIF_VLAN_OFFLOAD_FEATURES))
4683 		iavf_set_vlan_offload_features(adapter, netdev->features,
4684 					       features);
4685 	if (CRC_OFFLOAD_ALLOWED(adapter) &&
4686 	    ((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
4687 		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
4688 
4689 	if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
4690 		if (features & NETIF_F_NTUPLE)
4691 			adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
4692 		else
4693 			iavf_disable_fdir(adapter);
4694 	}
4695 
4696 	return 0;
4697 }
4698 
4699 /**
4700  * iavf_features_check - Validate encapsulated packet conforms to limits
4701  * @skb: skb buff
4702  * @dev: This physical port's netdev
4703  * @features: Offload features that the stack believes apply
4704  **/
iavf_features_check(struct sk_buff * skb,struct net_device * dev,netdev_features_t features)4705 static netdev_features_t iavf_features_check(struct sk_buff *skb,
4706 					     struct net_device *dev,
4707 					     netdev_features_t features)
4708 {
4709 	size_t len;
4710 
4711 	/* No point in doing any of this if neither checksum nor GSO are
4712 	 * being requested for this frame.  We can rule out both by just
4713 	 * checking for CHECKSUM_PARTIAL
4714 	 */
4715 	if (skb->ip_summed != CHECKSUM_PARTIAL)
4716 		return features;
4717 
4718 	/* We cannot support GSO if the MSS is going to be less than
4719 	 * 64 bytes.  If it is then we need to drop support for GSO.
4720 	 */
4721 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4722 		features &= ~NETIF_F_GSO_MASK;
4723 
4724 	/* MACLEN can support at most 63 words */
4725 	len = skb_network_offset(skb);
4726 	if (len & ~(63 * 2))
4727 		goto out_err;
4728 
4729 	/* IPLEN and EIPLEN can support at most 127 dwords */
4730 	len = skb_network_header_len(skb);
4731 	if (len & ~(127 * 4))
4732 		goto out_err;
4733 
4734 	if (skb->encapsulation) {
4735 		/* L4TUNLEN can support 127 words */
4736 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
4737 		if (len & ~(127 * 2))
4738 			goto out_err;
4739 
4740 		/* IPLEN can support at most 127 dwords */
4741 		len = skb_inner_transport_header(skb) -
4742 		      skb_inner_network_header(skb);
4743 		if (len & ~(127 * 4))
4744 			goto out_err;
4745 	}
4746 
4747 	/* No need to validate L4LEN as TCP is the only protocol with a
4748 	 * flexible value and we support all possible values supported
4749 	 * by TCP, which is at most 15 dwords
4750 	 */
4751 
4752 	return features;
4753 out_err:
4754 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4755 }
4756 
4757 /**
4758  * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
4759  * @adapter: board private structure
4760  *
4761  * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4762  * were negotiated determine the VLAN features that can be toggled on and off.
4763  **/
4764 static netdev_features_t
iavf_get_netdev_vlan_hw_features(struct iavf_adapter * adapter)4765 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4766 {
4767 	netdev_features_t hw_features = 0;
4768 
4769 	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4770 		return hw_features;
4771 
4772 	/* Enable VLAN features if supported */
4773 	if (VLAN_ALLOWED(adapter)) {
4774 		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4775 				NETIF_F_HW_VLAN_CTAG_RX);
4776 	} else if (VLAN_V2_ALLOWED(adapter)) {
4777 		struct virtchnl_vlan_caps *vlan_v2_caps =
4778 			&adapter->vlan_v2_caps;
4779 		struct virtchnl_vlan_supported_caps *stripping_support =
4780 			&vlan_v2_caps->offloads.stripping_support;
4781 		struct virtchnl_vlan_supported_caps *insertion_support =
4782 			&vlan_v2_caps->offloads.insertion_support;
4783 
4784 		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4785 		    stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4786 			if (stripping_support->outer &
4787 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4788 				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4789 			if (stripping_support->outer &
4790 			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
4791 				hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4792 		} else if (stripping_support->inner !=
4793 			   VIRTCHNL_VLAN_UNSUPPORTED &&
4794 			   stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4795 			if (stripping_support->inner &
4796 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4797 				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4798 		}
4799 
4800 		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4801 		    insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4802 			if (insertion_support->outer &
4803 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4804 				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4805 			if (insertion_support->outer &
4806 			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
4807 				hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4808 		} else if (insertion_support->inner &&
4809 			   insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4810 			if (insertion_support->inner &
4811 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4812 				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4813 		}
4814 	}
4815 
4816 	if (CRC_OFFLOAD_ALLOWED(adapter))
4817 		hw_features |= NETIF_F_RXFCS;
4818 
4819 	return hw_features;
4820 }
4821 
4822 /**
4823  * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN fetures
4824  * @adapter: board private structure
4825  *
4826  * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4827  * were negotiated determine the VLAN features that are enabled by default.
4828  **/
4829 static netdev_features_t
iavf_get_netdev_vlan_features(struct iavf_adapter * adapter)4830 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4831 {
4832 	netdev_features_t features = 0;
4833 
4834 	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4835 		return features;
4836 
4837 	if (VLAN_ALLOWED(adapter)) {
4838 		features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4839 			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4840 	} else if (VLAN_V2_ALLOWED(adapter)) {
4841 		struct virtchnl_vlan_caps *vlan_v2_caps =
4842 			&adapter->vlan_v2_caps;
4843 		struct virtchnl_vlan_supported_caps *filtering_support =
4844 			&vlan_v2_caps->filtering.filtering_support;
4845 		struct virtchnl_vlan_supported_caps *stripping_support =
4846 			&vlan_v2_caps->offloads.stripping_support;
4847 		struct virtchnl_vlan_supported_caps *insertion_support =
4848 			&vlan_v2_caps->offloads.insertion_support;
4849 		u32 ethertype_init;
4850 
4851 		/* give priority to outer stripping and don't support both outer
4852 		 * and inner stripping
4853 		 */
4854 		ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4855 		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4856 			if (stripping_support->outer &
4857 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4858 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4859 				features |= NETIF_F_HW_VLAN_CTAG_RX;
4860 			else if (stripping_support->outer &
4861 				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4862 				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4863 				features |= NETIF_F_HW_VLAN_STAG_RX;
4864 		} else if (stripping_support->inner !=
4865 			   VIRTCHNL_VLAN_UNSUPPORTED) {
4866 			if (stripping_support->inner &
4867 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4868 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4869 				features |= NETIF_F_HW_VLAN_CTAG_RX;
4870 		}
4871 
4872 		/* give priority to outer insertion and don't support both outer
4873 		 * and inner insertion
4874 		 */
4875 		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4876 			if (insertion_support->outer &
4877 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4878 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4879 				features |= NETIF_F_HW_VLAN_CTAG_TX;
4880 			else if (insertion_support->outer &
4881 				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4882 				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4883 				features |= NETIF_F_HW_VLAN_STAG_TX;
4884 		} else if (insertion_support->inner !=
4885 			   VIRTCHNL_VLAN_UNSUPPORTED) {
4886 			if (insertion_support->inner &
4887 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4888 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4889 				features |= NETIF_F_HW_VLAN_CTAG_TX;
4890 		}
4891 
4892 		/* give priority to outer filtering and don't bother if both
4893 		 * outer and inner filtering are enabled
4894 		 */
4895 		ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4896 		if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4897 			if (filtering_support->outer &
4898 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4899 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4900 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4901 			if (filtering_support->outer &
4902 			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4903 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4904 				features |= NETIF_F_HW_VLAN_STAG_FILTER;
4905 		} else if (filtering_support->inner !=
4906 			   VIRTCHNL_VLAN_UNSUPPORTED) {
4907 			if (filtering_support->inner &
4908 			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4909 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4910 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4911 			if (filtering_support->inner &
4912 			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4913 			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4914 				features |= NETIF_F_HW_VLAN_STAG_FILTER;
4915 		}
4916 	}
4917 
4918 	return features;
4919 }
4920 
4921 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4922 	(!(((requested) & (feature_bit)) && \
4923 	   !((allowed) & (feature_bit))))
4924 
4925 /**
4926  * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4927  * @adapter: board private structure
4928  * @requested_features: stack requested NETDEV features
4929  **/
4930 static netdev_features_t
iavf_fix_netdev_vlan_features(struct iavf_adapter * adapter,netdev_features_t requested_features)4931 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4932 			      netdev_features_t requested_features)
4933 {
4934 	netdev_features_t allowed_features;
4935 
4936 	allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4937 		iavf_get_netdev_vlan_features(adapter);
4938 
4939 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4940 					      allowed_features,
4941 					      NETIF_F_HW_VLAN_CTAG_TX))
4942 		requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4943 
4944 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4945 					      allowed_features,
4946 					      NETIF_F_HW_VLAN_CTAG_RX))
4947 		requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4948 
4949 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4950 					      allowed_features,
4951 					      NETIF_F_HW_VLAN_STAG_TX))
4952 		requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4953 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4954 					      allowed_features,
4955 					      NETIF_F_HW_VLAN_STAG_RX))
4956 		requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4957 
4958 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4959 					      allowed_features,
4960 					      NETIF_F_HW_VLAN_CTAG_FILTER))
4961 		requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4962 
4963 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4964 					      allowed_features,
4965 					      NETIF_F_HW_VLAN_STAG_FILTER))
4966 		requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4967 
4968 	if ((requested_features &
4969 	     (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4970 	    (requested_features &
4971 	     (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4972 	    adapter->vlan_v2_caps.offloads.ethertype_match ==
4973 	    VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4974 		netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4975 		requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4976 					NETIF_F_HW_VLAN_STAG_TX);
4977 	}
4978 
4979 	return requested_features;
4980 }
4981 
4982 /**
4983  * iavf_fix_strip_features - fix NETDEV CRC and VLAN strip features
4984  * @adapter: board private structure
4985  * @requested_features: stack requested NETDEV features
4986  *
4987  * Returns fixed-up features bits
4988  **/
4989 static netdev_features_t
iavf_fix_strip_features(struct iavf_adapter * adapter,netdev_features_t requested_features)4990 iavf_fix_strip_features(struct iavf_adapter *adapter,
4991 			netdev_features_t requested_features)
4992 {
4993 	struct net_device *netdev = adapter->netdev;
4994 	bool crc_offload_req, is_vlan_strip;
4995 	netdev_features_t vlan_strip;
4996 	int num_non_zero_vlan;
4997 
4998 	crc_offload_req = CRC_OFFLOAD_ALLOWED(adapter) &&
4999 			  (requested_features & NETIF_F_RXFCS);
5000 	num_non_zero_vlan = iavf_get_num_vlans_added(adapter);
5001 	vlan_strip = (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX);
5002 	is_vlan_strip = requested_features & vlan_strip;
5003 
5004 	if (!crc_offload_req)
5005 		return requested_features;
5006 
5007 	if (!num_non_zero_vlan && (netdev->features & vlan_strip) &&
5008 	    !(netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
5009 		requested_features &= ~vlan_strip;
5010 		netdev_info(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
5011 		return requested_features;
5012 	}
5013 
5014 	if ((netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
5015 		requested_features &= ~vlan_strip;
5016 		if (!(netdev->features & vlan_strip))
5017 			netdev_info(netdev, "To enable VLAN stripping, first need to enable FCS/CRC stripping");
5018 
5019 		return requested_features;
5020 	}
5021 
5022 	if (num_non_zero_vlan && is_vlan_strip &&
5023 	    !(netdev->features & NETIF_F_RXFCS)) {
5024 		requested_features &= ~NETIF_F_RXFCS;
5025 		netdev_info(netdev, "To disable FCS/CRC stripping, first need to disable VLAN stripping");
5026 	}
5027 
5028 	return requested_features;
5029 }
5030 
5031 /**
5032  * iavf_fix_features - fix up the netdev feature bits
5033  * @netdev: our net device
5034  * @features: desired feature bits
5035  *
5036  * Returns fixed-up features bits
5037  **/
iavf_fix_features(struct net_device * netdev,netdev_features_t features)5038 static netdev_features_t iavf_fix_features(struct net_device *netdev,
5039 					   netdev_features_t features)
5040 {
5041 	struct iavf_adapter *adapter = netdev_priv(netdev);
5042 
5043 	features = iavf_fix_netdev_vlan_features(adapter, features);
5044 
5045 	if (!FDIR_FLTR_SUPPORT(adapter))
5046 		features &= ~NETIF_F_NTUPLE;
5047 
5048 	return iavf_fix_strip_features(adapter, features);
5049 }
5050 
iavf_hwstamp_get(struct net_device * netdev,struct kernel_hwtstamp_config * config)5051 static int iavf_hwstamp_get(struct net_device *netdev,
5052 			    struct kernel_hwtstamp_config *config)
5053 {
5054 	struct iavf_adapter *adapter = netdev_priv(netdev);
5055 
5056 	*config = adapter->ptp.hwtstamp_config;
5057 
5058 	return 0;
5059 }
5060 
iavf_hwstamp_set(struct net_device * netdev,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)5061 static int iavf_hwstamp_set(struct net_device *netdev,
5062 			    struct kernel_hwtstamp_config *config,
5063 			    struct netlink_ext_ack *extack)
5064 {
5065 	struct iavf_adapter *adapter = netdev_priv(netdev);
5066 
5067 	return iavf_ptp_set_ts_config(adapter, config, extack);
5068 }
5069 
5070 static int
iavf_verify_shaper(struct net_shaper_binding * binding,const struct net_shaper * shaper,struct netlink_ext_ack * extack)5071 iavf_verify_shaper(struct net_shaper_binding *binding,
5072 		   const struct net_shaper *shaper,
5073 		   struct netlink_ext_ack *extack)
5074 {
5075 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
5076 	u64 vf_max;
5077 
5078 	if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE) {
5079 		vf_max = adapter->qos_caps->cap[0].shaper.peak;
5080 		if (vf_max && shaper->bw_max > vf_max) {
5081 			NL_SET_ERR_MSG_FMT(extack, "Max rate (%llu) of queue %d can't exceed max TX rate of VF (%llu kbps)",
5082 					   shaper->bw_max, shaper->handle.id,
5083 					   vf_max);
5084 			return -EINVAL;
5085 		}
5086 	}
5087 	return 0;
5088 }
5089 
5090 static int
iavf_shaper_set(struct net_shaper_binding * binding,const struct net_shaper * shaper,struct netlink_ext_ack * extack)5091 iavf_shaper_set(struct net_shaper_binding *binding,
5092 		const struct net_shaper *shaper,
5093 		struct netlink_ext_ack *extack)
5094 {
5095 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
5096 	const struct net_shaper_handle *handle = &shaper->handle;
5097 	struct iavf_ring *tx_ring;
5098 	int ret;
5099 
5100 	netdev_assert_locked(adapter->netdev);
5101 
5102 	if (handle->id >= adapter->num_active_queues)
5103 		return 0;
5104 
5105 	ret = iavf_verify_shaper(binding, shaper, extack);
5106 	if (ret)
5107 		return ret;
5108 
5109 	tx_ring = &adapter->tx_rings[handle->id];
5110 
5111 	tx_ring->q_shaper.bw_min = div_u64(shaper->bw_min, 1000);
5112 	tx_ring->q_shaper.bw_max = div_u64(shaper->bw_max, 1000);
5113 	tx_ring->q_shaper_update = true;
5114 
5115 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
5116 
5117 	return 0;
5118 }
5119 
iavf_shaper_del(struct net_shaper_binding * binding,const struct net_shaper_handle * handle,struct netlink_ext_ack * extack)5120 static int iavf_shaper_del(struct net_shaper_binding *binding,
5121 			   const struct net_shaper_handle *handle,
5122 			   struct netlink_ext_ack *extack)
5123 {
5124 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
5125 	struct iavf_ring *tx_ring;
5126 
5127 	netdev_assert_locked(adapter->netdev);
5128 
5129 	if (handle->id >= adapter->num_active_queues)
5130 		return 0;
5131 
5132 	tx_ring = &adapter->tx_rings[handle->id];
5133 	tx_ring->q_shaper.bw_min = 0;
5134 	tx_ring->q_shaper.bw_max = 0;
5135 	tx_ring->q_shaper_update = true;
5136 
5137 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
5138 
5139 	return 0;
5140 }
5141 
iavf_shaper_cap(struct net_shaper_binding * binding,enum net_shaper_scope scope,unsigned long * flags)5142 static void iavf_shaper_cap(struct net_shaper_binding *binding,
5143 			    enum net_shaper_scope scope,
5144 			    unsigned long *flags)
5145 {
5146 	if (scope != NET_SHAPER_SCOPE_QUEUE)
5147 		return;
5148 
5149 	*flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN) |
5150 		 BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
5151 		 BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
5152 }
5153 
5154 static const struct net_shaper_ops iavf_shaper_ops = {
5155 	.set = iavf_shaper_set,
5156 	.delete = iavf_shaper_del,
5157 	.capabilities = iavf_shaper_cap,
5158 };
5159 
5160 static const struct net_device_ops iavf_netdev_ops = {
5161 	.ndo_open		= iavf_open,
5162 	.ndo_stop		= iavf_close,
5163 	.ndo_start_xmit		= iavf_xmit_frame,
5164 	.ndo_set_rx_mode	= iavf_set_rx_mode,
5165 	.ndo_validate_addr	= eth_validate_addr,
5166 	.ndo_set_mac_address	= iavf_set_mac,
5167 	.ndo_change_mtu		= iavf_change_mtu,
5168 	.ndo_tx_timeout		= iavf_tx_timeout,
5169 	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
5170 	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
5171 	.ndo_features_check	= iavf_features_check,
5172 	.ndo_fix_features	= iavf_fix_features,
5173 	.ndo_set_features	= iavf_set_features,
5174 	.ndo_setup_tc		= iavf_setup_tc,
5175 	.net_shaper_ops		= &iavf_shaper_ops,
5176 	.ndo_hwtstamp_get	= iavf_hwstamp_get,
5177 	.ndo_hwtstamp_set	= iavf_hwstamp_set,
5178 };
5179 
5180 /**
5181  * iavf_check_reset_complete - check that VF reset is complete
5182  * @hw: pointer to hw struct
5183  *
5184  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
5185  **/
iavf_check_reset_complete(struct iavf_hw * hw)5186 static int iavf_check_reset_complete(struct iavf_hw *hw)
5187 {
5188 	u32 rstat;
5189 	int i;
5190 
5191 	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
5192 		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
5193 			     IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
5194 		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
5195 		    (rstat == VIRTCHNL_VFR_COMPLETED))
5196 			return 0;
5197 		msleep(IAVF_RESET_WAIT_MS);
5198 	}
5199 	return -EBUSY;
5200 }
5201 
5202 /**
5203  * iavf_process_config - Process the config information we got from the PF
5204  * @adapter: board private structure
5205  *
5206  * Verify that we have a valid config struct, and set up our netdev features
5207  * and our VSI struct.
5208  **/
iavf_process_config(struct iavf_adapter * adapter)5209 int iavf_process_config(struct iavf_adapter *adapter)
5210 {
5211 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
5212 	netdev_features_t hw_vlan_features, vlan_features;
5213 	struct net_device *netdev = adapter->netdev;
5214 	netdev_features_t hw_enc_features;
5215 	netdev_features_t hw_features;
5216 
5217 	hw_enc_features = NETIF_F_SG			|
5218 			  NETIF_F_IP_CSUM		|
5219 			  NETIF_F_IPV6_CSUM		|
5220 			  NETIF_F_HIGHDMA		|
5221 			  NETIF_F_SOFT_FEATURES	|
5222 			  NETIF_F_TSO			|
5223 			  NETIF_F_TSO_ECN		|
5224 			  NETIF_F_TSO6			|
5225 			  NETIF_F_SCTP_CRC		|
5226 			  NETIF_F_RXHASH		|
5227 			  NETIF_F_RXCSUM		|
5228 			  0;
5229 
5230 	/* advertise to stack only if offloads for encapsulated packets is
5231 	 * supported
5232 	 */
5233 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
5234 		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
5235 				   NETIF_F_GSO_GRE		|
5236 				   NETIF_F_GSO_GRE_CSUM		|
5237 				   NETIF_F_GSO_IPXIP4		|
5238 				   NETIF_F_GSO_IPXIP6		|
5239 				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
5240 				   NETIF_F_GSO_PARTIAL		|
5241 				   0;
5242 
5243 		if (!(vfres->vf_cap_flags &
5244 		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
5245 			netdev->gso_partial_features |=
5246 				NETIF_F_GSO_UDP_TUNNEL_CSUM;
5247 
5248 		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
5249 		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
5250 		netdev->hw_enc_features |= hw_enc_features;
5251 	}
5252 	/* record features VLANs can make use of */
5253 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
5254 
5255 	/* Write features and hw_features separately to avoid polluting
5256 	 * with, or dropping, features that are set when we registered.
5257 	 */
5258 	hw_features = hw_enc_features;
5259 
5260 	/* get HW VLAN features that can be toggled */
5261 	hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
5262 
5263 	/* Enable HW TC offload if ADQ or tc U32 is supported */
5264 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ||
5265 	    TC_U32_SUPPORT(adapter))
5266 		hw_features |= NETIF_F_HW_TC;
5267 
5268 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
5269 		hw_features |= NETIF_F_GSO_UDP_L4;
5270 
5271 	netdev->hw_features |= hw_features | hw_vlan_features;
5272 	vlan_features = iavf_get_netdev_vlan_features(adapter);
5273 
5274 	netdev->features |= hw_features | vlan_features;
5275 
5276 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
5277 		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5278 
5279 	if (FDIR_FLTR_SUPPORT(adapter)) {
5280 		netdev->hw_features |= NETIF_F_NTUPLE;
5281 		netdev->features |= NETIF_F_NTUPLE;
5282 		adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
5283 	}
5284 
5285 	netdev->priv_flags |= IFF_UNICAST_FLT;
5286 
5287 	/* Do not turn on offloads when they are requested to be turned off.
5288 	 * TSO needs minimum 576 bytes to work correctly.
5289 	 */
5290 	if (netdev->wanted_features) {
5291 		if (!(netdev->wanted_features & NETIF_F_TSO) ||
5292 		    netdev->mtu < 576)
5293 			netdev->features &= ~NETIF_F_TSO;
5294 		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
5295 		    netdev->mtu < 576)
5296 			netdev->features &= ~NETIF_F_TSO6;
5297 		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
5298 			netdev->features &= ~NETIF_F_TSO_ECN;
5299 		if (!(netdev->wanted_features & NETIF_F_GRO))
5300 			netdev->features &= ~NETIF_F_GRO;
5301 		if (!(netdev->wanted_features & NETIF_F_GSO))
5302 			netdev->features &= ~NETIF_F_GSO;
5303 	}
5304 
5305 	return 0;
5306 }
5307 
5308 /**
5309  * iavf_probe - Device Initialization Routine
5310  * @pdev: PCI device information struct
5311  * @ent: entry in iavf_pci_tbl
5312  *
5313  * Returns 0 on success, negative on failure
5314  *
5315  * iavf_probe initializes an adapter identified by a pci_dev structure.
5316  * The OS initialization, configuring of the adapter private structure,
5317  * and a hardware reset occur.
5318  **/
iavf_probe(struct pci_dev * pdev,const struct pci_device_id * ent)5319 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5320 {
5321 	struct net_device *netdev;
5322 	struct iavf_adapter *adapter = NULL;
5323 	struct iavf_hw *hw = NULL;
5324 	int err, len;
5325 
5326 	err = pci_enable_device(pdev);
5327 	if (err)
5328 		return err;
5329 
5330 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5331 	if (err) {
5332 		dev_err(&pdev->dev,
5333 			"DMA configuration failed: 0x%x\n", err);
5334 		goto err_dma;
5335 	}
5336 
5337 	err = pci_request_regions(pdev, iavf_driver_name);
5338 	if (err) {
5339 		dev_err(&pdev->dev,
5340 			"pci_request_regions failed 0x%x\n", err);
5341 		goto err_pci_reg;
5342 	}
5343 
5344 	pci_set_master(pdev);
5345 
5346 	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
5347 				   IAVF_MAX_REQ_QUEUES);
5348 	if (!netdev) {
5349 		err = -ENOMEM;
5350 		goto err_alloc_etherdev;
5351 	}
5352 
5353 	netif_set_affinity_auto(netdev);
5354 	SET_NETDEV_DEV(netdev, &pdev->dev);
5355 
5356 	pci_set_drvdata(pdev, netdev);
5357 	adapter = netdev_priv(netdev);
5358 
5359 	adapter->netdev = netdev;
5360 	adapter->pdev = pdev;
5361 
5362 	hw = &adapter->hw;
5363 	hw->back = adapter;
5364 
5365 	adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
5366 					      iavf_driver_name);
5367 	if (!adapter->wq) {
5368 		err = -ENOMEM;
5369 		goto err_alloc_wq;
5370 	}
5371 
5372 	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
5373 	iavf_change_state(adapter, __IAVF_STARTUP);
5374 
5375 	/* Call save state here because it relies on the adapter struct. */
5376 	pci_save_state(pdev);
5377 
5378 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
5379 			      pci_resource_len(pdev, 0));
5380 	if (!hw->hw_addr) {
5381 		err = -EIO;
5382 		goto err_ioremap;
5383 	}
5384 	hw->vendor_id = pdev->vendor;
5385 	hw->device_id = pdev->device;
5386 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5387 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5388 	hw->subsystem_device_id = pdev->subsystem_device;
5389 	hw->bus.device = PCI_SLOT(pdev->devfn);
5390 	hw->bus.func = PCI_FUNC(pdev->devfn);
5391 	hw->bus.bus_id = pdev->bus->number;
5392 
5393 	len = struct_size(adapter->qos_caps, cap, IAVF_MAX_QOS_TC_NUM);
5394 	adapter->qos_caps = kzalloc(len, GFP_KERNEL);
5395 	if (!adapter->qos_caps) {
5396 		err = -ENOMEM;
5397 		goto err_alloc_qos_cap;
5398 	}
5399 
5400 	mutex_init(&hw->aq.asq_mutex);
5401 	mutex_init(&hw->aq.arq_mutex);
5402 
5403 	spin_lock_init(&adapter->mac_vlan_list_lock);
5404 	spin_lock_init(&adapter->cloud_filter_list_lock);
5405 	spin_lock_init(&adapter->fdir_fltr_lock);
5406 	spin_lock_init(&adapter->adv_rss_lock);
5407 	spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
5408 
5409 	INIT_LIST_HEAD(&adapter->mac_filter_list);
5410 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
5411 	INIT_LIST_HEAD(&adapter->cloud_filter_list);
5412 	INIT_LIST_HEAD(&adapter->fdir_list_head);
5413 	INIT_LIST_HEAD(&adapter->adv_rss_list_head);
5414 
5415 	INIT_WORK(&adapter->reset_task, iavf_reset_task);
5416 	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
5417 	INIT_WORK(&adapter->finish_config, iavf_finish_config);
5418 	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
5419 
5420 	/* Setup the wait queue for indicating transition to down status */
5421 	init_waitqueue_head(&adapter->down_waitqueue);
5422 
5423 	/* Setup the wait queue for indicating transition to running state */
5424 	init_waitqueue_head(&adapter->reset_waitqueue);
5425 
5426 	/* Setup the wait queue for indicating virtchannel events */
5427 	init_waitqueue_head(&adapter->vc_waitqueue);
5428 
5429 	INIT_LIST_HEAD(&adapter->ptp.aq_cmds);
5430 	init_waitqueue_head(&adapter->ptp.phc_time_waitqueue);
5431 	mutex_init(&adapter->ptp.aq_cmd_lock);
5432 
5433 	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
5434 			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
5435 	/* Initialization goes on in the work. Do not add more of it below. */
5436 	return 0;
5437 
5438 err_alloc_qos_cap:
5439 	iounmap(hw->hw_addr);
5440 err_ioremap:
5441 	destroy_workqueue(adapter->wq);
5442 err_alloc_wq:
5443 	free_netdev(netdev);
5444 err_alloc_etherdev:
5445 	pci_release_regions(pdev);
5446 err_pci_reg:
5447 err_dma:
5448 	pci_disable_device(pdev);
5449 	return err;
5450 }
5451 
5452 /**
5453  * iavf_suspend - Power management suspend routine
5454  * @dev_d: device info pointer
5455  *
5456  * Called when the system (VM) is entering sleep/suspend.
5457  **/
iavf_suspend(struct device * dev_d)5458 static int iavf_suspend(struct device *dev_d)
5459 {
5460 	struct net_device *netdev = dev_get_drvdata(dev_d);
5461 	struct iavf_adapter *adapter = netdev_priv(netdev);
5462 	bool running;
5463 
5464 	netif_device_detach(netdev);
5465 
5466 	running = netif_running(netdev);
5467 	if (running)
5468 		rtnl_lock();
5469 	netdev_lock(netdev);
5470 
5471 	if (running)
5472 		iavf_down(adapter);
5473 
5474 	iavf_free_misc_irq(adapter);
5475 	iavf_reset_interrupt_capability(adapter);
5476 
5477 	netdev_unlock(netdev);
5478 	if (running)
5479 		rtnl_unlock();
5480 
5481 	return 0;
5482 }
5483 
5484 /**
5485  * iavf_resume - Power management resume routine
5486  * @dev_d: device info pointer
5487  *
5488  * Called when the system (VM) is resumed from sleep/suspend.
5489  **/
iavf_resume(struct device * dev_d)5490 static int iavf_resume(struct device *dev_d)
5491 {
5492 	struct pci_dev *pdev = to_pci_dev(dev_d);
5493 	struct iavf_adapter *adapter;
5494 	u32 err;
5495 
5496 	adapter = iavf_pdev_to_adapter(pdev);
5497 
5498 	pci_set_master(pdev);
5499 
5500 	rtnl_lock();
5501 	err = iavf_set_interrupt_capability(adapter);
5502 	if (err) {
5503 		rtnl_unlock();
5504 		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
5505 		return err;
5506 	}
5507 	err = iavf_request_misc_irq(adapter);
5508 	rtnl_unlock();
5509 	if (err) {
5510 		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
5511 		return err;
5512 	}
5513 
5514 	queue_work(adapter->wq, &adapter->reset_task);
5515 
5516 	netif_device_attach(adapter->netdev);
5517 
5518 	return err;
5519 }
5520 
5521 /**
5522  * iavf_remove - Device Removal Routine
5523  * @pdev: PCI device information struct
5524  *
5525  * iavf_remove is called by the PCI subsystem to alert the driver
5526  * that it should release a PCI device.  The could be caused by a
5527  * Hot-Plug event, or because the driver is going to be removed from
5528  * memory.
5529  **/
iavf_remove(struct pci_dev * pdev)5530 static void iavf_remove(struct pci_dev *pdev)
5531 {
5532 	struct iavf_fdir_fltr *fdir, *fdirtmp;
5533 	struct iavf_vlan_filter *vlf, *vlftmp;
5534 	struct iavf_cloud_filter *cf, *cftmp;
5535 	struct iavf_adv_rss *rss, *rsstmp;
5536 	struct iavf_mac_filter *f, *ftmp;
5537 	struct iavf_adapter *adapter;
5538 	struct net_device *netdev;
5539 	struct iavf_hw *hw;
5540 
5541 	/* Don't proceed with remove if netdev is already freed */
5542 	netdev = pci_get_drvdata(pdev);
5543 	if (!netdev)
5544 		return;
5545 
5546 	adapter = iavf_pdev_to_adapter(pdev);
5547 	hw = &adapter->hw;
5548 
5549 	if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
5550 		return;
5551 
5552 	/* Wait until port initialization is complete.
5553 	 * There are flows where register/unregister netdev may race.
5554 	 */
5555 	while (1) {
5556 		netdev_lock(netdev);
5557 		if (adapter->state == __IAVF_RUNNING ||
5558 		    adapter->state == __IAVF_DOWN ||
5559 		    adapter->state == __IAVF_INIT_FAILED) {
5560 			netdev_unlock(netdev);
5561 			break;
5562 		}
5563 		/* Simply return if we already went through iavf_shutdown */
5564 		if (adapter->state == __IAVF_REMOVE) {
5565 			netdev_unlock(netdev);
5566 			return;
5567 		}
5568 
5569 		netdev_unlock(netdev);
5570 		usleep_range(500, 1000);
5571 	}
5572 	cancel_delayed_work_sync(&adapter->watchdog_task);
5573 	cancel_work_sync(&adapter->finish_config);
5574 
5575 	if (netdev->reg_state == NETREG_REGISTERED)
5576 		unregister_netdev(netdev);
5577 
5578 	netdev_lock(netdev);
5579 	dev_info(&adapter->pdev->dev, "Removing device\n");
5580 	iavf_change_state(adapter, __IAVF_REMOVE);
5581 
5582 	iavf_request_reset(adapter);
5583 	msleep(50);
5584 	/* If the FW isn't responding, kick it once, but only once. */
5585 	if (!iavf_asq_done(hw)) {
5586 		iavf_request_reset(adapter);
5587 		msleep(50);
5588 	}
5589 
5590 	iavf_ptp_release(adapter);
5591 
5592 	iavf_misc_irq_disable(adapter);
5593 	/* Shut down all the garbage mashers on the detention level */
5594 	netdev_unlock(netdev);
5595 	cancel_work_sync(&adapter->reset_task);
5596 	cancel_delayed_work_sync(&adapter->watchdog_task);
5597 	cancel_work_sync(&adapter->adminq_task);
5598 	netdev_lock(netdev);
5599 
5600 	adapter->aq_required = 0;
5601 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
5602 
5603 	iavf_free_all_tx_resources(adapter);
5604 	iavf_free_all_rx_resources(adapter);
5605 	iavf_free_misc_irq(adapter);
5606 	iavf_free_interrupt_scheme(adapter);
5607 
5608 	iavf_free_rss(adapter);
5609 
5610 	if (hw->aq.asq.count)
5611 		iavf_shutdown_adminq(hw);
5612 
5613 	/* destroy the locks only once, here */
5614 	mutex_destroy(&hw->aq.arq_mutex);
5615 	mutex_destroy(&hw->aq.asq_mutex);
5616 	netdev_unlock(netdev);
5617 
5618 	iounmap(hw->hw_addr);
5619 	pci_release_regions(pdev);
5620 	kfree(adapter->vf_res);
5621 	spin_lock_bh(&adapter->mac_vlan_list_lock);
5622 	/* If we got removed before an up/down sequence, we've got a filter
5623 	 * hanging out there that we need to get rid of.
5624 	 */
5625 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
5626 		list_del(&f->list);
5627 		kfree(f);
5628 	}
5629 	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
5630 				 list) {
5631 		list_del(&vlf->list);
5632 		kfree(vlf);
5633 	}
5634 
5635 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
5636 
5637 	spin_lock_bh(&adapter->cloud_filter_list_lock);
5638 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
5639 		list_del(&cf->list);
5640 		kfree(cf);
5641 	}
5642 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
5643 
5644 	spin_lock_bh(&adapter->fdir_fltr_lock);
5645 	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
5646 		list_del(&fdir->list);
5647 		kfree(fdir);
5648 	}
5649 	spin_unlock_bh(&adapter->fdir_fltr_lock);
5650 
5651 	spin_lock_bh(&adapter->adv_rss_lock);
5652 	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
5653 				 list) {
5654 		list_del(&rss->list);
5655 		kfree(rss);
5656 	}
5657 	spin_unlock_bh(&adapter->adv_rss_lock);
5658 
5659 	destroy_workqueue(adapter->wq);
5660 
5661 	pci_set_drvdata(pdev, NULL);
5662 
5663 	free_netdev(netdev);
5664 
5665 	pci_disable_device(pdev);
5666 }
5667 
5668 /**
5669  * iavf_shutdown - Shutdown the device in preparation for a reboot
5670  * @pdev: pci device structure
5671  **/
iavf_shutdown(struct pci_dev * pdev)5672 static void iavf_shutdown(struct pci_dev *pdev)
5673 {
5674 	iavf_remove(pdev);
5675 
5676 	if (system_state == SYSTEM_POWER_OFF)
5677 		pci_set_power_state(pdev, PCI_D3hot);
5678 }
5679 
5680 static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
5681 
5682 static struct pci_driver iavf_driver = {
5683 	.name      = iavf_driver_name,
5684 	.id_table  = iavf_pci_tbl,
5685 	.probe     = iavf_probe,
5686 	.remove    = iavf_remove,
5687 	.driver.pm = pm_sleep_ptr(&iavf_pm_ops),
5688 	.shutdown  = iavf_shutdown,
5689 };
5690 
5691 /**
5692  * iavf_init_module - Driver Registration Routine
5693  *
5694  * iavf_init_module is the first routine called when the driver is
5695  * loaded. All it does is register with the PCI subsystem.
5696  **/
iavf_init_module(void)5697 static int __init iavf_init_module(void)
5698 {
5699 	pr_info("iavf: %s\n", iavf_driver_string);
5700 
5701 	pr_info("%s\n", iavf_copyright);
5702 
5703 	return pci_register_driver(&iavf_driver);
5704 }
5705 
5706 module_init(iavf_init_module);
5707 
5708 /**
5709  * iavf_exit_module - Driver Exit Cleanup Routine
5710  *
5711  * iavf_exit_module is called just before the driver is removed
5712  * from memory.
5713  **/
iavf_exit_module(void)5714 static void __exit iavf_exit_module(void)
5715 {
5716 	pci_unregister_driver(&iavf_driver);
5717 }
5718 
5719 module_exit(iavf_exit_module);
5720 
5721 /* iavf_main.c */
5722