xref: /linux/drivers/net/ethernet/intel/iavf/iavf_main.c (revision a1d9d8e833781c44ab688708804ce35f20f3cbbd)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/net/intel/libie/rx.h>
5 #include <net/netdev_lock.h>
6 
7 #include "iavf.h"
8 #include "iavf_ptp.h"
9 #include "iavf_prototype.h"
10 /* All iavf tracepoints are defined by the include below, which must
11  * be included exactly once across the whole kernel with
12  * CREATE_TRACE_POINTS defined
13  */
14 #define CREATE_TRACE_POINTS
15 #include "iavf_trace.h"
16 
17 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
18 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
19 static int iavf_close(struct net_device *netdev);
20 static void iavf_init_get_resources(struct iavf_adapter *adapter);
21 static int iavf_check_reset_complete(struct iavf_hw *hw);
22 
23 char iavf_driver_name[] = "iavf";
24 static const char iavf_driver_string[] =
25 	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";
26 
27 static const char iavf_copyright[] =
28 	"Copyright (c) 2013 - 2018 Intel Corporation.";
29 
30 /* iavf_pci_tbl - PCI Device ID Table
31  *
32  * Wildcard entries (PCI_ANY_ID) should come last
33  * Last entry must be all 0s
34  *
35  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36  *   Class, Class Mask, private data (not used) }
37  */
38 static const struct pci_device_id iavf_pci_tbl[] = {
39 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
40 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
41 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
42 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
43 	/* required last entry */
44 	{0, }
45 };
46 
47 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
48 
49 MODULE_ALIAS("i40evf");
50 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
51 MODULE_IMPORT_NS("LIBETH");
52 MODULE_IMPORT_NS("LIBIE");
53 MODULE_IMPORT_NS("LIBIE_ADMINQ");
54 MODULE_LICENSE("GPL v2");
55 
56 static const struct net_device_ops iavf_netdev_ops;
57 
/**
 * iavf_status_to_errno - convert an iavf status code to a kernel errno
 * @status: status code returned by the shared/admin-queue code
 *
 * Return: 0 for IAVF_SUCCESS, otherwise the negative errno that best
 * matches the status class; any status not listed maps to -EIO.
 */
int iavf_status_to_errno(enum iavf_status status)
{
	switch (status) {
	case IAVF_SUCCESS:
		return 0;
	/* malformed parameters and invalid identifiers/indices */
	case IAVF_ERR_PARAM:
	case IAVF_ERR_MAC_TYPE:
	case IAVF_ERR_INVALID_MAC_ADDR:
	case IAVF_ERR_INVALID_LINK_SETTINGS:
	case IAVF_ERR_INVALID_PD_ID:
	case IAVF_ERR_INVALID_QP_ID:
	case IAVF_ERR_INVALID_CQ_ID:
	case IAVF_ERR_INVALID_CEQ_ID:
	case IAVF_ERR_INVALID_AEQ_ID:
	case IAVF_ERR_INVALID_SIZE:
	case IAVF_ERR_INVALID_ARP_INDEX:
	case IAVF_ERR_INVALID_FPM_FUNC_ID:
	case IAVF_ERR_QP_INVALID_MSG_SIZE:
	case IAVF_ERR_INVALID_FRAG_COUNT:
	case IAVF_ERR_INVALID_ALIGNMENT:
	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
	case IAVF_ERR_INVALID_VF_ID:
	case IAVF_ERR_INVALID_HMCFN_ID:
	case IAVF_ERR_INVALID_PBLE_INDEX:
	case IAVF_ERR_INVALID_SD_INDEX:
	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
	case IAVF_ERR_INVALID_SD_TYPE:
	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
		return -EINVAL;
	/* hardware, firmware and admin-queue level failures */
	case IAVF_ERR_NVM:
	case IAVF_ERR_NVM_CHECKSUM:
	case IAVF_ERR_PHY:
	case IAVF_ERR_CONFIG:
	case IAVF_ERR_UNKNOWN_PHY:
	case IAVF_ERR_LINK_SETUP:
	case IAVF_ERR_ADAPTER_STOPPED:
	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
	case IAVF_ERR_RESET_FAILED:
	case IAVF_ERR_BAD_PTR:
	case IAVF_ERR_SWFW_SYNC:
	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
	case IAVF_ERR_QUEUE_EMPTY:
	case IAVF_ERR_FLUSHED_QUEUE:
	case IAVF_ERR_OPCODE_MISMATCH:
	case IAVF_ERR_CQP_COMPL_ERROR:
	case IAVF_ERR_BACKING_PAGE_ERROR:
	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
	case IAVF_ERR_MEMCPY_FAILED:
	case IAVF_ERR_SRQ_ENABLED:
	case IAVF_ERR_ADMIN_QUEUE_ERROR:
	case IAVF_ERR_ADMIN_QUEUE_FULL:
	case IAVF_ERR_BAD_RDMA_CQE:
	case IAVF_ERR_NVM_BLANK_MODE:
	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
	case IAVF_ERR_DIAG_TEST_FAILED:
	case IAVF_ERR_FIRMWARE_API_VERSION:
	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return -EIO;
	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
		return -ENODEV;
	case IAVF_ERR_NO_AVAILABLE_VSI:
	case IAVF_ERR_RING_FULL:
		return -ENOSPC;
	case IAVF_ERR_NO_MEMORY:
		return -ENOMEM;
	case IAVF_ERR_TIMEOUT:
	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
		return -ETIMEDOUT;
	case IAVF_ERR_NOT_IMPLEMENTED:
	case IAVF_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
		return -EALREADY;
	case IAVF_ERR_NOT_READY:
		return -EBUSY;
	case IAVF_ERR_BUF_TOO_SHORT:
		return -EMSGSIZE;
	}

	/* default for any status value not covered above */
	return -EIO;
}
143 
/**
 * virtchnl_status_to_errno - convert a virtchnl status code to a kernel errno
 * @v_status: status code carried in a virtchnl message from the PF
 *
 * Return: 0 for VIRTCHNL_STATUS_SUCCESS, otherwise a matching negative
 * errno; unknown statuses map to -EIO.
 */
int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
	switch (v_status) {
	case VIRTCHNL_STATUS_SUCCESS:
		return 0;
	case VIRTCHNL_STATUS_ERR_PARAM:
	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
		return -EINVAL;
	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
		return -ENOMEM;
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
		return -EIO;
	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	}

	/* default for any status value not covered above */
	return -EIO;
}
164 
/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	return netdev_priv(netdev);
}
173 
174 /**
175  * iavf_is_reset_in_progress - Check if a reset is in progress
176  * @adapter: board private structure
177  */
iavf_is_reset_in_progress(struct iavf_adapter * adapter)178 static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
179 {
180 	if (adapter->state == __IAVF_RESETTING ||
181 	    adapter->flags & (IAVF_FLAG_RESET_PENDING |
182 			      IAVF_FLAG_RESET_NEEDED))
183 		return true;
184 
185 	return false;
186 }
187 
188 /**
189  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
190  * @hw:   pointer to the HW structure
191  * @mem:  ptr to mem struct to fill out
192  * @size: size of memory requested
193  * @alignment: what to align the allocation to
194  **/
iavf_allocate_dma_mem_d(struct iavf_hw * hw,struct iavf_dma_mem * mem,u64 size,u32 alignment)195 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
196 					 struct iavf_dma_mem *mem,
197 					 u64 size, u32 alignment)
198 {
199 	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
200 
201 	if (!mem)
202 		return IAVF_ERR_PARAM;
203 
204 	mem->size = ALIGN(size, alignment);
205 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
206 				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
207 	if (mem->va)
208 		return 0;
209 	else
210 		return IAVF_ERR_NO_MEMORY;
211 }
212 
213 /**
214  * iavf_free_dma_mem - wrapper for DMA memory freeing
215  * @hw:   pointer to the HW structure
216  * @mem:  ptr to mem struct to free
217  **/
iavf_free_dma_mem(struct iavf_hw * hw,struct iavf_dma_mem * mem)218 enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem)
219 {
220 	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
221 
222 	if (!mem || !mem->va)
223 		return IAVF_ERR_PARAM;
224 	dma_free_coherent(&adapter->pdev->dev, mem->size,
225 			  mem->va, (dma_addr_t)mem->pa);
226 	return 0;
227 }
228 
229 /**
230  * iavf_allocate_virt_mem - virt memory alloc wrapper
231  * @hw:   pointer to the HW structure
232  * @mem:  ptr to mem struct to fill out
233  * @size: size of memory requested
234  **/
iavf_allocate_virt_mem(struct iavf_hw * hw,struct iavf_virt_mem * mem,u32 size)235 enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
236 					struct iavf_virt_mem *mem, u32 size)
237 {
238 	if (!mem)
239 		return IAVF_ERR_PARAM;
240 
241 	mem->size = size;
242 	mem->va = kzalloc(size, GFP_KERNEL);
243 
244 	if (mem->va)
245 		return 0;
246 	else
247 		return IAVF_ERR_NO_MEMORY;
248 }
249 
/**
 * iavf_free_virt_mem - virt memory free wrapper
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 *
 * NOTE(review): unlike iavf_free_dma_mem(), @mem itself is dereferenced
 * without a NULL check — callers must pass a valid struct.
 **/
void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
	/* kfree(NULL) is a no-op, so a NULL va needs no guard */
	kfree(mem->va);
}
259 
260 /**
261  * iavf_schedule_reset - Set the flags and schedule a reset event
262  * @adapter: board private structure
263  * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
264  **/
iavf_schedule_reset(struct iavf_adapter * adapter,u64 flags)265 void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
266 {
267 	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
268 	    !(adapter->flags &
269 	    (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
270 		adapter->flags |= flags;
271 		queue_work(adapter->wq, &adapter->reset_task);
272 	}
273 }
274 
/**
 * iavf_schedule_aq_request - Set the flags and schedule aq request
 * @adapter: board private structure
 * @flags: requested aq flags
 **/
void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
{
	adapter->aq_required |= flags;
	/* kick the watchdog immediately so the request is processed now */
	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
285 
/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 *
 * Recovery from a Tx hang is a full reset of the VF.
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
}
298 
/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks the misc (admin queue) vector and waits for any in-flight
 * handler to finish. No-op when MSI-X is not set up.
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	/* flush the write before waiting for the handler */
	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}
316 
/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 *
 * Re-arms the misc vector and enables admin queue interrupt causes.
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}
331 
332 /**
333  * iavf_irq_disable - Mask off interrupt generation on the NIC
334  * @adapter: board private structure
335  **/
iavf_irq_disable(struct iavf_adapter * adapter)336 static void iavf_irq_disable(struct iavf_adapter *adapter)
337 {
338 	int i;
339 	struct iavf_hw *hw = &adapter->hw;
340 
341 	if (!adapter->msix_entries)
342 		return;
343 
344 	for (i = 1; i < adapter->num_msix_vectors; i++) {
345 		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
346 		synchronize_irq(adapter->msix_entries[i].vector);
347 	}
348 	iavf_flush(hw);
349 }
350 
351 /**
352  * iavf_irq_enable_queues - Enable interrupt for all queues
353  * @adapter: board private structure
354  **/
iavf_irq_enable_queues(struct iavf_adapter * adapter)355 static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
356 {
357 	struct iavf_hw *hw = &adapter->hw;
358 	int i;
359 
360 	for (i = 1; i < adapter->num_msix_vectors; i++) {
361 		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
362 		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
363 		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
364 	}
365 }
366 
367 /**
368  * iavf_irq_enable - Enable default interrupt generation settings
369  * @adapter: board private structure
370  * @flush: boolean value whether to run rd32()
371  **/
iavf_irq_enable(struct iavf_adapter * adapter,bool flush)372 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
373 {
374 	struct iavf_hw *hw = &adapter->hw;
375 
376 	iavf_misc_irq_enable(adapter);
377 	iavf_irq_enable_queues(adapter);
378 
379 	if (flush)
380 		iavf_flush(hw);
381 }
382 
/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 *
 * Clears the non-queue interrupt causes and defers admin queue
 * processing to the adminq work item.
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	if (adapter->state != __IAVF_REMOVE)
		/* schedule work on the private workqueue */
		queue_work(adapter->wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}
404 
405 /**
406  * iavf_msix_clean_rings - MSIX mode Interrupt Handler
407  * @irq: interrupt number
408  * @data: pointer to a q_vector
409  **/
iavf_msix_clean_rings(int irq,void * data)410 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
411 {
412 	struct iavf_q_vector *q_vector = data;
413 
414 	if (!q_vector->tx.ring && !q_vector->rx.ring)
415 		return IRQ_HANDLED;
416 
417 	napi_schedule_irqoff(&q_vector->napi);
418 
419 	return IRQ_HANDLED;
420 }
421 
422 /**
423  * iavf_map_vector_to_rxq - associate irqs with rx queues
424  * @adapter: board private structure
425  * @v_idx: interrupt number
426  * @r_idx: queue number
427  **/
428 static void
iavf_map_vector_to_rxq(struct iavf_adapter * adapter,int v_idx,int r_idx)429 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
430 {
431 	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
432 	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
433 	struct iavf_hw *hw = &adapter->hw;
434 
435 	rx_ring->q_vector = q_vector;
436 	rx_ring->next = q_vector->rx.ring;
437 	rx_ring->vsi = &adapter->vsi;
438 	q_vector->rx.ring = rx_ring;
439 	q_vector->rx.count++;
440 	q_vector->rx.next_update = jiffies + 1;
441 	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
442 	q_vector->ring_mask |= BIT(r_idx);
443 	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
444 	     q_vector->rx.current_itr >> 1);
445 	q_vector->rx.current_itr = q_vector->rx.target_itr;
446 }
447 
/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 *
 * Links the Tx ring onto the vector's ring list, refreshes the vector's
 * ITR bookkeeping, and writes the target ITR to hardware.
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	/* prepend the ring to the vector's singly-linked ring list */
	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	/* hardware stores the ITR in 2-usec units, hence the >> 1 */
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}
473 
474 /**
475  * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
476  * @adapter: board private structure to initialize
477  *
478  * This function maps descriptor rings to the queue-specific vectors
479  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
480  * one vector per ring/queue, but on a constrained vector budget, we
481  * group the rings as "efficiently" as possible.  You would add new
482  * mapping configurations in here.
483  **/
iavf_map_rings_to_vectors(struct iavf_adapter * adapter)484 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
485 {
486 	int rings_remaining = adapter->num_active_queues;
487 	int ridx = 0, vidx = 0;
488 	int q_vectors;
489 
490 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
491 
492 	for (; ridx < rings_remaining; ridx++) {
493 		iavf_map_vector_to_rxq(adapter, vidx, ridx);
494 		iavf_map_vector_to_txq(adapter, vidx, ridx);
495 
496 		/* In the case where we have more queues than vectors, continue
497 		 * round-robin on vectors until all queues are mapped.
498 		 */
499 		if (++vidx >= q_vectors)
500 			vidx = 0;
501 	}
502 
503 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
504 }
505 
/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 *
 * Return: 0 on success, negative errno from request_irq() on failure;
 * all traffic vectors requested so far are freed before returning.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		/* name the IRQ after the traffic it carries so it is
		 * identifiable in /proc/interrupts
		 */
		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%u", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%u", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	return 0;

free_queue_irqs:
	/* unwind: release every vector requested before the failure */
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}
566 
567 /**
568  * iavf_request_misc_irq - Initialize MSI-X interrupts
569  * @adapter: board private structure
570  *
571  * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
572  * vector is only for the admin queue, and stays active even when the netdev
573  * is closed.
574  **/
iavf_request_misc_irq(struct iavf_adapter * adapter)575 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
576 {
577 	struct net_device *netdev = adapter->netdev;
578 	int err;
579 
580 	snprintf(adapter->misc_vector_name,
581 		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
582 		 dev_name(&adapter->pdev->dev));
583 	err = request_irq(adapter->msix_entries[0].vector,
584 			  &iavf_msix_aq, 0,
585 			  adapter->misc_vector_name, netdev);
586 	if (err) {
587 		dev_err(&adapter->pdev->dev,
588 			"request_irq for %s failed: %d\n",
589 			adapter->misc_vector_name, err);
590 		free_irq(adapter->msix_entries[0].vector, netdev);
591 	}
592 	return err;
593 }
594 
/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0. No-op when MSI-X was never
 * set up.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	struct iavf_q_vector *q_vector;
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		q_vector = &adapter->q_vectors[vector];
		/* detach the IRQ from the NAPI instance before freeing it */
		netif_napi_set_irq_locked(&q_vector->napi, -1);
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		free_irq(irq_num, q_vector);
	}
}
618 
/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0. No-op when MSI-X was never set up. The netdev
 * pointer must match the dev_id passed to request_irq() in
 * iavf_request_misc_irq().
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}
634 
635 /**
636  * iavf_configure_tx - Configure Transmit Unit after Reset
637  * @adapter: board private structure
638  *
639  * Configure the Tx unit of the MAC after a reset.
640  **/
iavf_configure_tx(struct iavf_adapter * adapter)641 static void iavf_configure_tx(struct iavf_adapter *adapter)
642 {
643 	struct iavf_hw *hw = &adapter->hw;
644 	int i;
645 
646 	for (i = 0; i < adapter->num_active_queues; i++)
647 		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
648 }
649 
/**
 * iavf_select_rx_desc_format - Select Rx descriptor format
 * @adapter: adapter private structure
 *
 * Select what Rx descriptor format based on availability and enabled
 * features.
 *
 * Return: the desired RXDID to select for a given Rx queue, as defined by
 *         enum virtchnl_rxdid_format.
 */
static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter)
{
	u64 rxdids = adapter->supp_rxdids;

	/* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must
	 * stick with the default value of the legacy 32 byte format.
	 */
	if (!IAVF_RXDID_ALLOWED(adapter))
		return VIRTCHNL_RXDID_1_32B_BASE;

	/* Rx timestamping requires the use of flexible NIC descriptors */
	if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
		if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
			return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;

		/* fall through to the legacy format; Rx timestamps will not
		 * be available in this case
		 */
		pci_warn(adapter->pdev,
			 "Unable to negotiate flexible descriptor format\n");
	}

	/* Warn if the PF does not list support for the default legacy
	 * descriptor format. This shouldn't happen, as this is the format
	 * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is
	 * likely caused by a bug in the PF implementation failing to indicate
	 * support for the format.
	 */
	if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
		netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");

	return VIRTCHNL_RXDID_1_32B_BASE;
}
690 
691 /**
692  * iavf_configure_rx - Configure Receive Unit after Reset
693  * @adapter: board private structure
694  *
695  * Configure the Rx unit of the MAC after a reset.
696  **/
iavf_configure_rx(struct iavf_adapter * adapter)697 static void iavf_configure_rx(struct iavf_adapter *adapter)
698 {
699 	struct iavf_hw *hw = &adapter->hw;
700 
701 	adapter->rxdid = iavf_select_rx_desc_format(adapter);
702 
703 	for (u32 i = 0; i < adapter->num_active_queues; i++) {
704 		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
705 		adapter->rx_rings[i].rxdid = adapter->rxdid;
706 	}
707 }
708 
709 /**
710  * iavf_find_vlan - Search filter list for specific vlan filter
711  * @adapter: board private structure
712  * @vlan: vlan tag
713  *
714  * Returns ptr to the filter object or NULL. Must be called while holding the
715  * mac_vlan_list_lock.
716  **/
717 static struct
iavf_find_vlan(struct iavf_adapter * adapter,struct iavf_vlan vlan)718 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
719 				 struct iavf_vlan vlan)
720 {
721 	struct iavf_vlan_filter *f;
722 
723 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
724 		if (f->vlan.vid == vlan.vid &&
725 		    f->vlan.tpid == vlan.tpid)
726 			return f;
727 	}
728 
729 	return NULL;
730 }
731 
/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Takes mac_vlan_list_lock internally; do not call with it held.
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
				struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		/* GFP_ATOMIC: we hold a BH spinlock, so we cannot sleep */
		f = kzalloc_obj(*f, GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->state = IAVF_VLAN_ADD;
		adapter->num_vlan_filters++;
		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
	} else if (f->state == IAVF_VLAN_REMOVE) {
		/* Re-add the filter since we cannot tell whether the
		 * pending delete has already been processed by the PF.
		 * A duplicate add is harmless.
		 */
		f->state = IAVF_VLAN_ADD;
		iavf_schedule_aq_request(adapter,
					 IAVF_FLAG_AQ_ADD_VLAN_FILTER);
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}
773 
/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Takes mac_vlan_list_lock internally; do not call with it held.
 * A filter not yet pushed to the PF is dropped immediately; otherwise it
 * is marked for removal and the AQ request is scheduled.
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		/* IAVF_ADD_VLAN means that VLAN wasn't even added yet.
		 * Remove it from the list.
		 */
		if (f->state == IAVF_VLAN_ADD) {
			list_del(&f->list);
			kfree(f);
			adapter->num_vlan_filters--;
		} else {
			f->state = IAVF_VLAN_REMOVE;
			iavf_schedule_aq_request(adapter,
						 IAVF_FLAG_AQ_DEL_VLAN_FILTER);
		}
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
803 
/**
 * iavf_restore_filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up:
 * inactive VLAN filters are re-queued for addition to the PF.
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f;

	/* re-add all VLAN filters */
	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_INACTIVE)
			f->state = IAVF_VLAN_ADD;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	/* watchdog picks this flag up on its next run */
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
825 
/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 *
 * Return: the current count of tracked VLAN filters.
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
	return adapter->num_vlan_filters;
}
834 
835 /**
836  * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
837  * @adapter: board private structure
838  *
839  * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
840  * do not impose a limit as that maintains current behavior and for
841  * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
842  **/
iavf_get_max_vlans_allowed(struct iavf_adapter * adapter)843 static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
844 {
845 	/* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
846 	 * never been a limit on the VF driver side
847 	 */
848 	if (VLAN_ALLOWED(adapter))
849 		return VLAN_N_VID;
850 	else if (VLAN_V2_ALLOWED(adapter))
851 		return adapter->vlan_v2_caps.filtering.max_filters;
852 
853 	return 0;
854 }
855 
856 /**
857  * iavf_max_vlans_added - check if maximum VLANs allowed already exist
858  * @adapter: board private structure
859  **/
iavf_max_vlans_added(struct iavf_adapter * adapter)860 static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
861 {
862 	if (iavf_get_num_vlans_added(adapter) <
863 	    iavf_get_max_vlans_allowed(adapter))
864 		return false;
865 
866 	return true;
867 }
868 
/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 *
 * Return: 0 on success, -EIO when VLAN filtering is not negotiated or the
 * filter limit is hit, -ENOMEM when the filter cannot be allocated.
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Do not track VLAN 0 filter, always added by the PF on VF init */
	if (!vid)
		return 0;

	if (!VLAN_FILTERING_ALLOWED(adapter))
		return -EIO;

	if (iavf_max_vlans_added(adapter)) {
		netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
			   iavf_get_max_vlans_allowed(adapter));
		return -EIO;
	}

	if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
		return -ENOMEM;

	return 0;
}
898 
/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 *
 * Return: always 0; removal of an unknown VID is silently ignored.
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* We do not track VLAN 0 filter */
	if (!vid)
		return 0;

	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
	return 0;
}
917 
918 /**
919  * iavf_find_filter - Search filter list for specific mac filter
920  * @adapter: board private structure
921  * @macaddr: the MAC address
922  *
923  * Returns ptr to the filter object or NULL. Must be called while holding the
924  * mac_vlan_list_lock.
925  **/
926 static struct
iavf_find_filter(struct iavf_adapter * adapter,const u8 * macaddr)927 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
928 				  const u8 *macaddr)
929 {
930 	struct iavf_mac_filter *f;
931 
932 	if (!macaddr)
933 		return NULL;
934 
935 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
936 		if (ether_addr_equal(macaddr, f->macaddr))
937 			return f;
938 	}
939 	return NULL;
940 }
941 
/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Must be called while holding the mac_vlan_list_lock (callers such as
 * iavf_replace_primary_mac() take it before calling).
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		/* GFP_ATOMIC: the caller holds a BH spinlock */
		f = kzalloc_obj(*f, GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->add_handled = false;
		f->is_new_mac = true;
		f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		/* existing filter: cancel any pending removal */
		f->remove = false;
	}

	return f;
}
977 
/**
 * iavf_replace_primary_mac - Replace current primary address
 * @adapter: board private structure
 * @new_mac: new MAC address to be applied
 *
 * Replace current dev_addr and send request to PF for removal of previous
 * primary MAC address filter and addition of new primary MAC filter.
 * Return 0 for success, -ENOMEM for failure.
 *
 * Do not call this with mac_vlan_list_lock!
 **/
static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
				    const u8 *new_mac)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *new_f;
	struct iavf_mac_filter *old_f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Add (or re-use) the filter for the new address first, so that on
	 * allocation failure we bail out without touching the old filter.
	 */
	new_f = iavf_add_filter(adapter, new_mac);
	if (!new_f) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return -ENOMEM;
	}

	/* Mark the previous primary filter (if still present) for removal */
	old_f = iavf_find_filter(adapter, hw->mac.addr);
	if (old_f) {
		old_f->is_primary = false;
		old_f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	/* Always send the request to add if changing primary MAC,
	 * even if filter is already present on the list
	 */
	new_f->is_primary = true;
	new_f->add = true;
	ether_addr_copy(hw->mac.addr, new_mac);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* schedule the watchdog task to immediately process the request */
	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER);
	return 0;
}
1023 
1024 /**
1025  * iavf_is_mac_set_handled - wait for a response to set MAC from PF
1026  * @netdev: network interface device structure
1027  * @macaddr: MAC address to set
1028  *
1029  * Returns true on success, false on failure
1030  */
iavf_is_mac_set_handled(struct net_device * netdev,const u8 * macaddr)1031 static bool iavf_is_mac_set_handled(struct net_device *netdev,
1032 				    const u8 *macaddr)
1033 {
1034 	struct iavf_adapter *adapter = netdev_priv(netdev);
1035 	struct iavf_mac_filter *f;
1036 	bool ret = false;
1037 
1038 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1039 
1040 	f = iavf_find_filter(adapter, macaddr);
1041 
1042 	if (!f || (!f->add && f->add_handled))
1043 		ret = true;
1044 
1045 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1046 
1047 	return ret;
1048 }
1049 
1050 /**
1051  * iavf_set_mac - NDO callback to set port MAC address
1052  * @netdev: network interface device structure
1053  * @p: pointer to an address structure
1054  *
1055  * Returns 0 on success, negative on failure
1056  */
iavf_set_mac(struct net_device * netdev,void * p)1057 static int iavf_set_mac(struct net_device *netdev, void *p)
1058 {
1059 	struct iavf_adapter *adapter = netdev_priv(netdev);
1060 	struct sockaddr *addr = p;
1061 	int ret;
1062 
1063 	if (!is_valid_ether_addr(addr->sa_data))
1064 		return -EADDRNOTAVAIL;
1065 
1066 	ret = iavf_replace_primary_mac(adapter, addr->sa_data);
1067 
1068 	if (ret)
1069 		return ret;
1070 
1071 	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
1072 					       iavf_is_mac_set_handled(netdev, addr->sa_data),
1073 					       msecs_to_jiffies(2500));
1074 
1075 	/* If ret < 0 then it means wait was interrupted.
1076 	 * If ret == 0 then it means we got a timeout.
1077 	 * else it means we got response for set MAC from PF,
1078 	 * check if netdev MAC was updated to requested MAC,
1079 	 * if yes then set MAC succeeded otherwise it failed return -EACCES
1080 	 */
1081 	if (ret < 0)
1082 		return ret;
1083 
1084 	if (!ret)
1085 		return -EAGAIN;
1086 
1087 	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
1088 		return -EACCES;
1089 
1090 	return 0;
1091 }
1092 
1093 /**
1094  * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1095  * @netdev: the netdevice
1096  * @addr: address to add
1097  *
1098  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1099  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1100  */
iavf_addr_sync(struct net_device * netdev,const u8 * addr)1101 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
1102 {
1103 	struct iavf_adapter *adapter = netdev_priv(netdev);
1104 
1105 	if (iavf_add_filter(adapter, addr))
1106 		return 0;
1107 	else
1108 		return -ENOMEM;
1109 }
1110 
1111 /**
1112  * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1113  * @netdev: the netdevice
1114  * @addr: address to add
1115  *
1116  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1117  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1118  */
iavf_addr_unsync(struct net_device * netdev,const u8 * addr)1119 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
1120 {
1121 	struct iavf_adapter *adapter = netdev_priv(netdev);
1122 	struct iavf_mac_filter *f;
1123 
1124 	/* Under some circumstances, we might receive a request to delete
1125 	 * our own device address from our uc list. Because we store the
1126 	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1127 	 * such requests and not delete our device address from this list.
1128 	 */
1129 	if (ether_addr_equal(addr, netdev->dev_addr))
1130 		return 0;
1131 
1132 	f = iavf_find_filter(adapter, addr);
1133 	if (f) {
1134 		f->remove = true;
1135 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1136 	}
1137 	return 0;
1138 }
1139 
1140 /**
1141  * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
1142  * @adapter: device specific adapter
1143  */
iavf_promiscuous_mode_changed(struct iavf_adapter * adapter)1144 bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
1145 {
1146 	return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
1147 		(IFF_PROMISC | IFF_ALLMULTI);
1148 }
1149 
/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Sync the uc/mc address lists into our MAC filter list; the
	 * sync/unsync callbacks touch mac_filter_list, which is protected
	 * by mac_vlan_list_lock.
	 */
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* Only ask the watchdog to reconfigure promiscuous/allmulti state
	 * when the relevant flag bits actually changed.
	 */
	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
	if (iavf_promiscuous_mode_changed(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
}
1168 
1169 /**
1170  * iavf_napi_enable_all - enable NAPI on all queue vectors
1171  * @adapter: board private structure
1172  **/
iavf_napi_enable_all(struct iavf_adapter * adapter)1173 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
1174 {
1175 	int q_idx;
1176 	struct iavf_q_vector *q_vector;
1177 	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1178 
1179 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1180 		struct napi_struct *napi;
1181 
1182 		q_vector = &adapter->q_vectors[q_idx];
1183 		napi = &q_vector->napi;
1184 		napi_enable_locked(napi);
1185 	}
1186 }
1187 
1188 /**
1189  * iavf_napi_disable_all - disable NAPI on all queue vectors
1190  * @adapter: board private structure
1191  **/
iavf_napi_disable_all(struct iavf_adapter * adapter)1192 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
1193 {
1194 	int q_idx;
1195 	struct iavf_q_vector *q_vector;
1196 	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1197 
1198 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1199 		q_vector = &adapter->q_vectors[q_idx];
1200 		napi_disable_locked(&q_vector->napi);
1201 	}
1202 }
1203 
1204 /**
1205  * iavf_configure - set up transmit and receive data structures
1206  * @adapter: board private structure
1207  **/
iavf_configure(struct iavf_adapter * adapter)1208 static void iavf_configure(struct iavf_adapter *adapter)
1209 {
1210 	struct net_device *netdev = adapter->netdev;
1211 	int i;
1212 
1213 	iavf_set_rx_mode(netdev);
1214 
1215 	iavf_configure_tx(adapter);
1216 	iavf_configure_rx(adapter);
1217 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
1218 
1219 	for (i = 0; i < adapter->num_active_queues; i++) {
1220 		struct iavf_ring *ring = &adapter->rx_rings[i];
1221 
1222 		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
1223 	}
1224 }
1225 
/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 */
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	/* caller must hold the netdev lock */
	netdev_assert_locked(adapter->netdev);

	iavf_change_state(adapter, __IAVF_RUNNING);
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	/* ask the PF to enable the queues; processed by the watchdog task */
	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
}
1241 
1242 /**
1243  * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
1244  * yet and mark other to be removed.
1245  * @adapter: board private structure
1246  **/
iavf_clear_mac_vlan_filters(struct iavf_adapter * adapter)1247 static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
1248 {
1249 	struct iavf_vlan_filter *vlf, *vlftmp;
1250 	struct iavf_mac_filter *f, *ftmp;
1251 
1252 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1253 	/* clear the sync flag on all filters */
1254 	__dev_uc_unsync(adapter->netdev, NULL);
1255 	__dev_mc_unsync(adapter->netdev, NULL);
1256 
1257 	/* remove all MAC filters */
1258 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1259 				 list) {
1260 		if (f->add) {
1261 			list_del(&f->list);
1262 			kfree(f);
1263 		} else {
1264 			f->remove = true;
1265 		}
1266 	}
1267 
1268 	/* disable all VLAN filters */
1269 	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
1270 				 list)
1271 		vlf->state = IAVF_VLAN_DISABLE;
1272 
1273 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1274 }
1275 
1276 /**
1277  * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
1278  * mark other to be removed.
1279  * @adapter: board private structure
1280  **/
iavf_clear_cloud_filters(struct iavf_adapter * adapter)1281 static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
1282 {
1283 	struct iavf_cloud_filter *cf, *cftmp;
1284 
1285 	/* remove all cloud filters */
1286 	spin_lock_bh(&adapter->cloud_filter_list_lock);
1287 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1288 				 list) {
1289 		if (cf->add) {
1290 			list_del(&cf->list);
1291 			kfree(cf);
1292 			adapter->num_cloud_filters--;
1293 		} else {
1294 			cf->del = true;
1295 		}
1296 	}
1297 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
1298 }
1299 
1300 /**
1301  * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
1302  * other to be removed.
1303  * @adapter: board private structure
1304  **/
iavf_clear_fdir_filters(struct iavf_adapter * adapter)1305 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
1306 {
1307 	struct iavf_fdir_fltr *fdir;
1308 
1309 	/* remove all Flow Director filters */
1310 	spin_lock_bh(&adapter->fdir_fltr_lock);
1311 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1312 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1313 			/* Cancel a request, keep filter as inactive */
1314 			fdir->state = IAVF_FDIR_FLTR_INACTIVE;
1315 		} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
1316 			 fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
1317 			/* Disable filters which are active or have a pending
1318 			 * request to PF to be added
1319 			 */
1320 			fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST;
1321 		}
1322 	}
1323 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1324 }
1325 
1326 /**
1327  * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
1328  * other to be removed.
1329  * @adapter: board private structure
1330  **/
iavf_clear_adv_rss_conf(struct iavf_adapter * adapter)1331 static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
1332 {
1333 	struct iavf_adv_rss *rss, *rsstmp;
1334 
1335 	/* remove all advance RSS configuration */
1336 	spin_lock_bh(&adapter->adv_rss_lock);
1337 	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
1338 				 list) {
1339 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1340 			list_del(&rss->list);
1341 			kfree(rss);
1342 		} else {
1343 			rss->state = IAVF_ADV_RSS_DEL_REQUEST;
1344 		}
1345 	}
1346 	spin_unlock_bh(&adapter->adv_rss_lock);
1347 }
1348 
/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 */
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* caller must hold the netdev lock */
	netdev_assert_locked(netdev);

	/* nothing to do if the interface is already down (or going down) */
	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	/* quiesce the data path before touching the filter lists */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	iavf_clear_mac_vlan_filters(adapter);
	iavf_clear_cloud_filters(adapter);
	iavf_clear_fdir_filters(adapter);
	iavf_clear_adv_rss_conf(adapter);

	/* if PF communication is broken there is no one to ask */
	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		return;

	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		if (!list_empty(&adapter->mac_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
		if (!list_empty(&adapter->vlan_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		if (!list_empty(&adapter->cloud_filter_list))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		if (!list_empty(&adapter->fdir_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		if (!list_empty(&adapter->adv_rss_list_head))
			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
	}

	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES);
}
1397 
1398 /**
1399  * iavf_acquire_msix_vectors - Setup the MSIX capability
1400  * @adapter: board private structure
1401  * @vectors: number of vectors to request
1402  *
1403  * Work with the OS to set up the MSIX vectors needed.
1404  *
1405  * Returns 0 on success, negative on failure
1406  **/
1407 static int
iavf_acquire_msix_vectors(struct iavf_adapter * adapter,int vectors)1408 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1409 {
1410 	int err, vector_threshold;
1411 
1412 	/* We'll want at least 3 (vector_threshold):
1413 	 * 0) Other (Admin Queue and link, mostly)
1414 	 * 1) TxQ[0] Cleanup
1415 	 * 2) RxQ[0] Cleanup
1416 	 */
1417 	vector_threshold = MIN_MSIX_COUNT;
1418 
1419 	/* The more we get, the more we will assign to Tx/Rx Cleanup
1420 	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1421 	 * Right now, we simply care about how many we'll get; we'll
1422 	 * set them up later while requesting irq's.
1423 	 */
1424 	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1425 				    vector_threshold, vectors);
1426 	if (err < 0) {
1427 		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1428 		kfree(adapter->msix_entries);
1429 		adapter->msix_entries = NULL;
1430 		return err;
1431 	}
1432 
1433 	/* Adjust for only the vectors we'll use, which is minimum
1434 	 * of max_msix_q_vectors + NONQ_VECS, or the number of
1435 	 * vectors we were allocated.
1436 	 */
1437 	adapter->num_msix_vectors = err;
1438 	return 0;
1439 }
1440 
1441 /**
1442  * iavf_free_queues - Free memory for all rings
1443  * @adapter: board private structure to initialize
1444  *
1445  * Free all of the memory associated with queue pairs.
1446  **/
iavf_free_queues(struct iavf_adapter * adapter)1447 static void iavf_free_queues(struct iavf_adapter *adapter)
1448 {
1449 	if (!adapter->vsi_res)
1450 		return;
1451 	adapter->num_active_queues = 0;
1452 	kfree(adapter->tx_rings);
1453 	adapter->tx_rings = NULL;
1454 	kfree(adapter->rx_rings);
1455 	adapter->rx_rings = NULL;
1456 }
1457 
/**
 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
 * @adapter: board private structure
 *
 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
 * stripped in certain descriptor fields. Instead of checking the offload
 * capability bits in the hot path, cache the location in the ring specific
 * flags.
 */
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
		struct iavf_ring *rx_ring = &adapter->rx_rings[i];

		/* prevent multiple L2TAG bits being set after VFR */
		tx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
		rx_ring->flags &=
			~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
			  IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);

		if (VLAN_ALLOWED(adapter)) {
			/* legacy VLAN offload: the tag is always in L2TAG1 */
			tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
			rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
		} else if (VLAN_V2_ALLOWED(adapter)) {
			struct virtchnl_vlan_supported_caps *stripping_support;
			struct virtchnl_vlan_supported_caps *insertion_support;

			/* VLAN v2: the PF reports where each tag lives; the
			 * outer location takes precedence over the inner one.
			 */
			stripping_support =
				&adapter->vlan_v2_caps.offloads.stripping_support;
			insertion_support =
				&adapter->vlan_v2_caps.offloads.insertion_support;

			/* Rx: where the stripped tag will be reported */
			if (stripping_support->outer) {
				if (stripping_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			} else if (stripping_support->inner) {
				if (stripping_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					rx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (stripping_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
					rx_ring->flags |=
						IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
			}

			/* Tx: where the tag to insert must be placed */
			if (insertion_support->outer) {
				if (insertion_support->outer &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->outer &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			} else if (insertion_support->inner) {
				if (insertion_support->inner &
				    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
					tx_ring->flags |=
						IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
				else if (insertion_support->inner &
					 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
					tx_ring->flags |=
						IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
			}
		}
	}
}
1537 
/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did.  Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		/* no explicit request: one queue pair per online CPU, capped
		 * by what the VSI provides
		 */
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));


	adapter->tx_rings = kzalloc_objs(struct iavf_ring, num_active_queues);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kzalloc_objs(struct iavf_ring, num_active_queues);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	/* cache VLAN tag locations now that the ring flags are set up */
	iavf_set_queue_vlan_tag_loc(adapter);

	return 0;

err_out:
	/* frees whichever of tx_rings/rx_rings was allocated */
	iavf_free_queues(adapter);
	return -ENOMEM;
}
1604 
1605 /**
1606  * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1607  * @adapter: board private structure to initialize
1608  *
1609  * Attempt to configure the interrupts using the best available
1610  * capabilities of the hardware and the kernel.
1611  **/
iavf_set_interrupt_capability(struct iavf_adapter * adapter)1612 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1613 {
1614 	int vector, v_budget;
1615 	int pairs = 0;
1616 	int err = 0;
1617 
1618 	if (!adapter->vsi_res) {
1619 		err = -EIO;
1620 		goto out;
1621 	}
1622 	pairs = adapter->num_active_queues;
1623 
1624 	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1625 	 * us much good if we have more vectors than CPUs. However, we already
1626 	 * limit the total number of queues by the number of CPUs so we do not
1627 	 * need any further limiting here.
1628 	 */
1629 	v_budget = min_t(int, pairs + NONQ_VECS,
1630 			 (int)adapter->vf_res->max_vectors);
1631 
1632 	adapter->msix_entries = kzalloc_objs(struct msix_entry, v_budget);
1633 	if (!adapter->msix_entries) {
1634 		err = -ENOMEM;
1635 		goto out;
1636 	}
1637 
1638 	for (vector = 0; vector < v_budget; vector++)
1639 		adapter->msix_entries[vector].entry = vector;
1640 
1641 	err = iavf_acquire_msix_vectors(adapter, v_budget);
1642 	if (!err)
1643 		iavf_schedule_finish_config(adapter);
1644 
1645 out:
1646 	return err;
1647 }
1648 
1649 /**
1650  * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1651  * @adapter: board private structure
1652  *
1653  * Return 0 on success, negative on failure
1654  **/
iavf_config_rss_aq(struct iavf_adapter * adapter)1655 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1656 {
1657 	struct iavf_aqc_get_set_rss_key_data *rss_key =
1658 		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1659 	struct iavf_hw *hw = &adapter->hw;
1660 	enum iavf_status status;
1661 
1662 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1663 		/* bail because we already have a command pending */
1664 		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1665 			adapter->current_op);
1666 		return -EBUSY;
1667 	}
1668 
1669 	status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1670 	if (status) {
1671 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1672 			iavf_stat_str(hw, status),
1673 			libie_aq_str(hw->aq.asq_last_status));
1674 		return iavf_status_to_errno(status);
1675 
1676 	}
1677 
1678 	status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1679 				     adapter->rss_lut, adapter->rss_lut_size);
1680 	if (status) {
1681 		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1682 			iavf_stat_str(hw, status),
1683 			libie_aq_str(hw->aq.asq_last_status));
1684 		return iavf_status_to_errno(status);
1685 	}
1686 
1687 	return 0;
1688 
1689 }
1690 
1691 /**
1692  * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1693  * @adapter: board private structure
1694  *
1695  * Returns 0 on success, negative on failure
1696  **/
iavf_config_rss_reg(struct iavf_adapter * adapter)1697 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1698 {
1699 	struct iavf_hw *hw = &adapter->hw;
1700 	u32 *dw;
1701 	u16 i;
1702 
1703 	dw = (u32 *)adapter->rss_key;
1704 	for (i = 0; i < adapter->rss_key_size / 4; i++)
1705 		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1706 
1707 	dw = (u32 *)adapter->rss_lut;
1708 	for (i = 0; i < adapter->rss_lut_size / 4; i++)
1709 		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1710 
1711 	iavf_flush(hw);
1712 
1713 	return 0;
1714 }
1715 
1716 /**
1717  * iavf_config_rss - Configure RSS keys and lut
1718  * @adapter: board private structure
1719  *
1720  * Returns 0 on success, negative on failure
1721  **/
iavf_config_rss(struct iavf_adapter * adapter)1722 int iavf_config_rss(struct iavf_adapter *adapter)
1723 {
1724 
1725 	if (RSS_PF(adapter)) {
1726 		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1727 					IAVF_FLAG_AQ_SET_RSS_KEY;
1728 		return 0;
1729 	} else if (RSS_AQ(adapter)) {
1730 		return iavf_config_rss_aq(adapter);
1731 	} else {
1732 		return iavf_config_rss_reg(adapter);
1733 	}
1734 }
1735 
1736 /**
1737  * iavf_fill_rss_lut - Fill the lut with default values
1738  * @adapter: board private structure
1739  **/
iavf_fill_rss_lut(struct iavf_adapter * adapter)1740 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1741 {
1742 	u16 i;
1743 
1744 	for (i = 0; i < adapter->rss_lut_size; i++)
1745 		adapter->rss_lut[i] = i % adapter->num_active_queues;
1746 }
1747 
1748 /**
1749  * iavf_init_rss - Prepare for RSS
1750  * @adapter: board private structure
1751  *
1752  * Return 0 on success, negative on failure
1753  **/
iavf_init_rss(struct iavf_adapter * adapter)1754 static int iavf_init_rss(struct iavf_adapter *adapter)
1755 {
1756 	struct iavf_hw *hw = &adapter->hw;
1757 
1758 	if (!RSS_PF(adapter)) {
1759 		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1760 		if (adapter->vf_res->vf_cap_flags &
1761 		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1762 			adapter->rss_hashcfg =
1763 				IAVF_DEFAULT_RSS_HASHCFG_EXPANDED;
1764 		else
1765 			adapter->rss_hashcfg = IAVF_DEFAULT_RSS_HASHCFG;
1766 
1767 		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->rss_hashcfg);
1768 		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->rss_hashcfg >> 32));
1769 	}
1770 
1771 	iavf_fill_rss_lut(adapter);
1772 	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1773 
1774 	return iavf_config_rss(adapter);
1775 }
1776 
1777 /**
1778  * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1779  * @adapter: board private structure to initialize
1780  *
1781  * We allocate one q_vector per queue interrupt.  If allocation fails we
1782  * return -ENOMEM.
1783  **/
iavf_alloc_q_vectors(struct iavf_adapter * adapter)1784 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1785 {
1786 	int q_idx = 0, num_q_vectors, irq_num;
1787 	struct iavf_q_vector *q_vector;
1788 
1789 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1790 	adapter->q_vectors = kzalloc_objs(*q_vector, num_q_vectors);
1791 	if (!adapter->q_vectors)
1792 		return -ENOMEM;
1793 
1794 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1795 		irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector;
1796 		q_vector = &adapter->q_vectors[q_idx];
1797 		q_vector->adapter = adapter;
1798 		q_vector->vsi = &adapter->vsi;
1799 		q_vector->v_idx = q_idx;
1800 		q_vector->reg_idx = q_idx;
1801 		netif_napi_add_config_locked(adapter->netdev, &q_vector->napi,
1802 					     iavf_napi_poll, q_idx);
1803 		netif_napi_set_irq_locked(&q_vector->napi, irq_num);
1804 	}
1805 
1806 	return 0;
1807 }
1808 
1809 /**
1810  * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1811  * @adapter: board private structure to initialize
1812  *
1813  * This function frees the memory allocated to the q_vectors.  In addition if
1814  * NAPI is enabled it will delete any references to the NAPI struct prior
1815  * to freeing the q_vector.
1816  **/
iavf_free_q_vectors(struct iavf_adapter * adapter)1817 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1818 {
1819 	int q_idx, num_q_vectors;
1820 
1821 	if (!adapter->q_vectors)
1822 		return;
1823 
1824 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1825 
1826 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1827 		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1828 
1829 		netif_napi_del_locked(&q_vector->napi);
1830 	}
1831 	kfree(adapter->q_vectors);
1832 	adapter->q_vectors = NULL;
1833 }
1834 
/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	/* nothing to undo if MSI-X was never set up */
	if (!adapter->msix_entries)
		return;

	/* disable MSI-X before releasing the entry table */
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
1849 
/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	/* allocate rings first, then vectors, then q_vectors; the error
	 * labels below unwind in the opposite order
	 */
	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}
1902 
/**
 * iavf_free_interrupt_scheme - Undo what iavf_init_interrupt_scheme does
 * @adapter: board private structure
 *
 * Teardown mirrors init in reverse order: free the q_vectors first (they
 * reference the MSI-X vectors), then disable MSI-X, then free the queues.
 **/
static void iavf_free_interrupt_scheme(struct iavf_adapter *adapter)
{
	iavf_free_q_vectors(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_queues(adapter);
}
1913 
1914 /**
1915  * iavf_free_rss - Free memory used by RSS structs
1916  * @adapter: board private structure
1917  **/
iavf_free_rss(struct iavf_adapter * adapter)1918 static void iavf_free_rss(struct iavf_adapter *adapter)
1919 {
1920 	kfree(adapter->rss_key);
1921 	adapter->rss_key = NULL;
1922 
1923 	kfree(adapter->rss_lut);
1924 	adapter->rss_lut = NULL;
1925 }
1926 
/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 * @running: true if adapter->state == __IAVF_RUNNING
 *
 * Frees every IRQ and the whole interrupt scheme, then rebuilds it from
 * scratch and re-maps rings to vectors. Traffic IRQs are only released
 * when @running, since they are requested only while the VSI is up.
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (running)
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_free_interrupt_scheme(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	/* VSI must be brought back up by the caller after the rebuild */
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}
1960 
/**
 * iavf_finish_config - do all netdev work that needs RTNL
 * @work: our work_struct
 *
 * Do work that needs RTNL: update netdev features, set the real queue
 * counts, and register the netdev on first bring-up. Runs from
 * adapter->finish_config, scheduled via iavf_schedule_finish_config().
 */
static void iavf_finish_config(struct work_struct *work)
{
	struct iavf_adapter *adapter;
	/* true once the netdev lock was dropped for register_netdevice() */
	bool netdev_released = false;
	int pairs, err;

	adapter = container_of(work, struct iavf_adapter, finish_config);

	/* Always take RTNL first to prevent circular lock dependency;
	 * the dev->lock (== netdev lock) is needed to update the queue number.
	 */
	rtnl_lock();
	netdev_lock(adapter->netdev);

	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
	    adapter->netdev->reg_state == NETREG_REGISTERED &&
	    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
		netdev_update_features(adapter->netdev);
		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
	}

	switch (adapter->state) {
	case __IAVF_DOWN:
		/* Set the real number of queues when reset occurs while
		 * state == __IAVF_DOWN
		 */
		pairs = adapter->num_active_queues;
		netif_set_real_num_rx_queues(adapter->netdev, pairs);
		netif_set_real_num_tx_queues(adapter->netdev, pairs);

		if (adapter->netdev->reg_state != NETREG_REGISTERED) {
			/* register_netdevice() takes the netdev lock itself,
			 * so it must be dropped here first.
			 */
			netdev_unlock(adapter->netdev);
			netdev_released = true;
			err = register_netdevice(adapter->netdev);
			if (err) {
				dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
					err);

				/* go back and try again.*/
				netdev_lock(adapter->netdev);
				iavf_free_rss(adapter);
				iavf_free_misc_irq(adapter);
				iavf_reset_interrupt_capability(adapter);
				iavf_change_state(adapter,
						  __IAVF_INIT_CONFIG_ADAPTER);
				netdev_unlock(adapter->netdev);
				goto out;
			}
		}
		break;
	case __IAVF_RUNNING:
		pairs = adapter->num_active_queues;
		netif_set_real_num_rx_queues(adapter->netdev, pairs);
		netif_set_real_num_tx_queues(adapter->netdev, pairs);
		break;

	default:
		break;
	}

out:
	/* unlock only if still held; the register path above already did */
	if (!netdev_released)
		netdev_unlock(adapter->netdev);
	rtnl_unlock();
}
2032 
2033 /**
2034  * iavf_schedule_finish_config - Set the flags and schedule a reset event
2035  * @adapter: board private structure
2036  **/
iavf_schedule_finish_config(struct iavf_adapter * adapter)2037 void iavf_schedule_finish_config(struct iavf_adapter *adapter)
2038 {
2039 	if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
2040 		queue_work(adapter->wq, &adapter->finish_config);
2041 }
2042 
2043 /**
2044  * iavf_process_aq_command - process aq_required flags
2045  * and sends aq command
2046  * @adapter: pointer to iavf adapter structure
2047  *
2048  * Returns 0 on success
2049  * Returns error code if no command was sent
2050  * or error code if the command failed.
2051  **/
iavf_process_aq_command(struct iavf_adapter * adapter)2052 static int iavf_process_aq_command(struct iavf_adapter *adapter)
2053 {
2054 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
2055 		return iavf_send_vf_config_msg(adapter);
2056 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
2057 		return iavf_send_vf_offload_vlan_v2_msg(adapter);
2058 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS)
2059 		return iavf_send_vf_supported_rxdids_msg(adapter);
2060 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS)
2061 		return iavf_send_vf_ptp_caps_msg(adapter);
2062 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
2063 		iavf_disable_queues(adapter);
2064 		return 0;
2065 	}
2066 
2067 	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2068 		iavf_map_queues(adapter);
2069 		return 0;
2070 	}
2071 
2072 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2073 		iavf_add_ether_addrs(adapter);
2074 		return 0;
2075 	}
2076 
2077 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2078 		iavf_add_vlans(adapter);
2079 		return 0;
2080 	}
2081 
2082 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2083 		iavf_del_ether_addrs(adapter);
2084 		return 0;
2085 	}
2086 
2087 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2088 		iavf_del_vlans(adapter);
2089 		return 0;
2090 	}
2091 
2092 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2093 		iavf_enable_vlan_stripping(adapter);
2094 		return 0;
2095 	}
2096 
2097 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2098 		iavf_disable_vlan_stripping(adapter);
2099 		return 0;
2100 	}
2101 
2102 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW) {
2103 		iavf_cfg_queues_bw(adapter);
2104 		return 0;
2105 	}
2106 
2107 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_QOS_CAPS) {
2108 		iavf_get_qos_caps(adapter);
2109 		return 0;
2110 	}
2111 
2112 	if (adapter->aq_required & IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE) {
2113 		iavf_cfg_queues_quanta_size(adapter);
2114 		return 0;
2115 	}
2116 
2117 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2118 		iavf_configure_queues(adapter);
2119 		return 0;
2120 	}
2121 
2122 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2123 		iavf_enable_queues(adapter);
2124 		return 0;
2125 	}
2126 
2127 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2128 		/* This message goes straight to the firmware, not the
2129 		 * PF, so we don't have to set current_op as we will
2130 		 * not get a response through the ARQ.
2131 		 */
2132 		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2133 		return 0;
2134 	}
2135 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_RSS_HASHCFG) {
2136 		iavf_get_rss_hashcfg(adapter);
2137 		return 0;
2138 	}
2139 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HASHCFG) {
2140 		iavf_set_rss_hashcfg(adapter);
2141 		return 0;
2142 	}
2143 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2144 		iavf_set_rss_key(adapter);
2145 		return 0;
2146 	}
2147 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2148 		iavf_set_rss_lut(adapter);
2149 		return 0;
2150 	}
2151 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) {
2152 		iavf_set_rss_hfunc(adapter);
2153 		return 0;
2154 	}
2155 
2156 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
2157 		iavf_set_promiscuous(adapter);
2158 		return 0;
2159 	}
2160 
2161 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2162 		iavf_enable_channels(adapter);
2163 		return 0;
2164 	}
2165 
2166 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2167 		iavf_disable_channels(adapter);
2168 		return 0;
2169 	}
2170 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2171 		iavf_add_cloud_filter(adapter);
2172 		return 0;
2173 	}
2174 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2175 		iavf_del_cloud_filter(adapter);
2176 		return 0;
2177 	}
2178 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2179 		iavf_add_fdir_filter(adapter);
2180 		return IAVF_SUCCESS;
2181 	}
2182 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2183 		iavf_del_fdir_filter(adapter);
2184 		return IAVF_SUCCESS;
2185 	}
2186 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2187 		iavf_add_adv_rss_cfg(adapter);
2188 		return 0;
2189 	}
2190 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2191 		iavf_del_adv_rss_cfg(adapter);
2192 		return 0;
2193 	}
2194 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2195 		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2196 		return 0;
2197 	}
2198 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2199 		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2200 		return 0;
2201 	}
2202 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2203 		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2204 		return 0;
2205 	}
2206 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2207 		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2208 		return 0;
2209 	}
2210 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2211 		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2212 		return 0;
2213 	}
2214 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2215 		iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2216 		return 0;
2217 	}
2218 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2219 		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2220 		return 0;
2221 	}
2222 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2223 		iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2224 		return 0;
2225 	}
2226 	if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
2227 		iavf_virtchnl_send_ptp_cmd(adapter);
2228 		return IAVF_SUCCESS;
2229 	}
2230 	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2231 		iavf_request_stats(adapter);
2232 		return 0;
2233 	}
2234 
2235 	return -EAGAIN;
2236 }
2237 
/**
 * iavf_set_vlan_offload_features - set VLAN offload configuration
 * @adapter: board private structure
 * @prev_features: previous features used for comparison
 * @features: updated features used for configuration
 *
 * Set the aq_required bit(s) based on the requested features passed in to
 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
 * the watchdog if any changes are requested to expedite the request via
 * virtchnl.
 **/
static void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
			       netdev_features_t prev_features,
			       netdev_features_t features)
{
	bool enable_stripping = true, enable_insertion = true;
	u16 vlan_ethertype = 0;
	u64 aq_required = 0;

	/* keep cases separate because one ethertype for offloads can be
	 * disabled at the same time as another is disabled, so check for an
	 * enabled ethertype first, then check for disabled. Default to
	 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
	 * stripping.
	 */
	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
		vlan_ethertype = ETH_P_8021AD;
	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		vlan_ethertype = ETH_P_8021Q;
	else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
		vlan_ethertype = ETH_P_8021AD;
	else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		vlan_ethertype = ETH_P_8021Q;
	else
		vlan_ethertype = ETH_P_8021Q;

	/* with no RX/TX VLAN feature bit set, the offload is disabled */
	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
		enable_stripping = false;
	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
		enable_insertion = false;

	if (VLAN_ALLOWED(adapter)) {
		/* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
		 * stripping via virtchnl. VLAN insertion can be toggled on the
		 * netdev, but it doesn't require a virtchnl message
		 */
		if (enable_stripping)
			aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
		else
			aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;

	} else if (VLAN_V2_ALLOWED(adapter)) {
		/* VLAN V2 can toggle stripping and insertion per ethertype */
		switch (vlan_ethertype) {
		case ETH_P_8021Q:
			if (enable_stripping)
				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;

			if (enable_insertion)
				aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
			break;
		case ETH_P_8021AD:
			if (enable_stripping)
				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;

			if (enable_insertion)
				aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
			else
				aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
			break;
		}
	}

	/* only schedule work when something actually changed */
	if (aq_required)
		iavf_schedule_aq_request(adapter, aq_required);
}
2320 
/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_STARTUP driver state: wait for the device to
 * come out of reset, bring up the admin queue and send the API version
 * message to the PF.
 * When success the state is changed to __IAVF_INIT_VERSION_CHECK
 * when fails the state is changed to __IAVF_INIT_FAILED
 **/
static void iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	enum iavf_status status;
	int ret;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;

	ret = iavf_check_reset_complete(hw);
	if (ret) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 ret);
		goto err;
	}
	/* size the admin queues before initializing them */
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	status = iavf_init_adminq(hw);
	if (status) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
			status);
		goto err;
	}
	ret = iavf_send_api_ver(adapter);
	if (ret) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
2370 
/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_INIT_VERSION_CHECK driver state: verify the
 * PF's reply to the API version message and request the VF config.
 * When success the state is changed to __IAVF_INIT_GET_RESOURCES
 * when fails the state is changed to __IAVF_INIT_FAILED
 **/
static void iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		/* restart the whole handshake from __IAVF_STARTUP */
		iavf_change_state(adapter, __IAVF_STARTUP);
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		/* -EALREADY: an unexpected message was received; resend */
		if (err == -EALREADY)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
	return;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
2418 
/**
 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
 * @adapter: board private structure
 *
 * Locates the SR-IOV LAN VSI in the resource reply, validates that the PF
 * granted at least as many queue pairs as were requested (scheduling a
 * reset and returning -EAGAIN if not), and caches VSI and RSS sizing info.
 *
 * Return: 0 on success, -ENODEV if no LAN VSI was found, -EAGAIN if a
 * reset was scheduled to renegotiate the queue count.
 */
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
{
	int i, num_req_queues = adapter->num_req_queues;
	struct iavf_vsi *vsi = &adapter->vsi;

	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
		if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
	}
	if (!adapter->vsi_res) {
		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
		return -ENODEV;
	}

	if (num_req_queues &&
	    num_req_queues > adapter->vsi_res->num_queue_pairs) {
		/* Problem.  The PF gave us fewer queues than what we had
		 * negotiated in our request.  Need a reset to see if we can't
		 * get back to a working state.
		 */
		dev_err(&adapter->pdev->dev,
			"Requested %d queues, but PF only gave us %d.\n",
			num_req_queues,
			adapter->vsi_res->num_queue_pairs);
		adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);

		return -EAGAIN;
	}
	adapter->num_req_queues = 0;
	adapter->vsi.id = adapter->vsi_res->vsi_id;

	adapter->vsi.back = adapter;
	adapter->vsi.base_vector = 1;
	vsi->netdev = adapter->netdev;
	vsi->qs_handle = adapter->vsi_res->qset_handle;
	/* use PF-provided RSS sizes when the capability was negotiated,
	 * otherwise fall back to the driver defaults
	 */
	if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		adapter->rss_key_size = adapter->vf_res->rss_key_size;
		adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
	} else {
		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
	}

	return 0;
}
2470 
/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_INIT_GET_RESOURCES driver state and
 * finishes driver initialization procedure: receive and parse the
 * VF resource reply, then kick off extended capability negotiation.
 * When success the state is changed to __IAVF_INIT_EXTENDED_CAPS
 * when fails the state is changed to __IAVF_INIT_FAILED
 **/
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
	/* aq msg sent, awaiting reply */
	if (!adapter->vf_res) {
		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
					  GFP_KERNEL);
		if (!adapter->vf_res) {
			err = -ENOMEM;
			goto err;
		}
	}
	err = iavf_get_vf_config(adapter);
	if (err == -EALREADY) {
		/* an unexpected message arrived first; resend and retry */
		err = iavf_send_vf_config_msg(adapter);
		goto err;
	} else if (err == -EINVAL) {
		/* We only get -EINVAL if the device is in a very bad
		 * state or if we've been disabled for previous bad
		 * behavior. Either way, we're done now.
		 */
		iavf_shutdown_adminq(hw);
		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
		return;
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
		goto err_alloc;
	}

	err = iavf_parse_vf_resource_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
			err);
		goto err_alloc;
	}
	/* Some features require additional messages to negotiate extended
	 * capabilities. These are processed in sequence by the
	 * __IAVF_INIT_EXTENDED_CAPS driver state.
	 */
	adapter->extended_caps = IAVF_EXTENDED_CAPS;

	iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
	return;

err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
2535 
2536 /**
2537  * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2538  * @adapter: board private structure
2539  *
2540  * Function processes send of the extended VLAN V2 capability message to the
2541  * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2542  * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2543  */
iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter * adapter)2544 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2545 {
2546 	int ret;
2547 
2548 	WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2549 
2550 	ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2551 	if (ret && ret == -EOPNOTSUPP) {
2552 		/* PF does not support VIRTCHNL_VF_OFFLOAD_V2. In this case,
2553 		 * we did not send the capability exchange message and do not
2554 		 * expect a response.
2555 		 */
2556 		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2557 	}
2558 
2559 	/* We sent the message, so move on to the next step */
2560 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2561 }
2562 
2563 /**
2564  * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2565  * @adapter: board private structure
2566  *
2567  * Function processes receipt of the extended VLAN V2 capability message from
2568  * the PF.
2569  **/
iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter * adapter)2570 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2571 {
2572 	int ret;
2573 
2574 	WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2575 
2576 	memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2577 
2578 	ret = iavf_get_vf_vlan_v2_caps(adapter);
2579 	if (ret)
2580 		goto err;
2581 
2582 	/* We've processed receipt of the VLAN V2 caps message */
2583 	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2584 	return;
2585 err:
2586 	/* We didn't receive a reply. Make sure we try sending again when
2587 	 * __IAVF_INIT_FAILED attempts to recover.
2588 	 */
2589 	adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2590 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2591 }
2592 
/**
 * iavf_init_send_supported_rxdids - part of querying for supported RXDID
 * formats
 * @adapter: board private structure
 *
 * Function processes send of the request for supported RXDIDs to the PF.
 * Must clear IAVF_EXTENDED_CAP_RECV_RXDID if the message is not sent, e.g.
 * due to the PF not negotiating VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC.
 */
static void iavf_init_send_supported_rxdids(struct iavf_adapter *adapter)
{
	int ret;

	ret = iavf_send_vf_supported_rxdids_msg(adapter);
	if (ret == -EOPNOTSUPP) {
		/* PF does not support VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. In this
		 * case, we did not send the capability exchange message and
		 * do not expect a response.
		 */
		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
	}

	/* We sent the message, so move on to the next step */
	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID;
}
2618 
/**
 * iavf_init_recv_supported_rxdids - part of querying for supported RXDID
 * formats
 * @adapter: board private structure
 *
 * Function processes receipt of the supported RXDIDs message from the PF.
 * On failure, re-arms the send step so recovery retries the exchange.
 **/
static void iavf_init_recv_supported_rxdids(struct iavf_adapter *adapter)
{
	int ret;

	/* start from a clean slate before parsing the reply */
	memset(&adapter->supp_rxdids, 0, sizeof(adapter->supp_rxdids));

	ret = iavf_get_vf_supported_rxdids(adapter);
	if (ret)
		goto err;

	/* We've processed the PF response to the
	 * VIRTCHNL_OP_GET_SUPPORTED_RXDIDS message we sent previously.
	 */
	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
	return;

err:
	/* We didn't receive a reply. Make sure we try sending again when
	 * __IAVF_INIT_FAILED attempts to recover.
	 */
	adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_RXDID;
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
2649 
/**
 * iavf_init_send_ptp_caps - part of querying for extended PTP capabilities
 * @adapter: board private structure
 *
 * Function processes send of the request for 1588 PTP capabilities to the PF.
 * Must clear IAVF_EXTENDED_CAP_SEND_PTP if the message is not sent, e.g.
 * due to the PF not negotiating VIRTCHNL_VF_PTP_CAP
 */
static void iavf_init_send_ptp_caps(struct iavf_adapter *adapter)
{
	if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) {
		/* PF does not support VIRTCHNL_VF_PTP_CAP. In this case, we
		 * did not send the capability exchange message and do not
		 * expect a response.
		 */
		adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
	}

	/* We sent the message, so move on to the next step */
	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_PTP;
}
2671 
/**
 * iavf_init_recv_ptp_caps - part of querying for supported PTP capabilities
 * @adapter: board private structure
 *
 * Function processes receipt of the PTP capabilities supported on this VF.
 * On failure, re-arms the send step so recovery retries the exchange.
 **/
static void iavf_init_recv_ptp_caps(struct iavf_adapter *adapter)
{
	/* start from a clean slate before parsing the reply */
	memset(&adapter->ptp.hw_caps, 0, sizeof(adapter->ptp.hw_caps));

	if (iavf_get_vf_ptp_caps(adapter))
		goto err;

	/* We've processed the PF response to the VIRTCHNL_OP_1588_PTP_GET_CAPS
	 * message we sent previously.
	 */
	adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
	return;

err:
	/* We didn't receive a reply. Make sure we try sending again when
	 * __IAVF_INIT_FAILED attempts to recover.
	 */
	adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_PTP;
	iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
2698 
2699 /**
2700  * iavf_init_process_extended_caps - Part of driver startup
2701  * @adapter: board private structure
2702  *
2703  * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2704  * handles negotiating capabilities for features which require an additional
2705  * message.
2706  *
2707  * Once all extended capabilities exchanges are finished, the driver will
2708  * transition into __IAVF_INIT_CONFIG_ADAPTER.
2709  */
iavf_init_process_extended_caps(struct iavf_adapter * adapter)2710 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2711 {
2712 	WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2713 
2714 	/* Process capability exchange for VLAN V2 */
2715 	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2716 		iavf_init_send_offload_vlan_v2_caps(adapter);
2717 		return;
2718 	} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2719 		iavf_init_recv_offload_vlan_v2_caps(adapter);
2720 		return;
2721 	}
2722 
2723 	/* Process capability exchange for RXDID formats */
2724 	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) {
2725 		iavf_init_send_supported_rxdids(adapter);
2726 		return;
2727 	} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) {
2728 		iavf_init_recv_supported_rxdids(adapter);
2729 		return;
2730 	}
2731 
2732 	/* Process capability exchange for PTP features */
2733 	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_PTP) {
2734 		iavf_init_send_ptp_caps(adapter);
2735 		return;
2736 	} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_PTP) {
2737 		iavf_init_recv_ptp_caps(adapter);
2738 		return;
2739 	}
2740 
2741 	/* When we reach here, no further extended capabilities exchanges are
2742 	 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2743 	 */
2744 	iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2745 }
2746 
2747 /**
2748  * iavf_init_config_adapter - last part of driver startup
2749  * @adapter: board private structure
2750  *
2751  * After all the supported capabilities are negotiated, then the
2752  * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2753  */
iavf_init_config_adapter(struct iavf_adapter * adapter)2754 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2755 {
2756 	struct net_device *netdev = adapter->netdev;
2757 	struct pci_dev *pdev = adapter->pdev;
2758 	int err;
2759 
2760 	WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2761 
2762 	if (iavf_process_config(adapter))
2763 		goto err;
2764 
2765 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2766 
2767 	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2768 
2769 	netdev->netdev_ops = &iavf_netdev_ops;
2770 	iavf_set_ethtool_ops(netdev);
2771 	netdev->watchdog_timeo = 5 * HZ;
2772 
2773 	netdev->min_mtu = ETH_MIN_MTU;
2774 
2775 	/* PF/VF API: vf_res->max_mtu is max frame size (not MTU).
2776 	 * Convert to MTU.
2777 	 */
2778 	if (!adapter->vf_res->max_mtu) {
2779 		netdev->max_mtu = LIBIE_MAX_MTU;
2780 	} else if (adapter->vf_res->max_mtu < LIBETH_RX_LL_LEN + ETH_MIN_MTU ||
2781 		   adapter->vf_res->max_mtu >
2782 			   LIBETH_RX_LL_LEN + LIBIE_MAX_MTU) {
2783 		netdev_warn_once(adapter->netdev,
2784 				 "invalid max frame size %d from PF, using default MTU %d",
2785 				 adapter->vf_res->max_mtu, LIBIE_MAX_MTU);
2786 		netdev->max_mtu = LIBIE_MAX_MTU;
2787 	} else {
2788 		netdev->max_mtu = adapter->vf_res->max_mtu - LIBETH_RX_LL_LEN;
2789 	}
2790 
2791 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2792 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2793 			 adapter->hw.mac.addr);
2794 		eth_hw_addr_random(netdev);
2795 		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2796 	} else {
2797 		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2798 		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2799 	}
2800 
2801 	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2802 	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2803 	err = iavf_init_interrupt_scheme(adapter);
2804 	if (err)
2805 		goto err_sw_init;
2806 	iavf_map_rings_to_vectors(adapter);
2807 	if (adapter->vf_res->vf_cap_flags &
2808 		VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2809 		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2810 
2811 	err = iavf_request_misc_irq(adapter);
2812 	if (err)
2813 		goto err_sw_init;
2814 
2815 	netif_carrier_off(netdev);
2816 	adapter->link_up = false;
2817 	netif_tx_stop_all_queues(netdev);
2818 
2819 	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2820 	if (netdev->features & NETIF_F_GRO)
2821 		dev_info(&pdev->dev, "GRO is enabled\n");
2822 
2823 	iavf_change_state(adapter, __IAVF_DOWN);
2824 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2825 
2826 	iavf_misc_irq_enable(adapter);
2827 	wake_up(&adapter->down_waitqueue);
2828 
2829 	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2830 	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2831 	if (!adapter->rss_key || !adapter->rss_lut) {
2832 		err = -ENOMEM;
2833 		goto err_mem;
2834 	}
2835 	if (RSS_AQ(adapter))
2836 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2837 	else
2838 		iavf_init_rss(adapter);
2839 
2840 	if (VLAN_V2_ALLOWED(adapter))
2841 		/* request initial VLAN offload settings */
2842 		iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2843 
2844 	if (QOS_ALLOWED(adapter))
2845 		adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
2846 
2847 	/* Setup initial PTP configuration */
2848 	iavf_ptp_init(adapter);
2849 
2850 	iavf_schedule_finish_config(adapter);
2851 	return;
2852 
2853 err_mem:
2854 	iavf_free_rss(adapter);
2855 	iavf_free_misc_irq(adapter);
2856 err_sw_init:
2857 	iavf_reset_interrupt_capability(adapter);
2858 err:
2859 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2860 }
2861 
/* returned by iavf_watchdog_step() when the watchdog must not requeue */
static const int IAVF_NO_RESCHED = -1;

/**
 * iavf_watchdog_step - run one pass of the adapter state machine
 * @adapter: board private structure
 *
 * Advances the init/reset/runtime state machine by one step and, for the
 * steady states, services pending admin queue commands and checks for a
 * hardware reset. Caller must hold the netdev instance lock.
 *
 * Return: msec delay for requeueing itself, or IAVF_NO_RESCHED when the
 * watchdog task must not be rescheduled.
 */
static int iavf_watchdog_step(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 reg_val;

	netdev_assert_locked(adapter->netdev);

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		iavf_change_state(adapter, __IAVF_COMM_FAILED);

	switch (adapter->state) {
	case __IAVF_STARTUP:
		iavf_startup(adapter);
		return 30;
	case __IAVF_INIT_VERSION_CHECK:
		iavf_init_version_check(adapter);
		return 30;
	case __IAVF_INIT_GET_RESOURCES:
		iavf_init_get_resources(adapter);
		return 1;
	case __IAVF_INIT_EXTENDED_CAPS:
		iavf_init_process_extended_caps(adapter);
		return 1;
	case __IAVF_INIT_CONFIG_ADAPTER:
		iavf_init_config_adapter(adapter);
		return 1;
	case __IAVF_INIT_FAILED:
		if (test_bit(__IAVF_IN_REMOVE_TASK,
			     &adapter->crit_section)) {
			/* Do not update the state and do not reschedule
			 * watchdog task, iavf_remove should handle this state
			 * as it can loop forever
			 */
			return IAVF_NO_RESCHED;
		}
		/* after too many consecutive failures, declare the PF
		 * unreachable and back off for a long interval
		 */
		if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
			dev_err(&adapter->pdev->dev,
				"Failed to communicate with PF; waiting before retry\n");
			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
			iavf_shutdown_adminq(hw);
			return 5000;
		}
		/* Try again from the failed step */
		iavf_change_state(adapter, adapter->last_state);
		return 1000;
	case __IAVF_COMM_FAILED:
		if (test_bit(__IAVF_IN_REMOVE_TASK,
			     &adapter->crit_section)) {
			/* Set state to __IAVF_INIT_FAILED and perform remove
			 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
			 * doesn't bring the state back to __IAVF_COMM_FAILED.
			 */
			iavf_change_state(adapter, __IAVF_INIT_FAILED);
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
			return IAVF_NO_RESCHED;
		}
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
		    reg_val == VIRTCHNL_VFR_COMPLETED) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev,
				"Hardware came out of reset. Attempting reinit.\n");
			/* When init task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			iavf_change_state(adapter, __IAVF_STARTUP);
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		return 10;
	case __IAVF_RESETTING:
		return 2000;
	case __IAVF_DOWN:
	case __IAVF_DOWN_PENDING:
	case __IAVF_TESTING:
	case __IAVF_RUNNING:
		if (adapter->current_op) {
			/* an operation is outstanding; nudge the PF if the
			 * send queue hasn't completed it yet
			 */
			if (!iavf_asq_done(hw)) {
				dev_dbg(&adapter->pdev->dev,
					"Admin queue timeout\n");
				iavf_send_api_ver(adapter);
			}
		} else {
			int ret = iavf_process_aq_command(adapter);

			/* An error will be returned if no commands were
			 * processed; use this opportunity to update stats
			 * if the error isn't -ENOTSUPP
			 */
			if (ret && ret != -EOPNOTSUPP &&
			    adapter->state == __IAVF_RUNNING)
				iavf_request_stats(adapter);
		}
		if (adapter->state == __IAVF_RUNNING)
			iavf_detect_recover_hung(&adapter->vsi);
		break;
	case __IAVF_REMOVE:
	default:
		return IAVF_NO_RESCHED;
	}

	/* check for hw reset */
	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
	if (!reg_val) {
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
	}

	/* poll quickly while AQ work is pending, slowly otherwise */
	return adapter->aq_required ? 20 : 2000;
}
2980 
iavf_watchdog_task(struct work_struct * work)2981 static void iavf_watchdog_task(struct work_struct *work)
2982 {
2983 	struct iavf_adapter *adapter = container_of(work,
2984 						    struct iavf_adapter,
2985 						    watchdog_task.work);
2986 	struct net_device *netdev = adapter->netdev;
2987 	int msec_delay;
2988 
2989 	netdev_lock(netdev);
2990 	msec_delay = iavf_watchdog_step(adapter);
2991 	/* note that we schedule a different task */
2992 	if (adapter->state >= __IAVF_DOWN)
2993 		queue_work(adapter->wq, &adapter->adminq_task);
2994 
2995 	if (msec_delay != IAVF_NO_RESCHED)
2996 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2997 				   msecs_to_jiffies(msec_delay));
2998 	netdev_unlock(netdev);
2999 }
3000 
3001 /**
3002  * iavf_disable_vf - disable VF
3003  * @adapter: board private structure
3004  *
3005  * Set communication failed flag and free all resources.
3006  */
iavf_disable_vf(struct iavf_adapter * adapter)3007 static void iavf_disable_vf(struct iavf_adapter *adapter)
3008 {
3009 	struct iavf_mac_filter *f, *ftmp;
3010 	struct iavf_vlan_filter *fv, *fvtmp;
3011 	struct iavf_cloud_filter *cf, *cftmp;
3012 
3013 	netdev_assert_locked(adapter->netdev);
3014 
3015 	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
3016 
3017 	iavf_ptp_release(adapter);
3018 
3019 	/* We don't use netif_running() because it may be true prior to
3020 	 * ndo_open() returning, so we can't assume it means all our open
3021 	 * tasks have finished, since we're not holding the rtnl_lock here.
3022 	 */
3023 	if (adapter->state == __IAVF_RUNNING) {
3024 		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3025 		netif_carrier_off(adapter->netdev);
3026 		netif_tx_disable(adapter->netdev);
3027 		adapter->link_up = false;
3028 		iavf_napi_disable_all(adapter);
3029 		iavf_irq_disable(adapter);
3030 		iavf_free_traffic_irqs(adapter);
3031 		iavf_free_all_tx_resources(adapter);
3032 		iavf_free_all_rx_resources(adapter);
3033 	}
3034 
3035 	spin_lock_bh(&adapter->mac_vlan_list_lock);
3036 
3037 	/* Delete all of the filters */
3038 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3039 		list_del(&f->list);
3040 		kfree(f);
3041 	}
3042 
3043 	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
3044 		list_del(&fv->list);
3045 		kfree(fv);
3046 	}
3047 	adapter->num_vlan_filters = 0;
3048 
3049 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
3050 
3051 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3052 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3053 		list_del(&cf->list);
3054 		kfree(cf);
3055 		adapter->num_cloud_filters--;
3056 	}
3057 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3058 
3059 	iavf_free_misc_irq(adapter);
3060 	iavf_free_interrupt_scheme(adapter);
3061 	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
3062 	iavf_shutdown_adminq(&adapter->hw);
3063 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3064 	iavf_change_state(adapter, __IAVF_DOWN);
3065 	wake_up(&adapter->down_waitqueue);
3066 	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
3067 }
3068 
3069 /**
3070  * iavf_reconfig_qs_bw - Call-back task to handle hardware reset
3071  * @adapter: board private structure
3072  *
3073  * After a reset, the shaper parameters of queues need to be replayed again.
3074  * Since the net_shaper object inside TX rings persists across reset,
3075  * set the update flag for all queues so that the virtchnl message is triggered
3076  * for all queues.
3077  **/
iavf_reconfig_qs_bw(struct iavf_adapter * adapter)3078 static void iavf_reconfig_qs_bw(struct iavf_adapter *adapter)
3079 {
3080 	int i, num = 0;
3081 
3082 	for (i = 0; i < adapter->num_active_queues; i++)
3083 		if (adapter->tx_rings[i].q_shaper.bw_min ||
3084 		    adapter->tx_rings[i].q_shaper.bw_max) {
3085 			adapter->tx_rings[i].q_shaper_update = true;
3086 			num++;
3087 		}
3088 
3089 	if (num)
3090 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
3091 }
3092 
3093 /**
3094  * iavf_reset_step - Perform the VF reset sequence
3095  * @adapter: board private structure
3096  *
3097  * Requests a reset from PF, polls for completion, and reconfigures
3098  * the driver. Caller must hold the netdev instance lock.
3099  *
3100  * This can sleep for several seconds while polling HW registers.
3101  */
iavf_reset_step(struct iavf_adapter * adapter)3102 void iavf_reset_step(struct iavf_adapter *adapter)
3103 {
3104 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
3105 	struct net_device *netdev = adapter->netdev;
3106 	struct iavf_hw *hw = &adapter->hw;
3107 	struct iavf_mac_filter *f, *ftmp;
3108 	struct iavf_cloud_filter *cf;
3109 	enum iavf_status status;
3110 	u32 reg_val;
3111 	int i = 0, err;
3112 	bool running;
3113 
3114 	netdev_assert_locked(netdev);
3115 
3116 	iavf_misc_irq_disable(adapter);
3117 	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
3118 		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
3119 		/* Restart the AQ here. If we have been reset but didn't
3120 		 * detect it, or if the PF had to reinit, our AQ will be hosed.
3121 		 */
3122 		iavf_shutdown_adminq(hw);
3123 		iavf_init_adminq(hw);
3124 		iavf_request_reset(adapter);
3125 	}
3126 	adapter->flags |= IAVF_FLAG_RESET_PENDING;
3127 
3128 	/* poll until we see the reset actually happen */
3129 	for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
3130 		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
3131 			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
3132 		if (!reg_val)
3133 			break;
3134 		usleep_range(5000, 10000);
3135 	}
3136 	if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
3137 		dev_info(&adapter->pdev->dev, "Never saw reset\n");
3138 		goto continue_reset; /* act like the reset happened */
3139 	}
3140 
3141 	/* wait until the reset is complete and the PF is responding to us */
3142 	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3143 		/* sleep first to make sure a minimum wait time is met */
3144 		msleep(IAVF_RESET_WAIT_MS);
3145 
3146 		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
3147 			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3148 		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
3149 			break;
3150 	}
3151 
3152 	pci_set_master(adapter->pdev);
3153 	pci_restore_msi_state(adapter->pdev);
3154 
3155 	if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3156 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3157 			reg_val);
3158 		iavf_disable_vf(adapter);
3159 		return; /* Do not attempt to reinit. It's dead, Jim. */
3160 	}
3161 
3162 continue_reset:
3163 	/* If we are still early in the state machine, just restart. */
3164 	if (adapter->state <= __IAVF_INIT_FAILED) {
3165 		iavf_shutdown_adminq(hw);
3166 		iavf_change_state(adapter, __IAVF_STARTUP);
3167 		iavf_startup(adapter);
3168 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
3169 				   msecs_to_jiffies(30));
3170 		return;
3171 	}
3172 
3173 	/* We don't use netif_running() because it may be true prior to
3174 	 * ndo_open() returning, so we can't assume it means all our open
3175 	 * tasks have finished, since we're not holding the rtnl_lock here.
3176 	 */
3177 	running = adapter->state == __IAVF_RUNNING;
3178 
3179 	if (running) {
3180 		netif_carrier_off(netdev);
3181 		netif_tx_stop_all_queues(netdev);
3182 		adapter->link_up = false;
3183 		iavf_napi_disable_all(adapter);
3184 	}
3185 	iavf_irq_disable(adapter);
3186 
3187 	iavf_change_state(adapter, __IAVF_RESETTING);
3188 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3189 
3190 	iavf_ptp_release(adapter);
3191 
3192 	/* free the Tx/Rx rings and descriptors, might be better to just
3193 	 * re-use them sometime in the future
3194 	 */
3195 	iavf_free_all_rx_resources(adapter);
3196 	iavf_free_all_tx_resources(adapter);
3197 
3198 	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3199 	/* kill and reinit the admin queue */
3200 	iavf_shutdown_adminq(hw);
3201 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3202 	status = iavf_init_adminq(hw);
3203 	if (status) {
3204 		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3205 			 status);
3206 		goto reset_err;
3207 	}
3208 	adapter->aq_required = 0;
3209 
3210 	if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3211 	    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3212 		err = iavf_reinit_interrupt_scheme(adapter, running);
3213 		if (err)
3214 			goto reset_err;
3215 	}
3216 
3217 	if (RSS_AQ(adapter)) {
3218 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3219 	} else {
3220 		err = iavf_init_rss(adapter);
3221 		if (err)
3222 			goto reset_err;
3223 	}
3224 
3225 	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3226 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3227 
3228 	/* Certain capabilities require an extended negotiation process using
3229 	 * extra messages that must be processed after getting the VF
3230 	 * configuration. The related checks such as VLAN_V2_ALLOWED() are not
3231 	 * reliable here, since the configuration has not yet been negotiated.
3232 	 *
3233 	 * Always set these flags, since them related VIRTCHNL messages won't
3234 	 * be sent until after VIRTCHNL_OP_GET_VF_RESOURCES.
3235 	 */
3236 	adapter->aq_required |= IAVF_FLAG_AQ_EXTENDED_CAPS;
3237 
3238 	spin_lock_bh(&adapter->mac_vlan_list_lock);
3239 
3240 	/* Delete filter for the current MAC address, it could have
3241 	 * been changed by the PF via administratively set MAC.
3242 	 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3243 	 */
3244 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3245 		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3246 			list_del(&f->list);
3247 			kfree(f);
3248 		}
3249 	}
3250 	/* re-add all MAC filters */
3251 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
3252 		f->add = true;
3253 	}
3254 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
3255 
3256 	/* check if TCs are running and re-add all cloud filters */
3257 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3258 	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3259 	    adapter->num_tc) {
3260 		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3261 			cf->add = true;
3262 		}
3263 	}
3264 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3265 
3266 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3267 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3268 	iavf_misc_irq_enable(adapter);
3269 
3270 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
3271 
3272 	/* We were running when the reset started, so we need to restore some
3273 	 * state here.
3274 	 */
3275 	if (running) {
3276 		/* allocate transmit descriptors */
3277 		err = iavf_setup_all_tx_resources(adapter);
3278 		if (err)
3279 			goto reset_err;
3280 
3281 		/* allocate receive descriptors */
3282 		err = iavf_setup_all_rx_resources(adapter);
3283 		if (err)
3284 			goto reset_err;
3285 
3286 		if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3287 		    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3288 			err = iavf_request_traffic_irqs(adapter, netdev->name);
3289 			if (err)
3290 				goto reset_err;
3291 
3292 			adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3293 		}
3294 
3295 		iavf_configure(adapter);
3296 
3297 		/* iavf_up_complete() will switch device back
3298 		 * to __IAVF_RUNNING
3299 		 */
3300 		iavf_up_complete(adapter);
3301 
3302 		iavf_irq_enable(adapter, true);
3303 
3304 		iavf_reconfig_qs_bw(adapter);
3305 	} else {
3306 		iavf_change_state(adapter, __IAVF_DOWN);
3307 		wake_up(&adapter->down_waitqueue);
3308 	}
3309 
3310 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3311 
3312 	return;
3313 reset_err:
3314 	if (running) {
3315 		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3316 		iavf_free_traffic_irqs(adapter);
3317 	}
3318 	iavf_disable_vf(adapter);
3319 
3320 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3321 }
3322 
iavf_reset_task(struct work_struct * work)3323 static void iavf_reset_task(struct work_struct *work)
3324 {
3325 	struct iavf_adapter *adapter = container_of(work,
3326 						      struct iavf_adapter,
3327 						      reset_task);
3328 	struct net_device *netdev = adapter->netdev;
3329 
3330 	netdev_lock(netdev);
3331 	iavf_reset_step(adapter);
3332 	netdev_unlock(netdev);
3333 }
3334 
3335 /**
3336  * iavf_adminq_task - worker thread to clean the admin queue
3337  * @work: pointer to work_struct containing our data
3338  **/
iavf_adminq_task(struct work_struct * work)3339 static void iavf_adminq_task(struct work_struct *work)
3340 {
3341 	struct iavf_adapter *adapter =
3342 		container_of(work, struct iavf_adapter, adminq_task);
3343 	struct net_device *netdev = adapter->netdev;
3344 	struct iavf_hw *hw = &adapter->hw;
3345 	struct iavf_arq_event_info event;
3346 	enum virtchnl_ops v_op;
3347 	enum iavf_status ret, v_ret;
3348 	u32 val, oldval;
3349 	u16 pending;
3350 
3351 	netdev_lock(netdev);
3352 
3353 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3354 		goto unlock;
3355 
3356 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3357 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3358 	if (!event.msg_buf)
3359 		goto unlock;
3360 
3361 	do {
3362 		ret = iavf_clean_arq_element(hw, &event, &pending);
3363 		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3364 		v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3365 
3366 		if (ret || !v_op)
3367 			break; /* No event to process or error cleaning ARQ */
3368 
3369 		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3370 					 event.msg_len);
3371 		if (pending != 0)
3372 			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3373 	} while (pending);
3374 
3375 	if (iavf_is_reset_in_progress(adapter))
3376 		goto freedom;
3377 
3378 	/* check for error indications */
3379 	val = rd32(hw, IAVF_VF_ARQLEN1);
3380 	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3381 		goto freedom;
3382 	oldval = val;
3383 	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3384 		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3385 		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3386 	}
3387 	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3388 		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3389 		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3390 	}
3391 	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3392 		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3393 		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3394 	}
3395 	if (oldval != val)
3396 		wr32(hw, IAVF_VF_ARQLEN1, val);
3397 
3398 	val = rd32(hw, IAVF_VF_ATQLEN1);
3399 	oldval = val;
3400 	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3401 		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3402 		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3403 	}
3404 	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3405 		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3406 		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3407 	}
3408 	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3409 		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3410 		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3411 	}
3412 	if (oldval != val)
3413 		wr32(hw, IAVF_VF_ATQLEN1, val);
3414 
3415 freedom:
3416 	kfree(event.msg_buf);
3417 unlock:
3418 	netdev_unlock(netdev);
3419 	/* re-enable Admin queue interrupt cause */
3420 	iavf_misc_irq_enable(adapter);
3421 }
3422 
3423 /**
3424  * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3425  * @adapter: board private structure
3426  *
3427  * Free all transmit software resources
3428  **/
iavf_free_all_tx_resources(struct iavf_adapter * adapter)3429 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3430 {
3431 	int i;
3432 
3433 	if (!adapter->tx_rings)
3434 		return;
3435 
3436 	for (i = 0; i < adapter->num_active_queues; i++)
3437 		if (adapter->tx_rings[i].desc)
3438 			iavf_free_tx_resources(&adapter->tx_rings[i]);
3439 }
3440 
3441 /**
3442  * iavf_setup_all_tx_resources - allocate all queues Tx resources
3443  * @adapter: board private structure
3444  *
3445  * If this function returns with an error, then it's possible one or
3446  * more of the rings is populated (while the rest are not).  It is the
3447  * callers duty to clean those orphaned rings.
3448  *
3449  * Return 0 on success, negative on failure
3450  **/
iavf_setup_all_tx_resources(struct iavf_adapter * adapter)3451 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3452 {
3453 	int i, err = 0;
3454 
3455 	for (i = 0; i < adapter->num_active_queues; i++) {
3456 		adapter->tx_rings[i].count = adapter->tx_desc_count;
3457 		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3458 		if (!err)
3459 			continue;
3460 		dev_err(&adapter->pdev->dev,
3461 			"Allocation for Tx Queue %u failed\n", i);
3462 		break;
3463 	}
3464 
3465 	return err;
3466 }
3467 
3468 /**
3469  * iavf_setup_all_rx_resources - allocate all queues Rx resources
3470  * @adapter: board private structure
3471  *
3472  * If this function returns with an error, then it's possible one or
3473  * more of the rings is populated (while the rest are not).  It is the
3474  * callers duty to clean those orphaned rings.
3475  *
3476  * Return 0 on success, negative on failure
3477  **/
iavf_setup_all_rx_resources(struct iavf_adapter * adapter)3478 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3479 {
3480 	int i, err = 0;
3481 
3482 	for (i = 0; i < adapter->num_active_queues; i++) {
3483 		adapter->rx_rings[i].count = adapter->rx_desc_count;
3484 		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3485 		if (!err)
3486 			continue;
3487 		dev_err(&adapter->pdev->dev,
3488 			"Allocation for Rx Queue %u failed\n", i);
3489 		break;
3490 	}
3491 	return err;
3492 }
3493 
3494 /**
3495  * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3496  * @adapter: board private structure
3497  *
3498  * Free all receive software resources
3499  **/
iavf_free_all_rx_resources(struct iavf_adapter * adapter)3500 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3501 {
3502 	int i;
3503 
3504 	if (!adapter->rx_rings)
3505 		return;
3506 
3507 	for (i = 0; i < adapter->num_active_queues; i++)
3508 		if (adapter->rx_rings[i].desc)
3509 			iavf_free_rx_resources(&adapter->rx_rings[i]);
3510 }
3511 
3512 /**
3513  * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3514  * @adapter: board private structure
3515  * @max_tx_rate: max Tx bw for a tc
3516  **/
iavf_validate_tx_bandwidth(struct iavf_adapter * adapter,u64 max_tx_rate)3517 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3518 				      u64 max_tx_rate)
3519 {
3520 	int speed = 0, ret = 0;
3521 
3522 	if (ADV_LINK_SUPPORT(adapter)) {
3523 		if (adapter->link_speed_mbps < U32_MAX) {
3524 			speed = adapter->link_speed_mbps;
3525 			goto validate_bw;
3526 		} else {
3527 			dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3528 			return -EINVAL;
3529 		}
3530 	}
3531 
3532 	switch (adapter->link_speed) {
3533 	case VIRTCHNL_LINK_SPEED_40GB:
3534 		speed = SPEED_40000;
3535 		break;
3536 	case VIRTCHNL_LINK_SPEED_25GB:
3537 		speed = SPEED_25000;
3538 		break;
3539 	case VIRTCHNL_LINK_SPEED_20GB:
3540 		speed = SPEED_20000;
3541 		break;
3542 	case VIRTCHNL_LINK_SPEED_10GB:
3543 		speed = SPEED_10000;
3544 		break;
3545 	case VIRTCHNL_LINK_SPEED_5GB:
3546 		speed = SPEED_5000;
3547 		break;
3548 	case VIRTCHNL_LINK_SPEED_2_5GB:
3549 		speed = SPEED_2500;
3550 		break;
3551 	case VIRTCHNL_LINK_SPEED_1GB:
3552 		speed = SPEED_1000;
3553 		break;
3554 	case VIRTCHNL_LINK_SPEED_100MB:
3555 		speed = SPEED_100;
3556 		break;
3557 	default:
3558 		break;
3559 	}
3560 
3561 validate_bw:
3562 	if (max_tx_rate > speed) {
3563 		dev_err(&adapter->pdev->dev,
3564 			"Invalid tx rate specified\n");
3565 		ret = -EINVAL;
3566 	}
3567 
3568 	return ret;
3569 }
3570 
3571 /**
3572  * iavf_validate_ch_config - validate queue mapping info
3573  * @adapter: board private structure
3574  * @mqprio_qopt: queue parameters
3575  *
3576  * This function validates if the config provided by the user to
3577  * configure queue channels is valid or not. Returns 0 on a valid
3578  * config.
3579  **/
iavf_validate_ch_config(struct iavf_adapter * adapter,struct tc_mqprio_qopt_offload * mqprio_qopt)3580 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3581 				   struct tc_mqprio_qopt_offload *mqprio_qopt)
3582 {
3583 	u64 total_max_rate = 0;
3584 	u32 tx_rate_rem = 0;
3585 	int i, num_qps = 0;
3586 	u64 tx_rate = 0;
3587 	int ret = 0;
3588 
3589 	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3590 	    mqprio_qopt->qopt.num_tc < 1)
3591 		return -EINVAL;
3592 
3593 	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
3594 		if (!mqprio_qopt->qopt.count[i] ||
3595 		    mqprio_qopt->qopt.offset[i] != num_qps)
3596 			return -EINVAL;
3597 		if (mqprio_qopt->min_rate[i]) {
3598 			dev_err(&adapter->pdev->dev,
3599 				"Invalid min tx rate (greater than 0) specified for TC%d\n",
3600 				i);
3601 			return -EINVAL;
3602 		}
3603 
3604 		/* convert to Mbps */
3605 		tx_rate = div_u64(mqprio_qopt->max_rate[i],
3606 				  IAVF_MBPS_DIVISOR);
3607 
3608 		if (mqprio_qopt->max_rate[i] &&
3609 		    tx_rate < IAVF_MBPS_QUANTA) {
3610 			dev_err(&adapter->pdev->dev,
3611 				"Invalid max tx rate for TC%d, minimum %dMbps\n",
3612 				i, IAVF_MBPS_QUANTA);
3613 			return -EINVAL;
3614 		}
3615 
3616 		(void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3617 
3618 		if (tx_rate_rem != 0) {
3619 			dev_err(&adapter->pdev->dev,
3620 				"Invalid max tx rate for TC%d, not divisible by %d\n",
3621 				i, IAVF_MBPS_QUANTA);
3622 			return -EINVAL;
3623 		}
3624 
3625 		total_max_rate += tx_rate;
3626 		num_qps += mqprio_qopt->qopt.count[i];
3627 	}
3628 	if (num_qps > adapter->num_active_queues) {
3629 		dev_err(&adapter->pdev->dev,
3630 			"Cannot support requested number of queues\n");
3631 		return -EINVAL;
3632 	}
3633 
3634 	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3635 	return ret;
3636 }
3637 
3638 /**
3639  * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3640  * @adapter: board private structure
3641  **/
iavf_del_all_cloud_filters(struct iavf_adapter * adapter)3642 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3643 {
3644 	struct iavf_cloud_filter *cf, *cftmp;
3645 
3646 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3647 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3648 				 list) {
3649 		list_del(&cf->list);
3650 		kfree(cf);
3651 		adapter->num_cloud_filters--;
3652 	}
3653 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3654 }
3655 
3656 /**
3657  * iavf_is_tc_config_same - Compare the mqprio TC config with the
3658  * TC config already configured on this adapter.
3659  * @adapter: board private structure
3660  * @mqprio_qopt: TC config received from kernel.
3661  *
3662  * This function compares the TC config received from the kernel
3663  * with the config already configured on the adapter.
3664  *
3665  * Return: True if configuration is same, false otherwise.
3666  **/
iavf_is_tc_config_same(struct iavf_adapter * adapter,struct tc_mqprio_qopt * mqprio_qopt)3667 static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
3668 				   struct tc_mqprio_qopt *mqprio_qopt)
3669 {
3670 	struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
3671 	int i;
3672 
3673 	if (adapter->num_tc != mqprio_qopt->num_tc)
3674 		return false;
3675 
3676 	for (i = 0; i < adapter->num_tc; i++) {
3677 		if (ch[i].count != mqprio_qopt->count[i] ||
3678 		    ch[i].offset != mqprio_qopt->offset[i])
3679 			return false;
3680 	}
3681 	return true;
3682 }
3683 
/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data (struct tc_mqprio_qopt_offload)
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to setup traffic classes.
 *
 * Returns 0 on success.
 **/
static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	u8 num_tc = 0, total_qps = 0;
	int ret = 0, netdev_tc = 0;
	u64 max_tx_rate;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	mode = mqprio_qopt->mode;

	/* delete queue_channel */
	if (!mqprio_qopt->qopt.hw) {
		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
			netdev_reset_tc(netdev);
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			/* NOTE(review): plain assignment discards any other
			 * pending aq_required bits (contrast with the |= used
			 * for ENABLE_CHANNELS below) — confirm intentional.
			 */
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			/* restore the pre-ADQ queue count on the way out */
			total_qps = adapter->orig_num_active_queues;
			goto exit;
		} else {
			return -EINVAL;
		}
	}

	/* add queue channel */
	if (mode == TC_MQPRIO_MODE_CHANNEL) {
		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
			dev_err(&adapter->pdev->dev, "ADq not supported\n");
			return -EOPNOTSUPP;
		}
		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
			return -EINVAL;
		}

		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
		if (ret)
			return ret;
		/* Return if same TC config is requested */
		if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
			return 0;
		adapter->num_tc = num_tc;

		/* Record per-TC queue count/offset and max Tx rate; unused
		 * TCs get a single-queue placeholder at offset 0.
		 */
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			if (i < num_tc) {
				adapter->ch_config.ch_info[i].count =
					mqprio_qopt->qopt.count[i];
				adapter->ch_config.ch_info[i].offset =
					mqprio_qopt->qopt.offset[i];
				total_qps += mqprio_qopt->qopt.count[i];
				max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
				max_tx_rate = div_u64(max_tx_rate,
						      IAVF_MBPS_DIVISOR);
				adapter->ch_config.ch_info[i].max_tx_rate =
					max_tx_rate;
			} else {
				adapter->ch_config.ch_info[i].count = 1;
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}

		/* Take snapshot of original config such as "num_active_queues"
		 * It is used later when delete ADQ flow is exercised, so that
		 * once delete ADQ flow completes, VF shall go back to its
		 * original queue configuration
		 */

		adapter->orig_num_active_queues = adapter->num_active_queues;

		/* Store queue info based on TC so that VF gets configured
		 * with correct number of queues when VF completes ADQ config
		 * flow
		 */
		adapter->ch_config.total_qps = total_qps;

		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
		netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
		netdev_set_num_tc(adapter->netdev, num_tc);
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			u16 qcount = mqprio_qopt->qopt.count[i];
			u16 qoffset = mqprio_qopt->qopt.offset[i];

			if (i < num_tc)
				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
						    qoffset);
		}
	}
exit:
	/* NOTE(review): when qopt.hw is set but mode is not
	 * TC_MQPRIO_MODE_CHANNEL we reach here with total_qps == 0; the
	 * return values of netif_set_real_num_*_queues() are also ignored —
	 * confirm both are intended.
	 */
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return 0;

	netif_set_real_num_rx_queues(netdev, total_qps);
	netif_set_real_num_tx_queues(netdev, total_qps);

	return ret;
}
3802 
/**
 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
 * @adapter: board private structure
 * @f: pointer to struct flow_cls_offload
 * @filter: pointer to cloud filter structure
 *
 * Translates a flower classifier rule into the virtchnl_filter embedded in
 * @filter (filter->f) for submission to the PF.  Only TCP over IPv4/IPv6 is
 * accepted, and each matched field must use an exact (all-ones) mask.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported dissector keys or
 * control flags, -EINVAL for unsupported masks/values.
 */
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
				 struct flow_cls_offload *f,
				 struct iavf_cloud_filter *filter)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u8 field_flags = 0;
	u16 addr_type = 0;
	u16 n_proto = 0;
	int i = 0;
	struct virtchnl_filter *vf = &filter->f;

	/* Reject rules that use any dissector key outside the supported set */
	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%llx\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		/* a non-zero keyid mask means the rule matches on tenant id */
		if (match.mask->keyid != 0)
			field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		/* ETH_P_ALL means "any ethertype"; treat it as a wildcard */
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		n_proto = n_proto_key & n_proto_mask;
		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
			return -EINVAL;
		if (n_proto == ETH_P_IPV6) {
			/* specify flow type as TCP IPv6 */
			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
		}

		if (match.key->ip_proto != IPPROTO_TCP) {
			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
			return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(match.mask->dst)) {
			if (is_broadcast_ether_addr(match.mask->dst)) {
				field_flags |= IAVF_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
					match.mask->dst);
				return -EINVAL;
			}
		}

		if (!is_zero_ether_addr(match.mask->src)) {
			if (is_broadcast_ether_addr(match.mask->src)) {
				field_flags |= IAVF_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
					match.mask->src);
				return -EINVAL;
			}
		}

		if (!is_zero_ether_addr(match.key->dst))
			if (is_valid_ether_addr(match.key->dst) ||
			    is_multicast_ether_addr(match.key->dst)) {
				/* set the mask if a valid dst_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.dst_mac,
						match.key->dst);
			}

		if (!is_zero_ether_addr(match.key->src))
			if (is_valid_ether_addr(match.key->src) ||
			    is_multicast_ether_addr(match.key->src)) {
				/* set the mask if a valid src_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.src_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.src_mac,
						match.key->src);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= IAVF_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
					match.mask->vlan_id);
				return -EINVAL;
			}
		}
		/* the VLAN id/mask is programmed even when the rule's vlan_id
		 * mask is zero (any VLAN key present)
		 */
		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
		vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* matching on control flags (e.g. fragments) not supported */
		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
					be32_to_cpu(match.mask->dst));
				return -EINVAL;
			}
		}

		if (match.mask->src) {
			if (match.mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(match.mask->src));
				return -EINVAL;
			}
		}

		if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
			return -EINVAL;
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.dst_ip[0] = match.key->dst;
		}
		if (match.key->src) {
			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.src_ip[0] = match.key->src;
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		/* validate mask, make sure it is not IPV6_ADDR_ANY */
		if (ipv6_addr_any(&match.mask->dst)) {
			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
				IPV6_ADDR_ANY);
			return -EINVAL;
		}

		/* src and dest IPv6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			dev_err(&adapter->pdev->dev,
				"ipv6 addr should not be loopback\n");
			return -EINVAL;
		}
		/* NOTE(review): the first operand is always true here since
		 * an all-zero dst mask already returned -EINVAL above.
		 */
		if (!ipv6_addr_any(&match.mask->dst) ||
		    !ipv6_addr_any(&match.mask->src))
			field_flags |= IAVF_CLOUD_FIELD_IIP;

		/* program full 128-bit masks and copy both addresses */
		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
		       sizeof(vf->data.tcp_spec.dst_ip));
		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
		       sizeof(vf->data.tcp_spec.src_ip));
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (match.mask->src) {
			if (match.mask->src == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
					be16_to_cpu(match.mask->src));
				return -EINVAL;
			}
		}

		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
					be16_to_cpu(match.mask->dst));
				return -EINVAL;
			}
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.dst_port = match.key->dst;
		}

		if (match.key->src) {
			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.src_port = match.key->src;
		}
	}
	vf->field_flags = field_flags;

	return 0;
}
4054 
4055 /**
4056  * iavf_handle_tclass - Forward to a traffic class on the device
4057  * @adapter: board private structure
4058  * @tc: traffic class index on the device
4059  * @filter: pointer to cloud filter structure
4060  */
iavf_handle_tclass(struct iavf_adapter * adapter,u32 tc,struct iavf_cloud_filter * filter)4061 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
4062 			      struct iavf_cloud_filter *filter)
4063 {
4064 	if (tc == 0)
4065 		return 0;
4066 	if (tc < adapter->num_tc) {
4067 		if (!filter->f.data.tcp_spec.dst_port) {
4068 			dev_err(&adapter->pdev->dev,
4069 				"Specify destination port to redirect to traffic class other than TC0\n");
4070 			return -EINVAL;
4071 		}
4072 	}
4073 	/* redirect to a traffic class on the same device */
4074 	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
4075 	filter->f.action_meta = tc;
4076 	return 0;
4077 }
4078 
4079 /**
4080  * iavf_find_cf - Find the cloud filter in the list
4081  * @adapter: Board private structure
4082  * @cookie: filter specific cookie
4083  *
4084  * Returns ptr to the filter object or NULL. Must be called while holding the
4085  * cloud_filter_list_lock.
4086  */
iavf_find_cf(struct iavf_adapter * adapter,unsigned long * cookie)4087 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
4088 					      unsigned long *cookie)
4089 {
4090 	struct iavf_cloud_filter *filter = NULL;
4091 
4092 	if (!cookie)
4093 		return NULL;
4094 
4095 	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
4096 		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
4097 			return filter;
4098 	}
4099 	return NULL;
4100 }
4101 
/**
 * iavf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct flow_cls_offload
 *
 * Allocates a cloud filter, parses the flower rule into it, and queues an
 * ADD_CLOUD_FILTER request for the PF.  On any failure the (still
 * unlinked) filter is freed before returning.
 *
 * Returns 0 on success or a negative errno.
 */
static int iavf_configure_clsflower(struct iavf_adapter *adapter,
				    struct flow_cls_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	struct iavf_cloud_filter *filter;
	int err;

	if (tc < 0) {
		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	filter = kzalloc_obj(*filter);
	if (!filter)
		return -ENOMEM;
	filter->cookie = cls_flower->cookie;

	/* hold the netdev (instance) lock across parse and list insertion */
	netdev_lock(adapter->netdev);

	/* bail out here if filter already exists */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if (iavf_find_cf(adapter, &cls_flower->cookie)) {
		dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
		err = -EEXIST;
		goto spin_unlock;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	/* set the mask to all zeroes to begin with */
	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
	if (err)
		goto err;

	err = iavf_handle_tclass(adapter, tc, filter);
	if (err)
		goto err;

	/* add filter to the list */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_add_tail(&filter->list, &adapter->cloud_filter_list);
	adapter->num_cloud_filters++;
	filter->add = true;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
spin_unlock:
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
	/* every failure path arrives here with err != 0 and the filter not
	 * yet on the list, so freeing it is safe; on success err == 0
	 */
	if (err)
		kfree(filter);

	netdev_unlock(adapter->netdev);
	return err;
}
4162 
4163 /**
4164  * iavf_delete_clsflower - Remove tc flower filters
4165  * @adapter: board private structure
4166  * @cls_flower: Pointer to struct flow_cls_offload
4167  */
iavf_delete_clsflower(struct iavf_adapter * adapter,struct flow_cls_offload * cls_flower)4168 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4169 				 struct flow_cls_offload *cls_flower)
4170 {
4171 	struct iavf_cloud_filter *filter = NULL;
4172 	int err = 0;
4173 
4174 	spin_lock_bh(&adapter->cloud_filter_list_lock);
4175 	filter = iavf_find_cf(adapter, &cls_flower->cookie);
4176 	if (filter) {
4177 		filter->del = true;
4178 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
4179 	} else {
4180 		err = -EINVAL;
4181 	}
4182 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
4183 
4184 	return err;
4185 }
4186 
4187 /**
4188  * iavf_setup_tc_cls_flower - flower classifier offloads
4189  * @adapter: pointer to iavf adapter structure
4190  * @cls_flower: pointer to flow_cls_offload struct with flow info
4191  */
iavf_setup_tc_cls_flower(struct iavf_adapter * adapter,struct flow_cls_offload * cls_flower)4192 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4193 				    struct flow_cls_offload *cls_flower)
4194 {
4195 	switch (cls_flower->command) {
4196 	case FLOW_CLS_REPLACE:
4197 		return iavf_configure_clsflower(adapter, cls_flower);
4198 	case FLOW_CLS_DESTROY:
4199 		return iavf_delete_clsflower(adapter, cls_flower);
4200 	case FLOW_CLS_STATS:
4201 		return -EOPNOTSUPP;
4202 	default:
4203 		return -EOPNOTSUPP;
4204 	}
4205 }
4206 
/**
 * iavf_add_cls_u32 - Add U32 classifier offloads
 * @adapter: pointer to iavf adapter structure
 * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
 *
 * Converts a u32 knode into a raw-pattern FDIR rule (virtchnl) and hands
 * it to iavf_fdir_add_fltr(), which then owns the allocation.  Only one
 * action is supported: redirect to an Rx queue or drop.
 *
 * Return: 0 on success or negative errno on failure.
 */
static int iavf_add_cls_u32(struct iavf_adapter *adapter,
			    struct tc_cls_u32_offload *cls_u32)
{
	struct netlink_ext_ack *extack = cls_u32->common.extack;
	struct virtchnl_fdir_rule *rule_cfg;
	struct virtchnl_filter_action *vact;
	struct virtchnl_proto_hdrs *hdrs;
	struct ethhdr *spec_h, *mask_h;
	const struct tc_action *act;
	struct iavf_fdir_fltr *fltr;
	struct tcf_exts *exts;
	unsigned int q_index;
	int i, status = 0;
	int off_base = 0;

	/* hierarchical (linked) u32 tables are not offloadable */
	if (cls_u32->knode.link_handle) {
		NL_SET_ERR_MSG_MOD(extack, "Linking not supported");
		return -EOPNOTSUPP;
	}

	fltr = kzalloc_obj(*fltr);
	if (!fltr)
		return -ENOMEM;

	rule_cfg = &fltr->vc_add_msg.rule_cfg;
	hdrs = &rule_cfg->proto_hdrs;
	hdrs->count = 0;

	/* The parser lib at the PF expects the packet starting with MAC hdr */
	switch (ntohs(cls_u32->common.protocol)) {
	case ETH_P_802_3:
		/* keys already start at the MAC header; no synthetic header */
		break;
	case ETH_P_IP:
		/* prepend a synthetic Ethernet header matching IPv4, and
		 * shift all key offsets past it
		 */
		spec_h = (struct ethhdr *)hdrs->raw.spec;
		mask_h = (struct ethhdr *)hdrs->raw.mask;
		spec_h->h_proto = htons(ETH_P_IP);
		mask_h->h_proto = htons(0xFFFF);
		off_base += ETH_HLEN;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported");
		status = -EOPNOTSUPP;
		goto free_alloc;
	}

	/* copy each 32-bit key value/mask into the raw pattern buffers */
	for (i = 0; i < cls_u32->knode.sel->nkeys; i++) {
		__be32 val, mask;
		int off;

		off = off_base + cls_u32->knode.sel->keys[i].off;
		val = cls_u32->knode.sel->keys[i].val;
		mask = cls_u32->knode.sel->keys[i].mask;

		/* NOTE(review): this bounds check relies on key offsets being
		 * 4-byte aligned; an off of sizeof(spec) - 1 would pass yet
		 * the 4-byte memcpy below would overrun — TODO confirm u32
		 * key offsets are always word-aligned.
		 */
		if (off >= sizeof(hdrs->raw.spec)) {
			NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed.");
			status = -EINVAL;
			goto free_alloc;
		}

		memcpy(&hdrs->raw.spec[off], &val, sizeof(val));
		memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
		hdrs->raw.pkt_len = off + sizeof(val);
	}

	/* Only one action is allowed */
	rule_cfg->action_set.count = 1;
	vact = &rule_cfg->action_set.actions[0];
	exts = cls_u32->knode.exts;

	tcf_exts_for_each_action(i, act, exts) {
		/* FDIR queue */
		if (is_tcf_skbedit_rx_queue_mapping(act)) {
			q_index = tcf_skbedit_rx_queue_mapping(act);
			if (q_index >= adapter->num_active_queues) {
				status = -EINVAL;
				goto free_alloc;
			}

			vact->type = VIRTCHNL_ACTION_QUEUE;
			vact->act_conf.queue.index = q_index;
			break;
		}

		/* Drop */
		if (is_tcf_gact_shot(act)) {
			vact->type = VIRTCHNL_ACTION_DROP;
			break;
		}

		/* Unsupported action */
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action.");
		status = -EOPNOTSUPP;
		goto free_alloc;
	}

	/* hand ownership of fltr to the FDIR layer */
	fltr->vc_add_msg.vsi_id = adapter->vsi.id;
	fltr->cls_u32_handle = cls_u32->knode.handle;
	return iavf_fdir_add_fltr(adapter, fltr);

free_alloc:
	kfree(fltr);
	return status;
}
4317 
/**
 * iavf_del_cls_u32 - Delete U32 classifier offloads
 * @adapter: pointer to iavf adapter structure
 * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
 *
 * Thin wrapper removing the FDIR filter created for this u32 knode,
 * identified by its handle.  (The "true" argument presumably selects
 * lookup-by-u32-handle in iavf_fdir_del_fltr() — confirm against its
 * definition.)
 *
 * Return: 0 on success or negative errno on failure.
 */
static int iavf_del_cls_u32(struct iavf_adapter *adapter,
			    struct tc_cls_u32_offload *cls_u32)
{
	return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle);
}
4330 
4331 /**
4332  * iavf_setup_tc_cls_u32 - U32 filter offloads
4333  * @adapter: pointer to iavf adapter structure
4334  * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
4335  *
4336  * Return: 0 on success or negative errno on failure.
4337  */
iavf_setup_tc_cls_u32(struct iavf_adapter * adapter,struct tc_cls_u32_offload * cls_u32)4338 static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter,
4339 				 struct tc_cls_u32_offload *cls_u32)
4340 {
4341 	if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter))
4342 		return -EOPNOTSUPP;
4343 
4344 	switch (cls_u32->command) {
4345 	case TC_CLSU32_NEW_KNODE:
4346 	case TC_CLSU32_REPLACE_KNODE:
4347 		return iavf_add_cls_u32(adapter, cls_u32);
4348 	case TC_CLSU32_DELETE_KNODE:
4349 		return iavf_del_cls_u32(adapter, cls_u32);
4350 	default:
4351 		return -EOPNOTSUPP;
4352 	}
4353 }
4354 
4355 /**
4356  * iavf_setup_tc_block_cb - block callback for tc
4357  * @type: type of offload
4358  * @type_data: offload data
4359  * @cb_priv:
4360  *
4361  * This function is the block callback for traffic classes
4362  **/
iavf_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)4363 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4364 				  void *cb_priv)
4365 {
4366 	struct iavf_adapter *adapter = cb_priv;
4367 
4368 	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4369 		return -EOPNOTSUPP;
4370 
4371 	switch (type) {
4372 	case TC_SETUP_CLSFLOWER:
4373 		return iavf_setup_tc_cls_flower(cb_priv, type_data);
4374 	case TC_SETUP_CLSU32:
4375 		return iavf_setup_tc_cls_u32(cb_priv, type_data);
4376 	default:
4377 		return -EOPNOTSUPP;
4378 	}
4379 }
4380 
/* list of flow block callbacks registered via flow_block_cb_setup_simple()
 * in iavf_setup_tc() below
 */
static LIST_HEAD(iavf_block_cb_list);
4382 
4383 /**
4384  * iavf_setup_tc - configure multiple traffic classes
4385  * @netdev: network interface device structure
4386  * @type: type of offload
4387  * @type_data: tc offload data
4388  *
4389  * This function is the callback to ndo_setup_tc in the
4390  * netdev_ops.
4391  *
4392  * Returns 0 on success
4393  **/
iavf_setup_tc(struct net_device * netdev,enum tc_setup_type type,void * type_data)4394 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4395 			 void *type_data)
4396 {
4397 	struct iavf_adapter *adapter = netdev_priv(netdev);
4398 
4399 	switch (type) {
4400 	case TC_SETUP_QDISC_MQPRIO:
4401 		return __iavf_setup_tc(netdev, type_data);
4402 	case TC_SETUP_BLOCK:
4403 		return flow_block_cb_setup_simple(type_data,
4404 						  &iavf_block_cb_list,
4405 						  iavf_setup_tc_block_cb,
4406 						  adapter, adapter, true);
4407 	default:
4408 		return -EOPNOTSUPP;
4409 	}
4410 }
4411 
4412 /**
4413  * iavf_restore_fdir_filters
4414  * @adapter: board private structure
4415  *
4416  * Restore existing FDIR filters when VF netdev comes back up.
4417  **/
iavf_restore_fdir_filters(struct iavf_adapter * adapter)4418 static void iavf_restore_fdir_filters(struct iavf_adapter *adapter)
4419 {
4420 	struct iavf_fdir_fltr *f;
4421 
4422 	spin_lock_bh(&adapter->fdir_fltr_lock);
4423 	list_for_each_entry(f, &adapter->fdir_list_head, list) {
4424 		if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
4425 			/* Cancel a request, keep filter as active */
4426 			f->state = IAVF_FDIR_FLTR_ACTIVE;
4427 		} else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING ||
4428 			   f->state == IAVF_FDIR_FLTR_INACTIVE) {
4429 			/* Add filters which are inactive or have a pending
4430 			 * request to PF to be deleted
4431 			 */
4432 			f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
4433 			adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
4434 		}
4435 	}
4436 	spin_unlock_bh(&adapter->fdir_fltr_lock);
4437 }
4438 
4439 /**
4440  * iavf_open - Called when a network interface is made active
4441  * @netdev: network interface device structure
4442  *
4443  * Returns 0 on success, negative value on failure
4444  *
4445  * The open entry point is called when a network interface is made
4446  * active by the system (IFF_UP).  At this point all resources needed
4447  * for transmit and receive operations are allocated, the interrupt
4448  * handler is registered with the OS, the watchdog is started,
4449  * and the stack is notified that the interface is ready.
4450  **/
iavf_open(struct net_device * netdev)4451 static int iavf_open(struct net_device *netdev)
4452 {
4453 	struct iavf_adapter *adapter = netdev_priv(netdev);
4454 	int err;
4455 
4456 	netdev_assert_locked(netdev);
4457 
4458 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4459 		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4460 		return -EIO;
4461 	}
4462 
4463 	if (adapter->state != __IAVF_DOWN)
4464 		return -EBUSY;
4465 
4466 	if (adapter->state == __IAVF_RUNNING &&
4467 	    !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4468 		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4469 		return 0;
4470 	}
4471 
4472 	/* allocate transmit descriptors */
4473 	err = iavf_setup_all_tx_resources(adapter);
4474 	if (err)
4475 		goto err_setup_tx;
4476 
4477 	/* allocate receive descriptors */
4478 	err = iavf_setup_all_rx_resources(adapter);
4479 	if (err)
4480 		goto err_setup_rx;
4481 
4482 	/* clear any pending interrupts, may auto mask */
4483 	err = iavf_request_traffic_irqs(adapter, netdev->name);
4484 	if (err)
4485 		goto err_req_irq;
4486 
4487 	spin_lock_bh(&adapter->mac_vlan_list_lock);
4488 	iavf_add_filter(adapter, adapter->hw.mac.addr);
4489 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
4490 
4491 	/* Restore filters that were removed with IFF_DOWN */
4492 	iavf_restore_filters(adapter);
4493 	iavf_restore_fdir_filters(adapter);
4494 
4495 	iavf_configure(adapter);
4496 
4497 	iavf_up_complete(adapter);
4498 
4499 	iavf_irq_enable(adapter, true);
4500 
4501 	return 0;
4502 
4503 err_req_irq:
4504 	iavf_down(adapter);
4505 	iavf_free_traffic_irqs(adapter);
4506 err_setup_rx:
4507 	iavf_free_all_rx_resources(adapter);
4508 err_setup_tx:
4509 	iavf_free_all_tx_resources(adapter);
4510 
4511 	return err;
4512 }
4513 
/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
 * are freed, along with all transmit and receive resources.
 **/
static int iavf_close(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u64 aq_to_restore;
	int status;

	netdev_assert_locked(netdev);

	/* nothing to do if already down (or going down) */
	if (adapter->state <= __IAVF_DOWN_PENDING)
		return 0;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
	 * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
	 * deadlock with adminq_task() until iavf_close timeouts. We must send
	 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
	 * disable queues possible for vf. Give only necessary flags to
	 * iavf_down and save other to set them right before iavf_close()
	 * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and
	 * iavf will be in DOWN state.
	 */
	aq_to_restore = adapter->aq_required;
	adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;

	/* Remove flags which we do not want to send after close or we want to
	 * send before disable queues.
	 */
	aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG		|
			   IAVF_FLAG_AQ_ENABLE_QUEUES		|
			   IAVF_FLAG_AQ_CONFIGURE_QUEUES	|
			   IAVF_FLAG_AQ_ADD_VLAN_FILTER		|
			   IAVF_FLAG_AQ_ADD_MAC_FILTER		|
			   IAVF_FLAG_AQ_ADD_CLOUD_FILTER	|
			   IAVF_FLAG_AQ_ADD_FDIR_FILTER		|
			   IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);

	iavf_down(adapter);
	iavf_change_state(adapter, __IAVF_DOWN_PENDING);
	iavf_free_traffic_irqs(adapter);

	/* drop the netdev lock while waiting so other tasks needing it
	 * (e.g. the adminq task completing DISABLE_QUEUES) can progress
	 */
	netdev_unlock(netdev);

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. State change occurs in iavf_virtchnl_completion() after
	 * VF resources are released (which occurs after PF driver processes and
	 * responds to admin queue commands).
	 */

	status = wait_event_timeout(adapter->down_waitqueue,
				    adapter->state == __IAVF_DOWN,
				    msecs_to_jiffies(500));
	if (!status)
		netdev_warn(netdev, "Device resources not yet released\n");
	netdev_lock(netdev);

	/* re-arm the aq flags that were deliberately held back above */
	adapter->aq_required |= aq_to_restore;

	return 0;
}
4589 
4590 /**
4591  * iavf_change_mtu - Change the Maximum Transfer Unit
4592  * @netdev: network interface device structure
4593  * @new_mtu: new value for maximum frame size
4594  *
4595  * Returns 0 on success, negative on failure
4596  **/
iavf_change_mtu(struct net_device * netdev,int new_mtu)4597 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4598 {
4599 	struct iavf_adapter *adapter = netdev_priv(netdev);
4600 
4601 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
4602 		   netdev->mtu, new_mtu);
4603 	WRITE_ONCE(netdev->mtu, new_mtu);
4604 
4605 	if (netif_running(netdev)) {
4606 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4607 		iavf_reset_step(adapter);
4608 	}
4609 
4610 	return 0;
4611 }
4612 
4613 /**
4614  * iavf_disable_fdir - disable Flow Director and clear existing filters
4615  * @adapter: board private structure
4616  **/
iavf_disable_fdir(struct iavf_adapter * adapter)4617 static void iavf_disable_fdir(struct iavf_adapter *adapter)
4618 {
4619 	struct iavf_fdir_fltr *fdir, *fdirtmp;
4620 	bool del_filters = false;
4621 
4622 	adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED;
4623 
4624 	/* remove all Flow Director filters */
4625 	spin_lock_bh(&adapter->fdir_fltr_lock);
4626 	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
4627 				 list) {
4628 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
4629 		    fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
4630 			/* Delete filters not registered in PF */
4631 			list_del(&fdir->list);
4632 			iavf_dec_fdir_active_fltr(adapter, fdir);
4633 			kfree(fdir);
4634 		} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
4635 			   fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
4636 			   fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
4637 			/* Filters registered in PF, schedule their deletion */
4638 			fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
4639 			del_filters = true;
4640 		} else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
4641 			/* Request to delete filter already sent to PF, change
4642 			 * state to DEL_PENDING to delete filter after PF's
4643 			 * response, not set as INACTIVE
4644 			 */
4645 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
4646 		}
4647 	}
4648 	spin_unlock_bh(&adapter->fdir_fltr_lock);
4649 
4650 	if (del_filters) {
4651 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
4652 		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
4653 	}
4654 }
4655 
/* VLAN offload feature bits (CTAG/STAG, Rx/Tx); toggling any of these in
 * iavf_set_features() triggers a VLAN offload reconfiguration
 */
#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
					 NETIF_F_HW_VLAN_CTAG_TX | \
					 NETIF_F_HW_VLAN_STAG_RX | \
					 NETIF_F_HW_VLAN_STAG_TX)
4660 
4661 /**
4662  * iavf_set_features - set the netdev feature flags
4663  * @netdev: ptr to the netdev being adjusted
4664  * @features: the feature set that the stack is suggesting
4665  * Note: expects to be called while under rtnl_lock()
4666  **/
iavf_set_features(struct net_device * netdev,netdev_features_t features)4667 static int iavf_set_features(struct net_device *netdev,
4668 			     netdev_features_t features)
4669 {
4670 	struct iavf_adapter *adapter = netdev_priv(netdev);
4671 
4672 	/* trigger update on any VLAN feature change */
4673 	if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4674 	    (features & NETIF_VLAN_OFFLOAD_FEATURES))
4675 		iavf_set_vlan_offload_features(adapter, netdev->features,
4676 					       features);
4677 	if (CRC_OFFLOAD_ALLOWED(adapter) &&
4678 	    ((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
4679 		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
4680 
4681 	if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) {
4682 		if (features & NETIF_F_NTUPLE)
4683 			adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
4684 		else
4685 			iavf_disable_fdir(adapter);
4686 	}
4687 
4688 	return 0;
4689 }
4690 
4691 /**
4692  * iavf_features_check - Validate encapsulated packet conforms to limits
4693  * @skb: skb buff
4694  * @dev: This physical port's netdev
4695  * @features: Offload features that the stack believes apply
4696  **/
iavf_features_check(struct sk_buff * skb,struct net_device * dev,netdev_features_t features)4697 static netdev_features_t iavf_features_check(struct sk_buff *skb,
4698 					     struct net_device *dev,
4699 					     netdev_features_t features)
4700 {
4701 	size_t len;
4702 
4703 	/* No point in doing any of this if neither checksum nor GSO are
4704 	 * being requested for this frame.  We can rule out both by just
4705 	 * checking for CHECKSUM_PARTIAL
4706 	 */
4707 	if (skb->ip_summed != CHECKSUM_PARTIAL)
4708 		return features;
4709 
4710 	/* We cannot support GSO if the MSS is going to be less than
4711 	 * 64 bytes.  If it is then we need to drop support for GSO.
4712 	 */
4713 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4714 		features &= ~NETIF_F_GSO_MASK;
4715 
4716 	/* MACLEN can support at most 63 words */
4717 	len = skb_network_offset(skb);
4718 	if (len & ~(63 * 2))
4719 		goto out_err;
4720 
4721 	/* IPLEN and EIPLEN can support at most 127 dwords */
4722 	len = skb_network_header_len(skb);
4723 	if (len & ~(127 * 4))
4724 		goto out_err;
4725 
4726 	if (skb->encapsulation) {
4727 		/* L4TUNLEN can support 127 words */
4728 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
4729 		if (len & ~(127 * 2))
4730 			goto out_err;
4731 
4732 		/* IPLEN can support at most 127 dwords */
4733 		len = skb_inner_transport_header(skb) -
4734 		      skb_inner_network_header(skb);
4735 		if (len & ~(127 * 4))
4736 			goto out_err;
4737 	}
4738 
4739 	/* No need to validate L4LEN as TCP is the only protocol with a
4740 	 * flexible value and we support all possible values supported
4741 	 * by TCP, which is at most 15 dwords
4742 	 */
4743 
4744 	return features;
4745 out_err:
4746 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4747 }
4748 
4749 /**
4750  * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
4751  * @adapter: board private structure
4752  *
4753  * Depending on whether VIRTHCNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4754  * were negotiated determine the VLAN features that can be toggled on and off.
4755  **/
4756 static netdev_features_t
iavf_get_netdev_vlan_hw_features(struct iavf_adapter * adapter)4757 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4758 {
4759 	netdev_features_t hw_features = 0;
4760 
4761 	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4762 		return hw_features;
4763 
4764 	/* Enable VLAN features if supported */
4765 	if (VLAN_ALLOWED(adapter)) {
4766 		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4767 				NETIF_F_HW_VLAN_CTAG_RX);
4768 	} else if (VLAN_V2_ALLOWED(adapter)) {
4769 		struct virtchnl_vlan_caps *vlan_v2_caps =
4770 			&adapter->vlan_v2_caps;
4771 		struct virtchnl_vlan_supported_caps *stripping_support =
4772 			&vlan_v2_caps->offloads.stripping_support;
4773 		struct virtchnl_vlan_supported_caps *insertion_support =
4774 			&vlan_v2_caps->offloads.insertion_support;
4775 
4776 		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4777 		    stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4778 			if (stripping_support->outer &
4779 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4780 				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4781 			if (stripping_support->outer &
4782 			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
4783 				hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4784 		} else if (stripping_support->inner !=
4785 			   VIRTCHNL_VLAN_UNSUPPORTED &&
4786 			   stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4787 			if (stripping_support->inner &
4788 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4789 				hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4790 		}
4791 
4792 		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4793 		    insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4794 			if (insertion_support->outer &
4795 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4796 				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4797 			if (insertion_support->outer &
4798 			    VIRTCHNL_VLAN_ETHERTYPE_88A8)
4799 				hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4800 		} else if (insertion_support->inner &&
4801 			   insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4802 			if (insertion_support->inner &
4803 			    VIRTCHNL_VLAN_ETHERTYPE_8100)
4804 				hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4805 		}
4806 	}
4807 
4808 	if (CRC_OFFLOAD_ALLOWED(adapter))
4809 		hw_features |= NETIF_F_RXFCS;
4810 
4811 	return hw_features;
4812 }
4813 
/**
 * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
 * @adapter: board private structure
 *
 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
 * were negotiated determine the VLAN features that are enabled by default.
 **/
static netdev_features_t
iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
{
	netdev_features_t features = 0;

	/* no defaults until capabilities have been negotiated with the PF */
	if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
		return features;

	if (VLAN_ALLOWED(adapter)) {
		/* legacy VLAN offload: CTAG filter/strip/insert default on */
		features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
	} else if (VLAN_V2_ALLOWED(adapter)) {
		struct virtchnl_vlan_caps *vlan_v2_caps =
			&adapter->vlan_v2_caps;
		struct virtchnl_vlan_supported_caps *filtering_support =
			&vlan_v2_caps->filtering.filtering_support;
		struct virtchnl_vlan_supported_caps *stripping_support =
			&vlan_v2_caps->offloads.stripping_support;
		struct virtchnl_vlan_supported_caps *insertion_support =
			&vlan_v2_caps->offloads.insertion_support;
		u32 ethertype_init;

		/* give priority to outer stripping and don't support both outer
		 * and inner stripping
		 */
		ethertype_init = vlan_v2_caps->offloads.ethertype_init;
		if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (stripping_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_RX;
			else if (stripping_support->outer &
				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_RX;
		} else if (stripping_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (stripping_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_RX;
		}

		/* give priority to outer insertion and don't support both outer
		 * and inner insertion
		 */
		if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (insertion_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_TX;
			else if (insertion_support->outer &
				 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
				 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_TX;
		} else if (insertion_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (insertion_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_TX;
		}

		/* give priority to outer filtering and don't bother if both
		 * outer and inner filtering are enabled
		 */
		ethertype_init = vlan_v2_caps->filtering.ethertype_init;
		if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
			if (filtering_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
			if (filtering_support->outer &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_FILTER;
		} else if (filtering_support->inner !=
			   VIRTCHNL_VLAN_UNSUPPORTED) {
			if (filtering_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_8100 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
			if (filtering_support->inner &
			    VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
			    ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
				features |= NETIF_F_HW_VLAN_STAG_FILTER;
		}
	}

	return features;
}
4912 
/* true unless @requested asks for @feature_bit while @allowed lacks it */
#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
	(!(((requested) & (feature_bit)) && \
	   !((allowed) & (feature_bit))))
4916 
4917 /**
4918  * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4919  * @adapter: board private structure
4920  * @requested_features: stack requested NETDEV features
4921  **/
4922 static netdev_features_t
iavf_fix_netdev_vlan_features(struct iavf_adapter * adapter,netdev_features_t requested_features)4923 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4924 			      netdev_features_t requested_features)
4925 {
4926 	netdev_features_t allowed_features;
4927 
4928 	allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4929 		iavf_get_netdev_vlan_features(adapter);
4930 
4931 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4932 					      allowed_features,
4933 					      NETIF_F_HW_VLAN_CTAG_TX))
4934 		requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4935 
4936 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4937 					      allowed_features,
4938 					      NETIF_F_HW_VLAN_CTAG_RX))
4939 		requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4940 
4941 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4942 					      allowed_features,
4943 					      NETIF_F_HW_VLAN_STAG_TX))
4944 		requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4945 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4946 					      allowed_features,
4947 					      NETIF_F_HW_VLAN_STAG_RX))
4948 		requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4949 
4950 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4951 					      allowed_features,
4952 					      NETIF_F_HW_VLAN_CTAG_FILTER))
4953 		requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4954 
4955 	if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4956 					      allowed_features,
4957 					      NETIF_F_HW_VLAN_STAG_FILTER))
4958 		requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4959 
4960 	if ((requested_features &
4961 	     (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4962 	    (requested_features &
4963 	     (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4964 	    adapter->vlan_v2_caps.offloads.ethertype_match ==
4965 	    VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4966 		netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4967 		requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4968 					NETIF_F_HW_VLAN_STAG_TX);
4969 	}
4970 
4971 	return requested_features;
4972 }
4973 
/**
 * iavf_fix_strip_features - fix NETDEV CRC and VLAN strip features
 * @adapter: board private structure
 * @requested_features: stack requested NETDEV features
 *
 * FCS/CRC retention (NETIF_F_RXFCS) and VLAN stripping are mutually
 * constrained, so this rejects combinations that cannot be honored and
 * logs which toggle must be flipped first.
 *
 * Returns fixed-up features bits
 **/
static netdev_features_t
iavf_fix_strip_features(struct iavf_adapter *adapter,
			netdev_features_t requested_features)
{
	struct net_device *netdev = adapter->netdev;
	bool crc_offload_req, is_vlan_strip;
	netdev_features_t vlan_strip;
	int num_non_zero_vlan;

	/* true when the request keeps the frame CRC (RXFCS) and the PF
	 * actually supports that option
	 */
	crc_offload_req = CRC_OFFLOAD_ALLOWED(adapter) &&
			  (requested_features & NETIF_F_RXFCS);
	num_non_zero_vlan = iavf_get_num_vlans_added(adapter);
	vlan_strip = (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX);
	is_vlan_strip = requested_features & vlan_strip;

	/* without an RXFCS request there is nothing to reconcile */
	if (!crc_offload_req)
		return requested_features;

	/* no VLANs configured and both RXFCS and VLAN stripping requested
	 * while neither is currently on: drop the stripping request
	 */
	if (!num_non_zero_vlan && (netdev->features & vlan_strip) &&
	    !(netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
		requested_features &= ~vlan_strip;
		netdev_info(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
		return requested_features;
	}

	/* RXFCS already on: VLAN stripping cannot be enabled on top of it */
	if ((netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
		requested_features &= ~vlan_strip;
		if (!(netdev->features & vlan_strip))
			netdev_info(netdev, "To enable VLAN stripping, first need to enable FCS/CRC stripping");

		return requested_features;
	}

	/* VLANs exist and stripping is requested: RXFCS must stay off */
	if (num_non_zero_vlan && is_vlan_strip &&
	    !(netdev->features & NETIF_F_RXFCS)) {
		requested_features &= ~NETIF_F_RXFCS;
		netdev_info(netdev, "To disable FCS/CRC stripping, first need to disable VLAN stripping");
	}

	return requested_features;
}
5022 
5023 /**
5024  * iavf_fix_features - fix up the netdev feature bits
5025  * @netdev: our net device
5026  * @features: desired feature bits
5027  *
5028  * Returns fixed-up features bits
5029  **/
iavf_fix_features(struct net_device * netdev,netdev_features_t features)5030 static netdev_features_t iavf_fix_features(struct net_device *netdev,
5031 					   netdev_features_t features)
5032 {
5033 	struct iavf_adapter *adapter = netdev_priv(netdev);
5034 
5035 	features = iavf_fix_netdev_vlan_features(adapter, features);
5036 
5037 	if (!FDIR_FLTR_SUPPORT(adapter))
5038 		features &= ~NETIF_F_NTUPLE;
5039 
5040 	return iavf_fix_strip_features(adapter, features);
5041 }
5042 
/**
 * iavf_hwstamp_get - report the current hardware timestamping config
 * @netdev: network interface device structure
 * @config: destination for the cached timestamp configuration
 *
 * Returns 0.
 **/
static int iavf_hwstamp_get(struct net_device *netdev,
			    struct kernel_hwtstamp_config *config)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* copy out the config cached in the adapter's PTP state */
	*config = adapter->ptp.hwtstamp_config;

	return 0;
}
5052 
/**
 * iavf_hwstamp_set - apply a new hardware timestamping config
 * @netdev: network interface device structure
 * @config: requested timestamp configuration
 * @extack: netlink extended ack for error reporting
 *
 * Returns the result of iavf_ptp_set_ts_config().
 **/
static int iavf_hwstamp_set(struct net_device *netdev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return iavf_ptp_set_ts_config(adapter, config, extack);
}
5061 
5062 static int
iavf_verify_shaper(struct net_shaper_binding * binding,const struct net_shaper * shaper,struct netlink_ext_ack * extack)5063 iavf_verify_shaper(struct net_shaper_binding *binding,
5064 		   const struct net_shaper *shaper,
5065 		   struct netlink_ext_ack *extack)
5066 {
5067 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
5068 	u64 vf_max;
5069 
5070 	if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE) {
5071 		vf_max = adapter->qos_caps->cap[0].shaper.peak;
5072 		if (vf_max && shaper->bw_max > vf_max) {
5073 			NL_SET_ERR_MSG_FMT(extack, "Max rate (%llu) of queue %d can't exceed max TX rate of VF (%llu kbps)",
5074 					   shaper->bw_max, shaper->handle.id,
5075 					   vf_max);
5076 			return -EINVAL;
5077 		}
5078 	}
5079 	return 0;
5080 }
5081 
5082 static int
iavf_shaper_set(struct net_shaper_binding * binding,const struct net_shaper * shaper,struct netlink_ext_ack * extack)5083 iavf_shaper_set(struct net_shaper_binding *binding,
5084 		const struct net_shaper *shaper,
5085 		struct netlink_ext_ack *extack)
5086 {
5087 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
5088 	const struct net_shaper_handle *handle = &shaper->handle;
5089 	struct iavf_ring *tx_ring;
5090 	int ret;
5091 
5092 	netdev_assert_locked(adapter->netdev);
5093 
5094 	if (handle->id >= adapter->num_active_queues)
5095 		return 0;
5096 
5097 	ret = iavf_verify_shaper(binding, shaper, extack);
5098 	if (ret)
5099 		return ret;
5100 
5101 	tx_ring = &adapter->tx_rings[handle->id];
5102 
5103 	tx_ring->q_shaper.bw_min = div_u64(shaper->bw_min, 1000);
5104 	tx_ring->q_shaper.bw_max = div_u64(shaper->bw_max, 1000);
5105 	tx_ring->q_shaper_update = true;
5106 
5107 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
5108 
5109 	return 0;
5110 }
5111 
iavf_shaper_del(struct net_shaper_binding * binding,const struct net_shaper_handle * handle,struct netlink_ext_ack * extack)5112 static int iavf_shaper_del(struct net_shaper_binding *binding,
5113 			   const struct net_shaper_handle *handle,
5114 			   struct netlink_ext_ack *extack)
5115 {
5116 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
5117 	struct iavf_ring *tx_ring;
5118 
5119 	netdev_assert_locked(adapter->netdev);
5120 
5121 	if (handle->id >= adapter->num_active_queues)
5122 		return 0;
5123 
5124 	tx_ring = &adapter->tx_rings[handle->id];
5125 	tx_ring->q_shaper.bw_min = 0;
5126 	tx_ring->q_shaper.bw_max = 0;
5127 	tx_ring->q_shaper_update = true;
5128 
5129 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
5130 
5131 	return 0;
5132 }
5133 
iavf_shaper_cap(struct net_shaper_binding * binding,enum net_shaper_scope scope,unsigned long * flags)5134 static void iavf_shaper_cap(struct net_shaper_binding *binding,
5135 			    enum net_shaper_scope scope,
5136 			    unsigned long *flags)
5137 {
5138 	if (scope != NET_SHAPER_SCOPE_QUEUE)
5139 		return;
5140 
5141 	*flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN) |
5142 		 BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
5143 		 BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
5144 }
5145 
/* queue-scope shaper callbacks exposed via net_shaper_ops */
static const struct net_shaper_ops iavf_shaper_ops = {
	.set = iavf_shaper_set,
	.delete = iavf_shaper_del,
	.capabilities = iavf_shaper_cap,
};
5151 
/* netdev callbacks for the iavf virtual function interface */
static const struct net_device_ops iavf_netdev_ops = {
	.ndo_open		= iavf_open,
	.ndo_stop		= iavf_close,
	.ndo_start_xmit		= iavf_xmit_frame,
	.ndo_set_rx_mode	= iavf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= iavf_set_mac,
	.ndo_change_mtu		= iavf_change_mtu,
	.ndo_tx_timeout		= iavf_tx_timeout,
	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
	.ndo_features_check	= iavf_features_check,
	.ndo_fix_features	= iavf_fix_features,
	.ndo_set_features	= iavf_set_features,
	.ndo_setup_tc		= iavf_setup_tc,
	.net_shaper_ops		= &iavf_shaper_ops,
	.ndo_hwtstamp_get	= iavf_hwstamp_get,
	.ndo_hwtstamp_set	= iavf_hwstamp_set,
};
5171 
5172 /**
5173  * iavf_check_reset_complete - check that VF reset is complete
5174  * @hw: pointer to hw struct
5175  *
5176  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
5177  **/
iavf_check_reset_complete(struct iavf_hw * hw)5178 static int iavf_check_reset_complete(struct iavf_hw *hw)
5179 {
5180 	u32 rstat;
5181 	int i;
5182 
5183 	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
5184 		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
5185 			     IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
5186 		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
5187 		    (rstat == VIRTCHNL_VFR_COMPLETED))
5188 			return 0;
5189 		msleep(IAVF_RESET_WAIT_MS);
5190 	}
5191 	return -EBUSY;
5192 }
5193 
/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 **/
int iavf_process_config(struct iavf_adapter *adapter)
{
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	netdev_features_t hw_vlan_features, vlan_features;
	struct net_device *netdev = adapter->netdev;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	/* baseline offloads, also reused below as the hw_features seed */
	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES	|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	/* advertise to stack only if offloads for encapsulated packets is
	 * supported
	 */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
				   NETIF_F_GSO_GRE		|
				   NETIF_F_GSO_GRE_CSUM		|
				   NETIF_F_GSO_IPXIP4		|
				   NETIF_F_GSO_IPXIP6		|
				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
				   NETIF_F_GSO_PARTIAL		|
				   0;

		/* without inner checksum offload, UDP tunnel checksum is
		 * only available through GSO partial
		 */
		if (!(vfres->vf_cap_flags &
		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
		netdev->hw_enc_features |= hw_enc_features;
	}
	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* Write features and hw_features separately to avoid polluting
	 * with, or dropping, features that are set when we registered.
	 */
	hw_features = hw_enc_features;

	/* get HW VLAN features that can be toggled */
	hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);

	/* Enable HW TC offload if ADQ or tc U32 is supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ||
	    TC_U32_SUPPORT(adapter))
		hw_features |= NETIF_F_HW_TC;

	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
		hw_features |= NETIF_F_GSO_UDP_L4;

	netdev->hw_features |= hw_features | hw_vlan_features;
	/* VLAN features that are enabled by default per capabilities */
	vlan_features = iavf_get_netdev_vlan_features(adapter);

	netdev->features |= hw_features | vlan_features;

	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	/* NETIF_F_NTUPLE is backed by Flow Director */
	if (FDIR_FLTR_SUPPORT(adapter)) {
		netdev->hw_features |= NETIF_F_NTUPLE;
		netdev->features |= NETIF_F_NTUPLE;
		adapter->flags |= IAVF_FLAG_FDIR_ENABLED;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs minimum 576 bytes to work correctly.
	 */
	if (netdev->wanted_features) {
		if (!(netdev->wanted_features & NETIF_F_TSO) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO;
		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO6;
		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
			netdev->features &= ~NETIF_F_TSO_ECN;
		if (!(netdev->wanted_features & NETIF_F_GRO))
			netdev->features &= ~NETIF_F_GRO;
		if (!(netdev->wanted_features & NETIF_F_GSO))
			netdev->features &= ~NETIF_F_GSO;
	}

	return 0;
}
5299 
/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err, len;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* the device DMAs with 64-bit addresses */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: 0x%x\n", err);
		goto err_dma;
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	/* allocate the netdev with the maximum number of queues the VF
	 * may ever request; the active count is negotiated later
	 */
	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	netif_set_affinity_auto(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	/* ordered workqueue serializes the watchdog/reset/adminq work */
	adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
					      iavf_driver_name);
	if (!adapter->wq) {
		err = -ENOMEM;
		goto err_alloc_wq;
	}

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	iavf_change_state(adapter, __IAVF_STARTUP);

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	/* map BAR0 registers */
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* QoS capability buffer sized for the maximum number of TCs */
	len = struct_size(adapter->qos_caps, cap, IAVF_MAX_QOS_TC_NUM);
	adapter->qos_caps = kzalloc(len, GFP_KERNEL);
	if (!adapter->qos_caps) {
		err = -ENOMEM;
		goto err_alloc_qos_cap;
	}

	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);
	spin_lock_init(&adapter->fdir_fltr_lock);
	spin_lock_init(&adapter->adv_rss_lock);
	spin_lock_init(&adapter->current_netdev_promisc_flags_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);
	INIT_LIST_HEAD(&adapter->fdir_list_head);
	INIT_LIST_HEAD(&adapter->adv_rss_list_head);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_WORK(&adapter->finish_config, iavf_finish_config);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	/* Setup the wait queue for indicating virtchannel events */
	init_waitqueue_head(&adapter->vc_waitqueue);

	INIT_LIST_HEAD(&adapter->ptp.aq_cmds);
	init_waitqueue_head(&adapter->ptp.phc_time_waitqueue);
	mutex_init(&adapter->ptp.aq_cmd_lock);

	/* stagger start-up of multiple VFs by PCI function number */
	queue_delayed_work(adapter->wq, &adapter->watchdog_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
	/* Initialization goes on in the work. Do not add more of it below. */
	return 0;

err_alloc_qos_cap:
	iounmap(hw->hw_addr);
err_ioremap:
	destroy_workqueue(adapter->wq);
err_alloc_wq:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
5440 
5441 /**
5442  * iavf_suspend - Power management suspend routine
5443  * @dev_d: device info pointer
5444  *
5445  * Called when the system (VM) is entering sleep/suspend.
5446  **/
iavf_suspend(struct device * dev_d)5447 static int iavf_suspend(struct device *dev_d)
5448 {
5449 	struct net_device *netdev = dev_get_drvdata(dev_d);
5450 	struct iavf_adapter *adapter = netdev_priv(netdev);
5451 	bool running;
5452 
5453 	netif_device_detach(netdev);
5454 
5455 	running = netif_running(netdev);
5456 	if (running)
5457 		rtnl_lock();
5458 	netdev_lock(netdev);
5459 
5460 	if (running)
5461 		iavf_down(adapter);
5462 
5463 	iavf_free_misc_irq(adapter);
5464 	iavf_reset_interrupt_capability(adapter);
5465 
5466 	netdev_unlock(netdev);
5467 	if (running)
5468 		rtnl_unlock();
5469 
5470 	return 0;
5471 }
5472 
5473 /**
5474  * iavf_resume - Power management resume routine
5475  * @dev_d: device info pointer
5476  *
5477  * Called when the system (VM) is resumed from sleep/suspend.
5478  **/
iavf_resume(struct device * dev_d)5479 static int iavf_resume(struct device *dev_d)
5480 {
5481 	struct pci_dev *pdev = to_pci_dev(dev_d);
5482 	struct iavf_adapter *adapter;
5483 	int err;
5484 
5485 	adapter = iavf_pdev_to_adapter(pdev);
5486 
5487 	pci_set_master(pdev);
5488 
5489 	rtnl_lock();
5490 	err = iavf_set_interrupt_capability(adapter);
5491 	if (err) {
5492 		rtnl_unlock();
5493 		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
5494 		return err;
5495 	}
5496 	err = iavf_request_misc_irq(adapter);
5497 	rtnl_unlock();
5498 	if (err) {
5499 		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
5500 		return err;
5501 	}
5502 
5503 	queue_work(adapter->wq, &adapter->reset_task);
5504 
5505 	netif_device_attach(adapter->netdev);
5506 
5507 	return err;
5508 }
5509 
/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct iavf_fdir_fltr *fdir, *fdirtmp;
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_adv_rss *rss, *rsstmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_adapter *adapter;
	struct net_device *netdev;
	struct iavf_hw *hw;

	/* Don't proceed with remove if netdev is already freed */
	netdev = pci_get_drvdata(pdev);
	if (!netdev)
		return;

	adapter = iavf_pdev_to_adapter(pdev);
	hw = &adapter->hw;

	/* Only one caller may run the teardown below; later invocations
	 * (e.g. shutdown after remove) bail out here.
	 */
	if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	/* Wait until port initialization is complete.
	 * There are flows where register/unregister netdev may race.
	 */
	while (1) {
		netdev_lock(netdev);
		if (adapter->state == __IAVF_RUNNING ||
		    adapter->state == __IAVF_DOWN ||
		    adapter->state == __IAVF_INIT_FAILED) {
			netdev_unlock(netdev);
			break;
		}
		/* Simply return if we already went through iavf_shutdown */
		if (adapter->state == __IAVF_REMOVE) {
			netdev_unlock(netdev);
			return;
		}

		netdev_unlock(netdev);
		usleep_range(500, 1000);
	}
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->finish_config);

	/* unregister_netdev() calls the driver's close path and must not be
	 * done under netdev_lock (it takes the lock itself).
	 */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	netdev_lock(netdev);
	dev_info(&adapter->pdev->dev, "Removing device\n");
	iavf_change_state(adapter, __IAVF_REMOVE);

	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}

	iavf_ptp_release(adapter);

	iavf_misc_irq_disable(adapter);
	/* Shut down all the garbage mashers on the detention level.
	 * Drop netdev_lock while cancelling: these works take it themselves,
	 * so holding it across *_sync() would deadlock.
	 */
	netdev_unlock(netdev);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->adminq_task);
	netdev_lock(netdev);

	/* No further aq work may be scheduled past this point */
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;

	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_misc_irq(adapter);
	iavf_free_interrupt_scheme(adapter);

	iavf_free_rss(adapter);

	/* Only shut the adminq down if it was ever brought up */
	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);
	netdev_unlock(netdev);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* Free any leftover cloud, flow-director and RSS filters, each
	 * under its own list lock.
	 */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
		list_del(&fdir->list);
		kfree(fdir);
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
				 list) {
		list_del(&rss->list);
		kfree(rss);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	destroy_workqueue(adapter->wq);

	/* Clearing drvdata marks the netdev as freed for re-entrant calls */
	pci_set_drvdata(pdev, NULL);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
5656 
5657 /**
5658  * iavf_shutdown - Shutdown the device in preparation for a reboot
5659  * @pdev: pci device structure
5660  **/
iavf_shutdown(struct pci_dev * pdev)5661 static void iavf_shutdown(struct pci_dev *pdev)
5662 {
5663 	iavf_remove(pdev);
5664 
5665 	if (system_state == SYSTEM_POWER_OFF)
5666 		pci_set_power_state(pdev, PCI_D3hot);
5667 }
5668 
/* System sleep (suspend/resume) PM callbacks only; no runtime PM. */
static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

/* PCI driver glue: binds the probe/remove/shutdown and PM entry points
 * above to the device IDs in iavf_pci_tbl.
 */
static struct pci_driver iavf_driver = {
	.name      = iavf_driver_name,
	.id_table  = iavf_pci_tbl,
	.probe     = iavf_probe,
	.remove    = iavf_remove,
	.driver.pm = pm_sleep_ptr(&iavf_pm_ops),
	.shutdown  = iavf_shutdown,
};
5679 
5680 /**
5681  * iavf_init_module - Driver Registration Routine
5682  *
5683  * iavf_init_module is the first routine called when the driver is
5684  * loaded. All it does is register with the PCI subsystem.
5685  **/
iavf_init_module(void)5686 static int __init iavf_init_module(void)
5687 {
5688 	pr_info("iavf: %s\n", iavf_driver_string);
5689 
5690 	pr_info("%s\n", iavf_copyright);
5691 
5692 	return pci_register_driver(&iavf_driver);
5693 }
5694 
5695 module_init(iavf_init_module);
5696 
/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	/* Unbinds all devices, invoking iavf_remove() for each */
	pci_unregister_driver(&iavf_driver);
}

module_exit(iavf_exit_module);
5709 
5710 /* iavf_main.c */
5711