xref: /linux/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/net/intel/libie/rx.h>
5 
6 #include "iavf.h"
7 #include "iavf_ptp.h"
8 #include "iavf_prototype.h"
9 
10 /**
11  * iavf_send_pf_msg
12  * @adapter: adapter structure
13  * @op: virtual channel opcode
14  * @msg: pointer to message buffer
15  * @len: message length
16  *
17  * Send message to PF and print status if failure.
18  **/
19 static int iavf_send_pf_msg(struct iavf_adapter *adapter,
20 			    enum virtchnl_ops op, u8 *msg, u16 len)
21 {
22 	struct iavf_hw *hw = &adapter->hw;
23 	enum iavf_status status;
24 
25 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
26 		return 0; /* nothing to see here, move along */
27 
28 	status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
29 	if (status)
30 		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
31 			op, iavf_stat_str(hw, status),
32 			libie_aq_str(hw->aq.asq_last_status));
33 	return iavf_status_to_errno(status);
34 }
35 
36 /**
37  * iavf_send_api_ver
38  * @adapter: adapter structure
39  *
40  * Send API version admin queue message to the PF. The reply is not checked
41  * in this function. Returns 0 if the message was successfully
42  * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
43  **/
44 int iavf_send_api_ver(struct iavf_adapter *adapter)
45 {
46 	struct virtchnl_version_info vvi;
47 
48 	vvi.major = VIRTCHNL_VERSION_MAJOR;
49 	vvi.minor = VIRTCHNL_VERSION_MINOR;
50 
51 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
52 				sizeof(vvi));
53 }
54 
/**
 * iavf_poll_virtchnl_msg
 * @hw: HW configuration structure
 * @event: event to populate on success
 * @op_to_poll: requested virtchnl op to poll for
 *
 * Initialize poll for virtchnl msg matching the requested_op. Returns 0
 * if a message of the correct opcode is in the queue or an error code
 * if no message matching the op code is waiting and other failures.
 *
 * Note: messages with opcodes other than @op_to_poll (except
 * VIRTCHNL_OP_EVENT, handled below) are consumed and discarded.
 */
static int
iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
		       enum virtchnl_ops op_to_poll)
{
	enum virtchnl_ops received_op;
	enum iavf_status status;
	u32 v_retval;

	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		status = iavf_clean_arq_element(hw, event, NULL);
		if (status != IAVF_SUCCESS)
			return iavf_status_to_errno(status);
		/* The opcode of the received message is carried in the
		 * descriptor's high cookie word.
		 */
		received_op =
		    (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);

		if (received_op == VIRTCHNL_OP_EVENT) {
			struct iavf_adapter *adapter = hw->back;
			struct virtchnl_pf_event *vpe =
				(struct virtchnl_pf_event *)event->msg_buf;

			/* Ignore PF events other than an impending reset */
			if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
				continue;

			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
			/* Only queue a reset if one isn't already pending */
			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
				iavf_schedule_reset(adapter,
						    IAVF_FLAG_RESET_PENDING);

			/* A reset aborts whatever the caller was waiting for */
			return -EIO;
		}

		if (op_to_poll == received_op)
			break;
	}

	/* The PF's result code for the op rides in the low cookie word */
	v_retval = le32_to_cpu(event->desc.cookie_low);
	return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
}
106 
107 /**
108  * iavf_verify_api_ver
109  * @adapter: adapter structure
110  *
111  * Compare API versions with the PF. Must be called after admin queue is
112  * initialized. Returns 0 if API versions match, -EIO if they do not,
113  * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
114  * from the firmware are propagated.
115  **/
116 int iavf_verify_api_ver(struct iavf_adapter *adapter)
117 {
118 	struct iavf_arq_event_info event;
119 	int err;
120 
121 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
122 	event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
123 	if (!event.msg_buf)
124 		return -ENOMEM;
125 
126 	err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
127 	if (!err) {
128 		struct virtchnl_version_info *pf_vvi =
129 			(struct virtchnl_version_info *)event.msg_buf;
130 		adapter->pf_version = *pf_vvi;
131 
132 		if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
133 		    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
134 		     pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
135 			err = -EIO;
136 	}
137 
138 	kfree(event.msg_buf);
139 
140 	return err;
141 }
142 
143 /**
144  * iavf_send_vf_config_msg
145  * @adapter: adapter structure
146  *
147  * Send VF configuration request admin queue message to the PF. The reply
148  * is not checked in this function. Returns 0 if the message was
149  * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
150  **/
151 int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
152 {
153 	u32 caps;
154 
155 	caps = VIRTCHNL_VF_OFFLOAD_L2 |
156 	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
157 	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
158 	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
159 	       VIRTCHNL_VF_OFFLOAD_VLAN |
160 	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
161 	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
162 	       VIRTCHNL_VF_OFFLOAD_ENCAP |
163 	       VIRTCHNL_VF_OFFLOAD_TC_U32 |
164 	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
165 	       VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
166 	       VIRTCHNL_VF_OFFLOAD_CRC |
167 	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
168 	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
169 	       VIRTCHNL_VF_CAP_PTP |
170 	       VIRTCHNL_VF_OFFLOAD_ADQ |
171 	       VIRTCHNL_VF_OFFLOAD_USO |
172 	       VIRTCHNL_VF_OFFLOAD_FDIR_PF |
173 	       VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
174 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
175 	       VIRTCHNL_VF_OFFLOAD_QOS;
176 
177 	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
178 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
179 	if (PF_IS_V11(adapter))
180 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
181 					(u8 *)&caps, sizeof(caps));
182 	else
183 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
184 					NULL, 0);
185 }
186 
/**
 * iavf_send_vf_offload_vlan_v2_msg - request VLAN V2 offload capabilities
 * @adapter: adapter structure
 *
 * Send VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to the PF. The reply is
 * received separately via iavf_get_vf_vlan_v2_caps().
 *
 * Return: 0 if the message was sent, -EOPNOTSUPP if VLAN V2 was not
 * negotiated, or a negative errno from the admin queue send.
 */
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
{
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;

	if (!VLAN_V2_ALLOWED(adapter))
		return -EOPNOTSUPP;

	adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
				NULL, 0);
}
199 
/**
 * iavf_send_vf_supported_rxdids_msg - request supported Rx descriptor IDs
 * @adapter: adapter structure
 *
 * Send VIRTCHNL_OP_GET_SUPPORTED_RXDIDS to the PF. The reply is
 * received separately via iavf_get_vf_supported_rxdids().
 *
 * Return: 0 if the message was sent, -EOPNOTSUPP if flexible Rx
 * descriptors were not negotiated, or a negative errno from the admin
 * queue send.
 */
int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
{
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;

	if (!IAVF_RXDID_ALLOWED(adapter))
		return -EOPNOTSUPP;

	adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
				NULL, 0);
}
212 
213 /**
214  * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
215  * @adapter: private adapter structure
216  *
217  * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
218  * capabilities available to this device. This includes the following
219  * potential access:
220  *
221  * * READ_PHC - access to read the PTP hardware clock time
222  * * RX_TSTAMP - access to request Rx timestamps on all received packets
223  *
224  * The PF will reply with the same opcode a filled out copy of the
225  * virtchnl_ptp_caps structure which defines the specifics of which features
226  * are accessible to this device.
227  *
228  * Return: 0 if success, error code otherwise.
229  */
230 int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
231 {
232 	struct virtchnl_ptp_caps hw_caps = {
233 		.caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
234 			VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
235 	};
236 
237 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;
238 
239 	if (!IAVF_PTP_ALLOWED(adapter))
240 		return -EOPNOTSUPP;
241 
242 	adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;
243 
244 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
245 				(u8 *)&hw_caps, sizeof(hw_caps));
246 }
247 
248 /**
249  * iavf_validate_num_queues
250  * @adapter: adapter structure
251  *
252  * Validate that the number of queues the PF has sent in
253  * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
254  **/
255 static void iavf_validate_num_queues(struct iavf_adapter *adapter)
256 {
257 	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
258 		struct virtchnl_vsi_resource *vsi_res;
259 		int i;
260 
261 		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
262 			 adapter->vf_res->num_queue_pairs,
263 			 IAVF_MAX_REQ_QUEUES);
264 		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
265 			 IAVF_MAX_REQ_QUEUES);
266 		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
267 		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
268 			vsi_res = &adapter->vf_res->vsi_res[i];
269 			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
270 		}
271 	}
272 }
273 
/**
 * iavf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	u16 len;
	int err;

	len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
	/* Copy whatever was received (clamped to our buffer) into vf_res
	 * even on error; msg_buf was zeroed so vf_res stays consistent.
	 */
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		iavf_validate_num_queues(adapter);
	/* Parse (possibly zeroed) resources into the hw struct regardless
	 * of poll outcome.
	 */
	iavf_vf_parse_hw_config(hw, adapter->vf_res);

	kfree(event.msg_buf);

	return err;
}
310 
/**
 * iavf_get_vf_vlan_v2_caps - receive the PF's VLAN V2 capabilities
 * @adapter: adapter structure
 *
 * Poll the admin queue for the reply to
 * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS and, on success, copy the
 * capabilities into adapter->vlan_v2_caps (clamped to the received
 * message length).
 *
 * Return: 0 on success, negative errno otherwise.
 */
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	int err;
	u16 len;

	len = sizeof(struct virtchnl_vlan_caps);
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
	if (!err)
		memcpy(&adapter->vlan_v2_caps, event.msg_buf,
		       min(event.msg_len, len));

	kfree(event.msg_buf);

	return err;
}
333 
/**
 * iavf_get_vf_supported_rxdids - receive supported Rx descriptor IDs
 * @adapter: adapter structure
 *
 * Poll the admin queue for the reply to VIRTCHNL_OP_GET_SUPPORTED_RXDIDS
 * and, on success, store the bitmap of supported RXDIDs in
 * adapter->supp_rxdids. An on-stack u64 is used directly as the admin
 * queue receive buffer, so no allocation is needed.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
{
	struct iavf_arq_event_info event;
	u64 rxdids;
	int err;

	event.msg_buf = (u8 *)&rxdids;
	event.buf_len = sizeof(rxdids);

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
	if (!err)
		adapter->supp_rxdids = rxdids;

	return err;
}
350 
/**
 * iavf_get_vf_ptp_caps - receive the PF's granted PTP capabilities
 * @adapter: adapter structure
 *
 * Poll the admin queue for the reply to VIRTCHNL_OP_1588_PTP_GET_CAPS
 * and, on success, store the granted capabilities in
 * adapter->ptp.hw_caps. An on-stack struct is used directly as the
 * admin queue receive buffer.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
{
	struct virtchnl_ptp_caps caps = {};
	struct iavf_arq_event_info event;
	int err;

	event.msg_buf = (u8 *)&caps;
	event.buf_len = sizeof(caps);

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
				     VIRTCHNL_OP_1588_PTP_GET_CAPS);
	if (!err)
		adapter->ptp.hw_caps = caps;

	return err;
}
367 
/**
 * iavf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void iavf_configure_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	int pairs = adapter->num_active_queues;
	struct virtchnl_queue_pair_info *vqpi;
	u32 i, max_frame;
	u8 rx_flags = 0;
	size_t len;

	/* Max Rx frame is derived from the page-pool buffer layout, then
	 * capped by the PF-advertised MTU (if any).
	 */
	max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
	max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = virtchnl_struct_size(vqci, qpair, pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	/* Ask the PF for Rx timestamps only if the capability was granted */
	if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
		rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
		/* Flexible Rx descriptor format, if negotiated */
		if (IAVF_RXDID_ALLOWED(adapter))
			vqpi->rxq.rxdid = adapter->rxdid;
		/* Keep FCS in the packet when the netdev asks for it */
		if (CRC_OFFLOAD_ALLOWED(adapter))
			vqpi->rxq.crc_disable = !!(adapter->netdev->features &
						   NETIF_F_RXFCS);
		vqpi->rxq.flags = rx_flags;
		vqpi++;
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			 (u8 *)vqci, len);
	kfree(vqci);
}
432 
433 /**
434  * iavf_enable_queues
435  * @adapter: adapter structure
436  *
437  * Request that the PF enable all of our queues.
438  **/
439 void iavf_enable_queues(struct iavf_adapter *adapter)
440 {
441 	struct virtchnl_queue_select vqs;
442 
443 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
444 		/* bail because we already have a command pending */
445 		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
446 			adapter->current_op);
447 		return;
448 	}
449 	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
450 	vqs.vsi_id = adapter->vsi_res->vsi_id;
451 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
452 	vqs.rx_queues = vqs.tx_queues;
453 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
454 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
455 			 (u8 *)&vqs, sizeof(vqs));
456 }
457 
458 /**
459  * iavf_disable_queues
460  * @adapter: adapter structure
461  *
462  * Request that the PF disable all of our queues.
463  **/
464 void iavf_disable_queues(struct iavf_adapter *adapter)
465 {
466 	struct virtchnl_queue_select vqs;
467 
468 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
469 		/* bail because we already have a command pending */
470 		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
471 			adapter->current_op);
472 		return;
473 	}
474 	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
475 	vqs.vsi_id = adapter->vsi_res->vsi_id;
476 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
477 	vqs.rx_queues = vqs.tx_queues;
478 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
479 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
480 			 (u8 *)&vqs, sizeof(vqs));
481 }
482 
/**
 * iavf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void iavf_map_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	struct iavf_q_vector *q_vector;
	int v_idx, q_vectors;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	/* Queue vectors only; the NONQ_VECS misc vectors are handled below */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		/* Hardware vector IDs are offset by the misc vectors */
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = IAVF_RX_ITR;
		vecmap->txitr_idx = IAVF_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages.
	 * v_idx == q_vectors here (loop exit value), i.e. the final slot.
	 */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			 (u8 *)vimi, len);
	kfree(vimi);
}
538 
539 /**
540  * iavf_set_mac_addr_type - Set the correct request type from the filter type
541  * @virtchnl_ether_addr: pointer to requested list element
542  * @filter: pointer to requested filter
543  **/
544 static void
545 iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
546 		       const struct iavf_mac_filter *filter)
547 {
548 	virtchnl_ether_addr->type = filter->is_primary ?
549 		VIRTCHNL_ETHER_ADDR_PRIMARY :
550 		VIRTCHNL_ETHER_ADDR_EXTRA;
551 }
552 
/**
 * iavf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 * If the list of pending additions does not fit in one AQ message,
 * only the first chunk is sent and the AQ_ADD_MAC_FILTER flag is left
 * set so this function runs again for the remainder.
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Count filters marked for addition */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = virtchnl_struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* Shrink count until the message fits; remainder goes in a
		 * later call (more = true keeps the AQ flag set).
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		while (len > IAVF_MAX_AQ_BUF_SIZE)
			len = virtchnl_struct_size(veal, list, --count);
		more = true;
	}

	/* GFP_ATOMIC: allocating under the mac_vlan_list_lock spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}
621 
/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 * Filters included in the message are freed immediately; if they do not
 * all fit in one AQ message, the AQ_DEL_MAC_FILTER flag is left set so
 * this function runs again for the remainder.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Count filters marked for removal */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = virtchnl_struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* Shrink count until the message fits; remainder goes in a
		 * later call (more = true keeps the AQ flag set).
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		while (len > IAVF_MAX_AQ_BUF_SIZE)
			len = virtchnl_struct_size(veal, list, --count);
		more = true;
	}
	/* GFP_ATOMIC: allocating under the mac_vlan_list_lock spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* _safe variant: entries are deleted while walking the list */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}
690 
691 /**
692  * iavf_mac_add_ok
693  * @adapter: adapter structure
694  *
695  * Submit list of filters based on PF response.
696  **/
697 static void iavf_mac_add_ok(struct iavf_adapter *adapter)
698 {
699 	struct iavf_mac_filter *f, *ftmp;
700 
701 	spin_lock_bh(&adapter->mac_vlan_list_lock);
702 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
703 		f->is_new_mac = false;
704 		if (!f->add && !f->add_handled)
705 			f->add_handled = true;
706 	}
707 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
708 }
709 
710 /**
711  * iavf_mac_add_reject
712  * @adapter: adapter structure
713  *
714  * Remove filters from list based on PF response.
715  **/
716 static void iavf_mac_add_reject(struct iavf_adapter *adapter)
717 {
718 	struct net_device *netdev = adapter->netdev;
719 	struct iavf_mac_filter *f, *ftmp;
720 
721 	spin_lock_bh(&adapter->mac_vlan_list_lock);
722 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
723 		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
724 			f->remove = false;
725 
726 		if (!f->add && !f->add_handled)
727 			f->add_handled = true;
728 
729 		if (f->is_new_mac) {
730 			list_del(&f->list);
731 			kfree(f);
732 		}
733 	}
734 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
735 }
736 
737 /**
738  * iavf_vlan_add_reject
739  * @adapter: adapter structure
740  *
741  * Remove VLAN filters from list based on PF response.
742  **/
743 static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
744 {
745 	struct iavf_vlan_filter *f, *ftmp;
746 
747 	spin_lock_bh(&adapter->mac_vlan_list_lock);
748 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
749 		if (f->state == IAVF_VLAN_IS_NEW) {
750 			list_del(&f->list);
751 			kfree(f);
752 			adapter->num_vlan_filters--;
753 		}
754 	}
755 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
756 }
757 
/**
 * iavf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 * Uses the legacy VIRTCHNL_OP_ADD_VLAN when plain VLAN offload was
 * negotiated, otherwise the VLAN V2 variant. Oversized requests are
 * split across multiple messages by leaving the AQ flag set.
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Count filters waiting to be added */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_ADD)
			count++;
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	if (VLAN_ALLOWED(adapter)) {
		/* Legacy (v1) VLAN offload path */
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

		len = virtchnl_struct_size(vvfl, vlan_id, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			/* Shrink count until the message fits; the rest is
			 * sent on a later pass (more keeps the AQ flag set).
			 */
			dev_info(&adapter->pdev->dev,
				 "virtchnl: Too many VLAN add (v1) requests; splitting into multiple messages to PF\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl, vlan_id,
							   --count);
			more = true;
		}
		/* GFP_ATOMIC: under the mac_vlan_list_lock spinlock */
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_ADD) {
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				/* IS_NEW: sent to PF, awaiting its verdict */
				f->state = IAVF_VLAN_IS_NEW;
				if (i == count)
					break;
			}
		}
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		/* VLAN V2 path: also bounded by the PF's max_filters cap */
		u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
		u16 current_vlans = iavf_get_num_vlans_added(adapter);
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;

		/* Clamp to the remaining PF-side filter budget */
		if ((count + current_vlans) > max_vlans &&
		    current_vlans < max_vlans) {
			count = max_vlans - iavf_get_num_vlans_added(adapter);
			more = true;
		}

		len = virtchnl_struct_size(vvfl_v2, filters, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_info(&adapter->pdev->dev,
				 "virtchnl: Too many VLAN add (v2) requests; splitting into multiple messages to PF\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl_v2, filters,
							   --count);
			more = true;
		}

		/* GFP_ATOMIC: under the mac_vlan_list_lock spinlock */
		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_ADD) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				if (i == count)
					break;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				i++;
				/* IS_NEW: sent to PF, awaiting its verdict */
				f->state = IAVF_VLAN_IS_NEW;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}
891 
/**
 * iavf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 * Handles both removal (filter freed) and disable (filter kept but
 * marked inactive). Uses the legacy VIRTCHNL_OP_DEL_VLAN when plain
 * VLAN offload was negotiated, otherwise the VLAN V2 variant.
 **/
void iavf_del_vlans(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		/* since VLAN capabilities are not allowed, we dont want to send
		 * a VLAN delete request because it will most likely fail and
		 * create unnecessary errors/noise, so just free the VLAN
		 * filters marked for removal to enable bailing out before
		 * sending a virtchnl message
		 */
		if (f->state == IAVF_VLAN_REMOVE &&
		    !VLAN_FILTERING_ALLOWED(adapter)) {
			list_del(&f->list);
			kfree(f);
			adapter->num_vlan_filters--;
		} else if (f->state == IAVF_VLAN_DISABLE &&
		    !VLAN_FILTERING_ALLOWED(adapter)) {
			f->state = IAVF_VLAN_INACTIVE;
		} else if (f->state == IAVF_VLAN_REMOVE ||
			   f->state == IAVF_VLAN_DISABLE) {
			count++;
		}
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	if (VLAN_ALLOWED(adapter)) {
		/* Legacy (v1) VLAN offload path */
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

		len = virtchnl_struct_size(vvfl, vlan_id, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			/* Shrink count until the message fits; the rest is
			 * sent on a later pass (more keeps the AQ flag set).
			 */
			dev_info(&adapter->pdev->dev,
				 "virtchnl: Too many VLAN delete (v1) requests; splitting into multiple messages to PF\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl, vlan_id,
							   --count);
			more = true;
		}
		/* GFP_ATOMIC: under the mac_vlan_list_lock spinlock */
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		/* _safe variant: REMOVE entries are freed while walking */
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_DISABLE) {
				vvfl->vlan_id[i] = f->vlan.vid;
				/* Disabled filters stay on the list */
				f->state = IAVF_VLAN_INACTIVE;
				i++;
				if (i == count)
					break;
			} else if (f->state == IAVF_VLAN_REMOVE) {
				vvfl->vlan_id[i] = f->vlan.vid;
				list_del(&f->list);
				kfree(f);
				adapter->num_vlan_filters--;
				i++;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		/* VLAN V2 path */
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;

		len = virtchnl_struct_size(vvfl_v2, filters, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_info(&adapter->pdev->dev,
				 "virtchnl: Too many VLAN delete (v2) requests; splitting into multiple messages to PF\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl_v2, filters,
							   --count);
			more = true;
		}

		/* GFP_ATOMIC: under the mac_vlan_list_lock spinlock */
		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		/* _safe variant: REMOVE entries are freed while walking */
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_DISABLE ||
			    f->state == IAVF_VLAN_REMOVE) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				if (f->state == IAVF_VLAN_DISABLE) {
					/* Disabled filters stay on the list */
					f->state = IAVF_VLAN_INACTIVE;
				} else {
					list_del(&f->list);
					kfree(f);
					adapter->num_vlan_filters--;
				}
				i++;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}
1048 
1049 /**
1050  * iavf_set_promiscuous
1051  * @adapter: adapter structure
1052  *
1053  * Request that the PF enable promiscuous mode for our VSI.
1054  **/
1055 void iavf_set_promiscuous(struct iavf_adapter *adapter)
1056 {
1057 	struct net_device *netdev = adapter->netdev;
1058 	struct virtchnl_promisc_info vpi;
1059 	unsigned int flags;
1060 
1061 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1062 		/* bail because we already have a command pending */
1063 		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
1064 			adapter->current_op);
1065 		return;
1066 	}
1067 
1068 	/* prevent changes to promiscuous flags */
1069 	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
1070 
1071 	/* sanity check to prevent duplicate AQ calls */
1072 	if (!iavf_promiscuous_mode_changed(adapter)) {
1073 		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
1074 		dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
1075 		/* allow changes to promiscuous flags */
1076 		spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
1077 		return;
1078 	}
1079 
1080 	/* there are 2 bits, but only 3 states */
1081 	if (!(netdev->flags & IFF_PROMISC) &&
1082 	    netdev->flags & IFF_ALLMULTI) {
1083 		/* State 1  - only multicast promiscuous mode enabled
1084 		 * - !IFF_PROMISC && IFF_ALLMULTI
1085 		 */
1086 		flags = FLAG_VF_MULTICAST_PROMISC;
1087 		adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
1088 		adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
1089 		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
1090 	} else if (!(netdev->flags & IFF_PROMISC) &&
1091 		   !(netdev->flags & IFF_ALLMULTI)) {
1092 		/* State 2 - unicast/multicast promiscuous mode disabled
1093 		 * - !IFF_PROMISC && !IFF_ALLMULTI
1094 		 */
1095 		flags = 0;
1096 		adapter->current_netdev_promisc_flags &=
1097 			~(IFF_PROMISC | IFF_ALLMULTI);
1098 		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
1099 	} else {
1100 		/* State 3 - unicast/multicast promiscuous mode enabled
1101 		 * - IFF_PROMISC && IFF_ALLMULTI
1102 		 * - IFF_PROMISC && !IFF_ALLMULTI
1103 		 */
1104 		flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
1105 		adapter->current_netdev_promisc_flags |= IFF_PROMISC;
1106 		if (netdev->flags & IFF_ALLMULTI)
1107 			adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
1108 		else
1109 			adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
1110 
1111 		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
1112 	}
1113 
1114 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
1115 
1116 	/* allow changes to promiscuous flags */
1117 	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
1118 
1119 	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
1120 	vpi.vsi_id = adapter->vsi_res->vsi_id;
1121 	vpi.flags = flags;
1122 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1123 			 (u8 *)&vpi, sizeof(vpi));
1124 }
1125 
1126 /**
1127  * iavf_request_stats
1128  * @adapter: adapter structure
1129  *
1130  * Request VSI statistics from PF.
1131  **/
1132 void iavf_request_stats(struct iavf_adapter *adapter)
1133 {
1134 	struct virtchnl_queue_select vqs;
1135 
1136 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1137 		/* no error message, this isn't crucial */
1138 		return;
1139 	}
1140 
1141 	adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
1142 	adapter->current_op = VIRTCHNL_OP_GET_STATS;
1143 	vqs.vsi_id = adapter->vsi_res->vsi_id;
1144 	/* queue maps are ignored for this message - only the vsi is used */
1145 	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
1146 			     sizeof(vqs)))
1147 		/* if the request failed, don't lock out others */
1148 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1149 }
1150 
1151 /**
1152  * iavf_get_rss_hashcfg
1153  * @adapter: adapter structure
1154  *
1155  * Request RSS Hash enable bits from PF
1156  **/
1157 void iavf_get_rss_hashcfg(struct iavf_adapter *adapter)
1158 {
1159 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1160 		/* bail because we already have a command pending */
1161 		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
1162 			adapter->current_op);
1163 		return;
1164 	}
1165 	adapter->current_op = VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS;
1166 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_RSS_HASHCFG;
1167 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, NULL, 0);
1168 }
1169 
1170 /**
1171  * iavf_set_rss_hashcfg
1172  * @adapter: adapter structure
1173  *
1174  * Request the PF to set our RSS hash capabilities
1175  **/
1176 void iavf_set_rss_hashcfg(struct iavf_adapter *adapter)
1177 {
1178 	struct virtchnl_rss_hashcfg vrh;
1179 
1180 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1181 		/* bail because we already have a command pending */
1182 		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
1183 			adapter->current_op);
1184 		return;
1185 	}
1186 	vrh.hashcfg = adapter->rss_hashcfg;
1187 	adapter->current_op = VIRTCHNL_OP_SET_RSS_HASHCFG;
1188 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HASHCFG;
1189 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HASHCFG, (u8 *)&vrh,
1190 			 sizeof(vrh));
1191 }
1192 
1193 /**
1194  * iavf_set_rss_key
1195  * @adapter: adapter structure
1196  *
1197  * Request the PF to set our RSS hash key
1198  **/
1199 void iavf_set_rss_key(struct iavf_adapter *adapter)
1200 {
1201 	struct virtchnl_rss_key *vrk;
1202 	int len;
1203 
1204 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1205 		/* bail because we already have a command pending */
1206 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
1207 			adapter->current_op);
1208 		return;
1209 	}
1210 	len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
1211 	vrk = kzalloc(len, GFP_KERNEL);
1212 	if (!vrk)
1213 		return;
1214 	vrk->vsi_id = adapter->vsi.id;
1215 	vrk->key_len = adapter->rss_key_size;
1216 	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
1217 
1218 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
1219 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
1220 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
1221 	kfree(vrk);
1222 }
1223 
1224 /**
1225  * iavf_set_rss_lut
1226  * @adapter: adapter structure
1227  *
1228  * Request the PF to set our RSS lookup table
1229  **/
1230 void iavf_set_rss_lut(struct iavf_adapter *adapter)
1231 {
1232 	struct virtchnl_rss_lut *vrl;
1233 	int len;
1234 
1235 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1236 		/* bail because we already have a command pending */
1237 		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
1238 			adapter->current_op);
1239 		return;
1240 	}
1241 	len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
1242 	vrl = kzalloc(len, GFP_KERNEL);
1243 	if (!vrl)
1244 		return;
1245 	vrl->vsi_id = adapter->vsi.id;
1246 	vrl->lut_entries = adapter->rss_lut_size;
1247 	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
1248 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
1249 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
1250 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
1251 	kfree(vrl);
1252 }
1253 
1254 /**
1255  * iavf_set_rss_hfunc
1256  * @adapter: adapter structure
1257  *
1258  * Request the PF to set our RSS Hash function
1259  **/
1260 void iavf_set_rss_hfunc(struct iavf_adapter *adapter)
1261 {
1262 	struct virtchnl_rss_hfunc *vrh;
1263 	int len = sizeof(*vrh);
1264 
1265 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1266 		/* bail because we already have a command pending */
1267 		dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n",
1268 			adapter->current_op);
1269 		return;
1270 	}
1271 	vrh = kzalloc(len, GFP_KERNEL);
1272 	if (!vrh)
1273 		return;
1274 	vrh->vsi_id = adapter->vsi.id;
1275 	vrh->rss_algorithm = adapter->hfunc;
1276 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC;
1277 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC;
1278 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len);
1279 	kfree(vrh);
1280 }
1281 
1282 /**
1283  * iavf_enable_vlan_stripping
1284  * @adapter: adapter structure
1285  *
1286  * Request VLAN header stripping to be enabled
1287  **/
1288 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
1289 {
1290 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1291 		/* bail because we already have a command pending */
1292 		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
1293 			adapter->current_op);
1294 		return;
1295 	}
1296 	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1297 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
1298 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
1299 }
1300 
1301 /**
1302  * iavf_disable_vlan_stripping
1303  * @adapter: adapter structure
1304  *
1305  * Request VLAN header stripping to be disabled
1306  **/
1307 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
1308 {
1309 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1310 		/* bail because we already have a command pending */
1311 		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
1312 			adapter->current_op);
1313 		return;
1314 	}
1315 	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1316 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
1317 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
1318 }
1319 
1320 /**
1321  * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
1322  * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
1323  */
1324 static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
1325 {
1326 	switch (tpid) {
1327 	case ETH_P_8021Q:
1328 		return VIRTCHNL_VLAN_ETHERTYPE_8100;
1329 	case ETH_P_8021AD:
1330 		return VIRTCHNL_VLAN_ETHERTYPE_88A8;
1331 	}
1332 
1333 	return 0;
1334 }
1335 
1336 /**
1337  * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
1338  * @adapter: adapter structure
1339  * @msg: message structure used for updating offloads over virtchnl to update
1340  * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
1341  * @offload_op: opcode used to determine which support structure to check
1342  */
1343 static int
1344 iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
1345 			      struct virtchnl_vlan_setting *msg, u16 tpid,
1346 			      enum virtchnl_ops offload_op)
1347 {
1348 	struct virtchnl_vlan_supported_caps *offload_support;
1349 	u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);
1350 
1351 	/* reference the correct offload support structure */
1352 	switch (offload_op) {
1353 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1354 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1355 		offload_support =
1356 			&adapter->vlan_v2_caps.offloads.stripping_support;
1357 		break;
1358 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1359 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1360 		offload_support =
1361 			&adapter->vlan_v2_caps.offloads.insertion_support;
1362 		break;
1363 	default:
1364 		dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
1365 			offload_op);
1366 		return -EINVAL;
1367 	}
1368 
1369 	/* make sure ethertype is supported */
1370 	if (offload_support->outer & vc_ethertype &&
1371 	    offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
1372 		msg->outer_ethertype_setting = vc_ethertype;
1373 	} else if (offload_support->inner & vc_ethertype &&
1374 		   offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
1375 		msg->inner_ethertype_setting = vc_ethertype;
1376 	} else {
1377 		dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
1378 			offload_op, tpid);
1379 		return -EINVAL;
1380 	}
1381 
1382 	return 0;
1383 }
1384 
1385 /**
1386  * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
1387  * @adapter: adapter structure
1388  * @tpid: VLAN TPID
1389  * @offload_op: opcode used to determine which AQ required bit to clear
1390  */
1391 static void
1392 iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
1393 				  enum virtchnl_ops offload_op)
1394 {
1395 	switch (offload_op) {
1396 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1397 		if (tpid == ETH_P_8021Q)
1398 			adapter->aq_required &=
1399 				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
1400 		else if (tpid == ETH_P_8021AD)
1401 			adapter->aq_required &=
1402 				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
1403 		break;
1404 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1405 		if (tpid == ETH_P_8021Q)
1406 			adapter->aq_required &=
1407 				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
1408 		else if (tpid == ETH_P_8021AD)
1409 			adapter->aq_required &=
1410 				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
1411 		break;
1412 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1413 		if (tpid == ETH_P_8021Q)
1414 			adapter->aq_required &=
1415 				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
1416 		else if (tpid == ETH_P_8021AD)
1417 			adapter->aq_required &=
1418 				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
1419 		break;
1420 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1421 		if (tpid == ETH_P_8021Q)
1422 			adapter->aq_required &=
1423 				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
1424 		else if (tpid == ETH_P_8021AD)
1425 			adapter->aq_required &=
1426 				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
1427 		break;
1428 	default:
1429 		dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
1430 			offload_op);
1431 	}
1432 }
1433 
1434 /**
1435  * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
1436  * @adapter: adapter structure
1437  * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
1438  * @offload_op: offload_op used to make the request over virtchnl
1439  */
1440 static void
1441 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
1442 			  enum virtchnl_ops offload_op)
1443 {
1444 	struct virtchnl_vlan_setting *msg;
1445 	int len = sizeof(*msg);
1446 
1447 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1448 		/* bail because we already have a command pending */
1449 		dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
1450 			offload_op, adapter->current_op);
1451 		return;
1452 	}
1453 
1454 	adapter->current_op = offload_op;
1455 
1456 	msg = kzalloc(len, GFP_KERNEL);
1457 	if (!msg)
1458 		return;
1459 
1460 	msg->vport_id = adapter->vsi_res->vsi_id;
1461 
1462 	/* always clear to prevent unsupported and endless requests */
1463 	iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
1464 
1465 	/* only send valid offload requests */
1466 	if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
1467 		iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
1468 	else
1469 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1470 
1471 	kfree(msg);
1472 }
1473 
1474 /**
1475  * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
1476  * @adapter: adapter structure
1477  * @tpid: VLAN TPID used to enable VLAN stripping
1478  */
1479 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1480 {
1481 	iavf_send_vlan_offload_v2(adapter, tpid,
1482 				  VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
1483 }
1484 
1485 /**
1486  * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
1487  * @adapter: adapter structure
1488  * @tpid: VLAN TPID used to disable VLAN stripping
1489  */
1490 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1491 {
1492 	iavf_send_vlan_offload_v2(adapter, tpid,
1493 				  VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
1494 }
1495 
1496 /**
1497  * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
1498  * @adapter: adapter structure
1499  * @tpid: VLAN TPID used to enable VLAN insertion
1500  */
1501 void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1502 {
1503 	iavf_send_vlan_offload_v2(adapter, tpid,
1504 				  VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
1505 }
1506 
1507 /**
1508  * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
1509  * @adapter: adapter structure
1510  * @tpid: VLAN TPID used to disable VLAN insertion
1511  */
1512 void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1513 {
1514 	iavf_send_vlan_offload_v2(adapter, tpid,
1515 				  VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
1516 }
1517 
1518 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
1519 /**
1520  * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command
1521  * @adapter: adapter private structure
1522  *
1523  * De-queue one PTP command request and send the command message to the PF.
1524  * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
1525  */
1526 void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
1527 {
1528 	struct iavf_ptp_aq_cmd *cmd;
1529 	int err;
1530 
1531 	if (!adapter->ptp.clock) {
1532 		/* This shouldn't be possible to hit, since no messages should
1533 		 * be queued if PTP is not initialized.
1534 		 */
1535 		pci_err(adapter->pdev, "PTP is not initialized\n");
1536 		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
1537 		return;
1538 	}
1539 
1540 	mutex_lock(&adapter->ptp.aq_cmd_lock);
1541 	cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
1542 				       struct iavf_ptp_aq_cmd, list);
1543 	if (!cmd) {
1544 		/* no further PTP messages to send */
1545 		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
1546 		goto out_unlock;
1547 	}
1548 
1549 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1550 		/* bail because we already have a command pending */
1551 		pci_err(adapter->pdev,
1552 			"Cannot send PTP command %d, command %d pending\n",
1553 			cmd->v_opcode, adapter->current_op);
1554 		goto out_unlock;
1555 	}
1556 
1557 	err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
1558 	if (!err) {
1559 		/* Command was sent without errors, so we can remove it from
1560 		 * the list and discard it.
1561 		 */
1562 		list_del(&cmd->list);
1563 		kfree(cmd);
1564 	} else {
1565 		/* We failed to send the command, try again next cycle */
1566 		pci_err(adapter->pdev, "Failed to send PTP command %d\n",
1567 			cmd->v_opcode);
1568 	}
1569 
1570 	if (list_empty(&adapter->ptp.aq_cmds))
1571 		/* no further PTP messages to send */
1572 		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
1573 
1574 out_unlock:
1575 	mutex_unlock(&adapter->ptp.aq_cmd_lock);
1576 }
1577 #endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
1578 
1579 /**
1580  * iavf_print_link_message - print link up or down
1581  * @adapter: adapter structure
1582  *
1583  * Log a message telling the world of our wonderous link status
1584  */
1585 static void iavf_print_link_message(struct iavf_adapter *adapter)
1586 {
1587 	struct net_device *netdev = adapter->netdev;
1588 	int link_speed_mbps;
1589 	char *speed;
1590 
1591 	if (!adapter->link_up) {
1592 		netdev_info(netdev, "NIC Link is Down\n");
1593 		return;
1594 	}
1595 
1596 	if (ADV_LINK_SUPPORT(adapter)) {
1597 		link_speed_mbps = adapter->link_speed_mbps;
1598 		goto print_link_msg;
1599 	}
1600 
1601 	switch (adapter->link_speed) {
1602 	case VIRTCHNL_LINK_SPEED_40GB:
1603 		link_speed_mbps = SPEED_40000;
1604 		break;
1605 	case VIRTCHNL_LINK_SPEED_25GB:
1606 		link_speed_mbps = SPEED_25000;
1607 		break;
1608 	case VIRTCHNL_LINK_SPEED_20GB:
1609 		link_speed_mbps = SPEED_20000;
1610 		break;
1611 	case VIRTCHNL_LINK_SPEED_10GB:
1612 		link_speed_mbps = SPEED_10000;
1613 		break;
1614 	case VIRTCHNL_LINK_SPEED_5GB:
1615 		link_speed_mbps = SPEED_5000;
1616 		break;
1617 	case VIRTCHNL_LINK_SPEED_2_5GB:
1618 		link_speed_mbps = SPEED_2500;
1619 		break;
1620 	case VIRTCHNL_LINK_SPEED_1GB:
1621 		link_speed_mbps = SPEED_1000;
1622 		break;
1623 	case VIRTCHNL_LINK_SPEED_100MB:
1624 		link_speed_mbps = SPEED_100;
1625 		break;
1626 	default:
1627 		link_speed_mbps = SPEED_UNKNOWN;
1628 		break;
1629 	}
1630 
1631 print_link_msg:
1632 	if (link_speed_mbps > SPEED_1000) {
1633 		if (link_speed_mbps == SPEED_2500) {
1634 			speed = kasprintf(GFP_KERNEL, "%s", "2.5 Gbps");
1635 		} else {
1636 			/* convert to Gbps inline */
1637 			speed = kasprintf(GFP_KERNEL, "%d Gbps",
1638 					  link_speed_mbps / 1000);
1639 		}
1640 	} else if (link_speed_mbps == SPEED_UNKNOWN) {
1641 		speed = kasprintf(GFP_KERNEL, "%s", "Unknown Mbps");
1642 	} else {
1643 		speed = kasprintf(GFP_KERNEL, "%d Mbps", link_speed_mbps);
1644 	}
1645 
1646 	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
1647 	kfree(speed);
1648 }
1649 
1650 /**
1651  * iavf_get_vpe_link_status
1652  * @adapter: adapter structure
1653  * @vpe: virtchnl_pf_event structure
1654  *
1655  * Helper function for determining the link status
1656  **/
1657 static bool
1658 iavf_get_vpe_link_status(struct iavf_adapter *adapter,
1659 			 struct virtchnl_pf_event *vpe)
1660 {
1661 	if (ADV_LINK_SUPPORT(adapter))
1662 		return vpe->event_data.link_event_adv.link_status;
1663 	else
1664 		return vpe->event_data.link_event.link_status;
1665 }
1666 
1667 /**
1668  * iavf_set_adapter_link_speed_from_vpe
1669  * @adapter: adapter structure for which we are setting the link speed
1670  * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
1671  *
1672  * Helper function for setting iavf_adapter link speed
1673  **/
1674 static void
1675 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
1676 				     struct virtchnl_pf_event *vpe)
1677 {
1678 	if (ADV_LINK_SUPPORT(adapter))
1679 		adapter->link_speed_mbps =
1680 			vpe->event_data.link_event_adv.link_speed;
1681 	else
1682 		adapter->link_speed = vpe->event_data.link_event.link_speed;
1683 }
1684 
1685 /**
1686  * iavf_get_qos_caps - get qos caps support
1687  * @adapter: iavf adapter struct instance
1688  *
1689  * This function requests PF for Supported QoS Caps.
1690  */
1691 void iavf_get_qos_caps(struct iavf_adapter *adapter)
1692 {
1693 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1694 		/* bail because we already have a command pending */
1695 		dev_err(&adapter->pdev->dev,
1696 			"Cannot get qos caps, command %d pending\n",
1697 			adapter->current_op);
1698 		return;
1699 	}
1700 
1701 	adapter->current_op = VIRTCHNL_OP_GET_QOS_CAPS;
1702 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_QOS_CAPS;
1703 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_QOS_CAPS, NULL, 0);
1704 }
1705 
1706 /**
1707  * iavf_set_quanta_size - set quanta size of queue chunk
1708  * @adapter: iavf adapter struct instance
1709  * @quanta_size: quanta size in bytes
1710  * @queue_index: starting index of queue chunk
1711  * @num_queues: number of queues in the queue chunk
1712  *
1713  * This function requests PF to set quanta size of queue chunk
1714  * starting at queue_index.
1715  */
1716 static void
1717 iavf_set_quanta_size(struct iavf_adapter *adapter, u16 quanta_size,
1718 		     u16 queue_index, u16 num_queues)
1719 {
1720 	struct virtchnl_quanta_cfg quanta_cfg;
1721 
1722 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1723 		/* bail because we already have a command pending */
1724 		dev_err(&adapter->pdev->dev,
1725 			"Cannot set queue quanta size, command %d pending\n",
1726 			adapter->current_op);
1727 		return;
1728 	}
1729 
1730 	adapter->current_op = VIRTCHNL_OP_CONFIG_QUANTA;
1731 	quanta_cfg.quanta_size = quanta_size;
1732 	quanta_cfg.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
1733 	quanta_cfg.queue_select.start_queue_id = queue_index;
1734 	quanta_cfg.queue_select.num_queues = num_queues;
1735 	adapter->aq_required &= ~IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
1736 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUANTA,
1737 			 (u8 *)&quanta_cfg, sizeof(quanta_cfg));
1738 }
1739 
1740 /**
1741  * iavf_cfg_queues_quanta_size - configure quanta size of queues
1742  * @adapter: adapter structure
1743  *
1744  * Request that the PF configure quanta size of allocated queues.
1745  **/
1746 void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter)
1747 {
1748 	int quanta_size = IAVF_DEFAULT_QUANTA_SIZE;
1749 
1750 	/* Set Queue Quanta Size to default */
1751 	iavf_set_quanta_size(adapter, quanta_size, 0,
1752 			     adapter->num_active_queues);
1753 }
1754 
1755 /**
1756  * iavf_cfg_queues_bw - configure bandwidth of allocated queues
1757  * @adapter: iavf adapter structure instance
1758  *
1759  * This function requests PF to configure queue bandwidth of allocated queues
1760  */
1761 void iavf_cfg_queues_bw(struct iavf_adapter *adapter)
1762 {
1763 	struct virtchnl_queues_bw_cfg *qs_bw_cfg;
1764 	struct net_shaper *q_shaper;
1765 	int qs_to_update = 0;
1766 	int i, inx = 0;
1767 	size_t len;
1768 
1769 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1770 		/* bail because we already have a command pending */
1771 		dev_err(&adapter->pdev->dev,
1772 			"Cannot set tc queue bw, command %d pending\n",
1773 			adapter->current_op);
1774 		return;
1775 	}
1776 
1777 	for (i = 0; i < adapter->num_active_queues; i++) {
1778 		if (adapter->tx_rings[i].q_shaper_update)
1779 			qs_to_update++;
1780 	}
1781 	len = struct_size(qs_bw_cfg, cfg, qs_to_update);
1782 	qs_bw_cfg = kzalloc(len, GFP_KERNEL);
1783 	if (!qs_bw_cfg)
1784 		return;
1785 
1786 	qs_bw_cfg->vsi_id = adapter->vsi.id;
1787 	qs_bw_cfg->num_queues = qs_to_update;
1788 
1789 	for (i = 0; i < adapter->num_active_queues; i++) {
1790 		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1791 
1792 		q_shaper = &tx_ring->q_shaper;
1793 		if (tx_ring->q_shaper_update) {
1794 			qs_bw_cfg->cfg[inx].queue_id = i;
1795 			qs_bw_cfg->cfg[inx].shaper.peak = q_shaper->bw_max;
1796 			qs_bw_cfg->cfg[inx].shaper.committed = q_shaper->bw_min;
1797 			qs_bw_cfg->cfg[inx].tc = 0;
1798 			inx++;
1799 		}
1800 	}
1801 
1802 	adapter->current_op = VIRTCHNL_OP_CONFIG_QUEUE_BW;
1803 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
1804 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUEUE_BW,
1805 			 (u8 *)qs_bw_cfg, len);
1806 	kfree(qs_bw_cfg);
1807 }
1808 
1809 /**
1810  * iavf_enable_channels
1811  * @adapter: adapter structure
1812  *
1813  * Request that the PF enable channels as specified by
1814  * the user via tc tool.
1815  **/
1816 void iavf_enable_channels(struct iavf_adapter *adapter)
1817 {
1818 	struct virtchnl_tc_info *vti = NULL;
1819 	size_t len;
1820 	int i;
1821 
1822 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1823 		/* bail because we already have a command pending */
1824 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1825 			adapter->current_op);
1826 		return;
1827 	}
1828 
1829 	len = virtchnl_struct_size(vti, list, adapter->num_tc);
1830 	vti = kzalloc(len, GFP_KERNEL);
1831 	if (!vti)
1832 		return;
1833 	vti->num_tc = adapter->num_tc;
1834 	for (i = 0; i < vti->num_tc; i++) {
1835 		vti->list[i].count = adapter->ch_config.ch_info[i].count;
1836 		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
1837 		vti->list[i].pad = 0;
1838 		vti->list[i].max_tx_rate =
1839 				adapter->ch_config.ch_info[i].max_tx_rate;
1840 	}
1841 
1842 	adapter->ch_config.state = __IAVF_TC_RUNNING;
1843 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1844 	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
1845 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
1846 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
1847 	kfree(vti);
1848 }
1849 
1850 /**
1851  * iavf_disable_channels
1852  * @adapter: adapter structure
1853  *
1854  * Request that the PF disable channels that are configured
1855  **/
1856 void iavf_disable_channels(struct iavf_adapter *adapter)
1857 {
1858 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1859 		/* bail because we already have a command pending */
1860 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1861 			adapter->current_op);
1862 		return;
1863 	}
1864 
1865 	adapter->ch_config.state = __IAVF_TC_INVALID;
1866 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1867 	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1868 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1869 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
1870 }
1871 
1872 /**
1873  * iavf_print_cloud_filter
1874  * @adapter: adapter structure
1875  * @f: cloud filter to print
1876  *
1877  * Print the cloud filter
1878  **/
1879 static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
1880 				    struct virtchnl_filter *f)
1881 {
1882 	switch (f->flow_type) {
1883 	case VIRTCHNL_TCP_V4_FLOW:
1884 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
1885 			 &f->data.tcp_spec.dst_mac,
1886 			 &f->data.tcp_spec.src_mac,
1887 			 ntohs(f->data.tcp_spec.vlan_id),
1888 			 &f->data.tcp_spec.dst_ip[0],
1889 			 &f->data.tcp_spec.src_ip[0],
1890 			 ntohs(f->data.tcp_spec.dst_port),
1891 			 ntohs(f->data.tcp_spec.src_port));
1892 		break;
1893 	case VIRTCHNL_TCP_V6_FLOW:
1894 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
1895 			 &f->data.tcp_spec.dst_mac,
1896 			 &f->data.tcp_spec.src_mac,
1897 			 ntohs(f->data.tcp_spec.vlan_id),
1898 			 &f->data.tcp_spec.dst_ip,
1899 			 &f->data.tcp_spec.src_ip,
1900 			 ntohs(f->data.tcp_spec.dst_port),
1901 			 ntohs(f->data.tcp_spec.src_port));
1902 		break;
1903 	}
1904 }
1905 
1906 /**
1907  * iavf_add_cloud_filter
1908  * @adapter: adapter structure
1909  *
1910  * Request that the PF add cloud filters as specified
1911  * by the user via tc tool.
1912  **/
1913 void iavf_add_cloud_filter(struct iavf_adapter *adapter)
1914 {
1915 	struct iavf_cloud_filter *cf;
1916 	struct virtchnl_filter *f;
1917 	int len = 0, count = 0;
1918 
1919 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1920 		/* bail because we already have a command pending */
1921 		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1922 			adapter->current_op);
1923 		return;
1924 	}
1925 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1926 		if (cf->add) {
1927 			count++;
1928 			break;
1929 		}
1930 	}
1931 	if (!count) {
1932 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1933 		return;
1934 	}
1935 	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1936 
1937 	len = sizeof(struct virtchnl_filter);
1938 	f = kzalloc(len, GFP_KERNEL);
1939 	if (!f)
1940 		return;
1941 
1942 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1943 		if (cf->add) {
1944 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1945 			cf->add = false;
1946 			cf->state = __IAVF_CF_ADD_PENDING;
1947 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
1948 					 (u8 *)f, len);
1949 		}
1950 	}
1951 	kfree(f);
1952 }
1953 
1954 /**
1955  * iavf_del_cloud_filter
1956  * @adapter: adapter structure
1957  *
1958  * Request that the PF delete cloud filters as specified
1959  * by the user via tc tool.
1960  **/
1961 void iavf_del_cloud_filter(struct iavf_adapter *adapter)
1962 {
1963 	struct iavf_cloud_filter *cf, *cftmp;
1964 	struct virtchnl_filter *f;
1965 	int len = 0, count = 0;
1966 
1967 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1968 		/* bail because we already have a command pending */
1969 		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1970 			adapter->current_op);
1971 		return;
1972 	}
1973 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1974 		if (cf->del) {
1975 			count++;
1976 			break;
1977 		}
1978 	}
1979 	if (!count) {
1980 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1981 		return;
1982 	}
1983 	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1984 
1985 	len = sizeof(struct virtchnl_filter);
1986 	f = kzalloc(len, GFP_KERNEL);
1987 	if (!f)
1988 		return;
1989 
1990 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1991 		if (cf->del) {
1992 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1993 			cf->del = false;
1994 			cf->state = __IAVF_CF_DEL_PENDING;
1995 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
1996 					 (u8 *)f, len);
1997 		}
1998 	}
1999 	kfree(f);
2000 }
2001 
2002 /**
2003  * iavf_add_fdir_filter
2004  * @adapter: the VF adapter structure
2005  *
2006  * Request that the PF add Flow Director filters as specified
2007  * by the user via ethtool.
2008  **/
2009 void iavf_add_fdir_filter(struct iavf_adapter *adapter)
2010 {
2011 	struct iavf_fdir_fltr *fdir;
2012 	struct virtchnl_fdir_add *f;
2013 	bool process_fltr = false;
2014 	int len;
2015 
2016 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2017 		/* bail because we already have a command pending */
2018 		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
2019 			adapter->current_op);
2020 		return;
2021 	}
2022 
2023 	len = sizeof(struct virtchnl_fdir_add);
2024 	f = kzalloc(len, GFP_KERNEL);
2025 	if (!f)
2026 		return;
2027 
2028 	spin_lock_bh(&adapter->fdir_fltr_lock);
2029 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
2030 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
2031 			process_fltr = true;
2032 			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
2033 			memcpy(f, &fdir->vc_add_msg, len);
2034 			break;
2035 		}
2036 	}
2037 	spin_unlock_bh(&adapter->fdir_fltr_lock);
2038 
2039 	if (!process_fltr) {
2040 		/* prevent iavf_add_fdir_filter() from being called when there
2041 		 * are no filters to add
2042 		 */
2043 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
2044 		kfree(f);
2045 		return;
2046 	}
2047 	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
2048 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
2049 	kfree(f);
2050 }
2051 
2052 /**
2053  * iavf_del_fdir_filter
2054  * @adapter: the VF adapter structure
2055  *
2056  * Request that the PF delete Flow Director filters as specified
2057  * by the user via ethtool.
2058  **/
2059 void iavf_del_fdir_filter(struct iavf_adapter *adapter)
2060 {
2061 	struct virtchnl_fdir_del f = {};
2062 	struct iavf_fdir_fltr *fdir;
2063 	bool process_fltr = false;
2064 	int len;
2065 
2066 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2067 		/* bail because we already have a command pending */
2068 		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
2069 			adapter->current_op);
2070 		return;
2071 	}
2072 
2073 	len = sizeof(struct virtchnl_fdir_del);
2074 
2075 	spin_lock_bh(&adapter->fdir_fltr_lock);
2076 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
2077 		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
2078 			process_fltr = true;
2079 			f.vsi_id = fdir->vc_add_msg.vsi_id;
2080 			f.flow_id = fdir->flow_id;
2081 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
2082 			break;
2083 		} else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
2084 			process_fltr = true;
2085 			f.vsi_id = fdir->vc_add_msg.vsi_id;
2086 			f.flow_id = fdir->flow_id;
2087 			fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
2088 			break;
2089 		}
2090 	}
2091 	spin_unlock_bh(&adapter->fdir_fltr_lock);
2092 
2093 	if (!process_fltr) {
2094 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
2095 		return;
2096 	}
2097 
2098 	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
2099 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
2100 }
2101 
2102 /**
2103  * iavf_add_adv_rss_cfg
2104  * @adapter: the VF adapter structure
2105  *
2106  * Request that the PF add RSS configuration as specified
2107  * by the user via ethtool.
2108  **/
2109 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
2110 {
2111 	struct virtchnl_rss_cfg *rss_cfg;
2112 	struct iavf_adv_rss *rss;
2113 	bool process_rss = false;
2114 	int len;
2115 
2116 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2117 		/* bail because we already have a command pending */
2118 		dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
2119 			adapter->current_op);
2120 		return;
2121 	}
2122 
2123 	len = sizeof(struct virtchnl_rss_cfg);
2124 	rss_cfg = kzalloc(len, GFP_KERNEL);
2125 	if (!rss_cfg)
2126 		return;
2127 
2128 	spin_lock_bh(&adapter->adv_rss_lock);
2129 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2130 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
2131 			process_rss = true;
2132 			rss->state = IAVF_ADV_RSS_ADD_PENDING;
2133 			memcpy(rss_cfg, &rss->cfg_msg, len);
2134 			iavf_print_adv_rss_cfg(adapter, rss,
2135 					       "Input set change for",
2136 					       "is pending");
2137 			break;
2138 		}
2139 	}
2140 	spin_unlock_bh(&adapter->adv_rss_lock);
2141 
2142 	if (process_rss) {
2143 		adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
2144 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
2145 				 (u8 *)rss_cfg, len);
2146 	} else {
2147 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
2148 	}
2149 
2150 	kfree(rss_cfg);
2151 }
2152 
2153 /**
2154  * iavf_del_adv_rss_cfg
2155  * @adapter: the VF adapter structure
2156  *
2157  * Request that the PF delete RSS configuration as specified
2158  * by the user via ethtool.
2159  **/
2160 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
2161 {
2162 	struct virtchnl_rss_cfg *rss_cfg;
2163 	struct iavf_adv_rss *rss;
2164 	bool process_rss = false;
2165 	int len;
2166 
2167 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2168 		/* bail because we already have a command pending */
2169 		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
2170 			adapter->current_op);
2171 		return;
2172 	}
2173 
2174 	len = sizeof(struct virtchnl_rss_cfg);
2175 	rss_cfg = kzalloc(len, GFP_KERNEL);
2176 	if (!rss_cfg)
2177 		return;
2178 
2179 	spin_lock_bh(&adapter->adv_rss_lock);
2180 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2181 		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
2182 			process_rss = true;
2183 			rss->state = IAVF_ADV_RSS_DEL_PENDING;
2184 			memcpy(rss_cfg, &rss->cfg_msg, len);
2185 			break;
2186 		}
2187 	}
2188 	spin_unlock_bh(&adapter->adv_rss_lock);
2189 
2190 	if (process_rss) {
2191 		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
2192 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
2193 				 (u8 *)rss_cfg, len);
2194 	} else {
2195 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
2196 	}
2197 
2198 	kfree(rss_cfg);
2199 }
2200 
2201 /**
2202  * iavf_request_reset
2203  * @adapter: adapter structure
2204  *
2205  * Request that the PF reset this VF. No response is expected.
2206  **/
2207 int iavf_request_reset(struct iavf_adapter *adapter)
2208 {
2209 	int err;
2210 	/* Don't check CURRENT_OP - this is always higher priority */
2211 	err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
2212 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2213 	return err;
2214 }
2215 
2216 /**
2217  * iavf_netdev_features_vlan_strip_set - update vlan strip status
2218  * @netdev: ptr to netdev being adjusted
2219  * @enable: enable or disable vlan strip
2220  *
2221  * Helper function to change vlan strip status in netdev->features.
2222  */
2223 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
2224 						const bool enable)
2225 {
2226 	if (enable)
2227 		netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2228 	else
2229 		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2230 }
2231 
2232 /**
2233  * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
2234  * @adapter: private adapter structure
2235  *
2236  * Called after a reset to re-add all FDIR filters and delete some of them
2237  * if they were pending to be deleted.
2238  */
2239 static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
2240 {
2241 	struct iavf_fdir_fltr *f, *ftmp;
2242 	bool add_filters = false;
2243 
2244 	spin_lock_bh(&adapter->fdir_fltr_lock);
2245 	list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
2246 		if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
2247 		    f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
2248 		    f->state == IAVF_FDIR_FLTR_ACTIVE) {
2249 			/* All filters and requests have been removed in PF,
2250 			 * restore them
2251 			 */
2252 			f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
2253 			add_filters = true;
2254 		} else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
2255 			   f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2256 			/* Link down state, leave filters as inactive */
2257 			f->state = IAVF_FDIR_FLTR_INACTIVE;
2258 		} else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
2259 			   f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2260 			/* Delete filters that were pending to be deleted, the
2261 			 * list on PF is already cleared after a reset
2262 			 */
2263 			list_del(&f->list);
2264 			iavf_dec_fdir_active_fltr(adapter, f);
2265 			kfree(f);
2266 		}
2267 	}
2268 	spin_unlock_bh(&adapter->fdir_fltr_lock);
2269 
2270 	if (add_filters)
2271 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
2272 }
2273 
2274 /**
2275  * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME
2276  * @adapter: private adapter structure
2277  * @data: the message from the PF
2278  * @len: length of the message from the PF
2279  *
2280  * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
2281  * is sent by the PF in response to the same op as a request from the VF.
2282  * Extract the 64bit nanoseconds time from the message and store it in
2283  * cached_phc_time. Then, notify any thread that is waiting for the update via
2284  * the wait queue.
2285  */
2286 static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
2287 				       void *data, u16 len)
2288 {
2289 	struct virtchnl_phc_time *msg = data;
2290 
2291 	if (len != sizeof(*msg)) {
2292 		dev_err_once(&adapter->pdev->dev,
2293 			     "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
2294 			     len, sizeof(*msg));
2295 		return;
2296 	}
2297 
2298 	adapter->ptp.cached_phc_time = msg->time;
2299 	adapter->ptp.cached_phc_updated = jiffies;
2300 	adapter->ptp.phc_time_ready = true;
2301 
2302 	wake_up(&adapter->ptp.phc_time_waitqueue);
2303 }
2304 
2305 /**
2306  * iavf_virtchnl_completion
2307  * @adapter: adapter structure
2308  * @v_opcode: opcode sent by PF
2309  * @v_retval: retval sent by PF
2310  * @msg: message sent by PF
2311  * @msglen: message length
2312  *
2313  * Asynchronous completion function for admin queue messages. Rather than busy
2314  * wait, we fire off our requests and assume that no errors will be returned.
2315  * This function handles the reply messages.
2316  **/
2317 void iavf_virtchnl_completion(struct iavf_adapter *adapter,
2318 			      enum virtchnl_ops v_opcode,
2319 			      enum iavf_status v_retval, u8 *msg, u16 msglen)
2320 {
2321 	struct net_device *netdev = adapter->netdev;
2322 
2323 	if (v_opcode == VIRTCHNL_OP_EVENT) {
2324 		struct virtchnl_pf_event *vpe =
2325 			(struct virtchnl_pf_event *)msg;
2326 		bool link_up = iavf_get_vpe_link_status(adapter, vpe);
2327 
2328 		switch (vpe->event) {
2329 		case VIRTCHNL_EVENT_LINK_CHANGE:
2330 			iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
2331 
2332 			/* we've already got the right link status, bail */
2333 			if (adapter->link_up == link_up)
2334 				break;
2335 
2336 			if (link_up) {
2337 				/* If we get link up message and start queues
2338 				 * before our queues are configured it will
2339 				 * trigger a TX hang. In that case, just ignore
2340 				 * the link status message,we'll get another one
2341 				 * after we enable queues and actually prepared
2342 				 * to send traffic.
2343 				 */
2344 				if (adapter->state != __IAVF_RUNNING)
2345 					break;
2346 
2347 				/* For ADq enabled VF, we reconfigure VSIs and
2348 				 * re-allocate queues. Hence wait till all
2349 				 * queues are enabled.
2350 				 */
2351 				if (adapter->flags &
2352 				    IAVF_FLAG_QUEUES_DISABLED)
2353 					break;
2354 			}
2355 
2356 			adapter->link_up = link_up;
2357 			if (link_up) {
2358 				netif_tx_start_all_queues(netdev);
2359 				netif_carrier_on(netdev);
2360 			} else {
2361 				netif_tx_stop_all_queues(netdev);
2362 				netif_carrier_off(netdev);
2363 			}
2364 			iavf_print_link_message(adapter);
2365 			break;
2366 		case VIRTCHNL_EVENT_RESET_IMPENDING:
2367 			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
2368 			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
2369 				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
2370 				iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
2371 			}
2372 			break;
2373 		default:
2374 			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
2375 				vpe->event);
2376 			break;
2377 		}
2378 		return;
2379 	}
2380 	if (v_retval) {
2381 		switch (v_opcode) {
2382 		case VIRTCHNL_OP_ADD_VLAN:
2383 			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2384 				iavf_stat_str(&adapter->hw, v_retval));
2385 			break;
2386 		case VIRTCHNL_OP_ADD_ETH_ADDR:
2387 			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
2388 				iavf_stat_str(&adapter->hw, v_retval));
2389 			iavf_mac_add_reject(adapter);
2390 			/* restore administratively set MAC address */
2391 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2392 			wake_up(&adapter->vc_waitqueue);
2393 			break;
2394 		case VIRTCHNL_OP_DEL_VLAN:
2395 			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
2396 				iavf_stat_str(&adapter->hw, v_retval));
2397 			break;
2398 		case VIRTCHNL_OP_DEL_ETH_ADDR:
2399 			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
2400 				iavf_stat_str(&adapter->hw, v_retval));
2401 			break;
2402 		case VIRTCHNL_OP_ENABLE_CHANNELS:
2403 			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
2404 				iavf_stat_str(&adapter->hw, v_retval));
2405 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2406 			adapter->ch_config.state = __IAVF_TC_INVALID;
2407 			netdev_reset_tc(netdev);
2408 			netif_tx_start_all_queues(netdev);
2409 			break;
2410 		case VIRTCHNL_OP_DISABLE_CHANNELS:
2411 			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
2412 				iavf_stat_str(&adapter->hw, v_retval));
2413 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2414 			adapter->ch_config.state = __IAVF_TC_RUNNING;
2415 			netif_tx_start_all_queues(netdev);
2416 			break;
2417 		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2418 			struct iavf_cloud_filter *cf, *cftmp;
2419 
2420 			list_for_each_entry_safe(cf, cftmp,
2421 						 &adapter->cloud_filter_list,
2422 						 list) {
2423 				if (cf->state == __IAVF_CF_ADD_PENDING) {
2424 					cf->state = __IAVF_CF_INVALID;
2425 					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
2426 						 iavf_stat_str(&adapter->hw,
2427 							       v_retval));
2428 					iavf_print_cloud_filter(adapter,
2429 								&cf->f);
2430 					list_del(&cf->list);
2431 					kfree(cf);
2432 					adapter->num_cloud_filters--;
2433 				}
2434 			}
2435 			}
2436 			break;
2437 		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2438 			struct iavf_cloud_filter *cf;
2439 
2440 			list_for_each_entry(cf, &adapter->cloud_filter_list,
2441 					    list) {
2442 				if (cf->state == __IAVF_CF_DEL_PENDING) {
2443 					cf->state = __IAVF_CF_ACTIVE;
2444 					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
2445 						 iavf_stat_str(&adapter->hw,
2446 							       v_retval));
2447 					iavf_print_cloud_filter(adapter,
2448 								&cf->f);
2449 				}
2450 			}
2451 			}
2452 			break;
2453 		case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2454 			struct iavf_fdir_fltr *fdir, *fdir_tmp;
2455 
2456 			spin_lock_bh(&adapter->fdir_fltr_lock);
2457 			list_for_each_entry_safe(fdir, fdir_tmp,
2458 						 &adapter->fdir_list_head,
2459 						 list) {
2460 				if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2461 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
2462 						 iavf_stat_str(&adapter->hw,
2463 							       v_retval));
2464 					iavf_print_fdir_fltr(adapter, fdir);
2465 					if (msglen)
2466 						dev_err(&adapter->pdev->dev,
2467 							"%s\n", msg);
2468 					list_del(&fdir->list);
2469 					iavf_dec_fdir_active_fltr(adapter, fdir);
2470 					kfree(fdir);
2471 				}
2472 			}
2473 			spin_unlock_bh(&adapter->fdir_fltr_lock);
2474 			}
2475 			break;
2476 		case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2477 			struct iavf_fdir_fltr *fdir;
2478 
2479 			spin_lock_bh(&adapter->fdir_fltr_lock);
2480 			list_for_each_entry(fdir, &adapter->fdir_list_head,
2481 					    list) {
2482 				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
2483 				    fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2484 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2485 					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
2486 						 iavf_stat_str(&adapter->hw,
2487 							       v_retval));
2488 					iavf_print_fdir_fltr(adapter, fdir);
2489 				}
2490 			}
2491 			spin_unlock_bh(&adapter->fdir_fltr_lock);
2492 			}
2493 			break;
2494 		case VIRTCHNL_OP_ADD_RSS_CFG: {
2495 			struct iavf_adv_rss *rss, *rss_tmp;
2496 
2497 			spin_lock_bh(&adapter->adv_rss_lock);
2498 			list_for_each_entry_safe(rss, rss_tmp,
2499 						 &adapter->adv_rss_list_head,
2500 						 list) {
2501 				if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2502 					iavf_print_adv_rss_cfg(adapter, rss,
2503 							       "Failed to change the input set for",
2504 							       NULL);
2505 					list_del(&rss->list);
2506 					kfree(rss);
2507 				}
2508 			}
2509 			spin_unlock_bh(&adapter->adv_rss_lock);
2510 			}
2511 			break;
2512 		case VIRTCHNL_OP_DEL_RSS_CFG: {
2513 			struct iavf_adv_rss *rss;
2514 
2515 			spin_lock_bh(&adapter->adv_rss_lock);
2516 			list_for_each_entry(rss, &adapter->adv_rss_list_head,
2517 					    list) {
2518 				if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2519 					rss->state = IAVF_ADV_RSS_ACTIVE;
2520 					dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
2521 						iavf_stat_str(&adapter->hw,
2522 							      v_retval));
2523 				}
2524 			}
2525 			spin_unlock_bh(&adapter->adv_rss_lock);
2526 			}
2527 			break;
2528 		case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2529 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2530 			/* Vlan stripping could not be enabled by ethtool.
2531 			 * Disable it in netdev->features.
2532 			 */
2533 			iavf_netdev_features_vlan_strip_set(netdev, false);
2534 			break;
2535 		case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2536 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2537 			/* Vlan stripping could not be disabled by ethtool.
2538 			 * Enable it in netdev->features.
2539 			 */
2540 			iavf_netdev_features_vlan_strip_set(netdev, true);
2541 			break;
2542 		case VIRTCHNL_OP_ADD_VLAN_V2:
2543 			iavf_vlan_add_reject(adapter);
2544 			dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2545 				 iavf_stat_str(&adapter->hw, v_retval));
2546 			break;
2547 		case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
2548 			dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n",
2549 				 iavf_stat_str(&adapter->hw, v_retval));
2550 
2551 			if (adapter->hfunc ==
2552 					VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
2553 				adapter->hfunc =
2554 					VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
2555 			else
2556 				adapter->hfunc =
2557 					VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
2558 
2559 			break;
2560 		case VIRTCHNL_OP_GET_QOS_CAPS:
2561 			dev_warn(&adapter->pdev->dev, "Failed to Get Qos CAPs, error %s\n",
2562 				 iavf_stat_str(&adapter->hw, v_retval));
2563 			break;
2564 		case VIRTCHNL_OP_CONFIG_QUANTA:
2565 			dev_warn(&adapter->pdev->dev, "Failed to Config Quanta, error %s\n",
2566 				 iavf_stat_str(&adapter->hw, v_retval));
2567 			break;
2568 		case VIRTCHNL_OP_CONFIG_QUEUE_BW:
2569 			dev_warn(&adapter->pdev->dev, "Failed to Config Queue BW, error %s\n",
2570 				 iavf_stat_str(&adapter->hw, v_retval));
2571 			break;
2572 		default:
2573 			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
2574 				v_retval, iavf_stat_str(&adapter->hw, v_retval),
2575 				v_opcode);
2576 		}
2577 	}
2578 	switch (v_opcode) {
2579 	case VIRTCHNL_OP_ADD_ETH_ADDR:
2580 		if (!v_retval)
2581 			iavf_mac_add_ok(adapter);
2582 		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
2583 			if (!ether_addr_equal(netdev->dev_addr,
2584 					      adapter->hw.mac.addr)) {
2585 				netif_addr_lock_bh(netdev);
2586 				eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2587 				netif_addr_unlock_bh(netdev);
2588 			}
2589 		wake_up(&adapter->vc_waitqueue);
2590 		break;
2591 	case VIRTCHNL_OP_GET_STATS: {
2592 		struct iavf_eth_stats *stats =
2593 			(struct iavf_eth_stats *)msg;
2594 		netdev->stats.rx_packets = stats->rx_unicast +
2595 					   stats->rx_multicast +
2596 					   stats->rx_broadcast;
2597 		netdev->stats.tx_packets = stats->tx_unicast +
2598 					   stats->tx_multicast +
2599 					   stats->tx_broadcast;
2600 		netdev->stats.rx_bytes = stats->rx_bytes;
2601 		netdev->stats.tx_bytes = stats->tx_bytes;
2602 		netdev->stats.tx_errors = stats->tx_errors;
2603 		netdev->stats.rx_dropped = stats->rx_discards;
2604 		netdev->stats.tx_dropped = stats->tx_discards;
2605 		adapter->current_stats = *stats;
2606 		}
2607 		break;
2608 	case VIRTCHNL_OP_GET_VF_RESOURCES: {
2609 		u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
2610 
2611 		memcpy(adapter->vf_res, msg, min(msglen, len));
2612 		iavf_validate_num_queues(adapter);
2613 		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
2614 		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
2615 			/* restore current mac address */
2616 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2617 		} else {
2618 			netif_addr_lock_bh(netdev);
2619 			/* refresh current mac address if changed */
2620 			ether_addr_copy(netdev->perm_addr,
2621 					adapter->hw.mac.addr);
2622 			netif_addr_unlock_bh(netdev);
2623 		}
2624 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2625 		iavf_add_filter(adapter, adapter->hw.mac.addr);
2626 
2627 		if (VLAN_ALLOWED(adapter)) {
2628 			if (!list_empty(&adapter->vlan_filter_list)) {
2629 				struct iavf_vlan_filter *vlf;
2630 
2631 				/* re-add all VLAN filters over virtchnl */
2632 				list_for_each_entry(vlf,
2633 						    &adapter->vlan_filter_list,
2634 						    list)
2635 					vlf->state = IAVF_VLAN_ADD;
2636 
2637 				adapter->aq_required |=
2638 					IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2639 			}
2640 		}
2641 
2642 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2643 
2644 		iavf_activate_fdir_filters(adapter);
2645 
2646 		iavf_parse_vf_resource_msg(adapter);
2647 
2648 		/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
2649 		 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
2650 		 * configuration
2651 		 */
2652 		if (VLAN_V2_ALLOWED(adapter))
2653 			break;
2654 		/* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
2655 		 * wasn't successfully negotiated with the PF
2656 		 */
2657 		}
2658 		fallthrough;
2659 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
2660 		struct iavf_mac_filter *f;
2661 		bool was_mac_changed;
2662 		u64 aq_required = 0;
2663 
2664 		if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
2665 			memcpy(&adapter->vlan_v2_caps, msg,
2666 			       min_t(u16, msglen,
2667 				     sizeof(adapter->vlan_v2_caps)));
2668 
2669 		iavf_process_config(adapter);
2670 		adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
2671 		iavf_schedule_finish_config(adapter);
2672 
2673 		iavf_set_queue_vlan_tag_loc(adapter);
2674 
2675 		was_mac_changed = !ether_addr_equal(netdev->dev_addr,
2676 						    adapter->hw.mac.addr);
2677 
2678 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2679 
2680 		/* re-add all MAC filters */
2681 		list_for_each_entry(f, &adapter->mac_filter_list, list) {
2682 			if (was_mac_changed &&
2683 			    ether_addr_equal(netdev->dev_addr, f->macaddr))
2684 				ether_addr_copy(f->macaddr,
2685 						adapter->hw.mac.addr);
2686 
2687 			f->is_new_mac = true;
2688 			f->add = true;
2689 			f->add_handled = false;
2690 			f->remove = false;
2691 		}
2692 
2693 		/* re-add all VLAN filters */
2694 		if (VLAN_FILTERING_ALLOWED(adapter)) {
2695 			struct iavf_vlan_filter *vlf;
2696 
2697 			if (!list_empty(&adapter->vlan_filter_list)) {
2698 				list_for_each_entry(vlf,
2699 						    &adapter->vlan_filter_list,
2700 						    list)
2701 					vlf->state = IAVF_VLAN_ADD;
2702 
2703 				aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2704 			}
2705 		}
2706 
2707 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2708 
2709 		netif_addr_lock_bh(netdev);
2710 		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2711 		netif_addr_unlock_bh(netdev);
2712 
2713 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
2714 			aq_required;
2715 		}
2716 		break;
2717 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
2718 		if (msglen != sizeof(u64))
2719 			return;
2720 
2721 		adapter->supp_rxdids = *(u64 *)msg;
2722 
2723 		break;
2724 	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
2725 		if (msglen != sizeof(adapter->ptp.hw_caps))
2726 			return;
2727 
2728 		adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg;
2729 
2730 		/* process any state change needed due to new capabilities */
2731 		iavf_ptp_process_caps(adapter);
2732 		break;
2733 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
2734 		iavf_virtchnl_ptp_get_time(adapter, msg, msglen);
2735 		break;
2736 	case VIRTCHNL_OP_ENABLE_QUEUES:
2737 		/* enable transmits */
2738 		iavf_irq_enable(adapter, true);
2739 		wake_up(&adapter->reset_waitqueue);
2740 		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
2741 		break;
2742 	case VIRTCHNL_OP_DISABLE_QUEUES:
2743 		iavf_free_all_tx_resources(adapter);
2744 		iavf_free_all_rx_resources(adapter);
2745 		if (adapter->state == __IAVF_DOWN_PENDING) {
2746 			iavf_change_state(adapter, __IAVF_DOWN);
2747 			wake_up(&adapter->down_waitqueue);
2748 		}
2749 		break;
2750 	case VIRTCHNL_OP_VERSION:
2751 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2752 		/* Don't display an error if we get these out of sequence.
2753 		 * If the firmware needed to get kicked, we'll get these and
2754 		 * it's no problem.
2755 		 */
2756 		if (v_opcode != adapter->current_op)
2757 			return;
2758 		break;
2759 	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: {
2760 		struct virtchnl_rss_hashcfg *vrh =
2761 			(struct virtchnl_rss_hashcfg *)msg;
2762 
2763 		if (msglen == sizeof(*vrh))
2764 			adapter->rss_hashcfg = vrh->hashcfg;
2765 		else
2766 			dev_warn(&adapter->pdev->dev,
2767 				 "Invalid message %d from PF\n", v_opcode);
2768 		}
2769 		break;
2770 	case VIRTCHNL_OP_REQUEST_QUEUES: {
2771 		struct virtchnl_vf_res_request *vfres =
2772 			(struct virtchnl_vf_res_request *)msg;
2773 
2774 		if (vfres->num_queue_pairs != adapter->num_req_queues) {
2775 			dev_info(&adapter->pdev->dev,
2776 				 "Requested %d queues, PF can support %d\n",
2777 				 adapter->num_req_queues,
2778 				 vfres->num_queue_pairs);
2779 			adapter->num_req_queues = 0;
2780 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2781 		}
2782 		}
2783 		break;
2784 	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2785 		struct iavf_cloud_filter *cf;
2786 
2787 		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2788 			if (cf->state == __IAVF_CF_ADD_PENDING)
2789 				cf->state = __IAVF_CF_ACTIVE;
2790 		}
2791 		}
2792 		break;
2793 	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2794 		struct iavf_cloud_filter *cf, *cftmp;
2795 
2796 		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2797 					 list) {
2798 			if (cf->state == __IAVF_CF_DEL_PENDING) {
2799 				cf->state = __IAVF_CF_INVALID;
2800 				list_del(&cf->list);
2801 				kfree(cf);
2802 				adapter->num_cloud_filters--;
2803 			}
2804 		}
2805 		}
2806 		break;
2807 	case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2808 		struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
2809 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
2810 
2811 		spin_lock_bh(&adapter->fdir_fltr_lock);
2812 		list_for_each_entry_safe(fdir, fdir_tmp,
2813 					 &adapter->fdir_list_head,
2814 					 list) {
2815 			if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2816 				if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
2817 					if (!iavf_is_raw_fdir(fdir))
2818 						dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
2819 							 fdir->loc);
2820 					else
2821 						dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n",
2822 							 TC_U32_USERHTID(fdir->cls_u32_handle));
2823 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2824 					fdir->flow_id = add_fltr->flow_id;
2825 				} else {
2826 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
2827 						 add_fltr->status);
2828 					iavf_print_fdir_fltr(adapter, fdir);
2829 					list_del(&fdir->list);
2830 					iavf_dec_fdir_active_fltr(adapter, fdir);
2831 					kfree(fdir);
2832 				}
2833 			}
2834 		}
2835 		spin_unlock_bh(&adapter->fdir_fltr_lock);
2836 		}
2837 		break;
2838 	case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2839 		struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
2840 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
2841 
2842 		spin_lock_bh(&adapter->fdir_fltr_lock);
2843 		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
2844 					 list) {
2845 			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2846 				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
2847 				    del_fltr->status ==
2848 				    VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
2849 					if (!iavf_is_raw_fdir(fdir))
2850 						dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
2851 							 fdir->loc);
2852 					else
2853 						dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n",
2854 							 TC_U32_USERHTID(fdir->cls_u32_handle));
2855 					list_del(&fdir->list);
2856 					iavf_dec_fdir_active_fltr(adapter, fdir);
2857 					kfree(fdir);
2858 				} else {
2859 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2860 					dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
2861 						 del_fltr->status);
2862 					iavf_print_fdir_fltr(adapter, fdir);
2863 				}
2864 			} else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2865 				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
2866 				    del_fltr->status ==
2867 				    VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
2868 					fdir->state = IAVF_FDIR_FLTR_INACTIVE;
2869 				} else {
2870 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2871 					dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
2872 						 del_fltr->status);
2873 					iavf_print_fdir_fltr(adapter, fdir);
2874 				}
2875 			}
2876 		}
2877 		spin_unlock_bh(&adapter->fdir_fltr_lock);
2878 		}
2879 		break;
2880 	case VIRTCHNL_OP_ADD_RSS_CFG: {
2881 		struct iavf_adv_rss *rss;
2882 
2883 		spin_lock_bh(&adapter->adv_rss_lock);
2884 		list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2885 			if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2886 				iavf_print_adv_rss_cfg(adapter, rss,
2887 						       "Input set change for",
2888 						       "successful");
2889 				rss->state = IAVF_ADV_RSS_ACTIVE;
2890 			}
2891 		}
2892 		spin_unlock_bh(&adapter->adv_rss_lock);
2893 		}
2894 		break;
2895 	case VIRTCHNL_OP_DEL_RSS_CFG: {
2896 		struct iavf_adv_rss *rss, *rss_tmp;
2897 
2898 		spin_lock_bh(&adapter->adv_rss_lock);
2899 		list_for_each_entry_safe(rss, rss_tmp,
2900 					 &adapter->adv_rss_list_head, list) {
2901 			if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2902 				list_del(&rss->list);
2903 				kfree(rss);
2904 			}
2905 		}
2906 		spin_unlock_bh(&adapter->adv_rss_lock);
2907 		}
2908 		break;
2909 	case VIRTCHNL_OP_ADD_VLAN_V2: {
2910 		struct iavf_vlan_filter *f;
2911 
2912 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2913 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
2914 			if (f->state == IAVF_VLAN_IS_NEW)
2915 				f->state = IAVF_VLAN_ACTIVE;
2916 		}
2917 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2918 		}
2919 		break;
2920 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2921 		/* PF enabled vlan strip on this VF.
2922 		 * Update netdev->features if needed to be in sync with ethtool.
2923 		 */
2924 		if (!v_retval)
2925 			iavf_netdev_features_vlan_strip_set(netdev, true);
2926 		break;
2927 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2928 		/* PF disabled vlan strip on this VF.
2929 		 * Update netdev->features if needed to be in sync with ethtool.
2930 		 */
2931 		if (!v_retval)
2932 			iavf_netdev_features_vlan_strip_set(netdev, false);
2933 		break;
2934 	case VIRTCHNL_OP_GET_QOS_CAPS: {
2935 		u16 len = struct_size(adapter->qos_caps, cap,
2936 				      IAVF_MAX_QOS_TC_NUM);
2937 
2938 		memcpy(adapter->qos_caps, msg, min(msglen, len));
2939 
2940 		adapter->aq_required |= IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
2941 		}
2942 		break;
2943 	case VIRTCHNL_OP_CONFIG_QUANTA:
2944 		break;
2945 	case VIRTCHNL_OP_CONFIG_QUEUE_BW: {
2946 		int i;
2947 		/* shaper configuration is successful for all queues */
2948 		for (i = 0; i < adapter->num_active_queues; i++)
2949 			adapter->tx_rings[i].q_shaper_update = false;
2950 	}
2951 		break;
2952 	default:
2953 		if (adapter->current_op && (v_opcode != adapter->current_op))
2954 			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
2955 				 adapter->current_op, v_opcode);
2956 		break;
2957 	} /* switch v_opcode */
2958 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2959 }
2960