1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/net/intel/libie/rx.h>
5 
6 #include "iavf.h"
7 #include "iavf_prototype.h"
8 
9 /**
10  * iavf_send_pf_msg
11  * @adapter: adapter structure
12  * @op: virtual channel opcode
13  * @msg: pointer to message buffer
14  * @len: message length
15  *
16  * Send message to PF and log the status on failure.
17  **/
18 static int iavf_send_pf_msg(struct iavf_adapter *adapter,
19 			    enum virtchnl_ops op, u8 *msg, u16 len)
20 {
21 	struct iavf_hw *hw = &adapter->hw;
22 	enum iavf_status status;
23 
24 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
25 		return 0; /* nothing to see here, move along */
26 
27 	status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
28 	if (status)
29 		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
30 			op, iavf_stat_str(hw, status),
31 			iavf_aq_str(hw, hw->aq.asq_last_status));
32 	return iavf_status_to_errno(status);
33 }
34 
35 /**
36  * iavf_send_api_ver
37  * @adapter: adapter structure
38  *
39  * Send API version admin queue message to the PF. The reply is not checked
40  * in this function. Returns 0 if the message was successfully
41  * sent, or a negative errno on failure.
42  **/
43 int iavf_send_api_ver(struct iavf_adapter *adapter)
44 {
45 	struct virtchnl_version_info vvi;
46 
47 	vvi.major = VIRTCHNL_VERSION_MAJOR;
48 	vvi.minor = VIRTCHNL_VERSION_MINOR;
49 
50 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
51 				sizeof(vvi));
52 }
53 
54 /**
55  * iavf_poll_virtchnl_msg
56  * @hw: HW configuration structure
57  * @event: event to populate on success
58  * @op_to_poll: requested virtchnl op to poll for
59  *
60  * Poll the admin receive queue for a virtchnl message matching op_to_poll.
61  * Events with other opcodes are consumed and discarded. Returns 0 on a
62  * match, or a negative error code if the queue empties or cleaning fails.
63  */
64 static int
65 iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
66 		       enum virtchnl_ops op_to_poll)
67 {
68 	enum virtchnl_ops received_op;
69 	enum iavf_status status;
70 	u32 v_retval;
71 
72 	while (1) {
73 		/* When the AQ is empty, iavf_clean_arq_element will return
74 		 * nonzero and this loop will terminate.
75 		 */
76 		status = iavf_clean_arq_element(hw, event, NULL);
77 		if (status != IAVF_SUCCESS)
78 			return iavf_status_to_errno(status);
79 		received_op =
80 		    (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
81 		if (op_to_poll == received_op)
82 			break;
83 	}
84 
85 	v_retval = le32_to_cpu(event->desc.cookie_low);
86 	return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
87 }
88 
89 /**
90  * iavf_verify_api_ver
91  * @adapter: adapter structure
92  *
93  * Compare API versions with the PF. Must be called after admin queue is
94  * initialized. Returns 0 if API versions match, -EIO if they do not,
95  * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
96  * from the firmware are propagated.
97  **/
98 int iavf_verify_api_ver(struct iavf_adapter *adapter)
99 {
100 	struct iavf_arq_event_info event;
101 	int err;
102 
103 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
104 	event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
105 	if (!event.msg_buf)
106 		return -ENOMEM;
107 
108 	err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
109 	if (!err) {
110 		struct virtchnl_version_info *pf_vvi =
111 			(struct virtchnl_version_info *)event.msg_buf;
112 		adapter->pf_version = *pf_vvi;
113 
114 		if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
115 		    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
116 		     pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
117 			err = -EIO;
118 	}
119 
120 	kfree(event.msg_buf);
121 
122 	return err;
123 }
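
/*
 * Usage sketch (illustrative only, not a definitive call flow): the init
 * path, assumed to live in iavf_main.c, sends the version message first and
 * verifies the PF's reply once it has arrived on the admin queue:
 *
 *	err = iavf_send_api_ver(adapter);
 *	if (err)
 *		return err;
 *	... wait for the PF's response on the ARQ ...
 *	err = iavf_verify_api_ver(adapter);
 *	if (err)
 *		return err;
 */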
124 
125 /**
126  * iavf_send_vf_config_msg
127  * @adapter: adapter structure
128  *
129  * Send VF configuration request admin queue message to the PF. The reply
130  * is not checked in this function. Returns 0 if the message was
131  * successfully sent, or a negative errno on failure.
132  **/
133 int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
134 {
135 	u32 caps;
136 
137 	caps = VIRTCHNL_VF_OFFLOAD_L2 |
138 	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
139 	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
140 	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
141 	       VIRTCHNL_VF_OFFLOAD_VLAN |
142 	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
143 	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
144 	       VIRTCHNL_VF_OFFLOAD_ENCAP |
145 	       VIRTCHNL_VF_OFFLOAD_TC_U32 |
146 	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
147 	       VIRTCHNL_VF_OFFLOAD_CRC |
148 	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
149 	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
150 	       VIRTCHNL_VF_OFFLOAD_ADQ |
151 	       VIRTCHNL_VF_OFFLOAD_USO |
152 	       VIRTCHNL_VF_OFFLOAD_FDIR_PF |
153 	       VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
154 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
155 
156 	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
157 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
158 	if (PF_IS_V11(adapter))
159 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
160 					(u8 *)&caps, sizeof(caps));
161 	else
162 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
163 					NULL, 0);
164 }
165 
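/**
 * iavf_send_vf_offload_vlan_v2_msg - request VLAN V2 offload capabilities
 * @adapter: adapter structure
 *
 * Send VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to the PF. The reply is not
 * checked in this function. Returns 0 if the message was successfully sent,
 * -EOPNOTSUPP if VLAN V2 support was not negotiated, or a negative errno on
 * send failure.
 */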
166 int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
167 {
168 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
169 
170 	if (!VLAN_V2_ALLOWED(adapter))
171 		return -EOPNOTSUPP;
172 
173 	adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
174 
175 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
176 				NULL, 0);
177 }
178 
179 /**
180  * iavf_validate_num_queues
181  * @adapter: adapter structure
182  *
183  * Validate that the number of queues the PF has sent in
184  * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
185  **/
186 static void iavf_validate_num_queues(struct iavf_adapter *adapter)
187 {
188 	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
189 		struct virtchnl_vsi_resource *vsi_res;
190 		int i;
191 
192 		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
193 			 adapter->vf_res->num_queue_pairs,
194 			 IAVF_MAX_REQ_QUEUES);
195 		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
196 			 IAVF_MAX_REQ_QUEUES);
197 		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
198 		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
199 			vsi_res = &adapter->vf_res->vsi_res[i];
200 			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
201 		}
202 	}
203 }
204 
205 /**
206  * iavf_get_vf_config
207  * @adapter: private adapter structure
208  *
209  * Get VF configuration from PF and populate hw structure. Must be called after
210  * admin queue is initialized. Polls the admin receive queue until the
211  * response is received from the PF. The response is copied into
212  * adapter->vf_res for further processing by the caller.
213  **/
214 int iavf_get_vf_config(struct iavf_adapter *adapter)
215 {
216 	struct iavf_hw *hw = &adapter->hw;
217 	struct iavf_arq_event_info event;
218 	u16 len;
219 	int err;
220 
221 	len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
222 	event.buf_len = len;
223 	event.msg_buf = kzalloc(len, GFP_KERNEL);
224 	if (!event.msg_buf)
225 		return -ENOMEM;
226 
227 	err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
228 	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
229 
230 	/* some PFs send more queues than we should have so validate that
231 	 * we aren't getting too many queues
232 	 */
233 	if (!err)
234 		iavf_validate_num_queues(adapter);
235 	iavf_vf_parse_hw_config(hw, adapter->vf_res);
236 
237 	kfree(event.msg_buf);
238 
239 	return err;
240 }
241 
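/**
 * iavf_get_vf_vlan_v2_caps - get VLAN V2 offload capabilities from the PF
 * @adapter: adapter structure
 *
 * Poll the admin queue for the reply to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
 * and copy the reported capabilities into adapter->vlan_v2_caps. Returns 0 on
 * success or a negative errno on failure.
 */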
242 int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
243 {
244 	struct iavf_arq_event_info event;
245 	int err;
246 	u16 len;
247 
248 	len = sizeof(struct virtchnl_vlan_caps);
249 	event.buf_len = len;
250 	event.msg_buf = kzalloc(len, GFP_KERNEL);
251 	if (!event.msg_buf)
252 		return -ENOMEM;
253 
254 	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
255 				     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
256 	if (!err)
257 		memcpy(&adapter->vlan_v2_caps, event.msg_buf,
258 		       min(event.msg_len, len));
259 
260 	kfree(event.msg_buf);
261 
262 	return err;
263 }
264 
265 /**
266  * iavf_configure_queues
267  * @adapter: adapter structure
268  *
269  * Request that the PF set up our (previously allocated) queues.
270  **/
271 void iavf_configure_queues(struct iavf_adapter *adapter)
272 {
273 	struct virtchnl_vsi_queue_config_info *vqci;
274 	int pairs = adapter->num_active_queues;
275 	struct virtchnl_queue_pair_info *vqpi;
276 	u32 i, max_frame;
277 	size_t len;
278 
279 	max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
280 	max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);
281 
282 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
283 		/* bail because we already have a command pending */
284 		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
285 			adapter->current_op);
286 		return;
287 	}
288 	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
289 	len = virtchnl_struct_size(vqci, qpair, pairs);
290 	vqci = kzalloc(len, GFP_KERNEL);
291 	if (!vqci)
292 		return;
293 
294 	vqci->vsi_id = adapter->vsi_res->vsi_id;
295 	vqci->num_queue_pairs = pairs;
296 	vqpi = vqci->qpair;
297 	/* Size check is not needed here - HW max is 16 queue pairs, and we
298 	 * can fit info for 31 of them into the AQ buffer before it overflows.
299 	 */
300 	for (i = 0; i < pairs; i++) {
301 		vqpi->txq.vsi_id = vqci->vsi_id;
302 		vqpi->txq.queue_id = i;
303 		vqpi->txq.ring_len = adapter->tx_rings[i].count;
304 		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
305 		vqpi->rxq.vsi_id = vqci->vsi_id;
306 		vqpi->rxq.queue_id = i;
307 		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
308 		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
309 		vqpi->rxq.max_pkt_size = max_frame;
310 		vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
311 		if (CRC_OFFLOAD_ALLOWED(adapter))
312 			vqpi->rxq.crc_disable = !!(adapter->netdev->features &
313 						   NETIF_F_RXFCS);
314 		vqpi++;
315 	}
316 
317 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
318 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
319 			 (u8 *)vqci, len);
320 	kfree(vqci);
321 }
322 
323 /**
324  * iavf_enable_queues
325  * @adapter: adapter structure
326  *
327  * Request that the PF enable all of our queues.
328  **/
329 void iavf_enable_queues(struct iavf_adapter *adapter)
330 {
331 	struct virtchnl_queue_select vqs;
332 
333 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
334 		/* bail because we already have a command pending */
335 		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
336 			adapter->current_op);
337 		return;
338 	}
339 	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
340 	vqs.vsi_id = adapter->vsi_res->vsi_id;
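	/* BIT(n) - 1 sets the n low-order bits; e.g. with 4 active queues the
	 * mask below is 0xf, selecting queue ids 0-3 for both Tx and Rx.
	 */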
341 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
342 	vqs.rx_queues = vqs.tx_queues;
343 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
344 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
345 			 (u8 *)&vqs, sizeof(vqs));
346 }
347 
348 /**
349  * iavf_disable_queues
350  * @adapter: adapter structure
351  *
352  * Request that the PF disable all of our queues.
353  **/
354 void iavf_disable_queues(struct iavf_adapter *adapter)
355 {
356 	struct virtchnl_queue_select vqs;
357 
358 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
359 		/* bail because we already have a command pending */
360 		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
361 			adapter->current_op);
362 		return;
363 	}
364 	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
365 	vqs.vsi_id = adapter->vsi_res->vsi_id;
366 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
367 	vqs.rx_queues = vqs.tx_queues;
368 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
369 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
370 			 (u8 *)&vqs, sizeof(vqs));
371 }
372 
373 /**
374  * iavf_map_queues
375  * @adapter: adapter structure
376  *
377  * Request that the PF map queues to interrupt vectors. Misc causes, including
378  * admin queue, are always mapped to vector 0.
379  **/
380 void iavf_map_queues(struct iavf_adapter *adapter)
381 {
382 	struct virtchnl_irq_map_info *vimi;
383 	struct virtchnl_vector_map *vecmap;
384 	struct iavf_q_vector *q_vector;
385 	int v_idx, q_vectors;
386 	size_t len;
387 
388 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
389 		/* bail because we already have a command pending */
390 		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
391 			adapter->current_op);
392 		return;
393 	}
394 	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
395 
396 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
397 
398 	len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
399 	vimi = kzalloc(len, GFP_KERNEL);
400 	if (!vimi)
401 		return;
402 
403 	vimi->num_vectors = adapter->num_msix_vectors;
404 	/* Queue vectors first */
405 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
406 		q_vector = &adapter->q_vectors[v_idx];
407 		vecmap = &vimi->vecmap[v_idx];
408 
409 		vecmap->vsi_id = adapter->vsi_res->vsi_id;
410 		vecmap->vector_id = v_idx + NONQ_VECS;
411 		vecmap->txq_map = q_vector->ring_mask;
412 		vecmap->rxq_map = q_vector->ring_mask;
413 		vecmap->rxitr_idx = IAVF_RX_ITR;
414 		vecmap->txitr_idx = IAVF_TX_ITR;
415 	}
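	/* Illustration (assuming NONQ_VECS reserves a single misc vector): with
	 * four queue vectors, the entries above get vector_id 1-4, and the
	 * final entry below maps the misc/AdminQ cause to vector 0.
	 */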
416 	/* Misc vector last - this is only for AdminQ messages */
417 	vecmap = &vimi->vecmap[v_idx];
418 	vecmap->vsi_id = adapter->vsi_res->vsi_id;
419 	vecmap->vector_id = 0;
420 	vecmap->txq_map = 0;
421 	vecmap->rxq_map = 0;
422 
423 	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
424 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
425 			 (u8 *)vimi, len);
426 	kfree(vimi);
427 }
428 
429 /**
430  * iavf_set_mac_addr_type - Set the correct request type from the filter type
431  * @virtchnl_ether_addr: pointer to requested list element
432  * @filter: pointer to requested filter
433  **/
434 static void
435 iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
436 		       const struct iavf_mac_filter *filter)
437 {
438 	virtchnl_ether_addr->type = filter->is_primary ?
439 		VIRTCHNL_ETHER_ADDR_PRIMARY :
440 		VIRTCHNL_ETHER_ADDR_EXTRA;
441 }
442 
443 /**
444  * iavf_add_ether_addrs
445  * @adapter: adapter structure
446  *
447  * Request that the PF add one or more addresses to our filters.
448  **/
449 void iavf_add_ether_addrs(struct iavf_adapter *adapter)
450 {
451 	struct virtchnl_ether_addr_list *veal;
452 	struct iavf_mac_filter *f;
453 	int i = 0, count = 0;
454 	bool more = false;
455 	size_t len;
456 
457 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
458 		/* bail because we already have a command pending */
459 		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
460 			adapter->current_op);
461 		return;
462 	}
463 
464 	spin_lock_bh(&adapter->mac_vlan_list_lock);
465 
466 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
467 		if (f->add)
468 			count++;
469 	}
470 	if (!count) {
471 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
472 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
473 		return;
474 	}
475 	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
476 
477 	len = virtchnl_struct_size(veal, list, count);
478 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
479 		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
480 		while (len > IAVF_MAX_AQ_BUF_SIZE)
481 			len = virtchnl_struct_size(veal, list, --count);
482 		more = true;
483 	}
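	/* If the request was truncated above, "more" stays true, the
	 * IAVF_FLAG_AQ_ADD_MAC_FILTER bit is left set further down, and the
	 * remaining filters can be sent in a follow-up request.
	 */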
484 
485 	veal = kzalloc(len, GFP_ATOMIC);
486 	if (!veal) {
487 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
488 		return;
489 	}
490 
491 	veal->vsi_id = adapter->vsi_res->vsi_id;
492 	veal->num_elements = count;
493 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
494 		if (f->add) {
495 			ether_addr_copy(veal->list[i].addr, f->macaddr);
496 			iavf_set_mac_addr_type(&veal->list[i], f);
497 			i++;
498 			f->add = false;
499 			if (i == count)
500 				break;
501 		}
502 	}
503 	if (!more)
504 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
505 
506 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
507 
508 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
509 	kfree(veal);
510 }
511 
512 /**
513  * iavf_del_ether_addrs
514  * @adapter: adapter structure
515  *
516  * Request that the PF remove one or more addresses from our filters.
517  **/
518 void iavf_del_ether_addrs(struct iavf_adapter *adapter)
519 {
520 	struct virtchnl_ether_addr_list *veal;
521 	struct iavf_mac_filter *f, *ftmp;
522 	int i = 0, count = 0;
523 	bool more = false;
524 	size_t len;
525 
526 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
527 		/* bail because we already have a command pending */
528 		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
529 			adapter->current_op);
530 		return;
531 	}
532 
533 	spin_lock_bh(&adapter->mac_vlan_list_lock);
534 
535 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
536 		if (f->remove)
537 			count++;
538 	}
539 	if (!count) {
540 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
541 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
542 		return;
543 	}
544 	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
545 
546 	len = virtchnl_struct_size(veal, list, count);
547 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
548 		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
549 		while (len > IAVF_MAX_AQ_BUF_SIZE)
550 			len = virtchnl_struct_size(veal, list, --count);
551 		more = true;
552 	}
553 	veal = kzalloc(len, GFP_ATOMIC);
554 	if (!veal) {
555 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
556 		return;
557 	}
558 
559 	veal->vsi_id = adapter->vsi_res->vsi_id;
560 	veal->num_elements = count;
561 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
562 		if (f->remove) {
563 			ether_addr_copy(veal->list[i].addr, f->macaddr);
564 			iavf_set_mac_addr_type(&veal->list[i], f);
565 			i++;
566 			list_del(&f->list);
567 			kfree(f);
568 			if (i == count)
569 				break;
570 		}
571 	}
572 	if (!more)
573 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
574 
575 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
576 
577 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
578 	kfree(veal);
579 }
580 
581 /**
582  * iavf_mac_add_ok
583  * @adapter: adapter structure
584  *
585  * Submit list of filters based on PF response.
586  **/
587 static void iavf_mac_add_ok(struct iavf_adapter *adapter)
588 {
589 	struct iavf_mac_filter *f, *ftmp;
590 
591 	spin_lock_bh(&adapter->mac_vlan_list_lock);
592 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
593 		f->is_new_mac = false;
594 		if (!f->add && !f->add_handled)
595 			f->add_handled = true;
596 	}
597 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
598 }
599 
600 /**
601  * iavf_mac_add_reject
602  * @adapter: adapter structure
603  *
604  * Remove filters from list based on PF response.
605  **/
606 static void iavf_mac_add_reject(struct iavf_adapter *adapter)
607 {
608 	struct net_device *netdev = adapter->netdev;
609 	struct iavf_mac_filter *f, *ftmp;
610 
611 	spin_lock_bh(&adapter->mac_vlan_list_lock);
612 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
613 		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
614 			f->remove = false;
615 
616 		if (!f->add && !f->add_handled)
617 			f->add_handled = true;
618 
619 		if (f->is_new_mac) {
620 			list_del(&f->list);
621 			kfree(f);
622 		}
623 	}
624 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
625 }
626 
627 /**
628  * iavf_vlan_add_reject
629  * @adapter: adapter structure
630  *
631  * Remove VLAN filters from list based on PF response.
632  **/
633 static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
634 {
635 	struct iavf_vlan_filter *f, *ftmp;
636 
637 	spin_lock_bh(&adapter->mac_vlan_list_lock);
638 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
639 		if (f->state == IAVF_VLAN_IS_NEW) {
640 			list_del(&f->list);
641 			kfree(f);
642 			adapter->num_vlan_filters--;
643 		}
644 	}
645 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
646 }
647 
648 /**
649  * iavf_add_vlans
650  * @adapter: adapter structure
651  *
652  * Request that the PF add one or more VLAN filters to our VSI.
653  **/
654 void iavf_add_vlans(struct iavf_adapter *adapter)
655 {
656 	int len, i = 0, count = 0;
657 	struct iavf_vlan_filter *f;
658 	bool more = false;
659 
660 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
661 		/* bail because we already have a command pending */
662 		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
663 			adapter->current_op);
664 		return;
665 	}
666 
667 	spin_lock_bh(&adapter->mac_vlan_list_lock);
668 
669 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
670 		if (f->state == IAVF_VLAN_ADD)
671 			count++;
672 	}
673 	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
674 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
675 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
676 		return;
677 	}
678 
679 	if (VLAN_ALLOWED(adapter)) {
680 		struct virtchnl_vlan_filter_list *vvfl;
681 
682 		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
683 
684 		len = virtchnl_struct_size(vvfl, vlan_id, count);
685 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
686 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
687 			while (len > IAVF_MAX_AQ_BUF_SIZE)
688 				len = virtchnl_struct_size(vvfl, vlan_id,
689 							   --count);
690 			more = true;
691 		}
692 		vvfl = kzalloc(len, GFP_ATOMIC);
693 		if (!vvfl) {
694 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
695 			return;
696 		}
697 
698 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
699 		vvfl->num_elements = count;
700 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
701 			if (f->state == IAVF_VLAN_ADD) {
702 				vvfl->vlan_id[i] = f->vlan.vid;
703 				i++;
704 				f->state = IAVF_VLAN_IS_NEW;
705 				if (i == count)
706 					break;
707 			}
708 		}
709 		if (!more)
710 			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
711 
712 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
713 
714 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
715 		kfree(vvfl);
716 	} else {
717 		u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
718 		u16 current_vlans = iavf_get_num_vlans_added(adapter);
719 		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
720 
721 		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
722 
723 		if ((count + current_vlans) > max_vlans &&
724 		    current_vlans < max_vlans) {
725 			count = max_vlans - iavf_get_num_vlans_added(adapter);
726 			more = true;
727 		}
728 
729 		len = virtchnl_struct_size(vvfl_v2, filters, count);
730 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
731 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
732 			while (len > IAVF_MAX_AQ_BUF_SIZE)
733 				len = virtchnl_struct_size(vvfl_v2, filters,
734 							   --count);
735 			more = true;
736 		}
737 
738 		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
739 		if (!vvfl_v2) {
740 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
741 			return;
742 		}
743 
744 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
745 		vvfl_v2->num_elements = count;
746 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
747 			if (f->state == IAVF_VLAN_ADD) {
748 				struct virtchnl_vlan_supported_caps *filtering_support =
749 					&adapter->vlan_v2_caps.filtering.filtering_support;
750 				struct virtchnl_vlan *vlan;
751 
752 				if (i == count)
753 					break;
754 
755 				/* prefer the outer VLAN if outer filtering is supported */
756 				if (filtering_support->outer)
757 					vlan = &vvfl_v2->filters[i].outer;
758 				else
759 					vlan = &vvfl_v2->filters[i].inner;
760 
761 				vlan->tci = f->vlan.vid;
762 				vlan->tpid = f->vlan.tpid;
763 
764 				i++;
765 				f->state = IAVF_VLAN_IS_NEW;
766 			}
767 		}
768 
769 		if (!more)
770 			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
771 
772 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
773 
774 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
775 				 (u8 *)vvfl_v2, len);
776 		kfree(vvfl_v2);
777 	}
778 }
779 
780 /**
781  * iavf_del_vlans
782  * @adapter: adapter structure
783  *
784  * Request that the PF remove one or more VLAN filters from our VSI.
785  **/
786 void iavf_del_vlans(struct iavf_adapter *adapter)
787 {
788 	struct iavf_vlan_filter *f, *ftmp;
789 	int len, i = 0, count = 0;
790 	bool more = false;
791 
792 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
793 		/* bail because we already have a command pending */
794 		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
795 			adapter->current_op);
796 		return;
797 	}
798 
799 	spin_lock_bh(&adapter->mac_vlan_list_lock);
800 
801 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
802 		/* If VLAN filtering is not allowed, don't send a VLAN delete
803 		 * request because it will most likely fail and create
804 		 * unnecessary errors/noise. Instead, just free the VLAN
805 		 * filters marked for removal so we can bail out before
806 		 * sending a virtchnl message.
807 		 */
808 		if (f->state == IAVF_VLAN_REMOVE &&
809 		    !VLAN_FILTERING_ALLOWED(adapter)) {
810 			list_del(&f->list);
811 			kfree(f);
812 			adapter->num_vlan_filters--;
813 		} else if (f->state == IAVF_VLAN_DISABLE &&
814 		    !VLAN_FILTERING_ALLOWED(adapter)) {
815 			f->state = IAVF_VLAN_INACTIVE;
816 		} else if (f->state == IAVF_VLAN_REMOVE ||
817 			   f->state == IAVF_VLAN_DISABLE) {
818 			count++;
819 		}
820 	}
821 	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
822 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
823 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
824 		return;
825 	}
826 
827 	if (VLAN_ALLOWED(adapter)) {
828 		struct virtchnl_vlan_filter_list *vvfl;
829 
830 		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
831 
832 		len = virtchnl_struct_size(vvfl, vlan_id, count);
833 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
834 			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
835 			while (len > IAVF_MAX_AQ_BUF_SIZE)
836 				len = virtchnl_struct_size(vvfl, vlan_id,
837 							   --count);
838 			more = true;
839 		}
840 		vvfl = kzalloc(len, GFP_ATOMIC);
841 		if (!vvfl) {
842 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
843 			return;
844 		}
845 
846 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
847 		vvfl->num_elements = count;
848 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
849 			if (f->state == IAVF_VLAN_DISABLE) {
850 				vvfl->vlan_id[i] = f->vlan.vid;
851 				f->state = IAVF_VLAN_INACTIVE;
852 				i++;
853 				if (i == count)
854 					break;
855 			} else if (f->state == IAVF_VLAN_REMOVE) {
856 				vvfl->vlan_id[i] = f->vlan.vid;
857 				list_del(&f->list);
858 				kfree(f);
859 				adapter->num_vlan_filters--;
860 				i++;
861 				if (i == count)
862 					break;
863 			}
864 		}
865 
866 		if (!more)
867 			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
868 
869 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
870 
871 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
872 		kfree(vvfl);
873 	} else {
874 		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
875 
876 		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
877 
878 		len = virtchnl_struct_size(vvfl_v2, filters, count);
879 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
880 			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
881 			while (len > IAVF_MAX_AQ_BUF_SIZE)
882 				len = virtchnl_struct_size(vvfl_v2, filters,
883 							   --count);
884 			more = true;
885 		}
886 
887 		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
888 		if (!vvfl_v2) {
889 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
890 			return;
891 		}
892 
893 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
894 		vvfl_v2->num_elements = count;
895 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
896 			if (f->state == IAVF_VLAN_DISABLE ||
897 			    f->state == IAVF_VLAN_REMOVE) {
898 				struct virtchnl_vlan_supported_caps *filtering_support =
899 					&adapter->vlan_v2_caps.filtering.filtering_support;
900 				struct virtchnl_vlan *vlan;
901 
902 				/* prefer the outer VLAN if outer filtering is supported */
903 				if (filtering_support->outer)
904 					vlan = &vvfl_v2->filters[i].outer;
905 				else
906 					vlan = &vvfl_v2->filters[i].inner;
907 
908 				vlan->tci = f->vlan.vid;
909 				vlan->tpid = f->vlan.tpid;
910 
911 				if (f->state == IAVF_VLAN_DISABLE) {
912 					f->state = IAVF_VLAN_INACTIVE;
913 				} else {
914 					list_del(&f->list);
915 					kfree(f);
916 					adapter->num_vlan_filters--;
917 				}
918 				i++;
919 				if (i == count)
920 					break;
921 			}
922 		}
923 
924 		if (!more)
925 			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
926 
927 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
928 
929 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
930 				 (u8 *)vvfl_v2, len);
931 		kfree(vvfl_v2);
932 	}
933 }
934 
935 /**
936  * iavf_set_promiscuous
937  * @adapter: adapter structure
938  *
939  * Request that the PF enable promiscuous mode for our VSI.
940  **/
941 void iavf_set_promiscuous(struct iavf_adapter *adapter)
942 {
943 	struct net_device *netdev = adapter->netdev;
944 	struct virtchnl_promisc_info vpi;
945 	unsigned int flags;
946 
947 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
948 		/* bail because we already have a command pending */
949 		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
950 			adapter->current_op);
951 		return;
952 	}
953 
954 	/* prevent changes to promiscuous flags */
955 	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
956 
957 	/* sanity check to prevent duplicate AQ calls */
958 	if (!iavf_promiscuous_mode_changed(adapter)) {
959 		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
960 		dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
961 		/* allow changes to promiscuous flags */
962 		spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
963 		return;
964 	}
965 
966 	/* IFF_PROMISC and IFF_ALLMULTI are 2 bits, but only 3 states are handled */
967 	if (!(netdev->flags & IFF_PROMISC) &&
968 	    netdev->flags & IFF_ALLMULTI) {
969 		/* State 1  - only multicast promiscuous mode enabled
970 		 * - !IFF_PROMISC && IFF_ALLMULTI
971 		 */
972 		flags = FLAG_VF_MULTICAST_PROMISC;
973 		adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
974 		adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
975 		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
976 	} else if (!(netdev->flags & IFF_PROMISC) &&
977 		   !(netdev->flags & IFF_ALLMULTI)) {
978 		/* State 2 - unicast/multicast promiscuous mode disabled
979 		 * - !IFF_PROMISC && !IFF_ALLMULTI
980 		 */
981 		flags = 0;
982 		adapter->current_netdev_promisc_flags &=
983 			~(IFF_PROMISC | IFF_ALLMULTI);
984 		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
985 	} else {
986 		/* State 3 - unicast/multicast promiscuous mode enabled
987 		 * - IFF_PROMISC && IFF_ALLMULTI
988 		 * - IFF_PROMISC && !IFF_ALLMULTI
989 		 */
990 		flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
991 		adapter->current_netdev_promisc_flags |= IFF_PROMISC;
992 		if (netdev->flags & IFF_ALLMULTI)
993 			adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
994 		else
995 			adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
996 
997 		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
998 	}
999 
1000 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
1001 
1002 	/* allow changes to promiscuous flags */
1003 	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
1004 
1005 	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
1006 	vpi.vsi_id = adapter->vsi_res->vsi_id;
1007 	vpi.flags = flags;
1008 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1009 			 (u8 *)&vpi, sizeof(vpi));
1010 }
1011 
1012 /**
1013  * iavf_request_stats
1014  * @adapter: adapter structure
1015  *
1016  * Request VSI statistics from PF.
1017  **/
1018 void iavf_request_stats(struct iavf_adapter *adapter)
1019 {
1020 	struct virtchnl_queue_select vqs;
1021 
1022 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1023 		/* no error message, this isn't crucial */
1024 		return;
1025 	}
1026 
1027 	adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
1028 	adapter->current_op = VIRTCHNL_OP_GET_STATS;
1029 	vqs.vsi_id = adapter->vsi_res->vsi_id;
1030 	/* queue maps are ignored for this message - only the vsi is used */
1031 	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
1032 			     sizeof(vqs)))
1033 		/* if the request failed, don't lock out others */
1034 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1035 }
1036 
1037 /**
1038  * iavf_get_hena
1039  * @adapter: adapter structure
1040  *
1041  * Request hash enable capabilities from PF
1042  **/
1043 void iavf_get_hena(struct iavf_adapter *adapter)
1044 {
1045 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1046 		/* bail because we already have a command pending */
1047 		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
1048 			adapter->current_op);
1049 		return;
1050 	}
1051 	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
1052 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
1053 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
1054 }
1055 
1056 /**
1057  * iavf_set_hena
1058  * @adapter: adapter structure
1059  *
1060  * Request the PF to set our RSS hash capabilities
1061  **/
1062 void iavf_set_hena(struct iavf_adapter *adapter)
1063 {
1064 	struct virtchnl_rss_hena vrh;
1065 
1066 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1067 		/* bail because we already have a command pending */
1068 		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
1069 			adapter->current_op);
1070 		return;
1071 	}
1072 	vrh.hena = adapter->hena;
1073 	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
1074 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
1075 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
1076 			 sizeof(vrh));
1077 }
1078 
1079 /**
1080  * iavf_set_rss_key
1081  * @adapter: adapter structure
1082  *
1083  * Request the PF to set our RSS hash key
1084  **/
1085 void iavf_set_rss_key(struct iavf_adapter *adapter)
1086 {
1087 	struct virtchnl_rss_key *vrk;
1088 	int len;
1089 
1090 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1091 		/* bail because we already have a command pending */
1092 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
1093 			adapter->current_op);
1094 		return;
1095 	}
1096 	len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
1097 	vrk = kzalloc(len, GFP_KERNEL);
1098 	if (!vrk)
1099 		return;
1100 	vrk->vsi_id = adapter->vsi.id;
1101 	vrk->key_len = adapter->rss_key_size;
1102 	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
1103 
1104 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
1105 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
1106 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
1107 	kfree(vrk);
1108 }
1109 
1110 /**
1111  * iavf_set_rss_lut
1112  * @adapter: adapter structure
1113  *
1114  * Request the PF to set our RSS lookup table
1115  **/
1116 void iavf_set_rss_lut(struct iavf_adapter *adapter)
1117 {
1118 	struct virtchnl_rss_lut *vrl;
1119 	int len;
1120 
1121 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1122 		/* bail because we already have a command pending */
1123 		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
1124 			adapter->current_op);
1125 		return;
1126 	}
1127 	len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
1128 	vrl = kzalloc(len, GFP_KERNEL);
1129 	if (!vrl)
1130 		return;
1131 	vrl->vsi_id = adapter->vsi.id;
1132 	vrl->lut_entries = adapter->rss_lut_size;
1133 	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
1134 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
1135 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
1136 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
1137 	kfree(vrl);
1138 }
1139 
1140 /**
1141  * iavf_set_rss_hfunc
1142  * @adapter: adapter structure
1143  *
1144  * Request the PF to set our RSS Hash function
1145  **/
1146 void iavf_set_rss_hfunc(struct iavf_adapter *adapter)
1147 {
1148 	struct virtchnl_rss_hfunc *vrh;
1149 	int len = sizeof(*vrh);
1150 
1151 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1152 		/* bail because we already have a command pending */
1153 		dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n",
1154 			adapter->current_op);
1155 		return;
1156 	}
1157 	vrh = kzalloc(len, GFP_KERNEL);
1158 	if (!vrh)
1159 		return;
1160 	vrh->vsi_id = adapter->vsi.id;
1161 	vrh->rss_algorithm = adapter->hfunc;
1162 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC;
1163 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC;
1164 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len);
1165 	kfree(vrh);
1166 }
1167 
1168 /**
1169  * iavf_enable_vlan_stripping
1170  * @adapter: adapter structure
1171  *
1172  * Request VLAN header stripping to be enabled
1173  **/
1174 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
1175 {
1176 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1177 		/* bail because we already have a command pending */
1178 		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
1179 			adapter->current_op);
1180 		return;
1181 	}
1182 	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1183 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
1184 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
1185 }
1186 
1187 /**
1188  * iavf_disable_vlan_stripping
1189  * @adapter: adapter structure
1190  *
1191  * Request VLAN header stripping to be disabled
1192  **/
1193 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
1194 {
1195 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1196 		/* bail because we already have a command pending */
1197 		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
1198 			adapter->current_op);
1199 		return;
1200 	}
1201 	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1202 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
1203 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
1204 }
1205 
1206 /**
1207  * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
1208  * @tpid: VLAN TPID (e.g. 0x8100, 0x88a8, etc.)
1209  */
1210 static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
1211 {
1212 	switch (tpid) {
1213 	case ETH_P_8021Q:
1214 		return VIRTCHNL_VLAN_ETHERTYPE_8100;
1215 	case ETH_P_8021AD:
1216 		return VIRTCHNL_VLAN_ETHERTYPE_88A8;
1217 	}
1218 
1219 	return 0;
1220 }
1221 
1222 /**
1223  * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
1224  * @adapter: adapter structure
1225  * @msg: message structure used for updating offloads over virtchnl to update
1226  * @tpid: VLAN TPID (e.g. 0x8100, 0x88a8, etc.)
1227  * @offload_op: opcode used to determine which support structure to check
1228  */
1229 static int
1230 iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
1231 			      struct virtchnl_vlan_setting *msg, u16 tpid,
1232 			      enum virtchnl_ops offload_op)
1233 {
1234 	struct virtchnl_vlan_supported_caps *offload_support;
1235 	u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);
1236 
1237 	/* reference the correct offload support structure */
1238 	switch (offload_op) {
1239 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1240 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1241 		offload_support =
1242 			&adapter->vlan_v2_caps.offloads.stripping_support;
1243 		break;
1244 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1245 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1246 		offload_support =
1247 			&adapter->vlan_v2_caps.offloads.insertion_support;
1248 		break;
1249 	default:
1250 		dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
1251 			offload_op);
1252 		return -EINVAL;
1253 	}
1254 
1255 	/* make sure ethertype is supported */
1256 	if (offload_support->outer & vc_ethertype &&
1257 	    offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
1258 		msg->outer_ethertype_setting = vc_ethertype;
1259 	} else if (offload_support->inner & vc_ethertype &&
1260 		   offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
1261 		msg->inner_ethertype_setting = vc_ethertype;
1262 	} else {
1263 		dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
1264 			offload_op, tpid);
1265 		return -EINVAL;
1266 	}
1267 
1268 	return 0;
1269 }
1270 
1271 /**
1272  * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
1273  * @adapter: adapter structure
1274  * @tpid: VLAN TPID
1275  * @offload_op: opcode used to determine which AQ required bit to clear
1276  */
1277 static void
1278 iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
1279 				  enum virtchnl_ops offload_op)
1280 {
1281 	switch (offload_op) {
1282 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1283 		if (tpid == ETH_P_8021Q)
1284 			adapter->aq_required &=
1285 				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
1286 		else if (tpid == ETH_P_8021AD)
1287 			adapter->aq_required &=
1288 				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
1289 		break;
1290 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1291 		if (tpid == ETH_P_8021Q)
1292 			adapter->aq_required &=
1293 				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
1294 		else if (tpid == ETH_P_8021AD)
1295 			adapter->aq_required &=
1296 				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
1297 		break;
1298 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1299 		if (tpid == ETH_P_8021Q)
1300 			adapter->aq_required &=
1301 				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
1302 		else if (tpid == ETH_P_8021AD)
1303 			adapter->aq_required &=
1304 				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
1305 		break;
1306 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1307 		if (tpid == ETH_P_8021Q)
1308 			adapter->aq_required &=
1309 				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
1310 		else if (tpid == ETH_P_8021AD)
1311 			adapter->aq_required &=
1312 				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
1313 		break;
1314 	default:
1315 		dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
1316 			offload_op);
1317 	}
1318 }
1319 
1320 /**
1321  * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
1322  * @adapter: adapter structure
1323  * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
1324  * @offload_op: offload_op used to make the request over virtchnl
1325  */
1326 static void
1327 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
1328 			  enum virtchnl_ops offload_op)
1329 {
1330 	struct virtchnl_vlan_setting *msg;
1331 	int len = sizeof(*msg);
1332 
1333 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1334 		/* bail because we already have a command pending */
1335 		dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
1336 			offload_op, adapter->current_op);
1337 		return;
1338 	}
1339 
1340 	adapter->current_op = offload_op;
1341 
1342 	msg = kzalloc(len, GFP_KERNEL);
1343 	if (!msg)
1344 		return;
1345 
1346 	msg->vport_id = adapter->vsi_res->vsi_id;
1347 
1348 	/* always clear to prevent unsupported and endless requests */
1349 	iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
1350 
1351 	/* only send valid offload requests */
1352 	if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
1353 		iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
1354 	else
1355 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1356 
1357 	kfree(msg);
1358 }
1359 
1360 /**
1361  * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
1362  * @adapter: adapter structure
1363  * @tpid: VLAN TPID used to enable VLAN stripping
1364  */
1365 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1366 {
1367 	iavf_send_vlan_offload_v2(adapter, tpid,
1368 				  VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
1369 }
1370 
1371 /**
1372  * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
1373  * @adapter: adapter structure
1374  * @tpid: VLAN TPID used to disable VLAN stripping
1375  */
1376 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1377 {
1378 	iavf_send_vlan_offload_v2(adapter, tpid,
1379 				  VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
1380 }
1381 
1382 /**
1383  * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
1384  * @adapter: adapter structure
1385  * @tpid: VLAN TPID used to enable VLAN insertion
1386  */
1387 void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1388 {
1389 	iavf_send_vlan_offload_v2(adapter, tpid,
1390 				  VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
1391 }
1392 
1393 /**
1394  * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
1395  * @adapter: adapter structure
1396  * @tpid: VLAN TPID used to disable VLAN insertion
1397  */
1398 void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1399 {
1400 	iavf_send_vlan_offload_v2(adapter, tpid,
1401 				  VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
1402 }
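
/*
 * Usage sketch (illustrative only): a netdev feature-update path, assumed to
 * live outside this file, could toggle CTAG stripping like so:
 *
 *	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 *		iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
 *	else
 *		iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
 *
 * The S-TAG variants take ETH_P_8021AD instead.
 */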
1403 
1404 /**
1405  * iavf_print_link_message - print link up or down
1406  * @adapter: adapter structure
1407  *
1408  * Log a message telling the world of our wondrous link status
1409  */
1410 static void iavf_print_link_message(struct iavf_adapter *adapter)
1411 {
1412 	struct net_device *netdev = adapter->netdev;
1413 	int link_speed_mbps;
1414 	char *speed;
1415 
1416 	if (!adapter->link_up) {
1417 		netdev_info(netdev, "NIC Link is Down\n");
1418 		return;
1419 	}
1420 
1421 	if (ADV_LINK_SUPPORT(adapter)) {
1422 		link_speed_mbps = adapter->link_speed_mbps;
1423 		goto print_link_msg;
1424 	}
1425 
1426 	switch (adapter->link_speed) {
1427 	case VIRTCHNL_LINK_SPEED_40GB:
1428 		link_speed_mbps = SPEED_40000;
1429 		break;
1430 	case VIRTCHNL_LINK_SPEED_25GB:
1431 		link_speed_mbps = SPEED_25000;
1432 		break;
1433 	case VIRTCHNL_LINK_SPEED_20GB:
1434 		link_speed_mbps = SPEED_20000;
1435 		break;
1436 	case VIRTCHNL_LINK_SPEED_10GB:
1437 		link_speed_mbps = SPEED_10000;
1438 		break;
1439 	case VIRTCHNL_LINK_SPEED_5GB:
1440 		link_speed_mbps = SPEED_5000;
1441 		break;
1442 	case VIRTCHNL_LINK_SPEED_2_5GB:
1443 		link_speed_mbps = SPEED_2500;
1444 		break;
1445 	case VIRTCHNL_LINK_SPEED_1GB:
1446 		link_speed_mbps = SPEED_1000;
1447 		break;
1448 	case VIRTCHNL_LINK_SPEED_100MB:
1449 		link_speed_mbps = SPEED_100;
1450 		break;
1451 	default:
1452 		link_speed_mbps = SPEED_UNKNOWN;
1453 		break;
1454 	}
1455 
1456 print_link_msg:
1457 	if (link_speed_mbps > SPEED_1000) {
1458 		if (link_speed_mbps == SPEED_2500) {
1459 			speed = kasprintf(GFP_KERNEL, "%s", "2.5 Gbps");
1460 		} else {
1461 			/* convert to Gbps inline */
1462 			speed = kasprintf(GFP_KERNEL, "%d Gbps",
1463 					  link_speed_mbps / 1000);
1464 		}
1465 	} else if (link_speed_mbps == SPEED_UNKNOWN) {
1466 		speed = kasprintf(GFP_KERNEL, "%s", "Unknown Mbps");
1467 	} else {
1468 		speed = kasprintf(GFP_KERNEL, "%d Mbps", link_speed_mbps);
1469 	}
1470 
1471 	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
1472 	kfree(speed);
1473 }
1474 
1475 /**
1476  * iavf_get_vpe_link_status
1477  * @adapter: adapter structure
1478  * @vpe: virtchnl_pf_event structure
1479  *
1480  * Helper function for determining the link status
1481  **/
1482 static bool
1483 iavf_get_vpe_link_status(struct iavf_adapter *adapter,
1484 			 struct virtchnl_pf_event *vpe)
1485 {
1486 	if (ADV_LINK_SUPPORT(adapter))
1487 		return vpe->event_data.link_event_adv.link_status;
1488 	else
1489 		return vpe->event_data.link_event.link_status;
1490 }
1491 
1492 /**
1493  * iavf_set_adapter_link_speed_from_vpe
1494  * @adapter: adapter structure for which we are setting the link speed
1495  * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
1496  *
1497  * Helper function for setting iavf_adapter link speed
1498  **/
1499 static void
1500 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
1501 				     struct virtchnl_pf_event *vpe)
1502 {
1503 	if (ADV_LINK_SUPPORT(adapter))
1504 		adapter->link_speed_mbps =
1505 			vpe->event_data.link_event_adv.link_speed;
1506 	else
1507 		adapter->link_speed = vpe->event_data.link_event.link_speed;
1508 }
1509 
1510 /**
1511  * iavf_enable_channels
1512  * @adapter: adapter structure
1513  *
1514  * Request that the PF enable channels as specified by
1515  * the user via tc tool.
1516  **/
1517 void iavf_enable_channels(struct iavf_adapter *adapter)
1518 {
1519 	struct virtchnl_tc_info *vti = NULL;
1520 	size_t len;
1521 	int i;
1522 
1523 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1524 		/* bail because we already have a command pending */
1525 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1526 			adapter->current_op);
1527 		return;
1528 	}
1529 
1530 	len = virtchnl_struct_size(vti, list, adapter->num_tc);
1531 	vti = kzalloc(len, GFP_KERNEL);
1532 	if (!vti)
1533 		return;
1534 	vti->num_tc = adapter->num_tc;
1535 	for (i = 0; i < vti->num_tc; i++) {
1536 		vti->list[i].count = adapter->ch_config.ch_info[i].count;
1537 		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
1538 		vti->list[i].pad = 0;
1539 		vti->list[i].max_tx_rate =
1540 				adapter->ch_config.ch_info[i].max_tx_rate;
1541 	}
1542 
1543 	adapter->ch_config.state = __IAVF_TC_RUNNING;
1544 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1545 	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
1546 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
1547 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
1548 	kfree(vti);
1549 }
1550 
1551 /**
1552  * iavf_disable_channels
1553  * @adapter: adapter structure
1554  *
1555  * Request that the PF disable channels that are configured
1556  **/
1557 void iavf_disable_channels(struct iavf_adapter *adapter)
1558 {
1559 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1560 		/* bail because we already have a command pending */
1561 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1562 			adapter->current_op);
1563 		return;
1564 	}
1565 
1566 	adapter->ch_config.state = __IAVF_TC_INVALID;
1567 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1568 	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1569 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1570 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
1571 }
1572 
1573 /**
1574  * iavf_print_cloud_filter
1575  * @adapter: adapter structure
1576  * @f: cloud filter to print
1577  *
1578  * Print the cloud filter
1579  **/
1580 static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
1581 				    struct virtchnl_filter *f)
1582 {
1583 	switch (f->flow_type) {
1584 	case VIRTCHNL_TCP_V4_FLOW:
1585 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
1586 			 &f->data.tcp_spec.dst_mac,
1587 			 &f->data.tcp_spec.src_mac,
1588 			 ntohs(f->data.tcp_spec.vlan_id),
1589 			 &f->data.tcp_spec.dst_ip[0],
1590 			 &f->data.tcp_spec.src_ip[0],
1591 			 ntohs(f->data.tcp_spec.dst_port),
1592 			 ntohs(f->data.tcp_spec.src_port));
1593 		break;
1594 	case VIRTCHNL_TCP_V6_FLOW:
1595 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
1596 			 &f->data.tcp_spec.dst_mac,
1597 			 &f->data.tcp_spec.src_mac,
1598 			 ntohs(f->data.tcp_spec.vlan_id),
1599 			 &f->data.tcp_spec.dst_ip,
1600 			 &f->data.tcp_spec.src_ip,
1601 			 ntohs(f->data.tcp_spec.dst_port),
1602 			 ntohs(f->data.tcp_spec.src_port));
1603 		break;
1604 	}
1605 }
1606 
1607 /**
1608  * iavf_add_cloud_filter
1609  * @adapter: adapter structure
1610  *
1611  * Request that the PF add cloud filters as specified
1612  * by the user via tc tool.
1613  **/
1614 void iavf_add_cloud_filter(struct iavf_adapter *adapter)
1615 {
1616 	struct iavf_cloud_filter *cf;
1617 	struct virtchnl_filter *f;
1618 	int len = 0, count = 0;
1619 
1620 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1621 		/* bail because we already have a command pending */
1622 		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1623 			adapter->current_op);
1624 		return;
1625 	}
1626 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1627 		if (cf->add) {
1628 			count++;
1629 			break;
1630 		}
1631 	}
1632 	if (!count) {
1633 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1634 		return;
1635 	}
1636 	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1637 
1638 	len = sizeof(struct virtchnl_filter);
1639 	f = kzalloc(len, GFP_KERNEL);
1640 	if (!f)
1641 		return;
1642 
1643 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1644 		if (cf->add) {
1645 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1646 			cf->add = false;
1647 			cf->state = __IAVF_CF_ADD_PENDING;
1648 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
1649 					 (u8 *)f, len);
1650 		}
1651 	}
1652 	kfree(f);
1653 }
1654 
1655 /**
1656  * iavf_del_cloud_filter
1657  * @adapter: adapter structure
1658  *
1659  * Request that the PF delete cloud filters as specified
1660  * by the user via tc tool.
1661  **/
1662 void iavf_del_cloud_filter(struct iavf_adapter *adapter)
1663 {
1664 	struct iavf_cloud_filter *cf, *cftmp;
1665 	struct virtchnl_filter *f;
1666 	int len = 0, count = 0;
1667 
1668 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1669 		/* bail because we already have a command pending */
1670 		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1671 			adapter->current_op);
1672 		return;
1673 	}
1674 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1675 		if (cf->del) {
1676 			count++;
1677 			break;
1678 		}
1679 	}
1680 	if (!count) {
1681 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1682 		return;
1683 	}
1684 	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1685 
1686 	len = sizeof(struct virtchnl_filter);
1687 	f = kzalloc(len, GFP_KERNEL);
1688 	if (!f)
1689 		return;
1690 
1691 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1692 		if (cf->del) {
1693 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1694 			cf->del = false;
1695 			cf->state = __IAVF_CF_DEL_PENDING;
1696 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
1697 					 (u8 *)f, len);
1698 		}
1699 	}
1700 	kfree(f);
1701 }
1702 
1703 /**
1704  * iavf_add_fdir_filter
1705  * @adapter: the VF adapter structure
1706  *
1707  * Request that the PF add Flow Director filters as specified
1708  * by the user via ethtool.
1709  **/
1710 void iavf_add_fdir_filter(struct iavf_adapter *adapter)
1711 {
1712 	struct iavf_fdir_fltr *fdir;
1713 	struct virtchnl_fdir_add *f;
1714 	bool process_fltr = false;
1715 	int len;
1716 
1717 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1718 		/* bail because we already have a command pending */
1719 		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
1720 			adapter->current_op);
1721 		return;
1722 	}
1723 
1724 	len = sizeof(struct virtchnl_fdir_add);
1725 	f = kzalloc(len, GFP_KERNEL);
1726 	if (!f)
1727 		return;
1728 
1729 	spin_lock_bh(&adapter->fdir_fltr_lock);
1730 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1731 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1732 			process_fltr = true;
1733 			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
1734 			memcpy(f, &fdir->vc_add_msg, len);
1735 			break;
1736 		}
1737 	}
1738 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1739 
1740 	if (!process_fltr) {
1741 		/* prevent iavf_add_fdir_filter() from being called when there
1742 		 * are no filters to add
1743 		 */
1744 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1745 		kfree(f);
1746 		return;
1747 	}
1748 	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
1749 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
1750 	kfree(f);
1751 }
1752 
1753 /**
1754  * iavf_del_fdir_filter
1755  * @adapter: the VF adapter structure
1756  *
1757  * Request that the PF delete Flow Director filters as specified
1758  * by the user via ethtool.
1759  **/
1760 void iavf_del_fdir_filter(struct iavf_adapter *adapter)
1761 {
1762 	struct virtchnl_fdir_del f = {};
1763 	struct iavf_fdir_fltr *fdir;
1764 	bool process_fltr = false;
1765 	int len;
1766 
1767 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1768 		/* bail because we already have a command pending */
1769 		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
1770 			adapter->current_op);
1771 		return;
1772 	}
1773 
1774 	len = sizeof(struct virtchnl_fdir_del);
1775 
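	/* Delete and disable requests both use VIRTCHNL_OP_DEL_FDIR_FILTER;
	 * the completion handler tells them apart via the DEL_PENDING vs.
	 * DIS_PENDING filter state.
	 */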
1776 	spin_lock_bh(&adapter->fdir_fltr_lock);
1777 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1778 		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
1779 			process_fltr = true;
1780 			f.vsi_id = fdir->vc_add_msg.vsi_id;
1781 			f.flow_id = fdir->flow_id;
1782 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
1783 			break;
1784 		} else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
1785 			process_fltr = true;
1786 			f.vsi_id = fdir->vc_add_msg.vsi_id;
1787 			f.flow_id = fdir->flow_id;
1788 			fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
1789 			break;
1790 		}
1791 	}
1792 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1793 
1794 	if (!process_fltr) {
1795 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1796 		return;
1797 	}
1798 
1799 	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
1800 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
1801 }
1802 
1803 /**
1804  * iavf_add_adv_rss_cfg
1805  * @adapter: the VF adapter structure
1806  *
1807  * Request that the PF add RSS configuration as specified
1808  * by the user via ethtool.
1809  **/
1810 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
1811 {
1812 	struct virtchnl_rss_cfg *rss_cfg;
1813 	struct iavf_adv_rss *rss;
1814 	bool process_rss = false;
1815 	int len;
1816 
1817 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1818 		/* bail because we already have a command pending */
1819 		dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
1820 			adapter->current_op);
1821 		return;
1822 	}
1823 
1824 	len = sizeof(struct virtchnl_rss_cfg);
1825 	rss_cfg = kzalloc(len, GFP_KERNEL);
1826 	if (!rss_cfg)
1827 		return;
1828 
1829 	spin_lock_bh(&adapter->adv_rss_lock);
1830 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1831 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1832 			process_rss = true;
1833 			rss->state = IAVF_ADV_RSS_ADD_PENDING;
1834 			memcpy(rss_cfg, &rss->cfg_msg, len);
1835 			iavf_print_adv_rss_cfg(adapter, rss,
1836 					       "Input set change for",
1837 					       "is pending");
1838 			break;
1839 		}
1840 	}
1841 	spin_unlock_bh(&adapter->adv_rss_lock);
1842 
1843 	if (process_rss) {
1844 		adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
1845 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
1846 				 (u8 *)rss_cfg, len);
1847 	} else {
1848 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1849 	}
1850 
1851 	kfree(rss_cfg);
1852 }
1853 
1854 /**
1855  * iavf_del_adv_rss_cfg
1856  * @adapter: the VF adapter structure
1857  *
1858  * Request that the PF delete RSS configuration as specified
1859  * by the user via ethtool.
1860  **/
1861 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
1862 {
1863 	struct virtchnl_rss_cfg *rss_cfg;
1864 	struct iavf_adv_rss *rss;
1865 	bool process_rss = false;
1866 	int len;
1867 
1868 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1869 		/* bail because we already have a command pending */
1870 		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
1871 			adapter->current_op);
1872 		return;
1873 	}
1874 
1875 	len = sizeof(struct virtchnl_rss_cfg);
1876 	rss_cfg = kzalloc(len, GFP_KERNEL);
1877 	if (!rss_cfg)
1878 		return;
1879 
1880 	spin_lock_bh(&adapter->adv_rss_lock);
1881 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1882 		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
1883 			process_rss = true;
1884 			rss->state = IAVF_ADV_RSS_DEL_PENDING;
1885 			memcpy(rss_cfg, &rss->cfg_msg, len);
1886 			break;
1887 		}
1888 	}
1889 	spin_unlock_bh(&adapter->adv_rss_lock);
1890 
1891 	if (process_rss) {
1892 		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
1893 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
1894 				 (u8 *)rss_cfg, len);
1895 	} else {
1896 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1897 	}
1898 
1899 	kfree(rss_cfg);
1900 }
1901 
1902 /**
1903  * iavf_request_reset
1904  * @adapter: adapter structure
1905  *
1906  * Request that the PF reset this VF. No response is expected.
1907  **/
1908 int iavf_request_reset(struct iavf_adapter *adapter)
1909 {
1910 	int err;
1911 	/* Don't check CURRENT_OP - this is always higher priority */
1912 	err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
1913 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1914 	return err;
1915 }
1916 
1917 /**
1918  * iavf_netdev_features_vlan_strip_set - update vlan strip status
1919  * @netdev: ptr to netdev being adjusted
1920  * @enable: enable or disable vlan strip
1921  *
1922  * Helper function to change vlan strip status in netdev->features.
1923  */
1924 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
1925 						const bool enable)
1926 {
1927 	if (enable)
1928 		netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1929 	else
1930 		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
1931 }
1932 
1933 /**
1934  * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
1935  * @adapter: private adapter structure
1936  *
1937  * Called after a reset to re-add all FDIR filters and delete some of them
1938  * if they were pending to be deleted.
1939  */
1940 static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
1941 {
1942 	struct iavf_fdir_fltr *f, *ftmp;
1943 	bool add_filters = false;
1944 
1945 	spin_lock_bh(&adapter->fdir_fltr_lock);
1946 	list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
1947 		if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
1948 		    f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
1949 		    f->state == IAVF_FDIR_FLTR_ACTIVE) {
1950 			/* All filters and pending requests were removed on
1951 			 * the PF by the reset, so restore them.
1952 			 */
1953 			f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
1954 			add_filters = true;
1955 		} else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
1956 			   f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
1957 			/* Link down state, leave filters as inactive */
1958 			f->state = IAVF_FDIR_FLTR_INACTIVE;
1959 		} else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
1960 			   f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
1961 			/* Delete filters that were pending deletion; the list
1962 			 * on the PF is already cleared after a reset.
1963 			 */
1964 			list_del(&f->list);
1965 			iavf_dec_fdir_active_fltr(adapter, f);
1966 			kfree(f);
1967 		}
1968 	}
1969 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1970 
1971 	if (add_filters)
1972 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1973 }
1974 
1975 /**
1976  * iavf_virtchnl_completion
1977  * @adapter: adapter structure
1978  * @v_opcode: opcode sent by PF
1979  * @v_retval: retval sent by PF
1980  * @msg: message sent by PF
1981  * @msglen: message length
1982  *
1983  * Asynchronous completion function for admin queue messages. Rather than busy
1984  * wait, we fire off our requests and assume that no errors will be returned.
1985  * This function handles the reply messages.
1986  **/
1987 void iavf_virtchnl_completion(struct iavf_adapter *adapter,
1988 			      enum virtchnl_ops v_opcode,
1989 			      enum iavf_status v_retval, u8 *msg, u16 msglen)
1990 {
1991 	struct net_device *netdev = adapter->netdev;
1992 
1993 	if (v_opcode == VIRTCHNL_OP_EVENT) {
1994 		struct virtchnl_pf_event *vpe =
1995 			(struct virtchnl_pf_event *)msg;
1996 		bool link_up = iavf_get_vpe_link_status(adapter, vpe);
1997 
1998 		switch (vpe->event) {
1999 		case VIRTCHNL_EVENT_LINK_CHANGE:
2000 			iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
2001 
2002 			/* we've already got the right link status, bail */
2003 			if (adapter->link_up == link_up)
2004 				break;
2005 
2006 			if (link_up) {
2007 				/* If we get a link up message and start queues
2008 				 * before our queues are configured, it will
2009 				 * trigger a TX hang. In that case, just ignore
2010 				 * the link status message; we'll get another one
2011 				 * after we enable queues and are actually
2012 				 * prepared to send traffic.
2013 				 */
2014 				if (adapter->state != __IAVF_RUNNING)
2015 					break;
2016 
2017 				/* For ADq enabled VF, we reconfigure VSIs and
2018 				 * re-allocate queues. Hence wait till all
2019 				 * queues are enabled.
2020 				 */
2021 				if (adapter->flags &
2022 				    IAVF_FLAG_QUEUES_DISABLED)
2023 					break;
2024 			}
2025 
2026 			adapter->link_up = link_up;
2027 			if (link_up) {
2028 				netif_tx_start_all_queues(netdev);
2029 				netif_carrier_on(netdev);
2030 			} else {
2031 				netif_tx_stop_all_queues(netdev);
2032 				netif_carrier_off(netdev);
2033 			}
2034 			iavf_print_link_message(adapter);
2035 			break;
2036 		case VIRTCHNL_EVENT_RESET_IMPENDING:
2037 			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
2038 			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
2039 				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
2040 				iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
2041 			}
2042 			break;
2043 		default:
2044 			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
2045 				vpe->event);
2046 			break;
2047 		}
2048 		return;
2049 	}
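	/* The PF rejected a request we sent; undo any driver state that was
	 * set up in anticipation of that request before the per-opcode
	 * completion handling below.
	 */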
2050 	if (v_retval) {
2051 		switch (v_opcode) {
2052 		case VIRTCHNL_OP_ADD_VLAN:
2053 			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2054 				iavf_stat_str(&adapter->hw, v_retval));
2055 			break;
2056 		case VIRTCHNL_OP_ADD_ETH_ADDR:
2057 			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
2058 				iavf_stat_str(&adapter->hw, v_retval));
2059 			iavf_mac_add_reject(adapter);
2060 			/* restore administratively set MAC address */
2061 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2062 			wake_up(&adapter->vc_waitqueue);
2063 			break;
2064 		case VIRTCHNL_OP_DEL_VLAN:
2065 			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
2066 				iavf_stat_str(&adapter->hw, v_retval));
2067 			break;
2068 		case VIRTCHNL_OP_DEL_ETH_ADDR:
2069 			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
2070 				iavf_stat_str(&adapter->hw, v_retval));
2071 			break;
2072 		case VIRTCHNL_OP_ENABLE_CHANNELS:
2073 			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
2074 				iavf_stat_str(&adapter->hw, v_retval));
2075 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2076 			adapter->ch_config.state = __IAVF_TC_INVALID;
2077 			netdev_reset_tc(netdev);
2078 			netif_tx_start_all_queues(netdev);
2079 			break;
2080 		case VIRTCHNL_OP_DISABLE_CHANNELS:
2081 			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
2082 				iavf_stat_str(&adapter->hw, v_retval));
2083 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2084 			adapter->ch_config.state = __IAVF_TC_RUNNING;
2085 			netif_tx_start_all_queues(netdev);
2086 			break;
2087 		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2088 			struct iavf_cloud_filter *cf, *cftmp;
2089 
2090 			list_for_each_entry_safe(cf, cftmp,
2091 						 &adapter->cloud_filter_list,
2092 						 list) {
2093 				if (cf->state == __IAVF_CF_ADD_PENDING) {
2094 					cf->state = __IAVF_CF_INVALID;
2095 					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
2096 						 iavf_stat_str(&adapter->hw,
2097 							       v_retval));
2098 					iavf_print_cloud_filter(adapter,
2099 								&cf->f);
2100 					list_del(&cf->list);
2101 					kfree(cf);
2102 					adapter->num_cloud_filters--;
2103 				}
2104 			}
2105 			}
2106 			break;
2107 		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2108 			struct iavf_cloud_filter *cf;
2109 
2110 			list_for_each_entry(cf, &adapter->cloud_filter_list,
2111 					    list) {
2112 				if (cf->state == __IAVF_CF_DEL_PENDING) {
2113 					cf->state = __IAVF_CF_ACTIVE;
2114 					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
2115 						 iavf_stat_str(&adapter->hw,
2116 							       v_retval));
2117 					iavf_print_cloud_filter(adapter,
2118 								&cf->f);
2119 				}
2120 			}
2121 			}
2122 			break;
2123 		case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2124 			struct iavf_fdir_fltr *fdir, *fdir_tmp;
2125 
2126 			spin_lock_bh(&adapter->fdir_fltr_lock);
2127 			list_for_each_entry_safe(fdir, fdir_tmp,
2128 						 &adapter->fdir_list_head,
2129 						 list) {
2130 				if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2131 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
2132 						 iavf_stat_str(&adapter->hw,
2133 							       v_retval));
2134 					iavf_print_fdir_fltr(adapter, fdir);
2135 					if (msglen)
2136 						dev_err(&adapter->pdev->dev,
2137 							"%s\n", msg);
2138 					list_del(&fdir->list);
2139 					iavf_dec_fdir_active_fltr(adapter, fdir);
2140 					kfree(fdir);
2141 				}
2142 			}
2143 			spin_unlock_bh(&adapter->fdir_fltr_lock);
2144 			}
2145 			break;
2146 		case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2147 			struct iavf_fdir_fltr *fdir;
2148 
2149 			spin_lock_bh(&adapter->fdir_fltr_lock);
2150 			list_for_each_entry(fdir, &adapter->fdir_list_head,
2151 					    list) {
2152 				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
2153 				    fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2154 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2155 					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
2156 						 iavf_stat_str(&adapter->hw,
2157 							       v_retval));
2158 					iavf_print_fdir_fltr(adapter, fdir);
2159 				}
2160 			}
2161 			spin_unlock_bh(&adapter->fdir_fltr_lock);
2162 			}
2163 			break;
2164 		case VIRTCHNL_OP_ADD_RSS_CFG: {
2165 			struct iavf_adv_rss *rss, *rss_tmp;
2166 
2167 			spin_lock_bh(&adapter->adv_rss_lock);
2168 			list_for_each_entry_safe(rss, rss_tmp,
2169 						 &adapter->adv_rss_list_head,
2170 						 list) {
2171 				if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2172 					iavf_print_adv_rss_cfg(adapter, rss,
2173 							       "Failed to change the input set for",
2174 							       NULL);
2175 					list_del(&rss->list);
2176 					kfree(rss);
2177 				}
2178 			}
2179 			spin_unlock_bh(&adapter->adv_rss_lock);
2180 			}
2181 			break;
2182 		case VIRTCHNL_OP_DEL_RSS_CFG: {
2183 			struct iavf_adv_rss *rss;
2184 
2185 			spin_lock_bh(&adapter->adv_rss_lock);
2186 			list_for_each_entry(rss, &adapter->adv_rss_list_head,
2187 					    list) {
2188 				if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2189 					rss->state = IAVF_ADV_RSS_ACTIVE;
2190 					dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
2191 						iavf_stat_str(&adapter->hw,
2192 							      v_retval));
2193 				}
2194 			}
2195 			spin_unlock_bh(&adapter->adv_rss_lock);
2196 			}
2197 			break;
2198 		case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2199 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2200 			/* VLAN stripping could not be enabled by ethtool.
2201 			 * Disable it in netdev->features.
2202 			 */
2203 			iavf_netdev_features_vlan_strip_set(netdev, false);
2204 			break;
2205 		case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2206 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2207 			/* VLAN stripping could not be disabled by ethtool.
2208 			 * Enable it in netdev->features.
2209 			 */
2210 			iavf_netdev_features_vlan_strip_set(netdev, true);
2211 			break;
2212 		case VIRTCHNL_OP_ADD_VLAN_V2:
2213 			iavf_vlan_add_reject(adapter);
2214 			dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2215 				 iavf_stat_str(&adapter->hw, v_retval));
2216 			break;
2217 		case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
2218 			dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n",
2219 				 iavf_stat_str(&adapter->hw, v_retval));
2220 
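			/* The change was rejected, so flip adapter->hfunc
			 * back to the other Toeplitz variant.
			 */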
2221 			if (adapter->hfunc ==
2222 					VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
2223 				adapter->hfunc =
2224 					VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
2225 			else
2226 				adapter->hfunc =
2227 					VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
2228 
2229 			break;
2230 		default:
2231 			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
2232 				v_retval, iavf_stat_str(&adapter->hw, v_retval),
2233 				v_opcode);
2234 		}
2235 	}
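	/* Per-opcode completion handling. This runs for successful and failed
	 * replies alike; v_retval is checked again where it matters.
	 */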
2236 	switch (v_opcode) {
2237 	case VIRTCHNL_OP_ADD_ETH_ADDR:
2238 		if (!v_retval)
2239 			iavf_mac_add_ok(adapter);
2240 		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) {
2241 			netif_addr_lock_bh(netdev);
2242 			eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2243 			netif_addr_unlock_bh(netdev);
2244 		}
2247 		wake_up(&adapter->vc_waitqueue);
2248 		break;
2249 	case VIRTCHNL_OP_GET_STATS: {
2250 		struct iavf_eth_stats *stats =
2251 			(struct iavf_eth_stats *)msg;
2252 		netdev->stats.rx_packets = stats->rx_unicast +
2253 					   stats->rx_multicast +
2254 					   stats->rx_broadcast;
2255 		netdev->stats.tx_packets = stats->tx_unicast +
2256 					   stats->tx_multicast +
2257 					   stats->tx_broadcast;
2258 		netdev->stats.rx_bytes = stats->rx_bytes;
2259 		netdev->stats.tx_bytes = stats->tx_bytes;
2260 		netdev->stats.tx_errors = stats->tx_errors;
2261 		netdev->stats.rx_dropped = stats->rx_discards;
2262 		netdev->stats.tx_dropped = stats->tx_discards;
2263 		adapter->current_stats = *stats;
2264 		}
2265 		break;
2266 	case VIRTCHNL_OP_GET_VF_RESOURCES: {
2267 		u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
2268 
2269 		memcpy(adapter->vf_res, msg, min(msglen, len));
2270 		iavf_validate_num_queues(adapter);
2271 		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
2272 		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
2273 			/* restore current mac address */
2274 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2275 		} else {
2276 			netif_addr_lock_bh(netdev);
2277 			/* refresh current mac address if changed */
2278 			ether_addr_copy(netdev->perm_addr,
2279 					adapter->hw.mac.addr);
2280 			netif_addr_unlock_bh(netdev);
2281 		}
2282 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2283 		iavf_add_filter(adapter, adapter->hw.mac.addr);
2284 
2285 		if (VLAN_ALLOWED(adapter)) {
2286 			if (!list_empty(&adapter->vlan_filter_list)) {
2287 				struct iavf_vlan_filter *vlf;
2288 
2289 				/* re-add all VLAN filters over virtchnl */
2290 				list_for_each_entry(vlf,
2291 						    &adapter->vlan_filter_list,
2292 						    list)
2293 					vlf->state = IAVF_VLAN_ADD;
2294 
2295 				adapter->aq_required |=
2296 					IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2297 			}
2298 		}
2299 
2300 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2301 
2302 		iavf_activate_fdir_filters(adapter);
2303 
2304 		iavf_parse_vf_resource_msg(adapter);
2305 
2306 		/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
2307 		 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
2308 		 * configuration
2309 		 */
2310 		if (VLAN_V2_ALLOWED(adapter))
2311 			break;
2312 		/* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
2313 		 * wasn't successfully negotiated with the PF
2314 		 */
2315 		}
2316 		fallthrough;
2317 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
2318 		struct iavf_mac_filter *f;
2319 		bool was_mac_changed;
2320 		u64 aq_required = 0;
2321 
2322 		if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
2323 			memcpy(&adapter->vlan_v2_caps, msg,
2324 			       min_t(u16, msglen,
2325 				     sizeof(adapter->vlan_v2_caps)));
2326 
2327 		iavf_process_config(adapter);
2328 		adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
2329 		iavf_schedule_finish_config(adapter);
2330 
2331 		iavf_set_queue_vlan_tag_loc(adapter);
2332 
2333 		was_mac_changed = !ether_addr_equal(netdev->dev_addr,
2334 						    adapter->hw.mac.addr);
2335 
2336 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2337 
2338 		/* re-add all MAC filters */
2339 		list_for_each_entry(f, &adapter->mac_filter_list, list) {
2340 			if (was_mac_changed &&
2341 			    ether_addr_equal(netdev->dev_addr, f->macaddr))
2342 				ether_addr_copy(f->macaddr,
2343 						adapter->hw.mac.addr);
2344 
2345 			f->is_new_mac = true;
2346 			f->add = true;
2347 			f->add_handled = false;
2348 			f->remove = false;
2349 		}
2350 
2351 		/* re-add all VLAN filters */
2352 		if (VLAN_FILTERING_ALLOWED(adapter)) {
2353 			struct iavf_vlan_filter *vlf;
2354 
2355 			if (!list_empty(&adapter->vlan_filter_list)) {
2356 				list_for_each_entry(vlf,
2357 						    &adapter->vlan_filter_list,
2358 						    list)
2359 					vlf->state = IAVF_VLAN_ADD;
2360 
2361 				aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2362 			}
2363 		}
2364 
2365 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2366 
2367 		netif_addr_lock_bh(netdev);
2368 		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2369 		netif_addr_unlock_bh(netdev);
2370 
2371 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
2372 			aq_required;
2373 		}
2374 		break;
2375 	case VIRTCHNL_OP_ENABLE_QUEUES:
2376 		/* enable transmits */
2377 		iavf_irq_enable(adapter, true);
2378 		wake_up(&adapter->reset_waitqueue);
2379 		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
2380 		break;
2381 	case VIRTCHNL_OP_DISABLE_QUEUES:
2382 		iavf_free_all_tx_resources(adapter);
2383 		iavf_free_all_rx_resources(adapter);
2384 		if (adapter->state == __IAVF_DOWN_PENDING) {
2385 			iavf_change_state(adapter, __IAVF_DOWN);
2386 			wake_up(&adapter->down_waitqueue);
2387 		}
2388 		break;
2389 	case VIRTCHNL_OP_VERSION:
2390 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2391 		/* Don't display an error if we get these out of sequence.
2392 		 * If the firmware needed to get kicked, we'll get these and
2393 		 * it's no problem.
2394 		 */
2395 		if (v_opcode != adapter->current_op)
2396 			return;
2397 		break;
2398 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
2399 		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
2400 
2401 		if (msglen == sizeof(*vrh))
2402 			adapter->hena = vrh->hena;
2403 		else
2404 			dev_warn(&adapter->pdev->dev,
2405 				 "Invalid message %d from PF\n", v_opcode);
2406 		}
2407 		break;
2408 	case VIRTCHNL_OP_REQUEST_QUEUES: {
2409 		struct virtchnl_vf_res_request *vfres =
2410 			(struct virtchnl_vf_res_request *)msg;
2411 
2412 		if (vfres->num_queue_pairs != adapter->num_req_queues) {
2413 			dev_info(&adapter->pdev->dev,
2414 				 "Requested %d queues, PF can support %d\n",
2415 				 adapter->num_req_queues,
2416 				 vfres->num_queue_pairs);
2417 			adapter->num_req_queues = 0;
2418 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2419 		}
2420 		}
2421 		break;
2422 	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2423 		struct iavf_cloud_filter *cf;
2424 
2425 		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2426 			if (cf->state == __IAVF_CF_ADD_PENDING)
2427 				cf->state = __IAVF_CF_ACTIVE;
2428 		}
2429 		}
2430 		break;
2431 	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2432 		struct iavf_cloud_filter *cf, *cftmp;
2433 
2434 		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2435 					 list) {
2436 			if (cf->state == __IAVF_CF_DEL_PENDING) {
2437 				cf->state = __IAVF_CF_INVALID;
2438 				list_del(&cf->list);
2439 				kfree(cf);
2440 				adapter->num_cloud_filters--;
2441 			}
2442 		}
2443 		}
2444 		break;
2445 	case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2446 		struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
2447 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
2448 
2449 		spin_lock_bh(&adapter->fdir_fltr_lock);
2450 		list_for_each_entry_safe(fdir, fdir_tmp,
2451 					 &adapter->fdir_list_head,
2452 					 list) {
2453 			if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2454 				if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
2455 					if (!iavf_is_raw_fdir(fdir))
2456 						dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
2457 							 fdir->loc);
2458 					else
2459 						dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n",
2460 							 TC_U32_USERHTID(fdir->cls_u32_handle));
2461 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2462 					fdir->flow_id = add_fltr->flow_id;
2463 				} else {
2464 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
2465 						 add_fltr->status);
2466 					iavf_print_fdir_fltr(adapter, fdir);
2467 					list_del(&fdir->list);
2468 					iavf_dec_fdir_active_fltr(adapter, fdir);
2469 					kfree(fdir);
2470 				}
2471 			}
2472 		}
2473 		spin_unlock_bh(&adapter->fdir_fltr_lock);
2474 		}
2475 		break;
2476 	case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2477 		struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
2478 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
2479 
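		/* On success, DEL_PENDING filters are removed from the list,
		 * while DIS_PENDING filters are kept and marked inactive.
		 */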
2480 		spin_lock_bh(&adapter->fdir_fltr_lock);
2481 		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
2482 					 list) {
2483 			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2484 				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
2485 				    del_fltr->status ==
2486 				    VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
2487 					if (!iavf_is_raw_fdir(fdir))
2488 						dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
2489 							 fdir->loc);
2490 					else
2491 						dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n",
2492 							 TC_U32_USERHTID(fdir->cls_u32_handle));
2493 					list_del(&fdir->list);
2494 					iavf_dec_fdir_active_fltr(adapter, fdir);
2495 					kfree(fdir);
2496 				} else {
2497 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2498 					dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
2499 						 del_fltr->status);
2500 					iavf_print_fdir_fltr(adapter, fdir);
2501 				}
2502 			} else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2503 				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
2504 				    del_fltr->status ==
2505 				    VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
2506 					fdir->state = IAVF_FDIR_FLTR_INACTIVE;
2507 				} else {
2508 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2509 					dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
2510 						 del_fltr->status);
2511 					iavf_print_fdir_fltr(adapter, fdir);
2512 				}
2513 			}
2514 		}
2515 		spin_unlock_bh(&adapter->fdir_fltr_lock);
2516 		}
2517 		break;
2518 	case VIRTCHNL_OP_ADD_RSS_CFG: {
2519 		struct iavf_adv_rss *rss;
2520 
2521 		spin_lock_bh(&adapter->adv_rss_lock);
2522 		list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2523 			if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2524 				iavf_print_adv_rss_cfg(adapter, rss,
2525 						       "Input set change for",
2526 						       "successful");
2527 				rss->state = IAVF_ADV_RSS_ACTIVE;
2528 			}
2529 		}
2530 		spin_unlock_bh(&adapter->adv_rss_lock);
2531 		}
2532 		break;
2533 	case VIRTCHNL_OP_DEL_RSS_CFG: {
2534 		struct iavf_adv_rss *rss, *rss_tmp;
2535 
2536 		spin_lock_bh(&adapter->adv_rss_lock);
2537 		list_for_each_entry_safe(rss, rss_tmp,
2538 					 &adapter->adv_rss_list_head, list) {
2539 			if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2540 				list_del(&rss->list);
2541 				kfree(rss);
2542 			}
2543 		}
2544 		spin_unlock_bh(&adapter->adv_rss_lock);
2545 		}
2546 		break;
2547 	case VIRTCHNL_OP_ADD_VLAN_V2: {
2548 		struct iavf_vlan_filter *f;
2549 
2550 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2551 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
2552 			if (f->state == IAVF_VLAN_IS_NEW)
2553 				f->state = IAVF_VLAN_ACTIVE;
2554 		}
2555 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2556 		}
2557 		break;
2558 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2559 		/* PF enabled VLAN stripping on this VF.
2560 		 * Update netdev->features if needed to keep it in sync with ethtool.
2561 		 */
2562 		if (!v_retval)
2563 			iavf_netdev_features_vlan_strip_set(netdev, true);
2564 		break;
2565 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2566 		/* PF disabled VLAN stripping on this VF.
2567 		 * Update netdev->features if needed to keep it in sync with ethtool.
2568 		 */
2569 		if (!v_retval)
2570 			iavf_netdev_features_vlan_strip_set(netdev, false);
2571 		break;
2572 	default:
2573 		if (adapter->current_op && (v_opcode != adapter->current_op))
2574 			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
2575 				 adapter->current_op, v_opcode);
2576 		break;
2577 	} /* switch v_opcode */
2578 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2579 }
2580