xref: /linux/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c (revision fefe5dc4afeafe896c90d5b20b605f2759343c3b)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 
8 /**
9  * iavf_send_pf_msg
10  * @adapter: adapter structure
11  * @op: virtual channel opcode
12  * @msg: pointer to message buffer
13  * @len: message length
14  *
15  * Send message to PF and log the status on failure.
16  **/
17 static int iavf_send_pf_msg(struct iavf_adapter *adapter,
18 			    enum virtchnl_ops op, u8 *msg, u16 len)
19 {
20 	struct iavf_hw *hw = &adapter->hw;
21 	enum iavf_status status;
22 
23 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
24 		return 0; /* nothing to see here, move along */
25 
26 	status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
27 	if (status)
28 		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
29 			op, iavf_stat_str(hw, status),
30 			iavf_aq_str(hw, hw->aq.asq_last_status));
31 	return iavf_status_to_errno(status);
32 }
33 
34 /**
35  * iavf_send_api_ver
36  * @adapter: adapter structure
37  *
38  * Send API version admin queue message to the PF. The reply is not checked
39  * in this function. Returns 0 if the message was successfully
40  * sent, or a negative error code if not.
41  **/
42 int iavf_send_api_ver(struct iavf_adapter *adapter)
43 {
44 	struct virtchnl_version_info vvi;
45 
46 	vvi.major = VIRTCHNL_VERSION_MAJOR;
47 	vvi.minor = VIRTCHNL_VERSION_MINOR;
48 
49 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
50 				sizeof(vvi));
51 }
52 
53 /**
54  * iavf_poll_virtchnl_msg
55  * @hw: HW configuration structure
56  * @event: event to populate on success
57  * @op_to_poll: requested virtchnl op to poll for
58  *
59  * Poll the admin receive queue for a virtchnl msg matching op_to_poll.
60  * Returns 0 if a matching message is received and the PF reports success,
61  * or a negative error code if the queue empties first or the PF reports failure.
62  */
63 static int
64 iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
65 		       enum virtchnl_ops op_to_poll)
66 {
67 	enum virtchnl_ops received_op;
68 	enum iavf_status status;
69 	u32 v_retval;
70 
71 	while (1) {
72 		/* When the AQ is empty, iavf_clean_arq_element will return
73 		 * nonzero and this loop will terminate.
74 		 */
75 		status = iavf_clean_arq_element(hw, event, NULL);
76 		if (status != IAVF_SUCCESS)
77 			return iavf_status_to_errno(status);
78 		received_op =
79 		    (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
80 		if (op_to_poll == received_op)
81 			break;
82 	}
83 
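	/* cookie_low of the matching descriptor carries the PF's virtchnl status for this op */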
84 	v_retval = le32_to_cpu(event->desc.cookie_low);
85 	return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
86 }
87 
88 /**
89  * iavf_verify_api_ver
90  * @adapter: adapter structure
91  *
92  * Compare API versions with the PF. Must be called after admin queue is
93  * initialized. Returns 0 if the API versions are compatible, -EIO if the PF
94  * reports a newer version than the driver supports, a negative error code if
95  * the admin queue is empty, and propagates any error reported by the firmware.
96  **/
97 int iavf_verify_api_ver(struct iavf_adapter *adapter)
98 {
99 	struct iavf_arq_event_info event;
100 	int err;
101 
102 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
103 	event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
104 	if (!event.msg_buf)
105 		return -ENOMEM;
106 
107 	err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
108 	if (!err) {
109 		struct virtchnl_version_info *pf_vvi =
110 			(struct virtchnl_version_info *)event.msg_buf;
111 		adapter->pf_version = *pf_vvi;
112 
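		/* reject a PF that reports a newer API version than this driver supports */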
113 		if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
114 		    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
115 		     pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
116 			err = -EIO;
117 	}
118 
119 	kfree(event.msg_buf);
120 
121 	return err;
122 }
123 
124 /**
125  * iavf_send_vf_config_msg
126  * @adapter: adapter structure
127  *
128  * Send VF configuration request admin queue message to the PF. The reply
129  * is not checked in this function. Returns 0 if the message was
130  * successfully sent, or a negative error code if not.
131  **/
132 int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
133 {
134 	u32 caps;
135 
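	/* advertise every offload this VF supports; the PF replies with the
	 * subset of these capabilities it actually grants
	 */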
136 	caps = VIRTCHNL_VF_OFFLOAD_L2 |
137 	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
138 	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
139 	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
140 	       VIRTCHNL_VF_OFFLOAD_VLAN |
141 	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
142 	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
143 	       VIRTCHNL_VF_OFFLOAD_ENCAP |
144 	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
145 	       VIRTCHNL_VF_OFFLOAD_CRC |
146 	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
147 	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
148 	       VIRTCHNL_VF_OFFLOAD_ADQ |
149 	       VIRTCHNL_VF_OFFLOAD_USO |
150 	       VIRTCHNL_VF_OFFLOAD_FDIR_PF |
151 	       VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
152 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
153 
154 	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
155 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
156 	if (PF_IS_V11(adapter))
157 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
158 					(u8 *)&caps, sizeof(caps));
159 	else
160 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
161 					NULL, 0);
162 }
163 
164 int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
165 {
166 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
167 
168 	if (!VLAN_V2_ALLOWED(adapter))
169 		return -EOPNOTSUPP;
170 
171 	adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
172 
173 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
174 				NULL, 0);
175 }
176 
177 /**
178  * iavf_validate_num_queues
179  * @adapter: adapter structure
180  *
181  * Validate that the number of queues the PF has sent in
182  * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
183  **/
184 static void iavf_validate_num_queues(struct iavf_adapter *adapter)
185 {
186 	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
187 		struct virtchnl_vsi_resource *vsi_res;
188 		int i;
189 
190 		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
191 			 adapter->vf_res->num_queue_pairs,
192 			 IAVF_MAX_REQ_QUEUES);
193 		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
194 			 IAVF_MAX_REQ_QUEUES);
195 		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
196 		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
197 			vsi_res = &adapter->vf_res->vsi_res[i];
198 			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
199 		}
200 	}
201 }
202 
203 /**
204  * iavf_get_vf_config
205  * @adapter: private adapter structure
206  *
207  * Get VF configuration from PF and populate hw structure. Must be called after
208  * admin queue is initialized. Polls the admin receive queue until the
209  * response arrives or the queue is empty. The response from the PF is copied
210  * into adapter->vf_res for further processing by the caller.
211  **/
212 int iavf_get_vf_config(struct iavf_adapter *adapter)
213 {
214 	struct iavf_hw *hw = &adapter->hw;
215 	struct iavf_arq_event_info event;
216 	u16 len;
217 	int err;
218 
219 	len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
220 	event.buf_len = len;
221 	event.msg_buf = kzalloc(len, GFP_KERNEL);
222 	if (!event.msg_buf)
223 		return -ENOMEM;
224 
225 	err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
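	/* copy at most our buffer size in case the PF sent a larger message */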
226 	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
227 
228 	/* some PFs send more queues than we should have so validate that
229 	 * we aren't getting too many queues
230 	 */
231 	if (!err)
232 		iavf_validate_num_queues(adapter);
233 	iavf_vf_parse_hw_config(hw, adapter->vf_res);
234 
235 	kfree(event.msg_buf);
236 
237 	return err;
238 }
239 
240 int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
241 {
242 	struct iavf_arq_event_info event;
243 	int err;
244 	u16 len;
245 
246 	len = sizeof(struct virtchnl_vlan_caps);
247 	event.buf_len = len;
248 	event.msg_buf = kzalloc(len, GFP_KERNEL);
249 	if (!event.msg_buf)
250 		return -ENOMEM;
251 
252 	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
253 				     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
254 	if (!err)
255 		memcpy(&adapter->vlan_v2_caps, event.msg_buf,
256 		       min(event.msg_len, len));
257 
258 	kfree(event.msg_buf);
259 
260 	return err;
261 }
262 
263 /**
264  * iavf_configure_queues
265  * @adapter: adapter structure
266  *
267  * Request that the PF set up our (previously allocated) queues.
268  **/
269 void iavf_configure_queues(struct iavf_adapter *adapter)
270 {
271 	struct virtchnl_vsi_queue_config_info *vqci;
272 	int i, max_frame = adapter->vf_res->max_mtu;
273 	int pairs = adapter->num_active_queues;
274 	struct virtchnl_queue_pair_info *vqpi;
275 	size_t len;
276 
277 	if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
278 		max_frame = IAVF_MAX_RXBUFFER;
279 
280 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
281 		/* bail because we already have a command pending */
282 		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
283 			adapter->current_op);
284 		return;
285 	}
286 	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
287 	len = virtchnl_struct_size(vqci, qpair, pairs);
288 	vqci = kzalloc(len, GFP_KERNEL);
289 	if (!vqci)
290 		return;
291 
292 	/* Limit maximum frame size when jumbo frames are not enabled */
293 	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
294 	    (adapter->netdev->mtu <= ETH_DATA_LEN))
295 		max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
296 
297 	vqci->vsi_id = adapter->vsi_res->vsi_id;
298 	vqci->num_queue_pairs = pairs;
299 	vqpi = vqci->qpair;
300 	/* Size check is not needed here - HW max is 16 queue pairs, and we
301 	 * can fit info for 31 of them into the AQ buffer before it overflows.
302 	 */
303 	for (i = 0; i < pairs; i++) {
304 		vqpi->txq.vsi_id = vqci->vsi_id;
305 		vqpi->txq.queue_id = i;
306 		vqpi->txq.ring_len = adapter->tx_rings[i].count;
307 		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
308 		vqpi->rxq.vsi_id = vqci->vsi_id;
309 		vqpi->rxq.queue_id = i;
310 		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
311 		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
312 		vqpi->rxq.max_pkt_size = max_frame;
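		/* the Rx buffer length must be a multiple of the queue-context
		 * granularity, hence the ALIGN() below
		 */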
313 		vqpi->rxq.databuffer_size =
314 			ALIGN(adapter->rx_rings[i].rx_buf_len,
315 			      BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
316 		if (CRC_OFFLOAD_ALLOWED(adapter))
317 			vqpi->rxq.crc_disable = !!(adapter->netdev->features &
318 						   NETIF_F_RXFCS);
319 		vqpi++;
320 	}
321 
322 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
323 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
324 			 (u8 *)vqci, len);
325 	kfree(vqci);
326 }
327 
328 /**
329  * iavf_enable_queues
330  * @adapter: adapter structure
331  *
332  * Request that the PF enable all of our queues.
333  **/
334 void iavf_enable_queues(struct iavf_adapter *adapter)
335 {
336 	struct virtchnl_queue_select vqs;
337 
338 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
339 		/* bail because we already have a command pending */
340 		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
341 			adapter->current_op);
342 		return;
343 	}
344 	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
345 	vqs.vsi_id = adapter->vsi_res->vsi_id;
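	/* BIT(n) - 1 builds a contiguous mask selecting all n active queues */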
346 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
347 	vqs.rx_queues = vqs.tx_queues;
348 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
349 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
350 			 (u8 *)&vqs, sizeof(vqs));
351 }
352 
353 /**
354  * iavf_disable_queues
355  * @adapter: adapter structure
356  *
357  * Request that the PF disable all of our queues.
358  **/
359 void iavf_disable_queues(struct iavf_adapter *adapter)
360 {
361 	struct virtchnl_queue_select vqs;
362 
363 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
364 		/* bail because we already have a command pending */
365 		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
366 			adapter->current_op);
367 		return;
368 	}
369 	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
370 	vqs.vsi_id = adapter->vsi_res->vsi_id;
371 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
372 	vqs.rx_queues = vqs.tx_queues;
373 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
374 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
375 			 (u8 *)&vqs, sizeof(vqs));
376 }
377 
378 /**
379  * iavf_map_queues
380  * @adapter: adapter structure
381  *
382  * Request that the PF map queues to interrupt vectors. Misc causes, including
383  * admin queue, are always mapped to vector 0.
384  **/
385 void iavf_map_queues(struct iavf_adapter *adapter)
386 {
387 	struct virtchnl_irq_map_info *vimi;
388 	struct virtchnl_vector_map *vecmap;
389 	struct iavf_q_vector *q_vector;
390 	int v_idx, q_vectors;
391 	size_t len;
392 
393 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
394 		/* bail because we already have a command pending */
395 		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
396 			adapter->current_op);
397 		return;
398 	}
399 	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
400 
401 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
402 
403 	len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
404 	vimi = kzalloc(len, GFP_KERNEL);
405 	if (!vimi)
406 		return;
407 
408 	vimi->num_vectors = adapter->num_msix_vectors;
409 	/* Queue vectors first */
410 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
411 		q_vector = &adapter->q_vectors[v_idx];
412 		vecmap = &vimi->vecmap[v_idx];
413 
414 		vecmap->vsi_id = adapter->vsi_res->vsi_id;
415 		vecmap->vector_id = v_idx + NONQ_VECS;
416 		vecmap->txq_map = q_vector->ring_mask;
417 		vecmap->rxq_map = q_vector->ring_mask;
418 		vecmap->rxitr_idx = IAVF_RX_ITR;
419 		vecmap->txitr_idx = IAVF_TX_ITR;
420 	}
421 	/* Misc vector last - this is only for AdminQ messages */
422 	vecmap = &vimi->vecmap[v_idx];
423 	vecmap->vsi_id = adapter->vsi_res->vsi_id;
424 	vecmap->vector_id = 0;
425 	vecmap->txq_map = 0;
426 	vecmap->rxq_map = 0;
427 
428 	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
429 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
430 			 (u8 *)vimi, len);
431 	kfree(vimi);
432 }
433 
434 /**
435  * iavf_set_mac_addr_type - Set the correct request type from the filter type
436  * @virtchnl_ether_addr: pointer to requested list element
437  * @filter: pointer to requested filter
438  **/
439 static void
440 iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
441 		       const struct iavf_mac_filter *filter)
442 {
443 	virtchnl_ether_addr->type = filter->is_primary ?
444 		VIRTCHNL_ETHER_ADDR_PRIMARY :
445 		VIRTCHNL_ETHER_ADDR_EXTRA;
446 }
447 
448 /**
449  * iavf_add_ether_addrs
450  * @adapter: adapter structure
451  *
452  * Request that the PF add one or more addresses to our filters.
453  **/
454 void iavf_add_ether_addrs(struct iavf_adapter *adapter)
455 {
456 	struct virtchnl_ether_addr_list *veal;
457 	struct iavf_mac_filter *f;
458 	int i = 0, count = 0;
459 	bool more = false;
460 	size_t len;
461 
462 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
463 		/* bail because we already have a command pending */
464 		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
465 			adapter->current_op);
466 		return;
467 	}
468 
469 	spin_lock_bh(&adapter->mac_vlan_list_lock);
470 
471 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
472 		if (f->add)
473 			count++;
474 	}
475 	if (!count) {
476 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
477 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
478 		return;
479 	}
480 	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
481 
482 	len = virtchnl_struct_size(veal, list, count);
483 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
484 		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
485 		while (len > IAVF_MAX_AQ_BUF_SIZE)
486 			len = virtchnl_struct_size(veal, list, --count);
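		/* leave the AQ flag set so the filters that didn't fit are sent
		 * in a follow-up message
		 */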
487 		more = true;
488 	}
489 
490 	veal = kzalloc(len, GFP_ATOMIC);
491 	if (!veal) {
492 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
493 		return;
494 	}
495 
496 	veal->vsi_id = adapter->vsi_res->vsi_id;
497 	veal->num_elements = count;
498 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
499 		if (f->add) {
500 			ether_addr_copy(veal->list[i].addr, f->macaddr);
501 			iavf_set_mac_addr_type(&veal->list[i], f);
502 			i++;
503 			f->add = false;
504 			if (i == count)
505 				break;
506 		}
507 	}
508 	if (!more)
509 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
510 
511 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
512 
513 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
514 	kfree(veal);
515 }
516 
517 /**
518  * iavf_del_ether_addrs
519  * @adapter: adapter structure
520  *
521  * Request that the PF remove one or more addresses from our filters.
522  **/
523 void iavf_del_ether_addrs(struct iavf_adapter *adapter)
524 {
525 	struct virtchnl_ether_addr_list *veal;
526 	struct iavf_mac_filter *f, *ftmp;
527 	int i = 0, count = 0;
528 	bool more = false;
529 	size_t len;
530 
531 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
532 		/* bail because we already have a command pending */
533 		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
534 			adapter->current_op);
535 		return;
536 	}
537 
538 	spin_lock_bh(&adapter->mac_vlan_list_lock);
539 
540 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
541 		if (f->remove)
542 			count++;
543 	}
544 	if (!count) {
545 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
546 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
547 		return;
548 	}
549 	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
550 
551 	len = virtchnl_struct_size(veal, list, count);
552 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
553 		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
554 		while (len > IAVF_MAX_AQ_BUF_SIZE)
555 			len = virtchnl_struct_size(veal, list, --count);
556 		more = true;
557 	}
558 	veal = kzalloc(len, GFP_ATOMIC);
559 	if (!veal) {
560 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
561 		return;
562 	}
563 
564 	veal->vsi_id = adapter->vsi_res->vsi_id;
565 	veal->num_elements = count;
566 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
567 		if (f->remove) {
568 			ether_addr_copy(veal->list[i].addr, f->macaddr);
569 			iavf_set_mac_addr_type(&veal->list[i], f);
570 			i++;
571 			list_del(&f->list);
572 			kfree(f);
573 			if (i == count)
574 				break;
575 		}
576 	}
577 	if (!more)
578 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
579 
580 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
581 
582 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
583 	kfree(veal);
584 }
585 
586 /**
587  * iavf_mac_add_ok
588  * @adapter: adapter structure
589  *
590  * Mark MAC filters as handled following a successful add response from the PF.
591  **/
592 static void iavf_mac_add_ok(struct iavf_adapter *adapter)
593 {
594 	struct iavf_mac_filter *f, *ftmp;
595 
596 	spin_lock_bh(&adapter->mac_vlan_list_lock);
597 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
598 		f->is_new_mac = false;
599 		if (!f->add && !f->add_handled)
600 			f->add_handled = true;
601 	}
602 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
603 }
604 
605 /**
606  * iavf_mac_add_reject
607  * @adapter: adapter structure
608  *
609  * Remove filters from list based on PF response.
610  **/
611 static void iavf_mac_add_reject(struct iavf_adapter *adapter)
612 {
613 	struct net_device *netdev = adapter->netdev;
614 	struct iavf_mac_filter *f, *ftmp;
615 
616 	spin_lock_bh(&adapter->mac_vlan_list_lock);
617 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
618 		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
619 			f->remove = false;
620 
621 		if (!f->add && !f->add_handled)
622 			f->add_handled = true;
623 
624 		if (f->is_new_mac) {
625 			list_del(&f->list);
626 			kfree(f);
627 		}
628 	}
629 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
630 }
631 
632 /**
633  * iavf_vlan_add_reject
634  * @adapter: adapter structure
635  *
636  * Remove VLAN filters from list based on PF response.
637  **/
638 static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
639 {
640 	struct iavf_vlan_filter *f, *ftmp;
641 
642 	spin_lock_bh(&adapter->mac_vlan_list_lock);
643 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
644 		if (f->state == IAVF_VLAN_IS_NEW) {
645 			list_del(&f->list);
646 			kfree(f);
647 			adapter->num_vlan_filters--;
648 		}
649 	}
650 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
651 }
652 
653 /**
654  * iavf_add_vlans
655  * @adapter: adapter structure
656  *
657  * Request that the PF add one or more VLAN filters to our VSI.
658  **/
659 void iavf_add_vlans(struct iavf_adapter *adapter)
660 {
661 	int len, i = 0, count = 0;
662 	struct iavf_vlan_filter *f;
663 	bool more = false;
664 
665 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
666 		/* bail because we already have a command pending */
667 		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
668 			adapter->current_op);
669 		return;
670 	}
671 
672 	spin_lock_bh(&adapter->mac_vlan_list_lock);
673 
674 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
675 		if (f->state == IAVF_VLAN_ADD)
676 			count++;
677 	}
678 	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
679 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
680 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
681 		return;
682 	}
683 
684 	if (VLAN_ALLOWED(adapter)) {
685 		struct virtchnl_vlan_filter_list *vvfl;
686 
687 		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
688 
689 		len = virtchnl_struct_size(vvfl, vlan_id, count);
690 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
691 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
692 			while (len > IAVF_MAX_AQ_BUF_SIZE)
693 				len = virtchnl_struct_size(vvfl, vlan_id,
694 							   --count);
695 			more = true;
696 		}
697 		vvfl = kzalloc(len, GFP_ATOMIC);
698 		if (!vvfl) {
699 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
700 			return;
701 		}
702 
703 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
704 		vvfl->num_elements = count;
705 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
706 			if (f->state == IAVF_VLAN_ADD) {
707 				vvfl->vlan_id[i] = f->vlan.vid;
708 				i++;
709 				f->state = IAVF_VLAN_IS_NEW;
710 				if (i == count)
711 					break;
712 			}
713 		}
714 		if (!more)
715 			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
716 
717 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
718 
719 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
720 		kfree(vvfl);
721 	} else {
722 		u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
723 		u16 current_vlans = iavf_get_num_vlans_added(adapter);
724 		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
725 
726 		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
727 
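		/* if adding every requested filter would exceed the PF's limit,
		 * trim this request to what fits; 'more' keeps the AQ flag set
		 * so the remainder is sent on a later pass
		 */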
728 		if ((count + current_vlans) > max_vlans &&
729 		    current_vlans < max_vlans) {
730 			count = max_vlans - iavf_get_num_vlans_added(adapter);
731 			more = true;
732 		}
733 
734 		len = virtchnl_struct_size(vvfl_v2, filters, count);
735 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
736 			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
737 			while (len > IAVF_MAX_AQ_BUF_SIZE)
738 				len = virtchnl_struct_size(vvfl_v2, filters,
739 							   --count);
740 			more = true;
741 		}
742 
743 		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
744 		if (!vvfl_v2) {
745 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
746 			return;
747 		}
748 
749 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
750 		vvfl_v2->num_elements = count;
751 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
752 			if (f->state == IAVF_VLAN_ADD) {
753 				struct virtchnl_vlan_supported_caps *filtering_support =
754 					&adapter->vlan_v2_caps.filtering.filtering_support;
755 				struct virtchnl_vlan *vlan;
756 
757 				if (i == count)
758 					break;
759 
760 				/* prefer the outer VLAN field when the PF supports outer filtering */
761 				if (filtering_support->outer)
762 					vlan = &vvfl_v2->filters[i].outer;
763 				else
764 					vlan = &vvfl_v2->filters[i].inner;
765 
766 				vlan->tci = f->vlan.vid;
767 				vlan->tpid = f->vlan.tpid;
768 
769 				i++;
770 				f->state = IAVF_VLAN_IS_NEW;
771 			}
772 		}
773 
774 		if (!more)
775 			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
776 
777 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
778 
779 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
780 				 (u8 *)vvfl_v2, len);
781 		kfree(vvfl_v2);
782 	}
783 }
784 
785 /**
786  * iavf_del_vlans
787  * @adapter: adapter structure
788  *
789  * Request that the PF remove one or more VLAN filters from our VSI.
790  **/
791 void iavf_del_vlans(struct iavf_adapter *adapter)
792 {
793 	struct iavf_vlan_filter *f, *ftmp;
794 	int len, i = 0, count = 0;
795 	bool more = false;
796 
797 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
798 		/* bail because we already have a command pending */
799 		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
800 			adapter->current_op);
801 		return;
802 	}
803 
804 	spin_lock_bh(&adapter->mac_vlan_list_lock);
805 
806 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
807 		/* if VLAN filtering is not allowed, don't send a VLAN delete
808 		 * request because it will most likely fail and create
809 		 * unnecessary errors/noise; instead, just free the VLAN
810 		 * filters marked for removal so we can bail out before
811 		 * sending a virtchnl message
812 		 */
813 		if (f->state == IAVF_VLAN_REMOVE &&
814 		    !VLAN_FILTERING_ALLOWED(adapter)) {
815 			list_del(&f->list);
816 			kfree(f);
817 			adapter->num_vlan_filters--;
818 		} else if (f->state == IAVF_VLAN_DISABLE &&
819 		    !VLAN_FILTERING_ALLOWED(adapter)) {
820 			f->state = IAVF_VLAN_INACTIVE;
821 		} else if (f->state == IAVF_VLAN_REMOVE ||
822 			   f->state == IAVF_VLAN_DISABLE) {
823 			count++;
824 		}
825 	}
826 	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
827 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
828 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
829 		return;
830 	}
831 
832 	if (VLAN_ALLOWED(adapter)) {
833 		struct virtchnl_vlan_filter_list *vvfl;
834 
835 		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
836 
837 		len = virtchnl_struct_size(vvfl, vlan_id, count);
838 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
839 			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
840 			while (len > IAVF_MAX_AQ_BUF_SIZE)
841 				len = virtchnl_struct_size(vvfl, vlan_id,
842 							   --count);
843 			more = true;
844 		}
845 		vvfl = kzalloc(len, GFP_ATOMIC);
846 		if (!vvfl) {
847 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
848 			return;
849 		}
850 
851 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
852 		vvfl->num_elements = count;
853 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
854 			if (f->state == IAVF_VLAN_DISABLE) {
855 				vvfl->vlan_id[i] = f->vlan.vid;
856 				f->state = IAVF_VLAN_INACTIVE;
857 				i++;
858 				if (i == count)
859 					break;
860 			} else if (f->state == IAVF_VLAN_REMOVE) {
861 				vvfl->vlan_id[i] = f->vlan.vid;
862 				list_del(&f->list);
863 				kfree(f);
864 				adapter->num_vlan_filters--;
865 				i++;
866 				if (i == count)
867 					break;
868 			}
869 		}
870 
871 		if (!more)
872 			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
873 
874 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
875 
876 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
877 		kfree(vvfl);
878 	} else {
879 		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
880 
881 		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
882 
883 		len = virtchnl_struct_size(vvfl_v2, filters, count);
884 		if (len > IAVF_MAX_AQ_BUF_SIZE) {
885 			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
886 			while (len > IAVF_MAX_AQ_BUF_SIZE)
887 				len = virtchnl_struct_size(vvfl_v2, filters,
888 							   --count);
889 			more = true;
890 		}
891 
892 		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
893 		if (!vvfl_v2) {
894 			spin_unlock_bh(&adapter->mac_vlan_list_lock);
895 			return;
896 		}
897 
898 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
899 		vvfl_v2->num_elements = count;
900 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
901 			if (f->state == IAVF_VLAN_DISABLE ||
902 			    f->state == IAVF_VLAN_REMOVE) {
903 				struct virtchnl_vlan_supported_caps *filtering_support =
904 					&adapter->vlan_v2_caps.filtering.filtering_support;
905 				struct virtchnl_vlan *vlan;
906 
907 				/* prefer the outer VLAN field when the PF supports outer filtering */
908 				if (filtering_support->outer)
909 					vlan = &vvfl_v2->filters[i].outer;
910 				else
911 					vlan = &vvfl_v2->filters[i].inner;
912 
913 				vlan->tci = f->vlan.vid;
914 				vlan->tpid = f->vlan.tpid;
915 
916 				if (f->state == IAVF_VLAN_DISABLE) {
917 					f->state = IAVF_VLAN_INACTIVE;
918 				} else {
919 					list_del(&f->list);
920 					kfree(f);
921 					adapter->num_vlan_filters--;
922 				}
923 				i++;
924 				if (i == count)
925 					break;
926 			}
927 		}
928 
929 		if (!more)
930 			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
931 
932 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
933 
934 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
935 				 (u8 *)vvfl_v2, len);
936 		kfree(vvfl_v2);
937 	}
938 }
939 
940 /**
941  * iavf_set_promiscuous
942  * @adapter: adapter structure
943  *
944  * Request that the PF enable promiscuous mode for our VSI.
945  **/
946 void iavf_set_promiscuous(struct iavf_adapter *adapter)
947 {
948 	struct net_device *netdev = adapter->netdev;
949 	struct virtchnl_promisc_info vpi;
950 	unsigned int flags;
951 
952 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
953 		/* bail because we already have a command pending */
954 		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
955 			adapter->current_op);
956 		return;
957 	}
958 
959 	/* prevent changes to promiscuous flags */
960 	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
961 
962 	/* sanity check to prevent duplicate AQ calls */
963 	if (!iavf_promiscuous_mode_changed(adapter)) {
964 		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
965 		dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
966 		/* allow changes to promiscuous flags */
967 		spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
968 		return;
969 	}
970 
971 	/* there are 2 bits, but only 3 states */
972 	if (!(netdev->flags & IFF_PROMISC) &&
973 	    netdev->flags & IFF_ALLMULTI) {
974 		/* State 1 - only multicast promiscuous mode enabled
975 		 * - !IFF_PROMISC && IFF_ALLMULTI
976 		 */
977 		flags = FLAG_VF_MULTICAST_PROMISC;
978 		adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
979 		adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
980 		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
981 	} else if (!(netdev->flags & IFF_PROMISC) &&
982 		   !(netdev->flags & IFF_ALLMULTI)) {
983 		/* State 2 - unicast/multicast promiscuous mode disabled
984 		 * - !IFF_PROMISC && !IFF_ALLMULTI
985 		 */
986 		flags = 0;
987 		adapter->current_netdev_promisc_flags &=
988 			~(IFF_PROMISC | IFF_ALLMULTI);
989 		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
990 	} else {
991 		/* State 3 - unicast/multicast promiscuous mode enabled
992 		 * - IFF_PROMISC && IFF_ALLMULTI
993 		 * - IFF_PROMISC && !IFF_ALLMULTI
994 		 */
995 		flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
996 		adapter->current_netdev_promisc_flags |= IFF_PROMISC;
997 		if (netdev->flags & IFF_ALLMULTI)
998 			adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
999 		else
1000 			adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
1001 
1002 		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
1003 	}
1004 
1005 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
1006 
1007 	/* allow changes to promiscuous flags */
1008 	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
1009 
1010 	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
1011 	vpi.vsi_id = adapter->vsi_res->vsi_id;
1012 	vpi.flags = flags;
1013 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1014 			 (u8 *)&vpi, sizeof(vpi));
1015 }
1016 
1017 /**
1018  * iavf_request_stats
1019  * @adapter: adapter structure
1020  *
1021  * Request VSI statistics from PF.
1022  **/
1023 void iavf_request_stats(struct iavf_adapter *adapter)
1024 {
1025 	struct virtchnl_queue_select vqs;
1026 
1027 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1028 		/* no error message, this isn't crucial */
1029 		return;
1030 	}
1031 
1032 	adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
1033 	adapter->current_op = VIRTCHNL_OP_GET_STATS;
1034 	vqs.vsi_id = adapter->vsi_res->vsi_id;
1035 	/* queue maps are ignored for this message - only the vsi is used */
1036 	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
1037 			     sizeof(vqs)))
1038 		/* if the request failed, don't lock out others */
1039 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1040 }
1041 
1042 /**
1043  * iavf_get_hena
1044  * @adapter: adapter structure
1045  *
1046  * Request hash enable capabilities from PF
1047  **/
1048 void iavf_get_hena(struct iavf_adapter *adapter)
1049 {
1050 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1051 		/* bail because we already have a command pending */
1052 		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
1053 			adapter->current_op);
1054 		return;
1055 	}
1056 	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
1057 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
1058 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
1059 }
1060 
1061 /**
1062  * iavf_set_hena
1063  * @adapter: adapter structure
1064  *
1065  * Request the PF to set our RSS hash capabilities
1066  **/
1067 void iavf_set_hena(struct iavf_adapter *adapter)
1068 {
1069 	struct virtchnl_rss_hena vrh;
1070 
1071 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1072 		/* bail because we already have a command pending */
1073 		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
1074 			adapter->current_op);
1075 		return;
1076 	}
1077 	vrh.hena = adapter->hena;
1078 	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
1079 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
1080 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
1081 			 sizeof(vrh));
1082 }
1083 
1084 /**
1085  * iavf_set_rss_key
1086  * @adapter: adapter structure
1087  *
1088  * Request the PF to set our RSS hash key
1089  **/
1090 void iavf_set_rss_key(struct iavf_adapter *adapter)
1091 {
1092 	struct virtchnl_rss_key *vrk;
1093 	int len;
1094 
1095 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1096 		/* bail because we already have a command pending */
1097 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
1098 			adapter->current_op);
1099 		return;
1100 	}
1101 	len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
1102 	vrk = kzalloc(len, GFP_KERNEL);
1103 	if (!vrk)
1104 		return;
1105 	vrk->vsi_id = adapter->vsi.id;
1106 	vrk->key_len = adapter->rss_key_size;
1107 	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
1108 
1109 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
1110 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
1111 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
1112 	kfree(vrk);
1113 }
1114 
1115 /**
1116  * iavf_set_rss_lut
1117  * @adapter: adapter structure
1118  *
1119  * Request the PF to set our RSS lookup table
1120  **/
1121 void iavf_set_rss_lut(struct iavf_adapter *adapter)
1122 {
1123 	struct virtchnl_rss_lut *vrl;
1124 	int len;
1125 
1126 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1127 		/* bail because we already have a command pending */
1128 		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
1129 			adapter->current_op);
1130 		return;
1131 	}
1132 	len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
1133 	vrl = kzalloc(len, GFP_KERNEL);
1134 	if (!vrl)
1135 		return;
1136 	vrl->vsi_id = adapter->vsi.id;
1137 	vrl->lut_entries = adapter->rss_lut_size;
1138 	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
1139 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
1140 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
1141 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
1142 	kfree(vrl);
1143 }
1144 
1145 /**
1146  * iavf_enable_vlan_stripping
1147  * @adapter: adapter structure
1148  *
1149  * Request VLAN header stripping to be enabled
1150  **/
1151 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
1152 {
1153 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1154 		/* bail because we already have a command pending */
1155 		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
1156 			adapter->current_op);
1157 		return;
1158 	}
1159 	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1160 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
1161 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
1162 }
1163 
1164 /**
1165  * iavf_disable_vlan_stripping
1166  * @adapter: adapter structure
1167  *
1168  * Request VLAN header stripping to be disabled
1169  **/
1170 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
1171 {
1172 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1173 		/* bail because we already have a command pending */
1174 		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
1175 			adapter->current_op);
1176 		return;
1177 	}
1178 	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1179 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
1180 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
1181 }
1182 
1183 /**
1184  * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
1185  * @tpid: VLAN TPID (e.g. 0x8100, 0x88a8)
1186  */
1187 static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
1188 {
1189 	switch (tpid) {
1190 	case ETH_P_8021Q:
1191 		return VIRTCHNL_VLAN_ETHERTYPE_8100;
1192 	case ETH_P_8021AD:
1193 		return VIRTCHNL_VLAN_ETHERTYPE_88A8;
1194 	}
1195 
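	/* an unrecognized TPID yields 0, which callers treat as unsupported */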
1196 	return 0;
1197 }
1198 
1199 /**
1200  * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
1201  * @adapter: adapter structure
1202  * @msg: message structure used for updating offloads over virtchnl to update
1203  * @tpid: VLAN TPID (e.g. 0x8100, 0x88a8)
1204  * @offload_op: opcode used to determine which support structure to check
1205  */
1206 static int
1207 iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
1208 			      struct virtchnl_vlan_setting *msg, u16 tpid,
1209 			      enum virtchnl_ops offload_op)
1210 {
1211 	struct virtchnl_vlan_supported_caps *offload_support;
1212 	u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);
1213 
1214 	/* reference the correct offload support structure */
1215 	switch (offload_op) {
1216 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1217 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1218 		offload_support =
1219 			&adapter->vlan_v2_caps.offloads.stripping_support;
1220 		break;
1221 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1222 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1223 		offload_support =
1224 			&adapter->vlan_v2_caps.offloads.insertion_support;
1225 		break;
1226 	default:
1227 		dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
1228 			offload_op);
1229 		return -EINVAL;
1230 	}
1231 
1232 	/* make sure ethertype is supported */
1233 	if (offload_support->outer & vc_ethertype &&
1234 	    offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
1235 		msg->outer_ethertype_setting = vc_ethertype;
1236 	} else if (offload_support->inner & vc_ethertype &&
1237 		   offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
1238 		msg->inner_ethertype_setting = vc_ethertype;
1239 	} else {
1240 		dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
1241 			offload_op, tpid);
1242 		return -EINVAL;
1243 	}
1244 
1245 	return 0;
1246 }
1247 
1248 /**
1249  * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
1250  * @adapter: adapter structure
1251  * @tpid: VLAN TPID
1252  * @offload_op: opcode used to determine which AQ required bit to clear
1253  */
1254 static void
1255 iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
1256 				  enum virtchnl_ops offload_op)
1257 {
1258 	switch (offload_op) {
1259 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1260 		if (tpid == ETH_P_8021Q)
1261 			adapter->aq_required &=
1262 				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
1263 		else if (tpid == ETH_P_8021AD)
1264 			adapter->aq_required &=
1265 				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
1266 		break;
1267 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1268 		if (tpid == ETH_P_8021Q)
1269 			adapter->aq_required &=
1270 				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
1271 		else if (tpid == ETH_P_8021AD)
1272 			adapter->aq_required &=
1273 				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
1274 		break;
1275 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1276 		if (tpid == ETH_P_8021Q)
1277 			adapter->aq_required &=
1278 				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
1279 		else if (tpid == ETH_P_8021AD)
1280 			adapter->aq_required &=
1281 				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
1282 		break;
1283 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1284 		if (tpid == ETH_P_8021Q)
1285 			adapter->aq_required &=
1286 				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
1287 		else if (tpid == ETH_P_8021AD)
1288 			adapter->aq_required &=
1289 				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
1290 		break;
1291 	default:
1292 		dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
1293 			offload_op);
1294 	}
1295 }
1296 
1297 /**
1298  * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
1299  * @adapter: adapter structure
1300  * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
1301  * @offload_op: offload_op used to make the request over virtchnl
1302  */
1303 static void
1304 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
1305 			  enum virtchnl_ops offload_op)
1306 {
1307 	struct virtchnl_vlan_setting *msg;
1308 	int len = sizeof(*msg);
1309 
1310 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1311 		/* bail because we already have a command pending */
1312 		dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
1313 			offload_op, adapter->current_op);
1314 		return;
1315 	}
1316 
1317 	adapter->current_op = offload_op;
1318 
1319 	msg = kzalloc(len, GFP_KERNEL);
1320 	if (!msg)
1321 		return;
1322 
1323 	msg->vport_id = adapter->vsi_res->vsi_id;
1324 
1325 	/* always clear to prevent unsupported and endless requests */
1326 	iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
1327 
1328 	/* only send valid offload requests */
1329 	if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
1330 		iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
1331 	else
1332 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1333 
1334 	kfree(msg);
1335 }
1336 
1337 /**
1338  * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
1339  * @adapter: adapter structure
1340  * @tpid: VLAN TPID used to enable VLAN stripping
1341  */
1342 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1343 {
1344 	iavf_send_vlan_offload_v2(adapter, tpid,
1345 				  VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
1346 }
1347 
1348 /**
1349  * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
1350  * @adapter: adapter structure
1351  * @tpid: VLAN TPID used to disable VLAN stripping
1352  */
1353 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1354 {
1355 	iavf_send_vlan_offload_v2(adapter, tpid,
1356 				  VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
1357 }
1358 
1359 /**
1360  * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
1361  * @adapter: adapter structure
1362  * @tpid: VLAN TPID used to enable VLAN insertion
1363  */
1364 void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1365 {
1366 	iavf_send_vlan_offload_v2(adapter, tpid,
1367 				  VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
1368 }
1369 
1370 /**
1371  * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
1372  * @adapter: adapter structure
1373  * @tpid: VLAN TPID used to disable VLAN insertion
1374  */
1375 void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1376 {
1377 	iavf_send_vlan_offload_v2(adapter, tpid,
1378 				  VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
1379 }
1380 
1381 #define IAVF_MAX_SPEED_STRLEN	13
1382 
1383 /**
1384  * iavf_print_link_message - print link up or down
1385  * @adapter: adapter structure
1386  *
1387  * Log a message telling the world of our wondrous link status
1388  */
1389 static void iavf_print_link_message(struct iavf_adapter *adapter)
1390 {
1391 	struct net_device *netdev = adapter->netdev;
1392 	int link_speed_mbps;
1393 	char *speed;
1394 
1395 	if (!adapter->link_up) {
1396 		netdev_info(netdev, "NIC Link is Down\n");
1397 		return;
1398 	}
1399 
1400 	speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
1401 	if (!speed)
1402 		return;
1403 
1404 	if (ADV_LINK_SUPPORT(adapter)) {
1405 		link_speed_mbps = adapter->link_speed_mbps;
1406 		goto print_link_msg;
1407 	}
1408 
1409 	switch (adapter->link_speed) {
1410 	case VIRTCHNL_LINK_SPEED_40GB:
1411 		link_speed_mbps = SPEED_40000;
1412 		break;
1413 	case VIRTCHNL_LINK_SPEED_25GB:
1414 		link_speed_mbps = SPEED_25000;
1415 		break;
1416 	case VIRTCHNL_LINK_SPEED_20GB:
1417 		link_speed_mbps = SPEED_20000;
1418 		break;
1419 	case VIRTCHNL_LINK_SPEED_10GB:
1420 		link_speed_mbps = SPEED_10000;
1421 		break;
1422 	case VIRTCHNL_LINK_SPEED_5GB:
1423 		link_speed_mbps = SPEED_5000;
1424 		break;
1425 	case VIRTCHNL_LINK_SPEED_2_5GB:
1426 		link_speed_mbps = SPEED_2500;
1427 		break;
1428 	case VIRTCHNL_LINK_SPEED_1GB:
1429 		link_speed_mbps = SPEED_1000;
1430 		break;
1431 	case VIRTCHNL_LINK_SPEED_100MB:
1432 		link_speed_mbps = SPEED_100;
1433 		break;
1434 	default:
1435 		link_speed_mbps = SPEED_UNKNOWN;
1436 		break;
1437 	}
1438 
1439 print_link_msg:
1440 	if (link_speed_mbps > SPEED_1000) {
1441 		if (link_speed_mbps == SPEED_2500)
1442 			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
1443 		else
1444 			/* convert to Gbps inline */
1445 			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
1446 				 link_speed_mbps / 1000, "Gbps");
1447 	} else if (link_speed_mbps == SPEED_UNKNOWN) {
1448 		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
1449 	} else {
1450 		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
1451 			 link_speed_mbps, "Mbps");
1452 	}
1453 
1454 	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
1455 	kfree(speed);
1456 }
1457 
1458 /**
1459  * iavf_get_vpe_link_status
1460  * @adapter: adapter structure
1461  * @vpe: virtchnl_pf_event structure
1462  *
1463  * Helper function for determining the link status
1464  **/
1465 static bool
1466 iavf_get_vpe_link_status(struct iavf_adapter *adapter,
1467 			 struct virtchnl_pf_event *vpe)
1468 {
1469 	if (ADV_LINK_SUPPORT(adapter))
1470 		return vpe->event_data.link_event_adv.link_status;
1471 	else
1472 		return vpe->event_data.link_event.link_status;
1473 }
1474 
1475 /**
1476  * iavf_set_adapter_link_speed_from_vpe
1477  * @adapter: adapter structure for which we are setting the link speed
1478  * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
1479  *
1480  * Helper function for setting iavf_adapter link speed
1481  **/
1482 static void
1483 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
1484 				     struct virtchnl_pf_event *vpe)
1485 {
1486 	if (ADV_LINK_SUPPORT(adapter))
1487 		adapter->link_speed_mbps =
1488 			vpe->event_data.link_event_adv.link_speed;
1489 	else
1490 		adapter->link_speed = vpe->event_data.link_event.link_speed;
1491 }
1492 
1493 /**
1494  * iavf_enable_channels
1495  * @adapter: adapter structure
1496  *
1497  * Request that the PF enable channels as specified by
1498  * the user via tc tool.
1499  **/
1500 void iavf_enable_channels(struct iavf_adapter *adapter)
1501 {
1502 	struct virtchnl_tc_info *vti = NULL;
1503 	size_t len;
1504 	int i;
1505 
1506 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1507 		/* bail because we already have a command pending */
1508 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1509 			adapter->current_op);
1510 		return;
1511 	}
1512 
1513 	len = virtchnl_struct_size(vti, list, adapter->num_tc);
1514 	vti = kzalloc(len, GFP_KERNEL);
1515 	if (!vti)
1516 		return;
1517 	vti->num_tc = adapter->num_tc;
1518 	for (i = 0; i < vti->num_tc; i++) {
1519 		vti->list[i].count = adapter->ch_config.ch_info[i].count;
1520 		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
1521 		vti->list[i].pad = 0;
1522 		vti->list[i].max_tx_rate =
1523 				adapter->ch_config.ch_info[i].max_tx_rate;
1524 	}
1525 
1526 	adapter->ch_config.state = __IAVF_TC_RUNNING;
1527 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1528 	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
1529 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
1530 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
1531 	kfree(vti);
1532 }
1533 
1534 /**
1535  * iavf_disable_channels
1536  * @adapter: adapter structure
1537  *
1538  * Request that the PF disable channels that are configured
1539  **/
1540 void iavf_disable_channels(struct iavf_adapter *adapter)
1541 {
1542 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1543 		/* bail because we already have a command pending */
1544 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1545 			adapter->current_op);
1546 		return;
1547 	}
1548 
1549 	adapter->ch_config.state = __IAVF_TC_INVALID;
1550 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1551 	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1552 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1553 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
1554 }
1555 
1556 /**
1557  * iavf_print_cloud_filter
1558  * @adapter: adapter structure
1559  * @f: cloud filter to print
1560  *
1561  * Print the cloud filter
1562  **/
1563 static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
1564 				    struct virtchnl_filter *f)
1565 {
1566 	switch (f->flow_type) {
1567 	case VIRTCHNL_TCP_V4_FLOW:
1568 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
1569 			 &f->data.tcp_spec.dst_mac,
1570 			 &f->data.tcp_spec.src_mac,
1571 			 ntohs(f->data.tcp_spec.vlan_id),
1572 			 &f->data.tcp_spec.dst_ip[0],
1573 			 &f->data.tcp_spec.src_ip[0],
1574 			 ntohs(f->data.tcp_spec.dst_port),
1575 			 ntohs(f->data.tcp_spec.src_port));
1576 		break;
1577 	case VIRTCHNL_TCP_V6_FLOW:
1578 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
1579 			 &f->data.tcp_spec.dst_mac,
1580 			 &f->data.tcp_spec.src_mac,
1581 			 ntohs(f->data.tcp_spec.vlan_id),
1582 			 &f->data.tcp_spec.dst_ip,
1583 			 &f->data.tcp_spec.src_ip,
1584 			 ntohs(f->data.tcp_spec.dst_port),
1585 			 ntohs(f->data.tcp_spec.src_port));
1586 		break;
1587 	}
1588 }
1589 
1590 /**
1591  * iavf_add_cloud_filter
1592  * @adapter: adapter structure
1593  *
1594  * Request that the PF add cloud filters as specified
1595  * by the user via tc tool.
1596  **/
1597 void iavf_add_cloud_filter(struct iavf_adapter *adapter)
1598 {
1599 	struct iavf_cloud_filter *cf;
1600 	struct virtchnl_filter *f;
1601 	int len = 0, count = 0;
1602 
1603 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1604 		/* bail because we already have a command pending */
1605 		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1606 			adapter->current_op);
1607 		return;
1608 	}
1609 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1610 		if (cf->add) {
1611 			count++;
1612 			break;
1613 		}
1614 	}
1615 	if (!count) {
1616 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1617 		return;
1618 	}
1619 	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1620 
1621 	len = sizeof(struct virtchnl_filter);
1622 	f = kzalloc(len, GFP_KERNEL);
1623 	if (!f)
1624 		return;
1625 
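	/* each pending filter is sent to the PF in its own message, reusing
	 * the same buffer
	 */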
1626 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1627 		if (cf->add) {
1628 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1629 			cf->add = false;
1630 			cf->state = __IAVF_CF_ADD_PENDING;
1631 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
1632 					 (u8 *)f, len);
1633 		}
1634 	}
1635 	kfree(f);
1636 }
1637 
1638 /**
1639  * iavf_del_cloud_filter
1640  * @adapter: adapter structure
1641  *
1642  * Request that the PF delete cloud filters as specified
1643  * by the user via tc tool.
1644  **/
1645 void iavf_del_cloud_filter(struct iavf_adapter *adapter)
1646 {
1647 	struct iavf_cloud_filter *cf, *cftmp;
1648 	struct virtchnl_filter *f;
1649 	int len = 0, count = 0;
1650 
1651 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1652 		/* bail because we already have a command pending */
1653 		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1654 			adapter->current_op);
1655 		return;
1656 	}
1657 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1658 		if (cf->del) {
1659 			count++;
1660 			break;
1661 		}
1662 	}
1663 	if (!count) {
1664 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1665 		return;
1666 	}
1667 	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1668 
1669 	len = sizeof(struct virtchnl_filter);
1670 	f = kzalloc(len, GFP_KERNEL);
1671 	if (!f)
1672 		return;
1673 
1674 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1675 		if (cf->del) {
1676 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1677 			cf->del = false;
1678 			cf->state = __IAVF_CF_DEL_PENDING;
1679 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
1680 					 (u8 *)f, len);
1681 		}
1682 	}
1683 	kfree(f);
1684 }
1685 
1686 /**
1687  * iavf_add_fdir_filter
1688  * @adapter: the VF adapter structure
1689  *
1690  * Request that the PF add Flow Director filters as specified
1691  * by the user via ethtool.
1692  **/
1693 void iavf_add_fdir_filter(struct iavf_adapter *adapter)
1694 {
1695 	struct iavf_fdir_fltr *fdir;
1696 	struct virtchnl_fdir_add *f;
1697 	bool process_fltr = false;
1698 	int len;
1699 
1700 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1701 		/* bail because we already have a command pending */
1702 		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
1703 			adapter->current_op);
1704 		return;
1705 	}
1706 
1707 	len = sizeof(struct virtchnl_fdir_add);
1708 	f = kzalloc(len, GFP_KERNEL);
1709 	if (!f)
1710 		return;
1711 
1712 	spin_lock_bh(&adapter->fdir_fltr_lock);
1713 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1714 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1715 			process_fltr = true;
1716 			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
1717 			memcpy(f, &fdir->vc_add_msg, len);
1718 			break;
1719 		}
1720 	}
1721 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1722 
1723 	if (!process_fltr) {
1724 		/* prevent iavf_add_fdir_filter() from being called when there
1725 		 * are no filters to add
1726 		 */
1727 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1728 		kfree(f);
1729 		return;
1730 	}
1731 	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
1732 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
1733 	kfree(f);
1734 }
1735 
1736 /**
1737  * iavf_del_fdir_filter
1738  * @adapter: the VF adapter structure
1739  *
1740  * Request that the PF delete Flow Director filters as specified
1741  * by the user via ethtool.
1742  **/
1743 void iavf_del_fdir_filter(struct iavf_adapter *adapter)
1744 {
1745 	struct iavf_fdir_fltr *fdir;
1746 	struct virtchnl_fdir_del f;
1747 	bool process_fltr = false;
1748 	int len;
1749 
1750 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1751 		/* bail because we already have a command pending */
1752 		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
1753 			adapter->current_op);
1754 		return;
1755 	}
1756 
1757 	len = sizeof(struct virtchnl_fdir_del);
1758 
1759 	spin_lock_bh(&adapter->fdir_fltr_lock);
1760 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1761 		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
1762 			process_fltr = true;
1763 			memset(&f, 0, len);
1764 			f.vsi_id = fdir->vc_add_msg.vsi_id;
1765 			f.flow_id = fdir->flow_id;
1766 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
1767 			break;
1768 		}
1769 	}
1770 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1771 
1772 	if (!process_fltr) {
1773 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1774 		return;
1775 	}
1776 
1777 	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
1778 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
1779 }
1780 
1781 /**
1782  * iavf_add_adv_rss_cfg
1783  * @adapter: the VF adapter structure
1784  *
1785  * Request that the PF add RSS configuration as specified
1786  * by the user via ethtool.
1787  **/
1788 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
1789 {
1790 	struct virtchnl_rss_cfg *rss_cfg;
1791 	struct iavf_adv_rss *rss;
1792 	bool process_rss = false;
1793 	int len;
1794 
1795 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1796 		/* bail because we already have a command pending */
1797 		dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
1798 			adapter->current_op);
1799 		return;
1800 	}
1801 
1802 	len = sizeof(struct virtchnl_rss_cfg);
1803 	rss_cfg = kzalloc(len, GFP_KERNEL);
1804 	if (!rss_cfg)
1805 		return;
1806 
1807 	spin_lock_bh(&adapter->adv_rss_lock);
1808 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1809 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1810 			process_rss = true;
1811 			rss->state = IAVF_ADV_RSS_ADD_PENDING;
1812 			memcpy(rss_cfg, &rss->cfg_msg, len);
1813 			iavf_print_adv_rss_cfg(adapter, rss,
1814 					       "Input set change for",
1815 					       "is pending");
1816 			break;
1817 		}
1818 	}
1819 	spin_unlock_bh(&adapter->adv_rss_lock);
1820 
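	/* The config is copied to rss_cfg under the spinlock and sent here,
	 * outside the lock, since sending an admin queue message is assumed
	 * to be able to sleep.
	 */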
1821 	if (process_rss) {
1822 		adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
1823 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
1824 				 (u8 *)rss_cfg, len);
1825 	} else {
1826 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1827 	}
1828 
1829 	kfree(rss_cfg);
1830 }
1831 
1832 /**
1833  * iavf_del_adv_rss_cfg
1834  * @adapter: the VF adapter structure
1835  *
1836  * Request that the PF delete RSS configuration as specified
1837  * by the user via ethtool.
1838  **/
1839 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
1840 {
1841 	struct virtchnl_rss_cfg *rss_cfg;
1842 	struct iavf_adv_rss *rss;
1843 	bool process_rss = false;
1844 	int len;
1845 
1846 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1847 		/* bail because we already have a command pending */
1848 		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
1849 			adapter->current_op);
1850 		return;
1851 	}
1852 
1853 	len = sizeof(struct virtchnl_rss_cfg);
1854 	rss_cfg = kzalloc(len, GFP_KERNEL);
1855 	if (!rss_cfg)
1856 		return;
1857 
1858 	spin_lock_bh(&adapter->adv_rss_lock);
1859 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1860 		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
1861 			process_rss = true;
1862 			rss->state = IAVF_ADV_RSS_DEL_PENDING;
1863 			memcpy(rss_cfg, &rss->cfg_msg, len);
1864 			break;
1865 		}
1866 	}
1867 	spin_unlock_bh(&adapter->adv_rss_lock);
1868 
1869 	if (process_rss) {
1870 		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
1871 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
1872 				 (u8 *)rss_cfg, len);
1873 	} else {
1874 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1875 	}
1876 
1877 	kfree(rss_cfg);
1878 }
1879 
1880 /**
1881  * iavf_request_reset
1882  * @adapter: adapter structure
1883  *
 * Request that the PF reset this VF. No response is expected. Returns 0 if
 * the message was sent successfully, or a negative errno otherwise.
1885  **/
1886 int iavf_request_reset(struct iavf_adapter *adapter)
1887 {
1888 	int err;
1889 	/* Don't check CURRENT_OP - this is always higher priority */
1890 	err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
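	/* No completion will arrive for this request, so clear current_op
	 * right away rather than blocking later virtchnl commands.
	 */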
1891 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1892 	return err;
1893 }
1894 
1895 /**
1896  * iavf_netdev_features_vlan_strip_set - update vlan strip status
1897  * @netdev: ptr to netdev being adjusted
1898  * @enable: enable or disable vlan strip
1899  *
1900  * Helper function to change vlan strip status in netdev->features.
1901  */
1902 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
1903 						const bool enable)
1904 {
1905 	if (enable)
1906 		netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1907 	else
1908 		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
1909 }
1910 
1911 /**
1912  * iavf_virtchnl_completion
1913  * @adapter: adapter structure
1914  * @v_opcode: opcode sent by PF
1915  * @v_retval: retval sent by PF
1916  * @msg: message sent by PF
1917  * @msglen: message length
1918  *
1919  * Asynchronous completion function for admin queue messages. Rather than busy
1920  * wait, we fire off our requests and assume that no errors will be returned.
1921  * This function handles the reply messages.
1922  **/
1923 void iavf_virtchnl_completion(struct iavf_adapter *adapter,
1924 			      enum virtchnl_ops v_opcode,
1925 			      enum iavf_status v_retval, u8 *msg, u16 msglen)
1926 {
1927 	struct net_device *netdev = adapter->netdev;
1928 
1929 	if (v_opcode == VIRTCHNL_OP_EVENT) {
1930 		struct virtchnl_pf_event *vpe =
1931 			(struct virtchnl_pf_event *)msg;
1932 		bool link_up = iavf_get_vpe_link_status(adapter, vpe);
1933 
1934 		switch (vpe->event) {
1935 		case VIRTCHNL_EVENT_LINK_CHANGE:
1936 			iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
1937 
1938 			/* we've already got the right link status, bail */
1939 			if (adapter->link_up == link_up)
1940 				break;
1941 
1942 			if (link_up) {
1943 				/* If we get link up message and start queues
1944 				 * before our queues are configured it will
1945 				 * trigger a TX hang. In that case, just ignore
				 * the link status message; we'll get another
				 * one after we enable queues and are actually
				 * prepared to send traffic.
1949 				 */
1950 				if (adapter->state != __IAVF_RUNNING)
1951 					break;
1952 
				/* For an ADq-enabled VF, we reconfigure VSIs
				 * and re-allocate queues, so wait until all
				 * queues are enabled.
1956 				 */
1957 				if (adapter->flags &
1958 				    IAVF_FLAG_QUEUES_DISABLED)
1959 					break;
1960 			}
1961 
1962 			adapter->link_up = link_up;
1963 			if (link_up) {
1964 				netif_tx_start_all_queues(netdev);
1965 				netif_carrier_on(netdev);
1966 			} else {
1967 				netif_tx_stop_all_queues(netdev);
1968 				netif_carrier_off(netdev);
1969 			}
1970 			iavf_print_link_message(adapter);
1971 			break;
1972 		case VIRTCHNL_EVENT_RESET_IMPENDING:
1973 			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
1974 			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
1975 				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1976 				iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
1977 			}
1978 			break;
1979 		default:
1980 			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
1981 				vpe->event);
1982 			break;
1983 		}
1984 		return;
1985 	}
1986 	if (v_retval) {
1987 		switch (v_opcode) {
1988 		case VIRTCHNL_OP_ADD_VLAN:
1989 			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
1990 				iavf_stat_str(&adapter->hw, v_retval));
1991 			break;
1992 		case VIRTCHNL_OP_ADD_ETH_ADDR:
1993 			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
1994 				iavf_stat_str(&adapter->hw, v_retval));
1995 			iavf_mac_add_reject(adapter);
1996 			/* restore administratively set MAC address */
1997 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
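			/* wake any task waiting on vc_waitqueue for this MAC
			 * request to be handled
			 */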
1998 			wake_up(&adapter->vc_waitqueue);
1999 			break;
2000 		case VIRTCHNL_OP_DEL_VLAN:
2001 			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
2002 				iavf_stat_str(&adapter->hw, v_retval));
2003 			break;
2004 		case VIRTCHNL_OP_DEL_ETH_ADDR:
2005 			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
2006 				iavf_stat_str(&adapter->hw, v_retval));
2007 			break;
2008 		case VIRTCHNL_OP_ENABLE_CHANNELS:
2009 			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
2010 				iavf_stat_str(&adapter->hw, v_retval));
2011 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2012 			adapter->ch_config.state = __IAVF_TC_INVALID;
2013 			netdev_reset_tc(netdev);
2014 			netif_tx_start_all_queues(netdev);
2015 			break;
2016 		case VIRTCHNL_OP_DISABLE_CHANNELS:
2017 			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
2018 				iavf_stat_str(&adapter->hw, v_retval));
2019 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2020 			adapter->ch_config.state = __IAVF_TC_RUNNING;
2021 			netif_tx_start_all_queues(netdev);
2022 			break;
2023 		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2024 			struct iavf_cloud_filter *cf, *cftmp;
2025 
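			/* A filter the PF rejected is dropped from the list
			 * entirely, unlike a failed delete which is restored
			 * to the active state below.
			 */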
2026 			list_for_each_entry_safe(cf, cftmp,
2027 						 &adapter->cloud_filter_list,
2028 						 list) {
2029 				if (cf->state == __IAVF_CF_ADD_PENDING) {
2030 					cf->state = __IAVF_CF_INVALID;
2031 					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
2032 						 iavf_stat_str(&adapter->hw,
2033 							       v_retval));
2034 					iavf_print_cloud_filter(adapter,
2035 								&cf->f);
2036 					list_del(&cf->list);
2037 					kfree(cf);
2038 					adapter->num_cloud_filters--;
2039 				}
2040 			}
2041 			}
2042 			break;
2043 		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2044 			struct iavf_cloud_filter *cf;
2045 
2046 			list_for_each_entry(cf, &adapter->cloud_filter_list,
2047 					    list) {
2048 				if (cf->state == __IAVF_CF_DEL_PENDING) {
2049 					cf->state = __IAVF_CF_ACTIVE;
2050 					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
2051 						 iavf_stat_str(&adapter->hw,
2052 							       v_retval));
2053 					iavf_print_cloud_filter(adapter,
2054 								&cf->f);
2055 				}
2056 			}
2057 			}
2058 			break;
2059 		case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2060 			struct iavf_fdir_fltr *fdir, *fdir_tmp;
2061 
2062 			spin_lock_bh(&adapter->fdir_fltr_lock);
2063 			list_for_each_entry_safe(fdir, fdir_tmp,
2064 						 &adapter->fdir_list_head,
2065 						 list) {
2066 				if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2067 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
2068 						 iavf_stat_str(&adapter->hw,
2069 							       v_retval));
2070 					iavf_print_fdir_fltr(adapter, fdir);
2071 					if (msglen)
2072 						dev_err(&adapter->pdev->dev,
2073 							"%s\n", msg);
2074 					list_del(&fdir->list);
2075 					kfree(fdir);
2076 					adapter->fdir_active_fltr--;
2077 				}
2078 			}
2079 			spin_unlock_bh(&adapter->fdir_fltr_lock);
2080 			}
2081 			break;
2082 		case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2083 			struct iavf_fdir_fltr *fdir;
2084 
2085 			spin_lock_bh(&adapter->fdir_fltr_lock);
2086 			list_for_each_entry(fdir, &adapter->fdir_list_head,
2087 					    list) {
2088 				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2089 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2090 					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
2091 						 iavf_stat_str(&adapter->hw,
2092 							       v_retval));
2093 					iavf_print_fdir_fltr(adapter, fdir);
2094 				}
2095 			}
2096 			spin_unlock_bh(&adapter->fdir_fltr_lock);
2097 			}
2098 			break;
2099 		case VIRTCHNL_OP_ADD_RSS_CFG: {
2100 			struct iavf_adv_rss *rss, *rss_tmp;
2101 
2102 			spin_lock_bh(&adapter->adv_rss_lock);
2103 			list_for_each_entry_safe(rss, rss_tmp,
2104 						 &adapter->adv_rss_list_head,
2105 						 list) {
2106 				if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2107 					iavf_print_adv_rss_cfg(adapter, rss,
2108 							       "Failed to change the input set for",
2109 							       NULL);
2110 					list_del(&rss->list);
2111 					kfree(rss);
2112 				}
2113 			}
2114 			spin_unlock_bh(&adapter->adv_rss_lock);
2115 			}
2116 			break;
2117 		case VIRTCHNL_OP_DEL_RSS_CFG: {
2118 			struct iavf_adv_rss *rss;
2119 
2120 			spin_lock_bh(&adapter->adv_rss_lock);
2121 			list_for_each_entry(rss, &adapter->adv_rss_list_head,
2122 					    list) {
2123 				if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2124 					rss->state = IAVF_ADV_RSS_ACTIVE;
2125 					dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
2126 						iavf_stat_str(&adapter->hw,
2127 							      v_retval));
2128 				}
2129 			}
2130 			spin_unlock_bh(&adapter->adv_rss_lock);
2131 			}
2132 			break;
2133 		case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2134 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
			/* VLAN stripping could not be enabled by ethtool.
			 * Disable it in netdev->features.
2137 			 */
2138 			iavf_netdev_features_vlan_strip_set(netdev, false);
2139 			break;
2140 		case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2141 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
			/* VLAN stripping could not be disabled by ethtool.
			 * Enable it in netdev->features.
2144 			 */
2145 			iavf_netdev_features_vlan_strip_set(netdev, true);
2146 			break;
2147 		case VIRTCHNL_OP_ADD_VLAN_V2:
2148 			iavf_vlan_add_reject(adapter);
2149 			dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2150 				 iavf_stat_str(&adapter->hw, v_retval));
2151 			break;
2152 		default:
2153 			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
2154 				v_retval, iavf_stat_str(&adapter->hw, v_retval),
2155 				v_opcode);
2156 		}
2157 	}
2158 	switch (v_opcode) {
2159 	case VIRTCHNL_OP_ADD_ETH_ADDR:
2160 		if (!v_retval)
2161 			iavf_mac_add_ok(adapter);
		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr)) {
			netif_addr_lock_bh(netdev);
			eth_hw_addr_set(netdev, adapter->hw.mac.addr);
			netif_addr_unlock_bh(netdev);
		}
2169 		wake_up(&adapter->vc_waitqueue);
2170 		break;
2171 	case VIRTCHNL_OP_GET_STATS: {
2172 		struct iavf_eth_stats *stats =
2173 			(struct iavf_eth_stats *)msg;
2174 		netdev->stats.rx_packets = stats->rx_unicast +
2175 					   stats->rx_multicast +
2176 					   stats->rx_broadcast;
2177 		netdev->stats.tx_packets = stats->tx_unicast +
2178 					   stats->tx_multicast +
2179 					   stats->tx_broadcast;
2180 		netdev->stats.rx_bytes = stats->rx_bytes;
2181 		netdev->stats.tx_bytes = stats->tx_bytes;
2182 		netdev->stats.tx_errors = stats->tx_errors;
2183 		netdev->stats.rx_dropped = stats->rx_discards;
2184 		netdev->stats.tx_dropped = stats->tx_discards;
2185 		adapter->current_stats = *stats;
2186 		}
2187 		break;
2188 	case VIRTCHNL_OP_GET_VF_RESOURCES: {
2189 		u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
2190 
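		/* Clamp the copy to the size of the allocated vf_res buffer in
		 * case the PF sends more data than we expect.
		 */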
2191 		memcpy(adapter->vf_res, msg, min(msglen, len));
2192 		iavf_validate_num_queues(adapter);
2193 		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
2194 		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
2195 			/* restore current mac address */
2196 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2197 		} else {
2198 			netif_addr_lock_bh(netdev);
			/* refresh the permanent mac address if it changed */
2200 			ether_addr_copy(netdev->perm_addr,
2201 					adapter->hw.mac.addr);
2202 			netif_addr_unlock_bh(netdev);
2203 		}
2204 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2205 		iavf_add_filter(adapter, adapter->hw.mac.addr);
2206 
2207 		if (VLAN_ALLOWED(adapter)) {
2208 			if (!list_empty(&adapter->vlan_filter_list)) {
2209 				struct iavf_vlan_filter *vlf;
2210 
2211 				/* re-add all VLAN filters over virtchnl */
2212 				list_for_each_entry(vlf,
2213 						    &adapter->vlan_filter_list,
2214 						    list)
2215 					vlf->state = IAVF_VLAN_ADD;
2216 
2217 				adapter->aq_required |=
2218 					IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2219 			}
2220 		}
2221 
2222 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2223 
2224 		iavf_parse_vf_resource_msg(adapter);
2225 
2226 		/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
2227 		 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
2228 		 * configuration
2229 		 */
2230 		if (VLAN_V2_ALLOWED(adapter))
2231 			break;
2232 		/* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
2233 		 * wasn't successfully negotiated with the PF
2234 		 */
2235 		}
2236 		fallthrough;
2237 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
2238 		struct iavf_mac_filter *f;
2239 		bool was_mac_changed;
2240 		u64 aq_required = 0;
2241 
2242 		if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
2243 			memcpy(&adapter->vlan_v2_caps, msg,
2244 			       min_t(u16, msglen,
2245 				     sizeof(adapter->vlan_v2_caps)));
2246 
2247 		iavf_process_config(adapter);
2248 		adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
2249 		iavf_schedule_finish_config(adapter);
2250 
2251 		iavf_set_queue_vlan_tag_loc(adapter);
2252 
2253 		was_mac_changed = !ether_addr_equal(netdev->dev_addr,
2254 						    adapter->hw.mac.addr);
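		/* If the PF handed back a different MAC address, the filter
		 * matching the old netdev address is rewritten in the loop
		 * below to track the new hardware address.
		 */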
2255 
2256 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2257 
2258 		/* re-add all MAC filters */
2259 		list_for_each_entry(f, &adapter->mac_filter_list, list) {
2260 			if (was_mac_changed &&
2261 			    ether_addr_equal(netdev->dev_addr, f->macaddr))
2262 				ether_addr_copy(f->macaddr,
2263 						adapter->hw.mac.addr);
2264 
2265 			f->is_new_mac = true;
2266 			f->add = true;
2267 			f->add_handled = false;
2268 			f->remove = false;
2269 		}
2270 
2271 		/* re-add all VLAN filters */
2272 		if (VLAN_FILTERING_ALLOWED(adapter)) {
2273 			struct iavf_vlan_filter *vlf;
2274 
2275 			if (!list_empty(&adapter->vlan_filter_list)) {
2276 				list_for_each_entry(vlf,
2277 						    &adapter->vlan_filter_list,
2278 						    list)
2279 					vlf->state = IAVF_VLAN_ADD;
2280 
2281 				aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2282 			}
2283 		}
2284 
2285 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2286 
2287 		netif_addr_lock_bh(netdev);
2288 		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2289 		netif_addr_unlock_bh(netdev);
2290 
2291 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
2292 			aq_required;
2293 		}
2294 		break;
2295 	case VIRTCHNL_OP_ENABLE_QUEUES:
2296 		/* enable transmits */
2297 		iavf_irq_enable(adapter, true);
2298 		wake_up(&adapter->reset_waitqueue);
2299 		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
2300 		break;
2301 	case VIRTCHNL_OP_DISABLE_QUEUES:
2302 		iavf_free_all_tx_resources(adapter);
2303 		iavf_free_all_rx_resources(adapter);
2304 		if (adapter->state == __IAVF_DOWN_PENDING) {
2305 			iavf_change_state(adapter, __IAVF_DOWN);
2306 			wake_up(&adapter->down_waitqueue);
2307 		}
2308 		break;
2309 	case VIRTCHNL_OP_VERSION:
2310 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2311 		/* Don't display an error if we get these out of sequence.
2312 		 * If the firmware needed to get kicked, we'll get these and
2313 		 * it's no problem.
2314 		 */
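		/* Return, rather than break, so that adapter->current_op is
		 * left untouched for whichever command is actually in flight.
		 */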
2315 		if (v_opcode != adapter->current_op)
2316 			return;
2317 		break;
2318 	case VIRTCHNL_OP_RDMA:
2319 		/* Gobble zero-length replies from the PF. They indicate that
2320 		 * a previous message was received OK, and the client doesn't
2321 		 * care about that.
2322 		 */
2323 		if (msglen && CLIENT_ENABLED(adapter))
2324 			iavf_notify_client_message(&adapter->vsi, msg, msglen);
2325 		break;
2326 
2327 	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
2328 		adapter->client_pending &=
2329 				~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP));
2330 		break;
2331 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
2332 		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
2333 
2334 		if (msglen == sizeof(*vrh))
2335 			adapter->hena = vrh->hena;
2336 		else
2337 			dev_warn(&adapter->pdev->dev,
2338 				 "Invalid message %d from PF\n", v_opcode);
2339 		}
2340 		break;
2341 	case VIRTCHNL_OP_REQUEST_QUEUES: {
2342 		struct virtchnl_vf_res_request *vfres =
2343 			(struct virtchnl_vf_res_request *)msg;
2344 
2345 		if (vfres->num_queue_pairs != adapter->num_req_queues) {
2346 			dev_info(&adapter->pdev->dev,
2347 				 "Requested %d queues, PF can support %d\n",
2348 				 adapter->num_req_queues,
2349 				 vfres->num_queue_pairs);
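			/* Drop the request and the pending reinit since the
			 * PF could not provide the requested count; keep the
			 * current queue configuration instead.
			 */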
2350 			adapter->num_req_queues = 0;
2351 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2352 		}
2353 		}
2354 		break;
2355 	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2356 		struct iavf_cloud_filter *cf;
2357 
2358 		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2359 			if (cf->state == __IAVF_CF_ADD_PENDING)
2360 				cf->state = __IAVF_CF_ACTIVE;
2361 		}
2362 		}
2363 		break;
2364 	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2365 		struct iavf_cloud_filter *cf, *cftmp;
2366 
2367 		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2368 					 list) {
2369 			if (cf->state == __IAVF_CF_DEL_PENDING) {
2370 				cf->state = __IAVF_CF_INVALID;
2371 				list_del(&cf->list);
2372 				kfree(cf);
2373 				adapter->num_cloud_filters--;
2374 			}
2375 		}
2376 		}
2377 		break;
2378 	case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2379 		struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
2380 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
2381 
2382 		spin_lock_bh(&adapter->fdir_fltr_lock);
2383 		list_for_each_entry_safe(fdir, fdir_tmp,
2384 					 &adapter->fdir_list_head,
2385 					 list) {
2386 			if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2387 				if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
2388 					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
2389 						 fdir->loc);
2390 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2391 					fdir->flow_id = add_fltr->flow_id;
2392 				} else {
2393 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
2394 						 add_fltr->status);
2395 					iavf_print_fdir_fltr(adapter, fdir);
2396 					list_del(&fdir->list);
2397 					kfree(fdir);
2398 					adapter->fdir_active_fltr--;
2399 				}
2400 			}
2401 		}
2402 		spin_unlock_bh(&adapter->fdir_fltr_lock);
2403 		}
2404 		break;
2405 	case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2406 		struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
2407 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
2408 
2409 		spin_lock_bh(&adapter->fdir_fltr_lock);
2410 		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
2411 					 list) {
2412 			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2413 				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
2414 					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
2415 						 fdir->loc);
2416 					list_del(&fdir->list);
2417 					kfree(fdir);
2418 					adapter->fdir_active_fltr--;
2419 				} else {
2420 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2421 					dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
2422 						 del_fltr->status);
2423 					iavf_print_fdir_fltr(adapter, fdir);
2424 				}
2425 			}
2426 		}
2427 		spin_unlock_bh(&adapter->fdir_fltr_lock);
2428 		}
2429 		break;
2430 	case VIRTCHNL_OP_ADD_RSS_CFG: {
2431 		struct iavf_adv_rss *rss;
2432 
2433 		spin_lock_bh(&adapter->adv_rss_lock);
2434 		list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2435 			if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2436 				iavf_print_adv_rss_cfg(adapter, rss,
2437 						       "Input set change for",
2438 						       "successful");
2439 				rss->state = IAVF_ADV_RSS_ACTIVE;
2440 			}
2441 		}
2442 		spin_unlock_bh(&adapter->adv_rss_lock);
2443 		}
2444 		break;
2445 	case VIRTCHNL_OP_DEL_RSS_CFG: {
2446 		struct iavf_adv_rss *rss, *rss_tmp;
2447 
2448 		spin_lock_bh(&adapter->adv_rss_lock);
2449 		list_for_each_entry_safe(rss, rss_tmp,
2450 					 &adapter->adv_rss_list_head, list) {
2451 			if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2452 				list_del(&rss->list);
2453 				kfree(rss);
2454 			}
2455 		}
2456 		spin_unlock_bh(&adapter->adv_rss_lock);
2457 		}
2458 		break;
2459 	case VIRTCHNL_OP_ADD_VLAN_V2: {
2460 		struct iavf_vlan_filter *f;
2461 
2462 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2463 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
2464 			if (f->state == IAVF_VLAN_IS_NEW)
2465 				f->state = IAVF_VLAN_ACTIVE;
2466 		}
2467 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2468 		}
2469 		break;
2470 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		/* PF enabled VLAN stripping on this VF.
		 * Update netdev->features if needed to keep it in sync with
		 * ethtool.
		 */
2474 		if (!v_retval)
2475 			iavf_netdev_features_vlan_strip_set(netdev, true);
2476 		break;
2477 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		/* PF disabled VLAN stripping on this VF.
		 * Update netdev->features if needed to keep it in sync with
		 * ethtool.
		 */
2481 		if (!v_retval)
2482 			iavf_netdev_features_vlan_strip_set(netdev, false);
2483 		break;
2484 	default:
2485 		if (adapter->current_op && (v_opcode != adapter->current_op))
2486 			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
2487 				 adapter->current_op, v_opcode);
2488 		break;
2489 	} /* switch v_opcode */
2490 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2491 }
2492