xref: /linux/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/net/intel/libie/rx.h>
5 
6 #include "iavf.h"
7 #include "iavf_ptp.h"
8 #include "iavf_prototype.h"
9 
10 /**
11  * iavf_send_pf_msg
12  * @adapter: adapter structure
13  * @op: virtual channel opcode
14  * @msg: pointer to message buffer
15  * @len: message length
16  *
17  * Send message to PF and print status if failure.
18  **/
iavf_send_pf_msg(struct iavf_adapter * adapter,enum virtchnl_ops op,u8 * msg,u16 len)19 static int iavf_send_pf_msg(struct iavf_adapter *adapter,
20 			    enum virtchnl_ops op, u8 *msg, u16 len)
21 {
22 	struct iavf_hw *hw = &adapter->hw;
23 	enum iavf_status status;
24 
25 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
26 		return 0; /* nothing to see here, move along */
27 
28 	status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
29 	if (status)
30 		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
31 			op, iavf_stat_str(hw, status),
32 			libie_aq_str(hw->aq.asq_last_status));
33 	return iavf_status_to_errno(status);
34 }
35 
36 /**
37  * iavf_send_api_ver
38  * @adapter: adapter structure
39  *
40  * Send API version admin queue message to the PF. The reply is not checked
41  * in this function. Returns 0 if the message was successfully
42  * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
43  **/
iavf_send_api_ver(struct iavf_adapter * adapter)44 int iavf_send_api_ver(struct iavf_adapter *adapter)
45 {
46 	struct virtchnl_version_info vvi;
47 
48 	vvi.major = VIRTCHNL_VERSION_MAJOR;
49 	vvi.minor = VIRTCHNL_VERSION_MINOR;
50 
51 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
52 				sizeof(vvi));
53 }
54 
/**
 * iavf_poll_virtchnl_msg
 * @hw: HW configuration structure
 * @event: event to populate on success
 * @op_to_poll: requested virtchnl op to poll for
 *
 * Initialize poll for virtchnl msg matching the requested_op. Returns 0
 * if a message of the correct opcode is in the queue or an error code
 * if no message matching the op code is waiting and other failures.
 */
static int
iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
		       enum virtchnl_ops op_to_poll)
{
	enum virtchnl_ops received_op;
	enum iavf_status status;
	u32 v_retval;

	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		status = iavf_clean_arq_element(hw, event, NULL);
		if (status != IAVF_SUCCESS)
			return iavf_status_to_errno(status);
		/* The replied opcode travels in the descriptor's cookie_high */
		received_op =
		    (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);

		/* Unsolicited PF events can arrive while we wait for our
		 * reply. A RESET_IMPENDING event aborts the poll so the
		 * reset task can take over; any other event is skipped.
		 */
		if (received_op == VIRTCHNL_OP_EVENT) {
			struct iavf_adapter *adapter = hw->back;
			struct virtchnl_pf_event *vpe =
				(struct virtchnl_pf_event *)event->msg_buf;

			if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
				continue;

			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
				iavf_schedule_reset(adapter,
						    IAVF_FLAG_RESET_PENDING);

			return -EIO;
		}

		/* Replies for other opcodes are consumed and dropped */
		if (op_to_poll == received_op)
			break;
	}

	/* The PF's status code for the op travels in cookie_low */
	v_retval = le32_to_cpu(event->desc.cookie_low);
	return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
}
106 
107 /**
108  * iavf_verify_api_ver
109  * @adapter: adapter structure
110  *
111  * Compare API versions with the PF. Must be called after admin queue is
112  * initialized. Returns 0 if API versions match, -EIO if they do not,
113  * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
114  * from the firmware are propagated.
115  **/
iavf_verify_api_ver(struct iavf_adapter * adapter)116 int iavf_verify_api_ver(struct iavf_adapter *adapter)
117 {
118 	struct iavf_arq_event_info event;
119 	int err;
120 
121 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
122 	event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
123 	if (!event.msg_buf)
124 		return -ENOMEM;
125 
126 	err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
127 	if (!err) {
128 		struct virtchnl_version_info *pf_vvi =
129 			(struct virtchnl_version_info *)event.msg_buf;
130 		adapter->pf_version = *pf_vvi;
131 
132 		if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
133 		    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
134 		     pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
135 			err = -EIO;
136 	}
137 
138 	kfree(event.msg_buf);
139 
140 	return err;
141 }
142 
143 /**
144  * iavf_send_vf_config_msg
145  * @adapter: adapter structure
146  *
147  * Send VF configuration request admin queue message to the PF. The reply
148  * is not checked in this function. Returns 0 if the message was
149  * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
150  **/
iavf_send_vf_config_msg(struct iavf_adapter * adapter)151 int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
152 {
153 	u32 caps;
154 
155 	caps = VIRTCHNL_VF_OFFLOAD_L2 |
156 	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
157 	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
158 	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
159 	       VIRTCHNL_VF_OFFLOAD_VLAN |
160 	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
161 	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
162 	       VIRTCHNL_VF_OFFLOAD_ENCAP |
163 	       VIRTCHNL_VF_OFFLOAD_TC_U32 |
164 	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
165 	       VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
166 	       VIRTCHNL_VF_OFFLOAD_CRC |
167 	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
168 	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
169 	       VIRTCHNL_VF_CAP_PTP |
170 	       VIRTCHNL_VF_OFFLOAD_ADQ |
171 	       VIRTCHNL_VF_OFFLOAD_USO |
172 	       VIRTCHNL_VF_OFFLOAD_FDIR_PF |
173 	       VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
174 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
175 	       VIRTCHNL_VF_OFFLOAD_QOS;
176 
177 	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
178 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
179 	if (PF_IS_V11(adapter))
180 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
181 					(u8 *)&caps, sizeof(caps));
182 	else
183 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
184 					NULL, 0);
185 }
186 
iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter * adapter)187 int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
188 {
189 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
190 
191 	if (!VLAN_V2_ALLOWED(adapter))
192 		return -EOPNOTSUPP;
193 
194 	adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
195 
196 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
197 				NULL, 0);
198 }
199 
iavf_send_vf_supported_rxdids_msg(struct iavf_adapter * adapter)200 int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
201 {
202 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;
203 
204 	if (!IAVF_RXDID_ALLOWED(adapter))
205 		return -EOPNOTSUPP;
206 
207 	adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
208 
209 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
210 				NULL, 0);
211 }
212 
213 /**
214  * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
215  * @adapter: private adapter structure
216  *
217  * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
218  * capabilities available to this device. This includes the following
219  * potential access:
220  *
221  * * READ_PHC - access to read the PTP hardware clock time
222  * * RX_TSTAMP - access to request Rx timestamps on all received packets
223  *
224  * The PF will reply with the same opcode a filled out copy of the
225  * virtchnl_ptp_caps structure which defines the specifics of which features
226  * are accessible to this device.
227  *
228  * Return: 0 if success, error code otherwise.
229  */
iavf_send_vf_ptp_caps_msg(struct iavf_adapter * adapter)230 int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
231 {
232 	struct virtchnl_ptp_caps hw_caps = {
233 		.caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
234 			VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
235 	};
236 
237 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;
238 
239 	if (!IAVF_PTP_ALLOWED(adapter))
240 		return -EOPNOTSUPP;
241 
242 	adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;
243 
244 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
245 				(u8 *)&hw_caps, sizeof(hw_caps));
246 }
247 
248 /**
249  * iavf_validate_num_queues
250  * @adapter: adapter structure
251  *
252  * Validate that the number of queues the PF has sent in
253  * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
254  **/
iavf_validate_num_queues(struct iavf_adapter * adapter)255 static void iavf_validate_num_queues(struct iavf_adapter *adapter)
256 {
257 	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
258 		struct virtchnl_vsi_resource *vsi_res;
259 		int i;
260 
261 		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
262 			 adapter->vf_res->num_queue_pairs,
263 			 IAVF_MAX_REQ_QUEUES);
264 		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
265 			 IAVF_MAX_REQ_QUEUES);
266 		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
267 		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
268 			vsi_res = &adapter->vf_res->vsi_res[i];
269 			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
270 		}
271 	}
272 }
273 
/**
 * iavf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	u16 len;
	int err;

	len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
	event.buf_len = len;
	event.msg_buf = kzalloc(len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
	/* Note: the reply is copied into vf_res unconditionally (bounded by
	 * our buffer size), even when polling reported an error.
	 */
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		iavf_validate_num_queues(adapter);
	iavf_vf_parse_hw_config(hw, adapter->vf_res);

	kfree(event.msg_buf);

	return err;
}
310 
iavf_get_vf_vlan_v2_caps(struct iavf_adapter * adapter)311 int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
312 {
313 	struct iavf_arq_event_info event;
314 	int err;
315 	u16 len;
316 
317 	len = sizeof(struct virtchnl_vlan_caps);
318 	event.buf_len = len;
319 	event.msg_buf = kzalloc(len, GFP_KERNEL);
320 	if (!event.msg_buf)
321 		return -ENOMEM;
322 
323 	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
324 				     VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
325 	if (!err)
326 		memcpy(&adapter->vlan_v2_caps, event.msg_buf,
327 		       min(event.msg_len, len));
328 
329 	kfree(event.msg_buf);
330 
331 	return err;
332 }
333 
iavf_get_vf_supported_rxdids(struct iavf_adapter * adapter)334 int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
335 {
336 	struct iavf_arq_event_info event;
337 	u64 rxdids;
338 	int err;
339 
340 	event.msg_buf = (u8 *)&rxdids;
341 	event.buf_len = sizeof(rxdids);
342 
343 	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
344 				     VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
345 	if (!err)
346 		adapter->supp_rxdids = rxdids;
347 
348 	return err;
349 }
350 
iavf_get_vf_ptp_caps(struct iavf_adapter * adapter)351 int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
352 {
353 	struct virtchnl_ptp_caps caps = {};
354 	struct iavf_arq_event_info event;
355 	int err;
356 
357 	event.msg_buf = (u8 *)&caps;
358 	event.buf_len = sizeof(caps);
359 
360 	err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
361 				     VIRTCHNL_OP_1588_PTP_GET_CAPS);
362 	if (!err)
363 		adapter->ptp.hw_caps = caps;
364 
365 	return err;
366 }
367 
/**
 * iavf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void iavf_configure_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	int pairs = adapter->num_active_queues;
	struct virtchnl_queue_pair_info *vqpi;
	u32 i, max_frame;
	u8 rx_flags = 0;
	size_t len;

	/* Largest Rx frame we can take, further clamped by the PF-advertised
	 * max_mtu when that value is nonzero.
	 */
	max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
	max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = virtchnl_struct_size(vqci, qpair, pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	/* Request Rx timestamps only if the PF granted that capability */
	if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
		rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
		/* Request the negotiated flexible Rx descriptor, if any */
		if (IAVF_RXDID_ALLOWED(adapter))
			vqpi->rxq.rxdid = adapter->rxdid;
		/* crc_disable mirrors the user's RXFCS netdev feature */
		if (CRC_OFFLOAD_ALLOWED(adapter))
			vqpi->rxq.crc_disable = !!(adapter->netdev->features &
						   NETIF_F_RXFCS);
		vqpi->rxq.flags = rx_flags;
		vqpi++;
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			 (u8 *)vqci, len);
	kfree(vqci);
}
432 
433 /**
434  * iavf_enable_queues
435  * @adapter: adapter structure
436  *
437  * Request that the PF enable all of our queues.
438  **/
iavf_enable_queues(struct iavf_adapter * adapter)439 void iavf_enable_queues(struct iavf_adapter *adapter)
440 {
441 	struct virtchnl_queue_select vqs;
442 
443 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
444 		/* bail because we already have a command pending */
445 		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
446 			adapter->current_op);
447 		return;
448 	}
449 	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
450 	vqs.vsi_id = adapter->vsi_res->vsi_id;
451 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
452 	vqs.rx_queues = vqs.tx_queues;
453 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
454 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
455 			 (u8 *)&vqs, sizeof(vqs));
456 }
457 
458 /**
459  * iavf_disable_queues
460  * @adapter: adapter structure
461  *
462  * Request that the PF disable all of our queues.
463  **/
iavf_disable_queues(struct iavf_adapter * adapter)464 void iavf_disable_queues(struct iavf_adapter *adapter)
465 {
466 	struct virtchnl_queue_select vqs;
467 
468 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
469 		/* bail because we already have a command pending */
470 		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
471 			adapter->current_op);
472 		return;
473 	}
474 	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
475 	vqs.vsi_id = adapter->vsi_res->vsi_id;
476 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
477 	vqs.rx_queues = vqs.tx_queues;
478 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
479 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
480 			 (u8 *)&vqs, sizeof(vqs));
481 }
482 
/**
 * iavf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void iavf_map_queues(struct iavf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	struct iavf_q_vector *q_vector;
	int v_idx, q_vectors;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	/* NONQ_VECS vectors are reserved for non-queue interrupts */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		/* queue vectors start after the reserved non-queue vectors */
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = IAVF_RX_ITR;
		vecmap->txitr_idx = IAVF_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			 (u8 *)vimi, len);
	kfree(vimi);
}
538 
539 /**
540  * iavf_set_mac_addr_type - Set the correct request type from the filter type
541  * @virtchnl_ether_addr: pointer to requested list element
542  * @filter: pointer to requested filter
543  **/
544 static void
iavf_set_mac_addr_type(struct virtchnl_ether_addr * virtchnl_ether_addr,const struct iavf_mac_filter * filter)545 iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
546 		       const struct iavf_mac_filter *filter)
547 {
548 	virtchnl_ether_addr->type = filter->is_primary ?
549 		VIRTCHNL_ETHER_ADDR_PRIMARY :
550 		VIRTCHNL_ETHER_ADDR_EXTRA;
551 }
552 
/**
 * iavf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;
	bool more = false;	/* true when the request had to be split */
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* First pass: count pending additions to size the message */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	/* Trim the request until it fits in one AQ buffer; leftover filters
	 * keep f->add set and go out in a later message.
	 */
	len = virtchnl_struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		while (len > IAVF_MAX_AQ_BUF_SIZE)
			len = virtchnl_struct_size(veal, list, --count);
		more = true;
	}

	/* GFP_ATOMIC: allocating inside the BH-disabled spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	/* keep the AQ flag set when more filters remain to be sent */
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}
621 
/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;
	bool more = false;	/* true when the request had to be split */
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* First pass: count pending removals to size the message */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	/* Trim the request until it fits in one AQ buffer; leftover filters
	 * keep f->remove set and go out in a later message.
	 */
	len = virtchnl_struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		while (len > IAVF_MAX_AQ_BUF_SIZE)
			len = virtchnl_struct_size(veal, list, --count);
		more = true;
	}
	/* GFP_ATOMIC: allocating inside the BH-disabled spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* _safe variant: entries are freed while walking the list */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			iavf_set_mac_addr_type(&veal->list[i], f);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	/* keep the AQ flag set when more filters remain to be sent */
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}
690 
691 /**
692  * iavf_mac_add_ok
693  * @adapter: adapter structure
694  *
695  * Submit list of filters based on PF response.
696  **/
iavf_mac_add_ok(struct iavf_adapter * adapter)697 static void iavf_mac_add_ok(struct iavf_adapter *adapter)
698 {
699 	struct iavf_mac_filter *f, *ftmp;
700 
701 	spin_lock_bh(&adapter->mac_vlan_list_lock);
702 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
703 		f->is_new_mac = false;
704 		if (!f->add && !f->add_handled)
705 			f->add_handled = true;
706 	}
707 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
708 }
709 
710 /**
711  * iavf_mac_add_reject
712  * @adapter: adapter structure
713  *
714  * Remove filters from list based on PF response.
715  **/
iavf_mac_add_reject(struct iavf_adapter * adapter)716 static void iavf_mac_add_reject(struct iavf_adapter *adapter)
717 {
718 	struct net_device *netdev = adapter->netdev;
719 	struct iavf_mac_filter *f, *ftmp;
720 
721 	spin_lock_bh(&adapter->mac_vlan_list_lock);
722 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
723 		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
724 			f->remove = false;
725 
726 		if (!f->add && !f->add_handled)
727 			f->add_handled = true;
728 
729 		if (f->is_new_mac) {
730 			list_del(&f->list);
731 			kfree(f);
732 		}
733 	}
734 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
735 }
736 
737 /**
738  * iavf_vlan_add_reject
739  * @adapter: adapter structure
740  *
741  * Remove VLAN filters from list based on PF response.
742  **/
iavf_vlan_add_reject(struct iavf_adapter * adapter)743 static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
744 {
745 	struct iavf_vlan_filter *f, *ftmp;
746 
747 	spin_lock_bh(&adapter->mac_vlan_list_lock);
748 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
749 		if (f->state == IAVF_VLAN_ADDING) {
750 			list_del(&f->list);
751 			kfree(f);
752 			adapter->num_vlan_filters--;
753 		}
754 	}
755 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
756 }
757 
/**
 * iavf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;	/* true when the request had to be split */

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* First pass: count pending additions to size the message */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_ADD)
			count++;
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	if (VLAN_ALLOWED(adapter)) {
		/* Legacy (v1) VLAN offload path */
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

		/* Trim until the message fits in one AQ buffer; filters left
		 * in IAVF_VLAN_ADD state are sent in a follow-up message.
		 */
		len = virtchnl_struct_size(vvfl, vlan_id, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_info(&adapter->pdev->dev,
				 "virtchnl: Too many VLAN add (v1) requests; splitting into multiple messages to PF\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl, vlan_id,
							   --count);
			more = true;
		}
		/* GFP_ATOMIC: allocating inside the BH-disabled spinlock */
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_ADD) {
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				f->state = IAVF_VLAN_ADDING;
				if (i == count)
					break;
			}
		}
		/* keep the AQ flag set when more filters remain to be sent */
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		/* VLAN V2 offload path */
		u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
		u16 current_vlans = iavf_get_num_vlans_added(adapter);
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;

		/* Clamp to the PF's per-VF filter limit while room remains */
		if ((count + current_vlans) > max_vlans &&
		    current_vlans < max_vlans) {
			count = max_vlans - iavf_get_num_vlans_added(adapter);
			more = true;
		}

		/* Trim until the message fits in one AQ buffer */
		len = virtchnl_struct_size(vvfl_v2, filters, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_info(&adapter->pdev->dev,
				 "virtchnl: Too many VLAN add (v2) requests; splitting into multiple messages to PF\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl_v2, filters,
							   --count);
			more = true;
		}

		/* GFP_ATOMIC: allocating inside the BH-disabled spinlock */
		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_ADD) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				if (i == count)
					break;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				i++;
				f->state = IAVF_VLAN_ADDING;
			}
		}

		/* keep the AQ flag set when more filters remain to be sent */
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}
891 
/**
 * iavf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 **/
void iavf_del_vlans(struct iavf_adapter *adapter)
{
	struct iavf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;	/* true when the request had to be split */

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* When VLAN filtering is not allowed, pending removals can be freed
	 * locally without telling the PF; otherwise count them to size the
	 * message. _safe variant: entries may be freed while walking.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->state == IAVF_VLAN_REMOVE &&
		    !VLAN_FILTERING_ALLOWED(adapter)) {
			list_del(&f->list);
			kfree(f);
			adapter->num_vlan_filters--;
		} else if (f->state == IAVF_VLAN_REMOVE) {
			count++;
		}
	}
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	if (VLAN_ALLOWED(adapter)) {
		/* Legacy (v1) VLAN offload path */
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

		/* Trim until the message fits in one AQ buffer; filters left
		 * in IAVF_VLAN_REMOVE state go out in a follow-up message.
		 */
		len = virtchnl_struct_size(vvfl, vlan_id, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_info(&adapter->pdev->dev,
				 "virtchnl: Too many VLAN delete (v1) requests; splitting into multiple messages to PF\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl, vlan_id,
							   --count);
			more = true;
		}
		/* GFP_ATOMIC: allocating inside the BH-disabled spinlock */
		vvfl = kzalloc(len, GFP_ATOMIC);
		if (!vvfl) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl->vsi_id = adapter->vsi_res->vsi_id;
		vvfl->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_REMOVE) {
				vvfl->vlan_id[i] = f->vlan.vid;
				f->state = IAVF_VLAN_REMOVING;
				i++;
				if (i == count)
					break;
			}
		}

		/* keep the AQ flag set when more filters remain to be sent */
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		/* VLAN V2 offload path */
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;

		/* Trim until the message fits in one AQ buffer */
		len = virtchnl_struct_size(vvfl_v2, filters, count);
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_info(&adapter->pdev->dev,
				 "virtchnl: Too many VLAN delete (v2) requests; splitting into multiple messages to PF\n");
			while (len > IAVF_MAX_AQ_BUF_SIZE)
				len = virtchnl_struct_size(vvfl_v2, filters,
							   --count);
			more = true;
		}

		/* GFP_ATOMIC: allocating inside the BH-disabled spinlock */
		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_REMOVE) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				f->state = IAVF_VLAN_REMOVING;
				i++;
				if (i == count)
					break;
			}
		}

		/* keep the AQ flag set when more filters remain to be sent */
		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}
1023 
/**
 * iavf_set_promiscuous
 * @adapter: adapter structure
 *
 * Request that the PF enable promiscuous mode for our VSI.
 * The desired state is derived from netdev->flags (IFF_PROMISC and
 * IFF_ALLMULTI) and mirrored into adapter->current_netdev_promisc_flags
 * while holding current_netdev_promisc_flags_lock, so that duplicate AQ
 * requests for an unchanged state are skipped.
 **/
void iavf_set_promiscuous(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct virtchnl_promisc_info vpi;
	unsigned int flags;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* prevent changes to promiscuous flags */
	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);

	/* sanity check to prevent duplicate AQ calls */
	if (!iavf_promiscuous_mode_changed(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
		dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
		/* allow changes to promiscuous flags */
		spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
		return;
	}

	/* there are 2 bits, but only 3 states */
	if (!(netdev->flags & IFF_PROMISC) &&
	    netdev->flags & IFF_ALLMULTI) {
		/* State 1  - only multicast promiscuous mode enabled
		 * - !IFF_PROMISC && IFF_ALLMULTI
		 */
		flags = FLAG_VF_MULTICAST_PROMISC;
		adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
		adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
	} else if (!(netdev->flags & IFF_PROMISC) &&
		   !(netdev->flags & IFF_ALLMULTI)) {
		/* State 2 - unicast/multicast promiscuous mode disabled
		 * - !IFF_PROMISC && !IFF_ALLMULTI
		 */
		flags = 0;
		adapter->current_netdev_promisc_flags &=
			~(IFF_PROMISC | IFF_ALLMULTI);
		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
	} else {
		/* State 3 - unicast/multicast promiscuous mode enabled
		 * - IFF_PROMISC && IFF_ALLMULTI
		 * - IFF_PROMISC && !IFF_ALLMULTI
		 */
		flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
		adapter->current_netdev_promisc_flags |= IFF_PROMISC;
		if (netdev->flags & IFF_ALLMULTI)
			adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
		else
			adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;

		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;

	/* allow changes to promiscuous flags */
	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);

	/* the message itself is sent outside the lock; only the mirrored
	 * flag state needs protection
	 */
	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			 (u8 *)&vpi, sizeof(vpi));
}
1100 
1101 /**
1102  * iavf_request_stats
1103  * @adapter: adapter structure
1104  *
1105  * Request VSI statistics from PF.
1106  **/
iavf_request_stats(struct iavf_adapter * adapter)1107 void iavf_request_stats(struct iavf_adapter *adapter)
1108 {
1109 	struct virtchnl_queue_select vqs;
1110 
1111 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1112 		/* no error message, this isn't crucial */
1113 		return;
1114 	}
1115 
1116 	adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
1117 	adapter->current_op = VIRTCHNL_OP_GET_STATS;
1118 	vqs.vsi_id = adapter->vsi_res->vsi_id;
1119 	/* queue maps are ignored for this message - only the vsi is used */
1120 	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
1121 			     sizeof(vqs)))
1122 		/* if the request failed, don't lock out others */
1123 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1124 }
1125 
1126 /**
1127  * iavf_get_rss_hashcfg
1128  * @adapter: adapter structure
1129  *
1130  * Request RSS Hash enable bits from PF
1131  **/
iavf_get_rss_hashcfg(struct iavf_adapter * adapter)1132 void iavf_get_rss_hashcfg(struct iavf_adapter *adapter)
1133 {
1134 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1135 		/* bail because we already have a command pending */
1136 		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
1137 			adapter->current_op);
1138 		return;
1139 	}
1140 	adapter->current_op = VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS;
1141 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_RSS_HASHCFG;
1142 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, NULL, 0);
1143 }
1144 
1145 /**
1146  * iavf_set_rss_hashcfg
1147  * @adapter: adapter structure
1148  *
1149  * Request the PF to set our RSS hash capabilities
1150  **/
iavf_set_rss_hashcfg(struct iavf_adapter * adapter)1151 void iavf_set_rss_hashcfg(struct iavf_adapter *adapter)
1152 {
1153 	struct virtchnl_rss_hashcfg vrh;
1154 
1155 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1156 		/* bail because we already have a command pending */
1157 		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
1158 			adapter->current_op);
1159 		return;
1160 	}
1161 	vrh.hashcfg = adapter->rss_hashcfg;
1162 	adapter->current_op = VIRTCHNL_OP_SET_RSS_HASHCFG;
1163 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HASHCFG;
1164 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HASHCFG, (u8 *)&vrh,
1165 			 sizeof(vrh));
1166 }
1167 
1168 /**
1169  * iavf_set_rss_key
1170  * @adapter: adapter structure
1171  *
1172  * Request the PF to set our RSS hash key
1173  **/
iavf_set_rss_key(struct iavf_adapter * adapter)1174 void iavf_set_rss_key(struct iavf_adapter *adapter)
1175 {
1176 	struct virtchnl_rss_key *vrk;
1177 	int len;
1178 
1179 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1180 		/* bail because we already have a command pending */
1181 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
1182 			adapter->current_op);
1183 		return;
1184 	}
1185 	len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
1186 	vrk = kzalloc(len, GFP_KERNEL);
1187 	if (!vrk)
1188 		return;
1189 	vrk->vsi_id = adapter->vsi.id;
1190 	vrk->key_len = adapter->rss_key_size;
1191 	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
1192 
1193 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
1194 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
1195 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
1196 	kfree(vrk);
1197 }
1198 
1199 /**
1200  * iavf_set_rss_lut
1201  * @adapter: adapter structure
1202  *
1203  * Request the PF to set our RSS lookup table
1204  **/
iavf_set_rss_lut(struct iavf_adapter * adapter)1205 void iavf_set_rss_lut(struct iavf_adapter *adapter)
1206 {
1207 	struct virtchnl_rss_lut *vrl;
1208 	int len;
1209 
1210 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1211 		/* bail because we already have a command pending */
1212 		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
1213 			adapter->current_op);
1214 		return;
1215 	}
1216 	len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
1217 	vrl = kzalloc(len, GFP_KERNEL);
1218 	if (!vrl)
1219 		return;
1220 	vrl->vsi_id = adapter->vsi.id;
1221 	vrl->lut_entries = adapter->rss_lut_size;
1222 	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
1223 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
1224 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
1225 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
1226 	kfree(vrl);
1227 }
1228 
1229 /**
1230  * iavf_set_rss_hfunc
1231  * @adapter: adapter structure
1232  *
1233  * Request the PF to set our RSS Hash function
1234  **/
iavf_set_rss_hfunc(struct iavf_adapter * adapter)1235 void iavf_set_rss_hfunc(struct iavf_adapter *adapter)
1236 {
1237 	struct virtchnl_rss_hfunc *vrh;
1238 	int len = sizeof(*vrh);
1239 
1240 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1241 		/* bail because we already have a command pending */
1242 		dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n",
1243 			adapter->current_op);
1244 		return;
1245 	}
1246 	vrh = kzalloc(len, GFP_KERNEL);
1247 	if (!vrh)
1248 		return;
1249 	vrh->vsi_id = adapter->vsi.id;
1250 	vrh->rss_algorithm = adapter->hfunc;
1251 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC;
1252 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC;
1253 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len);
1254 	kfree(vrh);
1255 }
1256 
1257 /**
1258  * iavf_enable_vlan_stripping
1259  * @adapter: adapter structure
1260  *
1261  * Request VLAN header stripping to be enabled
1262  **/
iavf_enable_vlan_stripping(struct iavf_adapter * adapter)1263 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
1264 {
1265 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1266 		/* bail because we already have a command pending */
1267 		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
1268 			adapter->current_op);
1269 		return;
1270 	}
1271 	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1272 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
1273 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
1274 }
1275 
1276 /**
1277  * iavf_disable_vlan_stripping
1278  * @adapter: adapter structure
1279  *
1280  * Request VLAN header stripping to be disabled
1281  **/
iavf_disable_vlan_stripping(struct iavf_adapter * adapter)1282 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
1283 {
1284 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1285 		/* bail because we already have a command pending */
1286 		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
1287 			adapter->current_op);
1288 		return;
1289 	}
1290 	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1291 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
1292 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
1293 }
1294 
1295 /**
1296  * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
1297  * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
1298  */
iavf_tpid_to_vc_ethertype(u16 tpid)1299 static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
1300 {
1301 	switch (tpid) {
1302 	case ETH_P_8021Q:
1303 		return VIRTCHNL_VLAN_ETHERTYPE_8100;
1304 	case ETH_P_8021AD:
1305 		return VIRTCHNL_VLAN_ETHERTYPE_88A8;
1306 	}
1307 
1308 	return 0;
1309 }
1310 
1311 /**
1312  * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
1313  * @adapter: adapter structure
1314  * @msg: message structure used for updating offloads over virtchnl to update
1315  * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
1316  * @offload_op: opcode used to determine which support structure to check
1317  */
1318 static int
iavf_set_vc_offload_ethertype(struct iavf_adapter * adapter,struct virtchnl_vlan_setting * msg,u16 tpid,enum virtchnl_ops offload_op)1319 iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
1320 			      struct virtchnl_vlan_setting *msg, u16 tpid,
1321 			      enum virtchnl_ops offload_op)
1322 {
1323 	struct virtchnl_vlan_supported_caps *offload_support;
1324 	u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);
1325 
1326 	/* reference the correct offload support structure */
1327 	switch (offload_op) {
1328 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1329 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1330 		offload_support =
1331 			&adapter->vlan_v2_caps.offloads.stripping_support;
1332 		break;
1333 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1334 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1335 		offload_support =
1336 			&adapter->vlan_v2_caps.offloads.insertion_support;
1337 		break;
1338 	default:
1339 		dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
1340 			offload_op);
1341 		return -EINVAL;
1342 	}
1343 
1344 	/* make sure ethertype is supported */
1345 	if (offload_support->outer & vc_ethertype &&
1346 	    offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
1347 		msg->outer_ethertype_setting = vc_ethertype;
1348 	} else if (offload_support->inner & vc_ethertype &&
1349 		   offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
1350 		msg->inner_ethertype_setting = vc_ethertype;
1351 	} else {
1352 		dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
1353 			offload_op, tpid);
1354 		return -EINVAL;
1355 	}
1356 
1357 	return 0;
1358 }
1359 
1360 /**
1361  * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
1362  * @adapter: adapter structure
1363  * @tpid: VLAN TPID
1364  * @offload_op: opcode used to determine which AQ required bit to clear
1365  */
1366 static void
iavf_clear_offload_v2_aq_required(struct iavf_adapter * adapter,u16 tpid,enum virtchnl_ops offload_op)1367 iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
1368 				  enum virtchnl_ops offload_op)
1369 {
1370 	switch (offload_op) {
1371 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1372 		if (tpid == ETH_P_8021Q)
1373 			adapter->aq_required &=
1374 				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
1375 		else if (tpid == ETH_P_8021AD)
1376 			adapter->aq_required &=
1377 				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
1378 		break;
1379 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1380 		if (tpid == ETH_P_8021Q)
1381 			adapter->aq_required &=
1382 				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
1383 		else if (tpid == ETH_P_8021AD)
1384 			adapter->aq_required &=
1385 				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
1386 		break;
1387 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1388 		if (tpid == ETH_P_8021Q)
1389 			adapter->aq_required &=
1390 				~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
1391 		else if (tpid == ETH_P_8021AD)
1392 			adapter->aq_required &=
1393 				~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
1394 		break;
1395 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1396 		if (tpid == ETH_P_8021Q)
1397 			adapter->aq_required &=
1398 				~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
1399 		else if (tpid == ETH_P_8021AD)
1400 			adapter->aq_required &=
1401 				~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
1402 		break;
1403 	default:
1404 		dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
1405 			offload_op);
1406 	}
1407 }
1408 
1409 /**
1410  * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
1411  * @adapter: adapter structure
1412  * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
1413  * @offload_op: offload_op used to make the request over virtchnl
1414  */
1415 static void
iavf_send_vlan_offload_v2(struct iavf_adapter * adapter,u16 tpid,enum virtchnl_ops offload_op)1416 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
1417 			  enum virtchnl_ops offload_op)
1418 {
1419 	struct virtchnl_vlan_setting *msg;
1420 	int len = sizeof(*msg);
1421 
1422 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1423 		/* bail because we already have a command pending */
1424 		dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
1425 			offload_op, adapter->current_op);
1426 		return;
1427 	}
1428 
1429 	adapter->current_op = offload_op;
1430 
1431 	msg = kzalloc(len, GFP_KERNEL);
1432 	if (!msg)
1433 		return;
1434 
1435 	msg->vport_id = adapter->vsi_res->vsi_id;
1436 
1437 	/* always clear to prevent unsupported and endless requests */
1438 	iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
1439 
1440 	/* only send valid offload requests */
1441 	if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
1442 		iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
1443 	else
1444 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1445 
1446 	kfree(msg);
1447 }
1448 
1449 /**
1450  * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
1451  * @adapter: adapter structure
1452  * @tpid: VLAN TPID used to enable VLAN stripping
1453  */
iavf_enable_vlan_stripping_v2(struct iavf_adapter * adapter,u16 tpid)1454 void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1455 {
1456 	iavf_send_vlan_offload_v2(adapter, tpid,
1457 				  VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
1458 }
1459 
1460 /**
1461  * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
1462  * @adapter: adapter structure
1463  * @tpid: VLAN TPID used to disable VLAN stripping
1464  */
iavf_disable_vlan_stripping_v2(struct iavf_adapter * adapter,u16 tpid)1465 void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1466 {
1467 	iavf_send_vlan_offload_v2(adapter, tpid,
1468 				  VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
1469 }
1470 
1471 /**
1472  * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
1473  * @adapter: adapter structure
1474  * @tpid: VLAN TPID used to enable VLAN insertion
1475  */
iavf_enable_vlan_insertion_v2(struct iavf_adapter * adapter,u16 tpid)1476 void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1477 {
1478 	iavf_send_vlan_offload_v2(adapter, tpid,
1479 				  VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
1480 }
1481 
1482 /**
1483  * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
1484  * @adapter: adapter structure
1485  * @tpid: VLAN TPID used to disable VLAN insertion
1486  */
iavf_disable_vlan_insertion_v2(struct iavf_adapter * adapter,u16 tpid)1487 void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1488 {
1489 	iavf_send_vlan_offload_v2(adapter, tpid,
1490 				  VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
1491 }
1492 
1493 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
 * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command
 * @adapter: adapter private structure
 *
 * De-queue one PTP command request and send the command message to the PF.
 * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
 * A command that fails to send stays on adapter->ptp.aq_cmds so it is
 * retried on a later cycle; the aq_cmds list and the send are serialized
 * by ptp.aq_cmd_lock.
 */
void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
{
	struct iavf_ptp_aq_cmd *cmd;
	int err;

	if (!adapter->ptp.clock) {
		/* This shouldn't be possible to hit, since no messages should
		 * be queued if PTP is not initialized.
		 */
		pci_err(adapter->pdev, "PTP is not initialized\n");
		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
		return;
	}

	mutex_lock(&adapter->ptp.aq_cmd_lock);
	cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
				       struct iavf_ptp_aq_cmd, list);
	if (!cmd) {
		/* no further PTP messages to send */
		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
		goto out_unlock;
	}

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		pci_err(adapter->pdev,
			"Cannot send PTP command %d, command %d pending\n",
			cmd->v_opcode, adapter->current_op);
		goto out_unlock;
	}

	err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
	if (!err) {
		/* Command was sent without errors, so we can remove it from
		 * the list and discard it.
		 */
		list_del(&cmd->list);
		kfree(cmd);
	} else {
		/* We failed to send the command, try again next cycle */
		pci_err(adapter->pdev, "Failed to send PTP command %d\n",
			cmd->v_opcode);
	}

	/* only drop the AQ flag once the queue has fully drained */
	if (list_empty(&adapter->ptp.aq_cmds))
		/* no further PTP messages to send */
		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;

out_unlock:
	mutex_unlock(&adapter->ptp.aq_cmd_lock);
}
1552 #endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
1553 
1554 /**
1555  * iavf_print_link_message - print link up or down
1556  * @adapter: adapter structure
1557  *
1558  * Log a message telling the world of our wonderous link status
1559  */
iavf_print_link_message(struct iavf_adapter * adapter)1560 static void iavf_print_link_message(struct iavf_adapter *adapter)
1561 {
1562 	struct net_device *netdev = adapter->netdev;
1563 	int link_speed_mbps;
1564 	char *speed;
1565 
1566 	if (!adapter->link_up) {
1567 		netdev_info(netdev, "NIC Link is Down\n");
1568 		return;
1569 	}
1570 
1571 	if (ADV_LINK_SUPPORT(adapter)) {
1572 		link_speed_mbps = adapter->link_speed_mbps;
1573 		goto print_link_msg;
1574 	}
1575 
1576 	switch (adapter->link_speed) {
1577 	case VIRTCHNL_LINK_SPEED_40GB:
1578 		link_speed_mbps = SPEED_40000;
1579 		break;
1580 	case VIRTCHNL_LINK_SPEED_25GB:
1581 		link_speed_mbps = SPEED_25000;
1582 		break;
1583 	case VIRTCHNL_LINK_SPEED_20GB:
1584 		link_speed_mbps = SPEED_20000;
1585 		break;
1586 	case VIRTCHNL_LINK_SPEED_10GB:
1587 		link_speed_mbps = SPEED_10000;
1588 		break;
1589 	case VIRTCHNL_LINK_SPEED_5GB:
1590 		link_speed_mbps = SPEED_5000;
1591 		break;
1592 	case VIRTCHNL_LINK_SPEED_2_5GB:
1593 		link_speed_mbps = SPEED_2500;
1594 		break;
1595 	case VIRTCHNL_LINK_SPEED_1GB:
1596 		link_speed_mbps = SPEED_1000;
1597 		break;
1598 	case VIRTCHNL_LINK_SPEED_100MB:
1599 		link_speed_mbps = SPEED_100;
1600 		break;
1601 	default:
1602 		link_speed_mbps = SPEED_UNKNOWN;
1603 		break;
1604 	}
1605 
1606 print_link_msg:
1607 	if (link_speed_mbps > SPEED_1000) {
1608 		if (link_speed_mbps == SPEED_2500) {
1609 			speed = kasprintf(GFP_KERNEL, "%s", "2.5 Gbps");
1610 		} else {
1611 			/* convert to Gbps inline */
1612 			speed = kasprintf(GFP_KERNEL, "%d Gbps",
1613 					  link_speed_mbps / 1000);
1614 		}
1615 	} else if (link_speed_mbps == SPEED_UNKNOWN) {
1616 		speed = kasprintf(GFP_KERNEL, "%s", "Unknown Mbps");
1617 	} else {
1618 		speed = kasprintf(GFP_KERNEL, "%d Mbps", link_speed_mbps);
1619 	}
1620 
1621 	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
1622 	kfree(speed);
1623 }
1624 
1625 /**
1626  * iavf_get_vpe_link_status
1627  * @adapter: adapter structure
1628  * @vpe: virtchnl_pf_event structure
1629  *
1630  * Helper function for determining the link status
1631  **/
1632 static bool
iavf_get_vpe_link_status(struct iavf_adapter * adapter,struct virtchnl_pf_event * vpe)1633 iavf_get_vpe_link_status(struct iavf_adapter *adapter,
1634 			 struct virtchnl_pf_event *vpe)
1635 {
1636 	if (ADV_LINK_SUPPORT(adapter))
1637 		return vpe->event_data.link_event_adv.link_status;
1638 	else
1639 		return vpe->event_data.link_event.link_status;
1640 }
1641 
1642 /**
1643  * iavf_set_adapter_link_speed_from_vpe
1644  * @adapter: adapter structure for which we are setting the link speed
1645  * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
1646  *
1647  * Helper function for setting iavf_adapter link speed
1648  **/
1649 static void
iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter * adapter,struct virtchnl_pf_event * vpe)1650 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
1651 				     struct virtchnl_pf_event *vpe)
1652 {
1653 	if (ADV_LINK_SUPPORT(adapter))
1654 		adapter->link_speed_mbps =
1655 			vpe->event_data.link_event_adv.link_speed;
1656 	else
1657 		adapter->link_speed = vpe->event_data.link_event.link_speed;
1658 }
1659 
1660 /**
1661  * iavf_get_qos_caps - get qos caps support
1662  * @adapter: iavf adapter struct instance
1663  *
1664  * This function requests PF for Supported QoS Caps.
1665  */
iavf_get_qos_caps(struct iavf_adapter * adapter)1666 void iavf_get_qos_caps(struct iavf_adapter *adapter)
1667 {
1668 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1669 		/* bail because we already have a command pending */
1670 		dev_err(&adapter->pdev->dev,
1671 			"Cannot get qos caps, command %d pending\n",
1672 			adapter->current_op);
1673 		return;
1674 	}
1675 
1676 	adapter->current_op = VIRTCHNL_OP_GET_QOS_CAPS;
1677 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_QOS_CAPS;
1678 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_QOS_CAPS, NULL, 0);
1679 }
1680 
1681 /**
1682  * iavf_set_quanta_size - set quanta size of queue chunk
1683  * @adapter: iavf adapter struct instance
1684  * @quanta_size: quanta size in bytes
1685  * @queue_index: starting index of queue chunk
1686  * @num_queues: number of queues in the queue chunk
1687  *
1688  * This function requests PF to set quanta size of queue chunk
1689  * starting at queue_index.
1690  */
1691 static void
iavf_set_quanta_size(struct iavf_adapter * adapter,u16 quanta_size,u16 queue_index,u16 num_queues)1692 iavf_set_quanta_size(struct iavf_adapter *adapter, u16 quanta_size,
1693 		     u16 queue_index, u16 num_queues)
1694 {
1695 	struct virtchnl_quanta_cfg quanta_cfg;
1696 
1697 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1698 		/* bail because we already have a command pending */
1699 		dev_err(&adapter->pdev->dev,
1700 			"Cannot set queue quanta size, command %d pending\n",
1701 			adapter->current_op);
1702 		return;
1703 	}
1704 
1705 	adapter->current_op = VIRTCHNL_OP_CONFIG_QUANTA;
1706 	quanta_cfg.quanta_size = quanta_size;
1707 	quanta_cfg.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
1708 	quanta_cfg.queue_select.start_queue_id = queue_index;
1709 	quanta_cfg.queue_select.num_queues = num_queues;
1710 	adapter->aq_required &= ~IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
1711 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUANTA,
1712 			 (u8 *)&quanta_cfg, sizeof(quanta_cfg));
1713 }
1714 
1715 /**
1716  * iavf_cfg_queues_quanta_size - configure quanta size of queues
1717  * @adapter: adapter structure
1718  *
1719  * Request that the PF configure quanta size of allocated queues.
1720  **/
iavf_cfg_queues_quanta_size(struct iavf_adapter * adapter)1721 void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter)
1722 {
1723 	int quanta_size = IAVF_DEFAULT_QUANTA_SIZE;
1724 
1725 	/* Set Queue Quanta Size to default */
1726 	iavf_set_quanta_size(adapter, quanta_size, 0,
1727 			     adapter->num_active_queues);
1728 }
1729 
1730 /**
1731  * iavf_cfg_queues_bw - configure bandwidth of allocated queues
1732  * @adapter: iavf adapter structure instance
1733  *
1734  * This function requests PF to configure queue bandwidth of allocated queues
1735  */
iavf_cfg_queues_bw(struct iavf_adapter * adapter)1736 void iavf_cfg_queues_bw(struct iavf_adapter *adapter)
1737 {
1738 	struct virtchnl_queues_bw_cfg *qs_bw_cfg;
1739 	struct net_shaper *q_shaper;
1740 	int qs_to_update = 0;
1741 	int i, inx = 0;
1742 	size_t len;
1743 
1744 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1745 		/* bail because we already have a command pending */
1746 		dev_err(&adapter->pdev->dev,
1747 			"Cannot set tc queue bw, command %d pending\n",
1748 			adapter->current_op);
1749 		return;
1750 	}
1751 
1752 	for (i = 0; i < adapter->num_active_queues; i++) {
1753 		if (adapter->tx_rings[i].q_shaper_update)
1754 			qs_to_update++;
1755 	}
1756 	len = struct_size(qs_bw_cfg, cfg, qs_to_update);
1757 	qs_bw_cfg = kzalloc(len, GFP_KERNEL);
1758 	if (!qs_bw_cfg)
1759 		return;
1760 
1761 	qs_bw_cfg->vsi_id = adapter->vsi.id;
1762 	qs_bw_cfg->num_queues = qs_to_update;
1763 
1764 	for (i = 0; i < adapter->num_active_queues; i++) {
1765 		struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1766 
1767 		q_shaper = &tx_ring->q_shaper;
1768 		if (tx_ring->q_shaper_update) {
1769 			qs_bw_cfg->cfg[inx].queue_id = i;
1770 			qs_bw_cfg->cfg[inx].shaper.peak = q_shaper->bw_max;
1771 			qs_bw_cfg->cfg[inx].shaper.committed = q_shaper->bw_min;
1772 			qs_bw_cfg->cfg[inx].tc = 0;
1773 			inx++;
1774 		}
1775 	}
1776 
1777 	adapter->current_op = VIRTCHNL_OP_CONFIG_QUEUE_BW;
1778 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
1779 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUEUE_BW,
1780 			 (u8 *)qs_bw_cfg, len);
1781 	kfree(qs_bw_cfg);
1782 }
1783 
1784 /**
1785  * iavf_enable_channels
1786  * @adapter: adapter structure
1787  *
1788  * Request that the PF enable channels as specified by
1789  * the user via tc tool.
1790  **/
iavf_enable_channels(struct iavf_adapter * adapter)1791 void iavf_enable_channels(struct iavf_adapter *adapter)
1792 {
1793 	struct virtchnl_tc_info *vti = NULL;
1794 	size_t len;
1795 	int i;
1796 
1797 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1798 		/* bail because we already have a command pending */
1799 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1800 			adapter->current_op);
1801 		return;
1802 	}
1803 
1804 	len = virtchnl_struct_size(vti, list, adapter->num_tc);
1805 	vti = kzalloc(len, GFP_KERNEL);
1806 	if (!vti)
1807 		return;
1808 	vti->num_tc = adapter->num_tc;
1809 	for (i = 0; i < vti->num_tc; i++) {
1810 		vti->list[i].count = adapter->ch_config.ch_info[i].count;
1811 		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
1812 		vti->list[i].pad = 0;
1813 		vti->list[i].max_tx_rate =
1814 				adapter->ch_config.ch_info[i].max_tx_rate;
1815 	}
1816 
1817 	adapter->ch_config.state = __IAVF_TC_RUNNING;
1818 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1819 	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
1820 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
1821 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
1822 	kfree(vti);
1823 }
1824 
1825 /**
1826  * iavf_disable_channels
1827  * @adapter: adapter structure
1828  *
1829  * Request that the PF disable channels that are configured
1830  **/
iavf_disable_channels(struct iavf_adapter * adapter)1831 void iavf_disable_channels(struct iavf_adapter *adapter)
1832 {
1833 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1834 		/* bail because we already have a command pending */
1835 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1836 			adapter->current_op);
1837 		return;
1838 	}
1839 
1840 	adapter->ch_config.state = __IAVF_TC_INVALID;
1841 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1842 	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1843 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1844 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
1845 }
1846 
1847 /**
1848  * iavf_print_cloud_filter
1849  * @adapter: adapter structure
1850  * @f: cloud filter to print
1851  *
1852  * Print the cloud filter
1853  **/
iavf_print_cloud_filter(struct iavf_adapter * adapter,struct virtchnl_filter * f)1854 static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
1855 				    struct virtchnl_filter *f)
1856 {
1857 	switch (f->flow_type) {
1858 	case VIRTCHNL_TCP_V4_FLOW:
1859 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
1860 			 &f->data.tcp_spec.dst_mac,
1861 			 &f->data.tcp_spec.src_mac,
1862 			 ntohs(f->data.tcp_spec.vlan_id),
1863 			 &f->data.tcp_spec.dst_ip[0],
1864 			 &f->data.tcp_spec.src_ip[0],
1865 			 ntohs(f->data.tcp_spec.dst_port),
1866 			 ntohs(f->data.tcp_spec.src_port));
1867 		break;
1868 	case VIRTCHNL_TCP_V6_FLOW:
1869 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
1870 			 &f->data.tcp_spec.dst_mac,
1871 			 &f->data.tcp_spec.src_mac,
1872 			 ntohs(f->data.tcp_spec.vlan_id),
1873 			 &f->data.tcp_spec.dst_ip,
1874 			 &f->data.tcp_spec.src_ip,
1875 			 ntohs(f->data.tcp_spec.dst_port),
1876 			 ntohs(f->data.tcp_spec.src_port));
1877 		break;
1878 	}
1879 }
1880 
1881 /**
1882  * iavf_add_cloud_filter
1883  * @adapter: adapter structure
1884  *
1885  * Request that the PF add cloud filters as specified
1886  * by the user via tc tool.
1887  **/
iavf_add_cloud_filter(struct iavf_adapter * adapter)1888 void iavf_add_cloud_filter(struct iavf_adapter *adapter)
1889 {
1890 	struct iavf_cloud_filter *cf;
1891 	struct virtchnl_filter *f;
1892 	int len = 0, count = 0;
1893 
1894 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1895 		/* bail because we already have a command pending */
1896 		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1897 			adapter->current_op);
1898 		return;
1899 	}
1900 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1901 		if (cf->add) {
1902 			count++;
1903 			break;
1904 		}
1905 	}
1906 	if (!count) {
1907 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1908 		return;
1909 	}
1910 	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1911 
1912 	len = sizeof(struct virtchnl_filter);
1913 	f = kzalloc(len, GFP_KERNEL);
1914 	if (!f)
1915 		return;
1916 
1917 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1918 		if (cf->add) {
1919 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1920 			cf->add = false;
1921 			cf->state = __IAVF_CF_ADD_PENDING;
1922 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
1923 					 (u8 *)f, len);
1924 		}
1925 	}
1926 	kfree(f);
1927 }
1928 
1929 /**
1930  * iavf_del_cloud_filter
1931  * @adapter: adapter structure
1932  *
1933  * Request that the PF delete cloud filters as specified
1934  * by the user via tc tool.
1935  **/
iavf_del_cloud_filter(struct iavf_adapter * adapter)1936 void iavf_del_cloud_filter(struct iavf_adapter *adapter)
1937 {
1938 	struct iavf_cloud_filter *cf, *cftmp;
1939 	struct virtchnl_filter *f;
1940 	int len = 0, count = 0;
1941 
1942 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1943 		/* bail because we already have a command pending */
1944 		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1945 			adapter->current_op);
1946 		return;
1947 	}
1948 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1949 		if (cf->del) {
1950 			count++;
1951 			break;
1952 		}
1953 	}
1954 	if (!count) {
1955 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1956 		return;
1957 	}
1958 	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1959 
1960 	len = sizeof(struct virtchnl_filter);
1961 	f = kzalloc(len, GFP_KERNEL);
1962 	if (!f)
1963 		return;
1964 
1965 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1966 		if (cf->del) {
1967 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1968 			cf->del = false;
1969 			cf->state = __IAVF_CF_DEL_PENDING;
1970 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
1971 					 (u8 *)f, len);
1972 		}
1973 	}
1974 	kfree(f);
1975 }
1976 
1977 /**
1978  * iavf_add_fdir_filter
1979  * @adapter: the VF adapter structure
1980  *
1981  * Request that the PF add Flow Director filters as specified
1982  * by the user via ethtool.
1983  **/
iavf_add_fdir_filter(struct iavf_adapter * adapter)1984 void iavf_add_fdir_filter(struct iavf_adapter *adapter)
1985 {
1986 	struct iavf_fdir_fltr *fdir;
1987 	struct virtchnl_fdir_add *f;
1988 	bool process_fltr = false;
1989 	int len;
1990 
1991 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1992 		/* bail because we already have a command pending */
1993 		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
1994 			adapter->current_op);
1995 		return;
1996 	}
1997 
1998 	len = sizeof(struct virtchnl_fdir_add);
1999 	f = kzalloc(len, GFP_KERNEL);
2000 	if (!f)
2001 		return;
2002 
2003 	spin_lock_bh(&adapter->fdir_fltr_lock);
2004 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
2005 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
2006 			process_fltr = true;
2007 			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
2008 			memcpy(f, &fdir->vc_add_msg, len);
2009 			break;
2010 		}
2011 	}
2012 	spin_unlock_bh(&adapter->fdir_fltr_lock);
2013 
2014 	if (!process_fltr) {
2015 		/* prevent iavf_add_fdir_filter() from being called when there
2016 		 * are no filters to add
2017 		 */
2018 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
2019 		kfree(f);
2020 		return;
2021 	}
2022 	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
2023 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
2024 	kfree(f);
2025 }
2026 
2027 /**
2028  * iavf_del_fdir_filter
2029  * @adapter: the VF adapter structure
2030  *
2031  * Request that the PF delete Flow Director filters as specified
2032  * by the user via ethtool.
2033  **/
iavf_del_fdir_filter(struct iavf_adapter * adapter)2034 void iavf_del_fdir_filter(struct iavf_adapter *adapter)
2035 {
2036 	struct virtchnl_fdir_del f = {};
2037 	struct iavf_fdir_fltr *fdir;
2038 	bool process_fltr = false;
2039 	int len;
2040 
2041 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2042 		/* bail because we already have a command pending */
2043 		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
2044 			adapter->current_op);
2045 		return;
2046 	}
2047 
2048 	len = sizeof(struct virtchnl_fdir_del);
2049 
2050 	spin_lock_bh(&adapter->fdir_fltr_lock);
2051 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
2052 		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
2053 			process_fltr = true;
2054 			f.vsi_id = fdir->vc_add_msg.vsi_id;
2055 			f.flow_id = fdir->flow_id;
2056 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
2057 			break;
2058 		} else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
2059 			process_fltr = true;
2060 			f.vsi_id = fdir->vc_add_msg.vsi_id;
2061 			f.flow_id = fdir->flow_id;
2062 			fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
2063 			break;
2064 		}
2065 	}
2066 	spin_unlock_bh(&adapter->fdir_fltr_lock);
2067 
2068 	if (!process_fltr) {
2069 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
2070 		return;
2071 	}
2072 
2073 	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
2074 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
2075 }
2076 
2077 /**
2078  * iavf_add_adv_rss_cfg
2079  * @adapter: the VF adapter structure
2080  *
2081  * Request that the PF add RSS configuration as specified
2082  * by the user via ethtool.
2083  **/
iavf_add_adv_rss_cfg(struct iavf_adapter * adapter)2084 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
2085 {
2086 	struct virtchnl_rss_cfg *rss_cfg;
2087 	struct iavf_adv_rss *rss;
2088 	bool process_rss = false;
2089 	int len;
2090 
2091 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2092 		/* bail because we already have a command pending */
2093 		dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
2094 			adapter->current_op);
2095 		return;
2096 	}
2097 
2098 	len = sizeof(struct virtchnl_rss_cfg);
2099 	rss_cfg = kzalloc(len, GFP_KERNEL);
2100 	if (!rss_cfg)
2101 		return;
2102 
2103 	spin_lock_bh(&adapter->adv_rss_lock);
2104 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2105 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
2106 			process_rss = true;
2107 			rss->state = IAVF_ADV_RSS_ADD_PENDING;
2108 			memcpy(rss_cfg, &rss->cfg_msg, len);
2109 			iavf_print_adv_rss_cfg(adapter, rss,
2110 					       "Input set change for",
2111 					       "is pending");
2112 			break;
2113 		}
2114 	}
2115 	spin_unlock_bh(&adapter->adv_rss_lock);
2116 
2117 	if (process_rss) {
2118 		adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
2119 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
2120 				 (u8 *)rss_cfg, len);
2121 	} else {
2122 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
2123 	}
2124 
2125 	kfree(rss_cfg);
2126 }
2127 
2128 /**
2129  * iavf_del_adv_rss_cfg
2130  * @adapter: the VF adapter structure
2131  *
2132  * Request that the PF delete RSS configuration as specified
2133  * by the user via ethtool.
2134  **/
iavf_del_adv_rss_cfg(struct iavf_adapter * adapter)2135 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
2136 {
2137 	struct virtchnl_rss_cfg *rss_cfg;
2138 	struct iavf_adv_rss *rss;
2139 	bool process_rss = false;
2140 	int len;
2141 
2142 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2143 		/* bail because we already have a command pending */
2144 		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
2145 			adapter->current_op);
2146 		return;
2147 	}
2148 
2149 	len = sizeof(struct virtchnl_rss_cfg);
2150 	rss_cfg = kzalloc(len, GFP_KERNEL);
2151 	if (!rss_cfg)
2152 		return;
2153 
2154 	spin_lock_bh(&adapter->adv_rss_lock);
2155 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2156 		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
2157 			process_rss = true;
2158 			rss->state = IAVF_ADV_RSS_DEL_PENDING;
2159 			memcpy(rss_cfg, &rss->cfg_msg, len);
2160 			break;
2161 		}
2162 	}
2163 	spin_unlock_bh(&adapter->adv_rss_lock);
2164 
2165 	if (process_rss) {
2166 		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
2167 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
2168 				 (u8 *)rss_cfg, len);
2169 	} else {
2170 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
2171 	}
2172 
2173 	kfree(rss_cfg);
2174 }
2175 
2176 /**
2177  * iavf_request_reset
2178  * @adapter: adapter structure
2179  *
2180  * Request that the PF reset this VF. No response is expected.
2181  **/
iavf_request_reset(struct iavf_adapter * adapter)2182 int iavf_request_reset(struct iavf_adapter *adapter)
2183 {
2184 	int err;
2185 	/* Don't check CURRENT_OP - this is always higher priority */
2186 	err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
2187 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2188 	return err;
2189 }
2190 
2191 /**
2192  * iavf_netdev_features_vlan_strip_set - update vlan strip status
2193  * @netdev: ptr to netdev being adjusted
2194  * @enable: enable or disable vlan strip
2195  *
2196  * Helper function to change vlan strip status in netdev->features.
2197  */
iavf_netdev_features_vlan_strip_set(struct net_device * netdev,const bool enable)2198 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
2199 						const bool enable)
2200 {
2201 	if (enable)
2202 		netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2203 	else
2204 		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2205 }
2206 
2207 /**
2208  * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
2209  * @adapter: private adapter structure
2210  *
2211  * Called after a reset to re-add all FDIR filters and delete some of them
2212  * if they were pending to be deleted.
2213  */
iavf_activate_fdir_filters(struct iavf_adapter * adapter)2214 static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
2215 {
2216 	struct iavf_fdir_fltr *f, *ftmp;
2217 	bool add_filters = false;
2218 
2219 	spin_lock_bh(&adapter->fdir_fltr_lock);
2220 	list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
2221 		if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
2222 		    f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
2223 		    f->state == IAVF_FDIR_FLTR_ACTIVE) {
2224 			/* All filters and requests have been removed in PF,
2225 			 * restore them
2226 			 */
2227 			f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
2228 			add_filters = true;
2229 		} else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
2230 			   f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2231 			/* Link down state, leave filters as inactive */
2232 			f->state = IAVF_FDIR_FLTR_INACTIVE;
2233 		} else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
2234 			   f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2235 			/* Delete filters that were pending to be deleted, the
2236 			 * list on PF is already cleared after a reset
2237 			 */
2238 			list_del(&f->list);
2239 			iavf_dec_fdir_active_fltr(adapter, f);
2240 			kfree(f);
2241 		}
2242 	}
2243 	spin_unlock_bh(&adapter->fdir_fltr_lock);
2244 
2245 	if (add_filters)
2246 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
2247 }
2248 
2249 /**
2250  * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME
2251  * @adapter: private adapter structure
2252  * @data: the message from the PF
2253  * @len: length of the message from the PF
2254  *
2255  * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
2256  * is sent by the PF in response to the same op as a request from the VF.
2257  * Extract the 64bit nanoseconds time from the message and store it in
2258  * cached_phc_time. Then, notify any thread that is waiting for the update via
2259  * the wait queue.
2260  */
iavf_virtchnl_ptp_get_time(struct iavf_adapter * adapter,void * data,u16 len)2261 static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
2262 				       void *data, u16 len)
2263 {
2264 	struct virtchnl_phc_time *msg = data;
2265 
2266 	if (len != sizeof(*msg)) {
2267 		dev_err_once(&adapter->pdev->dev,
2268 			     "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
2269 			     len, sizeof(*msg));
2270 		return;
2271 	}
2272 
2273 	adapter->ptp.cached_phc_time = msg->time;
2274 	adapter->ptp.cached_phc_updated = jiffies;
2275 	adapter->ptp.phc_time_ready = true;
2276 
2277 	wake_up(&adapter->ptp.phc_time_waitqueue);
2278 }
2279 
2280 /**
2281  * iavf_virtchnl_completion
2282  * @adapter: adapter structure
2283  * @v_opcode: opcode sent by PF
2284  * @v_retval: retval sent by PF
2285  * @msg: message sent by PF
2286  * @msglen: message length
2287  *
2288  * Asynchronous completion function for admin queue messages. Rather than busy
2289  * wait, we fire off our requests and assume that no errors will be returned.
2290  * This function handles the reply messages.
2291  **/
iavf_virtchnl_completion(struct iavf_adapter * adapter,enum virtchnl_ops v_opcode,enum iavf_status v_retval,u8 * msg,u16 msglen)2292 void iavf_virtchnl_completion(struct iavf_adapter *adapter,
2293 			      enum virtchnl_ops v_opcode,
2294 			      enum iavf_status v_retval, u8 *msg, u16 msglen)
2295 {
2296 	struct net_device *netdev = adapter->netdev;
2297 
2298 	if (v_opcode == VIRTCHNL_OP_EVENT) {
2299 		struct virtchnl_pf_event *vpe =
2300 			(struct virtchnl_pf_event *)msg;
2301 		bool link_up = iavf_get_vpe_link_status(adapter, vpe);
2302 
2303 		switch (vpe->event) {
2304 		case VIRTCHNL_EVENT_LINK_CHANGE:
2305 			iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
2306 
2307 			/* we've already got the right link status, bail */
2308 			if (adapter->link_up == link_up)
2309 				break;
2310 
2311 			if (link_up) {
2312 				/* If we get link up message and start queues
2313 				 * before our queues are configured it will
2314 				 * trigger a TX hang. In that case, just ignore
2315 				 * the link status message,we'll get another one
2316 				 * after we enable queues and actually prepared
2317 				 * to send traffic.
2318 				 */
2319 				if (adapter->state != __IAVF_RUNNING)
2320 					break;
2321 
2322 				/* For ADq enabled VF, we reconfigure VSIs and
2323 				 * re-allocate queues. Hence wait till all
2324 				 * queues are enabled.
2325 				 */
2326 				if (adapter->flags &
2327 				    IAVF_FLAG_QUEUES_DISABLED)
2328 					break;
2329 			}
2330 
2331 			adapter->link_up = link_up;
2332 			if (link_up) {
2333 				netif_tx_start_all_queues(netdev);
2334 				netif_carrier_on(netdev);
2335 			} else {
2336 				netif_tx_stop_all_queues(netdev);
2337 				netif_carrier_off(netdev);
2338 			}
2339 			iavf_print_link_message(adapter);
2340 			break;
2341 		case VIRTCHNL_EVENT_RESET_IMPENDING:
2342 			dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
2343 			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
2344 				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
2345 				iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
2346 			}
2347 			break;
2348 		default:
2349 			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
2350 				vpe->event);
2351 			break;
2352 		}
2353 		return;
2354 	}
2355 	if (v_retval) {
2356 		switch (v_opcode) {
2357 		case VIRTCHNL_OP_ADD_VLAN:
2358 			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2359 				iavf_stat_str(&adapter->hw, v_retval));
2360 			break;
2361 		case VIRTCHNL_OP_ADD_ETH_ADDR:
2362 			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
2363 				iavf_stat_str(&adapter->hw, v_retval));
2364 			iavf_mac_add_reject(adapter);
2365 			/* restore administratively set MAC address */
2366 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2367 			wake_up(&adapter->vc_waitqueue);
2368 			break;
2369 		case VIRTCHNL_OP_DEL_ETH_ADDR:
2370 			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
2371 				iavf_stat_str(&adapter->hw, v_retval));
2372 			break;
2373 		case VIRTCHNL_OP_ENABLE_CHANNELS:
2374 			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
2375 				iavf_stat_str(&adapter->hw, v_retval));
2376 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2377 			adapter->ch_config.state = __IAVF_TC_INVALID;
2378 			netdev_reset_tc(netdev);
2379 			netif_tx_start_all_queues(netdev);
2380 			break;
2381 		case VIRTCHNL_OP_DISABLE_CHANNELS:
2382 			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
2383 				iavf_stat_str(&adapter->hw, v_retval));
2384 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2385 			adapter->ch_config.state = __IAVF_TC_RUNNING;
2386 			netif_tx_start_all_queues(netdev);
2387 			break;
2388 		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2389 			struct iavf_cloud_filter *cf, *cftmp;
2390 
2391 			list_for_each_entry_safe(cf, cftmp,
2392 						 &adapter->cloud_filter_list,
2393 						 list) {
2394 				if (cf->state == __IAVF_CF_ADD_PENDING) {
2395 					cf->state = __IAVF_CF_INVALID;
2396 					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
2397 						 iavf_stat_str(&adapter->hw,
2398 							       v_retval));
2399 					iavf_print_cloud_filter(adapter,
2400 								&cf->f);
2401 					list_del(&cf->list);
2402 					kfree(cf);
2403 					adapter->num_cloud_filters--;
2404 				}
2405 			}
2406 			}
2407 			break;
2408 		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2409 			struct iavf_cloud_filter *cf;
2410 
2411 			list_for_each_entry(cf, &adapter->cloud_filter_list,
2412 					    list) {
2413 				if (cf->state == __IAVF_CF_DEL_PENDING) {
2414 					cf->state = __IAVF_CF_ACTIVE;
2415 					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
2416 						 iavf_stat_str(&adapter->hw,
2417 							       v_retval));
2418 					iavf_print_cloud_filter(adapter,
2419 								&cf->f);
2420 				}
2421 			}
2422 			}
2423 			break;
2424 		case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2425 			struct iavf_fdir_fltr *fdir, *fdir_tmp;
2426 
2427 			spin_lock_bh(&adapter->fdir_fltr_lock);
2428 			list_for_each_entry_safe(fdir, fdir_tmp,
2429 						 &adapter->fdir_list_head,
2430 						 list) {
2431 				if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2432 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
2433 						 iavf_stat_str(&adapter->hw,
2434 							       v_retval));
2435 					iavf_print_fdir_fltr(adapter, fdir);
2436 					if (msglen)
2437 						dev_err(&adapter->pdev->dev,
2438 							"%s\n", msg);
2439 					list_del(&fdir->list);
2440 					iavf_dec_fdir_active_fltr(adapter, fdir);
2441 					kfree(fdir);
2442 				}
2443 			}
2444 			spin_unlock_bh(&adapter->fdir_fltr_lock);
2445 			}
2446 			break;
2447 		case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2448 			struct iavf_fdir_fltr *fdir;
2449 
2450 			spin_lock_bh(&adapter->fdir_fltr_lock);
2451 			list_for_each_entry(fdir, &adapter->fdir_list_head,
2452 					    list) {
2453 				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
2454 				    fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2455 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2456 					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
2457 						 iavf_stat_str(&adapter->hw,
2458 							       v_retval));
2459 					iavf_print_fdir_fltr(adapter, fdir);
2460 				}
2461 			}
2462 			spin_unlock_bh(&adapter->fdir_fltr_lock);
2463 			}
2464 			break;
2465 		case VIRTCHNL_OP_ADD_RSS_CFG: {
2466 			struct iavf_adv_rss *rss, *rss_tmp;
2467 
2468 			spin_lock_bh(&adapter->adv_rss_lock);
2469 			list_for_each_entry_safe(rss, rss_tmp,
2470 						 &adapter->adv_rss_list_head,
2471 						 list) {
2472 				if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2473 					iavf_print_adv_rss_cfg(adapter, rss,
2474 							       "Failed to change the input set for",
2475 							       NULL);
2476 					list_del(&rss->list);
2477 					kfree(rss);
2478 				}
2479 			}
2480 			spin_unlock_bh(&adapter->adv_rss_lock);
2481 			}
2482 			break;
2483 		case VIRTCHNL_OP_DEL_RSS_CFG: {
2484 			struct iavf_adv_rss *rss;
2485 
2486 			spin_lock_bh(&adapter->adv_rss_lock);
2487 			list_for_each_entry(rss, &adapter->adv_rss_list_head,
2488 					    list) {
2489 				if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2490 					rss->state = IAVF_ADV_RSS_ACTIVE;
2491 					dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
2492 						iavf_stat_str(&adapter->hw,
2493 							      v_retval));
2494 				}
2495 			}
2496 			spin_unlock_bh(&adapter->adv_rss_lock);
2497 			}
2498 			break;
2499 		case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2500 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2501 			/* Vlan stripping could not be enabled by ethtool.
2502 			 * Disable it in netdev->features.
2503 			 */
2504 			iavf_netdev_features_vlan_strip_set(netdev, false);
2505 			break;
2506 		case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2507 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2508 			/* Vlan stripping could not be disabled by ethtool.
2509 			 * Enable it in netdev->features.
2510 			 */
2511 			iavf_netdev_features_vlan_strip_set(netdev, true);
2512 			break;
2513 		case VIRTCHNL_OP_ADD_VLAN_V2:
2514 			iavf_vlan_add_reject(adapter);
2515 			dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2516 				 iavf_stat_str(&adapter->hw, v_retval));
2517 			break;
2518 		case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
2519 			dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n",
2520 				 iavf_stat_str(&adapter->hw, v_retval));
2521 
2522 			if (adapter->hfunc ==
2523 					VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
2524 				adapter->hfunc =
2525 					VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
2526 			else
2527 				adapter->hfunc =
2528 					VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
2529 
2530 			break;
2531 		case VIRTCHNL_OP_GET_QOS_CAPS:
2532 			dev_warn(&adapter->pdev->dev, "Failed to Get Qos CAPs, error %s\n",
2533 				 iavf_stat_str(&adapter->hw, v_retval));
2534 			break;
2535 		case VIRTCHNL_OP_CONFIG_QUANTA:
2536 			dev_warn(&adapter->pdev->dev, "Failed to Config Quanta, error %s\n",
2537 				 iavf_stat_str(&adapter->hw, v_retval));
2538 			break;
2539 		case VIRTCHNL_OP_CONFIG_QUEUE_BW:
2540 			dev_warn(&adapter->pdev->dev, "Failed to Config Queue BW, error %s\n",
2541 				 iavf_stat_str(&adapter->hw, v_retval));
2542 			break;
2543 		default:
2544 			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
2545 				v_retval, iavf_stat_str(&adapter->hw, v_retval),
2546 				v_opcode);
2547 		}
2548 	}
2549 	switch (v_opcode) {
2550 	case VIRTCHNL_OP_ADD_ETH_ADDR:
2551 		if (!v_retval)
2552 			iavf_mac_add_ok(adapter);
2553 		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
2554 			if (!ether_addr_equal(netdev->dev_addr,
2555 					      adapter->hw.mac.addr)) {
2556 				netif_addr_lock_bh(netdev);
2557 				eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2558 				netif_addr_unlock_bh(netdev);
2559 			}
2560 		wake_up(&adapter->vc_waitqueue);
2561 		break;
2562 	case VIRTCHNL_OP_GET_STATS: {
2563 		struct iavf_eth_stats *stats =
2564 			(struct iavf_eth_stats *)msg;
2565 		netdev->stats.rx_packets = stats->rx_unicast +
2566 					   stats->rx_multicast +
2567 					   stats->rx_broadcast;
2568 		netdev->stats.tx_packets = stats->tx_unicast +
2569 					   stats->tx_multicast +
2570 					   stats->tx_broadcast;
2571 		netdev->stats.rx_bytes = stats->rx_bytes;
2572 		netdev->stats.tx_bytes = stats->tx_bytes;
2573 		netdev->stats.tx_errors = stats->tx_errors;
2574 		netdev->stats.rx_dropped = stats->rx_discards;
2575 		netdev->stats.tx_dropped = stats->tx_discards;
2576 		adapter->current_stats = *stats;
2577 		}
2578 		break;
2579 	case VIRTCHNL_OP_GET_VF_RESOURCES: {
2580 		u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
2581 
2582 		memcpy(adapter->vf_res, msg, min(msglen, len));
2583 		iavf_validate_num_queues(adapter);
2584 		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
2585 		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
2586 			/* restore current mac address */
2587 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2588 		} else {
2589 			netif_addr_lock_bh(netdev);
2590 			/* refresh current mac address if changed */
2591 			ether_addr_copy(netdev->perm_addr,
2592 					adapter->hw.mac.addr);
2593 			netif_addr_unlock_bh(netdev);
2594 		}
2595 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2596 		iavf_add_filter(adapter, adapter->hw.mac.addr);
2597 
2598 		if (VLAN_ALLOWED(adapter)) {
2599 			if (!list_empty(&adapter->vlan_filter_list)) {
2600 				struct iavf_vlan_filter *vlf;
2601 
2602 				/* re-add all VLAN filters over virtchnl */
2603 				list_for_each_entry(vlf,
2604 						    &adapter->vlan_filter_list,
2605 						    list)
2606 					vlf->state = IAVF_VLAN_ADD;
2607 
2608 				adapter->aq_required |=
2609 					IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2610 			}
2611 		}
2612 
2613 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2614 
2615 		iavf_activate_fdir_filters(adapter);
2616 
2617 		iavf_parse_vf_resource_msg(adapter);
2618 
2619 		/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
2620 		 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
2621 		 * configuration
2622 		 */
2623 		if (VLAN_V2_ALLOWED(adapter))
2624 			break;
2625 		/* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
2626 		 * wasn't successfully negotiated with the PF
2627 		 */
2628 		}
2629 		fallthrough;
2630 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
2631 		struct iavf_mac_filter *f;
2632 		bool was_mac_changed;
2633 		u64 aq_required = 0;
2634 
2635 		if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
2636 			memcpy(&adapter->vlan_v2_caps, msg,
2637 			       min_t(u16, msglen,
2638 				     sizeof(adapter->vlan_v2_caps)));
2639 
2640 		iavf_process_config(adapter);
2641 		adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
2642 		iavf_schedule_finish_config(adapter);
2643 
2644 		iavf_set_queue_vlan_tag_loc(adapter);
2645 
2646 		was_mac_changed = !ether_addr_equal(netdev->dev_addr,
2647 						    adapter->hw.mac.addr);
2648 
2649 		spin_lock_bh(&adapter->mac_vlan_list_lock);
2650 
2651 		/* re-add all MAC filters */
2652 		list_for_each_entry(f, &adapter->mac_filter_list, list) {
2653 			if (was_mac_changed &&
2654 			    ether_addr_equal(netdev->dev_addr, f->macaddr))
2655 				ether_addr_copy(f->macaddr,
2656 						adapter->hw.mac.addr);
2657 
2658 			f->is_new_mac = true;
2659 			f->add = true;
2660 			f->add_handled = false;
2661 			f->remove = false;
2662 		}
2663 
2664 		/* re-add all VLAN filters */
2665 		if (VLAN_FILTERING_ALLOWED(adapter)) {
2666 			struct iavf_vlan_filter *vlf;
2667 
2668 			if (!list_empty(&adapter->vlan_filter_list)) {
2669 				list_for_each_entry(vlf,
2670 						    &adapter->vlan_filter_list,
2671 						    list)
2672 					vlf->state = IAVF_VLAN_ADD;
2673 
2674 				aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2675 			}
2676 		}
2677 
2678 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
2679 
2680 		netif_addr_lock_bh(netdev);
2681 		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2682 		netif_addr_unlock_bh(netdev);
2683 
2684 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
2685 			aq_required;
2686 		}
2687 		break;
2688 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
2689 		if (msglen != sizeof(u64))
2690 			return;
2691 
2692 		adapter->supp_rxdids = *(u64 *)msg;
2693 
2694 		break;
2695 	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
2696 		if (msglen != sizeof(adapter->ptp.hw_caps))
2697 			return;
2698 
2699 		adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg;
2700 
2701 		/* process any state change needed due to new capabilities */
2702 		iavf_ptp_process_caps(adapter);
2703 		break;
2704 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
2705 		iavf_virtchnl_ptp_get_time(adapter, msg, msglen);
2706 		break;
2707 	case VIRTCHNL_OP_ENABLE_QUEUES:
2708 		/* enable transmits */
2709 		iavf_irq_enable(adapter, true);
2710 		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
2711 		break;
2712 	case VIRTCHNL_OP_DISABLE_QUEUES:
2713 		iavf_free_all_tx_resources(adapter);
2714 		iavf_free_all_rx_resources(adapter);
2715 		if (adapter->state == __IAVF_DOWN_PENDING) {
2716 			iavf_change_state(adapter, __IAVF_DOWN);
2717 			wake_up(&adapter->down_waitqueue);
2718 		}
2719 		break;
2720 	case VIRTCHNL_OP_VERSION:
2721 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2722 		/* Don't display an error if we get these out of sequence.
2723 		 * If the firmware needed to get kicked, we'll get these and
2724 		 * it's no problem.
2725 		 */
2726 		if (v_opcode != adapter->current_op)
2727 			return;
2728 		break;
2729 	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: {
2730 		struct virtchnl_rss_hashcfg *vrh =
2731 			(struct virtchnl_rss_hashcfg *)msg;
2732 
2733 		if (msglen == sizeof(*vrh))
2734 			adapter->rss_hashcfg = vrh->hashcfg;
2735 		else
2736 			dev_warn(&adapter->pdev->dev,
2737 				 "Invalid message %d from PF\n", v_opcode);
2738 		}
2739 		break;
2740 	case VIRTCHNL_OP_REQUEST_QUEUES: {
2741 		struct virtchnl_vf_res_request *vfres =
2742 			(struct virtchnl_vf_res_request *)msg;
2743 
2744 		if (vfres->num_queue_pairs != adapter->num_req_queues) {
2745 			dev_info(&adapter->pdev->dev,
2746 				 "Requested %d queues, PF can support %d\n",
2747 				 adapter->num_req_queues,
2748 				 vfres->num_queue_pairs);
2749 			adapter->num_req_queues = 0;
2750 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2751 		}
2752 		}
2753 		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf;

		/* PF accepted the add: promote every pending cloud filter
		 * to active.
		 */
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			if (cf->state == __IAVF_CF_ADD_PENDING)
				cf->state = __IAVF_CF_ACTIVE;
		}
		}
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf, *cftmp;

		/* PF confirmed deletion: free every filter whose delete was
		 * pending (safe iteration, entries are unlinked and freed).
		 */
		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
					 list) {
			if (cf->state == __IAVF_CF_DEL_PENDING) {
				cf->state = __IAVF_CF_INVALID;
				list_del(&cf->list);
				kfree(cf);
				adapter->num_cloud_filters--;
			}
		}
		}
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER: {
		struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
		struct iavf_fdir_fltr *fdir, *fdir_tmp;

		/* Resolve every pending Flow Director add against the PF
		 * reply: on success record the PF-assigned flow_id and mark
		 * the filter active; on failure log it, unlink it and free
		 * it.  fdir_fltr_lock protects the list throughout.
		 */
		spin_lock_bh(&adapter->fdir_fltr_lock);
		list_for_each_entry_safe(fdir, fdir_tmp,
					 &adapter->fdir_list_head,
					 list) {
			if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
				if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
					if (!iavf_is_raw_fdir(fdir))
						dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
							 fdir->loc);
					else
						dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n",
							 TC_U32_USERHTID(fdir->cls_u32_handle));
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					fdir->flow_id = add_fltr->flow_id;
				} else {
					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
						 add_fltr->status);
					iavf_print_fdir_fltr(adapter, fdir);
					list_del(&fdir->list);
					iavf_dec_fdir_active_fltr(adapter, fdir);
					kfree(fdir);
				}
			}
		}
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		}
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER: {
		struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
		struct iavf_fdir_fltr *fdir, *fdir_tmp;

		/* Resolve pending deletes and disables.  RULE_NONEXIST is
		 * treated like success (the rule is gone either way):
		 * DEL_PENDING filters are unlinked and freed, DIS_PENDING
		 * ones are kept but marked inactive.  On any other status
		 * the filter is restored to active and the failure logged.
		 */
		spin_lock_bh(&adapter->fdir_fltr_lock);
		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
					 list) {
			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
				    del_fltr->status ==
				    VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
					if (!iavf_is_raw_fdir(fdir))
						dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
							 fdir->loc);
					else
						dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n",
							 TC_U32_USERHTID(fdir->cls_u32_handle));
					list_del(&fdir->list);
					iavf_dec_fdir_active_fltr(adapter, fdir);
					kfree(fdir);
				} else {
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
						 del_fltr->status);
					iavf_print_fdir_fltr(adapter, fdir);
				}
			} else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
				    del_fltr->status ==
				    VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
					fdir->state = IAVF_FDIR_FLTR_INACTIVE;
				} else {
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
						 del_fltr->status);
					iavf_print_fdir_fltr(adapter, fdir);
				}
			}
		}
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		}
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG: {
		struct iavf_adv_rss *rss;

		/* Advanced RSS input-set change accepted by the PF: log it
		 * and activate every pending entry under adv_rss_lock.
		 */
		spin_lock_bh(&adapter->adv_rss_lock);
		list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
			if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
				iavf_print_adv_rss_cfg(adapter, rss,
						       "Input set change for",
						       "successful");
				rss->state = IAVF_ADV_RSS_ACTIVE;
			}
		}
		spin_unlock_bh(&adapter->adv_rss_lock);
		}
		break;
	case VIRTCHNL_OP_DEL_RSS_CFG: {
		struct iavf_adv_rss *rss, *rss_tmp;

		/* Deletion confirmed: unlink and free every entry whose
		 * delete was pending.
		 */
		spin_lock_bh(&adapter->adv_rss_lock);
		list_for_each_entry_safe(rss, rss_tmp,
					 &adapter->adv_rss_list_head, list) {
			if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
				list_del(&rss->list);
				kfree(rss);
			}
		}
		spin_unlock_bh(&adapter->adv_rss_lock);
		}
		break;
	case VIRTCHNL_OP_ADD_VLAN:
	case VIRTCHNL_OP_ADD_VLAN_V2: {
		struct iavf_vlan_filter *f;

		/* Nothing to do here if the PF rejected the add (v_retval
		 * set); otherwise promote all pending VLAN filters to
		 * active under the MAC/VLAN list lock.
		 */
		if (v_retval)
			break;

		spin_lock_bh(&adapter->mac_vlan_list_lock);
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->state == IAVF_VLAN_ADDING)
				f->state = IAVF_VLAN_ACTIVE;
		}
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		}
		break;
	case VIRTCHNL_OP_DEL_VLAN:
	case VIRTCHNL_OP_DEL_VLAN_V2: {
		struct iavf_vlan_filter *f, *ftmp;

		spin_lock_bh(&adapter->mac_vlan_list_lock);
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list,
					 list) {
			if (f->state == IAVF_VLAN_REMOVING) {
				if (v_retval) {
					/* PF rejected DEL, keep filter */
					f->state = IAVF_VLAN_ACTIVE;
				} else {
					/* Delete confirmed: unlink and free. */
					list_del(&f->list);
					kfree(f);
					adapter->num_vlan_filters--;
				}
			}
		}
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		}
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		/* PF enabled vlan strip on this VF.
		 * Update netdev->features if needed to be in sync with ethtool.
		 */
		if (!v_retval)
			iavf_netdev_features_vlan_strip_set(netdev, true);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		/* PF disabled vlan strip on this VF.
		 * Update netdev->features if needed to be in sync with ethtool.
		 */
		if (!v_retval)
			iavf_netdev_features_vlan_strip_set(netdev, false);
		break;
	case VIRTCHNL_OP_GET_QOS_CAPS: {
		u16 len = struct_size(adapter->qos_caps, cap,
				      IAVF_MAX_QOS_TC_NUM);

		/* Copy no more than the reply carries (msglen) and no more
		 * than the local caps buffer holds (len), then schedule the
		 * queue quanta-size configuration.
		 */
		memcpy(adapter->qos_caps, msg, min(msglen, len));

		adapter->aq_required |= IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
		}
		break;
	case VIRTCHNL_OP_CONFIG_QUANTA:
		/* No completion-time work needed for quanta config. */
		break;
	case VIRTCHNL_OP_CONFIG_QUEUE_BW: {
		int i;
		/* shaper configuration is successful for all queues */
		for (i = 0; i < adapter->num_active_queues; i++)
			adapter->tx_rings[i].q_shaper_update = false;
	}
		break;
	default:
		/* Warn when the reply doesn't match the command in flight. */
		if (adapter->current_op && (v_opcode != adapter->current_op))
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */
	/* Reply consumed: mark no virtchnl command in flight. */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
2954 }
2955