1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "i40e.h"
5 #include "i40e_lan_hmc.h"
6 #include "i40e_virtchnl_pf.h"
7 
8 /*********************notification routines***********************/
9 
10 /**
11  * i40e_vc_vf_broadcast
12  * @pf: pointer to the PF structure
13  * @v_opcode: operation code
14  * @v_retval: return value
15  * @msg: pointer to the msg buffer
16  * @msglen: msg length
17  *
18  * send a message to all VFs on a given PF
19  **/
20 static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
21 				 enum virtchnl_ops v_opcode,
22 				 int v_retval, u8 *msg,
23 				 u16 msglen)
24 {
25 	struct i40e_hw *hw = &pf->hw;
26 	struct i40e_vf *vf = pf->vf;
27 	int i;
28 
29 	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
30 		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
31 		/* Not all VFs are enabled, so skip the ones that are not */
32 		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
33 		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
34 			continue;
35 
36 		/* Ignore return value on purpose - a given VF may fail, but
37 		 * we need to keep going and send to all of them
38 		 */
39 		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
40 				       msg, msglen, NULL);
41 	}
42 }
43 
44 /**
45  * i40e_vc_link_speed2mbps
46  * @link_speed: the speed to convert
47  *
48  * Converts i40e_aq_link_speed to an integer value of Mbps and returns
49  * the speed as a direct value of Mbps.
50  **/
51 static u32
52 i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
53 {
54 	switch (link_speed) {
55 	case I40E_LINK_SPEED_100MB:
56 		return SPEED_100;
57 	case I40E_LINK_SPEED_1GB:
58 		return SPEED_1000;
59 	case I40E_LINK_SPEED_2_5GB:
60 		return SPEED_2500;
61 	case I40E_LINK_SPEED_5GB:
62 		return SPEED_5000;
63 	case I40E_LINK_SPEED_10GB:
64 		return SPEED_10000;
65 	case I40E_LINK_SPEED_20GB:
66 		return SPEED_20000;
67 	case I40E_LINK_SPEED_25GB:
68 		return SPEED_25000;
69 	case I40E_LINK_SPEED_40GB:
70 		return SPEED_40000;
71 	case I40E_LINK_SPEED_UNKNOWN:
72 		return SPEED_UNKNOWN;
73 	}
74 	return SPEED_UNKNOWN;
75 }
76 
77 /**
78  * i40e_set_vf_link_state
79  * @vf: pointer to the VF structure
80  * @pfe: pointer to PF event structure
81  * @ls: pointer to link status structure
82  *
83  * set the link state on a single VF
84  **/
85 static void i40e_set_vf_link_state(struct i40e_vf *vf,
86 				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
87 {
88 	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;
89 
90 	if (vf->link_forced)
91 		link_status = vf->link_up;
92 
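	/* VFs that negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED expect the link
	 * speed as a plain Mbps value; legacy VFs expect the virtchnl
	 * link-speed enum instead.
	 */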
93 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
94 		pfe->event_data.link_event_adv.link_speed = link_status ?
95 			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
96 		pfe->event_data.link_event_adv.link_status = link_status;
97 	} else {
98 		pfe->event_data.link_event.link_speed = link_status ?
99 			i40e_virtchnl_link_speed(ls->link_speed) : 0;
100 		pfe->event_data.link_event.link_status = link_status;
101 	}
102 }
103 
104 /**
105  * i40e_vc_notify_vf_link_state
106  * @vf: pointer to the VF structure
107  *
108  * send a link status message to a single VF
109  **/
110 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
111 {
112 	struct virtchnl_pf_event pfe;
113 	struct i40e_pf *pf = vf->pf;
114 	struct i40e_hw *hw = &pf->hw;
115 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
116 	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
117 
118 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
119 	pfe.severity = PF_EVENT_SEVERITY_INFO;
120 
121 	i40e_set_vf_link_state(vf, &pfe, ls);
122 
123 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
124 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
125 }
126 
127 /**
128  * i40e_vc_notify_link_state
129  * @pf: pointer to the PF structure
130  *
131  * send a link status message to all VFs on a given PF
132  **/
133 void i40e_vc_notify_link_state(struct i40e_pf *pf)
134 {
135 	int i;
136 
137 	for (i = 0; i < pf->num_alloc_vfs; i++)
138 		i40e_vc_notify_vf_link_state(&pf->vf[i]);
139 }
140 
141 /**
142  * i40e_vc_notify_reset
143  * @pf: pointer to the PF structure
144  *
145  * indicate a pending reset to all VFs on a given PF
146  **/
147 void i40e_vc_notify_reset(struct i40e_pf *pf)
148 {
149 	struct virtchnl_pf_event pfe;
150 
151 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
152 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
153 	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
154 			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
155 }
156 
157 /**
158  * i40e_vc_notify_vf_reset
159  * @vf: pointer to the VF structure
160  *
161  * indicate a pending reset to the given VF
162  **/
163 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
164 {
165 	struct virtchnl_pf_event pfe;
166 	int abs_vf_id;
167 
168 	/* validate the request */
169 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
170 		return;
171 
172 	/* verify that the VF is in init or active state before proceeding */
173 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
174 	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
175 		return;
176 
177 	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
178 
179 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
180 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
181 	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
182 			       0, (u8 *)&pfe,
183 			       sizeof(struct virtchnl_pf_event), NULL);
184 }
185 /***********************misc routines*****************************/
186 
187 /**
188  * i40e_vc_reset_vf
189  * @vf: pointer to the VF info
190  * @notify_vf: whether to notify the VF about the reset
191  * Reset VF handler.
192  **/
193 static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
194 {
195 	struct i40e_pf *pf = vf->pf;
196 	int i;
197 
198 	if (notify_vf)
199 		i40e_vc_notify_vf_reset(vf);
200 
201 	/* We want to ensure that an actual reset is initiated after this
202 	 * function is called. However, we do not want to wait forever, so
203 	 * we'll give a reasonable time and print a message if we failed to
204 	 * ensure a reset.
205 	 */
206 	for (i = 0; i < 20; i++) {
207 		/* If the PF is releasing its VFs, resetting the VF is
208 		 * impossible, so leave it.
209 		 */
210 		if (test_bit(__I40E_VFS_RELEASING, pf->state))
211 			return;
212 		if (i40e_reset_vf(vf, false))
213 			return;
214 		usleep_range(10000, 20000);
215 	}
216 
217 	if (notify_vf)
218 		dev_warn(&vf->pf->pdev->dev,
219 			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
220 			 vf->vf_id);
221 	else
222 		dev_dbg(&vf->pf->pdev->dev,
223 			"Failed to initiate reset for VF %d after 200 milliseconds\n",
224 			vf->vf_id);
225 }
226 
227 /**
228  * i40e_vc_isvalid_vsi_id
229  * @vf: pointer to the VF info
230  * @vsi_id: VF relative VSI id
231  *
232  * check for a valid VSI id
233  **/
234 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
235 {
236 	struct i40e_pf *pf = vf->pf;
237 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
238 
239 	return (vsi && (vsi->vf_id == vf->vf_id));
240 }
241 
242 /**
243  * i40e_vc_isvalid_queue_id
244  * @vf: pointer to the VF info
245  * @vsi_id: vsi id
246  * @qid: vsi relative queue id
247  *
248  * check for a valid queue id
249  **/
250 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
251 					    u16 qid)
252 {
253 	struct i40e_pf *pf = vf->pf;
254 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
255 
256 	return (vsi && (qid < vsi->alloc_queue_pairs));
257 }
258 
259 /**
260  * i40e_vc_isvalid_vector_id
261  * @vf: pointer to the VF info
262  * @vector_id: VF relative vector id
263  *
264  * check for a valid vector id
265  **/
266 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
267 {
268 	struct i40e_pf *pf = vf->pf;
269 
270 	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
271 }
272 
273 /***********************vf resource mgmt routines*****************/
274 
275 /**
276  * i40e_vc_get_pf_queue_id
277  * @vf: pointer to the VF info
278  * @vsi_id: id of VSI as provided by the FW
279  * @vsi_queue_id: vsi relative queue id
280  *
281  * return PF relative queue id
282  **/
283 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
284 				   u8 vsi_queue_id)
285 {
286 	struct i40e_pf *pf = vf->pf;
287 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
288 	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
289 
290 	if (!vsi)
291 		return pf_queue_id;
292 
293 	if (le16_to_cpu(vsi->info.mapping_flags) &
294 	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
295 		pf_queue_id =
296 			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
297 	else
298 		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
299 			      vsi_queue_id;
300 
301 	return pf_queue_id;
302 }
303 
304 /**
305  * i40e_get_real_pf_qid
306  * @vf: pointer to the VF info
307  * @vsi_id: vsi id
308  * @queue_id: queue number
309  *
310  * wrapper function to get pf_queue_id handling ADq code as well
311  **/
312 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
313 {
314 	int i;
315 
316 	if (vf->adq_enabled) {
317 		/* Although the VF considers all its queues (1 to 16) as its
318 		 * own, they may actually belong to different VSIs (up to 4).
319 		 * We need to find which VSI each queue belongs to.
320 		 */
321 		for (i = 0; i < vf->num_tc; i++) {
322 			if (queue_id < vf->ch[i].num_qps) {
323 				vsi_id = vf->ch[i].vsi_id;
324 				break;
325 			}
326 			/* find the right queue id relative to a
327 			 * given VSI.
328 			 */
329 			queue_id -= vf->ch[i].num_qps;
330 		}
331 	}
332 
333 	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
334 }
335 
336 /**
337  * i40e_config_irq_link_list
338  * @vf: pointer to the VF info
339  * @vsi_id: id of VSI as given by the FW
340  * @vecmap: irq map info
341  *
342  * configure irq link list from the map
343  **/
344 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
345 				      struct virtchnl_vector_map *vecmap)
346 {
347 	unsigned long linklistmap = 0, tempmap;
348 	struct i40e_pf *pf = vf->pf;
349 	struct i40e_hw *hw = &pf->hw;
350 	u16 vsi_queue_id, pf_queue_id;
351 	enum i40e_queue_type qtype;
352 	u16 next_q, vector_id, size;
353 	u32 reg, reg_idx;
354 	u16 itr_idx = 0;
355 
356 	vector_id = vecmap->vector_id;
357 	/* setup the head */
358 	if (vector_id == 0)
359 		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
360 	else
361 		reg_idx = I40E_VPINT_LNKLSTN(
362 		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
363 		     (vector_id - 1));
364 
365 	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
366 		/* Special case - No queues mapped on this vector */
367 		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
368 		goto irq_list_done;
369 	}
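	/* Build a bitmap of the queues attached to this vector. Each VSI
	 * queue pair occupies two adjacent bits: the even bit represents
	 * the Rx queue and the odd bit the Tx queue of that pair.
	 */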
370 	tempmap = vecmap->rxq_map;
371 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
372 		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
373 				    vsi_queue_id));
374 	}
375 
376 	tempmap = vecmap->txq_map;
377 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
378 		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
379 				     vsi_queue_id + 1));
380 	}
381 
382 	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
383 	next_q = find_first_bit(&linklistmap, size);
384 	if (unlikely(next_q == size))
385 		goto irq_list_done;
386 
387 	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
388 	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
389 	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
390 	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
391 
392 	wr32(hw, reg_idx, reg);
393 
394 	while (next_q < size) {
395 		switch (qtype) {
396 		case I40E_QUEUE_TYPE_RX:
397 			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
398 			itr_idx = vecmap->rxitr_idx;
399 			break;
400 		case I40E_QUEUE_TYPE_TX:
401 			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
402 			itr_idx = vecmap->txitr_idx;
403 			break;
404 		default:
405 			break;
406 		}
407 
408 		next_q = find_next_bit(&linklistmap, size, next_q + 1);
409 		if (next_q < size) {
410 			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
411 			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
412 			pf_queue_id = i40e_get_real_pf_qid(vf,
413 							   vsi_id,
414 							   vsi_queue_id);
415 		} else {
416 			pf_queue_id = I40E_QUEUE_END_OF_LIST;
417 			qtype = 0;
418 		}
419 
420 		/* the format for the RQCTL & TQCTL regs is the same */
421 		reg = (vector_id) |
422 		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
423 		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
424 		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
425 		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
426 		wr32(hw, reg_idx, reg);
427 	}
428 
429 	/* if the VF is running in polling mode and using interrupt zero,
430 	 * we need to disable auto-mask when enabling interrupt zero for VFs.
431 	 */
432 	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
433 	    (vector_id == 0)) {
434 		reg = rd32(hw, I40E_GLINT_CTL);
435 		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
436 			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
437 			wr32(hw, I40E_GLINT_CTL, reg);
438 		}
439 	}
440 
441 irq_list_done:
442 	i40e_flush(hw);
443 }
444 
445 /**
446  * i40e_release_rdma_qvlist
447  * @vf: pointer to the VF
448  * Release the IRQ mappings for the VF's RDMA queue vector list and free it.
449  **/
450 static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
451 {
452 	struct i40e_pf *pf = vf->pf;
453 	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
454 	u32 msix_vf;
455 	u32 i;
456 
457 	if (!vf->qvlist_info)
458 		return;
459 
460 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
461 	for (i = 0; i < qvlist_info->num_vectors; i++) {
462 		struct virtchnl_rdma_qv_info *qv_info;
463 		u32 next_q_index, next_q_type;
464 		struct i40e_hw *hw = &pf->hw;
465 		u32 v_idx, reg_idx, reg;
466 
467 		qv_info = &qvlist_info->qv_info[i];
468 		if (!qv_info)
469 			continue;
470 		v_idx = qv_info->v_idx;
471 		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
472 			/* Figure out the queue after CEQ and make that the
473 			 * first queue.
474 			 */
475 			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
476 			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
477 			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
478 					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
479 			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
480 					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
481 
482 			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
483 			reg = (next_q_index &
484 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
485 			       (next_q_type <<
486 			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
487 
488 			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
489 		}
490 	}
491 	kfree(vf->qvlist_info);
492 	vf->qvlist_info = NULL;
493 }
494 
495 /**
496  * i40e_config_rdma_qvlist
497  * @vf: pointer to the VF info
498  * @qvlist_info: queue and vector list
499  *
500  * Return 0 on success or < 0 on error
501  **/
502 static int
503 i40e_config_rdma_qvlist(struct i40e_vf *vf,
504 			struct virtchnl_rdma_qvlist_info *qvlist_info)
505 {
506 	struct i40e_pf *pf = vf->pf;
507 	struct i40e_hw *hw = &pf->hw;
508 	struct virtchnl_rdma_qv_info *qv_info;
509 	u32 v_idx, i, reg_idx, reg;
510 	u32 next_q_idx, next_q_type;
511 	size_t size;
512 	u32 msix_vf;
513 	int ret = 0;
514 
515 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
516 
517 	if (qvlist_info->num_vectors > msix_vf) {
518 		dev_warn(&pf->pdev->dev,
519 			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
520 			 qvlist_info->num_vectors,
521 			 msix_vf);
522 		ret = -EINVAL;
523 		goto err_out;
524 	}
525 
526 	kfree(vf->qvlist_info);
527 	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
528 				    qvlist_info->num_vectors);
529 	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
530 	if (!vf->qvlist_info) {
531 		ret = -ENOMEM;
532 		goto err_out;
533 	}
534 	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
535 
536 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
537 	for (i = 0; i < qvlist_info->num_vectors; i++) {
538 		qv_info = &qvlist_info->qv_info[i];
539 		if (!qv_info)
540 			continue;
541 
542 		/* Validate vector id belongs to this vf */
543 		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
544 			ret = -EINVAL;
545 			goto err_free;
546 		}
547 
548 		v_idx = qv_info->v_idx;
549 
550 		vf->qvlist_info->qv_info[i] = *qv_info;
551 
552 		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
553 		/* We might be sharing the interrupt, so get the first queue
554 		 * index and type, push it down the list by adding the new
555 		 * queue on top. Also link it with the new queue in CEQCTL.
556 		 */
557 		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
558 		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
559 				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
560 		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
561 				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
562 
563 		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
564 			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
565 			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
566 			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
567 			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
568 			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
569 			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
570 			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
571 
572 			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
573 			reg = (qv_info->ceq_idx &
574 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
575 			       (I40E_QUEUE_TYPE_PE_CEQ <<
576 			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
577 			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
578 		}
579 
580 		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
581 			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
582 			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
583 			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
584 
585 			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
586 		}
587 	}
588 
589 	return 0;
590 err_free:
591 	kfree(vf->qvlist_info);
592 	vf->qvlist_info = NULL;
593 err_out:
594 	return ret;
595 }
596 
597 /**
598  * i40e_config_vsi_tx_queue
599  * @vf: pointer to the VF info
600  * @vsi_id: id of VSI as provided by the FW
601  * @vsi_queue_id: vsi relative queue index
602  * @info: config. info
603  *
604  * configure tx queue
605  **/
606 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
607 				    u16 vsi_queue_id,
608 				    struct virtchnl_txq_info *info)
609 {
610 	struct i40e_pf *pf = vf->pf;
611 	struct i40e_hw *hw = &pf->hw;
612 	struct i40e_hmc_obj_txq tx_ctx;
613 	struct i40e_vsi *vsi;
614 	u16 pf_queue_id;
615 	u32 qtx_ctl;
616 	int ret = 0;
617 
618 	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
619 		ret = -ENOENT;
620 		goto error_context;
621 	}
622 	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
623 	vsi = i40e_find_vsi_from_id(pf, vsi_id);
624 	if (!vsi) {
625 		ret = -ENOENT;
626 		goto error_context;
627 	}
628 
629 	/* clear the context structure first */
630 	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
631 
632 	/* only set the required fields */
633 	tx_ctx.base = info->dma_ring_addr / 128;
634 	tx_ctx.qlen = info->ring_len;
635 	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
636 	tx_ctx.rdylist_act = 0;
637 	tx_ctx.head_wb_ena = info->headwb_enabled;
638 	tx_ctx.head_wb_addr = info->dma_headwb_addr;
639 
640 	/* clear the context in the HMC */
641 	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
642 	if (ret) {
643 		dev_err(&pf->pdev->dev,
644 			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
645 			pf_queue_id, ret);
646 		ret = -ENOENT;
647 		goto error_context;
648 	}
649 
650 	/* set the context in the HMC */
651 	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
652 	if (ret) {
653 		dev_err(&pf->pdev->dev,
654 			"Failed to set VF LAN Tx queue context %d error: %d\n",
655 			pf_queue_id, ret);
656 		ret = -ENOENT;
657 		goto error_context;
658 	}
659 
660 	/* associate this queue with the PCI VF function */
661 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
662 	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
663 		    & I40E_QTX_CTL_PF_INDX_MASK);
664 	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
665 		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
666 		    & I40E_QTX_CTL_VFVM_INDX_MASK);
667 	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
668 	i40e_flush(hw);
669 
670 error_context:
671 	return ret;
672 }
673 
674 /**
675  * i40e_config_vsi_rx_queue
676  * @vf: pointer to the VF info
677  * @vsi_id: id of VSI  as provided by the FW
678  * @vsi_queue_id: vsi relative queue index
679  * @info: config. info
680  *
681  * configure rx queue
682  **/
683 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
684 				    u16 vsi_queue_id,
685 				    struct virtchnl_rxq_info *info)
686 {
687 	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
688 	struct i40e_pf *pf = vf->pf;
689 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
690 	struct i40e_hw *hw = &pf->hw;
691 	struct i40e_hmc_obj_rxq rx_ctx;
692 	int ret = 0;
693 
694 	/* clear the context structure first */
695 	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
696 
697 	/* only set the required fields */
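	/* the HMC queue context stores the ring base address in units of
	 * 128 bytes, hence the division by 128
	 */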
698 	rx_ctx.base = info->dma_ring_addr / 128;
699 	rx_ctx.qlen = info->ring_len;
700 
701 	if (info->splithdr_enabled) {
702 		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
703 				  I40E_RX_SPLIT_IP      |
704 				  I40E_RX_SPLIT_TCP_UDP |
705 				  I40E_RX_SPLIT_SCTP;
706 		/* header length validation */
707 		if (info->hdr_size > ((2 * 1024) - 64)) {
708 			ret = -EINVAL;
709 			goto error_param;
710 		}
711 		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
712 
713 		/* set split mode 10b */
714 		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
715 	}
716 
717 	/* databuffer length validation */
718 	if (info->databuffer_size > ((16 * 1024) - 128)) {
719 		ret = -EINVAL;
720 		goto error_param;
721 	}
722 	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
723 
724 	/* max pkt. length validation */
725 	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
726 		ret = -EINVAL;
727 		goto error_param;
728 	}
729 	rx_ctx.rxmax = info->max_pkt_size;
730 
731 	/* if a port VLAN is configured, increase the max packet size */
732 	if (vsi->info.pvid)
733 		rx_ctx.rxmax += VLAN_HLEN;
734 
735 	/* always use 32-byte descriptors */
736 	rx_ctx.dsize = 1;
737 
738 	/* default values */
739 	rx_ctx.lrxqthresh = 1;
740 	rx_ctx.crcstrip = 1;
741 	rx_ctx.prefena = 1;
742 	rx_ctx.l2tsel = 1;
743 
744 	/* clear the context in the HMC */
745 	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
746 	if (ret) {
747 		dev_err(&pf->pdev->dev,
748 			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
749 			pf_queue_id, ret);
750 		ret = -ENOENT;
751 		goto error_param;
752 	}
753 
754 	/* set the context in the HMC */
755 	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
756 	if (ret) {
757 		dev_err(&pf->pdev->dev,
758 			"Failed to set VF LAN Rx queue context %d error: %d\n",
759 			pf_queue_id, ret);
760 		ret = -ENOENT;
761 		goto error_param;
762 	}
763 
764 error_param:
765 	return ret;
766 }
767 
768 /**
769  * i40e_alloc_vsi_res
770  * @vf: pointer to the VF info
771  * @idx: VSI index, applies only for ADq mode, zero otherwise
772  *
773  * alloc VF vsi context & resources
774  **/
775 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
776 {
777 	struct i40e_mac_filter *f = NULL;
778 	struct i40e_pf *pf = vf->pf;
779 	struct i40e_vsi *vsi;
780 	u64 max_tx_rate = 0;
781 	int ret = 0;
782 
783 	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
784 			     vf->vf_id);
785 
786 	if (!vsi) {
787 		dev_err(&pf->pdev->dev,
788 			"add vsi failed for VF %d, aq_err %d\n",
789 			vf->vf_id, pf->hw.aq.asq_last_status);
790 		ret = -ENOENT;
791 		goto error_alloc_vsi_res;
792 	}
793 
794 	if (!idx) {
795 		u64 hena = i40e_pf_get_default_rss_hena(pf);
796 		u8 broadcast[ETH_ALEN];
797 
798 		vf->lan_vsi_idx = vsi->idx;
799 		vf->lan_vsi_id = vsi->id;
800 		/* If a port VLAN had been configured and the VF
801 		 * driver was then removed, the VSI port VLAN
802 		 * configuration was destroyed. Check if there is
803 		 * a port VLAN and restore the VSI configuration if
804 		 * needed.
805 		 */
806 		if (vf->port_vlan_id)
807 			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
808 
809 		spin_lock_bh(&vsi->mac_filter_hash_lock);
810 		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
811 			f = i40e_add_mac_filter(vsi,
812 						vf->default_lan_addr.addr);
813 			if (!f)
814 				dev_info(&pf->pdev->dev,
815 					 "Could not add MAC filter %pM for VF %d\n",
816 					vf->default_lan_addr.addr, vf->vf_id);
817 		}
818 		eth_broadcast_addr(broadcast);
819 		f = i40e_add_mac_filter(vsi, broadcast);
820 		if (!f)
821 			dev_info(&pf->pdev->dev,
822 				 "Could not allocate VF broadcast filter\n");
823 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
824 		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
825 		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
826 		/* program mac filter only for VF VSI */
827 		ret = i40e_sync_vsi_filters(vsi);
828 		if (ret)
829 			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
830 	}
831 
832 	/* store the VSI index and id for ADq; don't apply the MAC filter */
833 	if (vf->adq_enabled) {
834 		vf->ch[idx].vsi_idx = vsi->idx;
835 		vf->ch[idx].vsi_id = vsi->id;
836 	}
837 
838 	/* Set VF bandwidth if specified */
839 	if (vf->tx_rate) {
840 		max_tx_rate = vf->tx_rate;
841 	} else if (vf->ch[idx].max_tx_rate) {
842 		max_tx_rate = vf->ch[idx].max_tx_rate;
843 	}
844 
845 	if (max_tx_rate) {
846 		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
847 		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
848 						  max_tx_rate, 0, NULL);
849 		if (ret)
850 			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
851 				vf->vf_id, ret);
852 	}
853 
854 error_alloc_vsi_res:
855 	return ret;
856 }
857 
858 /**
859  * i40e_map_pf_queues_to_vsi
860  * @vf: pointer to the VF info
861  *
862  * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
863  * function takes care of the first part, VSILAN_QTABLE, mapping PF queues to VSI.
864  **/
865 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
866 {
867 	struct i40e_pf *pf = vf->pf;
868 	struct i40e_hw *hw = &pf->hw;
869 	u32 reg, num_tc = 1; /* VF has at least one traffic class */
870 	u16 vsi_id, qps;
871 	int i, j;
872 
873 	if (vf->adq_enabled)
874 		num_tc = vf->num_tc;
875 
876 	for (i = 0; i < num_tc; i++) {
877 		if (vf->adq_enabled) {
878 			qps = vf->ch[i].num_qps;
879 			vsi_id = vf->ch[i].vsi_id;
880 		} else {
881 			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
882 			vsi_id = vf->lan_vsi_id;
883 		}
884 
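		/* Each VSILAN_QTABLE register maps two queues: the PF queue
		 * id for VSI queue 2 * j in the low 16 bits and for queue
		 * 2 * j + 1 in the high 16 bits; 0x7FF marks an entry past
		 * the end of the list.
		 */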
885 		for (j = 0; j < 7; j++) {
886 			if (j * 2 >= qps) {
887 				/* end of list */
888 				reg = 0x07FF07FF;
889 			} else {
890 				u16 qid = i40e_vc_get_pf_queue_id(vf,
891 								  vsi_id,
892 								  j * 2);
893 				reg = qid;
894 				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
895 							      (j * 2) + 1);
896 				reg |= qid << 16;
897 			}
898 			i40e_write_rx_ctl(hw,
899 					  I40E_VSILAN_QTABLE(j, vsi_id),
900 					  reg);
901 		}
902 	}
903 }
904 
905 /**
906  * i40e_map_pf_to_vf_queues
907  * @vf: pointer to the VF info
908  *
909  * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
910  * function takes care of the second part, VPLAN_QTABLE, and completes VF mappings.
911  **/
912 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
913 {
914 	struct i40e_pf *pf = vf->pf;
915 	struct i40e_hw *hw = &pf->hw;
916 	u32 reg, total_qps = 0;
917 	u32 qps, num_tc = 1; /* VF has at least one traffic class */
918 	u16 vsi_id, qid;
919 	int i, j;
920 
921 	if (vf->adq_enabled)
922 		num_tc = vf->num_tc;
923 
924 	for (i = 0; i < num_tc; i++) {
925 		if (vf->adq_enabled) {
926 			qps = vf->ch[i].num_qps;
927 			vsi_id = vf->ch[i].vsi_id;
928 		} else {
929 			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
930 			vsi_id = vf->lan_vsi_id;
931 		}
932 
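		/* program one VPLAN_QTABLE entry per VF-relative queue,
		 * pointing it at the corresponding PF queue id
		 */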
933 		for (j = 0; j < qps; j++) {
934 			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
935 
936 			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
937 			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
938 			     reg);
939 			total_qps++;
940 		}
941 	}
942 }
943 
944 /**
945  * i40e_enable_vf_mappings
946  * @vf: pointer to the VF info
947  *
948  * enable VF mappings
949  **/
950 static void i40e_enable_vf_mappings(struct i40e_vf *vf)
951 {
952 	struct i40e_pf *pf = vf->pf;
953 	struct i40e_hw *hw = &pf->hw;
954 	u32 reg;
955 
956 	/* Tell the hardware we're using noncontiguous mapping. HW requires
957 	 * that VF queues be mapped using this method, even when they are
958 	 * contiguous in real life
959 	 */
960 	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
961 			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
962 
963 	/* enable VF vplan_qtable mappings */
964 	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
965 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
966 
967 	i40e_map_pf_to_vf_queues(vf);
968 	i40e_map_pf_queues_to_vsi(vf);
969 
970 	i40e_flush(hw);
971 }
972 
973 /**
974  * i40e_disable_vf_mappings
975  * @vf: pointer to the VF info
976  *
977  * disable VF mappings
978  **/
979 static void i40e_disable_vf_mappings(struct i40e_vf *vf)
980 {
981 	struct i40e_pf *pf = vf->pf;
982 	struct i40e_hw *hw = &pf->hw;
983 	int i;
984 
985 	/* disable qp mappings */
986 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
987 	for (i = 0; i < I40E_MAX_VSI_QP; i++)
988 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
989 		     I40E_QUEUE_END_OF_LIST);
990 	i40e_flush(hw);
991 }
992 
993 /**
994  * i40e_free_vf_res
995  * @vf: pointer to the VF info
996  *
997  * free VF resources
998  **/
999 static void i40e_free_vf_res(struct i40e_vf *vf)
1000 {
1001 	struct i40e_pf *pf = vf->pf;
1002 	struct i40e_hw *hw = &pf->hw;
1003 	u32 reg_idx, reg;
1004 	int i, j, msix_vf;
1005 
1006 	/* Start by disabling VF's configuration API to prevent the OS from
1007 	 * accessing the VF's VSI after it's freed / invalidated.
1008 	 */
1009 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1010 
1011 	/* It's possible the VF had requested more queues than the default so
1012 	 * do the accounting here when we're about to free them.
1013 	 */
1014 	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
1015 		pf->queues_left += vf->num_queue_pairs -
1016 				   I40E_DEFAULT_QUEUES_PER_VF;
1017 	}
1018 
1019 	/* free vsi & disconnect it from the parent uplink */
1020 	if (vf->lan_vsi_idx) {
1021 		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
1022 		vf->lan_vsi_idx = 0;
1023 		vf->lan_vsi_id = 0;
1024 	}
1025 
1026 	/* do the accounting and remove additional ADq VSI's */
1027 	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
1028 		for (j = 0; j < vf->num_tc; j++) {
1029 			/* At this point VSI0 is already released so don't
1030 			 * release it again; only clear its values in the
1031 			 * structure variables
1032 			 */
1033 			if (j)
1034 				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
1035 			vf->ch[j].vsi_idx = 0;
1036 			vf->ch[j].vsi_id = 0;
1037 		}
1038 	}
1039 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
1040 
1041 	/* disable interrupts so the VF starts in a known state */
1042 	for (i = 0; i < msix_vf; i++) {
1043 		/* the format is the same for both registers */
1044 		if (i == 0)
1045 			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
1046 		else
1047 			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
1048 						      (vf->vf_id))
1049 						     + (i - 1));
1050 		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1051 		i40e_flush(hw);
1052 	}
1053 
1054 	/* clear the irq settings */
1055 	for (i = 0; i < msix_vf; i++) {
1056 		/* the format is the same for both registers */
1057 		if (i == 0)
1058 			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
1059 		else
1060 			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
1061 						      (vf->vf_id))
1062 						     + (i - 1));
1063 		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
1064 		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1065 		wr32(hw, reg_idx, reg);
1066 		i40e_flush(hw);
1067 	}
1068 	/* reset some of the state variables keeping track of the resources */
1069 	vf->num_queue_pairs = 0;
1070 	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1071 	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1072 }
1073 
1074 /**
1075  * i40e_alloc_vf_res
1076  * @vf: pointer to the VF info
1077  *
1078  * allocate VF resources
1079  **/
1080 static int i40e_alloc_vf_res(struct i40e_vf *vf)
1081 {
1082 	struct i40e_pf *pf = vf->pf;
1083 	int total_queue_pairs = 0;
1084 	int ret, idx;
1085 
1086 	if (vf->num_req_queues &&
1087 	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1088 		pf->num_vf_qps = vf->num_req_queues;
1089 	else
1090 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1091 
1092 	/* allocate hw vsi context & associated resources */
1093 	ret = i40e_alloc_vsi_res(vf, 0);
1094 	if (ret)
1095 		goto error_alloc;
1096 	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1097 
1098 	/* allocate additional VSIs based on tc information for ADq */
1099 	if (vf->adq_enabled) {
1100 		if (pf->queues_left >=
1101 		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1102 			/* TC 0 always belongs to VF VSI */
1103 			for (idx = 1; idx < vf->num_tc; idx++) {
1104 				ret = i40e_alloc_vsi_res(vf, idx);
1105 				if (ret)
1106 					goto error_alloc;
1107 			}
1108 			/* send correct number of queues */
1109 			total_queue_pairs = I40E_MAX_VF_QUEUES;
1110 		} else {
1111 			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1112 				 vf->vf_id);
1113 			vf->adq_enabled = false;
1114 		}
1115 	}
1116 
1117 	/* We account for each VF to get a default number of queue pairs.  If
1118 	 * the VF has now requested more, we need to account for that to make
1119 	 * certain we never request more queues than we actually have left in
1120 	 * HW.
1121 	 */
1122 	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1123 		pf->queues_left -=
1124 			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1125 
1126 	if (vf->trusted)
1127 		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1128 	else
1129 		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1130 
1131 	/* store the total qps number for the runtime
1132 	 * VF req validation
1133 	 */
1134 	vf->num_queue_pairs = total_queue_pairs;
1135 
1136 	/* VF is now completely initialized */
1137 	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1138 
1139 error_alloc:
1140 	if (ret)
1141 		i40e_free_vf_res(vf);
1142 
1143 	return ret;
1144 }
1145 
1146 #define VF_DEVICE_STATUS 0xAA
1147 #define VF_TRANS_PENDING_MASK 0x20
1148 /**
1149  * i40e_quiesce_vf_pci
1150  * @vf: pointer to the VF structure
1151  *
1152  * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1153  * if the transactions never clear.
1154  **/
1155 static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1156 {
1157 	struct i40e_pf *pf = vf->pf;
1158 	struct i40e_hw *hw = &pf->hw;
1159 	int vf_abs_id, i;
1160 	u32 reg;
1161 
1162 	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1163 
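	/* Read the VF's PCI config space through the CIAA/CIAD indirect
	 * access pair: offset 0xAA (VF_DEVICE_STATUS) selects the device
	 * status register, where bit 5 (VF_TRANS_PENDING_MASK) reports
	 * pending transactions.
	 */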
1164 	wr32(hw, I40E_PF_PCI_CIAA,
1165 	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1166 	for (i = 0; i < 100; i++) {
1167 		reg = rd32(hw, I40E_PF_PCI_CIAD);
1168 		if ((reg & VF_TRANS_PENDING_MASK) == 0)
1169 			return 0;
1170 		udelay(1);
1171 	}
1172 	return -EIO;
1173 }
1174 
1175 /**
1176  * __i40e_getnum_vf_vsi_vlan_filters
1177  * @vsi: pointer to the vsi
1178  *
1179  * called to get the number of VLANs offloaded on this VF
1180  **/
1181 static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1182 {
1183 	struct i40e_mac_filter *f;
1184 	u16 num_vlans = 0, bkt;
1185 
1186 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1187 		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1188 			num_vlans++;
1189 	}
1190 
1191 	return num_vlans;
1192 }
1193 
1194 /**
1195  * i40e_getnum_vf_vsi_vlan_filters
1196  * @vsi: pointer to the vsi
1197  *
1198  * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
1199  **/
1200 static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1201 {
1202 	int num_vlans;
1203 
1204 	spin_lock_bh(&vsi->mac_filter_hash_lock);
1205 	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1206 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1207 
1208 	return num_vlans;
1209 }
1210 
1211 /**
1212  * i40e_get_vlan_list_sync
1213  * @vsi: pointer to the VSI
1214  * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
1215  * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
1216  *             This array is allocated here, but has to be freed in caller.
1217  *
1218  * Called to get number of VLANs and VLAN list present in mac_filter_hash.
1219  **/
1220 static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
1221 				    s16 **vlan_list)
1222 {
1223 	struct i40e_mac_filter *f;
1224 	int i = 0;
1225 	int bkt;
1226 
1227 	spin_lock_bh(&vsi->mac_filter_hash_lock);
1228 	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1229 	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
1230 	if (!(*vlan_list))
1231 		goto err;
1232 
1233 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1234 		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1235 			continue;
1236 		(*vlan_list)[i++] = f->vlan;
1237 	}
1238 err:
1239 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1240 }
1241 
1242 /**
1243  * i40e_set_vsi_promisc
1244  * @vf: pointer to the VF struct
1245  * @seid: VSI number
1246  * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
1247  *                for a given VLAN
1248  * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
1249  *                  for a given VLAN
1250  * @vl: List of VLANs - apply filter for given VLANs
1251  * @num_vlans: Number of elements in @vl
1252  **/
1253 static int
1254 i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
1255 		     bool unicast_enable, s16 *vl, u16 num_vlans)
1256 {
1257 	struct i40e_pf *pf = vf->pf;
1258 	struct i40e_hw *hw = &pf->hw;
1259 	int aq_ret, aq_tmp = 0;
1260 	int i;
1261 
1262 	/* No VLAN to set promisc on, set on VSI */
1263 	if (!num_vlans || !vl) {
1264 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
1265 							       multi_enable,
1266 							       NULL);
1267 		if (aq_ret) {
1268 			int aq_err = pf->hw.aq.asq_last_status;
1269 
1270 			dev_err(&pf->pdev->dev,
1271 				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1272 				vf->vf_id,
1273 				ERR_PTR(aq_ret),
1274 				i40e_aq_str(&pf->hw, aq_err));
1275 
1276 			return aq_ret;
1277 		}
1278 
1279 		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
1280 							     unicast_enable,
1281 							     NULL, true);
1282 
1283 		if (aq_ret) {
1284 			int aq_err = pf->hw.aq.asq_last_status;
1285 
1286 			dev_err(&pf->pdev->dev,
1287 				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1288 				vf->vf_id,
1289 				ERR_PTR(aq_ret),
1290 				i40e_aq_str(&pf->hw, aq_err));
1291 		}
1292 
1293 		return aq_ret;
1294 	}
1295 
1296 	for (i = 0; i < num_vlans; i++) {
1297 		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
1298 							    multi_enable,
1299 							    vl[i], NULL);
1300 		if (aq_ret) {
1301 			int aq_err = pf->hw.aq.asq_last_status;
1302 
1303 			dev_err(&pf->pdev->dev,
1304 				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1305 				vf->vf_id,
1306 				ERR_PTR(aq_ret),
1307 				i40e_aq_str(&pf->hw, aq_err));
1308 
1309 			if (!aq_tmp)
1310 				aq_tmp = aq_ret;
1311 		}
1312 
1313 		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
1314 							    unicast_enable,
1315 							    vl[i], NULL);
1316 		if (aq_ret) {
1317 			int aq_err = pf->hw.aq.asq_last_status;
1318 
1319 			dev_err(&pf->pdev->dev,
1320 				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1321 				vf->vf_id,
1322 				ERR_PTR(aq_ret),
1323 				i40e_aq_str(&pf->hw, aq_err));
1324 
1325 			if (!aq_tmp)
1326 				aq_tmp = aq_ret;
1327 		}
1328 	}
1329 
1330 	if (aq_tmp)
1331 		aq_ret = aq_tmp;
1332 
1333 	return aq_ret;
1334 }
1335 
1336 /**
1337  * i40e_config_vf_promiscuous_mode
1338  * @vf: pointer to the VF info
1339  * @vsi_id: VSI id
1340  * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
1341  * @alluni: set MAC L2 layer unicast promiscuous enable/disable
1342  *
1343  * Called from the VF to configure the promiscuous mode of
1344  * VF VSIs and from the VF reset path to reset promiscuous mode.
1345  **/
1346 static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1347 					   u16 vsi_id,
1348 					   bool allmulti,
1349 					   bool alluni)
1350 {
1351 	struct i40e_pf *pf = vf->pf;
1352 	struct i40e_vsi *vsi;
1353 	int aq_ret = 0;
1354 	u16 num_vlans;
1355 	s16 *vl;
1356 
1357 	vsi = i40e_find_vsi_from_id(pf, vsi_id);
1358 	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1359 		return -EINVAL;
1360 
1361 	if (vf->port_vlan_id) {
1362 		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
1363 					      alluni, &vf->port_vlan_id, 1);
1364 		return aq_ret;
1365 	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1366 		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
1367 
1368 		if (!vl)
1369 			return -ENOMEM;
1370 
1371 		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1372 					      vl, num_vlans);
1373 		kfree(vl);
1374 		return aq_ret;
1375 	}
1376 
1377 	/* no VLANs to set on, set on VSI */
1378 	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1379 				      NULL, 0);
1380 	return aq_ret;
1381 }
1382 
1383 /**
1384  * i40e_sync_vfr_reset
1385  * @hw: pointer to hw struct
1386  * @vf_id: VF identifier
1387  *
1388  * Before triggering a hardware reset, we need to know that no other process
1389  * has reserved the hardware for any reset operations. The check examines
1390  * the ADMINQ bit of the VFINT_ICR0_ENA register used to signal the reset.
1391  **/
1392 static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
1393 {
1394 	u32 reg;
1395 	int i;
1396 
1397 	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
1398 		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
1399 			   I40E_VFINT_ICR0_ADMINQ_MASK;
1400 		if (reg)
1401 			return 0;
1402 
1403 		usleep_range(100, 200);
1404 	}
1405 
1406 	return -EAGAIN;
1407 }
1408 
1409 /**
1410  * i40e_trigger_vf_reset
1411  * @vf: pointer to the VF structure
1412  * @flr: VFLR was issued or not
1413  *
1414  * Trigger hardware to start a reset for a particular VF. Expects the caller
1415  * to wait the proper amount of time to allow hardware to reset the VF before
1416  * it cleans up and restores VF functionality.
1417  **/
1418 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1419 {
1420 	struct i40e_pf *pf = vf->pf;
1421 	struct i40e_hw *hw = &pf->hw;
1422 	u32 reg, reg_idx, bit_idx;
1423 	bool vf_active;
1424 	u32 radq;
1425 
1426 	/* warn the VF */
1427 	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1428 
1429 	/* Disable VF's configuration API during reset. The flag is re-enabled
1430 	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1431 	 * It's normally disabled in i40e_free_vf_res(), but it's safer
1432 	 * to do it earlier to give any VF config functions that may
1433 	 * still be running at this point some time to finish.
1434 	 */
1435 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1436 
1437 	/* In the case of a VFLR, the HW has already reset the VF and we
1438 	 * just need to clean up, so don't hit the VFRTRIG register.
1439 	 */
1440 	if (!flr) {
1441 		/* Sync VFR reset before triggering the next one */
1442 		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
1443 			    I40E_VFINT_ICR0_ADMINQ_MASK;
1444 		if (vf_active && !radq)
1445 			/* wait for the VF driver to finish its reset */
1446 			if (i40e_sync_vfr_reset(hw, vf->vf_id))
1447 				dev_info(&pf->pdev->dev,
1448 					 "Reset VF %d never finished\n",
1449 					 vf->vf_id);
1450 
1451 		/* Reset the VF using the VPGEN_VFRTRIG reg. This also sets
1452 		 * the reset-in-progress state in the RSTAT1 register.
1453 		 */
1454 		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1455 		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1456 		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1457 		i40e_flush(hw);
1458 	}
1459 	/* clear the VFLR bit in GLGEN_VFLRSTAT */
1460 	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1461 	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1462 	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1463 	i40e_flush(hw);
1464 
1465 	if (i40e_quiesce_vf_pci(vf))
1466 		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1467 			vf->vf_id);
1468 }
1469 
1470 /**
1471  * i40e_cleanup_reset_vf
1472  * @vf: pointer to the VF structure
1473  *
1474  * Cleanup a VF after the hardware reset is finished. Expects the caller to
1475  * have verified that the reset finished properly, and ensured that the
1476  * minimum amount of wait time has passed.
1477  **/
1478 static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1479 {
1480 	struct i40e_pf *pf = vf->pf;
1481 	struct i40e_hw *hw = &pf->hw;
1482 	u32 reg;
1483 
1484 	/* disable promisc modes in case they were enabled */
1485 	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1486 
1487 	/* free VF resources to begin resetting the VSI state */
1488 	i40e_free_vf_res(vf);
1489 
1490 	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1491 	 * By doing this we allow HW to access VF memory at any point. If we
1492 	 * did it any sooner, HW could access memory while it was being freed
1493 	 * in i40e_free_vf_res(), causing an IOMMU fault.
1494 	 *
1495 	 * On the other hand, this needs to be done ASAP, because the VF driver
1496 	 * is waiting for this to happen and may report a timeout. It's
1497 	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1498 	 * it.
1499 	 */
1500 	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1501 	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1502 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1503 
1504 	/* reallocate VF resources to finish resetting the VSI state */
1505 	if (!i40e_alloc_vf_res(vf)) {
1506 		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1507 		i40e_enable_vf_mappings(vf);
1508 		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1509 		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1510 		/* Do not notify the client during VF init */
1511 		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1512 					&vf->vf_states))
1513 			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1514 		vf->num_vlan = 0;
1515 	}
1516 
1517 	/* Tell the VF driver the reset is done. This needs to be done only
1518 	 * after VF has been fully initialized, because the VF driver may
1519 	 * request resources immediately after setting this flag.
1520 	 */
1521 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1522 }
1523 
1524 /**
1525  * i40e_reset_vf
1526  * @vf: pointer to the VF structure
1527  * @flr: VFLR was issued or not
1528  *
1529  * Returns true if the VF is in reset, resets successfully, or resets
1530  * are disabled and false otherwise.
1531  **/
1532 bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1533 {
1534 	struct i40e_pf *pf = vf->pf;
1535 	struct i40e_hw *hw = &pf->hw;
1536 	bool rsd = false;
1537 	u32 reg;
1538 	int i;
1539 
1540 	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
1541 		return true;
1542 
1543 	/* Bail out if VFs are disabled. */
1544 	if (test_bit(__I40E_VF_DISABLE, pf->state))
1545 		return true;
1546 
1547 	/* If VF is being reset already we don't need to continue. */
1548 	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1549 		return true;
1550 
1551 	i40e_trigger_vf_reset(vf, flr);
1552 
1553 	/* poll VPGEN_VFRSTAT reg to make sure
1554 	 * that reset is complete
1555 	 */
1556 	for (i = 0; i < 10; i++) {
1557 		/* VF reset requires driver to first reset the VF and then
1558 		 * poll the status register to make sure that the reset
1559 		 * completed successfully. Due to internal HW FIFO flushes,
1560 		 * we must wait 10ms before the register will be valid.
1561 		 */
1562 		usleep_range(10000, 20000);
1563 		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1564 		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1565 			rsd = true;
1566 			break;
1567 		}
1568 	}
1569 
1570 	if (flr)
1571 		usleep_range(10000, 20000);
1572 
1573 	if (!rsd)
1574 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1575 			vf->vf_id);
1576 	usleep_range(10000, 20000);
1577 
1578 	/* On initial reset, we don't have any queues to disable */
1579 	if (vf->lan_vsi_idx != 0)
1580 		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1581 
1582 	i40e_cleanup_reset_vf(vf);
1583 
1584 	i40e_flush(hw);
1585 	usleep_range(20000, 40000);
1586 	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
1587 
1588 	return true;
1589 }
1590 
1591 /**
1592  * i40e_reset_all_vfs
1593  * @pf: pointer to the PF structure
1594  * @flr: VFLR was issued or not
1595  *
1596  * Reset all allocated VFs in one go. First, tell the hardware to reset each
1597  * VF, then do all the waiting in one chunk, and finally finish restoring each
1598  * VF after the wait. This is useful during PF routines which need to reset
1599  * all VFs, as otherwise it must perform these resets in a serialized fashion.
1600  *
1601  * Returns true if any VFs were reset, and false otherwise.
1602  **/
1603 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1604 {
1605 	struct i40e_hw *hw = &pf->hw;
1606 	struct i40e_vf *vf;
1607 	int i, v;
1608 	u32 reg;
1609 
1610 	/* If we don't have any VFs, then there is nothing to reset */
1611 	if (!pf->num_alloc_vfs)
1612 		return false;
1613 
1614 	/* If VFs have been disabled, there is no need to reset */
1615 	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1616 		return false;
1617 
1618 	/* Begin reset on all VFs at once */
1619 	for (v = 0; v < pf->num_alloc_vfs; v++) {
1620 		vf = &pf->vf[v];
1621 		/* If VF is being reset no need to trigger reset again */
1622 		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1623 			i40e_trigger_vf_reset(&pf->vf[v], flr);
1624 	}
1625 
1626 	/* HW requires some time to make sure it can flush the FIFO for a VF
1627 	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1628 	 * sequence to make sure that it has completed. We'll keep track of
1629 	 * the VFs using a simple iterator that increments once that VF has
1630 	 * finished resetting.
1631 	 */
1632 	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1633 		usleep_range(10000, 20000);
1634 
1635 		/* Check each VF in sequence, beginning with the VF that
1636 		 * failed the previous check.
1637 		 */
1638 		while (v < pf->num_alloc_vfs) {
1639 			vf = &pf->vf[v];
1640 			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
1641 				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1642 				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1643 					break;
1644 			}
1645 
1646 			/* If the current VF has finished resetting, move on
1647 			 * to the next VF in sequence.
1648 			 */
1649 			v++;
1650 		}
1651 	}
1652 
1653 	if (flr)
1654 		usleep_range(10000, 20000);
1655 
1656 	/* Display a warning if at least one VF didn't manage to reset in
1657 	 * time, but continue on with the operation.
1658 	 */
1659 	if (v < pf->num_alloc_vfs)
1660 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1661 			pf->vf[v].vf_id);
1662 	usleep_range(10000, 20000);
1663 
1664 	/* Begin disabling all the rings associated with VFs, but do not wait
1665 	 * between each VF.
1666 	 */
1667 	for (v = 0; v < pf->num_alloc_vfs; v++) {
1668 		/* On initial reset, we don't have any queues to disable */
1669 		if (pf->vf[v].lan_vsi_idx == 0)
1670 			continue;
1671 
1672 		/* If the VF is being reset in another thread just continue */
1673 		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
1674 			continue;
1675 
1676 		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1677 	}
1678 
1679 	/* Now that we've notified HW to disable all of the VF rings, wait
1680 	 * until they finish.
1681 	 */
1682 	for (v = 0; v < pf->num_alloc_vfs; v++) {
1683 		/* On initial reset, we don't have any queues to disable */
1684 		if (pf->vf[v].lan_vsi_idx == 0)
1685 			continue;
1686 
1687 		/* If the VF is being reset in another thread just continue */
1688 		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
1689 			continue;
1690 
1691 		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1692 	}
1693 
1694 	/* HW may need up to 50 ms to finish disabling the RX queues. We
1695 	 * minimize the wait by delaying only once for all VFs.
1696 	 */
1697 	mdelay(50);
1698 
1699 	/* Finish the reset on each VF */
1700 	for (v = 0; v < pf->num_alloc_vfs; v++) {
1701 		/* If the VF is being reset in another thread just continue */
1702 		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
1703 			continue;
1704 
1705 		i40e_cleanup_reset_vf(&pf->vf[v]);
1706 	}
1707 
1708 	i40e_flush(hw);
1709 	usleep_range(20000, 40000);
1710 	clear_bit(__I40E_VF_DISABLE, pf->state);
1711 
1712 	return true;
1713 }
1714 
1715 /**
1716  * i40e_free_vfs
1717  * @pf: pointer to the PF structure
1718  *
1719  * free VF resources
1720  **/
1721 void i40e_free_vfs(struct i40e_pf *pf)
1722 {
1723 	struct i40e_hw *hw = &pf->hw;
1724 	u32 reg_idx, bit_idx;
1725 	int i, tmp, vf_id;
1726 
1727 	if (!pf->vf)
1728 		return;
1729 
1730 	set_bit(__I40E_VFS_RELEASING, pf->state);
1731 	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1732 		usleep_range(1000, 2000);
1733 
1734 	i40e_notify_client_of_vf_enable(pf, 0);
1735 
1736 	/* Disable IOV before freeing resources. This lets any VF drivers
1737 	 * running in the host get themselves cleaned up before we yank
1738 	 * the carpet out from underneath their feet.
1739 	 */
1740 	if (!pci_vfs_assigned(pf->pdev))
1741 		pci_disable_sriov(pf->pdev);
1742 	else
1743 		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1744 
1745 	/* Amortize wait time by stopping all VFs at the same time */
1746 	for (i = 0; i < pf->num_alloc_vfs; i++) {
1747 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1748 			continue;
1749 
1750 		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1751 	}
1752 
1753 	for (i = 0; i < pf->num_alloc_vfs; i++) {
1754 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1755 			continue;
1756 
1757 		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1758 	}
1759 
1760 	/* free up VF resources */
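	/* Clear num_alloc_vfs first so that code validating VF ids against
	 * the count sees no allocated VFs while the array is torn down.
	 */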
1761 	tmp = pf->num_alloc_vfs;
1762 	pf->num_alloc_vfs = 0;
1763 	for (i = 0; i < tmp; i++) {
1764 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1765 			i40e_free_vf_res(&pf->vf[i]);
1766 		/* disable qp mappings */
1767 		i40e_disable_vf_mappings(&pf->vf[i]);
1768 	}
1769 
1770 	kfree(pf->vf);
1771 	pf->vf = NULL;
1772 
1773 	/* This check is for when the driver is unloaded while VFs are
1774 	 * assigned. Setting the number of VFs to 0 through sysfs is caught
1775 	 * before this function ever gets called.
1776 	 */
1777 	if (!pci_vfs_assigned(pf->pdev)) {
1778 		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1779 		 * work correctly when SR-IOV gets re-enabled.
1780 		 */
1781 		for (vf_id = 0; vf_id < tmp; vf_id++) {
1782 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1783 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1784 			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1785 		}
1786 	}
1787 	clear_bit(__I40E_VF_DISABLE, pf->state);
1788 	clear_bit(__I40E_VFS_RELEASING, pf->state);
1789 }
1790 
1791 #ifdef CONFIG_PCI_IOV
1792 /**
1793  * i40e_alloc_vfs
1794  * @pf: pointer to the PF structure
1795  * @num_alloc_vfs: number of VFs to allocate
1796  *
1797  * allocate VF resources
1798  **/
1799 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1800 {
1801 	struct i40e_vf *vfs;
1802 	int i, ret = 0;
1803 
1804 	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1805 	i40e_irq_dynamic_disable_icr0(pf);
1806 
1807 	/* Check to see if we're just allocating resources for extant VFs */
1808 	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1809 		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1810 		if (ret) {
1811 			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1812 			pf->num_alloc_vfs = 0;
1813 			goto err_iov;
1814 		}
1815 	}
1816 	/* allocate memory */
1817 	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1818 	if (!vfs) {
1819 		ret = -ENOMEM;
1820 		goto err_alloc;
1821 	}
1822 	pf->vf = vfs;
1823 
1824 	/* apply default profile */
1825 	for (i = 0; i < num_alloc_vfs; i++) {
1826 		vfs[i].pf = pf;
1827 		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1828 		vfs[i].vf_id = i;
1829 
1830 		/* assign default capabilities */
1831 		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1832 		vfs[i].spoofchk = true;
1833 
1834 		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1836 	}
1837 	pf->num_alloc_vfs = num_alloc_vfs;
1838 
1839 	/* VF resources get allocated during reset */
1840 	i40e_reset_all_vfs(pf, false);
1841 
1842 	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1843 
1844 err_alloc:
1845 	if (ret)
1846 		i40e_free_vfs(pf);
1847 err_iov:
1848 	/* Re-enable interrupt 0. */
1849 	i40e_irq_dynamic_enable_icr0(pf);
1850 	return ret;
1851 }
1852 
1853 #endif
1854 /**
1855  * i40e_pci_sriov_enable
1856  * @pdev: pointer to a pci_dev structure
1857  * @num_vfs: number of VFs to allocate
1858  *
1859  * Enable or change the number of VFs
1860  **/
1861 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1862 {
1863 #ifdef CONFIG_PCI_IOV
1864 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1865 	int pre_existing_vfs = pci_num_vf(pdev);
1866 	int err = 0;
1867 
1868 	if (test_bit(__I40E_TESTING, pf->state)) {
1869 		dev_warn(&pdev->dev,
1870 			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1871 		err = -EPERM;
1872 		goto err_out;
1873 	}
1874 
1875 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1876 		i40e_free_vfs(pf);
1877 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1878 		goto out;
1879 
1880 	if (num_vfs > pf->num_req_vfs) {
1881 		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1882 			 num_vfs, pf->num_req_vfs);
1883 		err = -EPERM;
1884 		goto err_out;
1885 	}
1886 
1887 	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1888 	err = i40e_alloc_vfs(pf, num_vfs);
1889 	if (err) {
1890 		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1891 		goto err_out;
1892 	}
1893 
1894 out:
1895 	return num_vfs;
1896 
1897 err_out:
1898 	return err;
1899 #endif
1900 	return 0;
1901 }
1902 
1903 /**
1904  * i40e_pci_sriov_configure
1905  * @pdev: pointer to a pci_dev structure
1906  * @num_vfs: number of VFs to allocate
1907  *
1908  * Enable or change the number of VFs. Called when the user updates the number
1909  * of VFs in sysfs.
1910  **/
1911 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1912 {
1913 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1914 	int ret = 0;
1915 
1916 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1917 		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1918 		return -EAGAIN;
1919 	}
1920 
1921 	if (num_vfs) {
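		/* VFs require the switch to be in VEB mode; the mode change
		 * only takes effect through the PF reset and rebuild below.
		 */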
1922 		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1923 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1924 			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1925 		}
1926 		ret = i40e_pci_sriov_enable(pdev, num_vfs);
1927 		goto sriov_configure_out;
1928 	}
1929 
1930 	if (!pci_vfs_assigned(pf->pdev)) {
1931 		i40e_free_vfs(pf);
1932 		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1933 		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1934 	} else {
1935 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1936 		ret = -EINVAL;
1937 		goto sriov_configure_out;
1938 	}
1939 sriov_configure_out:
1940 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1941 	return ret;
1942 }
1943 
1944 /***********************virtual channel routines******************/
1945 
1946 /**
1947  * i40e_vc_send_msg_to_vf
1948  * @vf: pointer to the VF info
1949  * @v_opcode: virtual channel opcode
1950  * @v_retval: virtual channel return value
1951  * @msg: pointer to the msg buffer
1952  * @msglen: msg length
1953  *
1954  * send msg to VF
1955  **/
1956 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1957 				  u32 v_retval, u8 *msg, u16 msglen)
1958 {
1959 	struct i40e_pf *pf;
1960 	struct i40e_hw *hw;
1961 	int abs_vf_id;
1962 	int aq_ret;
1963 
1964 	/* validate the request */
1965 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1966 		return -EINVAL;
1967 
1968 	pf = vf->pf;
1969 	hw = &pf->hw;
1970 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1971 
1972 	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1973 					msg, msglen, NULL);
1974 	if (aq_ret) {
1975 		dev_info(&pf->pdev->dev,
1976 			 "Unable to send the message to VF %d aq_err %d\n",
1977 			 vf->vf_id, pf->hw.aq.asq_last_status);
1978 		return -EIO;
1979 	}
1980 
1981 	return 0;
1982 }
1983 
1984 /**
1985  * i40e_vc_send_resp_to_vf
1986  * @vf: pointer to the VF info
1987  * @opcode: operation code
1988  * @retval: return value
1989  *
1990  * send resp msg to VF
1991  **/
1992 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1993 				   enum virtchnl_ops opcode,
1994 				   int retval)
1995 {
1996 	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1997 }
1998 
1999 /**
2000  * i40e_sync_vf_state
2001  * @vf: pointer to the VF info
2002  * @state: VF state
2003  *
2004  * Called from a VF message to synchronize the service with a potential
2005  * VF reset state
2006  **/
2007 static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
2008 {
2009 	int i;
2010 
2011 	/* Some message handlers require a VF state bit to be set. It is
2012 	 * possible that this bit is cleared while a VF reset is in
2013 	 * progress, so wait until the end of the reset to handle the
2014 	 * request message correctly.
2015 	 */
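	/* This polls for up to I40E_VF_STATE_WAIT_COUNT iterations of a
	 * 10-20 ms sleep before giving up.
	 */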
2016 	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
2017 		if (test_bit(state, &vf->vf_states))
2018 			return true;
2019 		usleep_range(10000, 20000);
2020 	}
2021 
2022 	return test_bit(state, &vf->vf_states);
2023 }
2024 
2025 /**
2026  * i40e_vc_get_version_msg
2027  * @vf: pointer to the VF info
2028  * @msg: pointer to the msg buffer
2029  *
2030  * called from the VF to request the API version used by the PF
2031  **/
2032 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
2033 {
2034 	struct virtchnl_version_info info = {
2035 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2036 	};
2037 
2038 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
2039 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2040 	if (VF_IS_V10(&vf->vf_ver))
2041 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2042 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2043 				      0, (u8 *)&info,
2044 				      sizeof(struct virtchnl_version_info));
2045 }
2046 
2047 /**
2048  * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2049  * @vf: pointer to VF structure
2050  **/
2051 static void i40e_del_qch(struct i40e_vf *vf)
2052 {
2053 	struct i40e_pf *pf = vf->pf;
2054 	int i;
2055 
2056 	/* The first element in the array belongs to the primary VF VSI and we
2057 	 * shouldn't delete it. We should, however, delete the rest of the VSIs.
2058 	 */
2059 	for (i = 1; i < vf->num_tc; i++) {
2060 		if (vf->ch[i].vsi_idx) {
2061 			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2062 			vf->ch[i].vsi_idx = 0;
2063 			vf->ch[i].vsi_id = 0;
2064 		}
2065 	}
2066 }
2067 
2068 /**
2069  * i40e_vc_get_max_frame_size
2070  * @vf: pointer to the VF
2071  *
2072  * Max frame size is determined based on the current port's max frame size and
2073  * whether a port VLAN is configured on this VF. The VF is not aware whether
2074  * it's in a port VLAN, so the PF needs to account for this both in max frame
2075  * size checks and when sending the max frame size to the VF.
2076  **/
2077 static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
2078 {
2079 	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
2080 
2081 	if (vf->port_vlan_id)
2082 		max_frame_size -= VLAN_HLEN;
2083 
2084 	return max_frame_size;
2085 }
2086 
2087 /**
2088  * i40e_vc_get_vf_resources_msg
2089  * @vf: pointer to the VF info
2090  * @msg: pointer to the msg buffer
2091  *
2092  * called from the VF to request its resources
2093  **/
2094 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
2095 {
2096 	struct virtchnl_vf_resource *vfres = NULL;
2097 	struct i40e_pf *pf = vf->pf;
2098 	struct i40e_vsi *vsi;
2099 	int num_vsis = 1;
2100 	int aq_ret = 0;
2101 	size_t len = 0;
2102 	int ret;
2103 
2104 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
2105 		aq_ret = -EINVAL;
2106 		goto err;
2107 	}
2108 
2109 	len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
2110 	vfres = kzalloc(len, GFP_KERNEL);
2111 	if (!vfres) {
2112 		aq_ret = -ENOMEM;
2113 		len = 0;
2114 		goto err;
2115 	}
2116 	if (VF_IS_V11(&vf->vf_ver))
2117 		vf->driver_caps = *(u32 *)msg;
2118 	else
2119 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2120 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
2121 				  VIRTCHNL_VF_OFFLOAD_VLAN;
2122 
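	/* The blocks below intersect what the VF driver advertised with what
	 * the PF and hardware support; only the agreed set is reported back.
	 */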
2123 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2124 	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2125 	vsi = pf->vsi[vf->lan_vsi_idx];
2126 	if (!vsi->info.pvid)
2127 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2128 
2129 	if (i40e_vf_client_capable(pf, vf->vf_id) &&
2130 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
2131 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
2132 		set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2133 	} else {
2134 		clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2135 	}
2136 
2137 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2138 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2139 	} else {
2140 		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
2141 		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2142 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2143 		else
2144 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2145 	}
2146 
2147 	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
2148 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2149 			vfres->vf_cap_flags |=
2150 				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2151 	}
2152 
2153 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2154 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2155 
2156 	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
2157 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2158 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2159 
2160 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
2161 		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
2162 			dev_err(&pf->pdev->dev,
2163 				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2164 				 vf->vf_id);
2165 			aq_ret = -EINVAL;
2166 			goto err;
2167 		}
2168 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2169 	}
2170 
2171 	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
2172 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2173 			vfres->vf_cap_flags |=
2174 					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2175 	}
2176 
2177 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2178 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2179 
2180 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2181 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2182 
2183 	vfres->num_vsis = num_vsis;
2184 	vfres->num_queue_pairs = vf->num_queue_pairs;
2185 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2186 	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2187 	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2188 	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
2189 
2190 	if (vf->lan_vsi_idx) {
2191 		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2192 		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2193 		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2194 		/* VFs only use TC 0 */
2195 		vfres->vsi_res[0].qset_handle
2196 					  = le16_to_cpu(vsi->info.qs_handle[0]);
2197 		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
2198 			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2199 			eth_zero_addr(vf->default_lan_addr.addr);
2200 		}
2201 		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2202 				vf->default_lan_addr.addr);
2203 	}
2204 	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2205 
2206 err:
2207 	/* send the response back to the VF */
2208 	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2209 				     aq_ret, (u8 *)vfres, len);
2210 
2211 	kfree(vfres);
2212 	return ret;
2213 }
2214 
2215 /**
2216  * i40e_vc_config_promiscuous_mode_msg
2217  * @vf: pointer to the VF info
2218  * @msg: pointer to the msg buffer
2219  *
2220  * called from the VF to configure the promiscuous mode of
2221  * VF vsis
2222  **/
2223 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2224 {
2225 	struct virtchnl_promisc_info *info =
2226 	    (struct virtchnl_promisc_info *)msg;
2227 	struct i40e_pf *pf = vf->pf;
2228 	bool allmulti = false;
2229 	bool alluni = false;
2230 	int aq_ret = 0;
2231 
2232 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2233 		aq_ret = -EINVAL;
2234 		goto err_out;
2235 	}
2236 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2237 		dev_err(&pf->pdev->dev,
2238 			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
2239 			vf->vf_id);
2240 
2241 		/* Lie to the VF on purpose, because this is an error we can
2242 		 * ignore. Unprivileged VF is not a virtual channel error.
2243 		 */
2244 		aq_ret = 0;
2245 		goto err_out;
2246 	}
2247 
2248 	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2249 		aq_ret = -EINVAL;
2250 		goto err_out;
2251 	}
2252 
2253 	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2254 		aq_ret = -EINVAL;
2255 		goto err_out;
2256 	}
2257 
2258 	/* Multicast promiscuous handling */
2259 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2260 		allmulti = true;
2261 
2262 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2263 		alluni = true;
2264 	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2265 						 alluni);
2266 	if (aq_ret)
2267 		goto err_out;
2268 
2269 	if (allmulti) {
2270 		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2271 				      &vf->vf_states))
2272 			dev_info(&pf->pdev->dev,
2273 				 "VF %d successfully set multicast promiscuous mode\n",
2274 				 vf->vf_id);
2275 	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2276 				      &vf->vf_states))
2277 		dev_info(&pf->pdev->dev,
2278 			 "VF %d successfully unset multicast promiscuous mode\n",
2279 			 vf->vf_id);
2280 
2281 	if (alluni) {
2282 		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2283 				      &vf->vf_states))
2284 			dev_info(&pf->pdev->dev,
2285 				 "VF %d successfully set unicast promiscuous mode\n",
2286 				 vf->vf_id);
2287 	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2288 				      &vf->vf_states))
2289 		dev_info(&pf->pdev->dev,
2290 			 "VF %d successfully unset unicast promiscuous mode\n",
2291 			 vf->vf_id);
2292 
2293 err_out:
2294 	/* send the response to the VF */
2295 	return i40e_vc_send_resp_to_vf(vf,
2296 				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2297 				       aq_ret);
2298 }
2299 
2300 /**
2301  * i40e_vc_config_queues_msg
2302  * @vf: pointer to the VF info
2303  * @msg: pointer to the msg buffer
2304  *
2305  * called from the VF to configure the rx/tx
2306  * queues
2307  **/
2308 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2309 {
2310 	struct virtchnl_vsi_queue_config_info *qci =
2311 	    (struct virtchnl_vsi_queue_config_info *)msg;
2312 	struct virtchnl_queue_pair_info *qpi;
2313 	u16 vsi_id, vsi_queue_id = 0;
2314 	struct i40e_pf *pf = vf->pf;
2315 	int i, j = 0, idx = 0;
2316 	struct i40e_vsi *vsi;
2317 	u16 num_qps_all = 0;
2318 	int aq_ret = 0;
2319 
2320 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2321 		aq_ret = -EINVAL;
2322 		goto error_param;
2323 	}
2324 
2325 	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2326 		aq_ret = -EINVAL;
2327 		goto error_param;
2328 	}
2329 
2330 	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2331 		aq_ret = -EINVAL;
2332 		goto error_param;
2333 	}
2334 
2335 	if (vf->adq_enabled) {
2336 		for (i = 0; i < vf->num_tc; i++)
2337 			num_qps_all += vf->ch[i].num_qps;
2338 		if (num_qps_all != qci->num_queue_pairs) {
2339 			aq_ret = -EINVAL;
2340 			goto error_param;
2341 		}
2342 	}
2343 
2344 	vsi_id = qci->vsi_id;
2345 
2346 	for (i = 0; i < qci->num_queue_pairs; i++) {
2347 		qpi = &qci->qpair[i];
2348 
2349 		if (!vf->adq_enabled) {
2350 			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2351 						      qpi->txq.queue_id)) {
2352 				aq_ret = -EINVAL;
2353 				goto error_param;
2354 			}
2355 
2356 			vsi_queue_id = qpi->txq.queue_id;
2357 
2358 			if (qpi->txq.vsi_id != qci->vsi_id ||
2359 			    qpi->rxq.vsi_id != qci->vsi_id ||
2360 			    qpi->rxq.queue_id != vsi_queue_id) {
2361 				aq_ret = -EINVAL;
2362 				goto error_param;
2363 			}
2364 		}
2365 
2366 		if (vf->adq_enabled) {
2367 			if (idx >= ARRAY_SIZE(vf->ch)) {
2368 				aq_ret = -ENODEV;
2369 				goto error_param;
2370 			}
2371 			vsi_id = vf->ch[idx].vsi_id;
2372 		}
2373 
2374 		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2375 					     &qpi->rxq) ||
2376 		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2377 					     &qpi->txq)) {
2378 			aq_ret = -EINVAL;
2379 			goto error_param;
2380 		}
2381 
2382 		/* For ADq there can be up to 4 VSIs with a max of 4 queues
2383 		 * each. The VF does not know about these additional VSIs;
2384 		 * all it cares about is its own queues. The PF configures
2385 		 * these queues on the appropriate VSIs based on the TC mapping.
2386 		 */
2387 		if (vf->adq_enabled) {
2388 			if (idx >= ARRAY_SIZE(vf->ch)) {
2389 				aq_ret = -ENODEV;
2390 				goto error_param;
2391 			}
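			/* Move on to the next channel VSI once all queue
			 * pairs of the current channel have been configured.
			 */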
2392 			if (j == (vf->ch[idx].num_qps - 1)) {
2393 				idx++;
2394 				j = 0; /* resetting the queue count */
2395 				vsi_queue_id = 0;
2396 			} else {
2397 				j++;
2398 				vsi_queue_id++;
2399 			}
2400 		}
2401 	}
2402 	/* set vsi num_queue_pairs in use to num configured by VF */
2403 	if (!vf->adq_enabled) {
2404 		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2405 			qci->num_queue_pairs;
2406 	} else {
2407 		for (i = 0; i < vf->num_tc; i++) {
2408 			vsi = pf->vsi[vf->ch[i].vsi_idx];
2409 			vsi->num_queue_pairs = vf->ch[i].num_qps;
2410 
2411 			if (i40e_update_adq_vsi_queues(vsi, i)) {
2412 				aq_ret = -EIO;
2413 				goto error_param;
2414 			}
2415 		}
2416 	}
2417 
2418 error_param:
2419 	/* send the response to the VF */
2420 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2421 				       aq_ret);
2422 }
2423 
2424 /**
2425  * i40e_validate_queue_map - check queue map is valid
2426  * @vf: the VF structure pointer
2427  * @vsi_id: vsi id
2428  * @queuemap: Tx or Rx queue map
2429  *
2430  * check if Tx or Rx queue map is valid
2431  **/
2432 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2433 				   unsigned long queuemap)
2434 {
2435 	u16 vsi_queue_id, queue_id;
2436 
2437 	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2438 		if (vf->adq_enabled) {
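			/* For ADq the VF's flat queue bitmap spans all of its
			 * channel VSIs; translate the bit index into the
			 * owning channel and its local queue id.
			 */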
2439 			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2440 			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2441 		} else {
2442 			queue_id = vsi_queue_id;
2443 		}
2444 
2445 		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2446 			return -EINVAL;
2447 	}
2448 
2449 	return 0;
2450 }
2451 
2452 /**
2453  * i40e_vc_config_irq_map_msg
2454  * @vf: pointer to the VF info
2455  * @msg: pointer to the msg buffer
2456  *
2457  * called from the VF to configure the irq to
2458  * queue map
2459  **/
2460 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2461 {
2462 	struct virtchnl_irq_map_info *irqmap_info =
2463 	    (struct virtchnl_irq_map_info *)msg;
2464 	struct virtchnl_vector_map *map;
2465 	int aq_ret = 0;
2466 	u16 vsi_id;
2467 	int i;
2468 
2469 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2470 		aq_ret = -EINVAL;
2471 		goto error_param;
2472 	}
2473 
2474 	if (irqmap_info->num_vectors >
2475 	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
2476 		aq_ret = -EINVAL;
2477 		goto error_param;
2478 	}
2479 
2480 	for (i = 0; i < irqmap_info->num_vectors; i++) {
2481 		map = &irqmap_info->vecmap[i];
2482 		/* validate msg params */
2483 		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2484 		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2485 			aq_ret = -EINVAL;
2486 			goto error_param;
2487 		}
2488 		vsi_id = map->vsi_id;
2489 
2490 		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2491 			aq_ret = -EINVAL;
2492 			goto error_param;
2493 		}
2494 
2495 		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2496 			aq_ret = -EINVAL;
2497 			goto error_param;
2498 		}
2499 
2500 		i40e_config_irq_link_list(vf, vsi_id, map);
2501 	}
2502 error_param:
2503 	/* send the response to the VF */
2504 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2505 				       aq_ret);
2506 }
2507 
2508 /**
2509  * i40e_ctrl_vf_tx_rings
2510  * @vsi: the SRIOV VSI being configured
2511  * @q_map: bit map of the queues to be enabled
2512  * @enable: start or stop the queue
2513  **/
2514 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2515 				 bool enable)
2516 {
2517 	struct i40e_pf *pf = vsi->back;
2518 	int ret = 0;
2519 	u16 q_id;
2520 
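	/* q_map is VF-relative; vsi->base_queue translates each set bit
	 * into an absolute PF queue index.
	 */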
2521 	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2522 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
2523 					     vsi->base_queue + q_id,
2524 					     false /*is xdp*/, enable);
2525 		if (ret)
2526 			break;
2527 	}
2528 	return ret;
2529 }
2530 
2531 /**
2532  * i40e_ctrl_vf_rx_rings
2533  * @vsi: the SRIOV VSI being configured
2534  * @q_map: bit map of the queues to be enabled
2535  * @enable: start or stop the queue
2536  **/
2537 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2538 				 bool enable)
2539 {
2540 	struct i40e_pf *pf = vsi->back;
2541 	int ret = 0;
2542 	u16 q_id;
2543 
2544 	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2545 		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2546 					     enable);
2547 		if (ret)
2548 			break;
2549 	}
2550 	return ret;
2551 }
2552 
2553 /**
2554  * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2555  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2556  *
2557  * Returns true if validation was successful, else false.
2558  **/
2559 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2560 {
2561 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2562 	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2563 	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2564 		return false;
2565 
2566 	return true;
2567 }
2568 
2569 /**
2570  * i40e_vc_enable_queues_msg
2571  * @vf: pointer to the VF info
2572  * @msg: pointer to the msg buffer
2573  *
2574  * called from the VF to enable all or specific queue(s)
2575  **/
2576 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2577 {
2578 	struct virtchnl_queue_select *vqs =
2579 	    (struct virtchnl_queue_select *)msg;
2580 	struct i40e_pf *pf = vf->pf;
2581 	int aq_ret = 0;
2582 	int i;
2583 
2584 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2585 		aq_ret = -EINVAL;
2586 		goto error_param;
2587 	}
2588 
2589 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2590 		aq_ret = -EINVAL;
2591 		goto error_param;
2592 	}
2593 
2594 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2595 		aq_ret = -EINVAL;
2596 		goto error_param;
2597 	}
2598 
2599 	/* Use the queue bit map sent by the VF */
2600 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2601 				  true)) {
2602 		aq_ret = -EIO;
2603 		goto error_param;
2604 	}
2605 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2606 				  true)) {
2607 		aq_ret = -EIO;
2608 		goto error_param;
2609 	}
2610 
2611 	/* need to start the rings for additional ADq VSI's as well */
2612 	if (vf->adq_enabled) {
2613 		/* zero belongs to LAN VSI */
2614 		for (i = 1; i < vf->num_tc; i++) {
2615 			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2616 				aq_ret = -EIO;
2617 		}
2618 	}
2619 
2620 error_param:
2621 	/* send the response to the VF */
2622 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2623 				       aq_ret);
2624 }
2625 
2626 /**
2627  * i40e_vc_disable_queues_msg
2628  * @vf: pointer to the VF info
2629  * @msg: pointer to the msg buffer
2630  *
2631  * called from the VF to disable all or specific
2632  * queue(s)
2633  **/
2634 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2635 {
2636 	struct virtchnl_queue_select *vqs =
2637 	    (struct virtchnl_queue_select *)msg;
2638 	struct i40e_pf *pf = vf->pf;
2639 	int aq_ret = 0;
2640 
2641 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2642 		aq_ret = -EINVAL;
2643 		goto error_param;
2644 	}
2645 
2646 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2647 		aq_ret = -EINVAL;
2648 		goto error_param;
2649 	}
2650 
2651 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2652 		aq_ret = -EINVAL;
2653 		goto error_param;
2654 	}
2655 
2656 	/* Use the queue bit map sent by the VF */
2657 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2658 				  false)) {
2659 		aq_ret = -EIO;
2660 		goto error_param;
2661 	}
2662 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2663 				  false)) {
2664 		aq_ret = -EIO;
2665 		goto error_param;
2666 	}
2667 error_param:
2668 	/* send the response to the VF */
2669 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2670 				       aq_ret);
2671 }
2672 
2673 /**
2674  * i40e_check_enough_queue - check for a large enough contiguous queue region
2675  * @vf: pointer to the VF info
2676  * @needed: the number of items needed
2677  *
2678  * Returns the base item index of the queue, or negative for error
2679  **/
2680 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2681 {
2682 	unsigned int i, cur_queues, more, pool_size;
2683 	struct i40e_lump_tracking *pile;
2684 	struct i40e_pf *pf = vf->pf;
2685 	struct i40e_vsi *vsi;
2686 
2687 	vsi = pf->vsi[vf->lan_vsi_idx];
2688 	cur_queues = vsi->alloc_queue_pairs;
2689 
2690 	/* if the currently allocated queues already satisfy the need */
2691 	if (cur_queues >= needed)
2692 		return vsi->base_queue;
2693 
2694 	pile = pf->qp_pile;
2695 	if (cur_queues > 0) {
2696 		/* if some queues are already allocated, check whether there
2697 		 * are enough free queues immediately behind the allocated
2698 		 * range to grow it in place.
2699 		 */
2700 		more = needed - cur_queues;
2701 		for (i = vsi->base_queue + cur_queues;
2702 			i < pile->num_entries; i++) {
2703 			if (pile->list[i] & I40E_PILE_VALID_BIT)
2704 				break;
2705 
2706 			if (more-- == 1)
2707 				/* there is enough */
2708 				return vsi->base_queue;
2709 		}
2710 	}
2711 
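	/* Cannot grow the current allocation in place; scan the whole pile
	 * for any contiguous run of 'needed' free entries.
	 */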
2712 	pool_size = 0;
2713 	for (i = 0; i < pile->num_entries; i++) {
2714 		if (pile->list[i] & I40E_PILE_VALID_BIT) {
2715 			pool_size = 0;
2716 			continue;
2717 		}
2718 		if (needed <= ++pool_size)
2719 			/* there is enough */
2720 			return i;
2721 	}
2722 
2723 	return -ENOMEM;
2724 }
2725 
2726 /**
2727  * i40e_vc_request_queues_msg
2728  * @vf: pointer to the VF info
2729  * @msg: pointer to the msg buffer
2730  *
2731  * VFs get a default number of queues but can use this message to request a
2732  * different number. If the request is successful, the PF will reset the VF
2733  * and return 0. If unsuccessful, the PF will send a message informing the
2734  * VF of the number of available queues and return the result of that send.
2735  **/
2736 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2737 {
2738 	struct virtchnl_vf_res_request *vfres =
2739 		(struct virtchnl_vf_res_request *)msg;
2740 	u16 req_pairs = vfres->num_queue_pairs;
2741 	u8 cur_pairs = vf->num_queue_pairs;
2742 	struct i40e_pf *pf = vf->pf;
2743 
2744 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2745 		return -EINVAL;
2746 
2747 	if (req_pairs > I40E_MAX_VF_QUEUES) {
2748 		dev_err(&pf->pdev->dev,
2749 			"VF %d tried to request more than %d queues.\n",
2750 			vf->vf_id,
2751 			I40E_MAX_VF_QUEUES);
2752 		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2753 	} else if (req_pairs - cur_pairs > pf->queues_left) {
2754 		dev_warn(&pf->pdev->dev,
2755 			 "VF %d requested %d more queues, but only %d left.\n",
2756 			 vf->vf_id,
2757 			 req_pairs - cur_pairs,
2758 			 pf->queues_left);
2759 		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2760 	} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2761 		dev_warn(&pf->pdev->dev,
2762 			 "VF %d requested %d more queues, but there is not enough for it.\n",
2763 			 vf->vf_id,
2764 			 req_pairs - cur_pairs);
2765 		vfres->num_queue_pairs = cur_pairs;
2766 	} else {
2767 		/* successful request */
2768 		vf->num_req_queues = req_pairs;
2769 		i40e_vc_reset_vf(vf, true);
2770 		return 0;
2771 	}
2772 
2773 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2774 				      (u8 *)vfres, sizeof(*vfres));
2775 }
2776 
2777 /**
2778  * i40e_vc_get_stats_msg
2779  * @vf: pointer to the VF info
2780  * @msg: pointer to the msg buffer
2781  *
2782  * called from the VF to get vsi stats
2783  **/
2784 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2785 {
2786 	struct virtchnl_queue_select *vqs =
2787 	    (struct virtchnl_queue_select *)msg;
2788 	struct i40e_pf *pf = vf->pf;
2789 	struct i40e_eth_stats stats;
2790 	int aq_ret = 0;
2791 	struct i40e_vsi *vsi;
2792 
2793 	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2794 
2795 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2796 		aq_ret = -EINVAL;
2797 		goto error_param;
2798 	}
2799 
2800 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2801 		aq_ret = -EINVAL;
2802 		goto error_param;
2803 	}
2804 
2805 	vsi = pf->vsi[vf->lan_vsi_idx];
2806 	if (!vsi) {
2807 		aq_ret = -EINVAL;
2808 		goto error_param;
2809 	}
2810 	i40e_update_eth_stats(vsi);
2811 	stats = vsi->eth_stats;
2812 
2813 error_param:
2814 	/* send the response back to the VF */
2815 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2816 				      (u8 *)&stats, sizeof(stats));
2817 }
2818 
2819 #define I40E_MAX_MACVLAN_PER_HW 3072
2820 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
2821 	(num_ports))
2822 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2823  * program: 16 for multicast, 1 for its own MAC, 1 for broadcast
2824  */
2825 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2826 #define I40E_VC_MAX_VLAN_PER_VF 16
2827 
2828 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports)		\
2829 ({	typeof(vf_num) vf_num_ = (vf_num);				\
2830 	typeof(num_ports) num_ports_ = (num_ports);			\
2831 	((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ *		\
2832 	I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) +			\
2833 	I40E_VC_MAX_MAC_ADDR_PER_VF; })
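/* Worked example (hypothetical configuration, not a driver default): with
 * 2 ports and 32 allocated VFs, I40E_MAX_MACVLAN_PER_PF(2) = 1536, so each
 * trusted VF may program ((1536 - 32 * 18) / 32) + 18 = 48 filters total.
 */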
2834 /**
2835  * i40e_check_vf_permission
2836  * @vf: pointer to the VF info
2837  * @al: MAC address list from virtchnl
2838  *
2839  * Check that the given list of MAC addresses is allowed. Returns an error
2840  * if any address in the list is not valid. Checks the following conditions:
2841  *
2842  * 1) broadcast and zero addresses are never valid
2843  * 2) unicast addresses are not allowed if the VMM has administratively set
2844  *    the VF MAC address, unless the VF is marked as privileged.
2845  * 3) There is enough space to add all the addresses.
2846  *
2847  * Note that to guarantee consistency, it is expected this function be called
2848  * while holding the mac_filter_hash_lock, as otherwise the current number of
2849  * addresses might not be accurate.
2850  **/
2851 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2852 					   struct virtchnl_ether_addr_list *al)
2853 {
2854 	struct i40e_pf *pf = vf->pf;
2855 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2856 	struct i40e_hw *hw = &pf->hw;
2857 	int mac2add_cnt = 0;
2858 	int i;
2859 
2860 	for (i = 0; i < al->num_elements; i++) {
2861 		struct i40e_mac_filter *f;
2862 		u8 *addr = al->list[i].addr;
2863 
2864 		if (is_broadcast_ether_addr(addr) ||
2865 		    is_zero_ether_addr(addr)) {
2866 			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2867 				addr);
2868 			return -EINVAL;
2869 		}
2870 
2871 		/* If the host VMM administrator has set the VF MAC address
2872 		 * administratively via the ndo_set_vf_mac command, then deny
2873 		 * the VF permission to add or delete unicast MAC addresses,
2874 		 * unless the VF is privileged, in which case it may do either.
2875 		 * The VF may request to set the MAC address filter already
2876 		 * assigned to it, so do not return an error in that case.
2877 		 */
2878 		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2879 		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2880 		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2881 			dev_err(&pf->pdev->dev,
2882 				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2883 			return -EPERM;
2884 		}
2885 
2886 		/* count filters that will really be added */
2887 		f = i40e_find_mac(vsi, addr);
2888 		if (!f)
2889 			++mac2add_cnt;
2890 	}
2891 
2892 	/* If this VF is not privileged, then we can't add more than a limited
2893 	 * number of addresses. Check to make sure that the additions do not
2894 	 * push us over the limit.
2895 	 */
2896 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2897 		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2898 		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
2899 			dev_err(&pf->pdev->dev,
2900 				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2901 			return -EPERM;
2902 		}
2903 	/* If this VF is trusted, it can use more resources than an untrusted
2904 	 * VF. However, to ensure that every trusted VF gets an appropriate
2905 	 * number of resources, divide the whole pool of resources per port
2906 	 * and then across all VFs.
2907 	 */
2908 	} else {
2909 		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2910 		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2911 						       hw->num_ports)) {
2912 			dev_err(&pf->pdev->dev,
2913 				"Cannot add more MAC addresses, trusted VF exhausted its resources\n");
2914 			return -EPERM;
2915 		}
2916 	}
2917 	return 0;
2918 }
2919 
2920 /**
2921  * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
2922  * @vc_ether_addr: used to extract the type
2923  **/
2924 static u8
2925 i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
2926 {
2927 	return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
2928 }
2929 
2930 /**
2931  * i40e_is_vc_addr_legacy
2932  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2933  *
2934  * check if the MAC address is from an older VF
2935  **/
2936 static bool
2937 i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
2938 {
2939 	return i40e_vc_ether_addr_type(vc_ether_addr) ==
2940 		VIRTCHNL_ETHER_ADDR_LEGACY;
2941 }
2942 
2943 /**
2944  * i40e_is_vc_addr_primary
2945  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2946  *
2947  * check if the MAC address is the VF's primary MAC
2948  * This function should only be called when the MAC address in
2949  * virtchnl_ether_addr is a valid unicast MAC
2950  **/
2951 static bool
2952 i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
2953 {
2954 	return i40e_vc_ether_addr_type(vc_ether_addr) ==
2955 		VIRTCHNL_ETHER_ADDR_PRIMARY;
2956 }
2957 
2958 /**
2959  * i40e_update_vf_mac_addr
2960  * @vf: VF to update
2961  * @vc_ether_addr: structure from VIRTCHNL with MAC to add
2962  *
2963  * update the VF's cached hardware MAC if allowed
2964  **/
2965 static void
2966 i40e_update_vf_mac_addr(struct i40e_vf *vf,
2967 			struct virtchnl_ether_addr *vc_ether_addr)
2968 {
2969 	u8 *mac_addr = vc_ether_addr->addr;
2970 
2971 	if (!is_valid_ether_addr(mac_addr))
2972 		return;
2973 
2974 	/* If the request to add a MAC filter is a primary request, update the
2975 	 * VF's default MAC address with the requested one. If it is a legacy
2976 	 * request, update the default MAC only if the current default is empty.
2977 	 */
2978 	if (i40e_is_vc_addr_primary(vc_ether_addr)) {
2979 		ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
2980 	} else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
2981 		if (is_zero_ether_addr(vf->default_lan_addr.addr))
2982 			ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
2983 	}
2984 }
2985 
2986 /**
2987  * i40e_vc_add_mac_addr_msg
2988  * @vf: pointer to the VF info
2989  * @msg: pointer to the msg buffer
2990  *
2991  * add guest mac address filter
2992  **/
2993 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2994 {
2995 	struct virtchnl_ether_addr_list *al =
2996 	    (struct virtchnl_ether_addr_list *)msg;
2997 	struct i40e_pf *pf = vf->pf;
2998 	struct i40e_vsi *vsi = NULL;
2999 	int ret = 0;
3000 	int i;
3001 
3002 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3003 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3004 		ret = -EINVAL;
3005 		goto error_param;
3006 	}
3007 
3008 	vsi = pf->vsi[vf->lan_vsi_idx];
3009 
3010 	/* Lock once, because every function inside the for loop accesses
3011 	 * the VSI's MAC filter list, which must be protected by the same lock.
3012 	 */
3013 	spin_lock_bh(&vsi->mac_filter_hash_lock);
3014 
3015 	ret = i40e_check_vf_permission(vf, al);
3016 	if (ret) {
3017 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3018 		goto error_param;
3019 	}
3020 
3021 	/* add new addresses to the list */
3022 	for (i = 0; i < al->num_elements; i++) {
3023 		struct i40e_mac_filter *f;
3024 
3025 		f = i40e_find_mac(vsi, al->list[i].addr);
3026 		if (!f) {
3027 			f = i40e_add_mac_filter(vsi, al->list[i].addr);
3028 
3029 			if (!f) {
3030 				dev_err(&pf->pdev->dev,
3031 					"Unable to add MAC filter %pM for VF %d\n",
3032 					al->list[i].addr, vf->vf_id);
3033 				ret = -EINVAL;
3034 				spin_unlock_bh(&vsi->mac_filter_hash_lock);
3035 				goto error_param;
3036 			}
3037 		}
3038 		i40e_update_vf_mac_addr(vf, &al->list[i]);
3039 	}
3040 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3041 
3042 	/* program the updated filter list */
3043 	ret = i40e_sync_vsi_filters(vsi);
3044 	if (ret)
3045 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3046 			vf->vf_id, ret);
3047 
3048 error_param:
3049 	/* send the response to the VF */
3050 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3051 				      ret, NULL, 0);
3052 }
3053 
3054 /**
3055  * i40e_vc_del_mac_addr_msg
3056  * @vf: pointer to the VF info
3057  * @msg: pointer to the msg buffer
3058  *
3059  * remove guest mac address filter
3060  **/
3061 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3062 {
3063 	struct virtchnl_ether_addr_list *al =
3064 	    (struct virtchnl_ether_addr_list *)msg;
3065 	bool was_unimac_deleted = false;
3066 	struct i40e_pf *pf = vf->pf;
3067 	struct i40e_vsi *vsi = NULL;
3068 	int ret = 0;
3069 	int i;
3070 
3071 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3072 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3073 		ret = -EINVAL;
3074 		goto error_param;
3075 	}
3076 
3077 	for (i = 0; i < al->num_elements; i++) {
3078 		if (is_broadcast_ether_addr(al->list[i].addr) ||
3079 		    is_zero_ether_addr(al->list[i].addr)) {
3080 			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
3081 				al->list[i].addr, vf->vf_id);
3082 			ret = -EINVAL;
3083 			goto error_param;
3084 		}
3085 		if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
3086 			was_unimac_deleted = true;
3087 	}
3088 	vsi = pf->vsi[vf->lan_vsi_idx];
3089 
3090 	spin_lock_bh(&vsi->mac_filter_hash_lock);
3091 	/* delete addresses from the list */
3092 	for (i = 0; i < al->num_elements; i++)
3093 		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3094 			ret = -EINVAL;
3095 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
3096 			goto error_param;
3097 		}
3098 
3099 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3100 
3101 	if (was_unimac_deleted)
3102 		eth_zero_addr(vf->default_lan_addr.addr);
3103 
3104 	/* program the updated filter list */
3105 	ret = i40e_sync_vsi_filters(vsi);
3106 	if (ret)
3107 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3108 			vf->vf_id, ret);
3109 
3110 	if (vf->trusted && was_unimac_deleted) {
3111 		struct i40e_mac_filter *f;
3112 		struct hlist_node *h;
3113 		u8 *macaddr = NULL;
3114 		int bkt;
3115 
3116 		/* set last unicast mac address as default */
3117 		spin_lock_bh(&vsi->mac_filter_hash_lock);
3118 		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3119 			if (is_valid_ether_addr(f->macaddr))
3120 				macaddr = f->macaddr;
3121 		}
3122 		if (macaddr)
3123 			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3124 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3125 	}
3126 error_param:
3127 	/* send the response to the VF */
3128 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3129 }
3130 
3131 /**
3132  * i40e_vc_add_vlan_msg
3133  * @vf: pointer to the VF info
3134  * @msg: pointer to the msg buffer
3135  *
3136  * program guest vlan id
3137  **/
3138 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3139 {
3140 	struct virtchnl_vlan_filter_list *vfl =
3141 	    (struct virtchnl_vlan_filter_list *)msg;
3142 	struct i40e_pf *pf = vf->pf;
3143 	struct i40e_vsi *vsi = NULL;
3144 	int aq_ret = 0;
3145 	int i;
3146 
3147 	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3148 	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3149 		dev_err(&pf->pdev->dev,
3150 			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
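		/* Note that aq_ret stays 0 here, so the VF is still answered
		 * with success, much like the unprivileged promiscuous-mode
		 * case above.
		 */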
3151 		goto error_param;
3152 	}
3153 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3154 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3155 		aq_ret = -EINVAL;
3156 		goto error_param;
3157 	}
3158 
3159 	for (i = 0; i < vfl->num_elements; i++) {
3160 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3161 			aq_ret = -EINVAL;
3162 			dev_err(&pf->pdev->dev,
3163 				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3164 			goto error_param;
3165 		}
3166 	}
3167 	vsi = pf->vsi[vf->lan_vsi_idx];
3168 	if (vsi->info.pvid) {
3169 		aq_ret = -EINVAL;
3170 		goto error_param;
3171 	}
3172 
3173 	i40e_vlan_stripping_enable(vsi);
3174 	for (i = 0; i < vfl->num_elements; i++) {
3175 		/* add new VLAN filter */
3176 		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
3177 		if (!ret)
3178 			vf->num_vlan++;
3179 
3180 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3181 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3182 							   true,
3183 							   vfl->vlan_id[i],
3184 							   NULL);
3185 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3186 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3187 							   true,
3188 							   vfl->vlan_id[i],
3189 							   NULL);
3190 
3191 		if (ret)
3192 			dev_err(&pf->pdev->dev,
3193 				"Unable to add VLAN filter %d for VF %d, error %d\n",
3194 				vfl->vlan_id[i], vf->vf_id, ret);
3195 	}
3196 
3197 error_param:
3198 	/* send the response to the VF */
3199 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3200 }
3201 
3202 /**
3203  * i40e_vc_remove_vlan_msg
3204  * @vf: pointer to the VF info
3205  * @msg: pointer to the msg buffer
3206  *
3207  * remove programmed guest vlan id
3208  **/
3209 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3210 {
3211 	struct virtchnl_vlan_filter_list *vfl =
3212 	    (struct virtchnl_vlan_filter_list *)msg;
3213 	struct i40e_pf *pf = vf->pf;
3214 	struct i40e_vsi *vsi = NULL;
3215 	int aq_ret = 0;
3216 	int i;
3217 
3218 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3219 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3220 		aq_ret = -EINVAL;
3221 		goto error_param;
3222 	}
3223 
3224 	for (i = 0; i < vfl->num_elements; i++) {
3225 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3226 			aq_ret = -EINVAL;
3227 			goto error_param;
3228 		}
3229 	}
3230 
3231 	vsi = pf->vsi[vf->lan_vsi_idx];
3232 	if (vsi->info.pvid) {
3233 		if (vfl->num_elements > 1 || vfl->vlan_id[0])
3234 			aq_ret = -EINVAL;
3235 		goto error_param;
3236 	}
3237 
3238 	for (i = 0; i < vfl->num_elements; i++) {
3239 		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3240 		vf->num_vlan--;
3241 
3242 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3243 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3244 							   false,
3245 							   vfl->vlan_id[i],
3246 							   NULL);
3247 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3248 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3249 							   false,
3250 							   vfl->vlan_id[i],
3251 							   NULL);
3252 	}
3253 
3254 error_param:
3255 	/* send the response to the VF */
3256 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3257 }
3258 
3259 /**
3260  * i40e_vc_rdma_msg
3261  * @vf: pointer to the VF info
3262  * @msg: pointer to the msg buffer
3263  * @msglen: msg length
3264  *
3265  * called from the VF for the iwarp msgs
3266  **/
3267 static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3268 {
3269 	struct i40e_pf *pf = vf->pf;
3270 	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3271 	int aq_ret = 0;
3272 
3273 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3274 	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3275 		aq_ret = -EINVAL;
3276 		goto error_param;
3277 	}
3278 
3279 	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3280 				     msg, msglen);
3281 
3282 error_param:
3283 	/* send the response to the VF */
3284 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
3285 				       aq_ret);
3286 }
3287 
3288 /**
3289  * i40e_vc_rdma_qvmap_msg
3290  * @vf: pointer to the VF info
3291  * @msg: pointer to the msg buffer
3292  * @config: config qvmap or release it
3293  *
3294  * called from the VF for the iwarp msgs
3295  **/
3296 static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3297 {
3298 	struct virtchnl_rdma_qvlist_info *qvlist_info =
3299 				(struct virtchnl_rdma_qvlist_info *)msg;
3300 	int aq_ret = 0;
3301 
3302 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3303 	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3304 		aq_ret = -EINVAL;
3305 		goto error_param;
3306 	}
3307 
3308 	if (config) {
3309 		if (i40e_config_rdma_qvlist(vf, qvlist_info))
3310 			aq_ret = -EINVAL;
3311 	} else {
3312 		i40e_release_rdma_qvlist(vf);
3313 	}
3314 
3315 error_param:
3316 	/* send the response to the VF */
3317 	return i40e_vc_send_resp_to_vf(vf,
3318 			       config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
3319 			       VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
3320 			       aq_ret);
3321 }
3322 
3323 /**
3324  * i40e_vc_config_rss_key
3325  * @vf: pointer to the VF info
3326  * @msg: pointer to the msg buffer
3327  *
3328  * Configure the VF's RSS key
3329  **/
3330 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3331 {
3332 	struct virtchnl_rss_key *vrk =
3333 		(struct virtchnl_rss_key *)msg;
3334 	struct i40e_pf *pf = vf->pf;
3335 	struct i40e_vsi *vsi = NULL;
3336 	int aq_ret = 0;
3337 
3338 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3339 	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3340 	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3341 		aq_ret = -EINVAL;
3342 		goto err;
3343 	}
3344 
3345 	vsi = pf->vsi[vf->lan_vsi_idx];
3346 	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3347 err:
3348 	/* send the response to the VF */
3349 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3350 				       aq_ret);
3351 }
3352 
3353 /**
3354  * i40e_vc_config_rss_lut
3355  * @vf: pointer to the VF info
3356  * @msg: pointer to the msg buffer
3357  *
3358  * Configure the VF's RSS LUT
3359  **/
3360 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3361 {
3362 	struct virtchnl_rss_lut *vrl =
3363 		(struct virtchnl_rss_lut *)msg;
3364 	struct i40e_pf *pf = vf->pf;
3365 	struct i40e_vsi *vsi = NULL;
3366 	int aq_ret = 0;
3367 	u16 i;
3368 
3369 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3370 	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3371 	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3372 		aq_ret = -EINVAL;
3373 		goto err;
3374 	}
3375 
3376 	for (i = 0; i < vrl->lut_entries; i++)
3377 		if (vrl->lut[i] >= vf->num_queue_pairs) {
3378 			aq_ret = -EINVAL;
3379 			goto err;
3380 		}
3381 
3382 	vsi = pf->vsi[vf->lan_vsi_idx];
3383 	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3384 	/* send the response to the VF */
3385 err:
3386 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3387 				       aq_ret);
3388 }
3389 
3390 /**
3391  * i40e_vc_get_rss_hena
3392  * @vf: pointer to the VF info
3393  * @msg: pointer to the msg buffer
3394  *
3395  * Return the RSS HENA bits allowed by the hardware
3396  **/
3397 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3398 {
3399 	struct virtchnl_rss_hena *vrh = NULL;
3400 	struct i40e_pf *pf = vf->pf;
3401 	int aq_ret = 0;
3402 	int len = 0;
3403 
3404 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3405 		aq_ret = -EINVAL;
3406 		goto err;
3407 	}
3408 	len = sizeof(struct virtchnl_rss_hena);
3409 
3410 	vrh = kzalloc(len, GFP_KERNEL);
3411 	if (!vrh) {
3412 		aq_ret = -ENOMEM;
3413 		len = 0;
3414 		goto err;
3415 	}
3416 	vrh->hena = i40e_pf_get_default_rss_hena(pf);
3417 err:
3418 	/* send the response back to the VF */
3419 	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3420 					aq_ret, (u8 *)vrh, len);
3421 	kfree(vrh);
3422 	return aq_ret;
3423 }
3424 
3425 /**
3426  * i40e_vc_set_rss_hena
3427  * @vf: pointer to the VF info
3428  * @msg: pointer to the msg buffer
3429  *
3430  * Set the RSS HENA bits for the VF
3431  **/
3432 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3433 {
3434 	struct virtchnl_rss_hena *vrh =
3435 		(struct virtchnl_rss_hena *)msg;
3436 	struct i40e_pf *pf = vf->pf;
3437 	struct i40e_hw *hw = &pf->hw;
3438 	int aq_ret = 0;
3439 
3440 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3441 		aq_ret = -EINVAL;
3442 		goto err;
3443 	}
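	/* hena is a 64-bit mask; split it across the two 32-bit HENA registers */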
3444 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3445 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3446 			  (u32)(vrh->hena >> 32));
3447 
3448 	/* send the response to the VF */
3449 err:
3450 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3451 }
3452 
3453 /**
3454  * i40e_vc_enable_vlan_stripping
3455  * @vf: pointer to the VF info
3456  * @msg: pointer to the msg buffer
3457  *
3458  * Enable vlan header stripping for the VF
3459  **/
3460 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3461 {
3462 	struct i40e_vsi *vsi;
3463 	int aq_ret = 0;
3464 
3465 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3466 		aq_ret = -EINVAL;
3467 		goto err;
3468 	}
3469 
3470 	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3471 	i40e_vlan_stripping_enable(vsi);
3472 
3473 	/* send the response to the VF */
3474 err:
3475 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3476 				       aq_ret);
3477 }
3478 
3479 /**
3480  * i40e_vc_disable_vlan_stripping
3481  * @vf: pointer to the VF info
3482  * @msg: pointer to the msg buffer
3483  *
3484  * Disable vlan header stripping for the VF
3485  **/
3486 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3487 {
3488 	struct i40e_vsi *vsi;
3489 	int aq_ret = 0;
3490 
3491 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3492 		aq_ret = -EINVAL;
3493 		goto err;
3494 	}
3495 
3496 	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3497 	i40e_vlan_stripping_disable(vsi);
3498 
3499 	/* send the response to the VF */
3500 err:
3501 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3502 				       aq_ret);
3503 }
3504 
3505 /**
3506  * i40e_validate_cloud_filter
3507  * @vf: pointer to VF structure
3508  * @tc_filter: pointer to filter requested
3509  *
3510  * This function validates cloud filter programmed as TC filter for ADq
3511  **/
3512 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3513 				      struct virtchnl_filter *tc_filter)
3514 {
3515 	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3516 	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3517 	struct i40e_pf *pf = vf->pf;
3518 	struct i40e_vsi *vsi = NULL;
3519 	struct i40e_mac_filter *f;
3520 	struct hlist_node *h;
3521 	bool found = false;
3522 	int bkt;
3523 
3524 	if (!tc_filter->action) {
3525 		dev_info(&pf->pdev->dev,
3526 			 "VF %d: Currently ADq doesn't support Drop Action\n",
3527 			 vf->vf_id);
3528 		goto err;
3529 	}
3530 
3531 	/* action_meta is the TC number to which the filter is applied */
3532 	if (!tc_filter->action_meta ||
3533 	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
3534 		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3535 			 vf->vf_id, tc_filter->action_meta);
3536 		goto err;
3537 	}
3538 
3539 	/* Check filter if it's programmed for advanced mode or basic mode.
3540 	 * There are two ADq modes (for VF only),
3541 	 * 1. Basic mode: intended to allow as many filter options as possible
3542 	 *		  to be added to a VF in Non-trusted mode. Main goal is
3543 	 *		  to add filters to its own MAC and VLAN id.
3544 	 * 2. Advanced mode: is for allowing filters to be applied other than
3545 	 *		  its own MAC or VLAN. This mode requires the VF to be
3546 	 *		  Trusted.
3547 	 */
3548 	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3549 		vsi = pf->vsi[vf->lan_vsi_idx];
3550 		f = i40e_find_mac(vsi, data.dst_mac);
3551 
3552 		if (!f) {
3553 			dev_info(&pf->pdev->dev,
3554 				 "Destination MAC %pM doesn't belong to VF %d\n",
3555 				 data.dst_mac, vf->vf_id);
3556 			goto err;
3557 		}
3558 
3559 		if (mask.vlan_id) {
3560 			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3561 					   hlist) {
3562 				if (f->vlan == ntohs(data.vlan_id)) {
3563 					found = true;
3564 					break;
3565 				}
3566 			}
3567 			if (!found) {
3568 				dev_info(&pf->pdev->dev,
3569 					 "VF %d doesn't have any VLAN id %u\n",
3570 					 vf->vf_id, ntohs(data.vlan_id));
3571 				goto err;
3572 			}
3573 		}
3574 	} else {
3575 		/* Check if VF is trusted */
3576 		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3577 			dev_err(&pf->pdev->dev,
3578 				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3579 				vf->vf_id);
3580 			return -EIO;
3581 		}
3582 	}
3583 
3584 	if (mask.dst_mac[0] & data.dst_mac[0]) {
3585 		if (is_broadcast_ether_addr(data.dst_mac) ||
3586 		    is_zero_ether_addr(data.dst_mac)) {
3587 			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3588 				 vf->vf_id, data.dst_mac);
3589 			goto err;
3590 		}
3591 	}
3592 
3593 	if (mask.src_mac[0] & data.src_mac[0]) {
3594 		if (is_broadcast_ether_addr(data.src_mac) ||
3595 		    is_zero_ether_addr(data.src_mac)) {
3596 			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3597 				 vf->vf_id, data.src_mac);
3598 			goto err;
3599 		}
3600 	}
3601 
3602 	if (mask.dst_port & data.dst_port) {
3603 		if (!data.dst_port) {
3604 			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3605 				 vf->vf_id);
3606 			goto err;
3607 		}
3608 	}
3609 
3610 	if (mask.src_port & data.src_port) {
3611 		if (!data.src_port) {
3612 			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3613 				 vf->vf_id);
3614 			goto err;
3615 		}
3616 	}
3617 
3618 	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3619 	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3620 		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3621 			 vf->vf_id);
3622 		goto err;
3623 	}
3624 
3625 	if (mask.vlan_id & data.vlan_id) {
3626 		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3627 			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3628 				 vf->vf_id);
3629 			goto err;
3630 		}
3631 	}
3632 
3633 	return 0;
3634 err:
3635 	return -EIO;
3636 }
3637 
3638 /**
3639  * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3640  * @vf: pointer to the VF info
3641  * @seid: seid of the vsi it is searching for
3642  **/
3643 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3644 {
3645 	struct i40e_pf *pf = vf->pf;
3646 	struct i40e_vsi *vsi = NULL;
3647 	int i;
3648 
3649 	for (i = 0; i < vf->num_tc; i++) {
3650 		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3651 		if (vsi && vsi->seid == seid)
3652 			return vsi;
3653 	}
3654 	return NULL;
3655 }
3656 
3657 /**
3658  * i40e_del_all_cloud_filters
3659  * @vf: pointer to the VF info
3660  *
3661  * This function deletes all the VF's cloud filters
3662  **/
3663 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3664 {
3665 	struct i40e_cloud_filter *cfilter = NULL;
3666 	struct i40e_pf *pf = vf->pf;
3667 	struct i40e_vsi *vsi = NULL;
3668 	struct hlist_node *node;
3669 	int ret;
3670 
3671 	hlist_for_each_entry_safe(cfilter, node,
3672 				  &vf->cloud_filter_list, cloud_node) {
3673 		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3674 
3675 		if (!vsi) {
3676 			dev_err(&pf->pdev->dev, "VF %d: no VSI found matching seid %u, can't delete cloud filter\n",
3677 				vf->vf_id, cfilter->seid);
3678 			continue;
3679 		}
3680 
3681 		if (cfilter->dst_port)
3682 			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3683 								false);
3684 		else
3685 			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3686 		if (ret)
3687 			dev_err(&pf->pdev->dev,
3688 				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3689 				vf->vf_id, ERR_PTR(ret),
3690 				i40e_aq_str(&pf->hw,
3691 					    pf->hw.aq.asq_last_status));
3692 
3693 		hlist_del(&cfilter->cloud_node);
3694 		kfree(cfilter);
3695 		vf->num_cloud_filters--;
3696 	}
3697 }
3698 
3699 /**
3700  * i40e_vc_del_cloud_filter
3701  * @vf: pointer to the VF info
3702  * @msg: pointer to the msg buffer
3703  *
3704  * This function deletes a cloud filter programmed as TC filter for ADq
3705  **/
3706 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3707 {
3708 	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3709 	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3710 	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3711 	struct i40e_cloud_filter cfilter, *cf = NULL;
3712 	struct i40e_pf *pf = vf->pf;
3713 	struct i40e_vsi *vsi = NULL;
3714 	struct hlist_node *node;
3715 	int aq_ret = 0;
3716 	int i, ret;
3717 
3718 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3719 		aq_ret = -EINVAL;
3720 		goto err;
3721 	}
3722 
3723 	if (!vf->adq_enabled) {
3724 		dev_info(&pf->pdev->dev,
3725 			 "VF %d: ADq not enabled, can't apply cloud filter\n",
3726 			 vf->vf_id);
3727 		aq_ret = -EINVAL;
3728 		goto err;
3729 	}
3730 
3731 	if (i40e_validate_cloud_filter(vf, vcf)) {
3732 		dev_info(&pf->pdev->dev,
3733 			 "VF %d: Invalid input, can't apply cloud filter\n",
3734 			 vf->vf_id);
3735 		aq_ret = -EINVAL;
3736 		goto err;
3737 	}
3738 
3739 	memset(&cfilter, 0, sizeof(cfilter));
3740 	/* parse destination mac address */
3741 	for (i = 0; i < ETH_ALEN; i++)
3742 		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3743 
3744 	/* parse source mac address */
3745 	for (i = 0; i < ETH_ALEN; i++)
3746 		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3747 
3748 	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3749 	cfilter.dst_port = mask.dst_port & tcf.dst_port;
3750 	cfilter.src_port = mask.src_port & tcf.src_port;
3751 
3752 	switch (vcf->flow_type) {
3753 	case VIRTCHNL_TCP_V4_FLOW:
3754 		cfilter.n_proto = ETH_P_IP;
3755 		if (mask.dst_ip[0] & tcf.dst_ip[0])
3756 			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3757 			       sizeof(cfilter.ip.v4.dst_ip));
3758 		else if (mask.src_ip[0] & tcf.src_ip[0])
3759 			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3760 			       sizeof(cfilter.ip.v4.src_ip));
3761 		break;
3762 	case VIRTCHNL_TCP_V6_FLOW:
3763 		cfilter.n_proto = ETH_P_IPV6;
3764 		if (mask.dst_ip[3] & tcf.dst_ip[3])
3765 			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3766 			       sizeof(cfilter.ip.v6.dst_ip6));
3767 		if (mask.src_ip[3] & tcf.src_ip[3])
3768 			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3769 			       sizeof(cfilter.ip.v6.src_ip6));
3770 		break;
3771 	default:
3772 		/* TC filter can be configured based on different combinations
3773 		 * and in this case IP is not a part of filter config
3774 		 */
3775 		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3776 			 vf->vf_id);
3777 	}
3778 
3779 	/* get the VSI to which the TC belongs */
3780 	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3781 	cfilter.seid = vsi->seid;
3782 	cfilter.flags = vcf->field_flags;
3783 
3784 	/* Deleting TC filter */
3785 	if (tcf.dst_port)
3786 		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3787 	else
3788 		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3789 	if (ret) {
3790 		dev_err(&pf->pdev->dev,
3791 			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3792 			vf->vf_id, ERR_PTR(ret),
3793 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3794 		goto err;
3795 	}
3796 
3797 	hlist_for_each_entry_safe(cf, node,
3798 				  &vf->cloud_filter_list, cloud_node) {
3799 		if (cf->seid != cfilter.seid)
3800 			continue;
3801 		if (mask.dst_port)
3802 			if (cfilter.dst_port != cf->dst_port)
3803 				continue;
3804 		if (mask.dst_mac[0])
3805 			if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3806 				continue;
3807 		/* for ipv4 data to be valid, only the first word of the mask is set */
3808 		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3809 			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3810 				   sizeof(cfilter.ip.v4.dst_ip)))
3811 				continue;
3812 		/* for ipv6, the mask is set for all sixteen bytes (4 words) */
3813 		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3814 			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3815 				   sizeof(cfilter.ip.v6.dst_ip6)))
3816 				continue;
3817 		if (mask.vlan_id)
3818 			if (cfilter.vlan_id != cf->vlan_id)
3819 				continue;
3820 
3821 		hlist_del(&cf->cloud_node);
3822 		kfree(cf);
3823 		vf->num_cloud_filters--;
3824 	}
3825 
3826 err:
3827 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3828 				       aq_ret);
3829 }
3830 
3831 /**
3832  * i40e_vc_add_cloud_filter
3833  * @vf: pointer to the VF info
3834  * @msg: pointer to the msg buffer
3835  *
3836  * This function adds a cloud filter programmed as TC filter for ADq
3837  **/
3838 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3839 {
3840 	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3841 	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3842 	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3843 	struct i40e_cloud_filter *cfilter = NULL;
3844 	struct i40e_pf *pf = vf->pf;
3845 	struct i40e_vsi *vsi = NULL;
3846 	int aq_ret = 0;
3847 	int i, ret;
3848 
3849 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3850 		aq_ret = -EINVAL;
3851 		goto err_out;
3852 	}
3853 
3854 	if (!vf->adq_enabled) {
3855 		dev_info(&pf->pdev->dev,
3856 			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3857 			 vf->vf_id);
3858 		aq_ret = -EINVAL;
3859 		goto err_out;
3860 	}
3861 
3862 	if (i40e_validate_cloud_filter(vf, vcf)) {
3863 		dev_info(&pf->pdev->dev,
3864 			 "VF %d: Invalid input, can't apply cloud filter\n",
3865 			 vf->vf_id);
3866 		aq_ret = -EINVAL;
3867 		goto err_out;
3868 	}
3869 
3870 	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3871 	if (!cfilter)
3872 		return -ENOMEM;
3873 
3874 	/* parse destination mac address */
3875 	for (i = 0; i < ETH_ALEN; i++)
3876 		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3877 
3878 	/* parse source mac address */
3879 	for (i = 0; i < ETH_ALEN; i++)
3880 		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3881 
3882 	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3883 	cfilter->dst_port = mask.dst_port & tcf.dst_port;
3884 	cfilter->src_port = mask.src_port & tcf.src_port;
3885 
3886 	switch (vcf->flow_type) {
3887 	case VIRTCHNL_TCP_V4_FLOW:
3888 		cfilter->n_proto = ETH_P_IP;
3889 		if (mask.dst_ip[0] & tcf.dst_ip[0])
3890 			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3891 			       sizeof(cfilter->ip.v4.dst_ip));
3892 		else if (mask.src_ip[0] & tcf.src_ip[0])
3893 			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3894 			       sizeof(cfilter->ip.v4.src_ip));
3895 		break;
3896 	case VIRTCHNL_TCP_V6_FLOW:
3897 		cfilter->n_proto = ETH_P_IPV6;
3898 		if (mask.dst_ip[3] & tcf.dst_ip[3])
3899 			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3900 			       sizeof(cfilter->ip.v6.dst_ip6));
3901 		if (mask.src_ip[3] & tcf.src_ip[3])
3902 			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3903 			       sizeof(cfilter->ip.v6.src_ip6));
3904 		break;
3905 	default:
3906 		/* TC filter can be configured based on different combinations
3907 		 * and in this case IP is not a part of filter config
3908 		 */
3909 		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3910 			 vf->vf_id);
3911 	}
3912 
3913 	/* get the VSI to which the TC belongs */
3914 	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3915 	cfilter->seid = vsi->seid;
3916 	cfilter->flags = vcf->field_flags;
3917 
3918 	/* Adding cloud filter programmed as TC filter */
3919 	if (tcf.dst_port)
3920 		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3921 	else
3922 		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3923 	if (ret) {
3924 		dev_err(&pf->pdev->dev,
3925 			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
3926 			vf->vf_id, ERR_PTR(ret),
3927 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3928 		goto err_free;
3929 	}
3930 
3931 	INIT_HLIST_NODE(&cfilter->cloud_node);
3932 	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3933 	/* ownership moved to the list; clear the pointer so err_free skips it */
3934 	cfilter = NULL;
3935 	vf->num_cloud_filters++;
3936 err_free:
3937 	kfree(cfilter);
3938 err_out:
3939 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3940 				       aq_ret);
3941 }
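/* The per-byte "mask & data" parsing above keeps only the bits the VF
 * actually masked; unmasked bytes stay zero and act as wildcards when
 * the filter is later matched.  A standalone sketch of the idea, using
 * a hypothetical helper (not used by the driver):
 */
#if 0	/* example only, never compiled */
static void example_masked_copy(u8 *dst, const u8 *mask, const u8 *data,
				int len)
{
	int i;

	for (i = 0; i < len; i++)
		dst[i] = mask[i] & data[i];	/* unmasked bytes become 0 */
}
#endif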
3942 
3943 /**
3944  * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3945  * @vf: pointer to the VF info
3946  * @msg: pointer to the msg buffer
3947  **/
3948 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3949 {
3950 	struct virtchnl_tc_info *tci =
3951 		(struct virtchnl_tc_info *)msg;
3952 	struct i40e_pf *pf = vf->pf;
3953 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
3954 	int i, adq_request_qps = 0;
3955 	int aq_ret = 0;
3956 	u64 speed = 0;
3957 
3958 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3959 		aq_ret = -EINVAL;
3960 		goto err;
3961 	}
3962 
3963 	/* ADq cannot be applied if spoof check is ON */
3964 	if (vf->spoofchk) {
3965 		dev_err(&pf->pdev->dev,
3966 			"Spoof check is ON, turn it OFF to enable ADq\n");
3967 		aq_ret = -EINVAL;
3968 		goto err;
3969 	}
3970 
3971 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3972 		dev_err(&pf->pdev->dev,
3973 			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3974 			vf->vf_id);
3975 		aq_ret = -EINVAL;
3976 		goto err;
3977 	}
3978 
3979 	/* max number of traffic classes for VF currently capped at 4 */
3980 	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3981 		dev_err(&pf->pdev->dev,
3982 			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3983 			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3984 		aq_ret = -EINVAL;
3985 		goto err;
3986 	}
3987 
3988 	/* validate queues for each TC */
3989 	for (i = 0; i < tci->num_tc; i++)
3990 		if (!tci->list[i].count ||
3991 		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3992 			dev_err(&pf->pdev->dev,
3993 				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3994 				vf->vf_id, i, tci->list[i].count,
3995 				I40E_DEFAULT_QUEUES_PER_VF);
3996 			aq_ret = -EINVAL;
3997 			goto err;
3998 		}
3999 
4000 	/* ADq needs max VF queues, but the default number is already allocated */
4001 	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
4002 
4003 	if (pf->queues_left < adq_request_qps) {
4004 		dev_err(&pf->pdev->dev,
4005 			"No queues left to allocate to VF %d\n",
4006 			vf->vf_id);
4007 		aq_ret = -EINVAL;
4008 		goto err;
4009 	} else {
4010 		/* allocate the max number of VF queues when enabling ADq so
4011 		 * that an ADq-enabled VF always gets its queues back after
4012 		 * a reset.
4013 		 */
4014 		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
4015 	}
4016 
4017 	/* get link speed in Mbps to validate rate limit */
4018 	speed = i40e_vc_link_speed2mbps(ls->link_speed);
4019 	if (speed == SPEED_UNKNOWN) {
4020 		dev_err(&pf->pdev->dev,
4021 			"Cannot detect link speed\n");
4022 		aq_ret = -EINVAL;
4023 		goto err;
4024 	}
4025 
4026 	/* parse data from the queue channel info */
4027 	vf->num_tc = tci->num_tc;
4028 	for (i = 0; i < vf->num_tc; i++) {
4029 		if (tci->list[i].max_tx_rate) {
4030 			if (tci->list[i].max_tx_rate > speed) {
4031 				dev_err(&pf->pdev->dev,
4032 					"Invalid max tx rate %llu specified for VF %d.\n",
4033 					tci->list[i].max_tx_rate,
4034 					vf->vf_id);
4035 				aq_ret = -EINVAL;
4036 				goto err;
4037 			} else {
4038 				vf->ch[i].max_tx_rate =
4039 					tci->list[i].max_tx_rate;
4040 			}
4041 		}
4042 		vf->ch[i].num_qps = tci->list[i].count;
4043 	}
4044 
4045 	/* set this flag only after making sure all inputs are sane */
4046 	vf->adq_enabled = true;
4047 
4048 	/* reset the VF in order to allocate resources */
4049 	i40e_vc_reset_vf(vf, true);
4050 
4051 	return 0;
4052 
4053 	/* send the response to the VF */
4054 err:
4055 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
4056 				       aq_ret);
4057 }
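/* A sketch (illustrative only, hypothetical helper) of the kind of
 * VIRTCHNL_OP_ENABLE_CHANNELS payload the handler above accepts: two
 * TCs with four queue pairs each and TC1 capped at 1000 Mbps.  Since
 * struct virtchnl_tc_info declares a one-element list[], room for the
 * second entry is allocated explicitly.
 */
#if 0	/* example only, never compiled */
static struct virtchnl_tc_info *example_build_adq_request(void)
{
	struct virtchnl_tc_info *tci;

	tci = kzalloc(sizeof(*tci) + sizeof(tci->list[0]), GFP_KERNEL);
	if (!tci)
		return NULL;

	tci->num_tc = 2;			/* 1..I40E_MAX_VF_VSI */
	tci->list[0].count = 4;			/* queue pairs on TC0 */
	tci->list[1].count = 4;			/* queue pairs on TC1 */
	tci->list[1].max_tx_rate = 1000;	/* Mbps, must not exceed link speed */
	return tci;
}
#endif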
4058 
4059 /**
4060  * i40e_vc_del_qch_msg
4061  * @vf: pointer to the VF info
4062  * @msg: pointer to the msg buffer
4063  **/
4064 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
4065 {
4066 	struct i40e_pf *pf = vf->pf;
4067 	int aq_ret = 0;
4068 
4069 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4070 		aq_ret = -EINVAL;
4071 		goto err;
4072 	}
4073 
4074 	if (vf->adq_enabled) {
4075 		i40e_del_all_cloud_filters(vf);
4076 		i40e_del_qch(vf);
4077 		vf->adq_enabled = false;
4078 		vf->num_tc = 0;
4079 		dev_info(&pf->pdev->dev,
4080 			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
4081 			 vf->vf_id);
4082 	} else {
4083 		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4084 			 vf->vf_id);
4085 		aq_ret = -EINVAL;
4086 	}
4087 
4088 	/* reset the VF in order to allocate resources */
4089 	i40e_vc_reset_vf(vf, true);
4090 
4091 	return 0;
4092 
4093 err:
4094 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4095 				       aq_ret);
4096 }
4097 
4098 /**
4099  * i40e_vc_process_vf_msg
4100  * @pf: pointer to the PF structure
4101  * @vf_id: source VF id
4102  * @v_opcode: operation code
4103  * @v_retval: unused return value code
4104  * @msg: pointer to the msg buffer
4105  * @msglen: msg length
4106  *
4107  * called from the common aeq/arq handler to
4108  * process request from VF
4109  **/
4110 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4111 			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
4112 {
4113 	struct i40e_hw *hw = &pf->hw;
4114 	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4115 	struct i40e_vf *vf;
4116 	int ret;
4117 
4118 	pf->vf_aq_requests++;
4119 	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4120 		return -EINVAL;
4121 	vf = &pf->vf[local_vf_id];
4122 
4123 	/* Check if VF is disabled. */
4124 	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4125 		return -EINVAL;
4126 
4127 	/* perform basic checks on the msg */
4128 	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4129 
4130 	if (ret) {
4131 		i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
4132 		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4133 			local_vf_id, v_opcode, msglen);
4134 		return ret;
4135 	}
4136 
4137 	switch (v_opcode) {
4138 	case VIRTCHNL_OP_VERSION:
4139 		ret = i40e_vc_get_version_msg(vf, msg);
4140 		break;
4141 	case VIRTCHNL_OP_GET_VF_RESOURCES:
4142 		ret = i40e_vc_get_vf_resources_msg(vf, msg);
4143 		i40e_vc_notify_vf_link_state(vf);
4144 		break;
4145 	case VIRTCHNL_OP_RESET_VF:
4146 		i40e_vc_reset_vf(vf, false);
4147 		ret = 0;
4148 		break;
4149 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4150 		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4151 		break;
4152 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4153 		ret = i40e_vc_config_queues_msg(vf, msg);
4154 		break;
4155 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4156 		ret = i40e_vc_config_irq_map_msg(vf, msg);
4157 		break;
4158 	case VIRTCHNL_OP_ENABLE_QUEUES:
4159 		ret = i40e_vc_enable_queues_msg(vf, msg);
4160 		i40e_vc_notify_vf_link_state(vf);
4161 		break;
4162 	case VIRTCHNL_OP_DISABLE_QUEUES:
4163 		ret = i40e_vc_disable_queues_msg(vf, msg);
4164 		break;
4165 	case VIRTCHNL_OP_ADD_ETH_ADDR:
4166 		ret = i40e_vc_add_mac_addr_msg(vf, msg);
4167 		break;
4168 	case VIRTCHNL_OP_DEL_ETH_ADDR:
4169 		ret = i40e_vc_del_mac_addr_msg(vf, msg);
4170 		break;
4171 	case VIRTCHNL_OP_ADD_VLAN:
4172 		ret = i40e_vc_add_vlan_msg(vf, msg);
4173 		break;
4174 	case VIRTCHNL_OP_DEL_VLAN:
4175 		ret = i40e_vc_remove_vlan_msg(vf, msg);
4176 		break;
4177 	case VIRTCHNL_OP_GET_STATS:
4178 		ret = i40e_vc_get_stats_msg(vf, msg);
4179 		break;
4180 	case VIRTCHNL_OP_RDMA:
4181 		ret = i40e_vc_rdma_msg(vf, msg, msglen);
4182 		break;
4183 	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
4184 		ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
4185 		break;
4186 	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
4187 		ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
4188 		break;
4189 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
4190 		ret = i40e_vc_config_rss_key(vf, msg);
4191 		break;
4192 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
4193 		ret = i40e_vc_config_rss_lut(vf, msg);
4194 		break;
4195 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4196 		ret = i40e_vc_get_rss_hena(vf, msg);
4197 		break;
4198 	case VIRTCHNL_OP_SET_RSS_HENA:
4199 		ret = i40e_vc_set_rss_hena(vf, msg);
4200 		break;
4201 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4202 		ret = i40e_vc_enable_vlan_stripping(vf, msg);
4203 		break;
4204 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4205 		ret = i40e_vc_disable_vlan_stripping(vf, msg);
4206 		break;
4207 	case VIRTCHNL_OP_REQUEST_QUEUES:
4208 		ret = i40e_vc_request_queues_msg(vf, msg);
4209 		break;
4210 	case VIRTCHNL_OP_ENABLE_CHANNELS:
4211 		ret = i40e_vc_add_qch_msg(vf, msg);
4212 		break;
4213 	case VIRTCHNL_OP_DISABLE_CHANNELS:
4214 		ret = i40e_vc_del_qch_msg(vf, msg);
4215 		break;
4216 	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4217 		ret = i40e_vc_add_cloud_filter(vf, msg);
4218 		break;
4219 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4220 		ret = i40e_vc_del_cloud_filter(vf, msg);
4221 		break;
4222 	case VIRTCHNL_OP_UNKNOWN:
4223 	default:
4224 		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4225 			v_opcode, local_vf_id);
4226 		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4227 					      -EOPNOTSUPP);
4228 		break;
4229 	}
4230 
4231 	return ret;
4232 }
4233 
4234 /**
4235  * i40e_vc_process_vflr_event
4236  * @pf: pointer to the PF structure
4237  *
4238  * called from the VFLR irq handler to
4239  * free up VF resources and state variables
4240  **/
4241 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4242 {
4243 	struct i40e_hw *hw = &pf->hw;
4244 	u32 reg, reg_idx, bit_idx;
4245 	struct i40e_vf *vf;
4246 	int vf_id;
4247 
4248 	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4249 		return 0;
4250 
4251 	/* Re-enable the VFLR interrupt cause here, before looking for which
4252 	 * VF got reset. Otherwise, if another VF gets a reset while the
4253 	 * first one is being processed, that interrupt will be lost, and
4254 	 * that VF will be stuck in reset forever.
4255 	 */
4256 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4257 	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4258 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4259 	i40e_flush(hw);
4260 
4261 	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4262 	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4263 		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4264 		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
4265 		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
4266 		vf = &pf->vf[vf_id];
4267 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4268 		if (reg & BIT(bit_idx))
4269 			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4270 			i40e_reset_vf(vf, true);
4271 	}
4272 
4273 	return 0;
4274 }
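/* Worked example of the GLGEN_VFLRSTAT indexing above: each register
 * covers 32 VFs, so with vf_base_id 32 and vf_id 8 the absolute VF id
 * is 40, which lands in register 40 / 32 = 1 at bit 40 % 32 = 8.  The
 * same math as a standalone sketch (hypothetical helper):
 */
#if 0	/* example only, never compiled */
static void example_vflr_index(u32 abs_vf_id, u32 *reg_idx, u32 *bit_idx)
{
	*reg_idx = abs_vf_id / 32;	/* which GLGEN_VFLRSTAT register */
	*bit_idx = abs_vf_id % 32;	/* which bit within that register */
}
#endif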
4275 
4276 /**
4277  * i40e_validate_vf
4278  * @pf: the physical function
4279  * @vf_id: VF identifier
4280  *
4281  * Check that the VF is enabled and the VSI exists.
4282  *
4283  * Returns 0 on success, negative on failure
4284  **/
4285 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4286 {
4287 	struct i40e_vsi *vsi;
4288 	struct i40e_vf *vf;
4289 	int ret = 0;
4290 
4291 	if (vf_id >= pf->num_alloc_vfs) {
4292 		dev_err(&pf->pdev->dev,
4293 			"Invalid VF Identifier %d\n", vf_id);
4294 		ret = -EINVAL;
4295 		goto err_out;
4296 	}
4297 	vf = &pf->vf[vf_id];
4298 	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4299 	if (!vsi)
4300 		ret = -EINVAL;
4301 err_out:
4302 	return ret;
4303 }
4304 
4305 /**
4306  * i40e_check_vf_init_timeout
4307  * @vf: the virtual function
4308  *
4309  * Check that the VF's initialization is complete and, if it is not,
4310  * wait up to 300 ms for it to finish.
4311  *
4312  * Returns true when VF is initialized, false on timeout
4313  **/
4314 static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
4315 {
4316 	int i;
4317 
4318 	/* When the VF is resetting, wait until it is done.
4319 	 * It can take up to 200 milliseconds, but wait for
4320 	 * up to 300 milliseconds to be safe.
4321 	 */
4322 	for (i = 0; i < 15; i++) {
4323 		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4324 			return true;
4325 		msleep(20);
4326 	}
4327 
4328 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4329 		dev_err(&vf->pf->pdev->dev,
4330 			"VF %d still in reset. Try again.\n", vf->vf_id);
4331 		return false;
4332 	}
4333 
4334 	return true;
4335 }
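/* The open-coded 15 x 20 ms loop above could also be expressed with
 * read_poll_timeout() from <linux/iopoll.h>.  An untested sketch,
 * assuming a sleepable context:
 */
#if 0	/* example only, never compiled */
static bool example_wait_vf_init(struct i40e_vf *vf)
{
	int set;

	return !read_poll_timeout(test_bit, set, set,
				  20 * USEC_PER_MSEC, 300 * USEC_PER_MSEC,
				  false, I40E_VF_STATE_INIT, &vf->vf_states);
}
#endif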
4336 
4337 /**
4338  * i40e_ndo_set_vf_mac
4339  * @netdev: network interface device structure
4340  * @vf_id: VF identifier
4341  * @mac: mac address
4342  *
4343  * program VF mac address
4344  **/
4345 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4346 {
4347 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4348 	struct i40e_vsi *vsi = np->vsi;
4349 	struct i40e_pf *pf = vsi->back;
4350 	struct i40e_mac_filter *f;
4351 	struct i40e_vf *vf;
4352 	int ret = 0;
4353 	struct hlist_node *h;
4354 	int bkt;
4355 
4356 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4357 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4358 		return -EAGAIN;
4359 	}
4360 
4361 	/* validate the request */
4362 	ret = i40e_validate_vf(pf, vf_id);
4363 	if (ret)
4364 		goto error_param;
4365 
4366 	vf = &pf->vf[vf_id];
4367 	if (!i40e_check_vf_init_timeout(vf)) {
4368 		ret = -EAGAIN;
4369 		goto error_param;
4370 	}
4371 	vsi = pf->vsi[vf->lan_vsi_idx];
4372 
4373 	if (is_multicast_ether_addr(mac)) {
4374 		dev_err(&pf->pdev->dev,
4375 			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4376 		ret = -EINVAL;
4377 		goto error_param;
4378 	}
4379 
4380 	/* Lock once here because the add/del filter functions invoked below
4381 	 * require mac_filter_hash_lock to be held
4382 	 */
4383 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4384 
4385 	/* delete the temporary mac address */
4386 	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4387 		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4388 
4389 	/* Delete all the filters for this VSI - we're going to kill it
4390 	 * anyway.
4391 	 */
4392 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4393 		__i40e_del_filter(vsi, f);
4394 
4395 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4396 
4397 	/* program mac filter */
4398 	if (i40e_sync_vsi_filters(vsi)) {
4399 		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4400 		ret = -EIO;
4401 		goto error_param;
4402 	}
4403 	ether_addr_copy(vf->default_lan_addr.addr, mac);
4404 
4405 	if (is_zero_ether_addr(mac)) {
4406 		vf->pf_set_mac = false;
4407 		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4408 	} else {
4409 		vf->pf_set_mac = true;
4410 		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4411 			 mac, vf_id);
4412 	}
4413 
4414 	/* Force the VF interface down so it has to come back up with the
4415 	 * new MAC address
4416 	 */
4417 	i40e_vc_reset_vf(vf, true);
4418 	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4419 
4420 error_param:
4421 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4422 	return ret;
4423 }
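/* This ndo hook is reached from the host side, typically via
 *   ip link set <pf-netdev> vf <vf-id> mac 00:11:22:33:44:55
 * Per the logic above, an all-zero MAC clears pf_set_mac, handing
 * address control back to the VF after the reset.
 */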
4424 
4425 /**
4426  * i40e_ndo_set_vf_port_vlan
4427  * @netdev: network interface device structure
4428  * @vf_id: VF identifier
4429  * @vlan_id: VLAN identifier
4430  * @qos: priority setting
4431  * @vlan_proto: vlan protocol
4432  *
4433  * program VF vlan id and/or qos
4434  **/
4435 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4436 			      u16 vlan_id, u8 qos, __be16 vlan_proto)
4437 {
4438 	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4439 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4440 	bool allmulti = false, alluni = false;
4441 	struct i40e_pf *pf = np->vsi->back;
4442 	struct i40e_vsi *vsi;
4443 	struct i40e_vf *vf;
4444 	int ret = 0;
4445 
4446 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4447 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4448 		return -EAGAIN;
4449 	}
4450 
4451 	/* validate the request */
4452 	ret = i40e_validate_vf(pf, vf_id);
4453 	if (ret)
4454 		goto error_pvid;
4455 
4456 	if (vlan_id > I40E_MAX_VLANID || qos > 7) {
4457 		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4458 		ret = -EINVAL;
4459 		goto error_pvid;
4460 	}
4461 
4462 	if (vlan_proto != htons(ETH_P_8021Q)) {
4463 		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4464 		ret = -EPROTONOSUPPORT;
4465 		goto error_pvid;
4466 	}
4467 
4468 	vf = &pf->vf[vf_id];
4469 	if (!i40e_check_vf_init_timeout(vf)) {
4470 		ret = -EAGAIN;
4471 		goto error_pvid;
4472 	}
4473 	vsi = pf->vsi[vf->lan_vsi_idx];
4474 
4475 	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4476 		/* duplicate request, so just return success */
4477 		goto error_pvid;
4478 
4479 	i40e_vlan_stripping_enable(vsi);
4480 
4481 	/* Locked once because multiple functions below iterate list */
4482 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4483 
4484 	/* Check for condition where there was already a port VLAN ID
4485 	 * filter set and now it is being deleted by setting it to zero.
4486 	 * Additionally check for the condition where there was a port
4487 	 * VLAN but now there is a new and different port VLAN being set.
4488 	 * Before deleting all the old VLAN filters we must add new ones
4489 	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4490 	 * MAC addresses deleted.
4491 	 */
4492 	if ((!(vlan_id || qos) ||
4493 	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4494 	    vsi->info.pvid) {
4495 		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4496 		if (ret) {
4497 			dev_info(&vsi->back->pdev->dev,
4498 				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4499 				 vsi->back->hw.aq.asq_last_status);
4500 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4501 			goto error_pvid;
4502 		}
4503 	}
4504 
4505 	if (vsi->info.pvid) {
4506 		/* remove all filters on the old VLAN */
4507 		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4508 					   VLAN_VID_MASK));
4509 	}
4510 
4511 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4512 
4513 	/* disable promisc modes in case they were enabled */
4514 	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4515 					      allmulti, alluni);
4516 	if (ret) {
4517 		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4518 		goto error_pvid;
4519 	}
4520 
4521 	if (vlan_id || qos)
4522 		ret = i40e_vsi_add_pvid(vsi, vlanprio);
4523 	else
4524 		i40e_vsi_remove_pvid(vsi);
4525 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4526 
4527 	if (vlan_id) {
4528 		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4529 			 vlan_id, qos, vf_id);
4530 
4531 		/* add new VLAN filter for each MAC */
4532 		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4533 		if (ret) {
4534 			dev_info(&vsi->back->pdev->dev,
4535 				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4536 				 vsi->back->hw.aq.asq_last_status);
4537 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4538 			goto error_pvid;
4539 		}
4540 
4541 		/* remove the previously added non-VLAN MAC filters */
4542 		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4543 	}
4544 
4545 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4546 
4547 	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4548 		alluni = true;
4549 
4550 	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4551 		allmulti = true;
4552 
4553 	/* Schedule the worker thread to take care of applying changes */
4554 	i40e_service_event_schedule(vsi->back);
4555 
4556 	if (ret) {
4557 		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4558 		goto error_pvid;
4559 	}
4560 
4561 	/* The Port VLAN needs to be saved across resets the same as the
4562 	 * default LAN MAC address.
4563 	 */
4564 	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4565 
4566 	i40e_vc_reset_vf(vf, true);
4567 	/* During reset the VF got a new VSI, so refresh the pointer. */
4568 	vsi = pf->vsi[vf->lan_vsi_idx];
4569 
4570 	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4571 	if (ret) {
4572 		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4573 		goto error_pvid;
4574 	}
4575 
4576 	ret = 0;
4577 
4578 error_pvid:
4579 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4580 	return ret;
4581 }
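/* Worked example of the vlanprio packing used above, assuming the
 * standard 802.1Q layout (PCP in the top three bits, i.e.
 * I40E_VLAN_PRIORITY_SHIFT == 13): vlan_id 100 with qos 5 packs to
 * 100 | (5 << 13) = 0xa064, the value stored in vsi->info.pvid.
 * From the host this path is typically driven by
 *   ip link set <pf-netdev> vf <vf-id> vlan 100 qos 5
 */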
4582 
4583 /**
4584  * i40e_ndo_set_vf_bw
4585  * @netdev: network interface device structure
4586  * @vf_id: VF identifier
4587  * @min_tx_rate: Minimum Tx rate
4588  * @max_tx_rate: Maximum Tx rate
4589  *
4590  * configure VF Tx rate
4591  **/
4592 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4593 		       int max_tx_rate)
4594 {
4595 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4596 	struct i40e_pf *pf = np->vsi->back;
4597 	struct i40e_vsi *vsi;
4598 	struct i40e_vf *vf;
4599 	int ret = 0;
4600 
4601 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4602 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4603 		return -EAGAIN;
4604 	}
4605 
4606 	/* validate the request */
4607 	ret = i40e_validate_vf(pf, vf_id);
4608 	if (ret)
4609 		goto error;
4610 
4611 	if (min_tx_rate) {
4612 		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4613 			min_tx_rate, vf_id);
4614 		ret = -EINVAL;
4615 		goto error;
4616 	}
4617 
4618 	vf = &pf->vf[vf_id];
4619 	if (!i40e_check_vf_init_timeout(vf)) {
4620 		ret = -EAGAIN;
4621 		goto error;
4622 	}
4623 	vsi = pf->vsi[vf->lan_vsi_idx];
4624 
4625 	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4626 	if (ret)
4627 		goto error;
4628 
4629 	vf->tx_rate = max_tx_rate;
4630 error:
4631 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4632 	return ret;
4633 }
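/* Typically driven from the host via
 *   ip link set <pf-netdev> vf <vf-id> max_tx_rate <Mbps>
 * Only the maximum rate is configurable here; a nonzero min_tx_rate is
 * rejected above with -EINVAL.
 */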
4634 
4635 /**
4636  * i40e_ndo_get_vf_config
4637  * @netdev: network interface device structure
4638  * @vf_id: VF identifier
4639  * @ivi: VF configuration structure
4640  *
4641  * return VF configuration
4642  **/
4643 int i40e_ndo_get_vf_config(struct net_device *netdev,
4644 			   int vf_id, struct ifla_vf_info *ivi)
4645 {
4646 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4647 	struct i40e_vsi *vsi = np->vsi;
4648 	struct i40e_pf *pf = vsi->back;
4649 	struct i40e_vf *vf;
4650 	int ret = 0;
4651 
4652 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4653 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4654 		return -EAGAIN;
4655 	}
4656 
4657 	/* validate the request */
4658 	ret = i40e_validate_vf(pf, vf_id);
4659 	if (ret)
4660 		goto error_param;
4661 
4662 	vf = &pf->vf[vf_id];
4663 	/* first VSI is always the LAN VSI */
4664 	vsi = pf->vsi[vf->lan_vsi_idx];
4665 	if (!vsi) {
4666 		ret = -ENOENT;
4667 		goto error_param;
4668 	}
4669 
4670 	ivi->vf = vf_id;
4671 
4672 	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4673 
4674 	ivi->max_tx_rate = vf->tx_rate;
4675 	ivi->min_tx_rate = 0;
4676 	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4677 	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4678 		   I40E_VLAN_PRIORITY_SHIFT;
4679 	if (!vf->link_forced)
4680 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4681 	else if (vf->link_up)
4682 		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4683 	else
4684 		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4685 	ivi->spoofchk = vf->spoofchk;
4686 	ivi->trusted = vf->trusted;
4687 	ret = 0;
4688 
4689 error_param:
4690 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4691 	return ret;
4692 }
4693 
4694 /**
4695  * i40e_ndo_set_vf_link_state
4696  * @netdev: network interface device structure
4697  * @vf_id: VF identifier
4698  * @link: required link state
4699  *
4700  * Set the link state of a specified VF, regardless of physical link state
4701  **/
4702 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4703 {
4704 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4705 	struct i40e_pf *pf = np->vsi->back;
4706 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
4707 	struct virtchnl_pf_event pfe;
4708 	struct i40e_hw *hw = &pf->hw;
4709 	struct i40e_vf *vf;
4710 	int abs_vf_id;
4711 	int ret = 0;
4712 
4713 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4714 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4715 		return -EAGAIN;
4716 	}
4717 
4718 	/* validate the request */
4719 	if (vf_id >= pf->num_alloc_vfs) {
4720 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4721 		ret = -EINVAL;
4722 		goto error_out;
4723 	}
4724 
4725 	vf = &pf->vf[vf_id];
4726 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4727 
4728 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4729 	pfe.severity = PF_EVENT_SEVERITY_INFO;
4730 
4731 	switch (link) {
4732 	case IFLA_VF_LINK_STATE_AUTO:
4733 		vf->link_forced = false;
4734 		i40e_set_vf_link_state(vf, &pfe, ls);
4735 		break;
4736 	case IFLA_VF_LINK_STATE_ENABLE:
4737 		vf->link_forced = true;
4738 		vf->link_up = true;
4739 		i40e_set_vf_link_state(vf, &pfe, ls);
4740 		break;
4741 	case IFLA_VF_LINK_STATE_DISABLE:
4742 		vf->link_forced = true;
4743 		vf->link_up = false;
4744 		i40e_set_vf_link_state(vf, &pfe, ls);
4745 		break;
4746 	default:
4747 		ret = -EINVAL;
4748 		goto error_out;
4749 	}
4750 	/* Notify the VF of its new link state */
4751 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4752 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
4753 
4754 error_out:
4755 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4756 	return ret;
4757 }
4758 
4759 /**
4760  * i40e_ndo_set_vf_spoofchk
4761  * @netdev: network interface device structure
4762  * @vf_id: VF identifier
4763  * @enable: flag to enable or disable feature
4764  *
4765  * Enable or disable VF spoof checking
4766  **/
4767 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4768 {
4769 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4770 	struct i40e_vsi *vsi = np->vsi;
4771 	struct i40e_pf *pf = vsi->back;
4772 	struct i40e_vsi_context ctxt;
4773 	struct i40e_hw *hw = &pf->hw;
4774 	struct i40e_vf *vf;
4775 	int ret = 0;
4776 
4777 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4778 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4779 		return -EAGAIN;
4780 	}
4781 
4782 	/* validate the request */
4783 	if (vf_id >= pf->num_alloc_vfs) {
4784 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4785 		ret = -EINVAL;
4786 		goto out;
4787 	}
4788 
4789 	vf = &pf->vf[vf_id];
4790 	if (!i40e_check_vf_init_timeout(vf)) {
4791 		ret = -EAGAIN;
4792 		goto out;
4793 	}
4794 
4795 	if (enable == vf->spoofchk)
4796 		goto out;
4797 
4798 	vf->spoofchk = enable;
4799 	memset(&ctxt, 0, sizeof(ctxt));
4800 	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4801 	ctxt.pf_num = pf->hw.pf_id;
4802 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4803 	if (enable)
4804 		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4805 					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4806 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4807 	if (ret) {
4808 		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4809 			ret);
4810 		ret = -EIO;
4811 	}
4812 out:
4813 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4814 	return ret;
4815 }
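/* Note on the disable path above: sec_flags is left at zero while the
 * security section is still marked valid, so a single AQ update clears
 * both the MAC and VLAN anti-spoof checks.  Typically driven via
 *   ip link set <pf-netdev> vf <vf-id> spoofchk { on | off }
 */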
4816 
4817 /**
4818  * i40e_ndo_set_vf_trust
4819  * @netdev: network interface device structure of the pf
4820  * @vf_id: VF identifier
4821  * @setting: trust setting
4822  *
4823  * Enable or disable VF trust setting
4824  **/
4825 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4826 {
4827 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4828 	struct i40e_pf *pf = np->vsi->back;
4829 	struct i40e_vf *vf;
4830 	int ret = 0;
4831 
4832 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4833 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4834 		return -EAGAIN;
4835 	}
4836 
4837 	/* validate the request */
4838 	if (vf_id >= pf->num_alloc_vfs) {
4839 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4840 		ret = -EINVAL;
4841 		goto out;
4842 	}
4843 
4844 	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4845 		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4846 		ret = -EINVAL;
4847 		goto out;
4848 	}
4849 
4850 	vf = &pf->vf[vf_id];
4851 
4852 	if (setting == vf->trusted)
4853 		goto out;
4854 
4855 	vf->trusted = setting;
4856 
4857 	/* request PF to sync mac/vlan filters for the VF */
4858 	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
4859 	pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
4860 
4861 	i40e_vc_reset_vf(vf, true);
4862 	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4863 		 vf_id, setting ? "" : "un");
4864 
4865 	if (vf->adq_enabled && !vf->trusted) {
4866 		dev_info(&pf->pdev->dev,
4867 			 "VF %u no longer Trusted, deleting all cloud filters\n",
4868 			 vf_id);
4869 		i40e_del_all_cloud_filters(vf);
4870 	}
4873 
4874 out:
4875 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4876 	return ret;
4877 }
4878 
4879 /**
4880  * i40e_get_vf_stats - populate some stats for the VF
4881  * @netdev: the netdev of the PF
4882  * @vf_id: the host OS identifier (0-127)
4883  * @vf_stats: pointer to the OS memory to be initialized
4884  */
4885 int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4886 		      struct ifla_vf_stats *vf_stats)
4887 {
4888 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4889 	struct i40e_pf *pf = np->vsi->back;
4890 	struct i40e_eth_stats *stats;
4891 	struct i40e_vsi *vsi;
4892 	struct i40e_vf *vf;
4893 
4894 	/* validate the request */
4895 	if (i40e_validate_vf(pf, vf_id))
4896 		return -EINVAL;
4897 
4898 	vf = &pf->vf[vf_id];
4899 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4900 		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4901 		return -EBUSY;
4902 	}
4903 
4904 	vsi = pf->vsi[vf->lan_vsi_idx];
4905 	if (!vsi)
4906 		return -EINVAL;
4907 
4908 	i40e_update_eth_stats(vsi);
4909 	stats = &vsi->eth_stats;
4910 
4911 	memset(vf_stats, 0, sizeof(*vf_stats));
4912 
4913 	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4914 		stats->rx_multicast;
4915 	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4916 		stats->tx_multicast;
4917 	vf_stats->rx_bytes   = stats->rx_bytes;
4918 	vf_stats->tx_bytes   = stats->tx_bytes;
4919 	vf_stats->broadcast  = stats->rx_broadcast;
4920 	vf_stats->multicast  = stats->rx_multicast;
4921 	vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
4922 	vf_stats->tx_dropped = stats->tx_discards;
4923 
4924 	return 0;
4925 }
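/* These counters surface to userspace through the IFLA_VF_STATS
 * netlink attributes (shown per VF by, e.g., "ip -s link show").
 * rx_packets/tx_packets aggregate unicast, broadcast and multicast
 * as computed above.
 */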
4926