xref: /linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c (revision fefe5dc4afeafe896c90d5b20b605f2759343c3b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf.h"
5 
6 /**
7  * idpf_recv_event_msg - Receive virtchnl event message
8  * @vport: virtual port structure
9  * @ctlq_msg: message to copy from
10  *
11  * Receive virtchnl event message
12  */
13 static void idpf_recv_event_msg(struct idpf_vport *vport,
14 				struct idpf_ctlq_msg *ctlq_msg)
15 {
16 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
17 	struct virtchnl2_event *v2e;
18 	bool link_status;
19 	u32 event;
20 
21 	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
22 	event = le32_to_cpu(v2e->event);
23 
24 	switch (event) {
25 	case VIRTCHNL2_EVENT_LINK_CHANGE:
26 		vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
27 		link_status = v2e->link_status;
28 
29 		if (vport->link_up == link_status)
30 			break;
31 
32 		vport->link_up = link_status;
33 		if (np->state == __IDPF_VPORT_UP) {
34 			if (vport->link_up) {
35 				netif_carrier_on(vport->netdev);
36 				netif_tx_start_all_queues(vport->netdev);
37 			} else {
38 				netif_tx_stop_all_queues(vport->netdev);
39 				netif_carrier_off(vport->netdev);
40 			}
41 		}
42 		break;
43 	default:
44 		dev_err(&vport->adapter->pdev->dev,
45 			"Unknown event %d from PF\n", event);
46 		break;
47 	}
48 }
49 
50 /**
51  * idpf_mb_clean - Reclaim the send mailbox queue entries
52  * @adapter: Driver specific private structure
53  *
54  * Reclaim the send mailbox queue entries to be used to send further messages
55  *
56  * Returns 0 on success, negative on failure
57  */
58 static int idpf_mb_clean(struct idpf_adapter *adapter)
59 {
60 	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
61 	struct idpf_ctlq_msg **q_msg;
62 	struct idpf_dma_mem *dma_mem;
63 	int err;
64 
65 	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
66 	if (!q_msg)
67 		return -ENOMEM;
68 
69 	err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
70 	if (err)
71 		goto err_kfree;
72 
73 	for (i = 0; i < num_q_msg; i++) {
74 		if (!q_msg[i])
75 			continue;
76 		dma_mem = q_msg[i]->ctx.indirect.payload;
77 		if (dma_mem)
78 			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
79 					  dma_mem->va, dma_mem->pa);
80 		kfree(q_msg[i]);
81 		kfree(dma_mem);
82 	}
83 
84 err_kfree:
85 	kfree(q_msg);
86 
87 	return err;
88 }
89 
90 /**
91  * idpf_send_mb_msg - Send message over mailbox
92  * @adapter: Driver specific private structure
93  * @op: virtchnl opcode
94  * @msg_size: size of the payload
95  * @msg: pointer to buffer holding the payload
96  *
97  * Prepares the control queue message and initiates the send API
98  *
99  * Returns 0 on success, negative on failure
100  */
101 int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
102 		     u16 msg_size, u8 *msg)
103 {
104 	struct idpf_ctlq_msg *ctlq_msg;
105 	struct idpf_dma_mem *dma_mem;
106 	int err;
107 
108 	/* If we are here and a reset is detected, nothing much can be
109 	 * done. This thread should silently abort and is expected to
110 	 * be corrected with a new run, either by the user or by driver
111 	 * flows after the reset.
112 	 */
113 	if (idpf_is_reset_detected(adapter))
114 		return 0;
115 
116 	err = idpf_mb_clean(adapter);
117 	if (err)
118 		return err;
119 
120 	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
121 	if (!ctlq_msg)
122 		return -ENOMEM;
123 
124 	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
125 	if (!dma_mem) {
126 		err = -ENOMEM;
127 		goto dma_mem_error;
128 	}
129 
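	/* Mailbox messages always go to the CP (func_id 0); the virtchnl
	 * opcode and return value travel in the message cookie, while the
	 * payload itself is carried indirectly in a DMA buffer.
	 */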
130 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
131 	ctlq_msg->func_id = 0;
132 	ctlq_msg->data_len = msg_size;
133 	ctlq_msg->cookie.mbx.chnl_opcode = op;
134 	ctlq_msg->cookie.mbx.chnl_retval = 0;
135 	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
136 	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
137 					 &dma_mem->pa, GFP_ATOMIC);
138 	if (!dma_mem->va) {
139 		err = -ENOMEM;
140 		goto dma_alloc_error;
141 	}
142 	memcpy(dma_mem->va, msg, msg_size);
143 	ctlq_msg->ctx.indirect.payload = dma_mem;
144 
145 	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
146 	if (err)
147 		goto send_error;
148 
149 	return 0;
150 
151 send_error:
152 	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
153 			  dma_mem->pa);
154 dma_alloc_error:
155 	kfree(dma_mem);
156 dma_mem_error:
157 	kfree(ctlq_msg);
158 
159 	return err;
160 }
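
/* Typical caller pattern, as used by the vport message helpers later in this
 * file: serialize on the response buffer lock, send the request, then wait
 * for idpf_recv_mb_msg() to signal completion. A minimal sketch (with
 * adapter, vport and err in scope, error handling trimmed):
 *
 *	struct virtchnl2_vport v_id = {
 *		.vport_id = cpu_to_le32(vport->vport_id),
 *	};
 *
 *	mutex_lock(&vport->vc_buf_lock);
 *	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
 *			       sizeof(v_id), (u8 *)&v_id);
 *	if (!err)
 *		err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
 *					  IDPF_VC_ENA_VPORT_ERR);
 *	mutex_unlock(&vport->vc_buf_lock);
 */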
161 
162 /**
163  * idpf_find_vport - Find vport pointer from control queue message
164  * @adapter: driver specific private structure
165  * @vport: address of vport pointer to copy the vport from the adapter's vport list
166  * @ctlq_msg: control queue message
167  *
168  * Return 0 on success, error value on failure. This function also checks
169  * for opcodes that expect to receive a payload and returns an error if
170  * that is not the case.
171  */
172 static int idpf_find_vport(struct idpf_adapter *adapter,
173 			   struct idpf_vport **vport,
174 			   struct idpf_ctlq_msg *ctlq_msg)
175 {
176 	bool no_op = false, vid_found = false;
177 	int i, err = 0;
178 	char *vc_msg;
179 	u32 v_id;
180 
181 	vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL);
182 	if (!vc_msg)
183 		return -ENOMEM;
184 
185 	if (ctlq_msg->data_len) {
186 		size_t payload_size = ctlq_msg->ctx.indirect.payload->size;
187 
188 		if (!payload_size) {
189 			dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n");
190 			kfree(vc_msg);
191 
192 			return -EINVAL;
193 		}
194 
195 		memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va,
196 		       min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
197 	}
198 
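	/* Pull the vport_id out of the payload based on the opcode; the first
	 * group of opcodes below is adapter-level and carries no vport_id.
	 */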
199 	switch (ctlq_msg->cookie.mbx.chnl_opcode) {
200 	case VIRTCHNL2_OP_VERSION:
201 	case VIRTCHNL2_OP_GET_CAPS:
202 	case VIRTCHNL2_OP_CREATE_VPORT:
203 	case VIRTCHNL2_OP_SET_SRIOV_VFS:
204 	case VIRTCHNL2_OP_ALLOC_VECTORS:
205 	case VIRTCHNL2_OP_DEALLOC_VECTORS:
206 	case VIRTCHNL2_OP_GET_PTYPE_INFO:
207 		goto free_vc_msg;
208 	case VIRTCHNL2_OP_ENABLE_VPORT:
209 	case VIRTCHNL2_OP_DISABLE_VPORT:
210 	case VIRTCHNL2_OP_DESTROY_VPORT:
211 		v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
212 		break;
213 	case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
214 		v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
215 		break;
216 	case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
217 		v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
218 		break;
219 	case VIRTCHNL2_OP_ENABLE_QUEUES:
220 	case VIRTCHNL2_OP_DISABLE_QUEUES:
221 	case VIRTCHNL2_OP_DEL_QUEUES:
222 		v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
223 		break;
224 	case VIRTCHNL2_OP_ADD_QUEUES:
225 		v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
226 		break;
227 	case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
228 	case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
229 		v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
230 		break;
231 	case VIRTCHNL2_OP_GET_STATS:
232 		v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
233 		break;
234 	case VIRTCHNL2_OP_GET_RSS_LUT:
235 	case VIRTCHNL2_OP_SET_RSS_LUT:
236 		v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
237 		break;
238 	case VIRTCHNL2_OP_GET_RSS_KEY:
239 	case VIRTCHNL2_OP_SET_RSS_KEY:
240 		v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
241 		break;
242 	case VIRTCHNL2_OP_EVENT:
243 		v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
244 		break;
245 	case VIRTCHNL2_OP_LOOPBACK:
246 		v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
247 		break;
248 	case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
249 		v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
250 		break;
251 	case VIRTCHNL2_OP_ADD_MAC_ADDR:
252 	case VIRTCHNL2_OP_DEL_MAC_ADDR:
253 		v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
254 		break;
255 	default:
256 		no_op = true;
257 		break;
258 	}
259 
260 	if (no_op)
261 		goto free_vc_msg;
262 
263 	for (i = 0; i < idpf_get_max_vports(adapter); i++) {
264 		if (adapter->vport_ids[i] == v_id) {
265 			vid_found = true;
266 			break;
267 		}
268 	}
269 
270 	if (vid_found)
271 		*vport = adapter->vports[i];
272 	else
273 		err = -EINVAL;
274 
275 free_vc_msg:
276 	kfree(vc_msg);
277 
278 	return err;
279 }
280 
281 /**
282  * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer.
283  * @adapter: driver specific private structure
284  * @vport: virtual port structure
285  * @ctlq_msg: msg to copy from
286  * @err_enum: err bit to set on error
287  *
288  * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success,
289  * negative on failure.
290  */
291 static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter,
292 				    struct idpf_vport *vport,
293 				    struct idpf_ctlq_msg *ctlq_msg,
294 				    enum idpf_vport_vc_state err_enum)
295 {
296 	if (ctlq_msg->cookie.mbx.chnl_retval) {
297 		if (vport)
298 			set_bit(err_enum, vport->vc_state);
299 		else
300 			set_bit(err_enum, adapter->vc_state);
301 
302 		return -EINVAL;
303 	}
304 
305 	if (vport)
306 		memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va,
307 		       min_t(int, ctlq_msg->ctx.indirect.payload->size,
308 			     IDPF_CTLQ_MAX_BUF_LEN));
309 	else
310 		memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
311 		       min_t(int, ctlq_msg->ctx.indirect.payload->size,
312 			     IDPF_CTLQ_MAX_BUF_LEN));
313 
314 	return 0;
315 }
316 
317 /**
318  * idpf_recv_vchnl_op - helper function with common logic when handling the
319  * reception of VIRTCHNL OPs.
320  * @adapter: driver specific private structure
321  * @vport: virtual port structure
322  * @ctlq_msg: msg to copy from
323  * @state: state bit used on timeout check
324  * @err_state: err bit to set on error
325  */
326 static void idpf_recv_vchnl_op(struct idpf_adapter *adapter,
327 			       struct idpf_vport *vport,
328 			       struct idpf_ctlq_msg *ctlq_msg,
329 			       enum idpf_vport_vc_state state,
330 			       enum idpf_vport_vc_state err_state)
331 {
332 	wait_queue_head_t *vchnl_wq;
333 	int err;
334 
335 	if (vport)
336 		vchnl_wq = &vport->vchnl_wq;
337 	else
338 		vchnl_wq = &adapter->vchnl_wq;
339 
340 	err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state);
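	/* Only set the state bit and wake the queue when somebody is actually
	 * waiting on it; with no sleeper there is nobody to consume the
	 * result, so just warn (or clear the error bit) below.
	 */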
341 	if (wq_has_sleeper(vchnl_wq)) {
342 		if (vport)
343 			set_bit(state, vport->vc_state);
344 		else
345 			set_bit(state, adapter->vc_state);
346 
347 		wake_up(vchnl_wq);
348 	} else {
349 		if (!err) {
350 			dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n",
351 				 ctlq_msg->cookie.mbx.chnl_opcode);
352 		} else {
353 			/* Clear the errors since there is no sleeper to pass
354 			 * them on
355 			 */
356 			if (vport)
357 				clear_bit(err_state, vport->vc_state);
358 			else
359 				clear_bit(err_state, adapter->vc_state);
360 		}
361 	}
362 }
363 
364 /**
365  * idpf_recv_mb_msg - Receive message over mailbox
366  * @adapter: Driver specific private structure
367  * @op: virtchannel operation code
368  * @msg: Received message holding buffer
369  * @msg_size: message size
370  *
371  * Receives a control queue message and posts the receive buffer. Returns 0
372  * on success and negative on failure.
373  */
374 int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
375 		     void *msg, int msg_size)
376 {
377 	struct idpf_vport *vport = NULL;
378 	struct idpf_ctlq_msg ctlq_msg;
379 	struct idpf_dma_mem *dma_mem;
380 	bool work_done = false;
381 	int num_retry = 2000;
382 	u16 num_q_msg;
383 	int err;
384 
385 	while (1) {
386 		struct idpf_vport_config *vport_config;
387 		int payload_size = 0;
388 
389 		/* Try to get one message */
390 		num_q_msg = 1;
391 		dma_mem = NULL;
392 		err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
393 		/* If there is no message, decide whether we have to retry
394 		 * based on the opcode
395 		 */
396 		if (err || !num_q_msg) {
397 			/* num_retry is sized to account for responses delayed
398 			 * by a large number of VF mailbox messages. If a
399 			 * mailbox message is received from the other side, we
400 			 * come out of the sleep cycle immediately; otherwise
401 			 * we wait for more time.
402 			 */
403 			if (!op || !num_retry--)
404 				break;
405 			if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
406 				err = -EIO;
407 				break;
408 			}
409 			msleep(20);
410 			continue;
411 		}
412 
413 		/* If we are here, a message has been received. Check whether
414 		 * we are looking for a specific message based on the opcode;
415 		 * if it is different, ignore it and post buffers
416 		 */
417 		if (op && ctlq_msg.cookie.mbx.chnl_opcode != op)
418 			goto post_buffs;
419 
420 		err = idpf_find_vport(adapter, &vport, &ctlq_msg);
421 		if (err)
422 			goto post_buffs;
423 
424 		if (ctlq_msg.data_len)
425 			payload_size = ctlq_msg.ctx.indirect.payload->size;
426 
427 		/* All conditions are met. Either the requested message was
428 		 * received or we received a message that needs to be processed
429 		 */
430 		switch (ctlq_msg.cookie.mbx.chnl_opcode) {
431 		case VIRTCHNL2_OP_VERSION:
432 		case VIRTCHNL2_OP_GET_CAPS:
433 			if (ctlq_msg.cookie.mbx.chnl_retval) {
434 				dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
435 					ctlq_msg.cookie.mbx.chnl_opcode,
436 					ctlq_msg.cookie.mbx.chnl_retval);
437 				err = -EBADMSG;
438 			} else if (msg) {
439 				memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
440 				       min_t(int, payload_size, msg_size));
441 			}
442 			work_done = true;
443 			break;
444 		case VIRTCHNL2_OP_CREATE_VPORT:
445 			idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
446 					   IDPF_VC_CREATE_VPORT,
447 					   IDPF_VC_CREATE_VPORT_ERR);
448 			break;
449 		case VIRTCHNL2_OP_ENABLE_VPORT:
450 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
451 					   IDPF_VC_ENA_VPORT,
452 					   IDPF_VC_ENA_VPORT_ERR);
453 			break;
454 		case VIRTCHNL2_OP_DISABLE_VPORT:
455 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
456 					   IDPF_VC_DIS_VPORT,
457 					   IDPF_VC_DIS_VPORT_ERR);
458 			break;
459 		case VIRTCHNL2_OP_DESTROY_VPORT:
460 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
461 					   IDPF_VC_DESTROY_VPORT,
462 					   IDPF_VC_DESTROY_VPORT_ERR);
463 			break;
464 		case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
465 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
466 					   IDPF_VC_CONFIG_TXQ,
467 					   IDPF_VC_CONFIG_TXQ_ERR);
468 			break;
469 		case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
470 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
471 					   IDPF_VC_CONFIG_RXQ,
472 					   IDPF_VC_CONFIG_RXQ_ERR);
473 			break;
474 		case VIRTCHNL2_OP_ENABLE_QUEUES:
475 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
476 					   IDPF_VC_ENA_QUEUES,
477 					   IDPF_VC_ENA_QUEUES_ERR);
478 			break;
479 		case VIRTCHNL2_OP_DISABLE_QUEUES:
480 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
481 					   IDPF_VC_DIS_QUEUES,
482 					   IDPF_VC_DIS_QUEUES_ERR);
483 			break;
484 		case VIRTCHNL2_OP_ADD_QUEUES:
485 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
486 					   IDPF_VC_ADD_QUEUES,
487 					   IDPF_VC_ADD_QUEUES_ERR);
488 			break;
489 		case VIRTCHNL2_OP_DEL_QUEUES:
490 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
491 					   IDPF_VC_DEL_QUEUES,
492 					   IDPF_VC_DEL_QUEUES_ERR);
493 			break;
494 		case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
495 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
496 					   IDPF_VC_MAP_IRQ,
497 					   IDPF_VC_MAP_IRQ_ERR);
498 			break;
499 		case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
500 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
501 					   IDPF_VC_UNMAP_IRQ,
502 					   IDPF_VC_UNMAP_IRQ_ERR);
503 			break;
504 		case VIRTCHNL2_OP_GET_STATS:
505 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
506 					   IDPF_VC_GET_STATS,
507 					   IDPF_VC_GET_STATS_ERR);
508 			break;
509 		case VIRTCHNL2_OP_GET_RSS_LUT:
510 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
511 					   IDPF_VC_GET_RSS_LUT,
512 					   IDPF_VC_GET_RSS_LUT_ERR);
513 			break;
514 		case VIRTCHNL2_OP_SET_RSS_LUT:
515 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
516 					   IDPF_VC_SET_RSS_LUT,
517 					   IDPF_VC_SET_RSS_LUT_ERR);
518 			break;
519 		case VIRTCHNL2_OP_GET_RSS_KEY:
520 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
521 					   IDPF_VC_GET_RSS_KEY,
522 					   IDPF_VC_GET_RSS_KEY_ERR);
523 			break;
524 		case VIRTCHNL2_OP_SET_RSS_KEY:
525 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
526 					   IDPF_VC_SET_RSS_KEY,
527 					   IDPF_VC_SET_RSS_KEY_ERR);
528 			break;
529 		case VIRTCHNL2_OP_SET_SRIOV_VFS:
530 			idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
531 					   IDPF_VC_SET_SRIOV_VFS,
532 					   IDPF_VC_SET_SRIOV_VFS_ERR);
533 			break;
534 		case VIRTCHNL2_OP_ALLOC_VECTORS:
535 			idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
536 					   IDPF_VC_ALLOC_VECTORS,
537 					   IDPF_VC_ALLOC_VECTORS_ERR);
538 			break;
539 		case VIRTCHNL2_OP_DEALLOC_VECTORS:
540 			idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
541 					   IDPF_VC_DEALLOC_VECTORS,
542 					   IDPF_VC_DEALLOC_VECTORS_ERR);
543 			break;
544 		case VIRTCHNL2_OP_GET_PTYPE_INFO:
545 			idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
546 					   IDPF_VC_GET_PTYPE_INFO,
547 					   IDPF_VC_GET_PTYPE_INFO_ERR);
548 			break;
549 		case VIRTCHNL2_OP_LOOPBACK:
550 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
551 					   IDPF_VC_LOOPBACK_STATE,
552 					   IDPF_VC_LOOPBACK_STATE_ERR);
553 			break;
554 		case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
555 			/* This message can only be sent asynchronously. As
556 			 * such we'll have lost the context in which it was
557 			 * called and thus can only really report if it looks
558 			 * like an error occurred. Don't bother setting ERR bit
559 			 * or waking chnl_wq since no work queue will be waiting
560 			 * to read the message.
561 			 */
562 			if (ctlq_msg.cookie.mbx.chnl_retval) {
563 				dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
564 					ctlq_msg.cookie.mbx.chnl_retval);
565 			}
566 			break;
567 		case VIRTCHNL2_OP_ADD_MAC_ADDR:
568 			vport_config = adapter->vport_config[vport->idx];
569 			if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
570 					       vport_config->flags)) {
571 				/* Message was sent asynchronously. We don't
572 				 * normally print errors here, instead
573 				 * prefer to handle errors in the function
574 				 * calling wait_for_event. However, if
575 				 * asynchronous, the context in which the
576 				 * message was sent is lost. We can't really do
577 				 * anything about it at this point, but we
578 				 * should at a minimum indicate that it looks
579 				 * like something went wrong. Also don't bother
580 				 * setting ERR bit or waking vchnl_wq since no
581 				 * one will be waiting to read the async
582 				 * message.
583 				 */
584 				if (ctlq_msg.cookie.mbx.chnl_retval)
585 					dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
586 						ctlq_msg.cookie.mbx.chnl_retval);
587 				break;
588 			}
589 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
590 					   IDPF_VC_ADD_MAC_ADDR,
591 					   IDPF_VC_ADD_MAC_ADDR_ERR);
592 			break;
593 		case VIRTCHNL2_OP_DEL_MAC_ADDR:
594 			vport_config = adapter->vport_config[vport->idx];
595 			if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
596 					       vport_config->flags)) {
597 				/* Message was sent asynchronously like the
598 				 * VIRTCHNL2_OP_ADD_MAC_ADDR
599 				 */
600 				if (ctlq_msg.cookie.mbx.chnl_retval)
601 					dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
602 						ctlq_msg.cookie.mbx.chnl_retval);
603 				break;
604 			}
605 			idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
606 					   IDPF_VC_DEL_MAC_ADDR,
607 					   IDPF_VC_DEL_MAC_ADDR_ERR);
608 			break;
609 		case VIRTCHNL2_OP_EVENT:
610 			idpf_recv_event_msg(vport, &ctlq_msg);
611 			break;
612 		default:
613 			dev_warn(&adapter->pdev->dev,
614 				 "Unhandled virtchnl response %d\n",
615 				 ctlq_msg.cookie.mbx.chnl_opcode);
616 			break;
617 		}
618 
619 post_buffs:
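		/* Hand the payload buffer (if any) back to the RX queue so it
		 * can be reused; a message without a payload has nothing to
		 * post.
		 */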
620 		if (ctlq_msg.data_len)
621 			dma_mem = ctlq_msg.ctx.indirect.payload;
622 		else
623 			num_q_msg = 0;
624 
625 		err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq,
626 					      &num_q_msg, &dma_mem);
627 		/* If the post failed, free the only buffer we supplied */
628 		if (err && dma_mem)
629 			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
630 					  dma_mem->va, dma_mem->pa);
631 
632 		/* Applies only if we are looking for a specific opcode */
633 		if (work_done)
634 			break;
635 	}
636 
637 	return err;
638 }
639 
640 /**
641  * __idpf_wait_for_event - wrapper function for waiting on a virtchannel response
642  * @adapter: Driver private data structure
643  * @vport: virtual port structure
644  * @state: check on state upon timeout
645  * @err_check: check if this specific error bit is set
646  * @timeout: Max time to wait
647  *
648  * Checks if the state bit is set before the timeout expires.  Returns 0 on success,
649  * negative on failure.
650  */
651 static int __idpf_wait_for_event(struct idpf_adapter *adapter,
652 				 struct idpf_vport *vport,
653 				 enum idpf_vport_vc_state state,
654 				 enum idpf_vport_vc_state err_check,
655 				 int timeout)
656 {
657 	int time_to_wait, num_waits;
658 	wait_queue_head_t *vchnl_wq;
659 	unsigned long *vc_state;
660 
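	/* Split the requested timeout into IDPF_MAX_WAIT sized waits so the
	 * loop below can re-check for a pending reset between sleeps.
	 */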
661 	time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
662 	num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
663 
664 	if (vport) {
665 		vchnl_wq = &vport->vchnl_wq;
666 		vc_state = vport->vc_state;
667 	} else {
668 		vchnl_wq = &adapter->vchnl_wq;
669 		vc_state = adapter->vc_state;
670 	}
671 
672 	while (num_waits) {
673 		int event;
674 
675 		/* If we are here and a reset is detected, do not wait but
676 		 * return. Reset timing is out of the driver's control, so
677 		 * if the underlying HW mailbox is gone while we are cleaning
678 		 * up resources as part of the reset, waiting on mailbox
679 		 * messages is not meaningful
680 		 */
681 		if (idpf_is_reset_detected(adapter))
682 			return 0;
683 
684 		event = wait_event_timeout(*vchnl_wq,
685 					   test_and_clear_bit(state, vc_state),
686 					   msecs_to_jiffies(time_to_wait));
687 		if (event) {
688 			if (test_and_clear_bit(err_check, vc_state)) {
689 				dev_err(&adapter->pdev->dev, "VC response error %s\n",
690 					idpf_vport_vc_state_str[err_check]);
691 
692 				return -EINVAL;
693 			}
694 
695 			return 0;
696 		}
697 		num_waits--;
698 	}
699 
700 	/* Timeout occurred */
701 	dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
702 		idpf_vport_vc_state_str[state]);
703 
704 	return -ETIMEDOUT;
705 }
706 
707 /**
708  * idpf_min_wait_for_event - wait for virtchannel response
709  * @adapter: Driver private data structure
710  * @vport: virtual port structure
711  * @state: check on state upon timeout
712  * @err_check: check if this specific error bit is set
713  *
714  * Returns 0 on success, negative on failure.
715  */
716 static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
717 				   struct idpf_vport *vport,
718 				   enum idpf_vport_vc_state state,
719 				   enum idpf_vport_vc_state err_check)
720 {
721 	return __idpf_wait_for_event(adapter, vport, state, err_check,
722 				     IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
723 }
724 
725 /**
726  * idpf_wait_for_event - wait for virtchannel response
727  * @adapter: Driver private data structure
728  * @vport: virtual port structure
729  * @state: check on state upon timeout after 500ms
730  * @err_check: check if this specific error bit is set
731  *
732  * Returns 0 on success, negative on failure.
733  */
734 static int idpf_wait_for_event(struct idpf_adapter *adapter,
735 			       struct idpf_vport *vport,
736 			       enum idpf_vport_vc_state state,
737 			       enum idpf_vport_vc_state err_check)
738 {
739 	/* The timeout is increased in the __IDPF_INIT_SW flow to account for
740 	 * a large number of VF mailbox message responses. When a message is
741 	 * received on the mailbox, this thread is woken up by idpf_recv_mb_msg
742 	 * before the timeout expires. Only in the error case, i.e. if no
743 	 * message is received on the mailbox, do we wait for the complete
744 	 * timeout, which is unlikely to happen.
745 	 */
746 	return __idpf_wait_for_event(adapter, vport, state, err_check,
747 				     IDPF_WAIT_FOR_EVENT_TIMEO);
748 }
749 
750 /**
751  * idpf_wait_for_marker_event - wait for software marker response
752  * @vport: virtual port data structure
753  *
754  * Returns 0 on success, negative on failure.
755  **/
756 static int idpf_wait_for_marker_event(struct idpf_vport *vport)
757 {
758 	int event;
759 	int i;
760 
761 	for (i = 0; i < vport->num_txq; i++)
762 		set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);
763 
764 	event = wait_event_timeout(vport->sw_marker_wq,
765 				   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
766 						      vport->flags),
767 				   msecs_to_jiffies(500));
768 
769 	for (i = 0; i < vport->num_txq; i++)
770 		clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
771 
772 	if (event)
773 		return 0;
774 
775 	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
776 
777 	return -ETIMEDOUT;
778 }
779 
780 /**
781  * idpf_send_ver_msg - send virtchnl version message
782  * @adapter: Driver specific private structure
783  *
784  * Send virtchnl version message.  Returns 0 on success, negative on failure.
785  */
786 static int idpf_send_ver_msg(struct idpf_adapter *adapter)
787 {
788 	struct virtchnl2_version_info vvi;
789 
790 	if (adapter->virt_ver_maj) {
791 		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
792 		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
793 	} else {
794 		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
795 		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
796 	}
797 
798 	return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
799 				(u8 *)&vvi);
800 }
801 
802 /**
803  * idpf_recv_ver_msg - Receive virtchnl version message
804  * @adapter: Driver specific private structure
805  *
806  * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
807  * to send version message again, otherwise negative on failure.
808  */
809 static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
810 {
811 	struct virtchnl2_version_info vvi;
812 	u32 major, minor;
813 	int err;
814 
815 	err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
816 			       sizeof(vvi));
817 	if (err)
818 		return err;
819 
820 	major = le32_to_cpu(vvi.major);
821 	minor = le32_to_cpu(vvi.minor);
822 
823 	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
824 		dev_warn(&adapter->pdev->dev,
825 			 "Virtchnl major version (%d) greater than supported\n",
826 			 major);
827 
828 		return -EINVAL;
829 	}
830 
831 	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
832 	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
833 		dev_warn(&adapter->pdev->dev,
834 			 "Virtchnl minor version (%d) didn't match\n", minor);
835 
836 	/* If we have a mismatch, resend version to update receiver on what
837 	 * version we will use.
838 	 */
839 	if (!adapter->virt_ver_maj &&
840 	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
841 	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
842 		err = -EAGAIN;
843 
844 	adapter->virt_ver_maj = major;
845 	adapter->virt_ver_min = minor;
846 
847 	return err;
848 }
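
/* The -EAGAIN contract above implies a simple negotiation loop on the
 * caller's side. A minimal sketch of how the init flow might drive it
 * (hypothetical caller, with adapter and err in scope):
 *
 *	do {
 *		err = idpf_send_ver_msg(adapter);
 *		if (err)
 *			break;
 *		err = idpf_recv_ver_msg(adapter);
 *	} while (err == -EAGAIN);
 */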
849 
850 /**
851  * idpf_send_get_caps_msg - Send virtchnl get capabilities message
852  * @adapter: Driver specific private structure
853  *
854  * Send virtchnl get capabilities message. Returns 0 on success, negative on
855  * failure.
856  */
857 static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
858 {
859 	struct virtchnl2_get_capabilities caps = { };
860 
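	/* Advertise every offload the driver can make use of; the CP replies
	 * with the subset it actually grants, which is what
	 * idpf_recv_get_caps_msg() stores in adapter->caps.
	 */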
861 	caps.csum_caps =
862 		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4	|
863 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|
864 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	|
865 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|
866 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|
867 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	|
868 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|
869 			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4	|
870 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|
871 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|
872 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|
873 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|
874 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	|
875 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	|
876 			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
877 			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
878 			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
879 			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
880 			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);
881 
882 	caps.seg_caps =
883 		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP		|
884 			    VIRTCHNL2_CAP_SEG_IPV4_UDP		|
885 			    VIRTCHNL2_CAP_SEG_IPV4_SCTP		|
886 			    VIRTCHNL2_CAP_SEG_IPV6_TCP		|
887 			    VIRTCHNL2_CAP_SEG_IPV6_UDP		|
888 			    VIRTCHNL2_CAP_SEG_IPV6_SCTP		|
889 			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
890 
891 	caps.rss_caps =
892 		cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP		|
893 			    VIRTCHNL2_CAP_RSS_IPV4_UDP		|
894 			    VIRTCHNL2_CAP_RSS_IPV4_SCTP		|
895 			    VIRTCHNL2_CAP_RSS_IPV4_OTHER	|
896 			    VIRTCHNL2_CAP_RSS_IPV6_TCP		|
897 			    VIRTCHNL2_CAP_RSS_IPV6_UDP		|
898 			    VIRTCHNL2_CAP_RSS_IPV6_SCTP		|
899 			    VIRTCHNL2_CAP_RSS_IPV6_OTHER);
900 
901 	caps.hsplit_caps =
902 		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|
903 			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);
904 
905 	caps.rsc_caps =
906 		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP		|
907 			    VIRTCHNL2_CAP_RSC_IPV6_TCP);
908 
909 	caps.other_caps =
910 		cpu_to_le64(VIRTCHNL2_CAP_SRIOV			|
911 			    VIRTCHNL2_CAP_MACFILTER		|
912 			    VIRTCHNL2_CAP_SPLITQ_QSCHED		|
913 			    VIRTCHNL2_CAP_PROMISC		|
914 			    VIRTCHNL2_CAP_LOOPBACK);
915 
916 	return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
917 				(u8 *)&caps);
918 }
919 
920 /**
921  * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
922  * @adapter: Driver specific private structure
923  *
924  * Receive virtchnl get capabilities message. Returns 0 on success, negative on
925  * failure.
926  */
927 static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
928 {
929 	return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
930 				sizeof(struct virtchnl2_get_capabilities));
931 }
932 
933 /**
934  * idpf_vport_alloc_max_qs - Allocate max queues for a vport
935  * @adapter: Driver specific private structure
936  * @max_q: vport max queue structure
937  */
938 int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
939 			    struct idpf_vport_max_q *max_q)
940 {
941 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
942 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
943 	u16 default_vports = idpf_get_default_vports(adapter);
944 	int max_rx_q, max_tx_q;
945 
946 	mutex_lock(&adapter->queue_lock);
947 
948 	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
949 	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
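	/* Each default vport gets an equal share of the device queues, capped
	 * at IDPF_MAX_Q; any vport allocated beyond the default set only gets
	 * the minimum.
	 */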
950 	if (adapter->num_alloc_vports < default_vports) {
951 		max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
952 		max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
953 	} else {
954 		max_q->max_rxq = IDPF_MIN_Q;
955 		max_q->max_txq = IDPF_MIN_Q;
956 	}
957 	max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
958 	max_q->max_complq = max_q->max_txq;
959 
960 	if (avail_queues->avail_rxq < max_q->max_rxq ||
961 	    avail_queues->avail_txq < max_q->max_txq ||
962 	    avail_queues->avail_bufq < max_q->max_bufq ||
963 	    avail_queues->avail_complq < max_q->max_complq) {
964 		mutex_unlock(&adapter->queue_lock);
965 
966 		return -EINVAL;
967 	}
968 
969 	avail_queues->avail_rxq -= max_q->max_rxq;
970 	avail_queues->avail_txq -= max_q->max_txq;
971 	avail_queues->avail_bufq -= max_q->max_bufq;
972 	avail_queues->avail_complq -= max_q->max_complq;
973 
974 	mutex_unlock(&adapter->queue_lock);
975 
976 	return 0;
977 }
978 
979 /**
980  * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
981  * @adapter: Driver specific private structure
982  * @max_q: vport max queue structure
983  */
984 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
985 			       struct idpf_vport_max_q *max_q)
986 {
987 	struct idpf_avail_queue_info *avail_queues;
988 
989 	mutex_lock(&adapter->queue_lock);
990 	avail_queues = &adapter->avail_queues;
991 
992 	avail_queues->avail_rxq += max_q->max_rxq;
993 	avail_queues->avail_txq += max_q->max_txq;
994 	avail_queues->avail_bufq += max_q->max_bufq;
995 	avail_queues->avail_complq += max_q->max_complq;
996 
997 	mutex_unlock(&adapter->queue_lock);
998 }
999 
1000 /**
1001  * idpf_init_avail_queues - Initialize available queues on the device
1002  * @adapter: Driver specific private structure
1003  */
1004 static void idpf_init_avail_queues(struct idpf_adapter *adapter)
1005 {
1006 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1007 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1008 
1009 	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
1010 	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
1011 	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
1012 	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
1013 }
1014 
1015 /**
1016  * idpf_get_reg_intr_vecs - Get vector queue register offset
1017  * @vport: virtual port structure
1018  * @reg_vals: array to store the register offsets in
1019  *
1020  * Returns number of registers that got populated
1021  */
1022 int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
1023 			   struct idpf_vec_regs *reg_vals)
1024 {
1025 	struct virtchnl2_vector_chunks *chunks;
1026 	struct idpf_vec_regs reg_val;
1027 	u16 num_vchunks, num_vec;
1028 	int num_regs = 0, i, j;
1029 
1030 	chunks = &vport->adapter->req_vec_chunks->vchunks;
1031 	num_vchunks = le16_to_cpu(chunks->num_vchunks);
1032 
1033 	for (j = 0; j < num_vchunks; j++) {
1034 		struct virtchnl2_vector_chunk *chunk;
1035 		u32 dynctl_reg_spacing;
1036 		u32 itrn_reg_spacing;
1037 
1038 		chunk = &chunks->vchunks[j];
1039 		num_vec = le16_to_cpu(chunk->num_vectors);
1040 		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
1041 		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
1042 		reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);
1043 
1044 		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
1045 		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);
1046 
1047 		for (i = 0; i < num_vec; i++) {
1048 			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
1049 			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
1050 			reg_vals[num_regs].itrn_index_spacing =
1051 						reg_val.itrn_index_spacing;
1052 
1053 			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
1054 			reg_val.itrn_reg += itrn_reg_spacing;
1055 			num_regs++;
1056 		}
1057 	}
1058 
1059 	return num_regs;
1060 }
1061 
1062 /**
1063  * idpf_vport_get_q_reg - Get the queue registers for the vport
1064  * @reg_vals: register offsets to be filled in
1065  * @num_regs: number of registers we expect to fill
1066  * @q_type: queue model
1067  * @chunks: queue regs received over mailbox
1068  *
1069  * This function parses the queue register offsets from the queue register
1070  * chunk information, with a specific queue type and stores it into the array
1071  * passed as an argument. It returns the actual number of queue registers that
1072  * are filled.
1073  */
1074 static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
1075 				struct virtchnl2_queue_reg_chunks *chunks)
1076 {
1077 	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
1078 	int reg_filled = 0, i;
1079 	u32 reg_val;
1080 
1081 	while (num_chunks--) {
1082 		struct virtchnl2_queue_reg_chunk *chunk;
1083 		u16 num_q;
1084 
1085 		chunk = &chunks->chunks[num_chunks];
1086 		if (le32_to_cpu(chunk->type) != q_type)
1087 			continue;
1088 
1089 		num_q = le32_to_cpu(chunk->num_queues);
1090 		reg_val = le64_to_cpu(chunk->qtail_reg_start);
1091 		for (i = 0; i < num_q && reg_filled < num_regs ; i++) {
1092 			reg_vals[reg_filled++] = reg_val;
1093 			reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
1094 		}
1095 	}
1096 
1097 	return reg_filled;
1098 }
1099 
1100 /**
1101  * __idpf_queue_reg_init - initialize queue registers
1102  * @vport: virtual port structure
1103  * @reg_vals: registers we are initializing
1104  * @num_regs: how many registers there are in total
1105  * @q_type: queue model
1106  *
1107  * Return number of queues that are initialized
1108  */
1109 static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
1110 				 int num_regs, u32 q_type)
1111 {
1112 	struct idpf_adapter *adapter = vport->adapter;
1113 	struct idpf_queue *q;
1114 	int i, j, k = 0;
1115 
1116 	switch (q_type) {
1117 	case VIRTCHNL2_QUEUE_TYPE_TX:
1118 		for (i = 0; i < vport->num_txq_grp; i++) {
1119 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1120 
1121 			for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
1122 				tx_qgrp->txqs[j]->tail =
1123 					idpf_get_reg_addr(adapter, reg_vals[k]);
1124 		}
1125 		break;
1126 	case VIRTCHNL2_QUEUE_TYPE_RX:
1127 		for (i = 0; i < vport->num_rxq_grp; i++) {
1128 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1129 			u16 num_rxq = rx_qgrp->singleq.num_rxq;
1130 
1131 			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
1132 				q = rx_qgrp->singleq.rxqs[j];
1133 				q->tail = idpf_get_reg_addr(adapter,
1134 							    reg_vals[k]);
1135 			}
1136 		}
1137 		break;
1138 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
1139 		for (i = 0; i < vport->num_rxq_grp; i++) {
1140 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1141 			u8 num_bufqs = vport->num_bufqs_per_qgrp;
1142 
1143 			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
1144 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1145 				q->tail = idpf_get_reg_addr(adapter,
1146 							    reg_vals[k]);
1147 			}
1148 		}
1149 		break;
1150 	default:
1151 		break;
1152 	}
1153 
1154 	return k;
1155 }
1156 
1157 /**
1158  * idpf_queue_reg_init - initialize queue registers
1159  * @vport: virtual port structure
1160  *
1161  * Return 0 on success, negative on failure
1162  */
1163 int idpf_queue_reg_init(struct idpf_vport *vport)
1164 {
1165 	struct virtchnl2_create_vport *vport_params;
1166 	struct virtchnl2_queue_reg_chunks *chunks;
1167 	struct idpf_vport_config *vport_config;
1168 	u16 vport_idx = vport->idx;
1169 	int num_regs, ret = 0;
1170 	u32 *reg_vals;
1171 
1172 	/* We should never deal with more than 256 queues of the same type */
1173 	reg_vals = kcalloc(IDPF_LARGE_MAX_Q, sizeof(u32), GFP_KERNEL);
1174 	if (!reg_vals)
1175 		return -ENOMEM;
1176 
1177 	vport_config = vport->adapter->vport_config[vport_idx];
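	/* Use the queue chunks from an earlier ADD_QUEUES response when extra
	 * queues were requested at runtime, otherwise fall back to the chunks
	 * returned by CREATE_VPORT.
	 */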
1178 	if (vport_config->req_qs_chunks) {
1179 		struct virtchnl2_add_queues *vc_aq =
1180 		  (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
1181 		chunks = &vc_aq->chunks;
1182 	} else {
1183 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
1184 		chunks = &vport_params->chunks;
1185 	}
1186 
1187 	/* Initialize Tx queue tail register address */
1188 	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1189 					VIRTCHNL2_QUEUE_TYPE_TX,
1190 					chunks);
1191 	if (num_regs < vport->num_txq) {
1192 		ret = -EINVAL;
1193 		goto free_reg_vals;
1194 	}
1195 
1196 	num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1197 					 VIRTCHNL2_QUEUE_TYPE_TX);
1198 	if (num_regs < vport->num_txq) {
1199 		ret = -EINVAL;
1200 		goto free_reg_vals;
1201 	}
1202 
1203 	/* Initialize Rx/buffer queue tail register address based on Rx queue
1204 	 * model
1205 	 */
1206 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1207 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1208 						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
1209 						chunks);
1210 		if (num_regs < vport->num_bufq) {
1211 			ret = -EINVAL;
1212 			goto free_reg_vals;
1213 		}
1214 
1215 		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1216 						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1217 		if (num_regs < vport->num_bufq) {
1218 			ret = -EINVAL;
1219 			goto free_reg_vals;
1220 		}
1221 	} else {
1222 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1223 						VIRTCHNL2_QUEUE_TYPE_RX,
1224 						chunks);
1225 		if (num_regs < vport->num_rxq) {
1226 			ret = -EINVAL;
1227 			goto free_reg_vals;
1228 		}
1229 
1230 		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1231 						 VIRTCHNL2_QUEUE_TYPE_RX);
1232 		if (num_regs < vport->num_rxq) {
1233 			ret = -EINVAL;
1234 			goto free_reg_vals;
1235 		}
1236 	}
1237 
1238 free_reg_vals:
1239 	kfree(reg_vals);
1240 
1241 	return ret;
1242 }
1243 
1244 /**
1245  * idpf_send_create_vport_msg - Send virtchnl create vport message
1246  * @adapter: Driver specific private structure
1247  * @max_q: vport max queue info
1248  *
1249  * Send virtchnl create vport message
1250  *
1251  * Returns 0 on success, negative on failure
1252  */
1253 int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
1254 			       struct idpf_vport_max_q *max_q)
1255 {
1256 	struct virtchnl2_create_vport *vport_msg;
1257 	u16 idx = adapter->next_vport;
1258 	int err, buf_size;
1259 
1260 	buf_size = sizeof(struct virtchnl2_create_vport);
1261 	if (!adapter->vport_params_reqd[idx]) {
1262 		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
1263 							  GFP_KERNEL);
1264 		if (!adapter->vport_params_reqd[idx])
1265 			return -ENOMEM;
1266 	}
1267 
1268 	vport_msg = adapter->vport_params_reqd[idx];
1269 	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
1270 	vport_msg->vport_index = cpu_to_le16(idx);
1271 
1272 	if (adapter->req_tx_splitq)
1273 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1274 	else
1275 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1276 
1277 	if (adapter->req_rx_splitq)
1278 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1279 	else
1280 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1281 
1282 	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
1283 	if (err) {
1284 		dev_err(&adapter->pdev->dev, "Not enough queues are available");
1285 
1286 		return err;
1287 	}
1288 
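	/* vc_buf_lock serializes use of the shared adapter->vc_msg buffer that
	 * idpf_recv_mb_msg() copies the CREATE_VPORT response into.
	 */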
1289 	mutex_lock(&adapter->vc_buf_lock);
1290 
1291 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
1292 			       (u8 *)vport_msg);
1293 	if (err)
1294 		goto rel_lock;
1295 
1296 	err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
1297 				  IDPF_VC_CREATE_VPORT_ERR);
1298 	if (err) {
1299 		dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
1300 
1301 		goto rel_lock;
1302 	}
1303 
1304 	if (!adapter->vport_params_recvd[idx]) {
1305 		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
1306 							   GFP_KERNEL);
1307 		if (!adapter->vport_params_recvd[idx]) {
1308 			err = -ENOMEM;
1309 			goto rel_lock;
1310 		}
1311 	}
1312 
1313 	vport_msg = adapter->vport_params_recvd[idx];
1314 	memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
1315 
1316 rel_lock:
1317 	mutex_unlock(&adapter->vc_buf_lock);
1318 
1319 	return err;
1320 }
1321 
1322 /**
1323  * idpf_check_supported_desc_ids - Verify we have required descriptor support
1324  * @vport: virtual port structure
1325  *
1326  * Return 0 on success, error on failure
1327  */
1328 int idpf_check_supported_desc_ids(struct idpf_vport *vport)
1329 {
1330 	struct idpf_adapter *adapter = vport->adapter;
1331 	struct virtchnl2_create_vport *vport_msg;
1332 	u64 rx_desc_ids, tx_desc_ids;
1333 
1334 	vport_msg = adapter->vport_params_recvd[vport->idx];
1335 
1336 	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
1337 	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
1338 
1339 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
1340 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
1341 			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
1342 			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1343 		}
1344 	} else {
1345 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
1346 			vport->base_rxd = true;
1347 	}
1348 
1349 	if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT)
1350 		return 0;
1351 
1352 	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
1353 		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
1354 		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
1355 	}
1356 
1357 	return 0;
1358 }
1359 
1360 /**
1361  * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
1362  * @vport: virtual port data structure
1363  *
1364  * Send virtchnl destroy vport message.  Returns 0 on success, negative on
1365  * failure.
1366  */
1367 int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
1368 {
1369 	struct idpf_adapter *adapter = vport->adapter;
1370 	struct virtchnl2_vport v_id;
1371 	int err;
1372 
1373 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1374 
1375 	mutex_lock(&vport->vc_buf_lock);
1376 
1377 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
1378 			       sizeof(v_id), (u8 *)&v_id);
1379 	if (err)
1380 		goto rel_lock;
1381 
1382 	err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
1383 				      IDPF_VC_DESTROY_VPORT_ERR);
1384 
1385 rel_lock:
1386 	mutex_unlock(&vport->vc_buf_lock);
1387 
1388 	return err;
1389 }
1390 
1391 /**
1392  * idpf_send_enable_vport_msg - Send virtchnl enable vport message
1393  * @vport: virtual port data structure
1394  *
1395  * Send enable vport virtchnl message.  Returns 0 on success, negative on
1396  * failure.
1397  */
1398 int idpf_send_enable_vport_msg(struct idpf_vport *vport)
1399 {
1400 	struct idpf_adapter *adapter = vport->adapter;
1401 	struct virtchnl2_vport v_id;
1402 	int err;
1403 
1404 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1405 
1406 	mutex_lock(&vport->vc_buf_lock);
1407 
1408 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
1409 			       sizeof(v_id), (u8 *)&v_id);
1410 	if (err)
1411 		goto rel_lock;
1412 
1413 	err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
1414 				  IDPF_VC_ENA_VPORT_ERR);
1415 
1416 rel_lock:
1417 	mutex_unlock(&vport->vc_buf_lock);
1418 
1419 	return err;
1420 }
1421 
1422 /**
1423  * idpf_send_disable_vport_msg - Send virtchnl disable vport message
1424  * @vport: virtual port data structure
1425  *
1426  * Send disable vport virtchnl message.  Returns 0 on success, negative on
1427  * failure.
1428  */
1429 int idpf_send_disable_vport_msg(struct idpf_vport *vport)
1430 {
1431 	struct idpf_adapter *adapter = vport->adapter;
1432 	struct virtchnl2_vport v_id;
1433 	int err;
1434 
1435 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1436 
1437 	mutex_lock(&vport->vc_buf_lock);
1438 
1439 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
1440 			       sizeof(v_id), (u8 *)&v_id);
1441 	if (err)
1442 		goto rel_lock;
1443 
1444 	err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
1445 				      IDPF_VC_DIS_VPORT_ERR);
1446 
1447 rel_lock:
1448 	mutex_unlock(&vport->vc_buf_lock);
1449 
1450 	return err;
1451 }
1452 
1453 /**
1454  * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
1455  * @vport: virtual port data structure
1456  *
1457  * Send config tx queues virtchnl message. Returns 0 on success, negative on
1458  * failure.
1459  */
1460 static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
1461 {
1462 	struct virtchnl2_config_tx_queues *ctq;
1463 	u32 config_sz, chunk_sz, buf_sz;
1464 	int totqs, num_msgs, num_chunks;
1465 	struct virtchnl2_txq_info *qi;
1466 	int err = 0, i, k = 0;
1467 
1468 	totqs = vport->num_txq + vport->num_complq;
1469 	qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
1470 	if (!qi)
1471 		return -ENOMEM;
1472 
1473 	/* Populate the queue info buffer with all queue context info */
1474 	for (i = 0; i < vport->num_txq_grp; i++) {
1475 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1476 		int j;
1477 
1478 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1479 			qi[k].queue_id =
1480 				cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1481 			qi[k].model =
1482 				cpu_to_le16(vport->txq_model);
1483 			qi[k].type =
1484 				cpu_to_le32(tx_qgrp->txqs[j]->q_type);
1485 			qi[k].ring_len =
1486 				cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
1487 			qi[k].dma_ring_addr =
1488 				cpu_to_le64(tx_qgrp->txqs[j]->dma);
1489 			if (idpf_is_queue_model_split(vport->txq_model)) {
1490 				struct idpf_queue *q = tx_qgrp->txqs[j];
1491 
1492 				qi[k].tx_compl_queue_id =
1493 					cpu_to_le16(tx_qgrp->complq->q_id);
1494 				qi[k].relative_queue_id = cpu_to_le16(j);
1495 
1496 				if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags))
1497 					qi[k].sched_mode =
1498 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
1499 				else
1500 					qi[k].sched_mode =
1501 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1502 			} else {
1503 				qi[k].sched_mode =
1504 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1505 			}
1506 		}
1507 
1508 		if (!idpf_is_queue_model_split(vport->txq_model))
1509 			continue;
1510 
1511 		qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1512 		qi[k].model = cpu_to_le16(vport->txq_model);
1513 		qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
1514 		qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
1515 		qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
1516 
1517 		k++;
1518 	}
1519 
1520 	/* Make sure accounting agrees */
1521 	if (k != totqs) {
1522 		err = -EINVAL;
1523 		goto error;
1524 	}
1525 
1526 	/* Chunk up the queue contexts into multiple messages to avoid
1527 	 * sending a control queue message buffer that is too large
1528 	 */
1529 	config_sz = sizeof(struct virtchnl2_config_tx_queues);
1530 	chunk_sz = sizeof(struct virtchnl2_txq_info);
1531 
1532 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1533 			   totqs);
1534 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
1535 
1536 	buf_sz = struct_size(ctq, qinfo, num_chunks);
1537 	ctq = kzalloc(buf_sz, GFP_KERNEL);
1538 	if (!ctq) {
1539 		err = -ENOMEM;
1540 		goto error;
1541 	}
1542 
1543 	mutex_lock(&vport->vc_buf_lock);
1544 
1545 	for (i = 0, k = 0; i < num_msgs; i++) {
1546 		memset(ctq, 0, buf_sz);
1547 		ctq->vport_id = cpu_to_le32(vport->vport_id);
1548 		ctq->num_qinfo = cpu_to_le16(num_chunks);
1549 		memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
1550 
1551 		err = idpf_send_mb_msg(vport->adapter,
1552 				       VIRTCHNL2_OP_CONFIG_TX_QUEUES,
1553 				       buf_sz, (u8 *)ctq);
1554 		if (err)
1555 			goto mbx_error;
1556 
1557 		err = idpf_wait_for_event(vport->adapter, vport,
1558 					  IDPF_VC_CONFIG_TXQ,
1559 					  IDPF_VC_CONFIG_TXQ_ERR);
1560 		if (err)
1561 			goto mbx_error;
1562 
1563 		k += num_chunks;
1564 		totqs -= num_chunks;
1565 		num_chunks = min(num_chunks, totqs);
1566 		/* Recalculate buffer size */
1567 		buf_sz = struct_size(ctq, qinfo, num_chunks);
1568 	}
1569 
1570 mbx_error:
1571 	mutex_unlock(&vport->vc_buf_lock);
1572 	kfree(ctq);
1573 error:
1574 	kfree(qi);
1575 
1576 	return err;
1577 }
1578 
1579 /**
1580  * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
1581  * @vport: virtual port data structure
1582  *
1583  * Send config rx queues virtchnl message.  Returns 0 on success, negative on
1584  * failure.
1585  */
1586 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
1587 {
1588 	struct virtchnl2_config_rx_queues *crq;
1589 	u32 config_sz, chunk_sz, buf_sz;
1590 	int totqs, num_msgs, num_chunks;
1591 	struct virtchnl2_rxq_info *qi;
1592 	int err = 0, i, k = 0;
1593 
1594 	totqs = vport->num_rxq + vport->num_bufq;
1595 	qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
1596 	if (!qi)
1597 		return -ENOMEM;
1598 
1599 	/* Populate the queue info buffer with all queue context info */
1600 	for (i = 0; i < vport->num_rxq_grp; i++) {
1601 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1602 		u16 num_rxq;
1603 		int j;
1604 
1605 		if (!idpf_is_queue_model_split(vport->rxq_model))
1606 			goto setup_rxqs;
1607 
1608 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1609 			struct idpf_queue *bufq =
1610 				&rx_qgrp->splitq.bufq_sets[j].bufq;
1611 
1612 			qi[k].queue_id = cpu_to_le32(bufq->q_id);
1613 			qi[k].model = cpu_to_le16(vport->rxq_model);
1614 			qi[k].type = cpu_to_le32(bufq->q_type);
1615 			qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1616 			qi[k].ring_len = cpu_to_le16(bufq->desc_count);
1617 			qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
1618 			qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
1619 			qi[k].buffer_notif_stride = bufq->rx_buf_stride;
1620 			qi[k].rx_buffer_low_watermark =
1621 				cpu_to_le16(bufq->rx_buffer_low_watermark);
1622 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1623 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1624 		}
1625 
1626 setup_rxqs:
1627 		if (idpf_is_queue_model_split(vport->rxq_model))
1628 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1629 		else
1630 			num_rxq = rx_qgrp->singleq.num_rxq;
1631 
1632 		for (j = 0; j < num_rxq; j++, k++) {
1633 			struct idpf_queue *rxq;
1634 
1635 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1636 				rxq = rx_qgrp->singleq.rxqs[j];
1637 				goto common_qi_fields;
1638 			}
1639 			rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1640 			qi[k].rx_bufq1_id =
1641 			  cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id);
1642 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
1643 				qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
1644 				qi[k].rx_bufq2_id =
1645 				  cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id);
1646 			}
1647 			qi[k].rx_buffer_low_watermark =
1648 				cpu_to_le16(rxq->rx_buffer_low_watermark);
1649 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1650 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1651 
1652 common_qi_fields:
1653 			if (rxq->rx_hsplit_en) {
1654 				qi[k].qflags |=
1655 					cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
1656 				qi[k].hdr_buffer_size =
1657 					cpu_to_le16(rxq->rx_hbuf_size);
1658 			}
1659 			qi[k].queue_id = cpu_to_le32(rxq->q_id);
1660 			qi[k].model = cpu_to_le16(vport->rxq_model);
1661 			qi[k].type = cpu_to_le32(rxq->q_type);
1662 			qi[k].ring_len = cpu_to_le16(rxq->desc_count);
1663 			qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
1664 			qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
1665 			qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
1666 			qi[k].qflags |=
1667 				cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
1668 			qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
1669 		}
1670 	}
1671 
1672 	/* Make sure accounting agrees */
1673 	if (k != totqs) {
1674 		err = -EINVAL;
1675 		goto error;
1676 	}
1677 
1678 	/* Chunk up the queue contexts into multiple messages to avoid
1679 	 * sending a control queue message buffer that is too large
1680 	 */
1681 	config_sz = sizeof(struct virtchnl2_config_rx_queues);
1682 	chunk_sz = sizeof(struct virtchnl2_rxq_info);
1683 
1684 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1685 			   totqs);
1686 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
1687 
1688 	buf_sz = struct_size(crq, qinfo, num_chunks);
1689 	crq = kzalloc(buf_sz, GFP_KERNEL);
1690 	if (!crq) {
1691 		err = -ENOMEM;
1692 		goto error;
1693 	}
1694 
1695 	mutex_lock(&vport->vc_buf_lock);
1696 
1697 	for (i = 0, k = 0; i < num_msgs; i++) {
1698 		memset(crq, 0, buf_sz);
1699 		crq->vport_id = cpu_to_le32(vport->vport_id);
1700 		crq->num_qinfo = cpu_to_le16(num_chunks);
1701 		memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
1702 
1703 		err = idpf_send_mb_msg(vport->adapter,
1704 				       VIRTCHNL2_OP_CONFIG_RX_QUEUES,
1705 				       buf_sz, (u8 *)crq);
1706 		if (err)
1707 			goto mbx_error;
1708 
1709 		err = idpf_wait_for_event(vport->adapter, vport,
1710 					  IDPF_VC_CONFIG_RXQ,
1711 					  IDPF_VC_CONFIG_RXQ_ERR);
1712 		if (err)
1713 			goto mbx_error;
1714 
1715 		k += num_chunks;
1716 		totqs -= num_chunks;
1717 		num_chunks = min(num_chunks, totqs);
1718 		/* Recalculate buffer size */
1719 		buf_sz = struct_size(crq, qinfo, num_chunks);
1720 	}
1721 
1722 mbx_error:
1723 	mutex_unlock(&vport->vc_buf_lock);
1724 	kfree(crq);
1725 error:
1726 	kfree(qi);
1727 
1728 	return err;
1729 }
1730 
1731 /**
1732  * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
1733  * queues message
1734  * @vport: virtual port data structure
1735  * @vc_op: virtchnl op code to send
1736  *
1737  * Send enable or disable queues virtchnl message. Returns 0 on success,
1738  * negative on failure.
1739  */
1740 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
1741 {
1742 	u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
1743 	struct idpf_adapter *adapter = vport->adapter;
1744 	struct virtchnl2_del_ena_dis_queues *eq;
1745 	struct virtchnl2_queue_chunks *qcs;
1746 	struct virtchnl2_queue_chunk *qc;
1747 	u32 config_sz, chunk_sz, buf_sz;
1748 	int i, j, k = 0, err = 0;
1749 
1750 	/* validate virtchnl op */
1751 	switch (vc_op) {
1752 	case VIRTCHNL2_OP_ENABLE_QUEUES:
1753 	case VIRTCHNL2_OP_DISABLE_QUEUES:
1754 		break;
1755 	default:
1756 		return -EINVAL;
1757 	}
1758 
1759 	num_txq = vport->num_txq + vport->num_complq;
1760 	num_rxq = vport->num_rxq + vport->num_bufq;
1761 	num_q = num_txq + num_rxq;
1762 	buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
1763 	qc = kzalloc(buf_sz, GFP_KERNEL);
1764 	if (!qc)
1765 		return -ENOMEM;
1766 
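	/* Fill the chunk array in a fixed order: TX queues, completion
	 * queues, RX queues, then buffer queues; the accounting checks
	 * after each section rely on this order.
	 */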
1767 	for (i = 0; i < vport->num_txq_grp; i++) {
1768 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1769 
1770 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1771 			qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
1772 			qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1773 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1774 		}
1775 	}
1776 	if (vport->num_txq != k) {
1777 		err = -EINVAL;
1778 		goto error;
1779 	}
1780 
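	/* Completion queues only exist in the split queue model */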
1781 	if (!idpf_is_queue_model_split(vport->txq_model))
1782 		goto setup_rx;
1783 
1784 	for (i = 0; i < vport->num_txq_grp; i++, k++) {
1785 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1786 
1787 		qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
1788 		qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1789 		qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1790 	}
1791 	if (vport->num_complq != (k - vport->num_txq)) {
1792 		err = -EINVAL;
1793 		goto error;
1794 	}
1795 
1796 setup_rx:
1797 	for (i = 0; i < vport->num_rxq_grp; i++) {
1798 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1799 
1800 		if (idpf_is_queue_model_split(vport->rxq_model))
1801 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1802 		else
1803 			num_rxq = rx_qgrp->singleq.num_rxq;
1804 
1805 		for (j = 0; j < num_rxq; j++, k++) {
1806 			if (idpf_is_queue_model_split(vport->rxq_model)) {
1807 				qc[k].start_queue_id =
1808 				cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
1809 				qc[k].type =
1810 				cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type);
1811 			} else {
1812 				qc[k].start_queue_id =
1813 				cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
1814 				qc[k].type =
1815 				cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type);
1816 			}
1817 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1818 		}
1819 	}
1820 	if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
1821 		err = -EINVAL;
1822 		goto error;
1823 	}
1824 
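	/* Buffer queues only exist in the split queue model */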
1825 	if (!idpf_is_queue_model_split(vport->rxq_model))
1826 		goto send_msg;
1827 
1828 	for (i = 0; i < vport->num_rxq_grp; i++) {
1829 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1830 
1831 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1832 			struct idpf_queue *q;
1833 
1834 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1835 			qc[k].type = cpu_to_le32(q->q_type);
1836 			qc[k].start_queue_id = cpu_to_le32(q->q_id);
1837 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1838 		}
1839 	}
1840 	if (vport->num_bufq != k - (vport->num_txq +
1841 				    vport->num_complq +
1842 				    vport->num_rxq)) {
1843 		err = -EINVAL;
1844 		goto error;
1845 	}
1846 
1847 send_msg:
1848 	/* Chunk up the queue info into multiple messages */
1849 	config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
1850 	chunk_sz = sizeof(struct virtchnl2_queue_chunk);
1851 
1852 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1853 			   num_q);
1854 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
1855 
1856 	buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1857 	eq = kzalloc(buf_sz, GFP_KERNEL);
1858 	if (!eq) {
1859 		err = -ENOMEM;
1860 		goto error;
1861 	}
1862 
1863 	mutex_lock(&vport->vc_buf_lock);
1864 
1865 	for (i = 0, k = 0; i < num_msgs; i++) {
1866 		memset(eq, 0, buf_sz);
1867 		eq->vport_id = cpu_to_le32(vport->vport_id);
1868 		eq->chunks.num_chunks = cpu_to_le16(num_chunks);
1869 		qcs = &eq->chunks;
1870 		memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
1871 
1872 		err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
1873 		if (err)
1874 			goto mbx_error;
1875 
1876 		if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
1877 			err = idpf_wait_for_event(adapter, vport,
1878 						  IDPF_VC_ENA_QUEUES,
1879 						  IDPF_VC_ENA_QUEUES_ERR);
1880 		else
1881 			err = idpf_min_wait_for_event(adapter, vport,
1882 						      IDPF_VC_DIS_QUEUES,
1883 						      IDPF_VC_DIS_QUEUES_ERR);
1884 		if (err)
1885 			goto mbx_error;
1886 
1887 		k += num_chunks;
1888 		num_q -= num_chunks;
1889 		num_chunks = min(num_chunks, num_q);
1890 		/* Recalculate buffer size */
1891 		buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1892 	}
1893 
1894 mbx_error:
1895 	mutex_unlock(&vport->vc_buf_lock);
1896 	kfree(eq);
1897 error:
1898 	kfree(qc);
1899 
1900 	return err;
1901 }
1902 
1903 /**
1904  * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
1905  * vector message
1906  * @vport: virtual port data structure
1907  * @map: true for map and false for unmap
1908  *
1909  * Send map or unmap queue vector virtchnl message.  Returns 0 on success,
1910  * negative on failure.
1911  */
1912 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
1913 {
1914 	struct idpf_adapter *adapter = vport->adapter;
1915 	struct virtchnl2_queue_vector_maps *vqvm;
1916 	struct virtchnl2_queue_vector *vqv;
1917 	u32 config_sz, chunk_sz, buf_sz;
1918 	u32 num_msgs, num_chunks, num_q;
1919 	int i, j, k = 0, err = 0;
1920 
1921 	num_q = vport->num_txq + vport->num_rxq;
1922 
1923 	buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
1924 	vqv = kzalloc(buf_sz, GFP_KERNEL);
1925 	if (!vqv)
1926 		return -ENOMEM;
1927 
1928 	for (i = 0; i < vport->num_txq_grp; i++) {
1929 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1930 
1931 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1932 			vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
1933 			vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1934 
1935 			if (idpf_is_queue_model_split(vport->txq_model)) {
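				/* In the split queue model TX interrupts are
				 * driven by the completion queue, so map the
				 * TX queue to its group's completion queue
				 * vector.
				 */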
1936 				vqv[k].vector_id =
1937 				cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
1938 				vqv[k].itr_idx =
1939 				cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
1940 			} else {
1941 				vqv[k].vector_id =
1942 				cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
1943 				vqv[k].itr_idx =
1944 				cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
1945 			}
1946 		}
1947 	}
1948 
1949 	if (vport->num_txq != k) {
1950 		err = -EINVAL;
1951 		goto error;
1952 	}
1953 
1954 	for (i = 0; i < vport->num_rxq_grp; i++) {
1955 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1956 		u16 num_rxq;
1957 
1958 		if (idpf_is_queue_model_split(vport->rxq_model))
1959 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1960 		else
1961 			num_rxq = rx_qgrp->singleq.num_rxq;
1962 
1963 		for (j = 0; j < num_rxq; j++, k++) {
1964 			struct idpf_queue *rxq;
1965 
1966 			if (idpf_is_queue_model_split(vport->rxq_model))
1967 				rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1968 			else
1969 				rxq = rx_qgrp->singleq.rxqs[j];
1970 
1971 			vqv[k].queue_type = cpu_to_le32(rxq->q_type);
1972 			vqv[k].queue_id = cpu_to_le32(rxq->q_id);
1973 			vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
1974 			vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
1975 		}
1976 	}
1977 
1978 	if (idpf_is_queue_model_split(vport->txq_model)) {
1979 		if (vport->num_rxq != k - vport->num_complq) {
1980 			err = -EINVAL;
1981 			goto error;
1982 		}
1983 	} else {
1984 		if (vport->num_rxq != k - vport->num_txq) {
1985 			err = -EINVAL;
1986 			goto error;
1987 		}
1988 	}
1989 
1990 	/* Chunk up the vector info into multiple messages */
1991 	config_sz = sizeof(struct virtchnl2_queue_vector_maps);
1992 	chunk_sz = sizeof(struct virtchnl2_queue_vector);
1993 
1994 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1995 			   num_q);
1996 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
1997 
1998 	buf_sz = struct_size(vqvm, qv_maps, num_chunks);
1999 	vqvm = kzalloc(buf_sz, GFP_KERNEL);
2000 	if (!vqvm) {
2001 		err = -ENOMEM;
2002 		goto error;
2003 	}
2004 
2005 	mutex_lock(&vport->vc_buf_lock);
2006 
2007 	for (i = 0, k = 0; i < num_msgs; i++) {
2008 		memset(vqvm, 0, buf_sz);
2009 		vqvm->vport_id = cpu_to_le32(vport->vport_id);
2010 		vqvm->num_qv_maps = cpu_to_le16(num_chunks);
2011 		memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
2012 
2013 		if (map) {
2014 			err = idpf_send_mb_msg(adapter,
2015 					       VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
2016 					       buf_sz, (u8 *)vqvm);
2017 			if (!err)
2018 				err = idpf_wait_for_event(adapter, vport,
2019 							  IDPF_VC_MAP_IRQ,
2020 							  IDPF_VC_MAP_IRQ_ERR);
2021 		} else {
2022 			err = idpf_send_mb_msg(adapter,
2023 					       VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
2024 					       buf_sz, (u8 *)vqvm);
2025 			if (!err)
2026 				err =
2027 				idpf_min_wait_for_event(adapter, vport,
2028 							IDPF_VC_UNMAP_IRQ,
2029 							IDPF_VC_UNMAP_IRQ_ERR);
2030 		}
2031 		if (err)
2032 			goto mbx_error;
2033 
2034 		k += num_chunks;
2035 		num_q -= num_chunks;
2036 		num_chunks = min(num_chunks, num_q);
2037 		/* Recalculate buffer size */
2038 		buf_sz = struct_size(vqvm, qv_maps, num_chunks);
2039 	}
2040 
2041 mbx_error:
2042 	mutex_unlock(&vport->vc_buf_lock);
2043 	kfree(vqvm);
2044 error:
2045 	kfree(vqv);
2046 
2047 	return err;
2048 }
2049 
2050 /**
2051  * idpf_send_enable_queues_msg - send enable queues virtchnl message
2052  * @vport: Virtual port private data structure
2053  *
2054  * Will send enable queues virtchnl message.  Returns 0 on success, negative on
2055  * failure.
2056  */
2057 int idpf_send_enable_queues_msg(struct idpf_vport *vport)
2058 {
2059 	return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
2060 }
2061 
2062 /**
2063  * idpf_send_disable_queues_msg - send disable queues virtchnl message
2064  * @vport: Virtual port private data structure
2065  *
2066  * Will send disable queues virtchnl message.  Returns 0 on success, negative
2067  * on failure.
2068  */
2069 int idpf_send_disable_queues_msg(struct idpf_vport *vport)
2070 {
2071 	int err, i;
2072 
2073 	err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
2074 	if (err)
2075 		return err;
2076 
2077 	/* switch to poll mode as interrupts will be disabled after disable
2078 	 * queues virtchnl message is sent
2079 	 */
2080 	for (i = 0; i < vport->num_txq; i++)
2081 		set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
2082 
2083 	/* schedule the napi to receive all the marker packets */
2084 	for (i = 0; i < vport->num_q_vectors; i++)
2085 		napi_schedule(&vport->q_vectors[i].napi);
2086 
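	/* Wait for the marker packets that indicate the hardware has
	 * finished draining the TX queues.
	 */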
2087 	return idpf_wait_for_marker_event(vport);
2088 }
2089 
2090 /**
2091  * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
2092  * structure
2093  * @dchunks: Destination chunks to store data to
2094  * @schunks: Source chunks to copy data from
2095  * @num_chunks: number of chunks to copy
2096  */
2097 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
2098 					     struct virtchnl2_queue_reg_chunk *schunks,
2099 					     u16 num_chunks)
2100 {
2101 	u16 i;
2102 
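	/* The register chunks carry extra register info that the
	 * del/ena/dis message does not need; copy only the type, start
	 * queue id and queue count.
	 */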
2103 	for (i = 0; i < num_chunks; i++) {
2104 		dchunks[i].type = schunks[i].type;
2105 		dchunks[i].start_queue_id = schunks[i].start_queue_id;
2106 		dchunks[i].num_queues = schunks[i].num_queues;
2107 	}
2108 }
2109 
2110 /**
2111  * idpf_send_delete_queues_msg - send delete queues virtchnl message
2112  * @vport: Virtual port private data structure
2113  *
2114  * Will send delete queues virtchnl message. Return 0 on success, negative on
2115  * failure.
2116  */
2117 int idpf_send_delete_queues_msg(struct idpf_vport *vport)
2118 {
2119 	struct idpf_adapter *adapter = vport->adapter;
2120 	struct virtchnl2_create_vport *vport_params;
2121 	struct virtchnl2_queue_reg_chunks *chunks;
2122 	struct virtchnl2_del_ena_dis_queues *eq;
2123 	struct idpf_vport_config *vport_config;
2124 	u16 vport_idx = vport->idx;
2125 	int buf_size, err;
2126 	u16 num_chunks;
2127 
2128 	vport_config = adapter->vport_config[vport_idx];
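	/* If queues were added after vport creation, the chunk info saved
	 * from the ADD_QUEUES response describes the vport's queues;
	 * otherwise use the chunks from the CREATE_VPORT response.
	 */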
2129 	if (vport_config->req_qs_chunks) {
2130 		struct virtchnl2_add_queues *vc_aq =
2131 			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
2132 		chunks = &vc_aq->chunks;
2133 	} else {
2134 		vport_params = adapter->vport_params_recvd[vport_idx];
2135 		chunks = &vport_params->chunks;
2136 	}
2137 
2138 	num_chunks = le16_to_cpu(chunks->num_chunks);
2139 	buf_size = struct_size(eq, chunks.chunks, num_chunks);
2140 
2141 	eq = kzalloc(buf_size, GFP_KERNEL);
2142 	if (!eq)
2143 		return -ENOMEM;
2144 
2145 	eq->vport_id = cpu_to_le32(vport->vport_id);
2146 	eq->chunks.num_chunks = cpu_to_le16(num_chunks);
2147 
2148 	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
2149 					 num_chunks);
2150 
2151 	mutex_lock(&vport->vc_buf_lock);
2152 
2153 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
2154 			       buf_size, (u8 *)eq);
2155 	if (err)
2156 		goto rel_lock;
2157 
2158 	err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
2159 				      IDPF_VC_DEL_QUEUES_ERR);
2160 
2161 rel_lock:
2162 	mutex_unlock(&vport->vc_buf_lock);
2163 	kfree(eq);
2164 
2165 	return err;
2166 }
2167 
2168 /**
2169  * idpf_send_config_queues_msg - Send config queues virtchnl message
2170  * @vport: Virtual port private data structure
2171  *
2172  * Will send config queues virtchnl message. Returns 0 on success, negative on
2173  * failure.
2174  */
2175 int idpf_send_config_queues_msg(struct idpf_vport *vport)
2176 {
2177 	int err;
2178 
2179 	err = idpf_send_config_tx_queues_msg(vport);
2180 	if (err)
2181 		return err;
2182 
2183 	return idpf_send_config_rx_queues_msg(vport);
2184 }
2185 
2186 /**
2187  * idpf_send_add_queues_msg - Send virtchnl add queues message
2188  * @vport: Virtual port private data structure
2189  * @num_tx_q: number of transmit queues
2190  * @num_complq: number of transmit completion queues
2191  * @num_rx_q: number of receive queues
2192  * @num_rx_bufq: number of receive buffer queues
2193  *
2194  * Returns 0 on success, negative on failure. vport _MUST_ be const here as
2195  * we should not change any fields within vport itself in this function.
2196  */
2197 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
2198 			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
2199 {
2200 	struct idpf_adapter *adapter = vport->adapter;
2201 	struct idpf_vport_config *vport_config;
2202 	struct virtchnl2_add_queues aq = { };
2203 	struct virtchnl2_add_queues *vc_msg;
2204 	u16 vport_idx = vport->idx;
2205 	int size, err;
2206 
2207 	vport_config = adapter->vport_config[vport_idx];
2208 
2209 	aq.vport_id = cpu_to_le32(vport->vport_id);
2210 	aq.num_tx_q = cpu_to_le16(num_tx_q);
2211 	aq.num_tx_complq = cpu_to_le16(num_complq);
2212 	aq.num_rx_q = cpu_to_le16(num_rx_q);
2213 	aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
2214 
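	/* Const is cast away only to take the vport's mailbox buffer lock;
	 * no vport fields are modified here.
	 */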
2215 	mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
2216 
2217 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
2218 			       sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
2219 	if (err)
2220 		goto rel_lock;
2221 
2222 	/* We want vport to be const to prevent incidental code changes making
2223 	 * changes to the vport config. We're making a special exception here
2224 	 * to discard const to use the virtchnl.
2225 	 */
2226 	err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
2227 				  IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
2228 	if (err)
2229 		goto rel_lock;
2230 
2231 	kfree(vport_config->req_qs_chunks);
2232 	vport_config->req_qs_chunks = NULL;
2233 
2234 	vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
2235 	/* compare vc_msg num queues with vport num queues */
2236 	if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
2237 	    le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
2238 	    le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
2239 	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
2240 		err = -EINVAL;
2241 		goto rel_lock;
2242 	}
2243 
2244 	size = struct_size(vc_msg, chunks.chunks,
2245 			   le16_to_cpu(vc_msg->chunks.num_chunks));
2246 	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
2247 	if (!vport_config->req_qs_chunks) {
2248 		err = -ENOMEM;
2249 		goto rel_lock;
2250 	}
2251 
2252 rel_lock:
2253 	mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
2254 
2255 	return err;
2256 }
2257 
2258 /**
2259  * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
2260  * @adapter: Driver specific private structure
2261  * @num_vectors: number of vectors to be allocated
2262  *
2263  * Returns 0 on success, negative on failure.
2264  */
2265 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
2266 {
2267 	struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
2268 	struct virtchnl2_alloc_vectors ac = { };
2269 	u16 num_vchunks;
2270 	int size, err;
2271 
2272 	ac.num_vectors = cpu_to_le16(num_vectors);
2273 
2274 	mutex_lock(&adapter->vc_buf_lock);
2275 
2276 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
2277 			       sizeof(ac), (u8 *)&ac);
2278 	if (err)
2279 		goto rel_lock;
2280 
2281 	err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
2282 				  IDPF_VC_ALLOC_VECTORS_ERR);
2283 	if (err)
2284 		goto rel_lock;
2285 
2286 	rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
2287 	num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
2288 
2289 	size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
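	/* The entire response, header plus vector chunks, must fit within
	 * the adapter's vc_msg buffer to be copied out safely.
	 */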
2290 	if (size > sizeof(adapter->vc_msg)) {
2291 		err = -EINVAL;
2292 		goto rel_lock;
2293 	}
2294 
2295 	kfree(adapter->req_vec_chunks);
2296 	adapter->req_vec_chunks = NULL;
2297 	adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
2298 	if (!adapter->req_vec_chunks) {
2299 		err = -ENOMEM;
2300 		goto rel_lock;
2301 	}
2302 
2303 	alloc_vec = adapter->req_vec_chunks;
2304 	if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
2305 		kfree(adapter->req_vec_chunks);
2306 		adapter->req_vec_chunks = NULL;
2307 		err = -EINVAL;
2308 	}
2309 
2310 rel_lock:
2311 	mutex_unlock(&adapter->vc_buf_lock);
2312 
2313 	return err;
2314 }
2315 
2316 /**
 * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
2318  * @adapter: Driver specific private structure
2319  *
2320  * Returns 0 on success, negative on failure.
2321  */
2322 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
2323 {
2324 	struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
2325 	struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
2326 	int buf_size, err;
2327 
2328 	buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
2329 
2330 	mutex_lock(&adapter->vc_buf_lock);
2331 
2332 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
2333 			       (u8 *)vcs);
2334 	if (err)
2335 		goto rel_lock;
2336 
2337 	err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
2338 				      IDPF_VC_DEALLOC_VECTORS_ERR);
2339 	if (err)
2340 		goto rel_lock;
2341 
2342 	kfree(adapter->req_vec_chunks);
2343 	adapter->req_vec_chunks = NULL;
2344 
2345 rel_lock:
2346 	mutex_unlock(&adapter->vc_buf_lock);
2347 
2348 	return err;
2349 }
2350 
2351 /**
 * idpf_get_max_vfs - Get max number of VFs supported
2353  * @adapter: Driver specific private structure
2354  *
2355  * Returns max number of VFs
2356  */
2357 static int idpf_get_max_vfs(struct idpf_adapter *adapter)
2358 {
2359 	return le16_to_cpu(adapter->caps.max_sriov_vfs);
2360 }
2361 
2362 /**
 * idpf_send_set_sriov_vfs_msg - Send virtchnl set SR-IOV VFs message
2364  * @adapter: Driver specific private structure
2365  * @num_vfs: number of virtual functions to be created
2366  *
2367  * Returns 0 on success, negative on failure.
2368  */
2369 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
2370 {
2371 	struct virtchnl2_sriov_vfs_info svi = { };
2372 	int err;
2373 
2374 	svi.num_vfs = cpu_to_le16(num_vfs);
2375 
2376 	mutex_lock(&adapter->vc_buf_lock);
2377 
2378 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
2379 			       sizeof(svi), (u8 *)&svi);
2380 	if (err)
2381 		goto rel_lock;
2382 
2383 	err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
2384 				  IDPF_VC_SET_SRIOV_VFS_ERR);
2385 
2386 rel_lock:
2387 	mutex_unlock(&adapter->vc_buf_lock);
2388 
2389 	return err;
2390 }
2391 
2392 /**
2393  * idpf_send_get_stats_msg - Send virtchnl get statistics message
2394  * @vport: vport to get stats for
2395  *
2396  * Returns 0 on success, negative on failure.
2397  */
2398 int idpf_send_get_stats_msg(struct idpf_vport *vport)
2399 {
2400 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
2401 	struct rtnl_link_stats64 *netstats = &np->netstats;
2402 	struct idpf_adapter *adapter = vport->adapter;
2403 	struct virtchnl2_vport_stats stats_msg = { };
2404 	struct virtchnl2_vport_stats *stats;
2405 	int err;
2406 
	/* Don't send get_stats message if the vport is down */
2408 	if (np->state <= __IDPF_VPORT_DOWN)
2409 		return 0;
2410 
2411 	stats_msg.vport_id = cpu_to_le32(vport->vport_id);
2412 
2413 	mutex_lock(&vport->vc_buf_lock);
2414 
2415 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
2416 			       sizeof(struct virtchnl2_vport_stats),
2417 			       (u8 *)&stats_msg);
2418 	if (err)
2419 		goto rel_lock;
2420 
2421 	err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
2422 				  IDPF_VC_GET_STATS_ERR);
2423 	if (err)
2424 		goto rel_lock;
2425 
2426 	stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
2427 
2428 	spin_lock_bh(&np->stats_lock);
2429 
2430 	netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
2431 			       le64_to_cpu(stats->rx_multicast) +
2432 			       le64_to_cpu(stats->rx_broadcast);
2433 	netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
2434 	netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
2435 	netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
2436 	netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
2437 
2438 	netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
2439 			       le64_to_cpu(stats->tx_multicast) +
2440 			       le64_to_cpu(stats->tx_broadcast);
2441 	netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
2442 	netstats->tx_errors = le64_to_cpu(stats->tx_errors);
2443 	netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
2444 
2445 	vport->port_stats.vport_stats = *stats;
2446 
2447 	spin_unlock_bh(&np->stats_lock);
2448 
2449 rel_lock:
2450 	mutex_unlock(&vport->vc_buf_lock);
2451 
2452 	return err;
2453 }
2454 
2455 /**
 * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set RSS LUT message
 * @vport: virtual port data structure
 * @get: flag to get or set RSS lookup table
2459  *
2460  * Returns 0 on success, negative on failure.
2461  */
2462 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
2463 {
2464 	struct idpf_adapter *adapter = vport->adapter;
2465 	struct virtchnl2_rss_lut *recv_rl;
2466 	struct idpf_rss_data *rss_data;
2467 	struct virtchnl2_rss_lut *rl;
2468 	int buf_size, lut_buf_size;
2469 	int i, err;
2470 
2471 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
2472 	buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
2473 	rl = kzalloc(buf_size, GFP_KERNEL);
2474 	if (!rl)
2475 		return -ENOMEM;
2476 
2477 	rl->vport_id = cpu_to_le32(vport->vport_id);
2478 	mutex_lock(&vport->vc_buf_lock);
2479 
2480 	if (!get) {
2481 		rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
2482 		for (i = 0; i < rss_data->rss_lut_size; i++)
2483 			rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
2484 
2485 		err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
2486 				       buf_size, (u8 *)rl);
2487 		if (err)
2488 			goto free_mem;
2489 
2490 		err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
2491 					  IDPF_VC_SET_RSS_LUT_ERR);
2492 
2493 		goto free_mem;
2494 	}
2495 
2496 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
2497 			       buf_size, (u8 *)rl);
2498 	if (err)
2499 		goto free_mem;
2500 
2501 	err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
2502 				  IDPF_VC_GET_RSS_LUT_ERR);
2503 	if (err)
2504 		goto free_mem;
2505 
2506 	recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
2507 	if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
2508 		goto do_memcpy;
2509 
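	/* The device returned a different LUT size than is cached locally;
	 * resize the local table before copying in the new contents.
	 */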
2510 	rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
2511 	kfree(rss_data->rss_lut);
2512 
2513 	lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
2514 	rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
2515 	if (!rss_data->rss_lut) {
2516 		rss_data->rss_lut_size = 0;
2517 		err = -ENOMEM;
2518 		goto free_mem;
2519 	}
2520 
2521 do_memcpy:
2522 	memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
2523 free_mem:
2524 	mutex_unlock(&vport->vc_buf_lock);
2525 	kfree(rl);
2526 
2527 	return err;
2528 }
2529 
2530 /**
 * idpf_send_get_set_rss_key_msg - Send virtchnl get or set RSS key message
 * @vport: virtual port data structure
 * @get: flag to get or set RSS key
2534  *
2535  * Returns 0 on success, negative on failure
2536  */
2537 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
2538 {
2539 	struct idpf_adapter *adapter = vport->adapter;
2540 	struct virtchnl2_rss_key *recv_rk;
2541 	struct idpf_rss_data *rss_data;
2542 	struct virtchnl2_rss_key *rk;
2543 	int i, buf_size, err;
2544 
2545 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
2546 	buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
2547 	rk = kzalloc(buf_size, GFP_KERNEL);
2548 	if (!rk)
2549 		return -ENOMEM;
2550 
2551 	rk->vport_id = cpu_to_le32(vport->vport_id);
2552 	mutex_lock(&vport->vc_buf_lock);
2553 
2554 	if (get) {
2555 		err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY,
2556 				       buf_size, (u8 *)rk);
2557 		if (err)
2558 			goto error;
2559 
2560 		err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY,
2561 					  IDPF_VC_GET_RSS_KEY_ERR);
2562 		if (err)
2563 			goto error;
2564 
2565 		recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg;
2566 		if (rss_data->rss_key_size !=
2567 		    le16_to_cpu(recv_rk->key_len)) {
2568 			rss_data->rss_key_size =
2569 				min_t(u16, NETDEV_RSS_KEY_LEN,
2570 				      le16_to_cpu(recv_rk->key_len));
2571 			kfree(rss_data->rss_key);
2572 			rss_data->rss_key = kzalloc(rss_data->rss_key_size,
2573 						    GFP_KERNEL);
2574 			if (!rss_data->rss_key) {
2575 				rss_data->rss_key_size = 0;
2576 				err = -ENOMEM;
2577 				goto error;
2578 			}
2579 		}
2580 		memcpy(rss_data->rss_key, recv_rk->key_flex,
2581 		       rss_data->rss_key_size);
2582 	} else {
2583 		rk->key_len = cpu_to_le16(rss_data->rss_key_size);
2584 		for (i = 0; i < rss_data->rss_key_size; i++)
2585 			rk->key_flex[i] = rss_data->rss_key[i];
2586 
2587 		err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY,
2588 				       buf_size, (u8 *)rk);
2589 		if (err)
2590 			goto error;
2591 
2592 		err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY,
2593 					  IDPF_VC_SET_RSS_KEY_ERR);
2594 	}
2595 
2596 error:
2597 	mutex_unlock(&vport->vc_buf_lock);
2598 	kfree(rk);
2599 
2600 	return err;
2601 }
2602 
2603 /**
2604  * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
2605  * @ptype: ptype lookup table
2606  * @pstate: state machine for ptype lookup table
 * @ipv4: true for IPv4, false for IPv6
 * @frag: true if the packet type is fragmented
2610  */
2611 static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
2612 				   struct idpf_ptype_state *pstate,
2613 				   bool ipv4, bool frag)
2614 {
2615 	if (!pstate->outer_ip || !pstate->outer_frag) {
2616 		ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
2617 		pstate->outer_ip = true;
2618 
2619 		if (ipv4)
2620 			ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
2621 		else
2622 			ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
2623 
2624 		if (frag) {
2625 			ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
2626 			pstate->outer_frag = true;
2627 		}
2628 	} else {
2629 		ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
2630 		pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
2631 
2632 		if (ipv4)
2633 			ptype->tunnel_end_prot =
2634 					IDPF_RX_PTYPE_TUNNEL_END_IPV4;
2635 		else
2636 			ptype->tunnel_end_prot =
2637 					IDPF_RX_PTYPE_TUNNEL_END_IPV6;
2638 
2639 		if (frag)
2640 			ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
2641 	}
2642 }
2643 
2644 /**
2645  * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
2646  * @vport: virtual port data structure
2647  *
2648  * Returns 0 on success, negative on failure.
2649  */
2650 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
2651 {
2652 	struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
2653 	struct virtchnl2_get_ptype_info get_ptype_info;
2654 	int max_ptype, ptypes_recvd = 0, ptype_offset;
2655 	struct idpf_adapter *adapter = vport->adapter;
2656 	struct virtchnl2_get_ptype_info *ptype_info;
2657 	u16 next_ptype_id = 0;
2658 	int err = 0, i, j, k;
2659 
2660 	if (idpf_is_queue_model_split(vport->rxq_model))
2661 		max_ptype = IDPF_RX_MAX_PTYPE;
2662 	else
2663 		max_ptype = IDPF_RX_MAX_BASE_PTYPE;
2664 
2665 	memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
2666 
2667 	ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2668 	if (!ptype_info)
2669 		return -ENOMEM;
2670 
2671 	mutex_lock(&adapter->vc_buf_lock);
2672 
2673 	while (next_ptype_id < max_ptype) {
2674 		get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id);
2675 
2676 		if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
2677 			get_ptype_info.num_ptypes =
2678 				cpu_to_le16(max_ptype - next_ptype_id);
2679 		else
2680 			get_ptype_info.num_ptypes =
2681 				cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
2682 
2683 		err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
2684 				       sizeof(struct virtchnl2_get_ptype_info),
2685 				       (u8 *)&get_ptype_info);
2686 		if (err)
2687 			goto vc_buf_unlock;
2688 
2689 		err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO,
2690 					  IDPF_VC_GET_PTYPE_INFO_ERR);
2691 		if (err)
2692 			goto vc_buf_unlock;
2693 
2694 		memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
2695 
2696 		ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
2697 		if (ptypes_recvd > max_ptype) {
2698 			err = -EINVAL;
2699 			goto vc_buf_unlock;
2700 		}
2701 
2702 		next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) +
2703 				le16_to_cpu(get_ptype_info.num_ptypes);
2704 
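		/* ptype entries are variable length and are packed one after
		 * another following the message header, so walk them using
		 * IDPF_GET_PTYPE_SIZE().
		 */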
2705 		ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
2706 
2707 		for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
2708 			struct idpf_ptype_state pstate = { };
2709 			struct virtchnl2_ptype *ptype;
2710 			u16 id;
2711 
2712 			ptype = (struct virtchnl2_ptype *)
2713 					((u8 *)ptype_info + ptype_offset);
2714 
2715 			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
2716 			if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) {
2717 				err = -EINVAL;
2718 				goto vc_buf_unlock;
2719 			}
2720 
2721 			/* 0xFFFF indicates end of ptypes */
2722 			if (le16_to_cpu(ptype->ptype_id_10) ==
2723 							IDPF_INVALID_PTYPE_ID) {
2724 				err = 0;
2725 				goto vc_buf_unlock;
2726 			}
2727 
2728 			if (idpf_is_queue_model_split(vport->rxq_model))
2729 				k = le16_to_cpu(ptype->ptype_id_10);
2730 			else
2731 				k = ptype->ptype_id_8;
2732 
2733 			if (ptype->proto_id_count)
2734 				ptype_lkup[k].known = 1;
2735 
2736 			for (j = 0; j < ptype->proto_id_count; j++) {
2737 				id = le16_to_cpu(ptype->proto_id[j]);
2738 				switch (id) {
2739 				case VIRTCHNL2_PROTO_HDR_GRE:
2740 					if (pstate.tunnel_state ==
2741 							IDPF_PTYPE_TUNNEL_IP) {
2742 						ptype_lkup[k].tunnel_type =
2743 						IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
2744 						pstate.tunnel_state |=
2745 						IDPF_PTYPE_TUNNEL_IP_GRENAT;
2746 					}
2747 					break;
2748 				case VIRTCHNL2_PROTO_HDR_MAC:
2749 					ptype_lkup[k].outer_ip =
2750 						IDPF_RX_PTYPE_OUTER_L2;
2751 					if (pstate.tunnel_state ==
2752 							IDPF_TUN_IP_GRE) {
2753 						ptype_lkup[k].tunnel_type =
2754 						IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
2755 						pstate.tunnel_state |=
2756 						IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
2757 					}
2758 					break;
2759 				case VIRTCHNL2_PROTO_HDR_IPV4:
2760 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2761 							       &pstate, true,
2762 							       false);
2763 					break;
2764 				case VIRTCHNL2_PROTO_HDR_IPV6:
2765 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2766 							       &pstate, false,
2767 							       false);
2768 					break;
2769 				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
2770 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2771 							       &pstate, true,
2772 							       true);
2773 					break;
2774 				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
2775 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2776 							       &pstate, false,
2777 							       true);
2778 					break;
2779 				case VIRTCHNL2_PROTO_HDR_UDP:
2780 					ptype_lkup[k].inner_prot =
2781 					IDPF_RX_PTYPE_INNER_PROT_UDP;
2782 					break;
2783 				case VIRTCHNL2_PROTO_HDR_TCP:
2784 					ptype_lkup[k].inner_prot =
2785 					IDPF_RX_PTYPE_INNER_PROT_TCP;
2786 					break;
2787 				case VIRTCHNL2_PROTO_HDR_SCTP:
2788 					ptype_lkup[k].inner_prot =
2789 					IDPF_RX_PTYPE_INNER_PROT_SCTP;
2790 					break;
2791 				case VIRTCHNL2_PROTO_HDR_ICMP:
2792 					ptype_lkup[k].inner_prot =
2793 					IDPF_RX_PTYPE_INNER_PROT_ICMP;
2794 					break;
2795 				case VIRTCHNL2_PROTO_HDR_PAY:
2796 					ptype_lkup[k].payload_layer =
2797 						IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
2798 					break;
2799 				case VIRTCHNL2_PROTO_HDR_ICMPV6:
2800 				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
2801 				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
2802 				case VIRTCHNL2_PROTO_HDR_POST_MAC:
2803 				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
2804 				case VIRTCHNL2_PROTO_HDR_SVLAN:
2805 				case VIRTCHNL2_PROTO_HDR_CVLAN:
2806 				case VIRTCHNL2_PROTO_HDR_MPLS:
2807 				case VIRTCHNL2_PROTO_HDR_MMPLS:
2808 				case VIRTCHNL2_PROTO_HDR_PTP:
2809 				case VIRTCHNL2_PROTO_HDR_CTRL:
2810 				case VIRTCHNL2_PROTO_HDR_LLDP:
2811 				case VIRTCHNL2_PROTO_HDR_ARP:
2812 				case VIRTCHNL2_PROTO_HDR_ECP:
2813 				case VIRTCHNL2_PROTO_HDR_EAPOL:
2814 				case VIRTCHNL2_PROTO_HDR_PPPOD:
2815 				case VIRTCHNL2_PROTO_HDR_PPPOE:
2816 				case VIRTCHNL2_PROTO_HDR_IGMP:
2817 				case VIRTCHNL2_PROTO_HDR_AH:
2818 				case VIRTCHNL2_PROTO_HDR_ESP:
2819 				case VIRTCHNL2_PROTO_HDR_IKE:
2820 				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
2821 				case VIRTCHNL2_PROTO_HDR_L2TPV2:
2822 				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
2823 				case VIRTCHNL2_PROTO_HDR_L2TPV3:
2824 				case VIRTCHNL2_PROTO_HDR_GTP:
2825 				case VIRTCHNL2_PROTO_HDR_GTP_EH:
2826 				case VIRTCHNL2_PROTO_HDR_GTPCV2:
2827 				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
2828 				case VIRTCHNL2_PROTO_HDR_GTPU:
2829 				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
2830 				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
2831 				case VIRTCHNL2_PROTO_HDR_ECPRI:
2832 				case VIRTCHNL2_PROTO_HDR_VRRP:
2833 				case VIRTCHNL2_PROTO_HDR_OSPF:
2834 				case VIRTCHNL2_PROTO_HDR_TUN:
2835 				case VIRTCHNL2_PROTO_HDR_NVGRE:
2836 				case VIRTCHNL2_PROTO_HDR_VXLAN:
2837 				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
2838 				case VIRTCHNL2_PROTO_HDR_GENEVE:
2839 				case VIRTCHNL2_PROTO_HDR_NSH:
2840 				case VIRTCHNL2_PROTO_HDR_QUIC:
2841 				case VIRTCHNL2_PROTO_HDR_PFCP:
2842 				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
2843 				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
2844 				case VIRTCHNL2_PROTO_HDR_RTP:
2845 				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
2846 					break;
2847 				default:
2848 					break;
2849 				}
2850 			}
2851 		}
2852 	}
2853 
2854 vc_buf_unlock:
2855 	mutex_unlock(&adapter->vc_buf_lock);
2856 	kfree(ptype_info);
2857 
2858 	return err;
2859 }
2860 
2861 /**
2862  * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
2863  *				    message
2864  * @vport: virtual port data structure
2865  *
2866  * Returns 0 on success, negative on failure.
2867  */
2868 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
2869 {
2870 	struct virtchnl2_loopback loopback;
2871 	int err;
2872 
2873 	loopback.vport_id = cpu_to_le32(vport->vport_id);
2874 	loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
2875 
2876 	mutex_lock(&vport->vc_buf_lock);
2877 
2878 	err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK,
2879 			       sizeof(loopback), (u8 *)&loopback);
2880 	if (err)
2881 		goto rel_lock;
2882 
2883 	err = idpf_wait_for_event(vport->adapter, vport,
2884 				  IDPF_VC_LOOPBACK_STATE,
2885 				  IDPF_VC_LOOPBACK_STATE_ERR);
2886 
2887 rel_lock:
2888 	mutex_unlock(&vport->vc_buf_lock);
2889 
2890 	return err;
2891 }
2892 
2893 /**
2894  * idpf_find_ctlq - Given a type and id, find ctlq info
2895  * @hw: hardware struct
2896  * @type: type of ctrlq to find
2897  * @id: ctlq id to find
2898  *
2899  * Returns pointer to found ctlq info struct, NULL otherwise.
2900  */
2901 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
2902 					     enum idpf_ctlq_type type, int id)
2903 {
2904 	struct idpf_ctlq_info *cq, *tmp;
2905 
2906 	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
2907 		if (cq->q_id == id && cq->cq_type == type)
2908 			return cq;
2909 
2910 	return NULL;
2911 }
2912 
2913 /**
 * idpf_init_dflt_mbx - Set up default mailbox parameters and make request
2915  * @adapter: adapter info struct
2916  *
2917  * Returns 0 on success, negative otherwise
2918  */
2919 int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
2920 {
2921 	struct idpf_ctlq_create_info ctlq_info[] = {
2922 		{
2923 			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
2924 			.id = IDPF_DFLT_MBX_ID,
2925 			.len = IDPF_DFLT_MBX_Q_LEN,
2926 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2927 		},
2928 		{
2929 			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
2930 			.id = IDPF_DFLT_MBX_ID,
2931 			.len = IDPF_DFLT_MBX_Q_LEN,
2932 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2933 		}
2934 	};
2935 	struct idpf_hw *hw = &adapter->hw;
2936 	int err;
2937 
2938 	adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
2939 
2940 	err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
2941 	if (err)
2942 		return err;
2943 
2944 	hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
2945 				 IDPF_DFLT_MBX_ID);
2946 	hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
2947 				 IDPF_DFLT_MBX_ID);
2948 
2949 	if (!hw->asq || !hw->arq) {
2950 		idpf_ctlq_deinit(hw);
2951 
2952 		return -ENOENT;
2953 	}
2954 
2955 	adapter->state = __IDPF_STARTUP;
2956 
2957 	return 0;
2958 }
2959 
2960 /**
 * idpf_deinit_dflt_mbx - Free the default mailbox control queues
2962  * @adapter: Driver specific private data structure
2963  */
2964 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
2965 {
2966 	if (adapter->hw.arq && adapter->hw.asq) {
2967 		idpf_mb_clean(adapter);
2968 		idpf_ctlq_deinit(&adapter->hw);
2969 	}
2970 	adapter->hw.arq = NULL;
2971 	adapter->hw.asq = NULL;
2972 }
2973 
2974 /**
 * idpf_vport_params_buf_rel - Release memory for mailbox resources
 * @adapter: Driver specific private data structure
 *
 * Will release the memory used to hold the vport parameters received over the
 * mailbox.
2979  */
2980 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
2981 {
2982 	kfree(adapter->vport_params_recvd);
2983 	adapter->vport_params_recvd = NULL;
2984 	kfree(adapter->vport_params_reqd);
2985 	adapter->vport_params_reqd = NULL;
2986 	kfree(adapter->vport_ids);
2987 	adapter->vport_ids = NULL;
2988 }
2989 
2990 /**
 * idpf_vport_params_buf_alloc - Allocate memory for mailbox resources
 * @adapter: Driver specific private data structure
 *
 * Will allocate memory to hold the vport parameters received over the mailbox.
 *
 * Returns 0 on success, negative on failure.
2995  */
2996 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
2997 {
2998 	u16 num_max_vports = idpf_get_max_vports(adapter);
2999 
3000 	adapter->vport_params_reqd = kcalloc(num_max_vports,
3001 					     sizeof(*adapter->vport_params_reqd),
3002 					     GFP_KERNEL);
3003 	if (!adapter->vport_params_reqd)
3004 		return -ENOMEM;
3005 
3006 	adapter->vport_params_recvd = kcalloc(num_max_vports,
3007 					      sizeof(*adapter->vport_params_recvd),
3008 					      GFP_KERNEL);
3009 	if (!adapter->vport_params_recvd)
3010 		goto err_mem;
3011 
3012 	adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
3013 	if (!adapter->vport_ids)
3014 		goto err_mem;
3015 
3016 	if (adapter->vport_config)
3017 		return 0;
3018 
3019 	adapter->vport_config = kcalloc(num_max_vports,
3020 					sizeof(*adapter->vport_config),
3021 					GFP_KERNEL);
3022 	if (!adapter->vport_config)
3023 		goto err_mem;
3024 
3025 	return 0;
3026 
3027 err_mem:
3028 	idpf_vport_params_buf_rel(adapter);
3029 
3030 	return -ENOMEM;
3031 }
3032 
3033 /**
3034  * idpf_vc_core_init - Initialize state machine and get driver specific
3035  * resources
3036  * @adapter: Driver specific private structure
3037  *
3038  * This function will initialize the state machine and request all necessary
3039  * resources required by the device driver. Once the state machine is
 * initialized, it allocates memory to store vport specific information and
 * requests the required interrupts.
 *
 * Returns 0 on success, -EAGAIN if the function should be called again,
3044  * otherwise negative on failure.
3045  */
3046 int idpf_vc_core_init(struct idpf_adapter *adapter)
3047 {
3048 	int task_delay = 30;
3049 	u16 num_max_vports;
3050 	int err = 0;
3051 
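	/* Step the init state machine: negotiate the virtchnl version, then
	 * exchange capabilities, then move on to SW init. Failures fall
	 * through to init_failed, which retries a bounded number of times.
	 */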
3052 	while (adapter->state != __IDPF_INIT_SW) {
3053 		switch (adapter->state) {
3054 		case __IDPF_STARTUP:
3055 			if (idpf_send_ver_msg(adapter))
3056 				goto init_failed;
3057 			adapter->state = __IDPF_VER_CHECK;
3058 			goto restart;
3059 		case __IDPF_VER_CHECK:
3060 			err = idpf_recv_ver_msg(adapter);
3061 			if (err == -EIO) {
3062 				return err;
3063 			} else if (err == -EAGAIN) {
3064 				adapter->state = __IDPF_STARTUP;
3065 				goto restart;
3066 			} else if (err) {
3067 				goto init_failed;
3068 			}
3069 			if (idpf_send_get_caps_msg(adapter))
3070 				goto init_failed;
3071 			adapter->state = __IDPF_GET_CAPS;
3072 			goto restart;
3073 		case __IDPF_GET_CAPS:
3074 			if (idpf_recv_get_caps_msg(adapter))
3075 				goto init_failed;
3076 			adapter->state = __IDPF_INIT_SW;
3077 			break;
3078 		default:
3079 			dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
3080 				adapter->state);
3081 			goto init_failed;
3082 		}
3083 		break;
3084 restart:
3085 		/* Give enough time before proceeding further with
		 * the state machine
3087 		 */
3088 		msleep(task_delay);
3089 	}
3090 
3091 	pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
3092 	num_max_vports = idpf_get_max_vports(adapter);
3093 	adapter->max_vports = num_max_vports;
3094 	adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
3095 				  GFP_KERNEL);
3096 	if (!adapter->vports)
3097 		return -ENOMEM;
3098 
3099 	if (!adapter->netdevs) {
3100 		adapter->netdevs = kcalloc(num_max_vports,
3101 					   sizeof(struct net_device *),
3102 					   GFP_KERNEL);
3103 		if (!adapter->netdevs) {
3104 			err = -ENOMEM;
3105 			goto err_netdev_alloc;
3106 		}
3107 	}
3108 
3109 	err = idpf_vport_params_buf_alloc(adapter);
3110 	if (err) {
3111 		dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
3112 			err);
3113 		goto err_netdev_alloc;
3114 	}
3115 
3116 	/* Start the mailbox task before requesting vectors. This will ensure
	 * the vector information response from the mailbox is handled
3118 	 */
3119 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
3120 
3121 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
3122 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3123 
3124 	err = idpf_intr_req(adapter);
3125 	if (err) {
3126 		dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
3127 			err);
3128 		goto err_intr_req;
3129 	}
3130 
3131 	idpf_init_avail_queues(adapter);
3132 
3133 	/* Skew the delay for init tasks for each function based on fn number
3134 	 * to prevent every function from making the same call simultaneously.
3135 	 */
3136 	queue_delayed_work(adapter->init_wq, &adapter->init_task,
3137 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3138 
3139 	goto no_err;
3140 
3141 err_intr_req:
3142 	cancel_delayed_work_sync(&adapter->serv_task);
3143 	idpf_vport_params_buf_rel(adapter);
3144 err_netdev_alloc:
3145 	kfree(adapter->vports);
3146 	adapter->vports = NULL;
3147 no_err:
3148 	return err;
3149 
3150 init_failed:
3151 	/* Don't retry if we're trying to go down, just bail. */
3152 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
3153 		return err;
3154 
3155 	if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
3156 		dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
3157 
3158 		return -EFAULT;
3159 	}
	/* If we got here, the mailbox queue initialization register writes
	 * might not have taken effect. Retry initializing the mailbox.
3163 	 */
3164 	adapter->state = __IDPF_STARTUP;
3165 	idpf_deinit_dflt_mbx(adapter);
3166 	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
3167 	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
3168 			   msecs_to_jiffies(task_delay));
3169 
3170 	return -EAGAIN;
3171 }
3172 
3173 /**
3174  * idpf_vc_core_deinit - Device deinit routine
3175  * @adapter: Driver specific private structure
3176  *
3177  */
3178 void idpf_vc_core_deinit(struct idpf_adapter *adapter)
3179 {
3180 	int i;
3181 
3182 	idpf_deinit_task(adapter);
3183 	idpf_intr_rel(adapter);
	/* Set all bits as we don't know which vc_state the vchnl_wq is
	 * waiting on, and wake up the virtchnl wait queue even if it is
	 * waiting for a response, as we are going down
3187 	 */
3188 	for (i = 0; i < IDPF_VC_NBITS; i++)
3189 		set_bit(i, adapter->vc_state);
3190 	wake_up(&adapter->vchnl_wq);
3191 
3192 	cancel_delayed_work_sync(&adapter->serv_task);
3193 	cancel_delayed_work_sync(&adapter->mbx_task);
3194 
3195 	idpf_vport_params_buf_rel(adapter);
3196 
3197 	/* Clear all the bits */
3198 	for (i = 0; i < IDPF_VC_NBITS; i++)
3199 		clear_bit(i, adapter->vc_state);
3200 
3201 	kfree(adapter->vports);
3202 	adapter->vports = NULL;
3203 }
3204 
3205 /**
3206  * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3207  * @vport: virtual port data struct
3208  *
3209  * This function requests the vector information required for the vport and
3210  * stores the vector indexes received from the 'global vector distribution'
3211  * in the vport's queue vectors array.
3212  *
3213  * Return 0 on success, error on failure
3214  */
3215 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
3216 {
3217 	struct idpf_vector_info vec_info;
3218 	int num_alloc_vecs;
3219 
3220 	vec_info.num_curr_vecs = vport->num_q_vectors;
3221 	vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
3222 	vec_info.default_vport = vport->default_vport;
3223 	vec_info.index = vport->idx;
3224 
3225 	num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
3226 						     vport->q_vector_idxs,
3227 						     &vec_info);
3228 	if (num_alloc_vecs <= 0) {
3229 		dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
3230 			num_alloc_vecs);
3231 		return -EINVAL;
3232 	}
3233 
3234 	vport->num_q_vectors = num_alloc_vecs;
3235 
3236 	return 0;
3237 }
3238 
3239 /**
3240  * idpf_vport_init - Initialize virtual port
3241  * @vport: virtual port to be initialized
3242  * @max_q: vport max queue info
3243  *
3244  * Will initialize vport with the info received through MB earlier
3245  */
3246 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
3247 {
3248 	struct idpf_adapter *adapter = vport->adapter;
3249 	struct virtchnl2_create_vport *vport_msg;
3250 	struct idpf_vport_config *vport_config;
3251 	u16 tx_itr[] = {2, 8, 64, 128, 256};
3252 	u16 rx_itr[] = {2, 8, 32, 96, 128};
3253 	struct idpf_rss_data *rss_data;
3254 	u16 idx = vport->idx;
3255 
3256 	vport_config = adapter->vport_config[idx];
3257 	rss_data = &vport_config->user_config.rss_data;
3258 	vport_msg = adapter->vport_params_recvd[idx];
3259 
3260 	vport_config->max_q.max_txq = max_q->max_txq;
3261 	vport_config->max_q.max_rxq = max_q->max_rxq;
3262 	vport_config->max_q.max_complq = max_q->max_complq;
3263 	vport_config->max_q.max_bufq = max_q->max_bufq;
3264 
3265 	vport->txq_model = le16_to_cpu(vport_msg->txq_model);
3266 	vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
3267 	vport->vport_type = le16_to_cpu(vport_msg->vport_type);
3268 	vport->vport_id = le32_to_cpu(vport_msg->vport_id);
3269 
3270 	rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
3271 				       le16_to_cpu(vport_msg->rss_key_size));
3272 	rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
3273 
3274 	ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
3275 	vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD;
3276 
3277 	/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
3278 	memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
3279 	memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
3280 
3281 	idpf_vport_init_num_qs(vport, vport_msg);
3282 	idpf_vport_calc_num_q_desc(vport);
3283 	idpf_vport_calc_num_q_groups(vport);
3284 	idpf_vport_alloc_vec_indexes(vport);
3285 
3286 	vport->crc_enable = adapter->crc_enable;
3287 }
3288 
3289 /**
3290  * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3291  * @adapter: adapter structure to get the mailbox vector id
3292  * @vecids: Array of vector ids
3293  * @num_vecids: number of vector ids
3294  * @chunks: vector ids received over mailbox
3295  *
 * Will fill in the mailbox vector id, which is received in the get
 * capabilities response, followed by the data queue vector ids received as
 * mailbox parameters.
3299  * Returns number of ids filled
3300  */
3301 int idpf_get_vec_ids(struct idpf_adapter *adapter,
3302 		     u16 *vecids, int num_vecids,
3303 		     struct virtchnl2_vector_chunks *chunks)
3304 {
3305 	u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
3306 	int num_vecid_filled = 0;
3307 	int i, j;
3308 
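	/* The first entry is always the mailbox interrupt vector; the data
	 * queue vector ids from the chunks follow it.
	 */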
3309 	vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
3310 	num_vecid_filled++;
3311 
3312 	for (j = 0; j < num_chunks; j++) {
3313 		struct virtchnl2_vector_chunk *chunk;
3314 		u16 start_vecid, num_vec;
3315 
3316 		chunk = &chunks->vchunks[j];
3317 		num_vec = le16_to_cpu(chunk->num_vectors);
3318 		start_vecid = le16_to_cpu(chunk->start_vector_id);
3319 
3320 		for (i = 0; i < num_vec; i++) {
3321 			if ((num_vecid_filled + i) < num_vecids) {
3322 				vecids[num_vecid_filled + i] = start_vecid;
3323 				start_vecid++;
3324 			} else {
3325 				break;
3326 			}
3327 		}
3328 		num_vecid_filled = num_vecid_filled + i;
3329 	}
3330 
3331 	return num_vecid_filled;
3332 }
3333 
3334 /**
3335  * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3336  * @qids: Array of queue ids
3337  * @num_qids: number of queue ids
 * @q_type: type of queue to look for
3339  * @chunks: queue ids received over mailbox
3340  *
3341  * Will initialize all queue ids with ids received as mailbox parameters
3342  * Returns number of ids filled
3343  */
3344 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
3345 				    struct virtchnl2_queue_reg_chunks *chunks)
3346 {
3347 	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
3348 	u32 num_q_id_filled = 0, i;
3349 	u32 start_q_id, num_q;
3350 
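	/* Expand every chunk of the requested type into consecutive queue
	 * ids, stopping once num_qids entries have been filled.
	 */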
3351 	while (num_chunks--) {
3352 		struct virtchnl2_queue_reg_chunk *chunk;
3353 
3354 		chunk = &chunks->chunks[num_chunks];
3355 		if (le32_to_cpu(chunk->type) != q_type)
3356 			continue;
3357 
3358 		num_q = le32_to_cpu(chunk->num_queues);
3359 		start_q_id = le32_to_cpu(chunk->start_queue_id);
3360 
3361 		for (i = 0; i < num_q; i++) {
3362 			if ((num_q_id_filled + i) < num_qids) {
3363 				qids[num_q_id_filled + i] = start_q_id;
3364 				start_q_id++;
3365 			} else {
3366 				break;
3367 			}
3368 		}
3369 		num_q_id_filled = num_q_id_filled + i;
3370 	}
3371 
3372 	return num_q_id_filled;
3373 }
3374 
3375 /**
3376  * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3377  * @vport: virtual port for which the queues ids are initialized
3378  * @qids: queue ids
3379  * @num_qids: number of queue ids
3380  * @q_type: type of queue
3381  *
3382  * Will initialize all queue ids with ids received as mailbox
3383  * parameters. Returns number of queue ids initialized.
3384  */
3385 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
3386 				       const u32 *qids,
3387 				       int num_qids,
3388 				       u32 q_type)
3389 {
3390 	struct idpf_queue *q;
3391 	int i, j, k = 0;
3392 
3393 	switch (q_type) {
3394 	case VIRTCHNL2_QUEUE_TYPE_TX:
3395 		for (i = 0; i < vport->num_txq_grp; i++) {
3396 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3397 
3398 			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
3399 				tx_qgrp->txqs[j]->q_id = qids[k];
3400 				tx_qgrp->txqs[j]->q_type =
3401 					VIRTCHNL2_QUEUE_TYPE_TX;
3402 			}
3403 		}
3404 		break;
3405 	case VIRTCHNL2_QUEUE_TYPE_RX:
3406 		for (i = 0; i < vport->num_rxq_grp; i++) {
3407 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3408 			u16 num_rxq;
3409 
3410 			if (idpf_is_queue_model_split(vport->rxq_model))
3411 				num_rxq = rx_qgrp->splitq.num_rxq_sets;
3412 			else
3413 				num_rxq = rx_qgrp->singleq.num_rxq;
3414 
3415 			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
3416 				if (idpf_is_queue_model_split(vport->rxq_model))
3417 					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3418 				else
3419 					q = rx_qgrp->singleq.rxqs[j];
3420 				q->q_id = qids[k];
3421 				q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
3422 			}
3423 		}
3424 		break;
3425 	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
3426 		for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
3427 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3428 
3429 			tx_qgrp->complq->q_id = qids[k];
3430 			tx_qgrp->complq->q_type =
3431 				VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
3432 		}
3433 		break;
3434 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
3435 		for (i = 0; i < vport->num_rxq_grp; i++) {
3436 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3437 			u8 num_bufqs = vport->num_bufqs_per_qgrp;
3438 
3439 			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
3440 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
3441 				q->q_id = qids[k];
3442 				q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
3443 			}
3444 		}
3445 		break;
3446 	default:
3447 		break;
3448 	}
3449 
3450 	return k;
3451 }
3452 
3453 /**
3454  * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3455  * @vport: virtual port for which the queues ids are initialized
3456  *
3457  * Will initialize all queue ids with ids received as mailbox parameters.
3458  * Returns 0 on success, negative if all the queues are not initialized.
3459  */
3460 int idpf_vport_queue_ids_init(struct idpf_vport *vport)
3461 {
3462 	struct virtchnl2_create_vport *vport_params;
3463 	struct virtchnl2_queue_reg_chunks *chunks;
3464 	struct idpf_vport_config *vport_config;
3465 	u16 vport_idx = vport->idx;
3466 	int num_ids, err = 0;
3467 	u16 q_type;
3468 	u32 *qids;
3469 
3470 	vport_config = vport->adapter->vport_config[vport_idx];
3471 	if (vport_config->req_qs_chunks) {
3472 		struct virtchnl2_add_queues *vc_aq =
3473 			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
3474 		chunks = &vc_aq->chunks;
3475 	} else {
3476 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
3477 		chunks = &vport_params->chunks;
3478 	}
3479 
3480 	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
3481 	if (!qids)
3482 		return -ENOMEM;
3483 
3484 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3485 					   VIRTCHNL2_QUEUE_TYPE_TX,
3486 					   chunks);
3487 	if (num_ids < vport->num_txq) {
3488 		err = -EINVAL;
3489 		goto mem_rel;
3490 	}
3491 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3492 					      VIRTCHNL2_QUEUE_TYPE_TX);
3493 	if (num_ids < vport->num_txq) {
3494 		err = -EINVAL;
3495 		goto mem_rel;
3496 	}
3497 
3498 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3499 					   VIRTCHNL2_QUEUE_TYPE_RX,
3500 					   chunks);
3501 	if (num_ids < vport->num_rxq) {
3502 		err = -EINVAL;
3503 		goto mem_rel;
3504 	}
3505 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3506 					      VIRTCHNL2_QUEUE_TYPE_RX);
3507 	if (num_ids < vport->num_rxq) {
3508 		err = -EINVAL;
3509 		goto mem_rel;
3510 	}
3511 
3512 	if (!idpf_is_queue_model_split(vport->txq_model))
3513 		goto check_rxq;
3514 
3515 	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
3516 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3517 	if (num_ids < vport->num_complq) {
3518 		err = -EINVAL;
3519 		goto mem_rel;
3520 	}
3521 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3522 	if (num_ids < vport->num_complq) {
3523 		err = -EINVAL;
3524 		goto mem_rel;
3525 	}
3526 
3527 check_rxq:
3528 	if (!idpf_is_queue_model_split(vport->rxq_model))
3529 		goto mem_rel;
3530 
3531 	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
3532 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3533 	if (num_ids < vport->num_bufq) {
3534 		err = -EINVAL;
3535 		goto mem_rel;
3536 	}
3537 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3538 	if (num_ids < vport->num_bufq)
3539 		err = -EINVAL;
3540 
3541 mem_rel:
3542 	kfree(qids);
3543 
3544 	return err;
3545 }
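/*
 * A minimal sketch, for illustration only and not part of this file, of how
 * the repeated "fetch ids, check the count, program the queues, re-check"
 * pattern above could be expressed as one local helper.  The helper name and
 * signature below are hypothetical.
 *
 *	static int idpf_queue_ids_init_one(struct idpf_vport *vport,
 *					   u32 *qids, u32 q_type, u16 expected,
 *					   struct virtchnl2_queue_reg_chunks *chunks)
 *	{
 *		int num_ids;
 *
 *		num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
 *						   q_type, chunks);
 *		if (num_ids < expected)
 *			return -EINVAL;
 *
 *		num_ids = __idpf_vport_queue_ids_init(vport, qids,
 *						      num_ids, q_type);
 *		if (num_ids < expected)
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 */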
3546 
3547 /**
3548  * idpf_vport_adjust_qs - Adjust to new requested queues
3549  * @vport: virtual port data struct
3550  *
3551  * Renegotiate queues.  Returns 0 on success, negative on failure.
3552  */
3553 int idpf_vport_adjust_qs(struct idpf_vport *vport)
3554 {
3555 	struct virtchnl2_create_vport vport_msg;
3556 	int err;
3557 
3558 	vport_msg.txq_model = cpu_to_le16(vport->txq_model);
3559 	vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
3560 	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
3561 				       NULL);
3562 	if (err)
3563 		return err;
3564 
3565 	idpf_vport_init_num_qs(vport, &vport_msg);
3566 	idpf_vport_calc_num_q_groups(vport);
3567 
3568 	return 0;
3569 }
3570 
3571 /**
3572  * idpf_is_capability_ena - Default implementation of capability checking
3573  * @adapter: Private data struct
3574  * @all: if true, every bit in @flag must be set; otherwise any one bit suffices
3575  * @field: caps field to check for flags
3576  * @flag: flag to check
3577  *
3578  * Return true if the requested capability flag(s) are supported, false otherwise
3579  */
3580 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
3581 			    enum idpf_cap_field field, u64 flag)
3582 {
3583 	u8 *caps = (u8 *)&adapter->caps;
3584 	u32 *cap_field;
3585 
3586 	if (!caps)
3587 		return false;
3588 
3589 	if (field == IDPF_BASE_CAPS)
3590 		return false;
3591 
3592 	cap_field = (u32 *)(caps + field);
3593 
3594 	if (all)
3595 		return (*cap_field & flag) == flag;
3596 	else
3597 		return !!(*cap_field & flag);
3598 }
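/*
 * Usage sketch (hypothetical call sites, not taken from this file): with
 * @all == true the check only passes when every bit in @flag is advertised in
 * the selected caps field, while @all == false passes on any one bit.  The
 * field and flag names below are assumed from the driver's headers and are
 * for illustration only.
 *
 *	// all requested checksum offloads must be advertised
 *	idpf_is_capability_ena(adapter, true, IDPF_CSUM_CAPS,
 *			       VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
 *			       VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP);
 *
 *	// any one of the requested offloads is enough
 *	idpf_is_capability_ena(adapter, false, IDPF_CSUM_CAPS,
 *			       VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
 *			       VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP);
 */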
3599 
3600 /**
3601  * idpf_get_vport_id - Get vport id
3602  * @vport: virtual port structure
3603  *
3604  * Return vport id from the adapter persistent data
3605  */
3606 u32 idpf_get_vport_id(struct idpf_vport *vport)
3607 {
3608 	struct virtchnl2_create_vport *vport_msg;
3609 
3610 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3611 
3612 	return le32_to_cpu(vport_msg->vport_id);
3613 }
3614 
3615 /**
3616  * idpf_add_del_mac_filters - Add/del mac filters
3617  * @vport: Virtual port data structure
3618  * @np: Netdev private structure
3619  * @add: Add or delete flag
3620  * @async: If true, don't wait for the return message
3621  *
3622  * Returns 0 on success, error on failure.
3623  */
3624 int idpf_add_del_mac_filters(struct idpf_vport *vport,
3625 			     struct idpf_netdev_priv *np,
3626 			     bool add, bool async)
3627 {
3628 	struct virtchnl2_mac_addr_list *ma_list = NULL;
3629 	struct idpf_adapter *adapter = np->adapter;
3630 	struct idpf_vport_config *vport_config;
3631 	enum idpf_vport_config_flags mac_flag;
3632 	struct pci_dev *pdev = adapter->pdev;
3633 	enum idpf_vport_vc_state vc, vc_err;
3634 	struct virtchnl2_mac_addr *mac_addr;
3635 	struct idpf_mac_filter *f, *tmp;
3636 	u32 num_msgs, total_filters = 0;
3637 	int i = 0, k, err = 0;
3638 	u32 vop;
3639 
3640 	vport_config = adapter->vport_config[np->vport_idx];
3641 	spin_lock_bh(&vport_config->mac_filter_list_lock);
3642 
3643 	/* Find the number of filters to be added or deleted */
3644 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
3645 			    list) {
3646 		if (add && f->add)
3647 			total_filters++;
3648 		else if (!add && f->remove)
3649 			total_filters++;
3650 	}
3651 
3652 	if (!total_filters) {
3653 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
3654 
3655 		return 0;
3656 	}
3657 
3658 	/* Fill the filters to be added or deleted into the virtchnl message */
3659 	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
3660 			   GFP_ATOMIC);
3661 	if (!mac_addr) {
3662 		err = -ENOMEM;
3663 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
3664 		goto error;
3665 	}
3666 
3667 	list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list,
3668 				 list) {
3669 		if (add && f->add) {
3670 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
3671 			i++;
3672 			f->add = false;
3673 			if (i == total_filters)
3674 				break;
3675 		}
3676 		if (!add && f->remove) {
3677 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
3678 			i++;
3679 			f->remove = false;
3680 			if (i == total_filters)
3681 				break;
3682 		}
3683 	}
3684 
3685 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
3686 
3687 	if (add) {
3688 		vop = VIRTCHNL2_OP_ADD_MAC_ADDR;
3689 		vc = IDPF_VC_ADD_MAC_ADDR;
3690 		vc_err = IDPF_VC_ADD_MAC_ADDR_ERR;
3691 		mac_flag = IDPF_VPORT_ADD_MAC_REQ;
3692 	} else {
3693 		vop = VIRTCHNL2_OP_DEL_MAC_ADDR;
3694 		vc = IDPF_VC_DEL_MAC_ADDR;
3695 		vc_err = IDPF_VC_DEL_MAC_ADDR_ERR;
3696 		mac_flag = IDPF_VPORT_DEL_MAC_REQ;
3697 	}
3698 
3699 	/* Chunk up the filters into multiple messages to avoid
3700 	 * sending a control queue message buffer that is too large
3701 	 */
3702 	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
3703 
3704 	if (!async)
3705 		mutex_lock(&vport->vc_buf_lock);
3706 
3707 	for (i = 0, k = 0; i < num_msgs; i++) {
3708 		u32 entries_size, buf_size, num_entries;
3709 
3710 		num_entries = min_t(u32, total_filters,
3711 				    IDPF_NUM_FILTERS_PER_MSG);
3712 		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
3713 		buf_size = struct_size(ma_list, mac_addr_list, num_entries);
3714 
3715 		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
3716 			kfree(ma_list);
3717 			ma_list = kzalloc(buf_size, GFP_ATOMIC);
3718 			if (!ma_list) {
3719 				err = -ENOMEM;
3720 				goto list_prep_error;
3721 			}
3722 		} else {
3723 			memset(ma_list, 0, buf_size);
3724 		}
3725 
3726 		ma_list->vport_id = cpu_to_le32(np->vport_id);
3727 		ma_list->num_mac_addr = cpu_to_le16(num_entries);
3728 		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
3729 
3730 		if (async)
3731 			set_bit(mac_flag, vport_config->flags);
3732 
3733 		err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list);
3734 		if (err)
3735 			goto mbx_error;
3736 
3737 		if (!async) {
3738 			err = idpf_wait_for_event(adapter, vport, vc, vc_err);
3739 			if (err)
3740 				goto mbx_error;
3741 		}
3742 
3743 		k += num_entries;
3744 		total_filters -= num_entries;
3745 	}
3746 
3747 mbx_error:
3748 	if (!async)
3749 		mutex_unlock(&vport->vc_buf_lock);
3750 	kfree(ma_list);
3751 list_prep_error:
3752 	kfree(mac_addr);
3753 error:
3754 	if (err)
3755 		dev_err(&pdev->dev, "Failed to add or del mac filters %d\n", err);
3756 
3757 	return err;
3758 }
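/*
 * Worked example of the chunking above, with purely illustrative numbers
 * (the real per-message limit is IDPF_NUM_FILTERS_PER_MSG from idpf.h): if
 * total_filters were 45 and the limit were 20, num_msgs would be
 * DIV_ROUND_UP(45, 20) == 3 and the loop would send chunks of 20, 20 and 5
 * entries, reallocating ma_list only for the final, smaller chunk.
 */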
3759 
3760 /**
3761  * idpf_set_promiscuous - set promiscuous and send message to mailbox
3762  * @adapter: Driver specific private structure
3763  * @config_data: Vport specific config data
3764  * @vport_id: Vport identifier
3765  *
3766  * Request to enable promiscuous mode for the vport. Message is sent
3767  * asynchronously and won't wait for response.  Returns 0 on success, negative
3768  * on failure;
3769  */
3770 int idpf_set_promiscuous(struct idpf_adapter *adapter,
3771 			 struct idpf_vport_user_config_data *config_data,
3772 			 u32 vport_id)
3773 {
3774 	struct virtchnl2_promisc_info vpi;
3775 	u16 flags = 0;
3776 	int err;
3777 
3778 	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
3779 		flags |= VIRTCHNL2_UNICAST_PROMISC;
3780 	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
3781 		flags |= VIRTCHNL2_MULTICAST_PROMISC;
3782 
3783 	vpi.vport_id = cpu_to_le32(vport_id);
3784 	vpi.flags = cpu_to_le16(flags);
3785 
3786 	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,
3787 			       sizeof(struct virtchnl2_promisc_info),
3788 			       (u8 *)&vpi);
3789 
3790 	return err;
3791 }
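/*
 * Illustrative example (hypothetical caller state, not from this file): if
 * only __IDPF_PROMISC_MC is set in config_data->user_flags, the message
 * carries VIRTCHNL2_MULTICAST_PROMISC alone, asking the control plane for
 * multicast promiscuity only, while unicast reception continues to rely on
 * the MAC filters programmed via idpf_add_del_mac_filters().
 */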
3792