1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include <linux/export.h>
5 #include <net/libeth/rx.h>
6 
7 #include "idpf.h"
8 #include "idpf_virtchnl.h"
9 #include "idpf_ptp.h"
10 
11 /**
12  * struct idpf_vc_xn_manager - Manager for tracking transactions
13  * @ring: backing and lookup for transactions
14  * @free_xn_bm: bitmap for free transactions
15  * @xn_bm_lock: make bitmap access synchronous where necessary
16  * @salt: used to make the cookie unique for every message
17  */
18 struct idpf_vc_xn_manager {
19 	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
20 	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
21 	spinlock_t xn_bm_lock;
22 	u8 salt;
23 };
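
/*
 * Each outgoing message carries a 16-bit SW cookie combining the
 * transaction's ring index with a per-message salt, so a reply can be
 * matched back to its transaction and stale replies can be rejected.
 * A minimal sketch of the round trip, using the IDPF_VC_XN_IDX_M and
 * IDPF_VC_XN_SALT_M masks this file already relies on:
 */
#if 0	/* illustrative sketch, not compiled */
static u16 example_cookie_pack(const struct idpf_vc_xn *xn)
{
	/* combine the salt and ring index as dictated by the masks */
	return FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
	       FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
}

static void example_cookie_unpack(u16 cookie, u16 *idx, u16 *salt)
{
	*idx = FIELD_GET(IDPF_VC_XN_IDX_M, cookie);
	*salt = FIELD_GET(IDPF_VC_XN_SALT_M, cookie);
}
#endif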
24 
25 /**
26  * idpf_vid_to_vport - Translate vport id to vport pointer
27  * @adapter: private data struct
28  * @v_id: vport id to translate
29  *
30  * Returns vport matching v_id, NULL if not found.
31  */
32 static
33 struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
34 {
35 	u16 num_max_vports = idpf_get_max_vports(adapter);
36 	int i;
37 
38 	for (i = 0; i < num_max_vports; i++)
39 		if (adapter->vport_ids[i] == v_id)
40 			return adapter->vports[i];
41 
42 	return NULL;
43 }
44 
45 /**
46  * idpf_handle_event_link - Handle link event message
47  * @adapter: private data struct
48  * @v2e: virtchnl event message
49  */
50 static void idpf_handle_event_link(struct idpf_adapter *adapter,
51 				   const struct virtchnl2_event *v2e)
52 {
53 	struct idpf_netdev_priv *np;
54 	struct idpf_vport *vport;
55 
56 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
57 	if (!vport) {
58 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
59 				    v2e->vport_id);
60 		return;
61 	}
62 	np = netdev_priv(vport->netdev);
63 
64 	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
65 
66 	if (vport->link_up == v2e->link_status)
67 		return;
68 
69 	vport->link_up = v2e->link_status;
70 
71 	if (np->state != __IDPF_VPORT_UP)
72 		return;
73 
74 	if (vport->link_up) {
75 		netif_tx_start_all_queues(vport->netdev);
76 		netif_carrier_on(vport->netdev);
77 	} else {
78 		netif_tx_stop_all_queues(vport->netdev);
79 		netif_carrier_off(vport->netdev);
80 	}
81 }
82 
83 /**
84  * idpf_recv_event_msg - Receive virtchnl event message
85  * @adapter: Driver specific private structure
86  * @ctlq_msg: message to copy from
87  *
88  * Receive virtchnl event message
89  */
90 static void idpf_recv_event_msg(struct idpf_adapter *adapter,
91 				struct idpf_ctlq_msg *ctlq_msg)
92 {
93 	int payload_size = ctlq_msg->ctx.indirect.payload->size;
94 	struct virtchnl2_event *v2e;
95 	u32 event;
96 
97 	if (payload_size < sizeof(*v2e)) {
98 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
99 				    ctlq_msg->cookie.mbx.chnl_opcode,
100 				    payload_size);
101 		return;
102 	}
103 
104 	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
105 	event = le32_to_cpu(v2e->event);
106 
107 	switch (event) {
108 	case VIRTCHNL2_EVENT_LINK_CHANGE:
109 		idpf_handle_event_link(adapter, v2e);
110 		return;
111 	default:
112 		dev_err(&adapter->pdev->dev,
113 			"Unknown event %d from PF\n", event);
114 		break;
115 	}
116 }
117 
118 /**
119  * idpf_mb_clean - Reclaim the send mailbox queue entries
120  * @adapter: Driver specific private structure
121  *
122  * Reclaim the send mailbox queue entries to be used to send further messages
123  *
124  * Returns 0 on success, negative on failure
125  */
126 static int idpf_mb_clean(struct idpf_adapter *adapter)
127 {
128 	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
129 	struct idpf_ctlq_msg **q_msg;
130 	struct idpf_dma_mem *dma_mem;
131 	int err;
132 
133 	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
134 	if (!q_msg)
135 		return -ENOMEM;
136 
137 	err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
138 	if (err)
139 		goto err_kfree;
140 
141 	for (i = 0; i < num_q_msg; i++) {
142 		if (!q_msg[i])
143 			continue;
144 		dma_mem = q_msg[i]->ctx.indirect.payload;
145 		if (dma_mem)
146 			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
147 					  dma_mem->va, dma_mem->pa);
148 		kfree(q_msg[i]);
149 		kfree(dma_mem);
150 	}
151 
152 err_kfree:
153 	kfree(q_msg);
154 
155 	return err;
156 }
157 
158 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
159 /**
160  * idpf_ptp_is_mb_msg - Check if the message is PTP-related
161  * @op: virtchnl opcode
162  *
163  * Return: true if msg is PTP-related, false otherwise.
164  */
165 static bool idpf_ptp_is_mb_msg(u32 op)
166 {
167 	switch (op) {
168 	case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
169 	case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
170 	case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
171 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
172 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
173 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
174 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
175 		return true;
176 	default:
177 		return false;
178 	}
179 }
180 
181 /**
182  * idpf_prepare_ptp_mb_msg - Prepare PTP related message
183  *
184  * @adapter: Driver specific private structure
185  * @op: virtchnl opcode
186  * @ctlq_msg: Corresponding control queue message
187  */
188 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
189 				    struct idpf_ctlq_msg *ctlq_msg)
190 {
191 	/* If the message is PTP-related and the secondary mailbox is available,
192 	 * send the message through the secondary mailbox.
193 	 */
194 	if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
195 		return;
196 
197 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
198 	ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
199 	ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
200 }
201 #else /* !CONFIG_PTP_1588_CLOCK */
202 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
203 				    struct idpf_ctlq_msg *ctlq_msg)
204 { }
205 #endif /* CONFIG_PTP_1588_CLOCK */
206 
207 /**
208  * idpf_send_mb_msg - Send message over mailbox
209  * @adapter: Driver specific private structure
210  * @op: virtchnl opcode
211  * @msg_size: size of the payload
212  * @msg: pointer to buffer holding the payload
213  * @cookie: unique SW generated cookie per message
214  *
215  * Prepares the control queue message and initiates the send API
216  *
217  * Returns 0 on success, negative on failure
218  */
219 int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
220 		     u16 msg_size, u8 *msg, u16 cookie)
221 {
222 	struct idpf_ctlq_msg *ctlq_msg;
223 	struct idpf_dma_mem *dma_mem;
224 	int err;
225 
226 	/* If we are here and a reset is detected, nothing much can be
227 	 * done. This thread should silently abort; it is expected to
228 	 * be corrected by a new run from either user or driver flows
229 	 * after the reset.
230 	 */
231 	if (idpf_is_reset_detected(adapter))
232 		return 0;
233 
234 	err = idpf_mb_clean(adapter);
235 	if (err)
236 		return err;
237 
238 	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
239 	if (!ctlq_msg)
240 		return -ENOMEM;
241 
242 	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
243 	if (!dma_mem) {
244 		err = -ENOMEM;
245 		goto dma_mem_error;
246 	}
247 
248 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
249 	ctlq_msg->func_id = 0;
250 
251 	idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
252 
253 	ctlq_msg->data_len = msg_size;
254 	ctlq_msg->cookie.mbx.chnl_opcode = op;
255 	ctlq_msg->cookie.mbx.chnl_retval = 0;
256 	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
257 	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
258 					 &dma_mem->pa, GFP_ATOMIC);
259 	if (!dma_mem->va) {
260 		err = -ENOMEM;
261 		goto dma_alloc_error;
262 	}
263 
264 	/* It's possible we're just sending an opcode but no buffer */
265 	if (msg && msg_size)
266 		memcpy(dma_mem->va, msg, msg_size);
267 	ctlq_msg->ctx.indirect.payload = dma_mem;
268 	ctlq_msg->ctx.sw_cookie.data = cookie;
269 
270 	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
271 	if (err)
272 		goto send_error;
273 
274 	return 0;
275 
276 send_error:
277 	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
278 			  dma_mem->pa);
279 dma_alloc_error:
280 	kfree(dma_mem);
281 dma_mem_error:
282 	kfree(ctlq_msg);
283 
284 	return err;
285 }
286 
287 /* API for virtchnl "transaction" support ("xn" for short).
288  *
289  * We are reusing the completion lock to serialize the accesses to the
290  * transaction state for simplicity, but it could be its own separate synchro
291  * as well. For now, this API is only used from within a workqueue context;
292  * raw_spin_lock() is enough.
293  */
294 /**
295  * idpf_vc_xn_lock - Request exclusive access to vc transaction
296  * @xn: struct idpf_vc_xn* to access
297  */
298 #define idpf_vc_xn_lock(xn)			\
299 	raw_spin_lock(&(xn)->completed.wait.lock)
300 
301 /**
302  * idpf_vc_xn_unlock - Release exclusive access to vc transaction
303  * @xn: struct idpf_vc_xn* to access
304  */
305 #define idpf_vc_xn_unlock(xn)		\
306 	raw_spin_unlock(&(xn)->completed.wait.lock)
307 
308 /**
309  * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
310  * reset the transaction state.
311  * @xn: struct idpf_vc_xn to update
312  */
313 static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
314 {
315 	xn->reply.iov_base = NULL;
316 	xn->reply.iov_len = 0;
317 
318 	if (xn->state != IDPF_VC_XN_SHUTDOWN)
319 		xn->state = IDPF_VC_XN_IDLE;
320 }
321 
322 /**
323  * idpf_vc_xn_init - Initialize virtchnl transaction object
324  * @vcxn_mngr: pointer to vc transaction manager struct
325  */
326 static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
327 {
328 	int i;
329 
330 	spin_lock_init(&vcxn_mngr->xn_bm_lock);
331 
332 	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
333 		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
334 
335 		xn->state = IDPF_VC_XN_IDLE;
336 		xn->idx = i;
337 		idpf_vc_xn_release_bufs(xn);
338 		init_completion(&xn->completed);
339 	}
340 
341 	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
342 }
343 
344 /**
345  * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
346  * @vcxn_mngr: pointer to vc transaction manager struct
347  *
348  * All waiting threads will be woken up and their transactions aborted. Further
349  * operations on that object will fail.
350  */
351 void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
352 {
353 	int i;
354 
355 	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
356 	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
357 	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
358 
359 	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
360 		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
361 
362 		idpf_vc_xn_lock(xn);
363 		xn->state = IDPF_VC_XN_SHUTDOWN;
364 		idpf_vc_xn_release_bufs(xn);
365 		idpf_vc_xn_unlock(xn);
366 		complete_all(&xn->completed);
367 	}
368 }
369 
370 /**
371  * idpf_vc_xn_pop_free - Pop a free transaction from free list
372  * @vcxn_mngr: transaction manager to pop from
373  *
374  * Returns NULL if no free transactions are available.
375  */
376 static
377 struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
378 {
379 	struct idpf_vc_xn *xn = NULL;
380 	unsigned long free_idx;
381 
382 	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
383 	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
384 	if (free_idx == IDPF_VC_XN_RING_LEN)
385 		goto do_unlock;
386 
387 	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
388 	xn = &vcxn_mngr->ring[free_idx];
389 	xn->salt = vcxn_mngr->salt++;
390 
391 do_unlock:
392 	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
393 
394 	return xn;
395 }
396 
397 /**
398  * idpf_vc_xn_push_free - Push a free transaction to free list
399  * @vcxn_mngr: transaction manager to push to
400  * @xn: transaction to push
401  */
402 static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
403 				 struct idpf_vc_xn *xn)
404 {
405 	idpf_vc_xn_release_bufs(xn);
406 	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
407 }
408 
409 /**
410  * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
411  * @adapter: driver specific private structure with vcxn_mngr
412  * @params: parameters for this particular transaction including
413  *   -vc_op: virtchannel operation to send
414  *   -send_buf: kvec iov for send buf and len
415  *   -recv_buf: kvec iov for recv buf and len (ignored if NULL)
416  *   -timeout_ms: timeout waiting for a reply (milliseconds)
417  *   -async: don't wait for message reply, will lose caller context
418  *   -async_handler: callback to handle async replies
419  *
420  * @returns >= 0 for success, the size of the initial reply (may or may not be
421  * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
422  * error.
423  */
424 ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
425 			const struct idpf_vc_xn_params *params)
426 {
427 	const struct kvec *send_buf = &params->send_buf;
428 	struct idpf_vc_xn *xn;
429 	ssize_t retval;
430 	u16 cookie;
431 
432 	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
433 	/* no free transactions available */
434 	if (!xn)
435 		return -ENOSPC;
436 
437 	idpf_vc_xn_lock(xn);
438 	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
439 		retval = -ENXIO;
440 		goto only_unlock;
441 	} else if (xn->state != IDPF_VC_XN_IDLE) {
442 		/* We're just going to clobber this transaction even though
443 		 * it's not IDLE. If we don't reuse it we could theoretically
444 		 * eventually leak all the free transactions and not be able to
445 		 * send any messages. At least this way we make an attempt to
446 		 * remain functional even though something really bad is
447 		 * happening that's corrupting what was supposed to be free
448 		 * transactions.
449 		 */
450 		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
451 			  xn->idx, xn->vc_op);
452 	}
453 
454 	xn->reply = params->recv_buf;
455 	xn->reply_sz = 0;
456 	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
457 	xn->vc_op = params->vc_op;
458 	xn->async_handler = params->async_handler;
459 	idpf_vc_xn_unlock(xn);
460 
461 	if (!params->async)
462 		reinit_completion(&xn->completed);
463 	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
464 		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
465 
466 	retval = idpf_send_mb_msg(adapter, params->vc_op,
467 				  send_buf->iov_len, send_buf->iov_base,
468 				  cookie);
469 	if (retval) {
470 		idpf_vc_xn_lock(xn);
471 		goto release_and_unlock;
472 	}
473 
474 	if (params->async)
475 		return 0;
476 
477 	wait_for_completion_timeout(&xn->completed,
478 				    msecs_to_jiffies(params->timeout_ms));
479 
480 	/* No need to check the return value; we check the final state of the
481 	 * transaction below. It's possible the transaction effectively waits
482 	 * longer than the specified timeout if we get preempted here but after
483 	 * wait_for_completion_timeout returns. This should be a non-issue,
484 	 * however.
485 	 */
486 	idpf_vc_xn_lock(xn);
487 	switch (xn->state) {
488 	case IDPF_VC_XN_SHUTDOWN:
489 		retval = -ENXIO;
490 		goto only_unlock;
491 	case IDPF_VC_XN_WAITING:
492 		dev_notice_ratelimited(&adapter->pdev->dev,
493 				       "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
494 				       params->vc_op, cookie, xn->vc_op,
495 				       xn->salt, params->timeout_ms);
496 		retval = -ETIME;
497 		break;
498 	case IDPF_VC_XN_COMPLETED_SUCCESS:
499 		retval = xn->reply_sz;
500 		break;
501 	case IDPF_VC_XN_COMPLETED_FAILED:
502 		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
503 				       params->vc_op);
504 		retval = -EIO;
505 		break;
506 	default:
507 		/* Invalid state. */
508 		WARN_ON_ONCE(1);
509 		retval = -EIO;
510 		break;
511 	}
512 
513 release_and_unlock:
514 	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
515 	/* If we receive a VC reply after here, it will be dropped. */
516 only_unlock:
517 	idpf_vc_xn_unlock(xn);
518 
519 	return retval;
520 }
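
/*
 * A minimal sketch of a typical synchronous caller of idpf_vc_xn_exec()
 * (see e.g. idpf_send_ver_msg() below for a real one); the helper name
 * and parameters here are purely illustrative:
 */
#if 0	/* illustrative sketch, not compiled */
static int example_send_recv(struct idpf_adapter *adapter, u32 vc_op,
			     void *req, size_t req_len,
			     void *resp, size_t resp_len)
{
	struct idpf_vc_xn_params xn_params = {
		.vc_op = vc_op,
		.send_buf.iov_base = req,
		.send_buf.iov_len = req_len,
		.recv_buf.iov_base = resp,
		.recv_buf.iov_len = resp_len,
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	ssize_t reply_sz;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;	/* send failed, timed out, or NACKed */
	if (reply_sz < resp_len)
		return -EIO;		/* reply shorter than expected */

	return 0;
}
#endif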
521 
522 /**
523  * idpf_vc_xn_forward_async - Handle async reply receives
524  * @adapter: private data struct
525  * @xn: transaction to handle
526  * @ctlq_msg: corresponding ctlq_msg
527  *
528  * For async sends we're going to lose the caller's context, so if an
529  * async_handler was provided it can deal with the reply; otherwise we'll
530  * just check and report if there is an error.
531  */
532 static int
533 idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
534 			 const struct idpf_ctlq_msg *ctlq_msg)
535 {
536 	int err = 0;
537 
538 	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
539 		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
540 				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
541 		xn->reply_sz = 0;
542 		err = -EINVAL;
543 		goto release_bufs;
544 	}
545 
546 	if (xn->async_handler) {
547 		err = xn->async_handler(adapter, xn, ctlq_msg);
548 		goto release_bufs;
549 	}
550 
551 	if (ctlq_msg->cookie.mbx.chnl_retval) {
552 		xn->reply_sz = 0;
553 		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
554 				    ctlq_msg->cookie.mbx.chnl_opcode);
555 		err = -EINVAL;
556 	}
557 
558 release_bufs:
559 	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
560 
561 	return err;
562 }
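
/*
 * A minimal sketch of what an async reply handler looks like; a caller
 * passes such a function via xn_params.async_handler together with
 * xn_params.async = true. The name below is illustrative:
 */
#if 0	/* illustrative sketch, not compiled */
static int example_async_handler(struct idpf_adapter *adapter,
				 struct idpf_vc_xn *xn,
				 const struct idpf_ctlq_msg *ctlq_msg)
{
	/* The sender's context is gone; inspect retval/payload here. */
	if (ctlq_msg->cookie.mbx.chnl_retval)
		return -EINVAL;

	return 0;
}
#endif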
563 
564 /**
565  * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
566  * @adapter: driver specific private structure with vcxn_mngr
567  * @ctlq_msg: controlq message to send back to receiving thread
568  */
569 static int
570 idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
571 			 const struct idpf_ctlq_msg *ctlq_msg)
572 {
573 	const void *payload = NULL;
574 	size_t payload_size = 0;
575 	struct idpf_vc_xn *xn;
576 	u16 msg_info;
577 	int err = 0;
578 	u16 xn_idx;
579 	u16 salt;
580 
581 	msg_info = ctlq_msg->ctx.sw_cookie.data;
582 	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
583 	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
584 		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
585 				    xn_idx);
586 		return -EINVAL;
587 	}
588 	xn = &adapter->vcxn_mngr->ring[xn_idx];
589 	idpf_vc_xn_lock(xn);
590 	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
591 	if (xn->salt != salt) {
592 		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
593 				    xn->vc_op, xn->salt, xn->state,
594 				    ctlq_msg->cookie.mbx.chnl_opcode, salt);
595 		idpf_vc_xn_unlock(xn);
596 		return -EINVAL;
597 	}
598 
599 	switch (xn->state) {
600 	case IDPF_VC_XN_WAITING:
601 		/* success */
602 		break;
603 	case IDPF_VC_XN_IDLE:
604 		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
605 				    ctlq_msg->cookie.mbx.chnl_opcode);
606 		err = -EINVAL;
607 		goto out_unlock;
608 	case IDPF_VC_XN_SHUTDOWN:
609 		/* ENXIO is a bit special here as the recv msg loop uses it to
610 		 * know if it should stop trying to clean the ring if we lost
611 		 * the virtchnl. We need to stop playing with registers and
612 		 * yield.
613 		 */
614 		err = -ENXIO;
615 		goto out_unlock;
616 	case IDPF_VC_XN_ASYNC:
617 		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
618 		idpf_vc_xn_unlock(xn);
619 		return err;
620 	default:
621 		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
622 				    ctlq_msg->cookie.mbx.chnl_opcode);
623 		err = -EBUSY;
624 		goto out_unlock;
625 	}
626 
627 	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
628 		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
629 				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
630 		xn->reply_sz = 0;
631 		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
632 		err = -EINVAL;
633 		goto out_unlock;
634 	}
635 
636 	if (ctlq_msg->cookie.mbx.chnl_retval) {
637 		xn->reply_sz = 0;
638 		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
639 		err = -EINVAL;
640 		goto out_unlock;
641 	}
642 
643 	if (ctlq_msg->data_len) {
644 		payload = ctlq_msg->ctx.indirect.payload->va;
645 		payload_size = ctlq_msg->data_len;
646 	}
647 
648 	xn->reply_sz = payload_size;
649 	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
650 
651 	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
652 		memcpy(xn->reply.iov_base, payload,
653 		       min_t(size_t, xn->reply.iov_len, payload_size));
654 
655 out_unlock:
656 	idpf_vc_xn_unlock(xn);
657 	/* we _cannot_ hold the lock while calling complete */
658 	complete(&xn->completed);
659 
660 	return err;
661 }
662 
663 /**
664  * idpf_recv_mb_msg - Receive message over mailbox
665  * @adapter: Driver specific private structure
666  *
667  * Receives control queue messages and posts receive buffers. Returns 0
668  * on success and negative on failure.
669  */
670 int idpf_recv_mb_msg(struct idpf_adapter *adapter)
671 {
672 	struct idpf_ctlq_msg ctlq_msg;
673 	struct idpf_dma_mem *dma_mem;
674 	int post_err, err;
675 	u16 num_recv;
676 
677 	while (1) {
678 		/* This will get <= num_recv messages and output how many
679 		 * were actually received in num_recv.
680 		 */
681 		num_recv = 1;
682 		err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
683 		if (err || !num_recv)
684 			break;
685 
686 		if (ctlq_msg.data_len) {
687 			dma_mem = ctlq_msg.ctx.indirect.payload;
688 		} else {
689 			dma_mem = NULL;
690 			num_recv = 0;
691 		}
692 
693 		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
694 			idpf_recv_event_msg(adapter, &ctlq_msg);
695 		else
696 			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
697 
698 		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
699 						   adapter->hw.arq,
700 						   &num_recv, &dma_mem);
701 
702 		/* If the post failed, free the only buffer we supplied */
703 		if (post_err) {
704 			if (dma_mem)
705 				dmam_free_coherent(&adapter->pdev->dev,
706 						   dma_mem->size, dma_mem->va,
707 						   dma_mem->pa);
708 			break;
709 		}
710 
711 		/* virtchnl is trying to shut down, stop cleaning */
712 		if (err == -ENXIO)
713 			break;
714 	}
715 
716 	return err;
717 }
718 
719 /**
720  * idpf_wait_for_marker_event - wait for software marker response
721  * @vport: virtual port data structure
722  *
723  * Returns 0 on success, negative on failure.
724  **/
725 static int idpf_wait_for_marker_event(struct idpf_vport *vport)
726 {
727 	bool markers_rcvd = true;
728 
729 	for (u32 i = 0; i < vport->num_txq; i++) {
730 		struct idpf_tx_queue *txq = vport->txqs[i];
731 
732 		idpf_queue_set(SW_MARKER, txq);
733 		idpf_wait_for_sw_marker_completion(txq);
734 		markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
735 	}
736 
737 	if (markers_rcvd)
738 		return 0;
739 
740 	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
741 
742 	return -ETIMEDOUT;
743 }
744 
745 /**
746  * idpf_send_ver_msg - send virtchnl version message
747  * @adapter: Driver specific private structure
748  *
749  * Send virtchnl version message.  Returns 0 on success, negative on failure.
750  */
751 static int idpf_send_ver_msg(struct idpf_adapter *adapter)
752 {
753 	struct idpf_vc_xn_params xn_params = {};
754 	struct virtchnl2_version_info vvi;
755 	ssize_t reply_sz;
756 	u32 major, minor;
757 	int err = 0;
758 
759 	if (adapter->virt_ver_maj) {
760 		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
761 		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
762 	} else {
763 		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
764 		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
765 	}
766 
767 	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
768 	xn_params.send_buf.iov_base = &vvi;
769 	xn_params.send_buf.iov_len = sizeof(vvi);
770 	xn_params.recv_buf = xn_params.send_buf;
771 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
772 
773 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
774 	if (reply_sz < 0)
775 		return reply_sz;
776 	if (reply_sz < sizeof(vvi))
777 		return -EIO;
778 
779 	major = le32_to_cpu(vvi.major);
780 	minor = le32_to_cpu(vvi.minor);
781 
782 	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
783 		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
784 		return -EINVAL;
785 	}
786 
787 	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
788 	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
789 		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
790 
791 	/* If we have a mismatch, resend the version message to update the
792 	 * receiver on what version we will use.
793 	 */
794 	if (!adapter->virt_ver_maj &&
795 	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
796 	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
797 		err = -EAGAIN;
798 
799 	adapter->virt_ver_maj = major;
800 	adapter->virt_ver_min = minor;
801 
802 	return err;
803 }
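
/*
 * On the first negotiation pass virt_ver_maj is zero; a version mismatch
 * then yields -EAGAIN after the negotiated values have been stored, so a
 * caller simply retries. A minimal sketch of that retry (illustrative
 * only):
 */
#if 0	/* illustrative sketch, not compiled */
	err = idpf_send_ver_msg(adapter);
	if (err == -EAGAIN)
		err = idpf_send_ver_msg(adapter);	/* resend with stored version */
#endif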
804 
805 /**
806  * idpf_send_get_caps_msg - Send virtchnl get capabilities message
807  * @adapter: Driver specific private structure
808  *
809  * Send virtchnl get capabilities message. Returns 0 on success, negative on
810  * failure.
811  */
812 static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
813 {
814 	struct virtchnl2_get_capabilities caps = {};
815 	struct idpf_vc_xn_params xn_params = {};
816 	ssize_t reply_sz;
817 
818 	caps.csum_caps =
819 		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4	|
820 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|
821 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	|
822 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|
823 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|
824 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	|
825 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|
826 			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4	|
827 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|
828 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|
829 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|
830 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|
831 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	|
832 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	|
833 			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
834 			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
835 			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
836 			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
837 			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);
838 
839 	caps.seg_caps =
840 		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP		|
841 			    VIRTCHNL2_CAP_SEG_IPV4_UDP		|
842 			    VIRTCHNL2_CAP_SEG_IPV4_SCTP		|
843 			    VIRTCHNL2_CAP_SEG_IPV6_TCP		|
844 			    VIRTCHNL2_CAP_SEG_IPV6_UDP		|
845 			    VIRTCHNL2_CAP_SEG_IPV6_SCTP		|
846 			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
847 
848 	caps.rss_caps =
849 		cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP		|
850 			    VIRTCHNL2_FLOW_IPV4_UDP		|
851 			    VIRTCHNL2_FLOW_IPV4_SCTP		|
852 			    VIRTCHNL2_FLOW_IPV4_OTHER		|
853 			    VIRTCHNL2_FLOW_IPV6_TCP		|
854 			    VIRTCHNL2_FLOW_IPV6_UDP		|
855 			    VIRTCHNL2_FLOW_IPV6_SCTP		|
856 			    VIRTCHNL2_FLOW_IPV6_OTHER);
857 
858 	caps.hsplit_caps =
859 		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|
860 			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);
861 
862 	caps.rsc_caps =
863 		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP		|
864 			    VIRTCHNL2_CAP_RSC_IPV6_TCP);
865 
866 	caps.other_caps =
867 		cpu_to_le64(VIRTCHNL2_CAP_SRIOV			|
868 			    VIRTCHNL2_CAP_RDMA			|
869 			    VIRTCHNL2_CAP_LAN_MEMORY_REGIONS	|
870 			    VIRTCHNL2_CAP_MACFILTER		|
871 			    VIRTCHNL2_CAP_SPLITQ_QSCHED		|
872 			    VIRTCHNL2_CAP_PROMISC		|
873 			    VIRTCHNL2_CAP_LOOPBACK		|
874 			    VIRTCHNL2_CAP_PTP);
875 
876 	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
877 	xn_params.send_buf.iov_base = &caps;
878 	xn_params.send_buf.iov_len = sizeof(caps);
879 	xn_params.recv_buf.iov_base = &adapter->caps;
880 	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
881 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
882 
883 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
884 	if (reply_sz < 0)
885 		return reply_sz;
886 	if (reply_sz < sizeof(adapter->caps))
887 		return -EIO;
888 
889 	return 0;
890 }
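
/*
 * After a successful VIRTCHNL2_OP_GET_CAPS the negotiated capabilities
 * sit in adapter->caps in little-endian form. A minimal sketch of testing
 * one capability bit (the driver has its own helpers for this; the name
 * below is illustrative):
 */
#if 0	/* illustrative sketch, not compiled */
static bool example_other_cap_ena(const struct idpf_adapter *adapter, u64 flag)
{
	return !!(le64_to_cpu(adapter->caps.other_caps) & flag);
}

/* e.g.: example_other_cap_ena(adapter, VIRTCHNL2_CAP_PTP) */
#endif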
891 
892 /**
893  * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
894  * @adapter: Driver specific private struct
895  *
896  * Return: 0 on success or error code on failure.
897  */
898 static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
899 {
900 	struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
901 	struct idpf_vc_xn_params xn_params = {
902 		.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
903 		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
904 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
905 	};
906 	int num_regions, size;
907 	struct idpf_hw *hw;
908 	ssize_t reply_sz;
909 	int err = 0;
910 
911 	rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
912 	if (!rcvd_regions)
913 		return -ENOMEM;
914 
915 	xn_params.recv_buf.iov_base = rcvd_regions;
916 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
917 	if (reply_sz < 0)
918 		return reply_sz;
919 
920 	num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
921 	size = struct_size(rcvd_regions, mem_reg, num_regions);
922 	if (reply_sz < size)
923 		return -EIO;
924 
925 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
926 		return -EINVAL;
927 
928 	hw = &adapter->hw;
929 	hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
930 	if (!hw->lan_regs)
931 		return -ENOMEM;
932 
933 	for (int i = 0; i < num_regions; i++) {
934 		hw->lan_regs[i].addr_len =
935 			le64_to_cpu(rcvd_regions->mem_reg[i].size);
936 		hw->lan_regs[i].addr_start =
937 			le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
938 	}
939 	hw->num_lan_regs = num_regions;
940 
941 	return err;
942 }
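
/*
 * Note the scope-based cleanup used above: rcvd_regions is declared with
 * __free(kfree) from <linux/cleanup.h>, so every return path frees it
 * automatically. A minimal sketch of the pattern (struct foo is a
 * placeholder type):
 */
#if 0	/* illustrative sketch, not compiled */
	struct foo *buf __free(kfree) = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use buf; kfree(buf) runs automatically on scope exit ... */
#endif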
943 
944 /**
945  * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
946  * @adapter: Driver specific private structure
947  *
948  * Called when idpf_send_get_lan_memory_regions is not supported. This will
949  * calculate the offsets and sizes for the regions before, in between, and
950  * after the mailbox and rstat MMIO mappings.
951  *
952  * Return: 0 on success or error code on failure.
953  */
954 static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
955 {
956 	struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
957 	struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
958 	struct idpf_hw *hw = &adapter->hw;
959 
960 	hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
961 	hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
962 			       GFP_KERNEL);
963 	if (!hw->lan_regs)
964 		return -ENOMEM;
965 
966 	/* Region preceding mailbox */
967 	hw->lan_regs[0].addr_start = 0;
968 	hw->lan_regs[0].addr_len = mbx_reg->start;
969 	/* Region between mailbox and rstat */
970 	hw->lan_regs[1].addr_start = mbx_reg->end + 1;
971 	hw->lan_regs[1].addr_len = rstat_reg->start -
972 					hw->lan_regs[1].addr_start;
973 	/* Region after rstat */
974 	hw->lan_regs[2].addr_start = rstat_reg->end + 1;
975 	hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
976 					hw->lan_regs[2].addr_start;
977 
978 	return 0;
979 }
980 
981 /**
982  * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
983  * @adapter: Driver specific private structure
984  *
985  * Return: 0 on success or error code on failure.
986  */
987 static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
988 {
989 	struct pci_dev *pdev = adapter->pdev;
990 	struct idpf_hw *hw = &adapter->hw;
991 	resource_size_t res_start;
992 
993 	res_start = pci_resource_start(pdev, 0);
994 
995 	for (int i = 0; i < hw->num_lan_regs; i++) {
996 		resource_size_t start;
997 		long len;
998 
999 		len = hw->lan_regs[i].addr_len;
1000 		if (!len)
1001 			continue;
1002 		start = hw->lan_regs[i].addr_start + res_start;
1003 
1004 		hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
1005 		if (!hw->lan_regs[i].vaddr) {
1006 			pci_err(pdev, "failed to map BAR0 region\n");
1007 			return -ENOMEM;
1008 		}
1009 	}
1010 
1011 	return 0;
1012 }
1013 
1014 /**
1015  * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
1016  * @adapter: adapter info struct
1017  * @rule: Flow steering rule to add/delete
1018  * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
1019  *          VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
1020  *
1021  * Send ADD/DELETE flow steering virtchnl message and receive the result.
1022  *
1023  * Return: 0 on success, negative on failure.
1024  */
1025 int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
1026 				struct virtchnl2_flow_rule_add_del *rule,
1027 				enum virtchnl2_op opcode)
1028 {
1029 	int rule_count = le32_to_cpu(rule->count);
1030 	struct idpf_vc_xn_params xn_params = {};
1031 	ssize_t reply_sz;
1032 
1033 	if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
1034 	    opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
1035 		return -EINVAL;
1036 
1037 	xn_params.vc_op = opcode;
1038 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1039 	xn_params.async = false;
1040 	xn_params.send_buf.iov_base = rule;
1041 	xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
1042 	xn_params.recv_buf.iov_base = rule;
1043 	xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);
1044 
1045 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1046 	return reply_sz < 0 ? reply_sz : 0;
1047 }
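
/*
 * A minimal sketch of adding a single flow steering rule via
 * idpf_add_del_fsteer_filters() (illustrative only; the rule header and
 * rule_info contents come from virtchnl2.h):
 */
#if 0	/* illustrative sketch, not compiled */
	struct virtchnl2_flow_rule_add_del *rule;
	int err;

	rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->count = cpu_to_le32(1);
	/* set the vport id and fill rule->rule_info[0] per virtchnl2.h */

	err = idpf_add_del_fsteer_filters(adapter, rule,
					  VIRTCHNL2_OP_ADD_FLOW_RULE);
	kfree(rule);
#endif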
1048 
1049 /**
1050  * idpf_vport_alloc_max_qs - Allocate max queues for a vport
1051  * @adapter: Driver specific private structure
1052  * @max_q: vport max queue structure
1053  */
1054 int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
1055 			    struct idpf_vport_max_q *max_q)
1056 {
1057 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1058 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1059 	u16 default_vports = idpf_get_default_vports(adapter);
1060 	u32 max_rx_q, max_tx_q, max_buf_q, max_compl_q;
1061 
1062 	mutex_lock(&adapter->queue_lock);
1063 
1064 	/* Caps are device-wide. Give each vport an equal piece */
1065 	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
1066 	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
1067 	max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports;
1068 	max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports;
1069 
1070 	if (adapter->num_alloc_vports >= default_vports) {
1071 		max_rx_q = IDPF_MIN_Q;
1072 		max_tx_q = IDPF_MIN_Q;
1073 	}
1074 
1075 	/*
1076 	 * Harmonize the numbers. The current implementation always creates
1077 	 * `IDPF_MAX_BUFQS_PER_RXQ_GRP` buffer queues for each Rx queue and
1078 	 * one completion queue for each Tx queue for best performance.
1079 	 * If fewer buffer or completion queues are available, cap the number
1080 	 * of the corresponding Rx/Tx queues.
1081 	 */
1082 	max_rx_q = min(max_rx_q, max_buf_q / IDPF_MAX_BUFQS_PER_RXQ_GRP);
1083 	max_tx_q = min(max_tx_q, max_compl_q);
1084 
1085 	max_q->max_rxq = max_rx_q;
1086 	max_q->max_txq = max_tx_q;
1087 	max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP;
1088 	max_q->max_complq = max_tx_q;
1089 
1090 	if (avail_queues->avail_rxq < max_q->max_rxq ||
1091 	    avail_queues->avail_txq < max_q->max_txq ||
1092 	    avail_queues->avail_bufq < max_q->max_bufq ||
1093 	    avail_queues->avail_complq < max_q->max_complq) {
1094 		mutex_unlock(&adapter->queue_lock);
1095 
1096 		return -EINVAL;
1097 	}
1098 
1099 	avail_queues->avail_rxq -= max_q->max_rxq;
1100 	avail_queues->avail_txq -= max_q->max_txq;
1101 	avail_queues->avail_bufq -= max_q->max_bufq;
1102 	avail_queues->avail_complq -= max_q->max_complq;
1103 
1104 	mutex_unlock(&adapter->queue_lock);
1105 
1106 	return 0;
1107 }
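
/*
 * Worked example of the sizing above, with purely illustrative numbers:
 * assume caps->max_rx_q = 96, caps->max_rx_bufq = 128, two default vports
 * and IDPF_MAX_BUFQS_PER_RXQ_GRP = 2. Each vport starts from
 * max_rx_q = 96 / 2 = 48, but the buffer queues cap that to
 * min(48, (128 / 2) / 2) = 32 Rx queues, giving max_bufq = 32 * 2 = 64.
 */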
1108 
1109 /**
1110  * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
1111  * @adapter: Driver specific private structure
1112  * @max_q: vport max queue structure
1113  */
1114 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
1115 			       struct idpf_vport_max_q *max_q)
1116 {
1117 	struct idpf_avail_queue_info *avail_queues;
1118 
1119 	mutex_lock(&adapter->queue_lock);
1120 	avail_queues = &adapter->avail_queues;
1121 
1122 	avail_queues->avail_rxq += max_q->max_rxq;
1123 	avail_queues->avail_txq += max_q->max_txq;
1124 	avail_queues->avail_bufq += max_q->max_bufq;
1125 	avail_queues->avail_complq += max_q->max_complq;
1126 
1127 	mutex_unlock(&adapter->queue_lock);
1128 }
1129 
1130 /**
1131  * idpf_init_avail_queues - Initialize available queues on the device
1132  * @adapter: Driver specific private structure
1133  */
1134 static void idpf_init_avail_queues(struct idpf_adapter *adapter)
1135 {
1136 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1137 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1138 
1139 	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
1140 	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
1141 	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
1142 	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
1143 }
1144 
1145 /**
1146  * idpf_get_reg_intr_vecs - Get vector queue register offset
1147  * @vport: virtual port structure
1148  * @reg_vals: Register offsets to store in
1149  *
1150  * Returns number of registers that got populated
1151  */
1152 int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
1153 			   struct idpf_vec_regs *reg_vals)
1154 {
1155 	struct virtchnl2_vector_chunks *chunks;
1156 	struct idpf_vec_regs reg_val;
1157 	u16 num_vchunks, num_vec;
1158 	int num_regs = 0, i, j;
1159 
1160 	chunks = &vport->adapter->req_vec_chunks->vchunks;
1161 	num_vchunks = le16_to_cpu(chunks->num_vchunks);
1162 
1163 	for (j = 0; j < num_vchunks; j++) {
1164 		struct virtchnl2_vector_chunk *chunk;
1165 		u32 dynctl_reg_spacing;
1166 		u32 itrn_reg_spacing;
1167 
1168 		chunk = &chunks->vchunks[j];
1169 		num_vec = le16_to_cpu(chunk->num_vectors);
1170 		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
1171 		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
1172 		reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);
1173 
1174 		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
1175 		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);
1176 
1177 		for (i = 0; i < num_vec; i++) {
1178 			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
1179 			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
1180 			reg_vals[num_regs].itrn_index_spacing =
1181 						reg_val.itrn_index_spacing;
1182 
1183 			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
1184 			reg_val.itrn_reg += itrn_reg_spacing;
1185 			num_regs++;
1186 		}
1187 	}
1188 
1189 	return num_regs;
1190 }
1191 
1192 /**
1193  * idpf_vport_get_q_reg - Get the queue registers for the vport
1194  * @reg_vals: register values needing to be set
1195  * @num_regs: amount we expect to fill
1196  * @q_type: queue model
1197  * @chunks: queue regs received over mailbox
1198  *
1199  * This function parses the queue register offsets from the queue register
1200  * chunk information for a specific queue type and stores them in the array
1201  * passed as an argument. It returns the actual number of queue registers
1202  * that are filled.
1203  */
1204 static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
1205 				struct virtchnl2_queue_reg_chunks *chunks)
1206 {
1207 	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
1208 	int reg_filled = 0, i;
1209 	u32 reg_val;
1210 
1211 	while (num_chunks--) {
1212 		struct virtchnl2_queue_reg_chunk *chunk;
1213 		u16 num_q;
1214 
1215 		chunk = &chunks->chunks[num_chunks];
1216 		if (le32_to_cpu(chunk->type) != q_type)
1217 			continue;
1218 
1219 		num_q = le32_to_cpu(chunk->num_queues);
1220 		reg_val = le64_to_cpu(chunk->qtail_reg_start);
1221 		for (i = 0; i < num_q && reg_filled < num_regs; i++) {
1222 			reg_vals[reg_filled++] = reg_val;
1223 			reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
1224 		}
1225 	}
1226 
1227 	return reg_filled;
1228 }
1229 
1230 /**
1231  * __idpf_queue_reg_init - initialize queue registers
1232  * @vport: virtual port structure
1233  * @reg_vals: registers we are initializing
1234  * @num_regs: how many registers there are in total
1235  * @q_type: queue model
1236  *
1237  * Return number of queues that are initialized
1238  */
1239 static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
1240 				 int num_regs, u32 q_type)
1241 {
1242 	struct idpf_adapter *adapter = vport->adapter;
1243 	int i, j, k = 0;
1244 
1245 	switch (q_type) {
1246 	case VIRTCHNL2_QUEUE_TYPE_TX:
1247 		for (i = 0; i < vport->num_txq_grp; i++) {
1248 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1249 
1250 			for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
1251 				tx_qgrp->txqs[j]->tail =
1252 					idpf_get_reg_addr(adapter, reg_vals[k]);
1253 		}
1254 		break;
1255 	case VIRTCHNL2_QUEUE_TYPE_RX:
1256 		for (i = 0; i < vport->num_rxq_grp; i++) {
1257 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1258 			u16 num_rxq = rx_qgrp->singleq.num_rxq;
1259 
1260 			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
1261 				struct idpf_rx_queue *q;
1262 
1263 				q = rx_qgrp->singleq.rxqs[j];
1264 				q->tail = idpf_get_reg_addr(adapter,
1265 							    reg_vals[k]);
1266 			}
1267 		}
1268 		break;
1269 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
1270 		for (i = 0; i < vport->num_rxq_grp; i++) {
1271 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1272 			u8 num_bufqs = vport->num_bufqs_per_qgrp;
1273 
1274 			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
1275 				struct idpf_buf_queue *q;
1276 
1277 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1278 				q->tail = idpf_get_reg_addr(adapter,
1279 							    reg_vals[k]);
1280 			}
1281 		}
1282 		break;
1283 	default:
1284 		break;
1285 	}
1286 
1287 	return k;
1288 }
1289 
1290 /**
1291  * idpf_queue_reg_init - initialize queue registers
1292  * @vport: virtual port structure
1293  *
1294  * Return 0 on success, negative on failure
1295  */
1296 int idpf_queue_reg_init(struct idpf_vport *vport)
1297 {
1298 	struct virtchnl2_create_vport *vport_params;
1299 	struct virtchnl2_queue_reg_chunks *chunks;
1300 	struct idpf_vport_config *vport_config;
1301 	u16 vport_idx = vport->idx;
1302 	int num_regs, ret = 0;
1303 	u32 *reg_vals;
1304 
1305 	/* We should never deal with more than 256 queues of the same type */
1306 	reg_vals = kcalloc(IDPF_LARGE_MAX_Q, sizeof(*reg_vals), GFP_KERNEL);
1307 	if (!reg_vals)
1308 		return -ENOMEM;
1309 
1310 	vport_config = vport->adapter->vport_config[vport_idx];
1311 	if (vport_config->req_qs_chunks) {
1312 		struct virtchnl2_add_queues *vc_aq =
1313 		  (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
1314 		chunks = &vc_aq->chunks;
1315 	} else {
1316 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
1317 		chunks = &vport_params->chunks;
1318 	}
1319 
1320 	/* Initialize Tx queue tail register address */
1321 	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1322 					VIRTCHNL2_QUEUE_TYPE_TX,
1323 					chunks);
1324 	if (num_regs < vport->num_txq) {
1325 		ret = -EINVAL;
1326 		goto free_reg_vals;
1327 	}
1328 
1329 	num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1330 					 VIRTCHNL2_QUEUE_TYPE_TX);
1331 	if (num_regs < vport->num_txq) {
1332 		ret = -EINVAL;
1333 		goto free_reg_vals;
1334 	}
1335 
1336 	/* Initialize Rx/buffer queue tail register address based on Rx queue
1337 	 * model
1338 	 */
1339 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1340 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1341 						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
1342 						chunks);
1343 		if (num_regs < vport->num_bufq) {
1344 			ret = -EINVAL;
1345 			goto free_reg_vals;
1346 		}
1347 
1348 		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1349 						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1350 		if (num_regs < vport->num_bufq) {
1351 			ret = -EINVAL;
1352 			goto free_reg_vals;
1353 		}
1354 	} else {
1355 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1356 						VIRTCHNL2_QUEUE_TYPE_RX,
1357 						chunks);
1358 		if (num_regs < vport->num_rxq) {
1359 			ret = -EINVAL;
1360 			goto free_reg_vals;
1361 		}
1362 
1363 		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1364 						 VIRTCHNL2_QUEUE_TYPE_RX);
1365 		if (num_regs < vport->num_rxq) {
1366 			ret = -EINVAL;
1367 			goto free_reg_vals;
1368 		}
1369 	}
1370 
1371 free_reg_vals:
1372 	kfree(reg_vals);
1373 
1374 	return ret;
1375 }
1376 
1377 /**
1378  * idpf_send_create_vport_msg - Send virtchnl create vport message
1379  * @adapter: Driver specific private structure
1380  * @max_q: vport max queue info
1381  *
1382  * Send virtchnl create vport message.
1383  *
1384  * Returns 0 on success, negative on failure
1385  */
1386 int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
1387 			       struct idpf_vport_max_q *max_q)
1388 {
1389 	struct virtchnl2_create_vport *vport_msg;
1390 	struct idpf_vc_xn_params xn_params = {};
1391 	u16 idx = adapter->next_vport;
1392 	int err, buf_size;
1393 	ssize_t reply_sz;
1394 
1395 	buf_size = sizeof(struct virtchnl2_create_vport);
1396 	if (!adapter->vport_params_reqd[idx]) {
1397 		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
1398 							  GFP_KERNEL);
1399 		if (!adapter->vport_params_reqd[idx])
1400 			return -ENOMEM;
1401 	}
1402 
1403 	vport_msg = adapter->vport_params_reqd[idx];
1404 	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
1405 	vport_msg->vport_index = cpu_to_le16(idx);
1406 
1407 	if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
1408 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1409 	else
1410 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1411 
1412 	if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
1413 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1414 	else
1415 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1416 
1417 	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
1418 	if (err) {
1419 		dev_err(&adapter->pdev->dev, "Not enough queues are available\n");
1420 
1421 		return err;
1422 	}
1423 
1424 	if (!adapter->vport_params_recvd[idx]) {
1425 		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
1426 							   GFP_KERNEL);
1427 		if (!adapter->vport_params_recvd[idx]) {
1428 			err = -ENOMEM;
1429 			goto free_vport_params;
1430 		}
1431 	}
1432 
1433 	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
1434 	xn_params.send_buf.iov_base = vport_msg;
1435 	xn_params.send_buf.iov_len = buf_size;
1436 	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
1437 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
1438 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1439 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1440 	if (reply_sz < 0) {
1441 		err = reply_sz;
1442 		goto free_vport_params;
1443 	}
1444 
1445 	return 0;
1446 
1447 free_vport_params:
1448 	kfree(adapter->vport_params_recvd[idx]);
1449 	adapter->vport_params_recvd[idx] = NULL;
1450 	kfree(adapter->vport_params_reqd[idx]);
1451 	adapter->vport_params_reqd[idx] = NULL;
1452 
1453 	return err;
1454 }
1455 
1456 /**
1457  * idpf_check_supported_desc_ids - Verify we have required descriptor support
1458  * @vport: virtual port structure
1459  *
1460  * Return 0 on success, error on failure
1461  */
1462 int idpf_check_supported_desc_ids(struct idpf_vport *vport)
1463 {
1464 	struct idpf_adapter *adapter = vport->adapter;
1465 	struct virtchnl2_create_vport *vport_msg;
1466 	u64 rx_desc_ids, tx_desc_ids;
1467 
1468 	vport_msg = adapter->vport_params_recvd[vport->idx];
1469 
1470 	if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
1471 	    (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
1472 	     vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
1473 		pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
1474 		return -EOPNOTSUPP;
1475 	}
1476 
1477 	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
1478 	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
1479 
1480 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1481 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
1482 			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
1483 			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1484 		}
1485 	} else {
1486 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
1487 			vport->base_rxd = true;
1488 	}
1489 
1490 	if (!idpf_is_queue_model_split(vport->txq_model))
1491 		return 0;
1492 
1493 	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
1494 		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
1495 		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
1496 	}
1497 
1498 	return 0;
1499 }
1500 
1501 /**
1502  * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
1503  * @vport: virtual port data structure
1504  *
1505  * Send virtchnl destroy vport message.  Returns 0 on success, negative on
1506  * failure.
1507  */
1508 int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
1509 {
1510 	struct idpf_vc_xn_params xn_params = {};
1511 	struct virtchnl2_vport v_id;
1512 	ssize_t reply_sz;
1513 
1514 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1515 
1516 	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
1517 	xn_params.send_buf.iov_base = &v_id;
1518 	xn_params.send_buf.iov_len = sizeof(v_id);
1519 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1520 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1521 
1522 	return reply_sz < 0 ? reply_sz : 0;
1523 }
1524 
1525 /**
1526  * idpf_send_enable_vport_msg - Send virtchnl enable vport message
1527  * @vport: virtual port data structure
1528  *
1529  * Send enable vport virtchnl message.  Returns 0 on success, negative on
1530  * failure.
1531  */
1532 int idpf_send_enable_vport_msg(struct idpf_vport *vport)
1533 {
1534 	struct idpf_vc_xn_params xn_params = {};
1535 	struct virtchnl2_vport v_id;
1536 	ssize_t reply_sz;
1537 
1538 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1539 
1540 	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
1541 	xn_params.send_buf.iov_base = &v_id;
1542 	xn_params.send_buf.iov_len = sizeof(v_id);
1543 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1544 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1545 
1546 	return reply_sz < 0 ? reply_sz : 0;
1547 }
1548 
1549 /**
1550  * idpf_send_disable_vport_msg - Send virtchnl disable vport message
1551  * @vport: virtual port data structure
1552  *
1553  * Send disable vport virtchnl message.  Returns 0 on success, negative on
1554  * failure.
1555  */
1556 int idpf_send_disable_vport_msg(struct idpf_vport *vport)
1557 {
1558 	struct idpf_vc_xn_params xn_params = {};
1559 	struct virtchnl2_vport v_id;
1560 	ssize_t reply_sz;
1561 
1562 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1563 
1564 	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
1565 	xn_params.send_buf.iov_base = &v_id;
1566 	xn_params.send_buf.iov_len = sizeof(v_id);
1567 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1568 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1569 
1570 	return reply_sz < 0 ? reply_sz : 0;
1571 }
1572 
1573 /**
1574  * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
1575  * @vport: virtual port data structure
1576  *
1577  * Send config tx queues virtchnl message. Returns 0 on success, negative on
1578  * failure.
1579  */
1580 static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
1581 {
1582 	struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
1583 	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
1584 	struct idpf_vc_xn_params xn_params = {};
1585 	u32 config_sz, chunk_sz, buf_sz;
1586 	int totqs, num_msgs, num_chunks;
1587 	ssize_t reply_sz;
1588 	int i, k = 0;
1589 
1590 	totqs = vport->num_txq + vport->num_complq;
1591 	qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
1592 	if (!qi)
1593 		return -ENOMEM;
1594 
1595 	/* Populate the queue info buffer with all queue context info */
1596 	for (i = 0; i < vport->num_txq_grp; i++) {
1597 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1598 		int j, sched_mode;
1599 
1600 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1601 			qi[k].queue_id =
1602 				cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1603 			qi[k].model =
1604 				cpu_to_le16(vport->txq_model);
1605 			qi[k].type =
1606 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1607 			qi[k].ring_len =
1608 				cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
1609 			qi[k].dma_ring_addr =
1610 				cpu_to_le64(tx_qgrp->txqs[j]->dma);
1611 			if (idpf_is_queue_model_split(vport->txq_model)) {
1612 				struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1613 
1614 				qi[k].tx_compl_queue_id =
1615 					cpu_to_le16(tx_qgrp->complq->q_id);
1616 				qi[k].relative_queue_id = cpu_to_le16(j);
1617 
1618 				if (idpf_queue_has(FLOW_SCH_EN, q))
1619 					qi[k].sched_mode =
1620 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
1621 				else
1622 					qi[k].sched_mode =
1623 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1624 			} else {
1625 				qi[k].sched_mode =
1626 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1627 			}
1628 		}
1629 
1630 		if (!idpf_is_queue_model_split(vport->txq_model))
1631 			continue;
1632 
1633 		qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1634 		qi[k].model = cpu_to_le16(vport->txq_model);
1635 		qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1636 		qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
1637 		qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
1638 
1639 		if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
1640 			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1641 		else
1642 			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1643 		qi[k].sched_mode = cpu_to_le16(sched_mode);
1644 
1645 		k++;
1646 	}
1647 
1648 	/* Make sure accounting agrees */
1649 	if (k != totqs)
1650 		return -EINVAL;
1651 
1652 	/* Chunk up the queue contexts into multiple messages to avoid
1653 	 * sending a control queue message buffer that is too large
1654 	 */
1655 	config_sz = sizeof(struct virtchnl2_config_tx_queues);
1656 	chunk_sz = sizeof(struct virtchnl2_txq_info);
1657 
1658 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1659 			   totqs);
1660 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
1661 
1662 	buf_sz = struct_size(ctq, qinfo, num_chunks);
1663 	ctq = kzalloc(buf_sz, GFP_KERNEL);
1664 	if (!ctq)
1665 		return -ENOMEM;
1666 
1667 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
1668 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1669 
1670 	for (i = 0, k = 0; i < num_msgs; i++) {
1671 		memset(ctq, 0, buf_sz);
1672 		ctq->vport_id = cpu_to_le32(vport->vport_id);
1673 		ctq->num_qinfo = cpu_to_le16(num_chunks);
1674 		memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
1675 
1676 		xn_params.send_buf.iov_base = ctq;
1677 		xn_params.send_buf.iov_len = buf_sz;
1678 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1679 		if (reply_sz < 0)
1680 			return reply_sz;
1681 
1682 		k += num_chunks;
1683 		totqs -= num_chunks;
1684 		num_chunks = min(num_chunks, totqs);
1685 		/* Recalculate buffer size */
1686 		buf_sz = struct_size(ctq, qinfo, num_chunks);
1687 	}
1688 
1689 	return 0;
1690 }
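
/*
 * Worked example of the chunking above, with illustrative numbers: if
 * IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz) yields 63 txq_info chunks
 * per mailbox buffer and the vport has totqs = 100 queues, then
 * num_msgs = DIV_ROUND_UP(100, 63) = 2; the first message carries 63
 * chunks and the second the remaining 37.
 */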
1691 
1692 /**
1693  * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
1694  * @vport: virtual port data structure
1695  *
1696  * Send config rx queues virtchnl message.  Returns 0 on success, negative on
1697  * failure.
1698  */
1699 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
1700 {
1701 	struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
1702 	struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
1703 	struct idpf_vc_xn_params xn_params = {};
1704 	u32 config_sz, chunk_sz, buf_sz;
1705 	int totqs, num_msgs, num_chunks;
1706 	ssize_t reply_sz;
1707 	int i, k = 0;
1708 
1709 	totqs = vport->num_rxq + vport->num_bufq;
1710 	qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
1711 	if (!qi)
1712 		return -ENOMEM;
1713 
1714 	/* Populate the queue info buffer with all queue context info */
1715 	for (i = 0; i < vport->num_rxq_grp; i++) {
1716 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1717 		u16 num_rxq;
1718 		int j;
1719 
1720 		if (!idpf_is_queue_model_split(vport->rxq_model))
1721 			goto setup_rxqs;
1722 
1723 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1724 			struct idpf_buf_queue *bufq =
1725 				&rx_qgrp->splitq.bufq_sets[j].bufq;
1726 
1727 			qi[k].queue_id = cpu_to_le32(bufq->q_id);
1728 			qi[k].model = cpu_to_le16(vport->rxq_model);
1729 			qi[k].type =
1730 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1731 			qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1732 			qi[k].ring_len = cpu_to_le16(bufq->desc_count);
1733 			qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
1734 			qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
1735 			qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
1736 			qi[k].rx_buffer_low_watermark =
1737 				cpu_to_le16(bufq->rx_buffer_low_watermark);
1738 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1739 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1740 		}
1741 
1742 setup_rxqs:
1743 		if (idpf_is_queue_model_split(vport->rxq_model))
1744 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1745 		else
1746 			num_rxq = rx_qgrp->singleq.num_rxq;
1747 
1748 		for (j = 0; j < num_rxq; j++, k++) {
1749 			const struct idpf_bufq_set *sets;
1750 			struct idpf_rx_queue *rxq;
1751 			u32 rxdids;
1752 
1753 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1754 				rxq = rx_qgrp->singleq.rxqs[j];
1755 				rxdids = rxq->rxdids;
1756 
1757 				goto common_qi_fields;
1758 			}
1759 
1760 			rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1761 			sets = rxq->bufq_sets;
1762 
1763 			/* In splitq mode, RXQ buffer size should be
1764 			 * set to that of the first buffer queue
1765 			 * associated with this RXQ.
1766 			 */
1767 			rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
1768 
1769 			qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
1770 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
1771 				qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
1772 				qi[k].rx_bufq2_id =
1773 					cpu_to_le16(sets[1].bufq.q_id);
1774 			}
1775 			qi[k].rx_buffer_low_watermark =
1776 				cpu_to_le16(rxq->rx_buffer_low_watermark);
1777 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1778 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1779 
1780 			rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
1781 
1782 			if (idpf_queue_has(HSPLIT_EN, rxq)) {
1783 				qi[k].qflags |=
1784 					cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
1785 				qi[k].hdr_buffer_size =
1786 					cpu_to_le16(rxq->rx_hbuf_size);
1787 			}
1788 
1789 			rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1790 
1791 common_qi_fields:
1792 			qi[k].queue_id = cpu_to_le32(rxq->q_id);
1793 			qi[k].model = cpu_to_le16(vport->rxq_model);
1794 			qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1795 			qi[k].ring_len = cpu_to_le16(rxq->desc_count);
1796 			qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
1797 			qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
1798 			qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
1799 			qi[k].qflags |=
1800 				cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
1801 			qi[k].desc_ids = cpu_to_le64(rxdids);
1802 		}
1803 	}
1804 
1805 	/* Make sure accounting agrees */
1806 	if (k != totqs)
1807 		return -EINVAL;
1808 
1809 	/* Chunk up the queue contexts into multiple messages to avoid
1810 	 * sending a control queue message buffer that is too large
1811 	 */
1812 	config_sz = sizeof(struct virtchnl2_config_rx_queues);
1813 	chunk_sz = sizeof(struct virtchnl2_rxq_info);
1814 
1815 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1816 			   totqs);
1817 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
1818 
1819 	buf_sz = struct_size(crq, qinfo, num_chunks);
1820 	crq = kzalloc(buf_sz, GFP_KERNEL);
1821 	if (!crq)
1822 		return -ENOMEM;
1823 
1824 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
1825 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1826 
1827 	for (i = 0, k = 0; i < num_msgs; i++) {
1828 		memset(crq, 0, buf_sz);
1829 		crq->vport_id = cpu_to_le32(vport->vport_id);
1830 		crq->num_qinfo = cpu_to_le16(num_chunks);
1831 		memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
1832 
1833 		xn_params.send_buf.iov_base = crq;
1834 		xn_params.send_buf.iov_len = buf_sz;
1835 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1836 		if (reply_sz < 0)
1837 			return reply_sz;
1838 
1839 		k += num_chunks;
1840 		totqs -= num_chunks;
1841 		num_chunks = min(num_chunks, totqs);
1842 		/* Recalculate buffer size */
1843 		buf_sz = struct_size(crq, qinfo, num_chunks);
1844 	}
1845 
1846 	return 0;
1847 }
1848 
1849 /**
1850  * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
1851  * queues message
1852  * @vport: virtual port data structure
1853  * @ena: if true enable, false disable
1854  *
1855  * Send enable or disable queues virtchnl message. Returns 0 on success,
1856  * negative on failure.
1857  */
1858 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
1859 {
1860 	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
1861 	struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
1862 	u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
1863 	struct idpf_vc_xn_params xn_params = {
1864 		.timeout_ms	= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
1865 	};
1866 	struct virtchnl2_queue_chunks *qcs;
1867 	u32 config_sz, chunk_sz, buf_sz;
1868 	ssize_t reply_sz;
1869 	int i, j, k = 0;
1870 
1871 	num_txq = vport->num_txq + vport->num_complq;
1872 	num_rxq = vport->num_rxq + vport->num_bufq;
1873 	num_q = num_txq + num_rxq;
1874 	buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
1875 	qc = kzalloc(buf_sz, GFP_KERNEL);
1876 	if (!qc)
1877 		return -ENOMEM;
1878 
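	/* Each chunk describes a run of IDPF_NUMQ_PER_CHUNK queue ids starting
	 * at start_queue_id.
	 */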
1879 	for (i = 0; i < vport->num_txq_grp; i++) {
1880 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1881 
1882 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1883 			qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1884 			qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1885 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1886 		}
1887 	}
1888 	if (vport->num_txq != k)
1889 		return -EINVAL;
1890 
1891 	if (!idpf_is_queue_model_split(vport->txq_model))
1892 		goto setup_rx;
1893 
1894 	for (i = 0; i < vport->num_txq_grp; i++, k++) {
1895 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1896 
1897 		qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1898 		qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1899 		qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1900 	}
1901 	if (vport->num_complq != (k - vport->num_txq))
1902 		return -EINVAL;
1903 
1904 setup_rx:
1905 	for (i = 0; i < vport->num_rxq_grp; i++) {
1906 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1907 
1908 		if (idpf_is_queue_model_split(vport->rxq_model))
1909 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1910 		else
1911 			num_rxq = rx_qgrp->singleq.num_rxq;
1912 
1913 		for (j = 0; j < num_rxq; j++, k++) {
1914 			if (idpf_is_queue_model_split(vport->rxq_model)) {
1915 				qc[k].start_queue_id =
1916 				cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
1917 			} else {
1918 				qc[k].start_queue_id =
1919 				cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
1920 			}
1921 			qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1925 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1926 		}
1927 	}
1928 	if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
1929 		return -EINVAL;
1930 
1931 	if (!idpf_is_queue_model_split(vport->rxq_model))
1932 		goto send_msg;
1933 
1934 	for (i = 0; i < vport->num_rxq_grp; i++) {
1935 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1936 
1937 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1938 			const struct idpf_buf_queue *q;
1939 
1940 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1941 			qc[k].type =
1942 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1943 			qc[k].start_queue_id = cpu_to_le32(q->q_id);
1944 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1945 		}
1946 	}
1947 	if (vport->num_bufq != k - (vport->num_txq +
1948 				    vport->num_complq +
1949 				    vport->num_rxq))
1950 		return -EINVAL;
1951 
1952 send_msg:
1953 	/* Chunk up the queue info into multiple messages */
1954 	config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
1955 	chunk_sz = sizeof(struct virtchnl2_queue_chunk);
1956 
1957 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1958 			   num_q);
1959 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
1960 
1961 	buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1962 	eq = kzalloc(buf_sz, GFP_KERNEL);
1963 	if (!eq)
1964 		return -ENOMEM;
1965 
1966 	if (ena)
1967 		xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
1968 	else
1969 		xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
1970 
1971 	for (i = 0, k = 0; i < num_msgs; i++) {
1972 		memset(eq, 0, buf_sz);
1973 		eq->vport_id = cpu_to_le32(vport->vport_id);
1974 		eq->chunks.num_chunks = cpu_to_le16(num_chunks);
1975 		qcs = &eq->chunks;
1976 		memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
1977 
1978 		xn_params.send_buf.iov_base = eq;
1979 		xn_params.send_buf.iov_len = buf_sz;
1980 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1981 		if (reply_sz < 0)
1982 			return reply_sz;
1983 
1984 		k += num_chunks;
1985 		num_q -= num_chunks;
1986 		num_chunks = min(num_chunks, num_q);
1987 		/* Recalculate buffer size */
1988 		buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1989 	}
1990 
1991 	return 0;
1992 }
1993 
1994 /**
1995  * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
1996  * vector message
1997  * @vport: virtual port data structure
1998  * @map: true for map and false for unmap
1999  *
2000  * Send map or unmap queue vector virtchnl message.  Returns 0 on success,
2001  * negative on failure.
2002  */
2003 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
2004 {
2005 	struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
2006 	struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
2007 	struct idpf_vc_xn_params xn_params = {
2008 		.timeout_ms	= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
2009 	};
2010 	u32 config_sz, chunk_sz, buf_sz;
2011 	u32 num_msgs, num_chunks, num_q;
2012 	ssize_t reply_sz;
2013 	int i, j, k = 0;
2014 
2015 	num_q = vport->num_txq + vport->num_rxq;
2016 
2017 	buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
2018 	vqv = kzalloc(buf_sz, GFP_KERNEL);
2019 	if (!vqv)
2020 		return -ENOMEM;
2021 
2022 	for (i = 0; i < vport->num_txq_grp; i++) {
2023 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
2024 
2025 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
2026 			const struct idpf_tx_queue *txq = tx_qgrp->txqs[j];
2027 			const struct idpf_q_vector *vec;
2028 			u32 v_idx, tx_itr_idx;
2029 
2030 			vqv[k].queue_type =
2031 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
2032 			vqv[k].queue_id = cpu_to_le32(txq->q_id);
2033 
2034 			if (idpf_queue_has(NOIRQ, txq))
2035 				vec = NULL;
2036 			else if (idpf_queue_has(XDP, txq))
2037 				vec = txq->complq->q_vector;
2038 			else if (idpf_is_queue_model_split(vport->txq_model))
2039 				vec = txq->txq_grp->complq->q_vector;
2040 			else
2041 				vec = txq->q_vector;
2042 
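			/* NOIRQ queues fall back to the reserved no-IRQ vector */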
2043 			if (vec) {
2044 				v_idx = vec->v_idx;
2045 				tx_itr_idx = vec->tx_itr_idx;
2046 			} else {
2047 				v_idx = vport->noirq_v_idx;
2048 				tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
2049 			}
2050 
2051 			vqv[k].vector_id = cpu_to_le16(v_idx);
2052 			vqv[k].itr_idx = cpu_to_le32(tx_itr_idx);
2053 		}
2054 	}
2055 
2056 	for (i = 0; i < vport->num_rxq_grp; i++) {
2057 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
2058 		u16 num_rxq;
2059 
2060 		if (idpf_is_queue_model_split(vport->rxq_model))
2061 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
2062 		else
2063 			num_rxq = rx_qgrp->singleq.num_rxq;
2064 
2065 		for (j = 0; j < num_rxq; j++, k++) {
2066 			struct idpf_rx_queue *rxq;
2067 			u32 v_idx, rx_itr_idx;
2068 
2069 			if (idpf_is_queue_model_split(vport->rxq_model))
2070 				rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
2071 			else
2072 				rxq = rx_qgrp->singleq.rxqs[j];
2073 
2074 			vqv[k].queue_type =
2075 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
2076 			vqv[k].queue_id = cpu_to_le32(rxq->q_id);
2077 
2078 			if (idpf_queue_has(NOIRQ, rxq)) {
2079 				v_idx = vport->noirq_v_idx;
2080 				rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
2081 			} else {
2082 				v_idx = rxq->q_vector->v_idx;
2083 				rx_itr_idx = rxq->q_vector->rx_itr_idx;
2084 			}
2085 
2086 			vqv[k].vector_id = cpu_to_le16(v_idx);
2087 			vqv[k].itr_idx = cpu_to_le32(rx_itr_idx);
2088 		}
2089 	}
2090 
2091 	if (k != num_q)
2092 		return -EINVAL;
2093 
2094 	/* Chunk up the vector info into multiple messages */
2095 	config_sz = sizeof(struct virtchnl2_queue_vector_maps);
2096 	chunk_sz = sizeof(struct virtchnl2_queue_vector);
2097 
2098 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
2099 			   num_q);
2100 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
2101 
2102 	buf_sz = struct_size(vqvm, qv_maps, num_chunks);
2103 	vqvm = kzalloc(buf_sz, GFP_KERNEL);
2104 	if (!vqvm)
2105 		return -ENOMEM;
2106 
2107 	if (map)
2108 		xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
2109 	else
2110 		xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
2111 
2112 	for (i = 0, k = 0; i < num_msgs; i++) {
2113 		memset(vqvm, 0, buf_sz);
2114 		xn_params.send_buf.iov_base = vqvm;
2115 		xn_params.send_buf.iov_len = buf_sz;
2116 		vqvm->vport_id = cpu_to_le32(vport->vport_id);
2117 		vqvm->num_qv_maps = cpu_to_le16(num_chunks);
2118 		memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
2119 
2120 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2121 		if (reply_sz < 0)
2122 			return reply_sz;
2123 
2124 		k += num_chunks;
2125 		num_q -= num_chunks;
2126 		num_chunks = min(num_chunks, num_q);
2127 		/* Recalculate buffer size */
2128 		buf_sz = struct_size(vqvm, qv_maps, num_chunks);
2129 	}
2130 
2131 	return 0;
2132 }
2133 
2134 /**
2135  * idpf_send_enable_queues_msg - send enable queues virtchnl message
2136  * @vport: Virtual port private data structure
2137  *
2138  * Will send enable queues virtchnl message.  Returns 0 on success, negative on
2139  * failure.
2140  */
2141 int idpf_send_enable_queues_msg(struct idpf_vport *vport)
2142 {
2143 	return idpf_send_ena_dis_queues_msg(vport, true);
2144 }
2145 
2146 /**
2147  * idpf_send_disable_queues_msg - send disable queues virtchnl message
2148  * @vport: Virtual port private data structure
2149  *
2150  * Will send disable queues virtchnl message.  Returns 0 on success, negative
2151  * on failure.
2152  */
2153 int idpf_send_disable_queues_msg(struct idpf_vport *vport)
2154 {
2155 	int err;
2156 
2157 	err = idpf_send_ena_dis_queues_msg(vport, false);
2158 	if (err)
2159 		return err;
2160 
2161 	return idpf_wait_for_marker_event(vport);
2162 }
2163 
2164 /**
2165  * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
2166  * structure
2167  * @dchunks: Destination chunks to store data to
2168  * @schunks: Source chunks to copy data from
2169  * @num_chunks: number of chunks to copy
2170  */
2171 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
2172 					     struct virtchnl2_queue_reg_chunk *schunks,
2173 					     u16 num_chunks)
2174 {
2175 	u16 i;
2176 
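	/* Both sides use little-endian fields, so copy them verbatim */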
2177 	for (i = 0; i < num_chunks; i++) {
2178 		dchunks[i].type = schunks[i].type;
2179 		dchunks[i].start_queue_id = schunks[i].start_queue_id;
2180 		dchunks[i].num_queues = schunks[i].num_queues;
2181 	}
2182 }
2183 
2184 /**
2185  * idpf_send_delete_queues_msg - send delete queues virtchnl message
2186  * @vport: Virtual port private data structure
2187  *
2188  * Will send delete queues virtchnl message. Returns 0 on success, negative on
2189  * failure.
2190  */
2191 int idpf_send_delete_queues_msg(struct idpf_vport *vport)
2192 {
2193 	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
2194 	struct virtchnl2_create_vport *vport_params;
2195 	struct virtchnl2_queue_reg_chunks *chunks;
2196 	struct idpf_vc_xn_params xn_params = {};
2197 	struct idpf_vport_config *vport_config;
2198 	u16 vport_idx = vport->idx;
2199 	ssize_t reply_sz;
2200 	u16 num_chunks;
2201 	int buf_size;
2202 
2203 	vport_config = vport->adapter->vport_config[vport_idx];
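	/* Prefer the queue chunks returned by a prior ADD_QUEUES request,
	 * otherwise use the chunks received at vport creation.
	 */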
2204 	if (vport_config->req_qs_chunks) {
2205 		chunks = &vport_config->req_qs_chunks->chunks;
2206 	} else {
2207 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
2208 		chunks = &vport_params->chunks;
2209 	}
2210 
2211 	num_chunks = le16_to_cpu(chunks->num_chunks);
2212 	buf_size = struct_size(eq, chunks.chunks, num_chunks);
2213 
2214 	eq = kzalloc(buf_size, GFP_KERNEL);
2215 	if (!eq)
2216 		return -ENOMEM;
2217 
2218 	eq->vport_id = cpu_to_le32(vport->vport_id);
2219 	eq->chunks.num_chunks = cpu_to_le16(num_chunks);
2220 
2221 	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
2222 					 num_chunks);
2223 
2224 	xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
2225 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2226 	xn_params.send_buf.iov_base = eq;
2227 	xn_params.send_buf.iov_len = buf_size;
2228 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2229 
2230 	return reply_sz < 0 ? reply_sz : 0;
2231 }
2232 
2233 /**
2234  * idpf_send_config_queues_msg - Send config queues virtchnl message
2235  * @vport: Virtual port private data structure
2236  *
2237  * Will send config queues virtchnl message. Returns 0 on success, negative on
2238  * failure.
2239  */
2240 int idpf_send_config_queues_msg(struct idpf_vport *vport)
2241 {
2242 	int err;
2243 
2244 	err = idpf_send_config_tx_queues_msg(vport);
2245 	if (err)
2246 		return err;
2247 
2248 	return idpf_send_config_rx_queues_msg(vport);
2249 }
2250 
2251 /**
2252  * idpf_send_add_queues_msg - Send virtchnl add queues message
2253  * @vport: Virtual port private data structure
2254  * @num_tx_q: number of transmit queues
2255  * @num_complq: number of transmit completion queues
2256  * @num_rx_q: number of receive queues
2257  * @num_rx_bufq: number of receive buffer queues
2258  *
2259  * Returns 0 on success, negative on failure. vport _MUST_ be const here as
2260  * we should not change any fields within vport itself in this function.
2261  */
2262 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
2263 			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
2264 {
2265 	struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
2266 	struct idpf_vc_xn_params xn_params = {};
2267 	struct idpf_vport_config *vport_config;
2268 	struct virtchnl2_add_queues aq = {};
2269 	u16 vport_idx = vport->idx;
2270 	ssize_t reply_sz;
2271 	int size;
2272 
2273 	vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2274 	if (!vc_msg)
2275 		return -ENOMEM;
2276 
2277 	vport_config = vport->adapter->vport_config[vport_idx];
2278 	kfree(vport_config->req_qs_chunks);
2279 	vport_config->req_qs_chunks = NULL;
2280 
2281 	aq.vport_id = cpu_to_le32(vport->vport_id);
2282 	aq.num_tx_q = cpu_to_le16(num_tx_q);
2283 	aq.num_tx_complq = cpu_to_le16(num_complq);
2284 	aq.num_rx_q = cpu_to_le16(num_rx_q);
2285 	aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
2286 
2287 	xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
2288 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2289 	xn_params.send_buf.iov_base = &aq;
2290 	xn_params.send_buf.iov_len = sizeof(aq);
2291 	xn_params.recv_buf.iov_base = vc_msg;
2292 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2293 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2294 	if (reply_sz < 0)
2295 		return reply_sz;
2296 
2297 	/* Compare the reply's queue counts against the requested counts */
2298 	if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
2299 	    le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
2300 	    le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
2301 	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
2302 		return -EINVAL;
2303 
2304 	size = struct_size(vc_msg, chunks.chunks,
2305 			   le16_to_cpu(vc_msg->chunks.num_chunks));
2306 	if (reply_sz < size)
2307 		return -EIO;
2308 
2309 	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
2310 	if (!vport_config->req_qs_chunks)
2311 		return -ENOMEM;
2312 
2313 	return 0;
2314 }
2315 
2316 /**
2317  * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
2318  * @adapter: Driver specific private structure
2319  * @num_vectors: number of vectors to be allocated
2320  *
2321  * Returns 0 on success, negative on failure.
2322  */
2323 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
2324 {
2325 	struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
2326 	struct idpf_vc_xn_params xn_params = {};
2327 	struct virtchnl2_alloc_vectors ac = {};
2328 	ssize_t reply_sz;
2329 	u16 num_vchunks;
2330 	int size;
2331 
2332 	ac.num_vectors = cpu_to_le16(num_vectors);
2333 
2334 	rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2335 	if (!rcvd_vec)
2336 		return -ENOMEM;
2337 
2338 	xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
2339 	xn_params.send_buf.iov_base = &ac;
2340 	xn_params.send_buf.iov_len = sizeof(ac);
2341 	xn_params.recv_buf.iov_base = rcvd_vec;
2342 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2343 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2344 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2345 	if (reply_sz < 0)
2346 		return reply_sz;
2347 
2348 	num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
2349 	size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
2350 	if (reply_sz < size)
2351 		return -EIO;
2352 
2353 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
2354 		return -EINVAL;
2355 
2356 	kfree(adapter->req_vec_chunks);
2357 	adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
2358 	if (!adapter->req_vec_chunks)
2359 		return -ENOMEM;
2360 
2361 	if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
2362 		kfree(adapter->req_vec_chunks);
2363 		adapter->req_vec_chunks = NULL;
2364 		return -EINVAL;
2365 	}
2366 
2367 	return 0;
2368 }
2369 
2370 /**
2371  * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
2372  * @adapter: Driver specific private structure
2373  *
2374  * Returns 0 on success, negative on failure.
2375  */
2376 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
2377 {
2378 	struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
2379 	struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
2380 	struct idpf_vc_xn_params xn_params = {};
2381 	ssize_t reply_sz;
2382 	int buf_size;
2383 
2384 	buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
2385 
2386 	xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
2387 	xn_params.send_buf.iov_base = vcs;
2388 	xn_params.send_buf.iov_len = buf_size;
2389 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2390 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2391 	if (reply_sz < 0)
2392 		return reply_sz;
2393 
2394 	kfree(adapter->req_vec_chunks);
2395 	adapter->req_vec_chunks = NULL;
2396 
2397 	return 0;
2398 }
2399 
2400 /**
2401  * idpf_get_max_vfs - Get max number of VFs supported
2402  * @adapter: Driver specific private structure
2403  *
2404  * Returns max number of VFs
2405  */
2406 static int idpf_get_max_vfs(struct idpf_adapter *adapter)
2407 {
2408 	return le16_to_cpu(adapter->caps.max_sriov_vfs);
2409 }
2410 
2411 /**
2412  * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
2413  * @adapter: Driver specific private structure
2414  * @num_vfs: number of virtual functions to be created
2415  *
2416  * Returns 0 on success, negative on failure.
2417  */
2418 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
2419 {
2420 	struct virtchnl2_sriov_vfs_info svi = {};
2421 	struct idpf_vc_xn_params xn_params = {};
2422 	ssize_t reply_sz;
2423 
2424 	svi.num_vfs = cpu_to_le16(num_vfs);
2425 	xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
2426 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2427 	xn_params.send_buf.iov_base = &svi;
2428 	xn_params.send_buf.iov_len = sizeof(svi);
2429 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2430 
2431 	return reply_sz < 0 ? reply_sz : 0;
2432 }
2433 
2434 /**
2435  * idpf_send_get_stats_msg - Send virtchnl get statistics message
2436  * @vport: vport to get stats for
2437  *
2438  * Returns 0 on success, negative on failure.
2439  */
2440 int idpf_send_get_stats_msg(struct idpf_vport *vport)
2441 {
2442 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
2443 	struct rtnl_link_stats64 *netstats = &np->netstats;
2444 	struct virtchnl2_vport_stats stats_msg = {};
2445 	struct idpf_vc_xn_params xn_params = {};
2446 	ssize_t reply_sz;
2447 
2449 	/* Don't send get_stats message if the vport is down */
2450 	if (np->state <= __IDPF_VPORT_DOWN)
2451 		return 0;
2452 
2453 	stats_msg.vport_id = cpu_to_le32(vport->vport_id);
2454 
2455 	xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
2456 	xn_params.send_buf.iov_base = &stats_msg;
2457 	xn_params.send_buf.iov_len = sizeof(stats_msg);
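	/* The reply uses the same layout, so receive it into the same buffer */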
2458 	xn_params.recv_buf = xn_params.send_buf;
2459 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2460 
2461 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2462 	if (reply_sz < 0)
2463 		return reply_sz;
2464 	if (reply_sz < sizeof(stats_msg))
2465 		return -EIO;
2466 
2467 	spin_lock_bh(&np->stats_lock);
2468 
2469 	netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
2470 			       le64_to_cpu(stats_msg.rx_multicast) +
2471 			       le64_to_cpu(stats_msg.rx_broadcast);
2472 	netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
2473 			       le64_to_cpu(stats_msg.tx_multicast) +
2474 			       le64_to_cpu(stats_msg.tx_broadcast);
2475 	netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
2476 	netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
2477 	netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
2478 	netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
2479 	netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
2480 	netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
2481 
2482 	vport->port_stats.vport_stats = stats_msg;
2483 
2484 	spin_unlock_bh(&np->stats_lock);
2485 
2486 	return 0;
2487 }
2488 
2489 /**
2490  * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
2491  * @vport: virtual port data structure
2492  * @get: flag to get (true) or set (false) the RSS lookup table
2493  *
2494  * Returns 0 on success, negative on failure.
2495  */
2496 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
2497 {
2498 	struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
2499 	struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
2500 	struct idpf_vc_xn_params xn_params = {};
2501 	struct idpf_rss_data *rss_data;
2502 	int buf_size, lut_buf_size;
2503 	ssize_t reply_sz;
2504 	int i;
2505 
2506 	rss_data =
2507 		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
2508 	buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
2509 	rl = kzalloc(buf_size, GFP_KERNEL);
2510 	if (!rl)
2511 		return -ENOMEM;
2512 
2513 	rl->vport_id = cpu_to_le32(vport->vport_id);
2514 
2515 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2516 	xn_params.send_buf.iov_base = rl;
2517 	xn_params.send_buf.iov_len = buf_size;
2518 
2519 	if (get) {
2520 		recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2521 		if (!recv_rl)
2522 			return -ENOMEM;
2523 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
2524 		xn_params.recv_buf.iov_base = recv_rl;
2525 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2526 	} else {
2527 		rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
2528 		for (i = 0; i < rss_data->rss_lut_size; i++)
2529 			rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
2530 
2531 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
2532 	}
2533 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2534 	if (reply_sz < 0)
2535 		return reply_sz;
2536 	if (!get)
2537 		return 0;
2538 	if (reply_sz < sizeof(struct virtchnl2_rss_lut))
2539 		return -EIO;
2540 
2541 	lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
2542 	if (reply_sz < lut_buf_size)
2543 		return -EIO;
2544 
2545 	/* size didn't change, we can reuse existing lut buf */
2546 	if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
2547 		goto do_memcpy;
2548 
2549 	rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
2550 	kfree(rss_data->rss_lut);
2551 
2552 	rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
2553 	if (!rss_data->rss_lut) {
2554 		rss_data->rss_lut_size = 0;
2555 		return -ENOMEM;
2556 	}
2557 
2558 do_memcpy:
2559 	memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
2560 
2561 	return 0;
2562 }
2563 
2564 /**
2565  * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
2566  * @vport: virtual port data structure
2567  * @get: flag to get (true) or set (false) the RSS key
2568  *
2569  * Returns 0 on success, negative on failure
2570  */
2571 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
2572 {
2573 	struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
2574 	struct virtchnl2_rss_key *rk __free(kfree) = NULL;
2575 	struct idpf_vc_xn_params xn_params = {};
2576 	struct idpf_rss_data *rss_data;
2577 	ssize_t reply_sz;
2578 	int i, buf_size;
2579 	u16 key_size;
2580 
2581 	rss_data =
2582 		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
2583 	buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
2584 	rk = kzalloc(buf_size, GFP_KERNEL);
2585 	if (!rk)
2586 		return -ENOMEM;
2587 
2588 	rk->vport_id = cpu_to_le32(vport->vport_id);
2589 	xn_params.send_buf.iov_base = rk;
2590 	xn_params.send_buf.iov_len = buf_size;
2591 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2592 	if (get) {
2593 		recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2594 		if (!recv_rk)
2595 			return -ENOMEM;
2596 
2597 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
2598 		xn_params.recv_buf.iov_base = recv_rk;
2599 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2600 	} else {
2601 		rk->key_len = cpu_to_le16(rss_data->rss_key_size);
2602 		for (i = 0; i < rss_data->rss_key_size; i++)
2603 			rk->key_flex[i] = rss_data->rss_key[i];
2604 
2605 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
2606 	}
2607 
2608 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2609 	if (reply_sz < 0)
2610 		return reply_sz;
2611 	if (!get)
2612 		return 0;
2613 	if (reply_sz < sizeof(struct virtchnl2_rss_key))
2614 		return -EIO;
2615 
2616 	key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
2617 			 le16_to_cpu(recv_rk->key_len));
2618 	if (reply_sz < key_size)
2619 		return -EIO;
2620 
2621 	/* key len didn't change, reuse existing buf */
2622 	if (rss_data->rss_key_size == key_size)
2623 		goto do_memcpy;
2624 
2625 	rss_data->rss_key_size = key_size;
2626 	kfree(rss_data->rss_key);
2627 	rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
2628 	if (!rss_data->rss_key) {
2629 		rss_data->rss_key_size = 0;
2630 		return -ENOMEM;
2631 	}
2632 
2633 do_memcpy:
2634 	memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
2635 
2636 	return 0;
2637 }
2638 
2639 /**
2640  * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
2641  * @ptype: ptype lookup table
2642  * @pstate: state machine for ptype lookup table
2643  * @ipv4: true for IPv4, false for IPv6
2644  * @frag: true if fragmentation is allowed
2645  */
2647 static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
2648 				   struct idpf_ptype_state *pstate,
2649 				   bool ipv4, bool frag)
2650 {
2651 	if (!pstate->outer_ip || !pstate->outer_frag) {
2652 		pstate->outer_ip = true;
2653 
2654 		if (ipv4)
2655 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
2656 		else
2657 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
2658 
2659 		if (frag) {
2660 			ptype->outer_frag = LIBETH_RX_PT_FRAG;
2661 			pstate->outer_frag = true;
2662 		}
2663 	} else {
2664 		ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
2665 		pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
2666 
2667 		if (ipv4)
2668 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
2669 		else
2670 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
2671 
2672 		if (frag)
2673 			ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
2674 	}
2675 }
2676 
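/**
 * idpf_finalize_ptype_lookup - Derive payload layer and hash type for a ptype
 * @ptype: ptype lookup table entry to finalize
 */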
2677 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
2678 {
2679 	if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
2680 	    ptype->inner_prot)
2681 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
2682 	else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
2683 		 ptype->outer_ip)
2684 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
2685 	else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
2686 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
2687 	else
2688 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
2689 
2690 	libeth_rx_pt_gen_hash_type(ptype);
2691 }
2692 
2693 /**
2694  * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
2695  * @vport: virtual port data structure
2696  *
2697  * Returns 0 on success, negative on failure.
2698  */
2699 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
2700 {
2701 	struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
2702 	struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
2703 	struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
2704 	int max_ptype, ptypes_recvd = 0, ptype_offset;
2705 	struct idpf_adapter *adapter = vport->adapter;
2706 	struct idpf_vc_xn_params xn_params = {};
2707 	u16 next_ptype_id = 0;
2708 	ssize_t reply_sz;
2709 	int i, j, k;
2710 
2711 	if (vport->rx_ptype_lkup)
2712 		return 0;
2713 
2714 	if (idpf_is_queue_model_split(vport->rxq_model))
2715 		max_ptype = IDPF_RX_MAX_PTYPE;
2716 	else
2717 		max_ptype = IDPF_RX_MAX_BASE_PTYPE;
2718 
2719 	ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
2720 	if (!ptype_lkup)
2721 		return -ENOMEM;
2722 
2723 	get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
2724 	if (!get_ptype_info)
2725 		return -ENOMEM;
2726 
2727 	ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2728 	if (!ptype_info)
2729 		return -ENOMEM;
2730 
2731 	xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
2732 	xn_params.send_buf.iov_base = get_ptype_info;
2733 	xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
2734 	xn_params.recv_buf.iov_base = ptype_info;
2735 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2736 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2737 
2738 	while (next_ptype_id < max_ptype) {
2739 		get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
2740 
2741 		if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
2742 			get_ptype_info->num_ptypes =
2743 				cpu_to_le16(max_ptype - next_ptype_id);
2744 		else
2745 			get_ptype_info->num_ptypes =
2746 				cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
2747 
2748 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2749 		if (reply_sz < 0)
2750 			return reply_sz;
2751 
2752 		ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
2753 		if (ptypes_recvd > max_ptype)
2754 			return -EINVAL;
2755 
2756 		next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
2757 				le16_to_cpu(get_ptype_info->num_ptypes);
2758 
2759 		ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
2760 
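		/* Walk the variable-sized ptype entries following the reply
		 * header.
		 */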
2761 		for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
2762 			struct idpf_ptype_state pstate = { };
2763 			struct virtchnl2_ptype *ptype;
2764 			u16 id;
2765 
2766 			ptype = (struct virtchnl2_ptype *)
2767 					((u8 *)ptype_info + ptype_offset);
2768 
2769 			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
2770 			if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
2771 				return -EINVAL;
2772 
2773 			/* 0xFFFF indicates end of ptypes */
2774 			if (le16_to_cpu(ptype->ptype_id_10) ==
2775 							IDPF_INVALID_PTYPE_ID)
2776 				goto out;
2777 
2778 			if (idpf_is_queue_model_split(vport->rxq_model))
2779 				k = le16_to_cpu(ptype->ptype_id_10);
2780 			else
2781 				k = ptype->ptype_id_8;
2782 
2783 			for (j = 0; j < ptype->proto_id_count; j++) {
2784 				id = le16_to_cpu(ptype->proto_id[j]);
2785 				switch (id) {
2786 				case VIRTCHNL2_PROTO_HDR_GRE:
2787 					if (pstate.tunnel_state ==
2788 							IDPF_PTYPE_TUNNEL_IP) {
2789 						ptype_lkup[k].tunnel_type =
2790 						LIBETH_RX_PT_TUNNEL_IP_GRENAT;
2791 						pstate.tunnel_state |=
2792 						IDPF_PTYPE_TUNNEL_IP_GRENAT;
2793 					}
2794 					break;
2795 				case VIRTCHNL2_PROTO_HDR_MAC:
2796 					ptype_lkup[k].outer_ip =
2797 						LIBETH_RX_PT_OUTER_L2;
2798 					if (pstate.tunnel_state ==
2799 							IDPF_TUN_IP_GRE) {
2800 						ptype_lkup[k].tunnel_type =
2801 						LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
2802 						pstate.tunnel_state |=
2803 						IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
2804 					}
2805 					break;
2806 				case VIRTCHNL2_PROTO_HDR_IPV4:
2807 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2808 							       &pstate, true,
2809 							       false);
2810 					break;
2811 				case VIRTCHNL2_PROTO_HDR_IPV6:
2812 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2813 							       &pstate, false,
2814 							       false);
2815 					break;
2816 				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
2817 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2818 							       &pstate, true,
2819 							       true);
2820 					break;
2821 				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
2822 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2823 							       &pstate, false,
2824 							       true);
2825 					break;
2826 				case VIRTCHNL2_PROTO_HDR_UDP:
2827 					ptype_lkup[k].inner_prot =
2828 					LIBETH_RX_PT_INNER_UDP;
2829 					break;
2830 				case VIRTCHNL2_PROTO_HDR_TCP:
2831 					ptype_lkup[k].inner_prot =
2832 					LIBETH_RX_PT_INNER_TCP;
2833 					break;
2834 				case VIRTCHNL2_PROTO_HDR_SCTP:
2835 					ptype_lkup[k].inner_prot =
2836 					LIBETH_RX_PT_INNER_SCTP;
2837 					break;
2838 				case VIRTCHNL2_PROTO_HDR_ICMP:
2839 					ptype_lkup[k].inner_prot =
2840 					LIBETH_RX_PT_INNER_ICMP;
2841 					break;
2842 				case VIRTCHNL2_PROTO_HDR_PAY:
2843 					ptype_lkup[k].payload_layer =
2844 						LIBETH_RX_PT_PAYLOAD_L2;
2845 					break;
2846 				case VIRTCHNL2_PROTO_HDR_ICMPV6:
2847 				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
2848 				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
2849 				case VIRTCHNL2_PROTO_HDR_POST_MAC:
2850 				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
2851 				case VIRTCHNL2_PROTO_HDR_SVLAN:
2852 				case VIRTCHNL2_PROTO_HDR_CVLAN:
2853 				case VIRTCHNL2_PROTO_HDR_MPLS:
2854 				case VIRTCHNL2_PROTO_HDR_MMPLS:
2855 				case VIRTCHNL2_PROTO_HDR_PTP:
2856 				case VIRTCHNL2_PROTO_HDR_CTRL:
2857 				case VIRTCHNL2_PROTO_HDR_LLDP:
2858 				case VIRTCHNL2_PROTO_HDR_ARP:
2859 				case VIRTCHNL2_PROTO_HDR_ECP:
2860 				case VIRTCHNL2_PROTO_HDR_EAPOL:
2861 				case VIRTCHNL2_PROTO_HDR_PPPOD:
2862 				case VIRTCHNL2_PROTO_HDR_PPPOE:
2863 				case VIRTCHNL2_PROTO_HDR_IGMP:
2864 				case VIRTCHNL2_PROTO_HDR_AH:
2865 				case VIRTCHNL2_PROTO_HDR_ESP:
2866 				case VIRTCHNL2_PROTO_HDR_IKE:
2867 				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
2868 				case VIRTCHNL2_PROTO_HDR_L2TPV2:
2869 				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
2870 				case VIRTCHNL2_PROTO_HDR_L2TPV3:
2871 				case VIRTCHNL2_PROTO_HDR_GTP:
2872 				case VIRTCHNL2_PROTO_HDR_GTP_EH:
2873 				case VIRTCHNL2_PROTO_HDR_GTPCV2:
2874 				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
2875 				case VIRTCHNL2_PROTO_HDR_GTPU:
2876 				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
2877 				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
2878 				case VIRTCHNL2_PROTO_HDR_ECPRI:
2879 				case VIRTCHNL2_PROTO_HDR_VRRP:
2880 				case VIRTCHNL2_PROTO_HDR_OSPF:
2881 				case VIRTCHNL2_PROTO_HDR_TUN:
2882 				case VIRTCHNL2_PROTO_HDR_NVGRE:
2883 				case VIRTCHNL2_PROTO_HDR_VXLAN:
2884 				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
2885 				case VIRTCHNL2_PROTO_HDR_GENEVE:
2886 				case VIRTCHNL2_PROTO_HDR_NSH:
2887 				case VIRTCHNL2_PROTO_HDR_QUIC:
2888 				case VIRTCHNL2_PROTO_HDR_PFCP:
2889 				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
2890 				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
2891 				case VIRTCHNL2_PROTO_HDR_RTP:
2892 				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
2893 					break;
2894 				default:
2895 					break;
2896 				}
2897 			}
2898 
2899 			idpf_finalize_ptype_lookup(&ptype_lkup[k]);
2900 		}
2901 	}
2902 
2903 out:
2904 	vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
2905 
2906 	return 0;
2907 }
2908 
2909 /**
2910  * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
2911  *				    message
2912  * @vport: virtual port data structure
2913  *
2914  * Returns 0 on success, negative on failure.
2915  */
2916 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
2917 {
2918 	struct idpf_vc_xn_params xn_params = {};
2919 	struct virtchnl2_loopback loopback = {};
2920 	ssize_t reply_sz;
2921 
2922 	loopback.vport_id = cpu_to_le32(vport->vport_id);
2923 	loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
2924 
2925 	xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
2926 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2927 	xn_params.send_buf.iov_base = &loopback;
2928 	xn_params.send_buf.iov_len = sizeof(loopback);
2929 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2930 
2931 	return reply_sz < 0 ? reply_sz : 0;
2932 }
2933 
2934 /**
2935  * idpf_find_ctlq - Given a type and id, find ctlq info
2936  * @hw: hardware struct
2937  * @type: type of ctrlq to find
2938  * @id: ctlq id to find
2939  *
2940  * Returns pointer to found ctlq info struct, NULL otherwise.
2941  */
2942 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
2943 					     enum idpf_ctlq_type type, int id)
2944 {
2945 	struct idpf_ctlq_info *cq;
2946 
2947 	list_for_each_entry(cq, &hw->cq_list_head, cq_list)
2948 		if (cq->q_id == id && cq->cq_type == type)
2949 			return cq;
2950 
2951 	return NULL;
2952 }
2953 
2954 /**
2955  * idpf_init_dflt_mbx - Set up default mailbox parameters and make request
2956  * @adapter: adapter info struct
2957  *
2958  * Returns 0 on success, negative otherwise
2959  */
2960 int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
2961 {
2962 	struct idpf_ctlq_create_info ctlq_info[] = {
2963 		{
2964 			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
2965 			.id = IDPF_DFLT_MBX_ID,
2966 			.len = IDPF_DFLT_MBX_Q_LEN,
2967 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2968 		},
2969 		{
2970 			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
2971 			.id = IDPF_DFLT_MBX_ID,
2972 			.len = IDPF_DFLT_MBX_Q_LEN,
2973 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2974 		}
2975 	};
2976 	struct idpf_hw *hw = &adapter->hw;
2977 	int err;
2978 
2979 	adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
2980 
2981 	err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
2982 	if (err)
2983 		return err;
2984 
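	/* Look up the send and receive mailbox queues just created */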
2985 	hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
2986 				 IDPF_DFLT_MBX_ID);
2987 	hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
2988 				 IDPF_DFLT_MBX_ID);
2989 
2990 	if (!hw->asq || !hw->arq) {
2991 		idpf_ctlq_deinit(hw);
2992 
2993 		return -ENOENT;
2994 	}
2995 
2996 	adapter->state = __IDPF_VER_CHECK;
2997 
2998 	return 0;
2999 }
3000 
3001 /**
3002  * idpf_deinit_dflt_mbx - Free up the default mailbox control queues
3003  * @adapter: Driver specific private data structure
3004  */
3005 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
3006 {
3007 	if (adapter->hw.arq && adapter->hw.asq) {
3008 		idpf_mb_clean(adapter);
3009 		idpf_ctlq_deinit(&adapter->hw);
3010 	}
3011 	adapter->hw.arq = NULL;
3012 	adapter->hw.asq = NULL;
3013 }
3014 
3015 /**
3016  * idpf_vport_params_buf_rel - Release memory for mailbox resources
3017  * @adapter: Driver specific private data structure
3018  *
3019  * Release the memory holding the vport parameters received over the mailbox
3020  */
3021 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
3022 {
3023 	kfree(adapter->vport_params_recvd);
3024 	adapter->vport_params_recvd = NULL;
3025 	kfree(adapter->vport_params_reqd);
3026 	adapter->vport_params_reqd = NULL;
3027 	kfree(adapter->vport_ids);
3028 	adapter->vport_ids = NULL;
3029 }
3030 
3031 /**
3032  * idpf_vport_params_buf_alloc - Allocate memory for mailbox resources
3033  * @adapter: Driver specific private data structure
3034  *
3035  * Allocate memory to hold the vport parameters received over the mailbox
3036  */
3037 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
3038 {
3039 	u16 num_max_vports = idpf_get_max_vports(adapter);
3040 
3041 	adapter->vport_params_reqd = kcalloc(num_max_vports,
3042 					     sizeof(*adapter->vport_params_reqd),
3043 					     GFP_KERNEL);
3044 	if (!adapter->vport_params_reqd)
3045 		return -ENOMEM;
3046 
3047 	adapter->vport_params_recvd = kcalloc(num_max_vports,
3048 					      sizeof(*adapter->vport_params_recvd),
3049 					      GFP_KERNEL);
3050 	if (!adapter->vport_params_recvd)
3051 		goto err_mem;
3052 
3053 	adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
3054 	if (!adapter->vport_ids)
3055 		goto err_mem;
3056 
3057 	if (adapter->vport_config)
3058 		return 0;
3059 
3060 	adapter->vport_config = kcalloc(num_max_vports,
3061 					sizeof(*adapter->vport_config),
3062 					GFP_KERNEL);
3063 	if (!adapter->vport_config)
3064 		goto err_mem;
3065 
3066 	return 0;
3067 
3068 err_mem:
3069 	idpf_vport_params_buf_rel(adapter);
3070 
3071 	return -ENOMEM;
3072 }
3073 
3074 /**
3075  * idpf_vc_core_init - Initialize state machine and get driver specific
3076  * resources
3077  * @adapter: Driver specific private structure
3078  *
3079  * This function will initialize the state machine and request all necessary
3080  * resources required by the device driver. Once the state machine is
3081  * initialized, it allocates memory to store vport specific information and
3082  * requests the required interrupts.
3083  *
3084  * Returns 0 on success, -EAGAIN if the function should be called again,
3085  * otherwise negative on failure.
3086  */
3087 int idpf_vc_core_init(struct idpf_adapter *adapter)
3088 {
3089 	int task_delay = 30;
3090 	u16 num_max_vports;
3091 	int err = 0;
3092 
3093 	if (!adapter->vcxn_mngr) {
3094 		adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
3095 		if (!adapter->vcxn_mngr) {
3096 			err = -ENOMEM;
3097 			goto init_failed;
3098 		}
3099 	}
3100 	idpf_vc_xn_init(adapter->vcxn_mngr);
3101 
3102 	while (adapter->state != __IDPF_INIT_SW) {
3103 		switch (adapter->state) {
3104 		case __IDPF_VER_CHECK:
3105 			err = idpf_send_ver_msg(adapter);
3106 			switch (err) {
3107 			case 0:
3108 				/* success, move state machine forward */
3109 				adapter->state = __IDPF_GET_CAPS;
3110 				fallthrough;
3111 			case -EAGAIN:
3112 				goto restart;
3113 			default:
3114 				/* Something bad happened, try again but only a
3115 				 * few times.
3116 				 */
3117 				goto init_failed;
3118 			}
3119 		case __IDPF_GET_CAPS:
3120 			err = idpf_send_get_caps_msg(adapter);
3121 			if (err)
3122 				goto init_failed;
3123 			adapter->state = __IDPF_INIT_SW;
3124 			break;
3125 		default:
3126 			dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
3127 				adapter->state);
3128 			err = -EINVAL;
3129 			goto init_failed;
3130 		}
3131 		break;
3132 restart:
3133 		/* Give enough time before proceeding further with
3134 		 * state machine
3135 		 */
3136 		msleep(task_delay);
3137 	}
3138 
3139 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
3140 		err = idpf_send_get_lan_memory_regions(adapter);
3141 		if (err) {
3142 			dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
3143 				err);
3144 			return -EINVAL;
3145 		}
3146 	} else {
3147 		/* Fall back to mapping the remaining regions of the entire BAR */
3148 		err = idpf_calc_remaining_mmio_regs(adapter);
3149 		if (err) {
3150 			dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
3151 				err);
3152 			return -ENOMEM;
3153 		}
3154 	}
3155 
3156 	err = idpf_map_lan_mmio_regs(adapter);
3157 	if (err) {
3158 		dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
3159 			err);
3160 		return -ENOMEM;
3161 	}
3162 
3163 	pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
3164 	num_max_vports = idpf_get_max_vports(adapter);
3165 	adapter->max_vports = num_max_vports;
3166 	adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
3167 				  GFP_KERNEL);
3168 	if (!adapter->vports)
3169 		return -ENOMEM;
3170 
3171 	if (!adapter->netdevs) {
3172 		adapter->netdevs = kcalloc(num_max_vports,
3173 					   sizeof(struct net_device *),
3174 					   GFP_KERNEL);
3175 		if (!adapter->netdevs) {
3176 			err = -ENOMEM;
3177 			goto err_netdev_alloc;
3178 		}
3179 	}
3180 
3181 	err = idpf_vport_params_buf_alloc(adapter);
3182 	if (err) {
3183 		dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
3184 			err);
3185 		goto err_netdev_alloc;
3186 	}
3187 
3188 	/* Start the mailbox task before requesting vectors. This ensures the
3189 	 * vector information response from the mailbox is handled.
3190 	 */
3191 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
3192 
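	/* Stagger the service task per PCI function, like the init task below */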
3193 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
3194 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3195 
3196 	err = idpf_intr_req(adapter);
3197 	if (err) {
3198 		dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
3199 			err);
3200 		goto err_intr_req;
3201 	}
3202 
3203 	err = idpf_ptp_init(adapter);
3204 	if (err)
3205 		pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
3206 			ERR_PTR(err));
3207 
3208 	idpf_init_avail_queues(adapter);
3209 
3210 	/* Skew the delay for init tasks for each function based on fn number
3211 	 * to prevent every function from making the same call simultaneously.
3212 	 */
3213 	queue_delayed_work(adapter->init_wq, &adapter->init_task,
3214 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3215 
3216 	set_bit(IDPF_VC_CORE_INIT, adapter->flags);
3217 
3218 	return 0;
3219 
3220 err_intr_req:
3221 	cancel_delayed_work_sync(&adapter->serv_task);
3222 	cancel_delayed_work_sync(&adapter->mbx_task);
3223 	idpf_vport_params_buf_rel(adapter);
3224 err_netdev_alloc:
3225 	kfree(adapter->vports);
3226 	adapter->vports = NULL;
3227 	return err;
3228 
3229 init_failed:
3230 	/* Don't retry if we're trying to go down, just bail. */
3231 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
3232 		return err;
3233 
3234 	if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
3235 		dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
3236 
3237 		return -EFAULT;
3238 	}
3239 	/* If we reached here, it is possible that the mailbox queue
3240 	 * initialization register writes have not taken effect yet. Retry
3241 	 * initializing the mailbox.
3242 	 */
3243 	adapter->state = __IDPF_VER_CHECK;
3244 	if (adapter->vcxn_mngr)
3245 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3246 	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
3247 	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
3248 			   msecs_to_jiffies(task_delay));
3249 
3250 	return -EAGAIN;
3251 }
3252 
3253 /**
3254  * idpf_vc_core_deinit - Device deinit routine
3255  * @adapter: Driver specific private structure
3256  *
3257  */
3258 void idpf_vc_core_deinit(struct idpf_adapter *adapter)
3259 {
3260 	bool remove_in_prog;
3261 
3262 	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
3263 		return;
3264 
3265 	/* Avoid transaction timeouts when called during reset */
3266 	remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
3267 	if (!remove_in_prog)
3268 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3269 
3270 	idpf_ptp_release(adapter);
3271 	idpf_deinit_task(adapter);
3272 	idpf_idc_deinit_core_aux_device(adapter->cdev_info);
3273 	idpf_intr_rel(adapter);
3274 
3275 	if (remove_in_prog)
3276 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3277 
3278 	cancel_delayed_work_sync(&adapter->serv_task);
3279 	cancel_delayed_work_sync(&adapter->mbx_task);
3280 
3281 	idpf_vport_params_buf_rel(adapter);
3282 
3283 	kfree(adapter->vports);
3284 	adapter->vports = NULL;
3285 
3286 	clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
3287 }
3288 
3289 /**
3290  * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3291  * @vport: virtual port data struct
3292  *
3293  * This function requests the vector information required for the vport and
3294  * stores the vector indexes received from the 'global vector distribution'
3295  * in the vport's queue vectors array.
3296  *
3297  * Returns 0 on success, negative on failure
3298  */
3299 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
3300 {
3301 	struct idpf_vector_info vec_info;
3302 	int num_alloc_vecs;
3303 	u32 req;
3304 
3305 	vec_info.num_curr_vecs = vport->num_q_vectors;
3306 	if (vec_info.num_curr_vecs)
3307 		vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
3308 
3309 	/* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
3310 	req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) +
3311 	      IDPF_RESERVED_VECS;
3312 	vec_info.num_req_vecs = req;
3313 
3314 	vec_info.default_vport = vport->default_vport;
3315 	vec_info.index = vport->idx;
3316 
3317 	num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
3318 						     vport->q_vector_idxs,
3319 						     &vec_info);
3320 	if (num_alloc_vecs <= 0) {
3321 		dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
3322 			num_alloc_vecs);
3323 		return -EINVAL;
3324 	}
3325 
3326 	vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
3327 
3328 	return 0;
3329 }
3330 
3331 /**
3332  * idpf_vport_init - Initialize virtual port
3333  * @vport: virtual port to be initialized
3334  * @max_q: vport max queue info
3335  *
3336  * Will initialize vport with the info received through MB earlier
3337  */
3338 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
3339 {
3340 	struct idpf_adapter *adapter = vport->adapter;
3341 	struct virtchnl2_create_vport *vport_msg;
3342 	struct idpf_vport_config *vport_config;
3343 	u16 tx_itr[] = {2, 8, 64, 128, 256};
3344 	u16 rx_itr[] = {2, 8, 32, 96, 128};
3345 	struct idpf_rss_data *rss_data;
3346 	u16 idx = vport->idx;
3347 	int err;
3348 
3349 	vport_config = adapter->vport_config[idx];
3350 	rss_data = &vport_config->user_config.rss_data;
3351 	vport_msg = adapter->vport_params_recvd[idx];
3352 
3353 	vport_config->max_q.max_txq = max_q->max_txq;
3354 	vport_config->max_q.max_rxq = max_q->max_rxq;
3355 	vport_config->max_q.max_complq = max_q->max_complq;
3356 	vport_config->max_q.max_bufq = max_q->max_bufq;
3357 
3358 	vport->txq_model = le16_to_cpu(vport_msg->txq_model);
3359 	vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
3360 	vport->vport_type = le16_to_cpu(vport_msg->vport_type);
3361 	vport->vport_id = le32_to_cpu(vport_msg->vport_id);
3362 
3363 	rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
3364 				       le16_to_cpu(vport_msg->rss_key_size));
3365 	rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
3366 
3367 	ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
3368 	vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
3369 
3370 	/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
3371 	memcpy(vport->rx_itr_profile, rx_itr, sizeof(rx_itr));
3372 	memcpy(vport->tx_itr_profile, tx_itr, sizeof(tx_itr));
3373 
3374 	idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
3375 
3376 	idpf_vport_init_num_qs(vport, vport_msg);
3377 	idpf_vport_calc_num_q_desc(vport);
3378 	idpf_vport_calc_num_q_groups(vport);
3379 	idpf_vport_alloc_vec_indexes(vport);
3380 
3381 	vport->crc_enable = adapter->crc_enable;
3382 
3383 	if (!(vport_msg->vport_flags &
3384 	      cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
3385 		return;
3386 
3387 	err = idpf_ptp_get_vport_tstamps_caps(vport);
3388 	if (err) {
3389 		pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
3390 		return;
3391 	}
3392 
3393 	INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
3394 }
3395 
3396 /**
3397  * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3398  * @adapter: adapter structure to get the mailbox vector id
3399  * @vecids: Array of vector ids
3400  * @num_vecids: number of vector ids
3401  * @chunks: vector ids received over mailbox
3402  *
3403  * Will initialize the mailbox vector id received from the get capabilities
3404  * message, and the data queue vector ids with the ids received as mailbox
3405  * parameters.
3406  * Returns number of ids filled
3407  */
3408 int idpf_get_vec_ids(struct idpf_adapter *adapter,
3409 		     u16 *vecids, int num_vecids,
3410 		     struct virtchnl2_vector_chunks *chunks)
3411 {
3412 	u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
3413 	int num_vecid_filled = 0;
3414 	int i, j;
3415 
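	/* The mailbox vector always occupies the first slot */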
3416 	vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
3417 	num_vecid_filled++;
3418 
3419 	for (j = 0; j < num_chunks; j++) {
3420 		struct virtchnl2_vector_chunk *chunk;
3421 		u16 start_vecid, num_vec;
3422 
3423 		chunk = &chunks->vchunks[j];
3424 		num_vec = le16_to_cpu(chunk->num_vectors);
3425 		start_vecid = le16_to_cpu(chunk->start_vector_id);
3426 
		for (i = 0; i < num_vec; i++) {
			if (num_vecid_filled + i >= num_vecids)
				break;

			vecids[num_vecid_filled + i] = start_vecid + i;
		}
		num_vecid_filled += i;
3436 	}
3437 
3438 	return num_vecid_filled;
3439 }
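
/* A worked example of the expansion above (vector ids are illustrative, not
 * taken from any particular device): with the mailbox vector at v_idx 0 and
 * two chunks { start_vector_id = 4, num_vectors = 3 } and
 * { start_vector_id = 10, num_vectors = 2 }, a sufficiently large @vecids
 * array ends up as { 0, 4, 5, 6, 10, 11 } and the function returns 6. With
 * @num_vecids == 4 the fill stops early at { 0, 4, 5, 6 } and 4 is returned.
 */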
3440 
3441 /**
3442  * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3443  * @qids: Array of queue ids
3444  * @num_qids: number of queue ids
 * @q_type: type of the queue ids to extract from @chunks
3446  * @chunks: queue ids received over mailbox
3447  *
3448  * Will initialize all queue ids with ids received as mailbox parameters
3449  * Returns number of ids filled
3450  */
3451 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
3452 				    struct virtchnl2_queue_reg_chunks *chunks)
3453 {
3454 	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
3455 	u32 num_q_id_filled = 0, i;
3456 	u32 start_q_id, num_q;
3457 
3458 	while (num_chunks--) {
3459 		struct virtchnl2_queue_reg_chunk *chunk;
3460 
3461 		chunk = &chunks->chunks[num_chunks];
3462 		if (le32_to_cpu(chunk->type) != q_type)
3463 			continue;
3464 
3465 		num_q = le32_to_cpu(chunk->num_queues);
3466 		start_q_id = le32_to_cpu(chunk->start_queue_id);
3467 
		for (i = 0; i < num_q; i++) {
			if (num_q_id_filled + i >= num_qids)
				break;

			qids[num_q_id_filled + i] = start_q_id + i;
		}
		num_q_id_filled += i;
3477 	}
3478 
3479 	return num_q_id_filled;
3480 }
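
/* Note the traversal order above: chunks are consumed from the last chunk in
 * the message back to the first. With two matching chunks
 * { start_queue_id = 32, num_queues = 2 } followed by
 * { start_queue_id = 64, num_queues = 2 } (illustrative ids), @qids is filled
 * as { 64, 65, 32, 33 }, not in on-wire order; chunks whose type does not
 * match @q_type are skipped entirely.
 */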
3481 
3482 /**
3483  * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
 * @vport: virtual port for which the queue ids are initialized
3485  * @qids: queue ids
3486  * @num_qids: number of queue ids
3487  * @q_type: type of queue
3488  *
3489  * Will initialize all queue ids with ids received as mailbox
3490  * parameters. Returns number of queue ids initialized.
3491  */
3492 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
3493 				       const u32 *qids,
3494 				       int num_qids,
3495 				       u32 q_type)
3496 {
3497 	int i, j, k = 0;
3498 
3499 	switch (q_type) {
3500 	case VIRTCHNL2_QUEUE_TYPE_TX:
3501 		for (i = 0; i < vport->num_txq_grp; i++) {
3502 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3503 
3504 			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
3505 				tx_qgrp->txqs[j]->q_id = qids[k];
3506 		}
3507 		break;
3508 	case VIRTCHNL2_QUEUE_TYPE_RX:
3509 		for (i = 0; i < vport->num_rxq_grp; i++) {
3510 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3511 			u16 num_rxq;
3512 
3513 			if (idpf_is_queue_model_split(vport->rxq_model))
3514 				num_rxq = rx_qgrp->splitq.num_rxq_sets;
3515 			else
3516 				num_rxq = rx_qgrp->singleq.num_rxq;
3517 
3518 			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
3519 				struct idpf_rx_queue *q;
3520 
3521 				if (idpf_is_queue_model_split(vport->rxq_model))
3522 					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3523 				else
3524 					q = rx_qgrp->singleq.rxqs[j];
3525 				q->q_id = qids[k];
3526 			}
3527 		}
3528 		break;
3529 	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
3530 		for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
3531 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3532 
3533 			tx_qgrp->complq->q_id = qids[k];
3534 		}
3535 		break;
3536 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
3537 		for (i = 0; i < vport->num_rxq_grp; i++) {
3538 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3539 			u8 num_bufqs = vport->num_bufqs_per_qgrp;
3540 
3541 			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
3542 				struct idpf_buf_queue *q;
3543 
3544 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
3545 				q->q_id = qids[k];
3546 			}
3547 		}
3548 		break;
3549 	default:
3550 		break;
3551 	}
3552 
3553 	return k;
3554 }
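
/* Distribution sketch for the TX case above (sizes and ids are illustrative):
 * with num_txq_grp = 2, two queues per group and qids[] = { 64, 65, 32, 33 },
 * the ids are consumed in order:
 *
 *	txq_grps[0].txqs[0]->q_id = 64;
 *	txq_grps[0].txqs[1]->q_id = 65;
 *	txq_grps[1].txqs[0]->q_id = 32;
 *	txq_grps[1].txqs[1]->q_id = 33;
 *
 * and k = 4 is returned. Callers compare the return value against the
 * expected queue count to detect a short id list.
 */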
3555 
3556 /**
3557  * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
 * @vport: virtual port for which the queue ids are initialized
3559  *
3560  * Will initialize all queue ids with ids received as mailbox parameters.
 * Returns 0 on success, negative if not all the queue ids could be
 * initialized.
3562  */
3563 int idpf_vport_queue_ids_init(struct idpf_vport *vport)
3564 {
3565 	struct virtchnl2_create_vport *vport_params;
3566 	struct virtchnl2_queue_reg_chunks *chunks;
3567 	struct idpf_vport_config *vport_config;
3568 	u16 vport_idx = vport->idx;
3569 	int num_ids, err = 0;
3570 	u16 q_type;
3571 	u32 *qids;
3572 
3573 	vport_config = vport->adapter->vport_config[vport_idx];
3574 	if (vport_config->req_qs_chunks) {
3575 		struct virtchnl2_add_queues *vc_aq =
3576 			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
3577 		chunks = &vc_aq->chunks;
3578 	} else {
3579 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
3580 		chunks = &vport_params->chunks;
3581 	}
3582 
3583 	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
3584 	if (!qids)
3585 		return -ENOMEM;
3586 
3587 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3588 					   VIRTCHNL2_QUEUE_TYPE_TX,
3589 					   chunks);
3590 	if (num_ids < vport->num_txq) {
3591 		err = -EINVAL;
3592 		goto mem_rel;
3593 	}
3594 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3595 					      VIRTCHNL2_QUEUE_TYPE_TX);
3596 	if (num_ids < vport->num_txq) {
3597 		err = -EINVAL;
3598 		goto mem_rel;
3599 	}
3600 
3601 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3602 					   VIRTCHNL2_QUEUE_TYPE_RX,
3603 					   chunks);
3604 	if (num_ids < vport->num_rxq) {
3605 		err = -EINVAL;
3606 		goto mem_rel;
3607 	}
3608 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3609 					      VIRTCHNL2_QUEUE_TYPE_RX);
3610 	if (num_ids < vport->num_rxq) {
3611 		err = -EINVAL;
3612 		goto mem_rel;
3613 	}
3614 
3615 	if (!idpf_is_queue_model_split(vport->txq_model))
3616 		goto check_rxq;
3617 
3618 	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
3619 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3620 	if (num_ids < vport->num_complq) {
3621 		err = -EINVAL;
3622 		goto mem_rel;
3623 	}
3624 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3625 	if (num_ids < vport->num_complq) {
3626 		err = -EINVAL;
3627 		goto mem_rel;
3628 	}
3629 
3630 check_rxq:
3631 	if (!idpf_is_queue_model_split(vport->rxq_model))
3632 		goto mem_rel;
3633 
3634 	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
3635 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3636 	if (num_ids < vport->num_bufq) {
3637 		err = -EINVAL;
3638 		goto mem_rel;
3639 	}
3640 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3641 	if (num_ids < vport->num_bufq)
3642 		err = -EINVAL;
3643 
3644 mem_rel:
3645 	kfree(qids);
3646 
3647 	return err;
3648 }
3649 
3650 /**
3651  * idpf_vport_adjust_qs - Adjust to new requested queues
3652  * @vport: virtual port data struct
3653  *
3654  * Renegotiate queues.  Returns 0 on success, negative on failure.
3655  */
3656 int idpf_vport_adjust_qs(struct idpf_vport *vport)
3657 {
	struct virtchnl2_create_vport vport_msg = { };
3659 	int err;
3660 
3661 	vport_msg.txq_model = cpu_to_le16(vport->txq_model);
3662 	vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
3663 	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
3664 				       NULL);
3665 	if (err)
3666 		return err;
3667 
3668 	idpf_vport_init_num_qs(vport, &vport_msg);
3669 	idpf_vport_calc_num_q_groups(vport);
3670 
3671 	return 0;
3672 }
3673 
3674 /**
3675  * idpf_is_capability_ena - Default implementation of capability checking
3676  * @adapter: Private data struct
 * @all: when true, require all bits in @flag to be set; when false, any one
 * @field: caps field to check for flags
 * @flag: flag(s) to check
 *
 * Return true if the capability flag(s) are supported per @all, false
 * otherwise
3682  */
3683 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
3684 			    enum idpf_cap_field field, u64 flag)
3685 {
3686 	u8 *caps = (u8 *)&adapter->caps;
3687 	u32 *cap_field;
3688 
3692 	if (field == IDPF_BASE_CAPS)
3693 		return false;
3694 
3695 	cap_field = (u32 *)(caps + field);
3696 
	if (all)
		return (*cap_field & flag) == flag;

	return !!(*cap_field & flag);
3701 }
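
/* Semantics of @all, as a sketch with illustrative values: if the 32-bit
 * field at @field holds 0x6 (bits 1 and 2 set) and flag == 0x3 (bits 0
 * and 1), then:
 *
 *	idpf_is_capability_ena(adapter, true, field, 0x3) == false (bit 0 clear)
 *	idpf_is_capability_ena(adapter, false, field, 0x3) == true (bit 1 set)
 */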
3702 
3703 /**
3704  * idpf_vport_is_cap_ena - Check if vport capability is enabled
3705  * @vport: Private data struct
3706  * @flag: flag(s) to check
3707  *
3708  * Return: true if the capability is supported, false otherwise
3709  */
3710 bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
3711 {
3712 	struct virtchnl2_create_vport *vport_msg;
3713 
3714 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3715 
3716 	return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
3717 }
3718 
3719 /**
3720  * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
3721  * @vport: Private data struct
3722  * @flow_type: flow type to check (from ethtool.h)
3723  *
3724  * Return: true if sideband filters are allowed for @flow_type, false otherwise
3725  */
3726 bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
3727 {
3728 	struct virtchnl2_create_vport *vport_msg;
3729 	__le64 caps;
3730 
3731 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3732 	caps = vport_msg->sideband_flow_caps;
3733 
3734 	switch (flow_type) {
3735 	case TCP_V4_FLOW:
3736 		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
3737 	case UDP_V4_FLOW:
3738 		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
3739 	default:
3740 		return false;
3741 	}
3742 }
3743 
3744 /**
3745  * idpf_sideband_action_ena - Check if steering is enabled for action
3746  * @vport: Private data struct
3747  * @fsp: flow spec
3748  *
3749  * Return: true if sideband filters are allowed for @fsp, false otherwise
3750  */
3751 bool idpf_sideband_action_ena(struct idpf_vport *vport,
3752 			      struct ethtool_rx_flow_spec *fsp)
3753 {
3754 	struct virtchnl2_create_vport *vport_msg;
3755 	unsigned int supp_actions;
3756 
3757 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3758 	supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
3759 
3760 	/* Actions Drop/Wake are not supported */
3761 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
3762 	    fsp->ring_cookie == RX_CLS_FLOW_WAKE)
3763 		return false;
3764 
3765 	return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
3766 }
3767 
/**
 * idpf_fsteer_max_rules - Get the maximum number of flow steering rules
 * @vport: virtual port structure
 *
 * Return: maximum number of flow steering rules supported for @vport.
 */
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
{
	struct virtchnl2_create_vport *vport_msg;

	vport_msg = vport->adapter->vport_params_recvd[vport->idx];

	return le32_to_cpu(vport_msg->flow_steer_max_rules);
}
3775 
3776 /**
 * idpf_get_vport_id - Get vport id
 * @vport: virtual port structure
 *
 * Return: vport id from the adapter persistent data
3781  */
3782 u32 idpf_get_vport_id(struct idpf_vport *vport)
3783 {
3784 	struct virtchnl2_create_vport *vport_msg;
3785 
3786 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3787 
3788 	return le32_to_cpu(vport_msg->vport_id);
3789 }
3790 
/**
 * idpf_set_mac_type - Set the address type of a MAC filter entry
 * @vport: virtual port structure
 * @mac_addr: MAC address entry to tag
 *
 * Tags @mac_addr as the primary address when it matches the vport's default
 * MAC address, otherwise as an extra address.
 */
static void idpf_set_mac_type(struct idpf_vport *vport,
			      struct virtchnl2_mac_addr *mac_addr)
3793 {
3794 	bool is_primary;
3795 
3796 	is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr);
3797 	mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
3798 				      VIRTCHNL2_MAC_ADDR_EXTRA;
3799 }
3800 
3801 /**
3802  * idpf_mac_filter_async_handler - Async callback for mac filters
3803  * @adapter: private data struct
3804  * @xn: transaction for message
3805  * @ctlq_msg: received message
3806  *
 * In some scenarios the driver can't sleep and wait for a reply (e.g. the
 * stack is holding rtnl_lock) when adding a new mac filter, which makes it
 * hard to deal with errors returned on the reply. The best we can ultimately
 * do is remove the filter from our list of mac filters and report the error.
 *
 * Return: 0 on success, -EINVAL on an invalid payload.
3812  */
3813 static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
3814 					 struct idpf_vc_xn *xn,
3815 					 const struct idpf_ctlq_msg *ctlq_msg)
3816 {
3817 	struct virtchnl2_mac_addr_list *ma_list;
3818 	struct idpf_vport_config *vport_config;
3819 	struct virtchnl2_mac_addr *mac_addr;
3820 	struct idpf_mac_filter *f, *tmp;
3821 	struct list_head *ma_list_head;
3822 	struct idpf_vport *vport;
3823 	u16 num_entries;
3824 	int i;
3825 
	/* if success we're done; we're only here if something bad happened */
3827 	if (!ctlq_msg->cookie.mbx.chnl_retval)
3828 		return 0;
3829 
3830 	/* make sure at least struct is there */
3831 	if (xn->reply_sz < sizeof(*ma_list))
3832 		goto invalid_payload;
3833 
3834 	ma_list = ctlq_msg->ctx.indirect.payload->va;
3835 	mac_addr = ma_list->mac_addr_list;
3836 	num_entries = le16_to_cpu(ma_list->num_mac_addr);
3837 	/* we should have received a buffer at least this big */
3838 	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
3839 		goto invalid_payload;
3840 
3841 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
3842 	if (!vport)
3843 		goto invalid_payload;
3844 
	vport_config = adapter->vport_config[vport->idx];
3846 	ma_list_head = &vport_config->user_config.mac_filter_list;
3847 
3848 	/* We can't do much to reconcile bad filters at this point, however we
3849 	 * should at least remove them from our list one way or the other so we
3850 	 * have some idea what good filters we have.
3851 	 */
3852 	spin_lock_bh(&vport_config->mac_filter_list_lock);
3853 	list_for_each_entry_safe(f, tmp, ma_list_head, list)
3854 		for (i = 0; i < num_entries; i++)
3855 			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
3856 				list_del(&f->list);
3857 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
3858 	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
3859 			    xn->vc_op);
3860 
3861 	return 0;
3862 
3863 invalid_payload:
3864 	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
3865 			    xn->vc_op, xn->reply_sz);
3866 
3867 	return -EINVAL;
3868 }
3869 
3870 /**
3871  * idpf_add_del_mac_filters - Add/del mac filters
3872  * @vport: Virtual port data structure
3873  * @np: Netdev private structure
3874  * @add: Add or delete flag
3875  * @async: Don't wait for return message
3876  *
3877  * Returns 0 on success, error on failure.
 */
3879 int idpf_add_del_mac_filters(struct idpf_vport *vport,
3880 			     struct idpf_netdev_priv *np,
3881 			     bool add, bool async)
3882 {
3883 	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
3884 	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
3885 	struct idpf_adapter *adapter = np->adapter;
3886 	struct idpf_vc_xn_params xn_params = {};
3887 	struct idpf_vport_config *vport_config;
3888 	u32 num_msgs, total_filters = 0;
3889 	struct idpf_mac_filter *f;
3890 	ssize_t reply_sz;
3891 	int i = 0, k;
3892 
3893 	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
3894 				VIRTCHNL2_OP_DEL_MAC_ADDR;
3895 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3896 	xn_params.async = async;
3897 	xn_params.async_handler = idpf_mac_filter_async_handler;
3898 
3899 	vport_config = adapter->vport_config[np->vport_idx];
3900 	spin_lock_bh(&vport_config->mac_filter_list_lock);
3901 
	/* Count the filters to be added or removed */
3903 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
3904 			    list) {
3905 		if (add && f->add)
3906 			total_filters++;
3907 		else if (!add && f->remove)
3908 			total_filters++;
3909 	}
3910 
3911 	if (!total_filters) {
3912 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
3913 
3914 		return 0;
3915 	}
3916 
	/* Stage the pending filters to be sent in the virtchnl messages */
3918 	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
3919 			   GFP_ATOMIC);
3920 	if (!mac_addr) {
3921 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
3922 
3923 		return -ENOMEM;
3924 	}
3925 
3926 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
3927 			    list) {
3928 		if (add && f->add) {
3929 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
3930 			idpf_set_mac_type(vport, &mac_addr[i]);
3931 			i++;
3932 			f->add = false;
3933 			if (i == total_filters)
3934 				break;
3935 		}
3936 		if (!add && f->remove) {
3937 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
3938 			idpf_set_mac_type(vport, &mac_addr[i]);
3939 			i++;
3940 			f->remove = false;
3941 			if (i == total_filters)
3942 				break;
3943 		}
3944 	}
3945 
3946 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
3947 
3948 	/* Chunk up the filters into multiple messages to avoid
3949 	 * sending a control queue message buffer that is too large
3950 	 */
3951 	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
3952 
3953 	for (i = 0, k = 0; i < num_msgs; i++) {
3954 		u32 entries_size, buf_size, num_entries;
3955 
3956 		num_entries = min_t(u32, total_filters,
3957 				    IDPF_NUM_FILTERS_PER_MSG);
3958 		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
3959 		buf_size = struct_size(ma_list, mac_addr_list, num_entries);
3960 
3961 		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
3962 			kfree(ma_list);
3963 			ma_list = kzalloc(buf_size, GFP_ATOMIC);
3964 			if (!ma_list)
3965 				return -ENOMEM;
3966 		} else {
3967 			memset(ma_list, 0, buf_size);
3968 		}
3969 
3970 		ma_list->vport_id = cpu_to_le32(np->vport_id);
3971 		ma_list->num_mac_addr = cpu_to_le16(num_entries);
3972 		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
3973 
3974 		xn_params.send_buf.iov_base = ma_list;
3975 		xn_params.send_buf.iov_len = buf_size;
3976 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3977 		if (reply_sz < 0)
3978 			return reply_sz;
3979 
3980 		k += num_entries;
3981 		total_filters -= num_entries;
3982 	}
3983 
3984 	return 0;
3985 }
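
/* Chunking sketch (IDPF_NUM_FILTERS_PER_MSG is shown as 7 purely for
 * illustration; the real value is derived from the control queue buffer
 * size): with total_filters = 16, DIV_ROUND_UP() yields num_msgs = 3 and the
 * loop above sends batches of 7, 7 and 2 entries, advancing k by each batch
 * size while total_filters shrinks to 9, 2 and finally 0.
 */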
3986 
3987 /**
3988  * idpf_set_promiscuous - set promiscuous and send message to mailbox
3989  * @adapter: Driver specific private structure
3990  * @config_data: Vport specific config data
3991  * @vport_id: Vport identifier
3992  *
 * Request to configure promiscuous mode for the vport. The message is sent
 * asynchronously and won't wait for a response. Returns 0 on success,
 * negative on failure.
3996  */
3997 int idpf_set_promiscuous(struct idpf_adapter *adapter,
3998 			 struct idpf_vport_user_config_data *config_data,
3999 			 u32 vport_id)
4000 {
4001 	struct idpf_vc_xn_params xn_params = {};
4002 	struct virtchnl2_promisc_info vpi;
4003 	ssize_t reply_sz;
4004 	u16 flags = 0;
4005 
4006 	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
4007 		flags |= VIRTCHNL2_UNICAST_PROMISC;
4008 	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
4009 		flags |= VIRTCHNL2_MULTICAST_PROMISC;
4010 
4011 	vpi.vport_id = cpu_to_le32(vport_id);
4012 	vpi.flags = cpu_to_le16(flags);
4013 
4014 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
4015 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
4016 	xn_params.send_buf.iov_base = &vpi;
4017 	xn_params.send_buf.iov_len = sizeof(vpi);
4018 	/* setting promiscuous is only ever done asynchronously */
4019 	xn_params.async = true;
4020 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
4021 
4022 	return reply_sz < 0 ? reply_sz : 0;
4023 }
4024 
4025 /**
4026  * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
4027  * @cdev_info: IDC core device info pointer
4028  * @send_msg: message to send
4029  * @msg_size: size of message to send
4030  * @recv_msg: message to populate on reception of response
4031  * @recv_len: length of message copied into recv_msg or 0 on error
4032  *
4033  * Return: 0 on success or error code on failure.
4034  */
4035 int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
4036 			       u8 *send_msg, u16 msg_size,
4037 			       u8 *recv_msg, u16 *recv_len)
4038 {
4039 	struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
4040 	struct idpf_vc_xn_params xn_params = { };
4041 	ssize_t reply_sz;
4042 	u16 recv_size;
4043 
4044 	if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
4045 		return -EINVAL;
4046 
4047 	recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
4048 	*recv_len = 0;
4049 	xn_params.vc_op = VIRTCHNL2_OP_RDMA;
4050 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
4051 	xn_params.send_buf.iov_base = send_msg;
4052 	xn_params.send_buf.iov_len = msg_size;
4053 	xn_params.recv_buf.iov_base = recv_msg;
4054 	xn_params.recv_buf.iov_len = recv_size;
4055 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
4056 	if (reply_sz < 0)
4057 		return reply_sz;
4058 	*recv_len = reply_sz;
4059 
4060 	return 0;
4061 }
4062 EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
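
/* Usage sketch from an RDMA auxiliary driver's perspective (buffer sizes and
 * contents are illustrative; the payload format is defined by the RDMA driver
 * and the device):
 *
 *	u8 req[64] = {}, resp[256];
 *	u16 resp_len = sizeof(resp);
 *	int err;
 *
 *	err = idpf_idc_rdma_vc_send_sync(cdev_info, req, sizeof(req),
 *					 resp, &resp_len);
 *
 * On success resp_len holds the number of reply bytes copied into resp; on
 * failure it is left at 0.
 */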
4063