xref: /linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c (revision af2d6148d2a159e1a0862bce5a2c88c1618a2b27)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include <linux/export.h>
5 #include <net/libeth/rx.h>
6 
7 #include "idpf.h"
8 #include "idpf_virtchnl.h"
9 #include "idpf_ptp.h"
10 
11 /**
12  * struct idpf_vc_xn_manager - Manager for tracking transactions
13  * @ring: backing and lookup for transactions
14  * @free_xn_bm: bitmap for free transactions
15  * @xn_bm_lock: make bitmap access synchronous where necessary
16  * @salt: used to make the cookie unique for every message
17  */
18 struct idpf_vc_xn_manager {
19 	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
20 	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
21 	spinlock_t xn_bm_lock;
22 	u8 salt;
23 };
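/*
 * Cookie layout, illustrated (a descriptive sketch of the code below, not a
 * separate API): each outgoing mailbox message carries a 16-bit SW cookie
 * composed from the transaction's ring index and a per-message rolling salt:
 *
 *	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
 *		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
 *
 * idpf_vc_xn_forward_reply() decodes both fields with FIELD_GET() to find the
 * slot in @ring and to reject stale replies whose salt no longer matches.
 */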
24 
25 /**
26  * idpf_vid_to_vport - Translate vport id to vport pointer
27  * @adapter: private data struct
28  * @v_id: vport id to translate
29  *
30  * Returns vport matching v_id, NULL if not found.
31  */
32 static
33 struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
34 {
35 	u16 num_max_vports = idpf_get_max_vports(adapter);
36 	int i;
37 
38 	for (i = 0; i < num_max_vports; i++)
39 		if (adapter->vport_ids[i] == v_id)
40 			return adapter->vports[i];
41 
42 	return NULL;
43 }
44 
45 /**
46  * idpf_handle_event_link - Handle link event message
47  * @adapter: private data struct
48  * @v2e: virtchnl event message
49  */
50 static void idpf_handle_event_link(struct idpf_adapter *adapter,
51 				   const struct virtchnl2_event *v2e)
52 {
53 	struct idpf_netdev_priv *np;
54 	struct idpf_vport *vport;
55 
56 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
57 	if (!vport) {
58 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
59 				    le32_to_cpu(v2e->vport_id));
60 		return;
61 	}
62 	np = netdev_priv(vport->netdev);
63 
64 	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
65 
66 	if (vport->link_up == v2e->link_status)
67 		return;
68 
69 	vport->link_up = v2e->link_status;
70 
71 	if (np->state != __IDPF_VPORT_UP)
72 		return;
73 
74 	if (vport->link_up) {
75 		netif_tx_start_all_queues(vport->netdev);
76 		netif_carrier_on(vport->netdev);
77 	} else {
78 		netif_tx_stop_all_queues(vport->netdev);
79 		netif_carrier_off(vport->netdev);
80 	}
81 }
82 
83 /**
84  * idpf_recv_event_msg - Receive virtchnl event message
85  * @adapter: Driver specific private structure
86  * @ctlq_msg: message to copy from
87  *
88  * Receive virtchnl event message
89  */
90 static void idpf_recv_event_msg(struct idpf_adapter *adapter,
91 				struct idpf_ctlq_msg *ctlq_msg)
92 {
93 	int payload_size = ctlq_msg->ctx.indirect.payload->size;
94 	struct virtchnl2_event *v2e;
95 	u32 event;
96 
97 	if (payload_size < sizeof(*v2e)) {
98 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
99 				    ctlq_msg->cookie.mbx.chnl_opcode,
100 				    payload_size);
101 		return;
102 	}
103 
104 	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
105 	event = le32_to_cpu(v2e->event);
106 
107 	switch (event) {
108 	case VIRTCHNL2_EVENT_LINK_CHANGE:
109 		idpf_handle_event_link(adapter, v2e);
110 		return;
111 	default:
112 		dev_err(&adapter->pdev->dev,
113 			"Unknown event %d from PF\n", event);
114 		break;
115 	}
116 }
117 
118 /**
119  * idpf_mb_clean - Reclaim the send mailbox queue entries
120  * @adapter: Driver specific private structure
121  *
122  * Reclaim the send mailbox queue entries to be used to send further messages
123  *
124  * Returns 0 on success, negative on failure
125  */
126 static int idpf_mb_clean(struct idpf_adapter *adapter)
127 {
128 	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
129 	struct idpf_ctlq_msg **q_msg;
130 	struct idpf_dma_mem *dma_mem;
131 	int err;
132 
133 	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
134 	if (!q_msg)
135 		return -ENOMEM;
136 
137 	err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
138 	if (err)
139 		goto err_kfree;
140 
141 	for (i = 0; i < num_q_msg; i++) {
142 		if (!q_msg[i])
143 			continue;
144 		dma_mem = q_msg[i]->ctx.indirect.payload;
145 		if (dma_mem)
146 			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
147 					  dma_mem->va, dma_mem->pa);
148 		kfree(q_msg[i]);
149 		kfree(dma_mem);
150 	}
151 
152 err_kfree:
153 	kfree(q_msg);
154 
155 	return err;
156 }
157 
158 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
159 /**
160  * idpf_ptp_is_mb_msg - Check if the message is PTP-related
161  * @op: virtchnl opcode
162  *
163  * Return: true if msg is PTP-related, false otherwise.
164  */
165 static bool idpf_ptp_is_mb_msg(u32 op)
166 {
167 	switch (op) {
168 	case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
169 	case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
170 	case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
171 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
172 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
173 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
174 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
175 		return true;
176 	default:
177 		return false;
178 	}
179 }
180 
181 /**
182  * idpf_prepare_ptp_mb_msg - Prepare PTP related message
183  *
184  * @adapter: Driver specific private structure
185  * @op: virtchnl opcode
186  * @ctlq_msg: Corresponding control queue message
187  */
188 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
189 				    struct idpf_ctlq_msg *ctlq_msg)
190 {
191 	/* If the message is PTP-related and the secondary mailbox is available,
192 	 * send the message through the secondary mailbox.
193 	 */
194 	if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
195 		return;
196 
197 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
198 	ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
199 	ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
200 }
201 #else /* !CONFIG_PTP_1588_CLOCK */
202 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
203 				    struct idpf_ctlq_msg *ctlq_msg)
204 { }
205 #endif /* CONFIG_PTP_1588_CLOCK */
206 
207 /**
208  * idpf_send_mb_msg - Send message over mailbox
209  * @adapter: Driver specific private structure
210  * @op: virtchnl opcode
211  * @msg_size: size of the payload
212  * @msg: pointer to buffer holding the payload
213  * @cookie: unique SW generated cookie per message
214  *
215  * Prepares the control queue message and initiates the send API.
216  *
217  * Returns 0 on success, negative on failure
218  */
219 int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
220 		     u16 msg_size, u8 *msg, u16 cookie)
221 {
222 	struct idpf_ctlq_msg *ctlq_msg;
223 	struct idpf_dma_mem *dma_mem;
224 	int err;
225 
226 	/* If we are here and a reset is detected, nothing much can be
227 	 * done. This thread should silently abort and is expected to
228 	 * be corrected with a new run, either by user or driver
229 	 * flows, after the reset.
230 	 */
231 	if (idpf_is_reset_detected(adapter))
232 		return 0;
233 
234 	err = idpf_mb_clean(adapter);
235 	if (err)
236 		return err;
237 
238 	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
239 	if (!ctlq_msg)
240 		return -ENOMEM;
241 
242 	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
243 	if (!dma_mem) {
244 		err = -ENOMEM;
245 		goto dma_mem_error;
246 	}
247 
248 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
249 	ctlq_msg->func_id = 0;
250 
251 	idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
252 
253 	ctlq_msg->data_len = msg_size;
254 	ctlq_msg->cookie.mbx.chnl_opcode = op;
255 	ctlq_msg->cookie.mbx.chnl_retval = 0;
256 	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
257 	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
258 					 &dma_mem->pa, GFP_ATOMIC);
259 	if (!dma_mem->va) {
260 		err = -ENOMEM;
261 		goto dma_alloc_error;
262 	}
263 
264 	/* It's possible we're just sending an opcode but no buffer */
265 	if (msg && msg_size)
266 		memcpy(dma_mem->va, msg, msg_size);
267 	ctlq_msg->ctx.indirect.payload = dma_mem;
268 	ctlq_msg->ctx.sw_cookie.data = cookie;
269 
270 	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
271 	if (err)
272 		goto send_error;
273 
274 	return 0;
275 
276 send_error:
277 	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
278 			  dma_mem->pa);
279 dma_alloc_error:
280 	kfree(dma_mem);
281 dma_mem_error:
282 	kfree(ctlq_msg);
283 
284 	return err;
285 }
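/*
 * Note on buffer ownership (descriptive only): idpf_send_mb_msg() always backs
 * an indirect message with an IDPF_CTLQ_MAX_BUF_LEN DMA buffer, even when the
 * payload is short or absent. On success neither the ctlq_msg nor the DMA
 * buffer is freed here; both are reclaimed later by idpf_mb_clean() once the
 * hardware has consumed the descriptor.
 */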
286 
287 /* API for virtchnl "transaction" support ("xn" for short).
288  *
289  * We are reusing the completion lock to serialize access to the
290  * transaction state for simplicity, but it could be its own separate
291  * synchronization primitive as well. For now, this API is only used from
292  * within a workqueue context; raw_spin_lock() is enough.
293  */
294 /**
295  * idpf_vc_xn_lock - Request exclusive access to vc transaction
296  * @xn: struct idpf_vc_xn* to access
297  */
298 #define idpf_vc_xn_lock(xn)			\
299 	raw_spin_lock(&(xn)->completed.wait.lock)
300 
301 /**
302  * idpf_vc_xn_unlock - Release exclusive access to vc transaction
303  * @xn: struct idpf_vc_xn* to access
304  */
305 #define idpf_vc_xn_unlock(xn)		\
306 	raw_spin_unlock(&(xn)->completed.wait.lock)
307 
308 /**
309  * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
310  * reset the transaction state.
311  * @xn: struct idpf_vc_xn to update
312  */
313 static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
314 {
315 	xn->reply.iov_base = NULL;
316 	xn->reply.iov_len = 0;
317 
318 	if (xn->state != IDPF_VC_XN_SHUTDOWN)
319 		xn->state = IDPF_VC_XN_IDLE;
320 }
321 
322 /**
323  * idpf_vc_xn_init - Initialize virtchnl transaction object
324  * @vcxn_mngr: pointer to vc transaction manager struct
325  */
326 static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
327 {
328 	int i;
329 
330 	spin_lock_init(&vcxn_mngr->xn_bm_lock);
331 
332 	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
333 		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
334 
335 		xn->state = IDPF_VC_XN_IDLE;
336 		xn->idx = i;
337 		idpf_vc_xn_release_bufs(xn);
338 		init_completion(&xn->completed);
339 	}
340 
341 	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
342 }
343 
344 /**
345  * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
346  * @vcxn_mngr: pointer to vc transaction manager struct
347  *
348  * All waiting threads will be woken up and their transactions aborted. Further
349  * operations on that object will fail.
350  */
351 void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
352 {
353 	int i;
354 
355 	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
356 	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
357 	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
358 
359 	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
360 		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
361 
362 		idpf_vc_xn_lock(xn);
363 		xn->state = IDPF_VC_XN_SHUTDOWN;
364 		idpf_vc_xn_release_bufs(xn);
365 		idpf_vc_xn_unlock(xn);
366 		complete_all(&xn->completed);
367 	}
368 }
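/*
 * Shutdown semantics, summarized (descriptive only): once the free bitmap has
 * been zeroed, idpf_vc_xn_pop_free() finds no free slot and new callers get
 * -ENOSPC from idpf_vc_xn_exec(), while threads already waiting are woken by
 * complete_all(), observe IDPF_VC_XN_SHUTDOWN and return -ENXIO.
 */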
369 
370 /**
371  * idpf_vc_xn_pop_free - Pop a free transaction from free list
372  * @vcxn_mngr: transaction manager to pop from
373  *
374  * Returns NULL if no free transactions
375  */
376 static
377 struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
378 {
379 	struct idpf_vc_xn *xn = NULL;
380 	unsigned long free_idx;
381 
382 	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
383 	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
384 	if (free_idx == IDPF_VC_XN_RING_LEN)
385 		goto do_unlock;
386 
387 	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
388 	xn = &vcxn_mngr->ring[free_idx];
389 	xn->salt = vcxn_mngr->salt++;
390 
391 do_unlock:
392 	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
393 
394 	return xn;
395 }
396 
397 /**
398  * idpf_vc_xn_push_free - Push a free transaction to free list
399  * @vcxn_mngr: transaction manager to push to
400  * @xn: transaction to push
401  */
402 static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
403 				 struct idpf_vc_xn *xn)
404 {
405 	idpf_vc_xn_release_bufs(xn);
406 	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
407 }
408 
409 /**
410  * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
411  * @adapter: driver specific private structure with vcxn_mngr
412  * @params: parameters for this particular transaction including
413  *   -vc_op: virtchnl operation to send
414  *   -send_buf: kvec iov for send buf and len
415  *   -recv_buf: kvec iov for recv buf and len (ignored if NULL)
416  *   -timeout_ms: timeout waiting for a reply (milliseconds)
417  *   -async: don't wait for message reply, will lose caller context
418  *   -async_handler: callback to handle async replies
419  *
420  * @returns >= 0 for success, the size of the initial reply (may or may not be
421  * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
422  * error.
423  */
424 ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
425 			const struct idpf_vc_xn_params *params)
426 {
427 	const struct kvec *send_buf = &params->send_buf;
428 	struct idpf_vc_xn *xn;
429 	ssize_t retval;
430 	u16 cookie;
431 
432 	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
433 	/* no free transactions available */
434 	if (!xn)
435 		return -ENOSPC;
436 
437 	idpf_vc_xn_lock(xn);
438 	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
439 		retval = -ENXIO;
440 		goto only_unlock;
441 	} else if (xn->state != IDPF_VC_XN_IDLE) {
442 		/* We're just going to clobber this transaction even though
443 		 * it's not IDLE. If we don't reuse it we could theoretically
444 		 * eventually leak all the free transactions and not be able to
445 		 * send any messages. At least this way we make an attempt to
446 		 * remain functional even though something really bad is
447 		 * happening that's corrupting what was supposed to be free
448 		 * transactions.
449 		 */
450 		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
451 			  xn->idx, xn->vc_op);
452 	}
453 
454 	xn->reply = params->recv_buf;
455 	xn->reply_sz = 0;
456 	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
457 	xn->vc_op = params->vc_op;
458 	xn->async_handler = params->async_handler;
459 	idpf_vc_xn_unlock(xn);
460 
461 	if (!params->async)
462 		reinit_completion(&xn->completed);
463 	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
464 		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
465 
466 	retval = idpf_send_mb_msg(adapter, params->vc_op,
467 				  send_buf->iov_len, send_buf->iov_base,
468 				  cookie);
469 	if (retval) {
470 		idpf_vc_xn_lock(xn);
471 		goto release_and_unlock;
472 	}
473 
474 	if (params->async)
475 		return 0;
476 
477 	wait_for_completion_timeout(&xn->completed,
478 				    msecs_to_jiffies(params->timeout_ms));
479 
480 	/* No need to check the return value; we check the final state of the
481 	 * transaction below. It's possible the transaction actually gets more
482 	 * time than the specified timeout if we get preempted here, after
483 	 * wait_for_completion_timeout returns. This should be a non-issue,
484 	 * however.
485 	 */
486 	idpf_vc_xn_lock(xn);
487 	switch (xn->state) {
488 	case IDPF_VC_XN_SHUTDOWN:
489 		retval = -ENXIO;
490 		goto only_unlock;
491 	case IDPF_VC_XN_WAITING:
492 		dev_notice_ratelimited(&adapter->pdev->dev,
493 				       "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
494 				       params->vc_op, cookie, xn->vc_op,
495 				       xn->salt, params->timeout_ms);
496 		retval = -ETIME;
497 		break;
498 	case IDPF_VC_XN_COMPLETED_SUCCESS:
499 		retval = xn->reply_sz;
500 		break;
501 	case IDPF_VC_XN_COMPLETED_FAILED:
502 		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
503 				       params->vc_op);
504 		retval = -EIO;
505 		break;
506 	default:
507 		/* Invalid state. */
508 		WARN_ON_ONCE(1);
509 		retval = -EIO;
510 		break;
511 	}
512 
513 release_and_unlock:
514 	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
515 	/* If we receive a VC reply after here, it will be dropped. */
516 only_unlock:
517 	idpf_vc_xn_unlock(xn);
518 
519 	return retval;
520 }
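/*
 * Usage sketch, hedged: the synchronous pattern is what the senders below
 * (e.g. idpf_send_ver_msg()) use. An asynchronous transaction instead sets
 * @async and provides a handler that runs from the mailbox receive path; the
 * names prefixed "example_" are hypothetical and only illustrate the shape of
 * such a caller:
 *
 *	static int example_reply_handler(struct idpf_adapter *adapter,
 *					 struct idpf_vc_xn *xn,
 *					 const struct idpf_ctlq_msg *ctlq_msg)
 *	{
 *		... inspect ctlq_msg and xn->reply_sz, return 0 or -errno ...
 *	}
 *
 *	struct idpf_vc_xn_params xn_params = {
 *		.vc_op		= VIRTCHNL2_OP_ENABLE_VPORT,
 *		.send_buf.iov_base = &v_id,
 *		.send_buf.iov_len  = sizeof(v_id),
 *		.async		= true,
 *		.async_handler	= example_reply_handler,
 *	};
 *	ssize_t ret = idpf_vc_xn_exec(adapter, &xn_params);
 *
 * For async sends idpf_vc_xn_exec() returns 0 as soon as the message is
 * queued; the reply (or error) is delivered to the handler by
 * idpf_vc_xn_forward_async().
 */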
521 
522 /**
523  * idpf_vc_xn_forward_async - Handle async reply receives
524  * @adapter: private data struct
525  * @xn: transaction to handle
526  * @ctlq_msg: corresponding ctlq_msg
527  *
528  * For async sends we're going to lose the caller's context, so if an
529  * async_handler was provided, it can deal with the reply; otherwise we'll just
530  * check and report if there is an error.
531  */
532 static int
533 idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
534 			 const struct idpf_ctlq_msg *ctlq_msg)
535 {
536 	int err = 0;
537 
538 	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
539 		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
540 				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
541 		xn->reply_sz = 0;
542 		err = -EINVAL;
543 		goto release_bufs;
544 	}
545 
546 	if (xn->async_handler) {
547 		err = xn->async_handler(adapter, xn, ctlq_msg);
548 		goto release_bufs;
549 	}
550 
551 	if (ctlq_msg->cookie.mbx.chnl_retval) {
552 		xn->reply_sz = 0;
553 		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
554 				    ctlq_msg->cookie.mbx.chnl_opcode);
555 		err = -EINVAL;
556 	}
557 
558 release_bufs:
559 	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
560 
561 	return err;
562 }
563 
564 /**
565  * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
566  * @adapter: driver specific private structure with vcxn_mngr
567  * @ctlq_msg: controlq message to send back to receiving thread
568  */
569 static int
570 idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
571 			 const struct idpf_ctlq_msg *ctlq_msg)
572 {
573 	const void *payload = NULL;
574 	size_t payload_size = 0;
575 	struct idpf_vc_xn *xn;
576 	u16 msg_info;
577 	int err = 0;
578 	u16 xn_idx;
579 	u16 salt;
580 
581 	msg_info = ctlq_msg->ctx.sw_cookie.data;
582 	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
583 	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
584 		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
585 				    xn_idx);
586 		return -EINVAL;
587 	}
588 	xn = &adapter->vcxn_mngr->ring[xn_idx];
589 	idpf_vc_xn_lock(xn);
590 	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
591 	if (xn->salt != salt) {
592 		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
593 				    xn->vc_op, xn->salt, xn->state,
594 				    ctlq_msg->cookie.mbx.chnl_opcode, salt);
595 		idpf_vc_xn_unlock(xn);
596 		return -EINVAL;
597 	}
598 
599 	switch (xn->state) {
600 	case IDPF_VC_XN_WAITING:
601 		/* success */
602 		break;
603 	case IDPF_VC_XN_IDLE:
604 		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
605 				    ctlq_msg->cookie.mbx.chnl_opcode);
606 		err = -EINVAL;
607 		goto out_unlock;
608 	case IDPF_VC_XN_SHUTDOWN:
609 		/* ENXIO is a bit special here as the recv msg loop uses that to
610 		 * know if it should stop trying to clean the ring if we lost
611 		 * the virtchnl. We need to stop playing with registers and
612 		 * yield.
613 		 */
614 		err = -ENXIO;
615 		goto out_unlock;
616 	case IDPF_VC_XN_ASYNC:
617 		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
618 		idpf_vc_xn_unlock(xn);
619 		return err;
620 	default:
621 		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
622 				    ctlq_msg->cookie.mbx.chnl_opcode);
623 		err = -EBUSY;
624 		goto out_unlock;
625 	}
626 
627 	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
628 		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
629 				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
630 		xn->reply_sz = 0;
631 		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
632 		err = -EINVAL;
633 		goto out_unlock;
634 	}
635 
636 	if (ctlq_msg->cookie.mbx.chnl_retval) {
637 		xn->reply_sz = 0;
638 		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
639 		err = -EINVAL;
640 		goto out_unlock;
641 	}
642 
643 	if (ctlq_msg->data_len) {
644 		payload = ctlq_msg->ctx.indirect.payload->va;
645 		payload_size = ctlq_msg->data_len;
646 	}
647 
648 	xn->reply_sz = payload_size;
649 	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
650 
651 	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
652 		memcpy(xn->reply.iov_base, payload,
653 		       min_t(size_t, xn->reply.iov_len, payload_size));
654 
655 out_unlock:
656 	idpf_vc_xn_unlock(xn);
657 	/* we _cannot_ hold lock while calling complete */
658 	complete(&xn->completed);
659 
660 	return err;
661 }
662 
663 /**
664  * idpf_recv_mb_msg - Receive message over mailbox
665  * @adapter: Driver specific private structure
666  *
667  * Receives a control queue message and posts the receive buffer. Returns 0
668  * on success and negative on failure.
669  */
670 int idpf_recv_mb_msg(struct idpf_adapter *adapter)
671 {
672 	struct idpf_ctlq_msg ctlq_msg;
673 	struct idpf_dma_mem *dma_mem;
674 	int post_err, err;
675 	u16 num_recv;
676 
677 	while (1) {
678 		/* This will get <= num_recv messages and output how many
679 		 * actually received on num_recv.
680 		 */
681 		num_recv = 1;
682 		err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
683 		if (err || !num_recv)
684 			break;
685 
686 		if (ctlq_msg.data_len) {
687 			dma_mem = ctlq_msg.ctx.indirect.payload;
688 		} else {
689 			dma_mem = NULL;
690 			num_recv = 0;
691 		}
692 
693 		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
694 			idpf_recv_event_msg(adapter, &ctlq_msg);
695 		else
696 			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
697 
698 		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
699 						   adapter->hw.arq,
700 						   &num_recv, &dma_mem);
701 
702 		/* If the post failed, free the only buffer we supplied */
703 		if (post_err) {
704 			if (dma_mem)
705 				dmam_free_coherent(&adapter->pdev->dev,
706 						   dma_mem->size, dma_mem->va,
707 						   dma_mem->pa);
708 			break;
709 		}
710 
711 		/* virtchnl trying to shutdown, stop cleaning */
712 		if (err == -ENXIO)
713 			break;
714 	}
715 
716 	return err;
717 }
718 
719 /**
720  * idpf_wait_for_marker_event - wait for software marker response
721  * @vport: virtual port data structure
722  *
723  * Returns 0 on success, negative on failure.
724  **/
725 static int idpf_wait_for_marker_event(struct idpf_vport *vport)
726 {
727 	int event;
728 	int i;
729 
730 	for (i = 0; i < vport->num_txq; i++)
731 		idpf_queue_set(SW_MARKER, vport->txqs[i]);
732 
733 	event = wait_event_timeout(vport->sw_marker_wq,
734 				   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
735 						      vport->flags),
736 				   msecs_to_jiffies(500));
737 
738 	for (i = 0; i < vport->num_txq; i++)
739 		idpf_queue_clear(POLL_MODE, vport->txqs[i]);
740 
741 	if (event)
742 		return 0;
743 
744 	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
745 
746 	return -ETIMEDOUT;
747 }
748 
749 /**
750  * idpf_send_ver_msg - send virtchnl version message
751  * @adapter: Driver specific private structure
752  *
753  * Send virtchnl version message.  Returns 0 on success, negative on failure.
754  */
755 static int idpf_send_ver_msg(struct idpf_adapter *adapter)
756 {
757 	struct idpf_vc_xn_params xn_params = {};
758 	struct virtchnl2_version_info vvi;
759 	ssize_t reply_sz;
760 	u32 major, minor;
761 	int err = 0;
762 
763 	if (adapter->virt_ver_maj) {
764 		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
765 		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
766 	} else {
767 		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
768 		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
769 	}
770 
771 	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
772 	xn_params.send_buf.iov_base = &vvi;
773 	xn_params.send_buf.iov_len = sizeof(vvi);
774 	xn_params.recv_buf = xn_params.send_buf;
775 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
776 
777 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
778 	if (reply_sz < 0)
779 		return reply_sz;
780 	if (reply_sz < sizeof(vvi))
781 		return -EIO;
782 
783 	major = le32_to_cpu(vvi.major);
784 	minor = le32_to_cpu(vvi.minor);
785 
786 	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
787 		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
788 		return -EINVAL;
789 	}
790 
791 	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
792 	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
793 		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
794 
795 	/* If we have a mismatch, resend version to update receiver on what
796 	 * version we will use.
797 	 */
798 	if (!adapter->virt_ver_maj &&
799 	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
800 	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
801 		err = -EAGAIN;
802 
803 	adapter->virt_ver_maj = major;
804 	adapter->virt_ver_min = minor;
805 
806 	return err;
807 }
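/*
 * Worked example (version numbers are hypothetical): if the driver offers 2.0
 * and the device replies 1.1, both major and minor differ, so the first pass
 * records virt_ver_maj/virt_ver_min as 1.1 and returns -EAGAIN; the retry then
 * sends 1.1 and succeeds. A reply whose major is above
 * IDPF_VIRTCHNL_VERSION_MAJOR fails outright with -EINVAL.
 */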
808 
809 /**
810  * idpf_send_get_caps_msg - Send virtchnl get capabilities message
811  * @adapter: Driver specific private structure
812  *
813  * Send virtchnl get capabilities message. Returns 0 on success, negative on
814  * failure.
815  */
816 static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
817 {
818 	struct virtchnl2_get_capabilities caps = {};
819 	struct idpf_vc_xn_params xn_params = {};
820 	ssize_t reply_sz;
821 
822 	caps.csum_caps =
823 		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4	|
824 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|
825 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	|
826 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|
827 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|
828 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	|
829 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|
830 			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4	|
831 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|
832 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|
833 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|
834 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|
835 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	|
836 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	|
837 			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
838 			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
839 			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
840 			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
841 			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);
842 
843 	caps.seg_caps =
844 		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP		|
845 			    VIRTCHNL2_CAP_SEG_IPV4_UDP		|
846 			    VIRTCHNL2_CAP_SEG_IPV4_SCTP		|
847 			    VIRTCHNL2_CAP_SEG_IPV6_TCP		|
848 			    VIRTCHNL2_CAP_SEG_IPV6_UDP		|
849 			    VIRTCHNL2_CAP_SEG_IPV6_SCTP		|
850 			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
851 
852 	caps.rss_caps =
853 		cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP		|
854 			    VIRTCHNL2_CAP_RSS_IPV4_UDP		|
855 			    VIRTCHNL2_CAP_RSS_IPV4_SCTP		|
856 			    VIRTCHNL2_CAP_RSS_IPV4_OTHER	|
857 			    VIRTCHNL2_CAP_RSS_IPV6_TCP		|
858 			    VIRTCHNL2_CAP_RSS_IPV6_UDP		|
859 			    VIRTCHNL2_CAP_RSS_IPV6_SCTP		|
860 			    VIRTCHNL2_CAP_RSS_IPV6_OTHER);
861 
862 	caps.hsplit_caps =
863 		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|
864 			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);
865 
866 	caps.rsc_caps =
867 		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP		|
868 			    VIRTCHNL2_CAP_RSC_IPV6_TCP);
869 
870 	caps.other_caps =
871 		cpu_to_le64(VIRTCHNL2_CAP_SRIOV			|
872 			    VIRTCHNL2_CAP_RDMA                  |
873 			    VIRTCHNL2_CAP_LAN_MEMORY_REGIONS	|
874 			    VIRTCHNL2_CAP_MACFILTER		|
875 			    VIRTCHNL2_CAP_SPLITQ_QSCHED		|
876 			    VIRTCHNL2_CAP_PROMISC		|
877 			    VIRTCHNL2_CAP_LOOPBACK		|
878 			    VIRTCHNL2_CAP_PTP);
879 
880 	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
881 	xn_params.send_buf.iov_base = &caps;
882 	xn_params.send_buf.iov_len = sizeof(caps);
883 	xn_params.recv_buf.iov_base = &adapter->caps;
884 	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
885 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
886 
887 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
888 	if (reply_sz < 0)
889 		return reply_sz;
890 	if (reply_sz < sizeof(adapter->caps))
891 		return -EIO;
892 
893 	return 0;
894 }
895 
896 /**
897  * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
898  * @adapter: Driver specific private struct
899  *
900  * Return: 0 on success or error code on failure.
901  */
902 static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
903 {
904 	struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
905 	struct idpf_vc_xn_params xn_params = {
906 		.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
907 		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
908 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
909 	};
910 	int num_regions, size;
911 	struct idpf_hw *hw;
912 	ssize_t reply_sz;
913 	int err = 0;
914 
915 	rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
916 	if (!rcvd_regions)
917 		return -ENOMEM;
918 
919 	xn_params.recv_buf.iov_base = rcvd_regions;
920 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
921 	if (reply_sz < 0)
922 		return reply_sz;
923 
924 	num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
925 	size = struct_size(rcvd_regions, mem_reg, num_regions);
926 	if (reply_sz < size)
927 		return -EIO;
928 
929 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
930 		return -EINVAL;
931 
932 	hw = &adapter->hw;
933 	hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
934 	if (!hw->lan_regs)
935 		return -ENOMEM;
936 
937 	for (int i = 0; i < num_regions; i++) {
938 		hw->lan_regs[i].addr_len =
939 			le64_to_cpu(rcvd_regions->mem_reg[i].size);
940 		hw->lan_regs[i].addr_start =
941 			le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
942 	}
943 	hw->num_lan_regs = num_regions;
944 
945 	return err;
946 }
947 
948 /**
949  * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
950  * @adapter: Driver specific private structure
951  *
952  * Called when idpf_send_get_lan_memory_regions is not supported. This will
953  * calculate the offsets and sizes for the regions before, in between, and
954  * after the mailbox and rstat MMIO mappings.
955  *
956  * Return: 0 on success or error code on failure.
957  */
958 static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
959 {
960 	struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
961 	struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
962 	struct idpf_hw *hw = &adapter->hw;
963 
964 	hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
965 	hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
966 			       GFP_KERNEL);
967 	if (!hw->lan_regs)
968 		return -ENOMEM;
969 
970 	/* Region preceding mailbox */
971 	hw->lan_regs[0].addr_start = 0;
972 	hw->lan_regs[0].addr_len = mbx_reg->start;
973 	/* Region between mailbox and rstat */
974 	hw->lan_regs[1].addr_start = mbx_reg->end + 1;
975 	hw->lan_regs[1].addr_len = rstat_reg->start -
976 					hw->lan_regs[1].addr_start;
977 	/* Region after rstat */
978 	hw->lan_regs[2].addr_start = rstat_reg->end + 1;
979 	hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
980 					hw->lan_regs[2].addr_start;
981 
982 	return 0;
983 }
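/*
 * Worked example with hypothetical offsets: for a 1 MiB BAR0 with the mailbox
 * window at [0x1000, 0x1fff] and the rstat window at [0x5000, 0x50ff], the
 * three fallback regions become [0x0, 0x1000), [0x2000, 0x5000) and
 * [0x5100, 0x100000).
 */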
984 
985 /**
986  * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
987  * @adapter: Driver specific private structure
988  *
989  * Return: 0 on success or error code on failure.
990  */
991 static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
992 {
993 	struct pci_dev *pdev = adapter->pdev;
994 	struct idpf_hw *hw = &adapter->hw;
995 	resource_size_t res_start;
996 
997 	res_start = pci_resource_start(pdev, 0);
998 
999 	for (int i = 0; i < hw->num_lan_regs; i++) {
1000 		resource_size_t start;
1001 		long len;
1002 
1003 		len = hw->lan_regs[i].addr_len;
1004 		if (!len)
1005 			continue;
1006 		start = hw->lan_regs[i].addr_start + res_start;
1007 
1008 		hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
1009 		if (!hw->lan_regs[i].vaddr) {
1010 			pci_err(pdev, "failed to allocate BAR0 region\n");
1011 			return -ENOMEM;
1012 		}
1013 	}
1014 
1015 	return 0;
1016 }
1017 
1018 /**
1019  * idpf_vport_alloc_max_qs - Allocate max queues for a vport
1020  * @adapter: Driver specific private structure
1021  * @max_q: vport max queue structure
1022  */
1023 int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
1024 			    struct idpf_vport_max_q *max_q)
1025 {
1026 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1027 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1028 	u16 default_vports = idpf_get_default_vports(adapter);
1029 	int max_rx_q, max_tx_q;
1030 
1031 	mutex_lock(&adapter->queue_lock);
1032 
1033 	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
1034 	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
1035 	if (adapter->num_alloc_vports < default_vports) {
1036 		max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
1037 		max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
1038 	} else {
1039 		max_q->max_rxq = IDPF_MIN_Q;
1040 		max_q->max_txq = IDPF_MIN_Q;
1041 	}
1042 	max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
1043 	max_q->max_complq = max_q->max_txq;
1044 
1045 	if (avail_queues->avail_rxq < max_q->max_rxq ||
1046 	    avail_queues->avail_txq < max_q->max_txq ||
1047 	    avail_queues->avail_bufq < max_q->max_bufq ||
1048 	    avail_queues->avail_complq < max_q->max_complq) {
1049 		mutex_unlock(&adapter->queue_lock);
1050 
1051 		return -EINVAL;
1052 	}
1053 
1054 	avail_queues->avail_rxq -= max_q->max_rxq;
1055 	avail_queues->avail_txq -= max_q->max_txq;
1056 	avail_queues->avail_bufq -= max_q->max_bufq;
1057 	avail_queues->avail_complq -= max_q->max_complq;
1058 
1059 	mutex_unlock(&adapter->queue_lock);
1060 
1061 	return 0;
1062 }
1063 
1064 /**
1065  * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
1066  * @adapter: Driver specific private structure
1067  * @max_q: vport max queue structure
1068  */
1069 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
1070 			       struct idpf_vport_max_q *max_q)
1071 {
1072 	struct idpf_avail_queue_info *avail_queues;
1073 
1074 	mutex_lock(&adapter->queue_lock);
1075 	avail_queues = &adapter->avail_queues;
1076 
1077 	avail_queues->avail_rxq += max_q->max_rxq;
1078 	avail_queues->avail_txq += max_q->max_txq;
1079 	avail_queues->avail_bufq += max_q->max_bufq;
1080 	avail_queues->avail_complq += max_q->max_complq;
1081 
1082 	mutex_unlock(&adapter->queue_lock);
1083 }
1084 
1085 /**
1086  * idpf_init_avail_queues - Initialize available queues on the device
1087  * @adapter: Driver specific private structure
1088  */
1089 static void idpf_init_avail_queues(struct idpf_adapter *adapter)
1090 {
1091 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1092 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1093 
1094 	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
1095 	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
1096 	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
1097 	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
1098 }
1099 
1100 /**
1101  * idpf_get_reg_intr_vecs - Get vector queue register offset
1102  * @vport: virtual port structure
1103  * @reg_vals: Register offsets to store in
1104  *
1105  * Returns number of registers that got populated
1106  */
1107 int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
1108 			   struct idpf_vec_regs *reg_vals)
1109 {
1110 	struct virtchnl2_vector_chunks *chunks;
1111 	struct idpf_vec_regs reg_val;
1112 	u16 num_vchunks, num_vec;
1113 	int num_regs = 0, i, j;
1114 
1115 	chunks = &vport->adapter->req_vec_chunks->vchunks;
1116 	num_vchunks = le16_to_cpu(chunks->num_vchunks);
1117 
1118 	for (j = 0; j < num_vchunks; j++) {
1119 		struct virtchnl2_vector_chunk *chunk;
1120 		u32 dynctl_reg_spacing;
1121 		u32 itrn_reg_spacing;
1122 
1123 		chunk = &chunks->vchunks[j];
1124 		num_vec = le16_to_cpu(chunk->num_vectors);
1125 		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
1126 		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
1127 		reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);
1128 
1129 		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
1130 		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);
1131 
1132 		for (i = 0; i < num_vec; i++) {
1133 			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
1134 			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
1135 			reg_vals[num_regs].itrn_index_spacing =
1136 						reg_val.itrn_index_spacing;
1137 
1138 			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
1139 			reg_val.itrn_reg += itrn_reg_spacing;
1140 			num_regs++;
1141 		}
1142 	}
1143 
1144 	return num_regs;
1145 }
1146 
1147 /**
1148  * idpf_vport_get_q_reg - Get the queue registers for the vport
1149  * @reg_vals: register values needing to be set
1150  * @num_regs: number of register slots we expect to fill
1151  * @q_type: queue model
1152  * @chunks: queue regs received over mailbox
1153  *
1154  * This function parses the queue register offsets from the queue register
1155  * chunk information for a specific queue type and stores them in the array
1156  * passed as an argument. It returns the actual number of queue registers that
1157  * are filled.
1158  */
1159 static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
1160 				struct virtchnl2_queue_reg_chunks *chunks)
1161 {
1162 	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
1163 	int reg_filled = 0, i;
1164 	u32 reg_val;
1165 
1166 	while (num_chunks--) {
1167 		struct virtchnl2_queue_reg_chunk *chunk;
1168 		u16 num_q;
1169 
1170 		chunk = &chunks->chunks[num_chunks];
1171 		if (le32_to_cpu(chunk->type) != q_type)
1172 			continue;
1173 
1174 		num_q = le32_to_cpu(chunk->num_queues);
1175 		reg_val = le64_to_cpu(chunk->qtail_reg_start);
1176 		for (i = 0; i < num_q && reg_filled < num_regs ; i++) {
1177 			reg_vals[reg_filled++] = reg_val;
1178 			reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
1179 		}
1180 	}
1181 
1182 	return reg_filled;
1183 }
1184 
1185 /**
1186  * __idpf_queue_reg_init - initialize queue registers
1187  * @vport: virtual port structure
1188  * @reg_vals: registers we are initializing
1189  * @num_regs: how many registers there are in total
1190  * @q_type: queue model
1191  *
1192  * Return number of queues that are initialized
1193  */
1194 static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
1195 				 int num_regs, u32 q_type)
1196 {
1197 	struct idpf_adapter *adapter = vport->adapter;
1198 	int i, j, k = 0;
1199 
1200 	switch (q_type) {
1201 	case VIRTCHNL2_QUEUE_TYPE_TX:
1202 		for (i = 0; i < vport->num_txq_grp; i++) {
1203 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1204 
1205 			for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
1206 				tx_qgrp->txqs[j]->tail =
1207 					idpf_get_reg_addr(adapter, reg_vals[k]);
1208 		}
1209 		break;
1210 	case VIRTCHNL2_QUEUE_TYPE_RX:
1211 		for (i = 0; i < vport->num_rxq_grp; i++) {
1212 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1213 			u16 num_rxq = rx_qgrp->singleq.num_rxq;
1214 
1215 			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
1216 				struct idpf_rx_queue *q;
1217 
1218 				q = rx_qgrp->singleq.rxqs[j];
1219 				q->tail = idpf_get_reg_addr(adapter,
1220 							    reg_vals[k]);
1221 			}
1222 		}
1223 		break;
1224 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
1225 		for (i = 0; i < vport->num_rxq_grp; i++) {
1226 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1227 			u8 num_bufqs = vport->num_bufqs_per_qgrp;
1228 
1229 			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
1230 				struct idpf_buf_queue *q;
1231 
1232 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1233 				q->tail = idpf_get_reg_addr(adapter,
1234 							    reg_vals[k]);
1235 			}
1236 		}
1237 		break;
1238 	default:
1239 		break;
1240 	}
1241 
1242 	return k;
1243 }
1244 
1245 /**
1246  * idpf_queue_reg_init - initialize queue registers
1247  * @vport: virtual port structure
1248  *
1249  * Return 0 on success, negative on failure
1250  */
1251 int idpf_queue_reg_init(struct idpf_vport *vport)
1252 {
1253 	struct virtchnl2_create_vport *vport_params;
1254 	struct virtchnl2_queue_reg_chunks *chunks;
1255 	struct idpf_vport_config *vport_config;
1256 	u16 vport_idx = vport->idx;
1257 	int num_regs, ret = 0;
1258 	u32 *reg_vals;
1259 
1260 	/* We should never deal with more than 256 queues of the same type */
1261 	reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
1262 	if (!reg_vals)
1263 		return -ENOMEM;
1264 
1265 	vport_config = vport->adapter->vport_config[vport_idx];
1266 	if (vport_config->req_qs_chunks) {
1267 		struct virtchnl2_add_queues *vc_aq =
1268 		  (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
1269 		chunks = &vc_aq->chunks;
1270 	} else {
1271 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
1272 		chunks = &vport_params->chunks;
1273 	}
1274 
1275 	/* Initialize Tx queue tail register address */
1276 	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1277 					VIRTCHNL2_QUEUE_TYPE_TX,
1278 					chunks);
1279 	if (num_regs < vport->num_txq) {
1280 		ret = -EINVAL;
1281 		goto free_reg_vals;
1282 	}
1283 
1284 	num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1285 					 VIRTCHNL2_QUEUE_TYPE_TX);
1286 	if (num_regs < vport->num_txq) {
1287 		ret = -EINVAL;
1288 		goto free_reg_vals;
1289 	}
1290 
1291 	/* Initialize Rx/buffer queue tail register address based on Rx queue
1292 	 * model
1293 	 */
1294 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1295 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1296 						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
1297 						chunks);
1298 		if (num_regs < vport->num_bufq) {
1299 			ret = -EINVAL;
1300 			goto free_reg_vals;
1301 		}
1302 
1303 		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1304 						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1305 		if (num_regs < vport->num_bufq) {
1306 			ret = -EINVAL;
1307 			goto free_reg_vals;
1308 		}
1309 	} else {
1310 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1311 						VIRTCHNL2_QUEUE_TYPE_RX,
1312 						chunks);
1313 		if (num_regs < vport->num_rxq) {
1314 			ret = -EINVAL;
1315 			goto free_reg_vals;
1316 		}
1317 
1318 		num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
1319 						 VIRTCHNL2_QUEUE_TYPE_RX);
1320 		if (num_regs < vport->num_rxq) {
1321 			ret = -EINVAL;
1322 			goto free_reg_vals;
1323 		}
1324 	}
1325 
1326 free_reg_vals:
1327 	kfree(reg_vals);
1328 
1329 	return ret;
1330 }
1331 
1332 /**
1333  * idpf_send_create_vport_msg - Send virtchnl create vport message
1334  * @adapter: Driver specific private structure
1335  * @max_q: vport max queue info
1336  *
1337  * Send virtchnl create vport message.
1338  *
1339  * Returns 0 on success, negative on failure
1340  */
1341 int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
1342 			       struct idpf_vport_max_q *max_q)
1343 {
1344 	struct virtchnl2_create_vport *vport_msg;
1345 	struct idpf_vc_xn_params xn_params = {};
1346 	u16 idx = adapter->next_vport;
1347 	int err, buf_size;
1348 	ssize_t reply_sz;
1349 
1350 	buf_size = sizeof(struct virtchnl2_create_vport);
1351 	if (!adapter->vport_params_reqd[idx]) {
1352 		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
1353 							  GFP_KERNEL);
1354 		if (!adapter->vport_params_reqd[idx])
1355 			return -ENOMEM;
1356 	}
1357 
1358 	vport_msg = adapter->vport_params_reqd[idx];
1359 	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
1360 	vport_msg->vport_index = cpu_to_le16(idx);
1361 
1362 	if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
1363 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1364 	else
1365 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1366 
1367 	if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
1368 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1369 	else
1370 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1371 
1372 	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
1373 	if (err) {
1374 		dev_err(&adapter->pdev->dev, "Enough queues are not available");
1375 
1376 		return err;
1377 	}
1378 
1379 	if (!adapter->vport_params_recvd[idx]) {
1380 		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
1381 							   GFP_KERNEL);
1382 		if (!adapter->vport_params_recvd[idx]) {
1383 			err = -ENOMEM;
1384 			goto free_vport_params;
1385 		}
1386 	}
1387 
1388 	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
1389 	xn_params.send_buf.iov_base = vport_msg;
1390 	xn_params.send_buf.iov_len = buf_size;
1391 	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
1392 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
1393 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1394 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1395 	if (reply_sz < 0) {
1396 		err = reply_sz;
1397 		goto free_vport_params;
1398 	}
1399 
1400 	return 0;
1401 
1402 free_vport_params:
1403 	kfree(adapter->vport_params_recvd[idx]);
1404 	adapter->vport_params_recvd[idx] = NULL;
1405 	kfree(adapter->vport_params_reqd[idx]);
1406 	adapter->vport_params_reqd[idx] = NULL;
1407 
1408 	return err;
1409 }
1410 
1411 /**
1412  * idpf_check_supported_desc_ids - Verify we have required descriptor support
1413  * @vport: virtual port structure
1414  *
1415  * Return 0 on success, error on failure
1416  */
1417 int idpf_check_supported_desc_ids(struct idpf_vport *vport)
1418 {
1419 	struct idpf_adapter *adapter = vport->adapter;
1420 	struct virtchnl2_create_vport *vport_msg;
1421 	u64 rx_desc_ids, tx_desc_ids;
1422 
1423 	vport_msg = adapter->vport_params_recvd[vport->idx];
1424 
1425 	if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
1426 	    (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
1427 	     vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
1428 		pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
1429 		return -EOPNOTSUPP;
1430 	}
1431 
1432 	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
1433 	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
1434 
1435 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1436 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
1437 			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
1438 			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1439 		}
1440 	} else {
1441 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
1442 			vport->base_rxd = true;
1443 	}
1444 
1445 	if (!idpf_is_queue_model_split(vport->txq_model))
1446 		return 0;
1447 
1448 	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
1449 		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
1450 		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
1451 	}
1452 
1453 	return 0;
1454 }
1455 
1456 /**
1457  * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
1458  * @vport: virtual port data structure
1459  *
1460  * Send virtchnl destroy vport message.  Returns 0 on success, negative on
1461  * failure.
1462  */
1463 int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
1464 {
1465 	struct idpf_vc_xn_params xn_params = {};
1466 	struct virtchnl2_vport v_id;
1467 	ssize_t reply_sz;
1468 
1469 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1470 
1471 	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
1472 	xn_params.send_buf.iov_base = &v_id;
1473 	xn_params.send_buf.iov_len = sizeof(v_id);
1474 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
1475 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1476 
1477 	return reply_sz < 0 ? reply_sz : 0;
1478 }
1479 
1480 /**
1481  * idpf_send_enable_vport_msg - Send virtchnl enable vport message
1482  * @vport: virtual port data structure
1483  *
1484  * Send enable vport virtchnl message.  Returns 0 on success, negative on
1485  * failure.
1486  */
1487 int idpf_send_enable_vport_msg(struct idpf_vport *vport)
1488 {
1489 	struct idpf_vc_xn_params xn_params = {};
1490 	struct virtchnl2_vport v_id;
1491 	ssize_t reply_sz;
1492 
1493 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1494 
1495 	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
1496 	xn_params.send_buf.iov_base = &v_id;
1497 	xn_params.send_buf.iov_len = sizeof(v_id);
1498 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1499 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1500 
1501 	return reply_sz < 0 ? reply_sz : 0;
1502 }
1503 
1504 /**
1505  * idpf_send_disable_vport_msg - Send virtchnl disable vport message
1506  * @vport: virtual port data structure
1507  *
1508  * Send disable vport virtchnl message.  Returns 0 on success, negative on
1509  * failure.
1510  */
1511 int idpf_send_disable_vport_msg(struct idpf_vport *vport)
1512 {
1513 	struct idpf_vc_xn_params xn_params = {};
1514 	struct virtchnl2_vport v_id;
1515 	ssize_t reply_sz;
1516 
1517 	v_id.vport_id = cpu_to_le32(vport->vport_id);
1518 
1519 	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
1520 	xn_params.send_buf.iov_base = &v_id;
1521 	xn_params.send_buf.iov_len = sizeof(v_id);
1522 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
1523 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1524 
1525 	return reply_sz < 0 ? reply_sz : 0;
1526 }
1527 
1528 /**
1529  * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
1530  * @vport: virtual port data structure
1531  *
1532  * Send config tx queues virtchnl message. Returns 0 on success, negative on
1533  * failure.
1534  */
1535 static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
1536 {
1537 	struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
1538 	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
1539 	struct idpf_vc_xn_params xn_params = {};
1540 	u32 config_sz, chunk_sz, buf_sz;
1541 	int totqs, num_msgs, num_chunks;
1542 	ssize_t reply_sz;
1543 	int i, k = 0;
1544 
1545 	totqs = vport->num_txq + vport->num_complq;
1546 	qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
1547 	if (!qi)
1548 		return -ENOMEM;
1549 
1550 	/* Populate the queue info buffer with all queue context info */
1551 	for (i = 0; i < vport->num_txq_grp; i++) {
1552 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1553 		int j, sched_mode;
1554 
1555 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1556 			qi[k].queue_id =
1557 				cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1558 			qi[k].model =
1559 				cpu_to_le16(vport->txq_model);
1560 			qi[k].type =
1561 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1562 			qi[k].ring_len =
1563 				cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
1564 			qi[k].dma_ring_addr =
1565 				cpu_to_le64(tx_qgrp->txqs[j]->dma);
1566 			if (idpf_is_queue_model_split(vport->txq_model)) {
1567 				struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1568 
1569 				qi[k].tx_compl_queue_id =
1570 					cpu_to_le16(tx_qgrp->complq->q_id);
1571 				qi[k].relative_queue_id = cpu_to_le16(j);
1572 
1573 				if (idpf_queue_has(FLOW_SCH_EN, q))
1574 					qi[k].sched_mode =
1575 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
1576 				else
1577 					qi[k].sched_mode =
1578 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1579 			} else {
1580 				qi[k].sched_mode =
1581 					cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1582 			}
1583 		}
1584 
1585 		if (!idpf_is_queue_model_split(vport->txq_model))
1586 			continue;
1587 
1588 		qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1589 		qi[k].model = cpu_to_le16(vport->txq_model);
1590 		qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1591 		qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
1592 		qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
1593 
1594 		if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
1595 			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1596 		else
1597 			sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1598 		qi[k].sched_mode = cpu_to_le16(sched_mode);
1599 
1600 		k++;
1601 	}
1602 
1603 	/* Make sure accounting agrees */
1604 	if (k != totqs)
1605 		return -EINVAL;
1606 
1607 	/* Chunk up the queue contexts into multiple messages to avoid
1608 	 * sending a control queue message buffer that is too large
1609 	 */
1610 	config_sz = sizeof(struct virtchnl2_config_tx_queues);
1611 	chunk_sz = sizeof(struct virtchnl2_txq_info);
1612 
1613 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1614 			   totqs);
1615 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
1616 
1617 	buf_sz = struct_size(ctq, qinfo, num_chunks);
1618 	ctq = kzalloc(buf_sz, GFP_KERNEL);
1619 	if (!ctq)
1620 		return -ENOMEM;
1621 
1622 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
1623 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1624 
1625 	for (i = 0, k = 0; i < num_msgs; i++) {
1626 		memset(ctq, 0, buf_sz);
1627 		ctq->vport_id = cpu_to_le32(vport->vport_id);
1628 		ctq->num_qinfo = cpu_to_le16(num_chunks);
1629 		memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
1630 
1631 		xn_params.send_buf.iov_base = ctq;
1632 		xn_params.send_buf.iov_len = buf_sz;
1633 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1634 		if (reply_sz < 0)
1635 			return reply_sz;
1636 
1637 		k += num_chunks;
1638 		totqs -= num_chunks;
1639 		num_chunks = min(num_chunks, totqs);
1640 		/* Recalculate buffer size */
1641 		buf_sz = struct_size(ctq, qinfo, num_chunks);
1642 	}
1643 
1644 	return 0;
1645 }
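/*
 * Chunking example (numbers are hypothetical): with 96 queue contexts to send
 * and room for 40 virtchnl2_txq_info entries per control queue buffer, the
 * loop above issues three messages carrying 40, 40 and 16 qinfo entries,
 * shrinking the send buffer size for the final partial message.
 */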
1646 
1647 /**
1648  * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
1649  * @vport: virtual port data structure
1650  *
1651  * Send config rx queues virtchnl message.  Returns 0 on success, negative on
1652  * failure.
1653  */
1654 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
1655 {
1656 	struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
1657 	struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
1658 	struct idpf_vc_xn_params xn_params = {};
1659 	u32 config_sz, chunk_sz, buf_sz;
1660 	int totqs, num_msgs, num_chunks;
1661 	ssize_t reply_sz;
1662 	int i, k = 0;
1663 
1664 	totqs = vport->num_rxq + vport->num_bufq;
1665 	qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
1666 	if (!qi)
1667 		return -ENOMEM;
1668 
1669 	/* Populate the queue info buffer with all queue context info */
1670 	for (i = 0; i < vport->num_rxq_grp; i++) {
1671 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1672 		u16 num_rxq;
1673 		int j;
1674 
1675 		if (!idpf_is_queue_model_split(vport->rxq_model))
1676 			goto setup_rxqs;
1677 
1678 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1679 			struct idpf_buf_queue *bufq =
1680 				&rx_qgrp->splitq.bufq_sets[j].bufq;
1681 
1682 			qi[k].queue_id = cpu_to_le32(bufq->q_id);
1683 			qi[k].model = cpu_to_le16(vport->rxq_model);
1684 			qi[k].type =
1685 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1686 			qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1687 			qi[k].ring_len = cpu_to_le16(bufq->desc_count);
1688 			qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
1689 			qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
1690 			qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
1691 			qi[k].rx_buffer_low_watermark =
1692 				cpu_to_le16(bufq->rx_buffer_low_watermark);
1693 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1694 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1695 		}
1696 
1697 setup_rxqs:
1698 		if (idpf_is_queue_model_split(vport->rxq_model))
1699 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1700 		else
1701 			num_rxq = rx_qgrp->singleq.num_rxq;
1702 
1703 		for (j = 0; j < num_rxq; j++, k++) {
1704 			const struct idpf_bufq_set *sets;
1705 			struct idpf_rx_queue *rxq;
1706 
1707 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1708 				rxq = rx_qgrp->singleq.rxqs[j];
1709 				goto common_qi_fields;
1710 			}
1711 
1712 			rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1713 			sets = rxq->bufq_sets;
1714 
1715 			/* In splitq mode, RXQ buffer size should be
1716 			 * set to that of the first buffer queue
1717 			 * associated with this RXQ.
1718 			 */
1719 			rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
1720 
1721 			qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
1722 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
1723 				qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
1724 				qi[k].rx_bufq2_id =
1725 					cpu_to_le16(sets[1].bufq.q_id);
1726 			}
1727 			qi[k].rx_buffer_low_watermark =
1728 				cpu_to_le16(rxq->rx_buffer_low_watermark);
1729 			if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
1730 				qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1731 
1732 			rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
1733 
1734 			if (idpf_queue_has(HSPLIT_EN, rxq)) {
1735 				qi[k].qflags |=
1736 					cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
1737 				qi[k].hdr_buffer_size =
1738 					cpu_to_le16(rxq->rx_hbuf_size);
1739 			}
1740 
1741 common_qi_fields:
1742 			qi[k].queue_id = cpu_to_le32(rxq->q_id);
1743 			qi[k].model = cpu_to_le16(vport->rxq_model);
1744 			qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1745 			qi[k].ring_len = cpu_to_le16(rxq->desc_count);
1746 			qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
1747 			qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
1748 			qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
1749 			qi[k].qflags |=
1750 				cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
1751 			qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
1752 		}
1753 	}
1754 
1755 	/* Make sure accounting agrees */
1756 	if (k != totqs)
1757 		return -EINVAL;
1758 
1759 	/* Chunk up the queue contexts into multiple messages to avoid
1760 	 * sending a control queue message buffer that is too large
1761 	 */
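	/* Worked example with hypothetical sizes: if the control queue buffer
	 * is 4 KB, the virtchnl2_config_rx_queues header a few dozen bytes
	 * and each virtchnl2_rxq_info chunk several dozen bytes, roughly
	 * 60-70 queue contexts fit per message, so a vport with a couple of
	 * hundred RX/buffer queues is spread across a handful of
	 * VIRTCHNL2_OP_CONFIG_RX_QUEUES messages.
	 */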
1762 	config_sz = sizeof(struct virtchnl2_config_rx_queues);
1763 	chunk_sz = sizeof(struct virtchnl2_rxq_info);
1764 
1765 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1766 			   totqs);
1767 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
1768 
1769 	buf_sz = struct_size(crq, qinfo, num_chunks);
1770 	crq = kzalloc(buf_sz, GFP_KERNEL);
1771 	if (!crq)
1772 		return -ENOMEM;
1773 
1774 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
1775 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1776 
1777 	for (i = 0, k = 0; i < num_msgs; i++) {
1778 		memset(crq, 0, buf_sz);
1779 		crq->vport_id = cpu_to_le32(vport->vport_id);
1780 		crq->num_qinfo = cpu_to_le16(num_chunks);
1781 		memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
1782 
1783 		xn_params.send_buf.iov_base = crq;
1784 		xn_params.send_buf.iov_len = buf_sz;
1785 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1786 		if (reply_sz < 0)
1787 			return reply_sz;
1788 
1789 		k += num_chunks;
1790 		totqs -= num_chunks;
1791 		num_chunks = min(num_chunks, totqs);
1792 		/* Recalculate buffer size */
1793 		buf_sz = struct_size(crq, qinfo, num_chunks);
1794 	}
1795 
1796 	return 0;
1797 }
1798 
1799 /**
1800  * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
1801  * queues message
1802  * @vport: virtual port data structure
1803  * @ena: if true enable, false disable
1804  *
1805  * Send enable or disable queues virtchnl message. Returns 0 on success,
1806  * negative on failure.
1807  */
1808 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
1809 {
1810 	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
1811 	struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
1812 	u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
1813 	struct idpf_vc_xn_params xn_params = {};
1814 	struct virtchnl2_queue_chunks *qcs;
1815 	u32 config_sz, chunk_sz, buf_sz;
1816 	ssize_t reply_sz;
1817 	int i, j, k = 0;
1818 
1819 	num_txq = vport->num_txq + vport->num_complq;
1820 	num_rxq = vport->num_rxq + vport->num_bufq;
1821 	num_q = num_txq + num_rxq;
1822 	buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
1823 	qc = kzalloc(buf_sz, GFP_KERNEL);
1824 	if (!qc)
1825 		return -ENOMEM;
1826 
1827 	for (i = 0; i < vport->num_txq_grp; i++) {
1828 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1829 
1830 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1831 			qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1832 			qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1833 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1834 		}
1835 	}
1836 	if (vport->num_txq != k)
1837 		return -EINVAL;
1838 
1839 	if (!idpf_is_queue_model_split(vport->txq_model))
1840 		goto setup_rx;
1841 
1842 	for (i = 0; i < vport->num_txq_grp; i++, k++) {
1843 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1844 
1845 		qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1846 		qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
1847 		qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1848 	}
1849 	if (vport->num_complq != (k - vport->num_txq))
1850 		return -EINVAL;
1851 
1852 setup_rx:
1853 	for (i = 0; i < vport->num_rxq_grp; i++) {
1854 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1855 
1856 		if (idpf_is_queue_model_split(vport->rxq_model))
1857 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1858 		else
1859 			num_rxq = rx_qgrp->singleq.num_rxq;
1860 
1861 		for (j = 0; j < num_rxq; j++, k++) {
1862 			if (idpf_is_queue_model_split(vport->rxq_model)) {
1863 				qc[k].start_queue_id =
1864 				cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
1865 				qc[k].type =
1866 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1867 			} else {
1868 				qc[k].start_queue_id =
1869 				cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
1870 				qc[k].type =
1871 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1872 			}
1873 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1874 		}
1875 	}
1876 	if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
1877 		return -EINVAL;
1878 
1879 	if (!idpf_is_queue_model_split(vport->rxq_model))
1880 		goto send_msg;
1881 
1882 	for (i = 0; i < vport->num_rxq_grp; i++) {
1883 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1884 
1885 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
1886 			const struct idpf_buf_queue *q;
1887 
1888 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1889 			qc[k].type =
1890 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1891 			qc[k].start_queue_id = cpu_to_le32(q->q_id);
1892 			qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
1893 		}
1894 	}
1895 	if (vport->num_bufq != k - (vport->num_txq +
1896 				    vport->num_complq +
1897 				    vport->num_rxq))
1898 		return -EINVAL;
1899 
1900 send_msg:
1901 	/* Chunk up the queue info into multiple messages */
1902 	config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
1903 	chunk_sz = sizeof(struct virtchnl2_queue_chunk);
1904 
1905 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
1906 			   num_q);
1907 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
1908 
1909 	buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1910 	eq = kzalloc(buf_sz, GFP_KERNEL);
1911 	if (!eq)
1912 		return -ENOMEM;
1913 
1914 	if (ena) {
1915 		xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
1916 		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1917 	} else {
1918 		xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
1919 		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
1920 	}
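	/* The disable path, like the other teardown-time messages in this
	 * file, uses the shorter minimum timeout, which keeps an unresponsive
	 * control plane from stalling ifdown/reset for the full default
	 * timeout.
	 */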
1921 
1922 	for (i = 0, k = 0; i < num_msgs; i++) {
1923 		memset(eq, 0, buf_sz);
1924 		eq->vport_id = cpu_to_le32(vport->vport_id);
1925 		eq->chunks.num_chunks = cpu_to_le16(num_chunks);
1926 		qcs = &eq->chunks;
1927 		memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
1928 
1929 		xn_params.send_buf.iov_base = eq;
1930 		xn_params.send_buf.iov_len = buf_sz;
1931 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
1932 		if (reply_sz < 0)
1933 			return reply_sz;
1934 
1935 		k += num_chunks;
1936 		num_q -= num_chunks;
1937 		num_chunks = min(num_chunks, num_q);
1938 		/* Recalculate buffer size */
1939 		buf_sz = struct_size(eq, chunks.chunks, num_chunks);
1940 	}
1941 
1942 	return 0;
1943 }
1944 
1945 /**
1946  * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
1947  * vector message
1948  * @vport: virtual port data structure
1949  * @map: true for map and false for unmap
1950  *
1951  * Send map or unmap queue vector virtchnl message.  Returns 0 on success,
1952  * negative on failure.
1953  */
1954 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
1955 {
1956 	struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
1957 	struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
1958 	struct idpf_vc_xn_params xn_params = {};
1959 	u32 config_sz, chunk_sz, buf_sz;
1960 	u32 num_msgs, num_chunks, num_q;
1961 	ssize_t reply_sz;
1962 	int i, j, k = 0;
1963 
1964 	num_q = vport->num_txq + vport->num_rxq;
1965 
1966 	buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
1967 	vqv = kzalloc(buf_sz, GFP_KERNEL);
1968 	if (!vqv)
1969 		return -ENOMEM;
1970 
1971 	for (i = 0; i < vport->num_txq_grp; i++) {
1972 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1973 
1974 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
1975 			vqv[k].queue_type =
1976 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1977 			vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
1978 
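			/* In splitq mode, TX completions are reported on the
			 * completion queue, so the txq is mapped to its
			 * completion queue's vector and TX ITR settings.
			 */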
1979 			if (idpf_is_queue_model_split(vport->txq_model)) {
1980 				vqv[k].vector_id =
1981 				cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
1982 				vqv[k].itr_idx =
1983 				cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
1984 			} else {
1985 				vqv[k].vector_id =
1986 				cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
1987 				vqv[k].itr_idx =
1988 				cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
1989 			}
1990 		}
1991 	}
1992 
1993 	if (vport->num_txq != k)
1994 		return -EINVAL;
1995 
1996 	for (i = 0; i < vport->num_rxq_grp; i++) {
1997 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1998 		u16 num_rxq;
1999 
2000 		if (idpf_is_queue_model_split(vport->rxq_model))
2001 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
2002 		else
2003 			num_rxq = rx_qgrp->singleq.num_rxq;
2004 
2005 		for (j = 0; j < num_rxq; j++, k++) {
2006 			struct idpf_rx_queue *rxq;
2007 
2008 			if (idpf_is_queue_model_split(vport->rxq_model))
2009 				rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
2010 			else
2011 				rxq = rx_qgrp->singleq.rxqs[j];
2012 
2013 			vqv[k].queue_type =
2014 				cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
2015 			vqv[k].queue_id = cpu_to_le32(rxq->q_id);
2016 			vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
2017 			vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
2018 		}
2019 	}
2020 
2021 	if (idpf_is_queue_model_split(vport->txq_model)) {
2022 		if (vport->num_rxq != k - vport->num_complq)
2023 			return -EINVAL;
2024 	} else {
2025 		if (vport->num_rxq != k - vport->num_txq)
2026 			return -EINVAL;
2027 	}
2028 
2029 	/* Chunk up the vector info into multiple messages */
2030 	config_sz = sizeof(struct virtchnl2_queue_vector_maps);
2031 	chunk_sz = sizeof(struct virtchnl2_queue_vector);
2032 
2033 	num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
2034 			   num_q);
2035 	num_msgs = DIV_ROUND_UP(num_q, num_chunks);
2036 
2037 	buf_sz = struct_size(vqvm, qv_maps, num_chunks);
2038 	vqvm = kzalloc(buf_sz, GFP_KERNEL);
2039 	if (!vqvm)
2040 		return -ENOMEM;
2041 
2042 	if (map) {
2043 		xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
2044 		xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2045 	} else {
2046 		xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
2047 		xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
2048 	}
2049 
2050 	for (i = 0, k = 0; i < num_msgs; i++) {
2051 		memset(vqvm, 0, buf_sz);
2052 		xn_params.send_buf.iov_base = vqvm;
2053 		xn_params.send_buf.iov_len = buf_sz;
2054 		vqvm->vport_id = cpu_to_le32(vport->vport_id);
2055 		vqvm->num_qv_maps = cpu_to_le16(num_chunks);
2056 		memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
2057 
2058 		reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2059 		if (reply_sz < 0)
2060 			return reply_sz;
2061 
2062 		k += num_chunks;
2063 		num_q -= num_chunks;
2064 		num_chunks = min(num_chunks, num_q);
2065 		/* Recalculate buffer size */
2066 		buf_sz = struct_size(vqvm, qv_maps, num_chunks);
2067 	}
2068 
2069 	return 0;
2070 }
2071 
2072 /**
2073  * idpf_send_enable_queues_msg - send enable queues virtchnl message
2074  * @vport: Virtual port private data structure
2075  *
2076  * Will send enable queues virtchnl message.  Returns 0 on success, negative on
2077  * failure.
2078  */
2079 int idpf_send_enable_queues_msg(struct idpf_vport *vport)
2080 {
2081 	return idpf_send_ena_dis_queues_msg(vport, true);
2082 }
2083 
2084 /**
2085  * idpf_send_disable_queues_msg - send disable queues virtchnl message
2086  * @vport: Virtual port private data structure
2087  *
2088  * Will send disable queues virtchnl message.  Returns 0 on success, negative
2089  * on failure.
2090  */
2091 int idpf_send_disable_queues_msg(struct idpf_vport *vport)
2092 {
2093 	int err, i;
2094 
2095 	err = idpf_send_ena_dis_queues_msg(vport, false);
2096 	if (err)
2097 		return err;
2098 
2099 	/* switch to poll mode as interrupts will be disabled after disable
2100 	 * queues virtchnl message is sent
2101 	 */
2102 	for (i = 0; i < vport->num_txq; i++)
2103 		idpf_queue_set(POLL_MODE, vport->txqs[i]);
2104 
2105 	/* schedule the napi to receive all the marker packets */
2106 	local_bh_disable();
2107 	for (i = 0; i < vport->num_q_vectors; i++)
2108 		napi_schedule(&vport->q_vectors[i].napi);
2109 	local_bh_enable();
2110 
2111 	return idpf_wait_for_marker_event(vport);
2112 }
2113 
2114 /**
2115  * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
2116  * structure
2117  * @dchunks: Destination chunks to store data to
2118  * @schunks: Source chunks to copy data from
2119  * @num_chunks: number of chunks to copy
2120  */
2121 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
2122 					     struct virtchnl2_queue_reg_chunk *schunks,
2123 					     u16 num_chunks)
2124 {
2125 	u16 i;
2126 
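	/* The register chunks received from the control plane also carry
	 * queue tail register info, which the del/enable/disable queue
	 * messages do not need, so only the type/start/count fields are
	 * copied over.
	 */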
2127 	for (i = 0; i < num_chunks; i++) {
2128 		dchunks[i].type = schunks[i].type;
2129 		dchunks[i].start_queue_id = schunks[i].start_queue_id;
2130 		dchunks[i].num_queues = schunks[i].num_queues;
2131 	}
2132 }
2133 
2134 /**
2135  * idpf_send_delete_queues_msg - send delete queues virtchnl message
2136  * @vport: Virtual port private data structure
2137  *
2138  * Will send delete queues virtchnl message. Returns 0 on success, negative on
2139  * failure.
2140  */
2141 int idpf_send_delete_queues_msg(struct idpf_vport *vport)
2142 {
2143 	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
2144 	struct virtchnl2_create_vport *vport_params;
2145 	struct virtchnl2_queue_reg_chunks *chunks;
2146 	struct idpf_vc_xn_params xn_params = {};
2147 	struct idpf_vport_config *vport_config;
2148 	u16 vport_idx = vport->idx;
2149 	ssize_t reply_sz;
2150 	u16 num_chunks;
2151 	int buf_size;
2152 
2153 	vport_config = vport->adapter->vport_config[vport_idx];
2154 	if (vport_config->req_qs_chunks) {
2155 		chunks = &vport_config->req_qs_chunks->chunks;
2156 	} else {
2157 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
2158 		chunks = &vport_params->chunks;
2159 	}
2160 
2161 	num_chunks = le16_to_cpu(chunks->num_chunks);
2162 	buf_size = struct_size(eq, chunks.chunks, num_chunks);
2163 
2164 	eq = kzalloc(buf_size, GFP_KERNEL);
2165 	if (!eq)
2166 		return -ENOMEM;
2167 
2168 	eq->vport_id = cpu_to_le32(vport->vport_id);
2169 	eq->chunks.num_chunks = cpu_to_le16(num_chunks);
2170 
2171 	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
2172 					 num_chunks);
2173 
2174 	xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
2175 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
2176 	xn_params.send_buf.iov_base = eq;
2177 	xn_params.send_buf.iov_len = buf_size;
2178 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2179 
2180 	return reply_sz < 0 ? reply_sz : 0;
2181 }
2182 
2183 /**
2184  * idpf_send_config_queues_msg - Send config queues virtchnl message
2185  * @vport: Virtual port private data structure
2186  *
2187  * Will send config queues virtchnl message. Returns 0 on success, negative on
2188  * failure.
2189  */
2190 int idpf_send_config_queues_msg(struct idpf_vport *vport)
2191 {
2192 	int err;
2193 
2194 	err = idpf_send_config_tx_queues_msg(vport);
2195 	if (err)
2196 		return err;
2197 
2198 	return idpf_send_config_rx_queues_msg(vport);
2199 }
2200 
2201 /**
2202  * idpf_send_add_queues_msg - Send virtchnl add queues message
2203  * @vport: Virtual port private data structure
2204  * @num_tx_q: number of transmit queues
2205  * @num_complq: number of transmit completion queues
2206  * @num_rx_q: number of receive queues
2207  * @num_rx_bufq: number of receive buffer queues
2208  *
2209  * Returns 0 on success, negative on failure. vport _MUST_ be const here as
2210  * we should not change any fields within vport itself in this function.
2211  */
2212 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
2213 			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
2214 {
2215 	struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
2216 	struct idpf_vc_xn_params xn_params = {};
2217 	struct idpf_vport_config *vport_config;
2218 	struct virtchnl2_add_queues aq = {};
2219 	u16 vport_idx = vport->idx;
2220 	ssize_t reply_sz;
2221 	int size;
2222 
2223 	vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2224 	if (!vc_msg)
2225 		return -ENOMEM;
2226 
2227 	vport_config = vport->adapter->vport_config[vport_idx];
2228 	kfree(vport_config->req_qs_chunks);
2229 	vport_config->req_qs_chunks = NULL;
2230 
2231 	aq.vport_id = cpu_to_le32(vport->vport_id);
2232 	aq.num_tx_q = cpu_to_le16(num_tx_q);
2233 	aq.num_tx_complq = cpu_to_le16(num_complq);
2234 	aq.num_rx_q = cpu_to_le16(num_rx_q);
2235 	aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
2236 
2237 	xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
2238 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2239 	xn_params.send_buf.iov_base = &aq;
2240 	xn_params.send_buf.iov_len = sizeof(aq);
2241 	xn_params.recv_buf.iov_base = vc_msg;
2242 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2243 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2244 	if (reply_sz < 0)
2245 		return reply_sz;
2246 
2247 	/* compare vc_msg num queues with vport num queues */
2248 	if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
2249 	    le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
2250 	    le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
2251 	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
2252 		return -EINVAL;
2253 
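	/* Make sure the reply is large enough to hold all of the advertised
	 * queue chunks before caching it; the cached chunks are consumed
	 * later, e.g. by idpf_vport_queue_ids_init().
	 */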
2254 	size = struct_size(vc_msg, chunks.chunks,
2255 			   le16_to_cpu(vc_msg->chunks.num_chunks));
2256 	if (reply_sz < size)
2257 		return -EIO;
2258 
2259 	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
2260 	if (!vport_config->req_qs_chunks)
2261 		return -ENOMEM;
2262 
2263 	return 0;
2264 }
2265 
2266 /**
2267  * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
2268  * @adapter: Driver specific private structure
2269  * @num_vectors: number of vectors to be allocated
2270  *
2271  * Returns 0 on success, negative on failure.
2272  */
2273 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
2274 {
2275 	struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
2276 	struct idpf_vc_xn_params xn_params = {};
2277 	struct virtchnl2_alloc_vectors ac = {};
2278 	ssize_t reply_sz;
2279 	u16 num_vchunks;
2280 	int size;
2281 
2282 	ac.num_vectors = cpu_to_le16(num_vectors);
2283 
2284 	rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2285 	if (!rcvd_vec)
2286 		return -ENOMEM;
2287 
2288 	xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
2289 	xn_params.send_buf.iov_base = &ac;
2290 	xn_params.send_buf.iov_len = sizeof(ac);
2291 	xn_params.recv_buf.iov_base = rcvd_vec;
2292 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2293 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2294 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2295 	if (reply_sz < 0)
2296 		return reply_sz;
2297 
2298 	num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
2299 	size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
2300 	if (reply_sz < size)
2301 		return -EIO;
2302 
2303 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
2304 		return -EINVAL;
2305 
2306 	kfree(adapter->req_vec_chunks);
2307 	adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
2308 	if (!adapter->req_vec_chunks)
2309 		return -ENOMEM;
2310 
2311 	if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
2312 		kfree(adapter->req_vec_chunks);
2313 		adapter->req_vec_chunks = NULL;
2314 		return -EINVAL;
2315 	}
2316 
2317 	return 0;
2318 }
2319 
2320 /**
2321  * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
2322  * @adapter: Driver specific private structure
2323  *
2324  * Returns 0 on success, negative on failure.
2325  */
2326 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
2327 {
2328 	struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
2329 	struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
2330 	struct idpf_vc_xn_params xn_params = {};
2331 	ssize_t reply_sz;
2332 	int buf_size;
2333 
2334 	buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
2335 
2336 	xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
2337 	xn_params.send_buf.iov_base = vcs;
2338 	xn_params.send_buf.iov_len = buf_size;
2339 	xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
2340 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2341 	if (reply_sz < 0)
2342 		return reply_sz;
2343 
2344 	kfree(adapter->req_vec_chunks);
2345 	adapter->req_vec_chunks = NULL;
2346 
2347 	return 0;
2348 }
2349 
2350 /**
2351  * idpf_get_max_vfs - Get max number of VFs supported
2352  * @adapter: Driver specific private structure
2353  *
2354  * Returns max number of VFs
2355  */
2356 static int idpf_get_max_vfs(struct idpf_adapter *adapter)
2357 {
2358 	return le16_to_cpu(adapter->caps.max_sriov_vfs);
2359 }
2360 
2361 /**
2362  * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
2363  * @adapter: Driver specific private structure
2364  * @num_vfs: number of virtual functions to be created
2365  *
2366  * Returns 0 on success, negative on failure.
2367  */
2368 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
2369 {
2370 	struct virtchnl2_sriov_vfs_info svi = {};
2371 	struct idpf_vc_xn_params xn_params = {};
2372 	ssize_t reply_sz;
2373 
2374 	svi.num_vfs = cpu_to_le16(num_vfs);
2375 	xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
2376 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2377 	xn_params.send_buf.iov_base = &svi;
2378 	xn_params.send_buf.iov_len = sizeof(svi);
2379 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2380 
2381 	return reply_sz < 0 ? reply_sz : 0;
2382 }
2383 
2384 /**
2385  * idpf_send_get_stats_msg - Send virtchnl get statistics message
2386  * @vport: vport to get stats for
2387  *
2388  * Returns 0 on success, negative on failure.
2389  */
2390 int idpf_send_get_stats_msg(struct idpf_vport *vport)
2391 {
2392 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
2393 	struct rtnl_link_stats64 *netstats = &np->netstats;
2394 	struct virtchnl2_vport_stats stats_msg = {};
2395 	struct idpf_vc_xn_params xn_params = {};
2396 	ssize_t reply_sz;
2397 
2399 	/* Don't send get_stats message if the vport is down */
2400 	if (np->state <= __IDPF_VPORT_DOWN)
2401 		return 0;
2402 
2403 	stats_msg.vport_id = cpu_to_le32(vport->vport_id);
2404 
2405 	xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
2406 	xn_params.send_buf.iov_base = &stats_msg;
2407 	xn_params.send_buf.iov_len = sizeof(stats_msg);
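	/* GET_STATS replies with a fully populated virtchnl2_vport_stats, so
	 * the request buffer doubles as the receive buffer.
	 */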
2408 	xn_params.recv_buf = xn_params.send_buf;
2409 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2410 
2411 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2412 	if (reply_sz < 0)
2413 		return reply_sz;
2414 	if (reply_sz < sizeof(stats_msg))
2415 		return -EIO;
2416 
2417 	spin_lock_bh(&np->stats_lock);
2418 
2419 	netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
2420 			       le64_to_cpu(stats_msg.rx_multicast) +
2421 			       le64_to_cpu(stats_msg.rx_broadcast);
2422 	netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
2423 			       le64_to_cpu(stats_msg.tx_multicast) +
2424 			       le64_to_cpu(stats_msg.tx_broadcast);
2425 	netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
2426 	netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
2427 	netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
2428 	netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
2429 	netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
2430 	netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
2431 
2432 	vport->port_stats.vport_stats = stats_msg;
2433 
2434 	spin_unlock_bh(&np->stats_lock);
2435 
2436 	return 0;
2437 }
2438 
2439 /**
2440  * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
2441  * @vport: virtual port data structure
2442  * @get: flag to set or get RSS lookup table
2443  *
2444  * Returns 0 on success, negative on failure.
2445  */
2446 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
2447 {
2448 	struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
2449 	struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
2450 	struct idpf_vc_xn_params xn_params = {};
2451 	struct idpf_rss_data *rss_data;
2452 	int buf_size, lut_buf_size;
2453 	ssize_t reply_sz;
2454 	int i;
2455 
2456 	rss_data =
2457 		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
2458 	buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
2459 	rl = kzalloc(buf_size, GFP_KERNEL);
2460 	if (!rl)
2461 		return -ENOMEM;
2462 
2463 	rl->vport_id = cpu_to_le32(vport->vport_id);
2464 
2465 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2466 	xn_params.send_buf.iov_base = rl;
2467 	xn_params.send_buf.iov_len = buf_size;
2468 
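	/* For a GET only vport_id matters in the request; for a SET the
	 * user-configured LUT is copied into the payload below.
	 */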
2469 	if (get) {
2470 		recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2471 		if (!recv_rl)
2472 			return -ENOMEM;
2473 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
2474 		xn_params.recv_buf.iov_base = recv_rl;
2475 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2476 	} else {
2477 		rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
2478 		for (i = 0; i < rss_data->rss_lut_size; i++)
2479 			rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
2480 
2481 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
2482 	}
2483 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2484 	if (reply_sz < 0)
2485 		return reply_sz;
2486 	if (!get)
2487 		return 0;
2488 	if (reply_sz < sizeof(struct virtchnl2_rss_lut))
2489 		return -EIO;
2490 
2491 	lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
2492 	if (reply_sz < lut_buf_size)
2493 		return -EIO;
2494 
2495 	/* size didn't change, we can reuse existing lut buf */
2496 	if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
2497 		goto do_memcpy;
2498 
2499 	rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
2500 	kfree(rss_data->rss_lut);
2501 
2502 	rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
2503 	if (!rss_data->rss_lut) {
2504 		rss_data->rss_lut_size = 0;
2505 		return -ENOMEM;
2506 	}
2507 
2508 do_memcpy:
2509 	memcpy(rss_data->rss_lut, recv_rl->lut, lut_buf_size);
2510 
2511 	return 0;
2512 }
2513 
2514 /**
2515  * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
2516  * @vport: virtual port data structure
2517  * @get: flag to set or get RSS hash key
2518  *
2519  * Returns 0 on success, negative on failure
2520  */
2521 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
2522 {
2523 	struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
2524 	struct virtchnl2_rss_key *rk __free(kfree) = NULL;
2525 	struct idpf_vc_xn_params xn_params = {};
2526 	struct idpf_rss_data *rss_data;
2527 	ssize_t reply_sz;
2528 	int i, buf_size;
2529 	u16 key_size;
2530 
2531 	rss_data =
2532 		&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
2533 	buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
2534 	rk = kzalloc(buf_size, GFP_KERNEL);
2535 	if (!rk)
2536 		return -ENOMEM;
2537 
2538 	rk->vport_id = cpu_to_le32(vport->vport_id);
2539 	xn_params.send_buf.iov_base = rk;
2540 	xn_params.send_buf.iov_len = buf_size;
2541 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2542 	if (get) {
2543 		recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2544 		if (!recv_rk)
2545 			return -ENOMEM;
2546 
2547 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
2548 		xn_params.recv_buf.iov_base = recv_rk;
2549 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2550 	} else {
2551 		rk->key_len = cpu_to_le16(rss_data->rss_key_size);
2552 		for (i = 0; i < rss_data->rss_key_size; i++)
2553 			rk->key_flex[i] = rss_data->rss_key[i];
2554 
2555 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
2556 	}
2557 
2558 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2559 	if (reply_sz < 0)
2560 		return reply_sz;
2561 	if (!get)
2562 		return 0;
2563 	if (reply_sz < sizeof(struct virtchnl2_rss_key))
2564 		return -EIO;
2565 
2566 	key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
2567 			 le16_to_cpu(recv_rk->key_len));
2568 	if (reply_sz < key_size)
2569 		return -EIO;
2570 
2571 	/* key len didn't change, reuse existing buf */
2572 	if (rss_data->rss_key_size == key_size)
2573 		goto do_memcpy;
2574 
2575 	rss_data->rss_key_size = key_size;
2576 	kfree(rss_data->rss_key);
2577 	rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
2578 	if (!rss_data->rss_key) {
2579 		rss_data->rss_key_size = 0;
2580 		return -ENOMEM;
2581 	}
2582 
2583 do_memcpy:
2584 	memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
2585 
2586 	return 0;
2587 }
2588 
2589 /**
2590  * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
2591  * @ptype: ptype lookup table
2592  * @pstate: state machine for ptype lookup table
2593  * @ipv4: true for IPv4, false for IPv6
2594  * @frag: fragmentation allowed
2595  *
2596  */
2597 static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
2598 				   struct idpf_ptype_state *pstate,
2599 				   bool ipv4, bool frag)
2600 {
2601 	if (!pstate->outer_ip || !pstate->outer_frag) {
2602 		pstate->outer_ip = true;
2603 
2604 		if (ipv4)
2605 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
2606 		else
2607 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
2608 
2609 		if (frag) {
2610 			ptype->outer_frag = LIBETH_RX_PT_FRAG;
2611 			pstate->outer_frag = true;
2612 		}
2613 	} else {
2614 		ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
2615 		pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
2616 
2617 		if (ipv4)
2618 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
2619 		else
2620 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
2621 
2622 		if (frag)
2623 			ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
2624 	}
2625 }
2626 
2627 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
2628 {
2629 	if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
2630 	    ptype->inner_prot)
2631 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
2632 	else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
2633 		 ptype->outer_ip)
2634 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
2635 	else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
2636 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
2637 	else
2638 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
2639 
2640 	libeth_rx_pt_gen_hash_type(ptype);
2641 }
2642 
2643 /**
2644  * idpf_send_get_rx_ptype_msg - Send virtchnl get ptype info message
2645  * @vport: virtual port data structure
2646  *
2647  * Returns 0 on success, negative on failure.
2648  */
2649 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
2650 {
2651 	struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
2652 	struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
2653 	struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
2654 	int max_ptype, ptypes_recvd = 0, ptype_offset;
2655 	struct idpf_adapter *adapter = vport->adapter;
2656 	struct idpf_vc_xn_params xn_params = {};
2657 	u16 next_ptype_id = 0;
2658 	ssize_t reply_sz;
2659 	int i, j, k;
2660 
2661 	if (vport->rx_ptype_lkup)
2662 		return 0;
2663 
2664 	if (idpf_is_queue_model_split(vport->rxq_model))
2665 		max_ptype = IDPF_RX_MAX_PTYPE;
2666 	else
2667 		max_ptype = IDPF_RX_MAX_BASE_PTYPE;
2668 
2669 	ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
2670 	if (!ptype_lkup)
2671 		return -ENOMEM;
2672 
2673 	get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
2674 	if (!get_ptype_info)
2675 		return -ENOMEM;
2676 
2677 	ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2678 	if (!ptype_info)
2679 		return -ENOMEM;
2680 
2681 	xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
2682 	xn_params.send_buf.iov_base = get_ptype_info;
2683 	xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
2684 	xn_params.recv_buf.iov_base = ptype_info;
2685 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2686 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2687 
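	/* Ptype info is requested in windows of at most
	 * IDPF_RX_MAX_PTYPES_PER_BUF ids; each reply packs variable-length
	 * virtchnl2_ptype entries and may end early with
	 * IDPF_INVALID_PTYPE_ID.
	 */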
2688 	while (next_ptype_id < max_ptype) {
2689 		get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
2690 
2691 		if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
2692 			get_ptype_info->num_ptypes =
2693 				cpu_to_le16(max_ptype - next_ptype_id);
2694 		else
2695 			get_ptype_info->num_ptypes =
2696 				cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
2697 
2698 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2699 		if (reply_sz < 0)
2700 			return reply_sz;
2701 
2702 		ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
2703 		if (ptypes_recvd > max_ptype)
2704 			return -EINVAL;
2705 
2706 		next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
2707 				le16_to_cpu(get_ptype_info->num_ptypes);
2708 
2709 		ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
2710 
2711 		for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
2712 			struct idpf_ptype_state pstate = { };
2713 			struct virtchnl2_ptype *ptype;
2714 			u16 id;
2715 
2716 			ptype = (struct virtchnl2_ptype *)
2717 					((u8 *)ptype_info + ptype_offset);
2718 
2719 			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
2720 			if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
2721 				return -EINVAL;
2722 
2723 			/* 0xFFFF indicates end of ptypes */
2724 			if (le16_to_cpu(ptype->ptype_id_10) ==
2725 							IDPF_INVALID_PTYPE_ID)
2726 				goto out;
2727 
2728 			if (idpf_is_queue_model_split(vport->rxq_model))
2729 				k = le16_to_cpu(ptype->ptype_id_10);
2730 			else
2731 				k = ptype->ptype_id_8;
2732 
2733 			for (j = 0; j < ptype->proto_id_count; j++) {
2734 				id = le16_to_cpu(ptype->proto_id[j]);
2735 				switch (id) {
2736 				case VIRTCHNL2_PROTO_HDR_GRE:
2737 					if (pstate.tunnel_state ==
2738 							IDPF_PTYPE_TUNNEL_IP) {
2739 						ptype_lkup[k].tunnel_type =
2740 						LIBETH_RX_PT_TUNNEL_IP_GRENAT;
2741 						pstate.tunnel_state |=
2742 						IDPF_PTYPE_TUNNEL_IP_GRENAT;
2743 					}
2744 					break;
2745 				case VIRTCHNL2_PROTO_HDR_MAC:
2746 					ptype_lkup[k].outer_ip =
2747 						LIBETH_RX_PT_OUTER_L2;
2748 					if (pstate.tunnel_state ==
2749 							IDPF_TUN_IP_GRE) {
2750 						ptype_lkup[k].tunnel_type =
2751 						LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
2752 						pstate.tunnel_state |=
2753 						IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
2754 					}
2755 					break;
2756 				case VIRTCHNL2_PROTO_HDR_IPV4:
2757 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2758 							       &pstate, true,
2759 							       false);
2760 					break;
2761 				case VIRTCHNL2_PROTO_HDR_IPV6:
2762 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2763 							       &pstate, false,
2764 							       false);
2765 					break;
2766 				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
2767 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2768 							       &pstate, true,
2769 							       true);
2770 					break;
2771 				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
2772 					idpf_fill_ptype_lookup(&ptype_lkup[k],
2773 							       &pstate, false,
2774 							       true);
2775 					break;
2776 				case VIRTCHNL2_PROTO_HDR_UDP:
2777 					ptype_lkup[k].inner_prot =
2778 					LIBETH_RX_PT_INNER_UDP;
2779 					break;
2780 				case VIRTCHNL2_PROTO_HDR_TCP:
2781 					ptype_lkup[k].inner_prot =
2782 					LIBETH_RX_PT_INNER_TCP;
2783 					break;
2784 				case VIRTCHNL2_PROTO_HDR_SCTP:
2785 					ptype_lkup[k].inner_prot =
2786 					LIBETH_RX_PT_INNER_SCTP;
2787 					break;
2788 				case VIRTCHNL2_PROTO_HDR_ICMP:
2789 					ptype_lkup[k].inner_prot =
2790 					LIBETH_RX_PT_INNER_ICMP;
2791 					break;
2792 				case VIRTCHNL2_PROTO_HDR_PAY:
2793 					ptype_lkup[k].payload_layer =
2794 						LIBETH_RX_PT_PAYLOAD_L2;
2795 					break;
2796 				case VIRTCHNL2_PROTO_HDR_ICMPV6:
2797 				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
2798 				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
2799 				case VIRTCHNL2_PROTO_HDR_POST_MAC:
2800 				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
2801 				case VIRTCHNL2_PROTO_HDR_SVLAN:
2802 				case VIRTCHNL2_PROTO_HDR_CVLAN:
2803 				case VIRTCHNL2_PROTO_HDR_MPLS:
2804 				case VIRTCHNL2_PROTO_HDR_MMPLS:
2805 				case VIRTCHNL2_PROTO_HDR_PTP:
2806 				case VIRTCHNL2_PROTO_HDR_CTRL:
2807 				case VIRTCHNL2_PROTO_HDR_LLDP:
2808 				case VIRTCHNL2_PROTO_HDR_ARP:
2809 				case VIRTCHNL2_PROTO_HDR_ECP:
2810 				case VIRTCHNL2_PROTO_HDR_EAPOL:
2811 				case VIRTCHNL2_PROTO_HDR_PPPOD:
2812 				case VIRTCHNL2_PROTO_HDR_PPPOE:
2813 				case VIRTCHNL2_PROTO_HDR_IGMP:
2814 				case VIRTCHNL2_PROTO_HDR_AH:
2815 				case VIRTCHNL2_PROTO_HDR_ESP:
2816 				case VIRTCHNL2_PROTO_HDR_IKE:
2817 				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
2818 				case VIRTCHNL2_PROTO_HDR_L2TPV2:
2819 				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
2820 				case VIRTCHNL2_PROTO_HDR_L2TPV3:
2821 				case VIRTCHNL2_PROTO_HDR_GTP:
2822 				case VIRTCHNL2_PROTO_HDR_GTP_EH:
2823 				case VIRTCHNL2_PROTO_HDR_GTPCV2:
2824 				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
2825 				case VIRTCHNL2_PROTO_HDR_GTPU:
2826 				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
2827 				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
2828 				case VIRTCHNL2_PROTO_HDR_ECPRI:
2829 				case VIRTCHNL2_PROTO_HDR_VRRP:
2830 				case VIRTCHNL2_PROTO_HDR_OSPF:
2831 				case VIRTCHNL2_PROTO_HDR_TUN:
2832 				case VIRTCHNL2_PROTO_HDR_NVGRE:
2833 				case VIRTCHNL2_PROTO_HDR_VXLAN:
2834 				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
2835 				case VIRTCHNL2_PROTO_HDR_GENEVE:
2836 				case VIRTCHNL2_PROTO_HDR_NSH:
2837 				case VIRTCHNL2_PROTO_HDR_QUIC:
2838 				case VIRTCHNL2_PROTO_HDR_PFCP:
2839 				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
2840 				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
2841 				case VIRTCHNL2_PROTO_HDR_RTP:
2842 				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
2843 					break;
2844 				default:
2845 					break;
2846 				}
2847 			}
2848 
2849 			idpf_finalize_ptype_lookup(&ptype_lkup[k]);
2850 		}
2851 	}
2852 
2853 out:
2854 	vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
2855 
2856 	return 0;
2857 }
2858 
2859 /**
2860  * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
2861  *				    message
2862  * @vport: virtual port data structure
2863  *
2864  * Returns 0 on success, negative on failure.
2865  */
2866 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
2867 {
2868 	struct idpf_vc_xn_params xn_params = {};
2869 	struct virtchnl2_loopback loopback;
2870 	ssize_t reply_sz;
2871 
2872 	loopback.vport_id = cpu_to_le32(vport->vport_id);
2873 	loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
2874 
2875 	xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
2876 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2877 	xn_params.send_buf.iov_base = &loopback;
2878 	xn_params.send_buf.iov_len = sizeof(loopback);
2879 	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
2880 
2881 	return reply_sz < 0 ? reply_sz : 0;
2882 }
2883 
2884 /**
2885  * idpf_find_ctlq - Given a type and id, find ctlq info
2886  * @hw: hardware struct
2887  * @type: type of ctlq to find
2888  * @id: ctlq id to find
2889  *
2890  * Returns pointer to found ctlq info struct, NULL otherwise.
2891  */
2892 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
2893 					     enum idpf_ctlq_type type, int id)
2894 {
2895 	struct idpf_ctlq_info *cq, *tmp;
2896 
2897 	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
2898 		if (cq->q_id == id && cq->cq_type == type)
2899 			return cq;
2900 
2901 	return NULL;
2902 }
2903 
2904 /**
2905  * idpf_init_dflt_mbx - Set up default mailbox parameters and make request
2906  * @adapter: adapter info struct
2907  *
2908  * Returns 0 on success, negative otherwise
2909  */
2910 int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
2911 {
2912 	struct idpf_ctlq_create_info ctlq_info[] = {
2913 		{
2914 			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
2915 			.id = IDPF_DFLT_MBX_ID,
2916 			.len = IDPF_DFLT_MBX_Q_LEN,
2917 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2918 		},
2919 		{
2920 			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
2921 			.id = IDPF_DFLT_MBX_ID,
2922 			.len = IDPF_DFLT_MBX_Q_LEN,
2923 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
2924 		}
2925 	};
2926 	struct idpf_hw *hw = &adapter->hw;
2927 	int err;
2928 
2929 	adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
2930 
2931 	err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
2932 	if (err)
2933 		return err;
2934 
2935 	hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
2936 				 IDPF_DFLT_MBX_ID);
2937 	hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
2938 				 IDPF_DFLT_MBX_ID);
2939 
2940 	if (!hw->asq || !hw->arq) {
2941 		idpf_ctlq_deinit(hw);
2942 
2943 		return -ENOENT;
2944 	}
2945 
2946 	adapter->state = __IDPF_VER_CHECK;
2947 
2948 	return 0;
2949 }
2950 
2951 /**
2952  * idpf_deinit_dflt_mbx - Free up the default mailbox ctlqs
2953  * @adapter: Driver specific private data structure
2954  */
2955 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
2956 {
2957 	if (adapter->hw.arq && adapter->hw.asq) {
2958 		idpf_mb_clean(adapter);
2959 		idpf_ctlq_deinit(&adapter->hw);
2960 	}
2961 	adapter->hw.arq = NULL;
2962 	adapter->hw.asq = NULL;
2963 }
2964 
2965 /**
2966  * idpf_vport_params_buf_rel - Release memory for MailBox resources
2967  * @adapter: Driver specific private data structure
2968  *
2969  * Will release the memory used to hold the vport parameters received over the mailbox
2970  */
2971 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
2972 {
2973 	kfree(adapter->vport_params_recvd);
2974 	adapter->vport_params_recvd = NULL;
2975 	kfree(adapter->vport_params_reqd);
2976 	adapter->vport_params_reqd = NULL;
2977 	kfree(adapter->vport_ids);
2978 	adapter->vport_ids = NULL;
2979 }
2980 
2981 /**
2982  * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources
2983  * @adapter: Driver specific private data structure
2984  *
2985  * Will allocate memory to hold the vport parameters received over the mailbox
2986  */
2987 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
2988 {
2989 	u16 num_max_vports = idpf_get_max_vports(adapter);
2990 
2991 	adapter->vport_params_reqd = kcalloc(num_max_vports,
2992 					     sizeof(*adapter->vport_params_reqd),
2993 					     GFP_KERNEL);
2994 	if (!adapter->vport_params_reqd)
2995 		return -ENOMEM;
2996 
2997 	adapter->vport_params_recvd = kcalloc(num_max_vports,
2998 					      sizeof(*adapter->vport_params_recvd),
2999 					      GFP_KERNEL);
3000 	if (!adapter->vport_params_recvd)
3001 		goto err_mem;
3002 
3003 	adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
3004 	if (!adapter->vport_ids)
3005 		goto err_mem;
3006 
3007 	if (adapter->vport_config)
3008 		return 0;
3009 
3010 	adapter->vport_config = kcalloc(num_max_vports,
3011 					sizeof(*adapter->vport_config),
3012 					GFP_KERNEL);
3013 	if (!adapter->vport_config)
3014 		goto err_mem;
3015 
3016 	return 0;
3017 
3018 err_mem:
3019 	idpf_vport_params_buf_rel(adapter);
3020 
3021 	return -ENOMEM;
3022 }
3023 
3024 /**
3025  * idpf_vc_core_init - Initialize state machine and get driver specific
3026  * resources
3027  * @adapter: Driver specific private structure
3028  *
3029  * This function will initialize the state machine and request all necessary
3030  * resources required by the device driver. Once the state machine is
3031  * initialized, allocate memory to store vport specific information and also
3032  * requests required interrupts.
3033  *
3034  * Returns 0 on success, -EAGAIN if the function should be called again,
3035  * otherwise negative on failure.
3036  */
3037 int idpf_vc_core_init(struct idpf_adapter *adapter)
3038 {
3039 	int task_delay = 30;
3040 	u16 num_max_vports;
3041 	int err = 0;
3042 
3043 	if (!adapter->vcxn_mngr) {
3044 		adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
3045 		if (!adapter->vcxn_mngr) {
3046 			err = -ENOMEM;
3047 			goto init_failed;
3048 		}
3049 	}
3050 	idpf_vc_xn_init(adapter->vcxn_mngr);
3051 
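	/* Walk the early init state machine: negotiate the virtchnl version
	 * (__IDPF_VER_CHECK), then request device capabilities
	 * (__IDPF_GET_CAPS), before moving on to software init.
	 */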
3052 	while (adapter->state != __IDPF_INIT_SW) {
3053 		switch (adapter->state) {
3054 		case __IDPF_VER_CHECK:
3055 			err = idpf_send_ver_msg(adapter);
3056 			switch (err) {
3057 			case 0:
3058 				/* success, move state machine forward */
3059 				adapter->state = __IDPF_GET_CAPS;
3060 				fallthrough;
3061 			case -EAGAIN:
3062 				goto restart;
3063 			default:
3064 				/* Something bad happened, try again but only a
3065 				 * few times.
3066 				 */
3067 				goto init_failed;
3068 			}
3069 		case __IDPF_GET_CAPS:
3070 			err = idpf_send_get_caps_msg(adapter);
3071 			if (err)
3072 				goto init_failed;
3073 			adapter->state = __IDPF_INIT_SW;
3074 			break;
3075 		default:
3076 			dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
3077 				adapter->state);
3078 			err = -EINVAL;
3079 			goto init_failed;
3080 		}
3081 		break;
3082 restart:
3083 		/* Give the device enough time to respond before proceeding
3084 		 * further with the state machine
3085 		 */
3086 		msleep(task_delay);
3087 	}
3088 
3089 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
3090 		err = idpf_send_get_lan_memory_regions(adapter);
3091 		if (err) {
3092 			dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
3093 				err);
3094 			return -EINVAL;
3095 		}
3096 	} else {
3097 		/* Fallback to mapping the remaining regions of the entire BAR */
3098 		err = idpf_calc_remaining_mmio_regs(adapter);
3099 		if (err) {
3100 			dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
3101 				err);
3102 			return -ENOMEM;
3103 		}
3104 	}
3105 
3106 	err = idpf_map_lan_mmio_regs(adapter);
3107 	if (err) {
3108 		dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
3109 			err);
3110 		return -ENOMEM;
3111 	}
3112 
3113 	pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
3114 	num_max_vports = idpf_get_max_vports(adapter);
3115 	adapter->max_vports = num_max_vports;
3116 	adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
3117 				  GFP_KERNEL);
3118 	if (!adapter->vports)
3119 		return -ENOMEM;
3120 
3121 	if (!adapter->netdevs) {
3122 		adapter->netdevs = kcalloc(num_max_vports,
3123 					   sizeof(struct net_device *),
3124 					   GFP_KERNEL);
3125 		if (!adapter->netdevs) {
3126 			err = -ENOMEM;
3127 			goto err_netdev_alloc;
3128 		}
3129 	}
3130 
3131 	err = idpf_vport_params_buf_alloc(adapter);
3132 	if (err) {
3133 		dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
3134 			err);
3135 		goto err_netdev_alloc;
3136 	}
3137 
3138 	/* Start the mailbox task before requesting vectors. This ensures the
3139 	 * vector information response from the mailbox is handled
3140 	 */
3141 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
3142 
3143 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
3144 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3145 
3146 	err = idpf_intr_req(adapter);
3147 	if (err) {
3148 		dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
3149 			err);
3150 		goto err_intr_req;
3151 	}
3152 
3153 	err = idpf_ptp_init(adapter);
3154 	if (err)
3155 		pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
3156 			ERR_PTR(err));
3157 
3158 	idpf_init_avail_queues(adapter);
3159 
3160 	/* Skew the delay for init tasks for each function based on fn number
3161 	 * to prevent every function from making the same call simultaneously.
3162 	 */
3163 	queue_delayed_work(adapter->init_wq, &adapter->init_task,
3164 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3165 
3166 	set_bit(IDPF_VC_CORE_INIT, adapter->flags);
3167 
3168 	return 0;
3169 
3170 err_intr_req:
3171 	cancel_delayed_work_sync(&adapter->serv_task);
3172 	cancel_delayed_work_sync(&adapter->mbx_task);
3173 	idpf_vport_params_buf_rel(adapter);
3174 err_netdev_alloc:
3175 	kfree(adapter->vports);
3176 	adapter->vports = NULL;
3177 	return err;
3178 
3179 init_failed:
3180 	/* Don't retry if we're trying to go down, just bail. */
3181 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
3182 		return err;
3183 
3184 	if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
3185 		dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
3186 
3187 		return -EFAULT;
3188 	}
3189 	/* If we got here, it is possible that the mailbox queue initialization
3190 	 * register writes did not take effect; retry initializing the
3191 	 * mailbox
3192 	 */
3193 	adapter->state = __IDPF_VER_CHECK;
3194 	if (adapter->vcxn_mngr)
3195 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3196 	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
3197 	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
3198 			   msecs_to_jiffies(task_delay));
3199 
3200 	return -EAGAIN;
3201 }
3202 
3203 /**
3204  * idpf_vc_core_deinit - Device deinit routine
3205  * @adapter: Driver specific private structure
3206  *
3207  */
3208 void idpf_vc_core_deinit(struct idpf_adapter *adapter)
3209 {
3210 	bool remove_in_prog;
3211 
3212 	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
3213 		return;
3214 
3215 	/* Avoid transaction timeouts when called during reset */
3216 	remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
3217 	if (!remove_in_prog)
3218 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3219 
3220 	idpf_ptp_release(adapter);
3221 	idpf_deinit_task(adapter);
3222 	idpf_idc_deinit_core_aux_device(adapter->cdev_info);
3223 	idpf_intr_rel(adapter);
3224 
3225 	if (remove_in_prog)
3226 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3227 
3228 	cancel_delayed_work_sync(&adapter->serv_task);
3229 	cancel_delayed_work_sync(&adapter->mbx_task);
3230 
3231 	idpf_vport_params_buf_rel(adapter);
3232 
3233 	kfree(adapter->vports);
3234 	adapter->vports = NULL;
3235 
3236 	clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
3237 }
3238 
3239 /**
3240  * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3241  * @vport: virtual port data struct
3242  *
3243  * This function requests the vector information required for the vport and
3244  * stores the vector indexes received from the 'global vector distribution'
3245  * in the vport's queue vectors array.
3246  *
3247  * Returns 0 on success, error on failure
3248  */
3249 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
3250 {
3251 	struct idpf_vector_info vec_info;
3252 	int num_alloc_vecs;
3253 
3254 	vec_info.num_curr_vecs = vport->num_q_vectors;
3255 	vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
3256 	vec_info.default_vport = vport->default_vport;
3257 	vec_info.index = vport->idx;
3258 
3259 	num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
3260 						     vport->q_vector_idxs,
3261 						     &vec_info);
3262 	if (num_alloc_vecs <= 0) {
3263 		dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
3264 			num_alloc_vecs);
3265 		return -EINVAL;
3266 	}
3267 
3268 	vport->num_q_vectors = num_alloc_vecs;
3269 
3270 	return 0;
3271 }
3272 
3273 /**
3274  * idpf_vport_init - Initialize virtual port
3275  * @vport: virtual port to be initialized
3276  * @max_q: vport max queue info
3277  *
3278  * Will initialize vport with the info received through MB earlier
3279  */
3280 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
3281 {
3282 	struct idpf_adapter *adapter = vport->adapter;
3283 	struct virtchnl2_create_vport *vport_msg;
3284 	struct idpf_vport_config *vport_config;
3285 	u16 tx_itr[] = {2, 8, 64, 128, 256};
3286 	u16 rx_itr[] = {2, 8, 32, 96, 128};
3287 	struct idpf_rss_data *rss_data;
3288 	u16 idx = vport->idx;
3289 	int err;
3290 
3291 	vport_config = adapter->vport_config[idx];
3292 	rss_data = &vport_config->user_config.rss_data;
3293 	vport_msg = adapter->vport_params_recvd[idx];
3294 
3295 	vport_config->max_q.max_txq = max_q->max_txq;
3296 	vport_config->max_q.max_rxq = max_q->max_rxq;
3297 	vport_config->max_q.max_complq = max_q->max_complq;
3298 	vport_config->max_q.max_bufq = max_q->max_bufq;
3299 
3300 	vport->txq_model = le16_to_cpu(vport_msg->txq_model);
3301 	vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
3302 	vport->vport_type = le16_to_cpu(vport_msg->vport_type);
3303 	vport->vport_id = le32_to_cpu(vport_msg->vport_id);
3304 
3305 	rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
3306 				       le16_to_cpu(vport_msg->rss_key_size));
3307 	rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
3308 
3309 	ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
3310 	vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
3311 
3312 	/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
3313 	memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
3314 	memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
3315 
3316 	idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
3317 
3318 	idpf_vport_init_num_qs(vport, vport_msg);
3319 	idpf_vport_calc_num_q_desc(vport);
3320 	idpf_vport_calc_num_q_groups(vport);
3321 	idpf_vport_alloc_vec_indexes(vport);
3322 
3323 	vport->crc_enable = adapter->crc_enable;
3324 
3325 	if (!(vport_msg->vport_flags &
3326 	      cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
3327 		return;
3328 
3329 	err = idpf_ptp_get_vport_tstamps_caps(vport);
3330 	if (err) {
3331 		pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
3332 		return;
3333 	}
3334 
3335 	INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
3336 }
3337 
3338 /**
3339  * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3340  * @adapter: adapter structure to get the mailbox vector id
3341  * @vecids: Array of vector ids
3342  * @num_vecids: number of vector ids
3343  * @chunks: vector ids received over mailbox
3344  *
3345  * Will initialize the mailbox vector id which is received from the
3346  * get capabilities and data queue vector ids with ids received as
3347  * mailbox parameters.
3348  * Returns number of ids filled
3349  */
3350 int idpf_get_vec_ids(struct idpf_adapter *adapter,
3351 		     u16 *vecids, int num_vecids,
3352 		     struct virtchnl2_vector_chunks *chunks)
3353 {
3354 	u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
3355 	int num_vecid_filled = 0;
3356 	int i, j;
3357 
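	/* The mailbox vector always occupies index 0; the data queue vector
	 * ids from the chunks follow it.
	 */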
3358 	vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
3359 	num_vecid_filled++;
3360 
3361 	for (j = 0; j < num_chunks; j++) {
3362 		struct virtchnl2_vector_chunk *chunk;
3363 		u16 start_vecid, num_vec;
3364 
3365 		chunk = &chunks->vchunks[j];
3366 		num_vec = le16_to_cpu(chunk->num_vectors);
3367 		start_vecid = le16_to_cpu(chunk->start_vector_id);
3368 
3369 		for (i = 0; i < num_vec; i++) {
3370 			if ((num_vecid_filled + i) < num_vecids) {
3371 				vecids[num_vecid_filled + i] = start_vecid;
3372 				start_vecid++;
3373 			} else {
3374 				break;
3375 			}
3376 		}
3377 		num_vecid_filled = num_vecid_filled + i;
3378 	}
3379 
3380 	return num_vecid_filled;
3381 }
3382 
3383 /**
3384  * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3385  * @qids: Array of queue ids
3386  * @num_qids: number of queue ids
3387  * @q_type: type of queue
3388  * @chunks: queue ids received over mailbox
3389  *
3390  * Will initialize all queue ids with ids received as mailbox parameters
3391  * Returns number of ids filled
3392  */
3393 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
3394 				    struct virtchnl2_queue_reg_chunks *chunks)
3395 {
3396 	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
3397 	u32 num_q_id_filled = 0, i;
3398 	u32 start_q_id, num_q;
3399 
3400 	while (num_chunks--) {
3401 		struct virtchnl2_queue_reg_chunk *chunk;
3402 
3403 		chunk = &chunks->chunks[num_chunks];
3404 		if (le32_to_cpu(chunk->type) != q_type)
3405 			continue;
3406 
3407 		num_q = le32_to_cpu(chunk->num_queues);
3408 		start_q_id = le32_to_cpu(chunk->start_queue_id);
3409 
3410 		for (i = 0; i < num_q; i++) {
3411 			if ((num_q_id_filled + i) < num_qids) {
3412 				qids[num_q_id_filled + i] = start_q_id;
3413 				start_q_id++;
3414 			} else {
3415 				break;
3416 			}
3417 		}
3418 		num_q_id_filled = num_q_id_filled + i;
3419 	}
3420 
3421 	return num_q_id_filled;
3422 }
3423 
3424 /**
3425  * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3426  * @vport: virtual port for which the queues ids are initialized
3427  * @qids: queue ids
3428  * @num_qids: number of queue ids
3429  * @q_type: type of queue
3430  *
3431  * Will initialize all queue ids with ids received as mailbox
3432  * parameters. Returns number of queue ids initialized.
3433  */
3434 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
3435 				       const u32 *qids,
3436 				       int num_qids,
3437 				       u32 q_type)
3438 {
3439 	int i, j, k = 0;
3440 
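	/* k counts how many entries of @qids have been consumed; it is
	 * returned so the caller can verify enough ids were provided.
	 */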
3441 	switch (q_type) {
3442 	case VIRTCHNL2_QUEUE_TYPE_TX:
3443 		for (i = 0; i < vport->num_txq_grp; i++) {
3444 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3445 
3446 			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
3447 				tx_qgrp->txqs[j]->q_id = qids[k];
3448 		}
3449 		break;
3450 	case VIRTCHNL2_QUEUE_TYPE_RX:
3451 		for (i = 0; i < vport->num_rxq_grp; i++) {
3452 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3453 			u16 num_rxq;
3454 
3455 			if (idpf_is_queue_model_split(vport->rxq_model))
3456 				num_rxq = rx_qgrp->splitq.num_rxq_sets;
3457 			else
3458 				num_rxq = rx_qgrp->singleq.num_rxq;
3459 
3460 			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
3461 				struct idpf_rx_queue *q;
3462 
3463 				if (idpf_is_queue_model_split(vport->rxq_model))
3464 					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3465 				else
3466 					q = rx_qgrp->singleq.rxqs[j];
3467 				q->q_id = qids[k];
3468 			}
3469 		}
3470 		break;
3471 	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
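		/* One completion queue is assigned per TX queue group */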
3472 		for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
3473 			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
3474 
3475 			tx_qgrp->complq->q_id = qids[k];
3476 		}
3477 		break;
3478 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
3479 		for (i = 0; i < vport->num_rxq_grp; i++) {
3480 			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3481 			u8 num_bufqs = vport->num_bufqs_per_qgrp;
3482 
3483 			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
3484 				struct idpf_buf_queue *q;
3485 
3486 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
3487 				q->q_id = qids[k];
3488 			}
3489 		}
3490 		break;
3491 	default:
3492 		break;
3493 	}
3494 
3495 	return k;
3496 }
3497 
3498 /**
3499  * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3500  * @vport: virtual port for which the queues ids are initialized
3501  *
3502  * Will initialize all queue ids with ids received as mailbox parameters.
3503  * Returns 0 on success, negative if not all the queues could be initialized.
3504  */
3505 int idpf_vport_queue_ids_init(struct idpf_vport *vport)
3506 {
3507 	struct virtchnl2_create_vport *vport_params;
3508 	struct virtchnl2_queue_reg_chunks *chunks;
3509 	struct idpf_vport_config *vport_config;
3510 	u16 vport_idx = vport->idx;
3511 	int num_ids, err = 0;
3512 	u16 q_type;
3513 	u32 *qids;
3514 
3515 	vport_config = vport->adapter->vport_config[vport_idx];
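	/* Use the queue id chunks from the most recent ADD_QUEUES response if
	 * extra queues were requested; otherwise use the chunks returned with
	 * CREATE_VPORT.
	 */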
3516 	if (vport_config->req_qs_chunks) {
3517 		struct virtchnl2_add_queues *vc_aq =
3518 			(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
3519 		chunks = &vc_aq->chunks;
3520 	} else {
3521 		vport_params = vport->adapter->vport_params_recvd[vport_idx];
3522 		chunks = &vport_params->chunks;
3523 	}
3524 
3525 	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
3526 	if (!qids)
3527 		return -ENOMEM;
3528 
3529 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3530 					   VIRTCHNL2_QUEUE_TYPE_TX,
3531 					   chunks);
3532 	if (num_ids < vport->num_txq) {
3533 		err = -EINVAL;
3534 		goto mem_rel;
3535 	}
3536 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3537 					      VIRTCHNL2_QUEUE_TYPE_TX);
3538 	if (num_ids < vport->num_txq) {
3539 		err = -EINVAL;
3540 		goto mem_rel;
3541 	}
3542 
3543 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
3544 					   VIRTCHNL2_QUEUE_TYPE_RX,
3545 					   chunks);
3546 	if (num_ids < vport->num_rxq) {
3547 		err = -EINVAL;
3548 		goto mem_rel;
3549 	}
3550 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
3551 					      VIRTCHNL2_QUEUE_TYPE_RX);
3552 	if (num_ids < vport->num_rxq) {
3553 		err = -EINVAL;
3554 		goto mem_rel;
3555 	}
3556 
3557 	if (!idpf_is_queue_model_split(vport->txq_model))
3558 		goto check_rxq;
3559 
3560 	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
3561 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3562 	if (num_ids < vport->num_complq) {
3563 		err = -EINVAL;
3564 		goto mem_rel;
3565 	}
3566 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3567 	if (num_ids < vport->num_complq) {
3568 		err = -EINVAL;
3569 		goto mem_rel;
3570 	}
3571 
3572 check_rxq:
3573 	if (!idpf_is_queue_model_split(vport->rxq_model))
3574 		goto mem_rel;
3575 
3576 	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
3577 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
3578 	if (num_ids < vport->num_bufq) {
3579 		err = -EINVAL;
3580 		goto mem_rel;
3581 	}
3582 	num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
3583 	if (num_ids < vport->num_bufq)
3584 		err = -EINVAL;
3585 
3586 mem_rel:
3587 	kfree(qids);
3588 
3589 	return err;
3590 }
3591 
3592 /**
3593  * idpf_vport_adjust_qs - Adjust to new requested queues
3594  * @vport: virtual port data struct
3595  *
3596  * Renegotiate queues.  Returns 0 on success, negative on failure.
3597  */
3598 int idpf_vport_adjust_qs(struct idpf_vport *vport)
3599 {
3600 	struct virtchnl2_create_vport vport_msg;
3601 	int err;
3602 
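	/* Only the queue models need seeding here; idpf_vport_calc_total_qs()
	 * fills in the requested queue counts consumed below.
	 */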
3603 	vport_msg.txq_model = cpu_to_le16(vport->txq_model);
3604 	vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
3605 	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
3606 				       NULL);
3607 	if (err)
3608 		return err;
3609 
3610 	idpf_vport_init_num_qs(vport, &vport_msg);
3611 	idpf_vport_calc_num_q_groups(vport);
3612 
3613 	return 0;
3614 }
3615 
3616 /**
3617  * idpf_is_capability_ena - Default implementation of capability checking
3618  * @adapter: Private data struct
3619  * @all: require all bits in @flag when true, any one bit otherwise
3620  * @field: caps field to check for flags
3621  * @flag: flag to check
3622  *
3623  * Return true if the requested capability flag(s) in @flag are set, false otherwise
3624  */
3625 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
3626 			    enum idpf_cap_field field, u64 flag)
3627 {
3628 	u8 *caps = (u8 *)&adapter->caps;
3629 	u32 *cap_field;
3630 
3631 	if (!caps)
3632 		return false;
3633 
3634 	if (field == IDPF_BASE_CAPS)
3635 		return false;
3636 
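	/* @field is a byte offset into the caps structure selecting the u32
	 * flags word to test
	 */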
3637 	cap_field = (u32 *)(caps + field);
3638 
3639 	if (all)
3640 		return (*cap_field & flag) == flag;
3641 	else
3642 		return !!(*cap_field & flag);
3643 }
3644 
3645 /**
3646  * idpf_get_vport_id - Get vport id
3647  * @vport: virtual port structure
3648  *
3649  * Return vport id from the adapter persistent data
3650  */
3651 u32 idpf_get_vport_id(struct idpf_vport *vport)
3652 {
3653 	struct virtchnl2_create_vport *vport_msg;
3654 
3655 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
3656 
3657 	return le32_to_cpu(vport_msg->vport_id);
3658 }
3659 
3660 /**
3661  * idpf_mac_filter_async_handler - Async callback for mac filters
3662  * @adapter: private data struct
3663  * @xn: transaction for message
3664  * @ctlq_msg: received message
3665  *
3666  * In some scenarios the driver can't sleep and wait for a reply (e.g. the
3667  * stack is holding rtnl_lock) when adding a new mac filter, which makes it
3668  * difficult to deal with errors returned in the reply. The best we can
3669  * ultimately do is remove the filter from our list of mac filters and
3670  * report the error.
3671  */
3672 static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
3673 					 struct idpf_vc_xn *xn,
3674 					 const struct idpf_ctlq_msg *ctlq_msg)
3675 {
3676 	struct virtchnl2_mac_addr_list *ma_list;
3677 	struct idpf_vport_config *vport_config;
3678 	struct virtchnl2_mac_addr *mac_addr;
3679 	struct idpf_mac_filter *f, *tmp;
3680 	struct list_head *ma_list_head;
3681 	struct idpf_vport *vport;
3682 	u16 num_entries;
3683 	int i;
3684 
3685 	/* if success we're done; we're only here if something bad happened */
3686 	if (!ctlq_msg->cookie.mbx.chnl_retval)
3687 		return 0;
3688 
3689 	/* make sure at least struct is there */
3690 	if (xn->reply_sz < sizeof(*ma_list))
3691 		goto invalid_payload;
3692 
3693 	ma_list = ctlq_msg->ctx.indirect.payload->va;
3694 	mac_addr = ma_list->mac_addr_list;
3695 	num_entries = le16_to_cpu(ma_list->num_mac_addr);
3696 	/* we should have received a buffer at least this big */
3697 	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
3698 		goto invalid_payload;
3699 
3700 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
3701 	if (!vport)
3702 		goto invalid_payload;
3703 
3704 	vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
3705 	ma_list_head = &vport_config->user_config.mac_filter_list;
3706 
3707 	/* We can't do much to reconcile bad filters at this point; however, we
3708 	 * should at least remove them from our list one way or the other so we
3709 	 * have some idea which good filters remain.
3710 	 */
3711 	spin_lock_bh(&vport_config->mac_filter_list_lock);
3712 	list_for_each_entry_safe(f, tmp, ma_list_head, list)
3713 		for (i = 0; i < num_entries; i++)
3714 			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
3715 				list_del(&f->list);
3716 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
3717 	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
3718 			    xn->vc_op);
3719 
3720 	return 0;
3721 
3722 invalid_payload:
3723 	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
3724 			    xn->vc_op, xn->reply_sz);
3725 
3726 	return -EINVAL;
3727 }
3728 
3729 /**
3730  * idpf_add_del_mac_filters - Add/del mac filters
3731  * @vport: Virtual port data structure
3732  * @np: Netdev private structure
3733  * @add: Add or delete flag
3734  * @async: Don't wait for return message
3735  *
3736  * Returns 0 on success, error on failure.
3737  */
3738 int idpf_add_del_mac_filters(struct idpf_vport *vport,
3739 			     struct idpf_netdev_priv *np,
3740 			     bool add, bool async)
3741 {
3742 	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
3743 	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
3744 	struct idpf_adapter *adapter = np->adapter;
3745 	struct idpf_vc_xn_params xn_params = {};
3746 	struct idpf_vport_config *vport_config;
3747 	u32 num_msgs, total_filters = 0;
3748 	struct idpf_mac_filter *f;
3749 	ssize_t reply_sz;
3750 	int i = 0, k;
3751 
3752 	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
3753 				VIRTCHNL2_OP_DEL_MAC_ADDR;
3754 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3755 	xn_params.async = async;
3756 	xn_params.async_handler = idpf_mac_filter_async_handler;
3757 
3758 	vport_config = adapter->vport_config[np->vport_idx];
3759 	spin_lock_bh(&vport_config->mac_filter_list_lock);
3760 
3761 	/* Find the number of newly added filters */
3762 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
3763 			    list) {
3764 		if (add && f->add)
3765 			total_filters++;
3766 		else if (!add && f->remove)
3767 			total_filters++;
3768 	}
3769 
3770 	if (!total_filters) {
3771 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
3772 
3773 		return 0;
3774 	}
3775 
3776 	/* Fill all the new filters into virtchannel message */
3777 	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
3778 			   GFP_ATOMIC);
3779 	if (!mac_addr) {
3780 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
3781 
3782 		return -ENOMEM;
3783 	}
3784 
3785 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
3786 			    list) {
3787 		if (add && f->add) {
3788 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
3789 			i++;
3790 			f->add = false;
3791 			if (i == total_filters)
3792 				break;
3793 		}
3794 		if (!add && f->remove) {
3795 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
3796 			i++;
3797 			f->remove = false;
3798 			if (i == total_filters)
3799 				break;
3800 		}
3801 	}
3802 
3803 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
3804 
3805 	/* Chunk up the filters into multiple messages to avoid
3806 	 * sending a control queue message buffer that is too large
3807 	 */
3808 	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
3809 
3810 	for (i = 0, k = 0; i < num_msgs; i++) {
3811 		u32 entries_size, buf_size, num_entries;
3812 
3813 		num_entries = min_t(u32, total_filters,
3814 				    IDPF_NUM_FILTERS_PER_MSG);
3815 		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
3816 		buf_size = struct_size(ma_list, mac_addr_list, num_entries);
3817 
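		/* Reuse the buffer across full-sized chunks; reallocate only
		 * for the first message or the smaller final chunk.
		 */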
3818 		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
3819 			kfree(ma_list);
3820 			ma_list = kzalloc(buf_size, GFP_ATOMIC);
3821 			if (!ma_list)
3822 				return -ENOMEM;
3823 		} else {
3824 			memset(ma_list, 0, buf_size);
3825 		}
3826 
3827 		ma_list->vport_id = cpu_to_le32(np->vport_id);
3828 		ma_list->num_mac_addr = cpu_to_le16(num_entries);
3829 		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
3830 
3831 		xn_params.send_buf.iov_base = ma_list;
3832 		xn_params.send_buf.iov_len = buf_size;
3833 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3834 		if (reply_sz < 0)
3835 			return reply_sz;
3836 
3837 		k += num_entries;
3838 		total_filters -= num_entries;
3839 	}
3840 
3841 	return 0;
3842 }
3843 
3844 /**
3845  * idpf_set_promiscuous - set promiscuous and send message to mailbox
3846  * @adapter: Driver specific private structure
3847  * @config_data: Vport specific config data
3848  * @vport_id: Vport identifier
3849  *
3850  * Request to enable promiscuous mode for the vport. Message is sent
3851  * asynchronously and won't wait for response.  Returns 0 on success, negative
3852  * on failure;
3853  */
3854 int idpf_set_promiscuous(struct idpf_adapter *adapter,
3855 			 struct idpf_vport_user_config_data *config_data,
3856 			 u32 vport_id)
3857 {
3858 	struct idpf_vc_xn_params xn_params = {};
3859 	struct virtchnl2_promisc_info vpi;
3860 	ssize_t reply_sz;
3861 	u16 flags = 0;
3862 
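	/* Translate the cached user promiscuous settings into virtchnl flags */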
3863 	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
3864 		flags |= VIRTCHNL2_UNICAST_PROMISC;
3865 	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
3866 		flags |= VIRTCHNL2_MULTICAST_PROMISC;
3867 
3868 	vpi.vport_id = cpu_to_le32(vport_id);
3869 	vpi.flags = cpu_to_le16(flags);
3870 
3871 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
3872 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3873 	xn_params.send_buf.iov_base = &vpi;
3874 	xn_params.send_buf.iov_len = sizeof(vpi);
3875 	/* setting promiscuous is only ever done asynchronously */
3876 	xn_params.async = true;
3877 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3878 
3879 	return reply_sz < 0 ? reply_sz : 0;
3880 }
3881 
3882 /**
3883  * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
3884  * @cdev_info: IDC core device info pointer
3885  * @send_msg: message to send
3886  * @msg_size: size of message to send
3887  * @recv_msg: message to populate on reception of response
3888  * @recv_len: length of message copied into recv_msg or 0 on error
3889  *
3890  * Return: 0 on success or error code on failure.
3891  */
3892 int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
3893 			       u8 *send_msg, u16 msg_size,
3894 			       u8 *recv_msg, u16 *recv_len)
3895 {
3896 	struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
3897 	struct idpf_vc_xn_params xn_params = { };
3898 	ssize_t reply_sz;
3899 	u16 recv_size;
3900 
3901 	if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
3902 		return -EINVAL;
3903 
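	/* Clamp the receive buffer to the control queue limit and report a
	 * length of 0 unless a reply is actually received.
	 */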
3904 	recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
3905 	*recv_len = 0;
3906 	xn_params.vc_op = VIRTCHNL2_OP_RDMA;
3907 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3908 	xn_params.send_buf.iov_base = send_msg;
3909 	xn_params.send_buf.iov_len = msg_size;
3910 	xn_params.recv_buf.iov_base = recv_msg;
3911 	xn_params.recv_buf.iov_len = recv_size;
3912 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3913 	if (reply_sz < 0)
3914 		return reply_sz;
3915 	*recv_len = reply_sz;
3916 
3917 	return 0;
3918 }
3919 EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
3920