xref: /linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c (revision 086d030e99d25b27d89ab62c8509db7626bdcc48)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include <linux/export.h>
5 #include <net/libeth/rx.h>
6 
7 #include "idpf.h"
8 #include "idpf_virtchnl.h"
9 #include "idpf_ptp.h"
10 
11 /**
12  * struct idpf_vc_xn_manager - Manager for tracking transactions
13  * @ring: backing and lookup for transactions
14  * @free_xn_bm: bitmap for free transactions
15  * @xn_bm_lock: make bitmap access synchronous where necessary
16  * @salt: used to make the cookie unique for every message
17  */
18 struct idpf_vc_xn_manager {
19 	struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
20 	DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
21 	spinlock_t xn_bm_lock;
22 	u8 salt;
23 };
24 
25 /**
26  * idpf_vid_to_vport - Translate vport id to vport pointer
27  * @adapter: private data struct
28  * @v_id: vport id to translate
29  *
30  * Returns vport matching v_id, NULL if not found.
31  */
32 static
33 struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
34 {
35 	u16 num_max_vports = idpf_get_max_vports(adapter);
36 	int i;
37 
38 	for (i = 0; i < num_max_vports; i++)
39 		if (adapter->vport_ids[i] == v_id)
40 			return adapter->vports[i];
41 
42 	return NULL;
43 }
44 
45 /**
46  * idpf_handle_event_link - Handle link event message
47  * @adapter: private data struct
48  * @v2e: virtchnl event message
49  */
50 static void idpf_handle_event_link(struct idpf_adapter *adapter,
51 				   const struct virtchnl2_event *v2e)
52 {
53 	struct idpf_netdev_priv *np;
54 	struct idpf_vport *vport;
55 
56 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
57 	if (!vport) {
58 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
59 				    le32_to_cpu(v2e->vport_id));
60 		return;
61 	}
62 	np = netdev_priv(vport->netdev);
63 
64 	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
65 
66 	if (vport->link_up == v2e->link_status)
67 		return;
68 
69 	vport->link_up = v2e->link_status;
70 
71 	if (!test_bit(IDPF_VPORT_UP, np->state))
72 		return;
73 
74 	if (vport->link_up) {
75 		netif_tx_start_all_queues(vport->netdev);
76 		netif_carrier_on(vport->netdev);
77 	} else {
78 		netif_tx_stop_all_queues(vport->netdev);
79 		netif_carrier_off(vport->netdev);
80 	}
81 }
82 
83 /**
84  * idpf_recv_event_msg - Receive virtchnl event message
85  * @adapter: Driver specific private structure
86  * @ctlq_msg: message to copy from
87  *
88  * Receive virtchnl event message
89  */
90 static void idpf_recv_event_msg(struct idpf_adapter *adapter,
91 				struct idpf_ctlq_msg *ctlq_msg)
92 {
93 	int payload_size = ctlq_msg->ctx.indirect.payload->size;
94 	struct virtchnl2_event *v2e;
95 	u32 event;
96 
97 	if (payload_size < sizeof(*v2e)) {
98 		dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
99 				    ctlq_msg->cookie.mbx.chnl_opcode,
100 				    payload_size);
101 		return;
102 	}
103 
104 	v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
105 	event = le32_to_cpu(v2e->event);
106 
107 	switch (event) {
108 	case VIRTCHNL2_EVENT_LINK_CHANGE:
109 		idpf_handle_event_link(adapter, v2e);
110 		return;
111 	default:
112 		dev_err(&adapter->pdev->dev,
113 			"Unknown event %d from PF\n", event);
114 		break;
115 	}
116 }
117 
118 /**
119  * idpf_mb_clean - Reclaim the send mailbox queue entries
120  * @adapter: driver specific private structure
121  * @asq: send control queue info
122  *
123  * Reclaim the send mailbox queue entries to be used to send further messages
124  *
125  * Return: 0 on success, negative on failure
126  */
127 static int idpf_mb_clean(struct idpf_adapter *adapter,
128 			 struct idpf_ctlq_info *asq)
129 {
130 	u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
131 	struct idpf_ctlq_msg **q_msg;
132 	struct idpf_dma_mem *dma_mem;
133 	int err;
134 
135 	q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
136 	if (!q_msg)
137 		return -ENOMEM;
138 
139 	err = idpf_ctlq_clean_sq(asq, &num_q_msg, q_msg);
140 	if (err)
141 		goto err_kfree;
142 
143 	for (i = 0; i < num_q_msg; i++) {
144 		if (!q_msg[i])
145 			continue;
146 		dma_mem = q_msg[i]->ctx.indirect.payload;
147 		if (dma_mem)
148 			dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
149 					  dma_mem->va, dma_mem->pa);
150 		kfree(q_msg[i]);
151 		kfree(dma_mem);
152 	}
153 
154 err_kfree:
155 	kfree(q_msg);
156 
157 	return err;
158 }
159 
160 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
161 /**
162  * idpf_ptp_is_mb_msg - Check if the message is PTP-related
163  * @op: virtchnl opcode
164  *
165  * Return: true if msg is PTP-related, false otherwise.
166  */
167 static bool idpf_ptp_is_mb_msg(u32 op)
168 {
169 	switch (op) {
170 	case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
171 	case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
172 	case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
173 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
174 	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
175 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
176 	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
177 		return true;
178 	default:
179 		return false;
180 	}
181 }
182 
183 /**
184  * idpf_prepare_ptp_mb_msg - Prepare PTP related message
185  *
186  * @adapter: Driver specific private structure
187  * @op: virtchnl opcode
188  * @ctlq_msg: Corresponding control queue message
189  */
190 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
191 				    struct idpf_ctlq_msg *ctlq_msg)
192 {
193 	/* If the message is PTP-related and the secondary mailbox is available,
194 	 * send the message through the secondary mailbox.
195 	 */
196 	if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
197 		return;
198 
199 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
200 	ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
201 	ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
202 }
203 #else /* !CONFIG_PTP_1588_CLOCK */
204 static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
205 				    struct idpf_ctlq_msg *ctlq_msg)
206 { }
207 #endif /* CONFIG_PTP_1588_CLOCK */
208 
209 /**
210  * idpf_send_mb_msg - Send message over mailbox
211  * @adapter: driver specific private structure
212  * @asq: control queue to send message to
213  * @op: virtchnl opcode
214  * @msg_size: size of the payload
215  * @msg: pointer to buffer holding the payload
216  * @cookie: unique SW generated cookie per message
217  *
218  * Prepares the control queue message and initiates the send API.
219  *
220  * Return: 0 on success, negative on failure
221  */
222 int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
223 		     u32 op, u16 msg_size, u8 *msg, u16 cookie)
224 {
225 	struct idpf_ctlq_msg *ctlq_msg;
226 	struct idpf_dma_mem *dma_mem;
227 	int err;
228 
229 	/* If we are here and a reset is detected, nothing much can be
230 	 * done. This thread should silently abort and is expected to
231 	 * be corrected with a new run, either by user or driver
232 	 * flows, after the reset.
233 	 */
234 	if (idpf_is_reset_detected(adapter))
235 		return 0;
236 
237 	err = idpf_mb_clean(adapter, asq);
238 	if (err)
239 		return err;
240 
241 	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
242 	if (!ctlq_msg)
243 		return -ENOMEM;
244 
245 	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
246 	if (!dma_mem) {
247 		err = -ENOMEM;
248 		goto dma_mem_error;
249 	}
250 
251 	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
252 	ctlq_msg->func_id = 0;
253 
254 	idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
255 
256 	ctlq_msg->data_len = msg_size;
257 	ctlq_msg->cookie.mbx.chnl_opcode = op;
258 	ctlq_msg->cookie.mbx.chnl_retval = 0;
259 	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
260 	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
261 					 &dma_mem->pa, GFP_ATOMIC);
262 	if (!dma_mem->va) {
263 		err = -ENOMEM;
264 		goto dma_alloc_error;
265 	}
266 
267 	/* It's possible we're just sending an opcode but no buffer */
268 	if (msg && msg_size)
269 		memcpy(dma_mem->va, msg, msg_size);
270 	ctlq_msg->ctx.indirect.payload = dma_mem;
271 	ctlq_msg->ctx.sw_cookie.data = cookie;
272 
273 	err = idpf_ctlq_send(&adapter->hw, asq, 1, ctlq_msg);
274 	if (err)
275 		goto send_error;
276 
277 	return 0;
278 
279 send_error:
280 	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
281 			  dma_mem->pa);
282 dma_alloc_error:
283 	kfree(dma_mem);
284 dma_mem_error:
285 	kfree(ctlq_msg);
286 
287 	return err;
288 }
289 
290 /* API for virtchnl "transaction" support ("xn" for short).
291  *
292  * We are reusing the completion lock to serialize accesses to the
293  * transaction state for simplicity, but it could be its own separate
294  * synchronization primitive as well. For now, this API is only used from
295  * within a workqueue context; raw_spin_lock() is enough.
296  */
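/* Cookie layout sketch, derived from the FIELD_PREP()/FIELD_GET() usage in
 * idpf_vc_xn_exec() and idpf_vc_xn_forward_reply() below: the 16-bit
 * sw_cookie carried in every mailbox message packs the transaction ring
 * index together with a per-use salt,
 *
 *	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
 *		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
 *
 * and the reply path decodes it the same way,
 *
 *	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, cookie);
 *	salt   = FIELD_GET(IDPF_VC_XN_SALT_M, cookie);
 *
 * so a reply whose salt no longer matches the ring slot is recognized as
 * belonging to an earlier use of that slot and rejected.
 */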
297 /**
298  * idpf_vc_xn_lock - Request exclusive access to vc transaction
299  * @xn: struct idpf_vc_xn* to access
300  */
301 #define idpf_vc_xn_lock(xn)			\
302 	raw_spin_lock(&(xn)->completed.wait.lock)
303 
304 /**
305  * idpf_vc_xn_unlock - Release exclusive access to vc transaction
306  * @xn: struct idpf_vc_xn* to access
307  */
308 #define idpf_vc_xn_unlock(xn)		\
309 	raw_spin_unlock(&(xn)->completed.wait.lock)
310 
311 /**
312  * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
313  * reset the transaction state.
314  * @xn: struct idpf_vc_xn to update
315  */
316 static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
317 {
318 	xn->reply.iov_base = NULL;
319 	xn->reply.iov_len = 0;
320 
321 	if (xn->state != IDPF_VC_XN_SHUTDOWN)
322 		xn->state = IDPF_VC_XN_IDLE;
323 }
324 
325 /**
326  * idpf_vc_xn_init - Initialize virtchnl transaction object
327  * @vcxn_mngr: pointer to vc transaction manager struct
328  */
329 static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
330 {
331 	int i;
332 
333 	spin_lock_init(&vcxn_mngr->xn_bm_lock);
334 
335 	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
336 		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
337 
338 		xn->state = IDPF_VC_XN_IDLE;
339 		xn->idx = i;
340 		idpf_vc_xn_release_bufs(xn);
341 		init_completion(&xn->completed);
342 	}
343 
344 	bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
345 }
346 
347 /**
348  * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
349  * @vcxn_mngr: pointer to vc transaction manager struct
350  *
351  * All waiting threads will be woken up and their transactions aborted. Further
352  * operations on that object will fail.
353  */
354 void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
355 {
356 	int i;
357 
358 	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
359 	bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
360 	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
361 
362 	for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
363 		struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
364 
365 		idpf_vc_xn_lock(xn);
366 		xn->state = IDPF_VC_XN_SHUTDOWN;
367 		idpf_vc_xn_release_bufs(xn);
368 		idpf_vc_xn_unlock(xn);
369 		complete_all(&xn->completed);
370 	}
371 }
372 
373 /**
374  * idpf_vc_xn_pop_free - Pop a free transaction from free list
375  * @vcxn_mngr: transaction manager to pop from
376  *
377  * Returns NULL if no free transactions are available.
378  */
379 static
380 struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
381 {
382 	struct idpf_vc_xn *xn = NULL;
383 	unsigned long free_idx;
384 
385 	spin_lock_bh(&vcxn_mngr->xn_bm_lock);
386 	free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
387 	if (free_idx == IDPF_VC_XN_RING_LEN)
388 		goto do_unlock;
389 
390 	clear_bit(free_idx, vcxn_mngr->free_xn_bm);
391 	xn = &vcxn_mngr->ring[free_idx];
392 	xn->salt = vcxn_mngr->salt++;
393 
394 do_unlock:
395 	spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
396 
397 	return xn;
398 }
399 
400 /**
401  * idpf_vc_xn_push_free - Push a free transaction to free list
402  * @vcxn_mngr: transaction manager to push to
403  * @xn: transaction to push
404  */
405 static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
406 				 struct idpf_vc_xn *xn)
407 {
408 	idpf_vc_xn_release_bufs(xn);
409 	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
410 }
411 
412 /**
413  * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
414  * @adapter: driver specific private structure with vcxn_mngr
415  * @params: parameters for this particular transaction including
416  *   -vc_op: virtchnl operation to send
417  *   -send_buf: kvec iov for send buf and len
418  *   -recv_buf: kvec iov for recv buf and len (ignored if NULL)
419  *   -timeout_ms: timeout waiting for a reply (milliseconds)
420  *   -async: don't wait for message reply, will lose caller context
421  *   -async_handler: callback to handle async replies
422  *
423  * @returns >= 0 for success, the size of the initial reply (may or may not be
424  * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base); < 0 for
425  * error.
426  */
427 ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
428 			const struct idpf_vc_xn_params *params)
429 {
430 	const struct kvec *send_buf = &params->send_buf;
431 	struct idpf_vc_xn *xn;
432 	ssize_t retval;
433 	u16 cookie;
434 
435 	xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
436 	/* no free transactions available */
437 	if (!xn)
438 		return -ENOSPC;
439 
440 	idpf_vc_xn_lock(xn);
441 	if (xn->state == IDPF_VC_XN_SHUTDOWN) {
442 		retval = -ENXIO;
443 		goto only_unlock;
444 	} else if (xn->state != IDPF_VC_XN_IDLE) {
445 		/* We're just going to clobber this transaction even though
446 		 * it's not IDLE. If we don't reuse it we could theoretically
447 		 * eventually leak all the free transactions and not be able to
448 		 * send any messages. At least this way we make an attempt to
449 		 * remain functional even though something really bad is
450 		 * happening that's corrupting what was supposed to be free
451 		 * transactions.
452 		 */
453 		WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
454 			  xn->idx, xn->vc_op);
455 	}
456 
457 	xn->reply = params->recv_buf;
458 	xn->reply_sz = 0;
459 	xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
460 	xn->vc_op = params->vc_op;
461 	xn->async_handler = params->async_handler;
462 	idpf_vc_xn_unlock(xn);
463 
464 	if (!params->async)
465 		reinit_completion(&xn->completed);
466 	cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
467 		 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
468 
469 	retval = idpf_send_mb_msg(adapter, adapter->hw.asq, params->vc_op,
470 				  send_buf->iov_len, send_buf->iov_base,
471 				  cookie);
472 	if (retval) {
473 		idpf_vc_xn_lock(xn);
474 		goto release_and_unlock;
475 	}
476 
477 	if (params->async)
478 		return 0;
479 
480 	wait_for_completion_timeout(&xn->completed,
481 				    msecs_to_jiffies(params->timeout_ms));
482 
483 	/* No need to check the return value; we check the final state of the
484 	 * transaction below. It's possible the transaction effectively gets a
485 	 * longer timeout than specified if we get preempted here but after
486 	 * wait_for_completion_timeout returns. This should be a non-issue,
487 	 * however.
488 	 */
489 	idpf_vc_xn_lock(xn);
490 	switch (xn->state) {
491 	case IDPF_VC_XN_SHUTDOWN:
492 		retval = -ENXIO;
493 		goto only_unlock;
494 	case IDPF_VC_XN_WAITING:
495 		dev_notice_ratelimited(&adapter->pdev->dev,
496 				       "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
497 				       params->vc_op, cookie, xn->vc_op,
498 				       xn->salt, params->timeout_ms);
499 		retval = -ETIME;
500 		break;
501 	case IDPF_VC_XN_COMPLETED_SUCCESS:
502 		retval = xn->reply_sz;
503 		break;
504 	case IDPF_VC_XN_COMPLETED_FAILED:
505 		dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
506 				       params->vc_op);
507 		retval = -EIO;
508 		break;
509 	default:
510 		/* Invalid state. */
511 		WARN_ON_ONCE(1);
512 		retval = -EIO;
513 		break;
514 	}
515 
516 release_and_unlock:
517 	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
518 	/* If we receive a VC reply after here, it will be dropped. */
519 only_unlock:
520 	idpf_vc_xn_unlock(xn);
521 
522 	return retval;
523 }
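/* Transaction state flow, as implemented above and in the reply handlers
 * below (a summary only, no additional behavior):
 *
 *	IDLE --idpf_vc_xn_exec()--> WAITING (sync) or ASYNC
 *	WAITING --reply--> COMPLETED_SUCCESS / COMPLETED_FAILED
 *	WAITING --timeout--> still WAITING, reported as -ETIME
 *	ASYNC --reply--> handled by idpf_vc_xn_forward_async()
 *	any state --idpf_vc_xn_shutdown()--> SHUTDOWN (terminal)
 *
 * Completed, timed-out and async transactions return to IDLE through
 * idpf_vc_xn_push_free() -> idpf_vc_xn_release_bufs().
 */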
524 
525 /**
526  * idpf_vc_xn_forward_async - Handle async reply receives
527  * @adapter: private data struct
528  * @xn: transaction to handle
529  * @ctlq_msg: corresponding ctlq_msg
530  *
531  * For async sends we're going to lose the caller's context, so if an
532  * async_handler was provided, it can deal with the reply; otherwise we'll just
533  * check and report if there is an error.
534  */
535 static int
536 idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
537 			 const struct idpf_ctlq_msg *ctlq_msg)
538 {
539 	int err = 0;
540 
541 	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
542 		dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
543 				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
544 		xn->reply_sz = 0;
545 		err = -EINVAL;
546 		goto release_bufs;
547 	}
548 
549 	if (xn->async_handler) {
550 		err = xn->async_handler(adapter, xn, ctlq_msg);
551 		goto release_bufs;
552 	}
553 
554 	if (ctlq_msg->cookie.mbx.chnl_retval) {
555 		xn->reply_sz = 0;
556 		dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
557 				    ctlq_msg->cookie.mbx.chnl_opcode);
558 		err = -EINVAL;
559 	}
560 
561 release_bufs:
562 	idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
563 
564 	return err;
565 }
566 
567 /**
568  * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
569  * @adapter: driver specific private structure with vcxn_mngr
570  * @ctlq_msg: controlq message to send back to receiving thread
571  */
572 static int
573 idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
574 			 const struct idpf_ctlq_msg *ctlq_msg)
575 {
576 	const void *payload = NULL;
577 	size_t payload_size = 0;
578 	struct idpf_vc_xn *xn;
579 	u16 msg_info;
580 	int err = 0;
581 	u16 xn_idx;
582 	u16 salt;
583 
584 	msg_info = ctlq_msg->ctx.sw_cookie.data;
585 	xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
586 	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
587 		dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
588 				    xn_idx);
589 		return -EINVAL;
590 	}
591 	xn = &adapter->vcxn_mngr->ring[xn_idx];
592 	idpf_vc_xn_lock(xn);
593 	salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
594 	if (xn->salt != salt) {
595 		dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
596 				    xn->vc_op, xn->salt, xn->state,
597 				    ctlq_msg->cookie.mbx.chnl_opcode, salt);
598 		idpf_vc_xn_unlock(xn);
599 		return -EINVAL;
600 	}
601 
602 	switch (xn->state) {
603 	case IDPF_VC_XN_WAITING:
604 		/* success */
605 		break;
606 	case IDPF_VC_XN_IDLE:
607 		dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
608 				    ctlq_msg->cookie.mbx.chnl_opcode);
609 		err = -EINVAL;
610 		goto out_unlock;
611 	case IDPF_VC_XN_SHUTDOWN:
612 		/* ENXIO is a bit special here as the recv msg loop uses that to
613 		 * know if it should stop trying to clean the ring if we lost
614 		 * the virtchnl. We need to stop playing with registers and
615 		 * yield.
616 		 */
617 		err = -ENXIO;
618 		goto out_unlock;
619 	case IDPF_VC_XN_ASYNC:
620 		err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
621 		idpf_vc_xn_unlock(xn);
622 		return err;
623 	default:
624 		dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
625 				    ctlq_msg->cookie.mbx.chnl_opcode);
626 		err = -EBUSY;
627 		goto out_unlock;
628 	}
629 
630 	if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
631 		dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
632 				    ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
633 		xn->reply_sz = 0;
634 		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
635 		err = -EINVAL;
636 		goto out_unlock;
637 	}
638 
639 	if (ctlq_msg->cookie.mbx.chnl_retval) {
640 		xn->reply_sz = 0;
641 		xn->state = IDPF_VC_XN_COMPLETED_FAILED;
642 		err = -EINVAL;
643 		goto out_unlock;
644 	}
645 
646 	if (ctlq_msg->data_len) {
647 		payload = ctlq_msg->ctx.indirect.payload->va;
648 		payload_size = ctlq_msg->data_len;
649 	}
650 
651 	xn->reply_sz = payload_size;
652 	xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
653 
654 	if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
655 		memcpy(xn->reply.iov_base, payload,
656 		       min_t(size_t, xn->reply.iov_len, payload_size));
657 
658 out_unlock:
659 	idpf_vc_xn_unlock(xn);
660 	/* we _cannot_ hold lock while calling complete */
661 	complete(&xn->completed);
662 
663 	return err;
664 }
665 
666 /**
667  * idpf_recv_mb_msg - Receive message over mailbox
668  * @adapter: driver specific private structure
669  * @arq: control queue to receive message from
670  *
671  * Will receive control queue message and posts the receive buffer.
672  *
673  * Return: 0 on success and negative on failure.
674  */
675 int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq)
676 {
677 	struct idpf_ctlq_msg ctlq_msg;
678 	struct idpf_dma_mem *dma_mem;
679 	int post_err, err;
680 	u16 num_recv;
681 
682 	while (1) {
683 		/* This will get <= num_recv messages and report how many were
684 		 * actually received in num_recv.
685 		 */
686 		num_recv = 1;
687 		err = idpf_ctlq_recv(arq, &num_recv, &ctlq_msg);
688 		if (err || !num_recv)
689 			break;
690 
691 		if (ctlq_msg.data_len) {
692 			dma_mem = ctlq_msg.ctx.indirect.payload;
693 		} else {
694 			dma_mem = NULL;
695 			num_recv = 0;
696 		}
697 
698 		if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
699 			idpf_recv_event_msg(adapter, &ctlq_msg);
700 		else
701 			err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
702 
703 		post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, arq,
704 						   &num_recv, &dma_mem);
705 
706 		/* If the post failed, free the only buffer we supplied */
707 		if (post_err) {
708 			if (dma_mem)
709 				dma_free_coherent(&adapter->pdev->dev,
710 						  dma_mem->size, dma_mem->va,
711 						  dma_mem->pa);
712 			break;
713 		}
714 
715 		/* virtchnl trying to shutdown, stop cleaning */
716 		/* virtchnl trying to shut down, stop cleaning */
717 			break;
718 	}
719 
720 	return err;
721 }
722 
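/**
 * struct idpf_chunked_msg_params - parameters for a chunked virtchnl message
 * @prepare_msg: callback that writes the fixed message header plus the given
 *		 number of chunks into the send buffer and returns the total
 *		 size of the prepared message
 * @chunks: array of chunks describing the queues
 * @num_chunks: total number of chunks in @chunks
 * @chunk_sz: size of a single chunk in bytes
 * @config_sz: size of the fixed, per-message header preceding the chunks
 * @vc_op: virtchnl opcode to send
 * @vport_id: ID of the vport the queues belong to
 */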
723 struct idpf_chunked_msg_params {
724 	u32			(*prepare_msg)(u32 vport_id, void *buf,
725 					       const void *pos, u32 num);
726 
727 	const void		*chunks;
728 	u32			num_chunks;
729 
730 	u32			chunk_sz;
731 	u32			config_sz;
732 
733 	u32			vc_op;
734 	u32			vport_id;
735 };
736 
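/**
 * idpf_alloc_queue_set - allocate a queue set descriptor
 * @adapter: Driver specific private structure
 * @qv_rsrc: pointer to queue and vector resources the queues belong to
 * @vport_id: ID of the vport the queues belong to
 * @num: number of queue entries to allocate room for
 *
 * Return: pointer to the zero-initialized queue set, or NULL on allocation
 * failure.
 */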
737 struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
738 					    struct idpf_q_vec_rsrc *qv_rsrc,
739 					    u32 vport_id, u32 num)
740 {
741 	struct idpf_queue_set *qp;
742 
743 	qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL);
744 	if (!qp)
745 		return NULL;
746 
747 	qp->adapter = adapter;
748 	qp->qv_rsrc = qv_rsrc;
749 	qp->vport_id = vport_id;
750 	qp->num = num;
751 
752 	return qp;
753 }
754 
755 /**
756  * idpf_send_chunked_msg - send VC message consisting of chunks
757  * @adapter: Driver specific private structure
758  * @params: message params
759  *
760  * Helper function that splits the chunk array in @params into as many
761  * mailbox messages as needed, prepares each one and sends it.
762  *
763  * Return: 0 on success, -errno on failure.
764  */
765 static int idpf_send_chunked_msg(struct idpf_adapter *adapter,
766 				 const struct idpf_chunked_msg_params *params)
767 {
768 	struct idpf_vc_xn_params xn_params = {
769 		.vc_op		= params->vc_op,
770 		.timeout_ms	= IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
771 	};
772 	const void *pos = params->chunks;
773 	u32 num_chunks, num_msgs, buf_sz;
774 	void *buf __free(kfree) = NULL;
775 	u32 totqs = params->num_chunks;
776 	u32 vid = params->vport_id;
777 
778 	num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
779 						 params->chunk_sz), totqs);
780 	num_msgs = DIV_ROUND_UP(totqs, num_chunks);
781 
782 	buf_sz = params->config_sz + num_chunks * params->chunk_sz;
783 	buf = kzalloc(buf_sz, GFP_KERNEL);
784 	if (!buf)
785 		return -ENOMEM;
786 
787 	xn_params.send_buf.iov_base = buf;
788 
789 	for (u32 i = 0; i < num_msgs; i++) {
790 		ssize_t reply_sz;
791 
792 		memset(buf, 0, buf_sz);
793 		xn_params.send_buf.iov_len = buf_sz;
794 
795 		if (params->prepare_msg(vid, buf, pos, num_chunks) != buf_sz)
796 			return -EINVAL;
797 
798 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
799 		if (reply_sz < 0)
800 			return reply_sz;
801 
802 		pos += num_chunks * params->chunk_sz;
803 		totqs -= num_chunks;
804 
805 		num_chunks = min(num_chunks, totqs);
806 		buf_sz = params->config_sz + num_chunks * params->chunk_sz;
807 	}
808 
809 	return 0;
810 }
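/* Splitting sketch: if IDPF_NUM_CHUNKS_PER_MSG() allows P chunks per mailbox
 * message for the given config_sz/chunk_sz, then N total chunks are sent in
 * DIV_ROUND_UP(N, P) messages; every message but the last carries P chunks
 * and the last one carries whatever remains.
 */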
811 
812 /**
813  * idpf_wait_for_marker_event_set - wait for software marker response for
814  *				    selected Tx queues
815  * @qs: set of the Tx queues
816  *
817  * Return: 0 on success, -errno on failure.
818  */
819 static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
820 {
821 	struct net_device *netdev;
822 	struct idpf_tx_queue *txq;
823 	bool markers_rcvd = true;
824 
825 	for (u32 i = 0; i < qs->num; i++) {
826 		switch (qs->qs[i].type) {
827 		case VIRTCHNL2_QUEUE_TYPE_TX:
828 			txq = qs->qs[i].txq;
829 
830 			netdev = txq->netdev;
831 
832 			idpf_queue_set(SW_MARKER, txq);
833 			idpf_wait_for_sw_marker_completion(txq);
834 			markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
835 			break;
836 		default:
837 			break;
838 		}
839 	}
840 
841 	if (!markers_rcvd) {
842 		netdev_warn(netdev,
843 			    "Failed to receive marker packets\n");
844 		return -ETIMEDOUT;
845 	}
846 
847 	return 0;
848 }
849 
850 /**
851  * idpf_wait_for_marker_event - wait for software marker response
852  * @vport: virtual port data structure
853  *
854  * Return: 0 on success, negative on failure.
855  */
856 static int idpf_wait_for_marker_event(struct idpf_vport *vport)
857 {
858 	struct idpf_queue_set *qs __free(kfree) = NULL;
859 
860 	qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
861 				  vport->vport_id, vport->num_txq);
862 	if (!qs)
863 		return -ENOMEM;
864 
865 	for (u32 i = 0; i < qs->num; i++) {
866 		qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
867 		qs->qs[i].txq = vport->txqs[i];
868 	}
869 
870 	return idpf_wait_for_marker_event_set(qs);
871 }
872 
873 /**
874  * idpf_send_ver_msg - send virtchnl version message
875  * @adapter: Driver specific private structure
876  *
877  * Send virtchnl version message.  Returns 0 on success, negative on failure.
878  */
879 static int idpf_send_ver_msg(struct idpf_adapter *adapter)
880 {
881 	struct idpf_vc_xn_params xn_params = {};
882 	struct virtchnl2_version_info vvi;
883 	ssize_t reply_sz;
884 	u32 major, minor;
885 	int err = 0;
886 
887 	if (adapter->virt_ver_maj) {
888 		vvi.major = cpu_to_le32(adapter->virt_ver_maj);
889 		vvi.minor = cpu_to_le32(adapter->virt_ver_min);
890 	} else {
891 		vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
892 		vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
893 	}
894 
895 	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
896 	xn_params.send_buf.iov_base = &vvi;
897 	xn_params.send_buf.iov_len = sizeof(vvi);
898 	xn_params.recv_buf = xn_params.send_buf;
899 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
900 
901 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
902 	if (reply_sz < 0)
903 		return reply_sz;
904 	if (reply_sz < sizeof(vvi))
905 		return -EIO;
906 
907 	major = le32_to_cpu(vvi.major);
908 	minor = le32_to_cpu(vvi.minor);
909 
910 	if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
911 		dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
912 		return -EINVAL;
913 	}
914 
915 	if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
916 	    minor > IDPF_VIRTCHNL_VERSION_MINOR)
917 		dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
918 
919 	/* If we have a mismatch, resend version to update receiver on what
920 	 * version we will use.
921 	 */
922 	if (!adapter->virt_ver_maj &&
923 	    major != IDPF_VIRTCHNL_VERSION_MAJOR &&
924 	    minor != IDPF_VIRTCHNL_VERSION_MINOR)
925 		err = -EAGAIN;
926 
927 	adapter->virt_ver_maj = major;
928 	adapter->virt_ver_min = minor;
929 
930 	return err;
931 }
932 
933 /**
934  * idpf_send_get_caps_msg - Send virtchnl get capabilities message
935  * @adapter: Driver specific private structure
936  *
937  * Send virtchnl get capabilities message. Returns 0 on success, negative on
938  * failure.
939  */
940 static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
941 {
942 	struct virtchnl2_get_capabilities caps = {};
943 	struct idpf_vc_xn_params xn_params = {};
944 	ssize_t reply_sz;
945 
946 	caps.csum_caps =
947 		cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4	|
948 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|
949 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	|
950 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|
951 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|
952 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	|
953 			    VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|
954 			    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4	|
955 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|
956 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|
957 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|
958 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|
959 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	|
960 			    VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	|
961 			    VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
962 			    VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
963 			    VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
964 			    VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
965 			    VIRTCHNL2_CAP_RX_CSUM_GENERIC);
966 
967 	caps.seg_caps =
968 		cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP		|
969 			    VIRTCHNL2_CAP_SEG_IPV4_UDP		|
970 			    VIRTCHNL2_CAP_SEG_IPV4_SCTP		|
971 			    VIRTCHNL2_CAP_SEG_IPV6_TCP		|
972 			    VIRTCHNL2_CAP_SEG_IPV6_UDP		|
973 			    VIRTCHNL2_CAP_SEG_IPV6_SCTP		|
974 			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
975 
976 	caps.rss_caps =
977 		cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP		|
978 			    VIRTCHNL2_FLOW_IPV4_UDP		|
979 			    VIRTCHNL2_FLOW_IPV4_SCTP		|
980 			    VIRTCHNL2_FLOW_IPV4_OTHER		|
981 			    VIRTCHNL2_FLOW_IPV6_TCP		|
982 			    VIRTCHNL2_FLOW_IPV6_UDP		|
983 			    VIRTCHNL2_FLOW_IPV6_SCTP		|
984 			    VIRTCHNL2_FLOW_IPV6_OTHER);
985 
986 	caps.hsplit_caps =
987 		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|
988 			    VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);
989 
990 	caps.rsc_caps =
991 		cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP		|
992 			    VIRTCHNL2_CAP_RSC_IPV6_TCP);
993 
994 	caps.other_caps =
995 		cpu_to_le64(VIRTCHNL2_CAP_SRIOV			|
996 			    VIRTCHNL2_CAP_RDMA                  |
997 			    VIRTCHNL2_CAP_LAN_MEMORY_REGIONS	|
998 			    VIRTCHNL2_CAP_MACFILTER		|
999 			    VIRTCHNL2_CAP_SPLITQ_QSCHED		|
1000 			    VIRTCHNL2_CAP_PROMISC		|
1001 			    VIRTCHNL2_CAP_LOOPBACK		|
1002 			    VIRTCHNL2_CAP_PTP);
1003 
1004 	xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
1005 	xn_params.send_buf.iov_base = &caps;
1006 	xn_params.send_buf.iov_len = sizeof(caps);
1007 	xn_params.recv_buf.iov_base = &adapter->caps;
1008 	xn_params.recv_buf.iov_len = sizeof(adapter->caps);
1009 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1010 
1011 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1012 	if (reply_sz < 0)
1013 		return reply_sz;
1014 	if (reply_sz < sizeof(adapter->caps))
1015 		return -EIO;
1016 
1017 	return 0;
1018 }
1019 
1020 /**
1021  * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
1022  * @adapter: Driver specific private struct
1023  *
1024  * Return: 0 on success or error code on failure.
1025  */
1026 static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
1027 {
1028 	struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree) = NULL;
1029 	struct idpf_vc_xn_params xn_params = {
1030 		.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
1031 		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
1032 		.send_buf.iov_len =
1033 			sizeof(struct virtchnl2_get_lan_memory_regions) +
1034 			sizeof(struct virtchnl2_mem_region),
1035 		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
1036 	};
1037 	int num_regions, size;
1038 	struct idpf_hw *hw;
1039 	ssize_t reply_sz;
1040 	int err = 0;
1041 
1042 	rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
1043 	if (!rcvd_regions)
1044 		return -ENOMEM;
1045 
1046 	xn_params.recv_buf.iov_base = rcvd_regions;
1047 	rcvd_regions->num_memory_regions = cpu_to_le16(1);
1048 	xn_params.send_buf.iov_base = rcvd_regions;
1049 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1050 	if (reply_sz < 0)
1051 		return reply_sz;
1052 
1053 	num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
1054 	size = struct_size(rcvd_regions, mem_reg, num_regions);
1055 	if (reply_sz < size)
1056 		return -EIO;
1057 
1058 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
1059 		return -EINVAL;
1060 
1061 	hw = &adapter->hw;
1062 	hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
1063 	if (!hw->lan_regs)
1064 		return -ENOMEM;
1065 
1066 	for (int i = 0; i < num_regions; i++) {
1067 		hw->lan_regs[i].addr_len =
1068 			le64_to_cpu(rcvd_regions->mem_reg[i].size);
1069 		hw->lan_regs[i].addr_start =
1070 			le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
1071 	}
1072 	hw->num_lan_regs = num_regions;
1073 
1074 	return err;
1075 }
1076 
1077 /**
1078  * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
1079  * @adapter: Driver specific private structure
1080  *
1081  * Called when idpf_send_get_lan_memory_regions is not supported. This will
1082  * calculate the offsets and sizes for the regions before, in between, and
1083  * after the mailbox and rstat MMIO mappings.
1084  *
1085  * Return: 0 on success or error code on failure.
1086  */
1087 static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
1088 {
1089 	struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
1090 	struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
1091 	struct idpf_hw *hw = &adapter->hw;
1092 
1093 	hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
1094 	hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
1095 			       GFP_KERNEL);
1096 	if (!hw->lan_regs)
1097 		return -ENOMEM;
1098 
1099 	/* Region preceding mailbox */
1100 	hw->lan_regs[0].addr_start = 0;
1101 	hw->lan_regs[0].addr_len = mbx_reg->start;
1102 	/* Region between mailbox and rstat */
1103 	hw->lan_regs[1].addr_start = mbx_reg->end + 1;
1104 	hw->lan_regs[1].addr_len = rstat_reg->start -
1105 					hw->lan_regs[1].addr_start;
1106 	/* Region after rstat */
1107 	hw->lan_regs[2].addr_start = rstat_reg->end + 1;
1108 	hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
1109 					hw->lan_regs[2].addr_start;
1110 
1111 	return 0;
1112 }
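/* Resulting fallback BAR0 split (a sketch; the actual offsets come from
 * dev_ops.static_reg_info):
 *
 *	lan_regs[0]: [0, mbx_reg->start)
 *	lan_regs[1]: [mbx_reg->end + 1, rstat_reg->start)
 *	lan_regs[2]: [rstat_reg->end + 1, pci_resource_len(pdev, 0))
 */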
1113 
1114 /**
1115  * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
1116  * @adapter: Driver specific private structure
1117  *
1118  * Return: 0 on success or error code on failure.
1119  */
1120 static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
1121 {
1122 	struct pci_dev *pdev = adapter->pdev;
1123 	struct idpf_hw *hw = &adapter->hw;
1124 	resource_size_t res_start;
1125 
1126 	res_start = pci_resource_start(pdev, 0);
1127 
1128 	for (int i = 0; i < hw->num_lan_regs; i++) {
1129 		resource_size_t start;
1130 		long len;
1131 
1132 		len = hw->lan_regs[i].addr_len;
1133 		if (!len)
1134 			continue;
1135 		start = hw->lan_regs[i].addr_start + res_start;
1136 
1137 		hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
1138 		if (!hw->lan_regs[i].vaddr) {
1139 			pci_err(pdev, "failed to allocate BAR0 region\n");
1140 			return -ENOMEM;
1141 		}
1142 	}
1143 
1144 	return 0;
1145 }
1146 
1147 /**
1148  * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
1149  * @adapter: adapter info struct
1150  * @rule: Flow steering rule to add/delete
1151  * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
1152  *          VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
1153  *
1154  * Send ADD/DELETE flow steering virtchnl message and receive the result.
1155  *
1156  * Return: 0 on success, negative on failure.
1157  */
1158 int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
1159 				struct virtchnl2_flow_rule_add_del *rule,
1160 				enum virtchnl2_op opcode)
1161 {
1162 	int rule_count = le32_to_cpu(rule->count);
1163 	struct idpf_vc_xn_params xn_params = {};
1164 	ssize_t reply_sz;
1165 
1166 	if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
1167 	    opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
1168 		return -EINVAL;
1169 
1170 	xn_params.vc_op = opcode;
1171 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1172 	xn_params.async = false;
1173 	xn_params.send_buf.iov_base = rule;
1174 	xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
1175 	xn_params.recv_buf.iov_base = rule;
1176 	xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);
1177 
1178 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1179 	return reply_sz < 0 ? reply_sz : 0;
1180 }
1181 
1182 /**
1183  * idpf_vport_alloc_max_qs - Allocate max queues for a vport
1184  * @adapter: Driver specific private structure
1185  * @max_q: vport max queue structure
1186  */
1187 int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
1188 			    struct idpf_vport_max_q *max_q)
1189 {
1190 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1191 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1192 	u16 default_vports = idpf_get_default_vports(adapter);
1193 	u32 max_rx_q, max_tx_q, max_buf_q, max_compl_q;
1194 
1195 	mutex_lock(&adapter->queue_lock);
1196 
1197 	/* Caps are device-wide. Give each vport an equal piece */
1198 	max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
1199 	max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
1200 	max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports;
1201 	max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports;
1202 
1203 	if (adapter->num_alloc_vports >= default_vports) {
1204 		max_rx_q = IDPF_MIN_Q;
1205 		max_tx_q = IDPF_MIN_Q;
1206 	}
1207 
1208 	/*
1209 	 * Harmonize the numbers. The current implementation always creates
1210 	 * `IDPF_MAX_BUFQS_PER_RXQ_GRP` buffer queues for each Rx queue and
1211 	 * one completion queue for each Tx queue for best performance.
1212 	 * If fewer buffer or completion queues are available, cap the number
1213 	 * of the corresponding Rx/Tx queues.
1214 	 */
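	/* Example, assuming IDPF_MAX_BUFQS_PER_RXQ_GRP == 2: with 64 Rx
	 * queues and 64 buffer queues available per vport, max_rx_q is
	 * capped to 64 / 2 = 32 so that every Rx queue still gets its two
	 * buffer queues (max_bufq = 32 * 2 = 64).
	 */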
1215 	max_rx_q = min(max_rx_q, max_buf_q / IDPF_MAX_BUFQS_PER_RXQ_GRP);
1216 	max_tx_q = min(max_tx_q, max_compl_q);
1217 
1218 	max_q->max_rxq = max_rx_q;
1219 	max_q->max_txq = max_tx_q;
1220 	max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP;
1221 	max_q->max_complq = max_tx_q;
1222 
1223 	if (avail_queues->avail_rxq < max_q->max_rxq ||
1224 	    avail_queues->avail_txq < max_q->max_txq ||
1225 	    avail_queues->avail_bufq < max_q->max_bufq ||
1226 	    avail_queues->avail_complq < max_q->max_complq) {
1227 		mutex_unlock(&adapter->queue_lock);
1228 
1229 		return -EINVAL;
1230 	}
1231 
1232 	avail_queues->avail_rxq -= max_q->max_rxq;
1233 	avail_queues->avail_txq -= max_q->max_txq;
1234 	avail_queues->avail_bufq -= max_q->max_bufq;
1235 	avail_queues->avail_complq -= max_q->max_complq;
1236 
1237 	mutex_unlock(&adapter->queue_lock);
1238 
1239 	return 0;
1240 }
1241 
1242 /**
1243  * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
1244  * @adapter: Driver specific private structure
1245  * @max_q: vport max queue structure
1246  */
1247 void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
1248 			       struct idpf_vport_max_q *max_q)
1249 {
1250 	struct idpf_avail_queue_info *avail_queues;
1251 
1252 	mutex_lock(&adapter->queue_lock);
1253 	avail_queues = &adapter->avail_queues;
1254 
1255 	avail_queues->avail_rxq += max_q->max_rxq;
1256 	avail_queues->avail_txq += max_q->max_txq;
1257 	avail_queues->avail_bufq += max_q->max_bufq;
1258 	avail_queues->avail_complq += max_q->max_complq;
1259 
1260 	mutex_unlock(&adapter->queue_lock);
1261 }
1262 
1263 /**
1264  * idpf_init_avail_queues - Initialize available queues on the device
1265  * @adapter: Driver specific private structure
1266  */
1267 static void idpf_init_avail_queues(struct idpf_adapter *adapter)
1268 {
1269 	struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
1270 	struct virtchnl2_get_capabilities *caps = &adapter->caps;
1271 
1272 	avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
1273 	avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
1274 	avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
1275 	avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
1276 }
1277 
1278 /**
1279  * idpf_vport_init_queue_reg_chunks - initialize queue register chunks
1280  * @vport_config: persistent vport structure to store the queue register info
1281  * @schunks: source chunks to copy data from
1282  *
1283  * Return: 0 on success, negative on failure.
1284  */
1285 static int
1286 idpf_vport_init_queue_reg_chunks(struct idpf_vport_config *vport_config,
1287 				 struct virtchnl2_queue_reg_chunks *schunks)
1288 {
1289 	struct idpf_queue_id_reg_info *q_info = &vport_config->qid_reg_info;
1290 	u16 num_chunks = le16_to_cpu(schunks->num_chunks);
1291 
1292 	kfree(q_info->queue_chunks);
1293 
1294 	q_info->queue_chunks = kcalloc(num_chunks, sizeof(*q_info->queue_chunks),
1295 				       GFP_KERNEL);
1296 	if (!q_info->queue_chunks) {
1297 		q_info->num_chunks = 0;
1298 		return -ENOMEM;
1299 	}
1300 
1301 	q_info->num_chunks = num_chunks;
1302 
1303 	for (u16 i = 0; i < num_chunks; i++) {
1304 		struct idpf_queue_id_reg_chunk *dchunk = &q_info->queue_chunks[i];
1305 		struct virtchnl2_queue_reg_chunk *schunk = &schunks->chunks[i];
1306 
1307 		dchunk->qtail_reg_start = le64_to_cpu(schunk->qtail_reg_start);
1308 		dchunk->qtail_reg_spacing = le32_to_cpu(schunk->qtail_reg_spacing);
1309 		dchunk->type = le32_to_cpu(schunk->type);
1310 		dchunk->start_queue_id = le32_to_cpu(schunk->start_queue_id);
1311 		dchunk->num_queues = le32_to_cpu(schunk->num_queues);
1312 	}
1313 
1314 	return 0;
1315 }
1316 
1317 /**
1318  * idpf_get_reg_intr_vecs - Get vector queue register offset
1319  * @adapter: adapter structure to get the vector chunks
1320  * @reg_vals: Register offsets to store in
1321  *
1322  * Return: number of registers that got populated
1323  */
1324 int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
1325 			   struct idpf_vec_regs *reg_vals)
1326 {
1327 	struct virtchnl2_vector_chunks *chunks;
1328 	struct idpf_vec_regs reg_val;
1329 	u16 num_vchunks, num_vec;
1330 	int num_regs = 0, i, j;
1331 
1332 	chunks = &adapter->req_vec_chunks->vchunks;
1333 	num_vchunks = le16_to_cpu(chunks->num_vchunks);
1334 
1335 	for (j = 0; j < num_vchunks; j++) {
1336 		struct virtchnl2_vector_chunk *chunk;
1337 		u32 dynctl_reg_spacing;
1338 		u32 itrn_reg_spacing;
1339 
1340 		chunk = &chunks->vchunks[j];
1341 		num_vec = le16_to_cpu(chunk->num_vectors);
1342 		reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
1343 		reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
1344 		reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);
1345 
1346 		dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
1347 		itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);
1348 
1349 		for (i = 0; i < num_vec; i++) {
1350 			reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
1351 			reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
1352 			reg_vals[num_regs].itrn_index_spacing =
1353 						reg_val.itrn_index_spacing;
1354 
1355 			reg_val.dyn_ctl_reg += dynctl_reg_spacing;
1356 			reg_val.itrn_reg += itrn_reg_spacing;
1357 			num_regs++;
1358 		}
1359 	}
1360 
1361 	return num_regs;
1362 }
1363 
1364 /**
1365  * idpf_vport_get_q_reg - Get the queue registers for the vport
1366  * @reg_vals: register values needing to be set
1367  * @num_regs: amount we expect to fill
1368  * @q_type: queue model
1369  * @chunks: queue regs received over mailbox
1370  *
1371  * This function parses the queue register offsets from the queue register
1372  * chunk information for the given queue type and stores them in the array
1373  * passed as an argument. It returns the actual number of queue registers
1374  * that are filled.
1375  */
1376 static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
1377 				struct idpf_queue_id_reg_info *chunks)
1378 {
1379 	u16 num_chunks = chunks->num_chunks;
1380 	int reg_filled = 0, i;
1381 	u32 reg_val;
1382 
1383 	while (num_chunks--) {
1384 		struct idpf_queue_id_reg_chunk *chunk;
1385 		u16 num_q;
1386 
1387 		chunk = &chunks->queue_chunks[num_chunks];
1388 		if (chunk->type != q_type)
1389 			continue;
1390 
1391 		num_q = chunk->num_queues;
1392 		reg_val = chunk->qtail_reg_start;
1393 		for (i = 0; i < num_q && reg_filled < num_regs; i++) {
1394 			reg_vals[reg_filled++] = reg_val;
1395 			reg_val += chunk->qtail_reg_spacing;
1396 		}
1397 	}
1398 
1399 	return reg_filled;
1400 }
1401 
1402 /**
1403  * __idpf_queue_reg_init - initialize queue registers
1404  * @vport: virtual port structure
1405  * @rsrc: pointer to queue and vector resources
1406  * @reg_vals: registers we are initializing
1407  * @num_regs: how many registers there are in total
1408  * @q_type: queue type
1409  *
1410  * Return: number of queues that are initialized
1411  */
1412 static int __idpf_queue_reg_init(struct idpf_vport *vport,
1413 				 struct idpf_q_vec_rsrc *rsrc, u32 *reg_vals,
1414 				 int num_regs, u32 q_type)
1415 {
1416 	struct idpf_adapter *adapter = vport->adapter;
1417 	int i, j, k = 0;
1418 
1419 	switch (q_type) {
1420 	case VIRTCHNL2_QUEUE_TYPE_TX:
1421 		for (i = 0; i < rsrc->num_txq_grp; i++) {
1422 			struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
1423 
1424 			for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
1425 				tx_qgrp->txqs[j]->tail =
1426 					idpf_get_reg_addr(adapter, reg_vals[k]);
1427 		}
1428 		break;
1429 	case VIRTCHNL2_QUEUE_TYPE_RX:
1430 		for (i = 0; i < rsrc->num_rxq_grp; i++) {
1431 			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
1432 			u16 num_rxq = rx_qgrp->singleq.num_rxq;
1433 
1434 			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
1435 				struct idpf_rx_queue *q;
1436 
1437 				q = rx_qgrp->singleq.rxqs[j];
1438 				q->tail = idpf_get_reg_addr(adapter,
1439 							    reg_vals[k]);
1440 			}
1441 		}
1442 		break;
1443 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
1444 		for (i = 0; i < rsrc->num_rxq_grp; i++) {
1445 			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
1446 			u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
1447 
1448 			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
1449 				struct idpf_buf_queue *q;
1450 
1451 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1452 				q->tail = idpf_get_reg_addr(adapter,
1453 							    reg_vals[k]);
1454 			}
1455 		}
1456 		break;
1457 	default:
1458 		break;
1459 	}
1460 
1461 	return k;
1462 }
1463 
1464 /**
1465  * idpf_queue_reg_init - initialize queue registers
1466  * @vport: virtual port structure
1467  * @rsrc: pointer to queue and vector resources
1468  * @chunks: queue registers received over mailbox
1469  *
1470  * Return: 0 on success, negative on failure
1471  */
1472 int idpf_queue_reg_init(struct idpf_vport *vport,
1473 			struct idpf_q_vec_rsrc *rsrc,
1474 			struct idpf_queue_id_reg_info *chunks)
1475 {
1476 	int num_regs, ret = 0;
1477 	u32 *reg_vals;
1478 
1479 	/* We may never deal with more than 256 queues of the same type */
1480 	reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
1481 	if (!reg_vals)
1482 		return -ENOMEM;
1483 
1484 	/* Initialize Tx queue tail register address */
1485 	num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1486 					VIRTCHNL2_QUEUE_TYPE_TX,
1487 					chunks);
1488 	if (num_regs < rsrc->num_txq) {
1489 		ret = -EINVAL;
1490 		goto free_reg_vals;
1491 	}
1492 
1493 	num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
1494 					 VIRTCHNL2_QUEUE_TYPE_TX);
1495 	if (num_regs < rsrc->num_txq) {
1496 		ret = -EINVAL;
1497 		goto free_reg_vals;
1498 	}
1499 
1500 	/* Initialize Rx/buffer queue tail register address based on Rx queue
1501 	 * model
1502 	 */
1503 	if (idpf_is_queue_model_split(rsrc->rxq_model)) {
1504 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1505 						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
1506 						chunks);
1507 		if (num_regs < rsrc->num_bufq) {
1508 			ret = -EINVAL;
1509 			goto free_reg_vals;
1510 		}
1511 
1512 		num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
1513 						 VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1514 		if (num_regs < rsrc->num_bufq) {
1515 			ret = -EINVAL;
1516 			goto free_reg_vals;
1517 		}
1518 	} else {
1519 		num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
1520 						VIRTCHNL2_QUEUE_TYPE_RX,
1521 						chunks);
1522 		if (num_regs < rsrc->num_rxq) {
1523 			ret = -EINVAL;
1524 			goto free_reg_vals;
1525 		}
1526 
1527 		num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
1528 						 VIRTCHNL2_QUEUE_TYPE_RX);
1529 		if (num_regs < rsrc->num_rxq) {
1530 			ret = -EINVAL;
1531 			goto free_reg_vals;
1532 		}
1533 	}
1534 
1535 free_reg_vals:
1536 	kfree(reg_vals);
1537 
1538 	return ret;
1539 }
1540 
1541 /**
1542  * idpf_send_create_vport_msg - Send virtchnl create vport message
1543  * @adapter: Driver specific private structure
1544  * @max_q: vport max queue info
1545  *
1546  * Send virtchnl create vport message
1547  *
1548  * Returns 0 on success, negative on failure
1549  */
1550 int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
1551 			       struct idpf_vport_max_q *max_q)
1552 {
1553 	struct virtchnl2_create_vport *vport_msg;
1554 	struct idpf_vc_xn_params xn_params = {};
1555 	u16 idx = adapter->next_vport;
1556 	int err, buf_size;
1557 	ssize_t reply_sz;
1558 
1559 	buf_size = sizeof(struct virtchnl2_create_vport);
1560 	if (!adapter->vport_params_reqd[idx]) {
1561 		adapter->vport_params_reqd[idx] = kzalloc(buf_size,
1562 							  GFP_KERNEL);
1563 		if (!adapter->vport_params_reqd[idx])
1564 			return -ENOMEM;
1565 	}
1566 
1567 	vport_msg = adapter->vport_params_reqd[idx];
1568 	vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
1569 	vport_msg->vport_index = cpu_to_le16(idx);
1570 
1571 	if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
1572 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1573 	else
1574 		vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1575 
1576 	if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
1577 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
1578 	else
1579 		vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
1580 
1581 	err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
1582 	if (err) {
1583 		dev_err(&adapter->pdev->dev, "Not enough queues are available\n");
1584 
1585 		return err;
1586 	}
1587 
1588 	if (!adapter->vport_params_recvd[idx]) {
1589 		adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
1590 							   GFP_KERNEL);
1591 		if (!adapter->vport_params_recvd[idx]) {
1592 			err = -ENOMEM;
1593 			goto free_vport_params;
1594 		}
1595 	}
1596 
1597 	xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
1598 	xn_params.send_buf.iov_base = vport_msg;
1599 	xn_params.send_buf.iov_len = buf_size;
1600 	xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
1601 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
1602 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1603 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1604 	if (reply_sz < 0) {
1605 		err = reply_sz;
1606 		goto free_vport_params;
1607 	}
1608 
1609 	return 0;
1610 
1611 free_vport_params:
1612 	kfree(adapter->vport_params_recvd[idx]);
1613 	adapter->vport_params_recvd[idx] = NULL;
1614 	kfree(adapter->vport_params_reqd[idx]);
1615 	adapter->vport_params_reqd[idx] = NULL;
1616 
1617 	return err;
1618 }
1619 
1620 /**
1621  * idpf_check_supported_desc_ids - Verify we have required descriptor support
1622  * @vport: virtual port structure
1623  *
1624  * Return 0 on success, error on failure
1625  */
1626 int idpf_check_supported_desc_ids(struct idpf_vport *vport)
1627 {
1628 	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
1629 	struct idpf_adapter *adapter = vport->adapter;
1630 	struct virtchnl2_create_vport *vport_msg;
1631 	u64 rx_desc_ids, tx_desc_ids;
1632 
1633 	vport_msg = adapter->vport_params_recvd[vport->idx];
1634 
1635 	if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
1636 	    (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
1637 	     vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
1638 		pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
1639 		return -EOPNOTSUPP;
1640 	}
1641 
1642 	rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
1643 	tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
1644 
1645 	if (idpf_is_queue_model_split(rsrc->rxq_model)) {
1646 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
1647 			dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
1648 			vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1649 		}
1650 	} else {
1651 		if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
1652 			rsrc->base_rxd = true;
1653 	}
1654 
1655 	if (!idpf_is_queue_model_split(rsrc->txq_model))
1656 		return 0;
1657 
1658 	if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
1659 		dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
1660 		vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
1661 	}
1662 
1663 	return 0;
1664 }
1665 
1666 /**
1667  * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
1668  * @adapter: adapter pointer used to send virtchnl message
1669  * @vport_id: vport identifier used while preparing the virtchnl message
1670  *
1671  * Return: 0 on success, negative on failure.
1672  */
1673 int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
1674 {
1675 	struct idpf_vc_xn_params xn_params = {};
1676 	struct virtchnl2_vport v_id;
1677 	ssize_t reply_sz;
1678 
1679 	v_id.vport_id = cpu_to_le32(vport_id);
1680 
1681 	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
1682 	xn_params.send_buf.iov_base = &v_id;
1683 	xn_params.send_buf.iov_len = sizeof(v_id);
1684 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1685 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1686 
1687 	return reply_sz < 0 ? reply_sz : 0;
1688 }
1689 
1690 /**
1691  * idpf_send_enable_vport_msg - Send virtchnl enable vport message
1692  * @adapter: adapter pointer used to send virtchnl message
1693  * @vport_id: vport identifier used while preparing the virtchnl message
1694  *
1695  * Return: 0 on success, negative on failure.
1696  */
1697 int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
1698 {
1699 	struct idpf_vc_xn_params xn_params = {};
1700 	struct virtchnl2_vport v_id;
1701 	ssize_t reply_sz;
1702 
1703 	v_id.vport_id = cpu_to_le32(vport_id);
1704 
1705 	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
1706 	xn_params.send_buf.iov_base = &v_id;
1707 	xn_params.send_buf.iov_len = sizeof(v_id);
1708 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1709 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1710 
1711 	return reply_sz < 0 ? reply_sz : 0;
1712 }
1713 
1714 /**
1715  * idpf_send_disable_vport_msg - Send virtchnl disable vport message
1716  * @adapter: adapter pointer used to send virtchnl message
1717  * @vport_id: vport identifier used while preparing the virtchnl message
1718  *
1719  * Return: 0 on success, negative on failure.
1720  */
1721 int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
1722 {
1723 	struct idpf_vc_xn_params xn_params = {};
1724 	struct virtchnl2_vport v_id;
1725 	ssize_t reply_sz;
1726 
1727 	v_id.vport_id = cpu_to_le32(vport_id);
1728 
1729 	xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
1730 	xn_params.send_buf.iov_base = &v_id;
1731 	xn_params.send_buf.iov_len = sizeof(v_id);
1732 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
1733 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
1734 
1735 	return reply_sz < 0 ? reply_sz : 0;
1736 }
1737 
1738 /**
1739  * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
1740  * @rsrc: pointer to queue and vector resources
1741  * @q: Tx queue to be inserted into VC chunk
1742  * @qi: pointer to the buffer containing the VC chunk
1743  */
1744 static void idpf_fill_txq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
1745 				       const struct idpf_tx_queue *q,
1746 				       struct virtchnl2_txq_info *qi)
1747 {
1748 	u32 val;
1749 
1750 	qi->queue_id = cpu_to_le32(q->q_id);
1751 	qi->model = cpu_to_le16(rsrc->txq_model);
1752 	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
1753 	qi->ring_len = cpu_to_le16(q->desc_count);
1754 	qi->dma_ring_addr = cpu_to_le64(q->dma);
1755 	qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
1756 
1757 	if (!idpf_is_queue_model_split(rsrc->txq_model)) {
1758 		qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
1759 		return;
1760 	}
1761 
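	/* In splitq mode, report the completion queue backing this Tx queue:
	 * XDP Tx queues carry their own complq pointer, while regular queues
	 * reach it through their txq group.
	 */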
1762 	if (idpf_queue_has(XDP, q))
1763 		val = q->complq->q_id;
1764 	else
1765 		val = q->txq_grp->complq->q_id;
1766 
1767 	qi->tx_compl_queue_id = cpu_to_le16(val);
1768 
1769 	if (idpf_queue_has(FLOW_SCH_EN, q))
1770 		val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1771 	else
1772 		val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1773 
1774 	qi->sched_mode = cpu_to_le16(val);
1775 }
1776 
1777 /**
1778  * idpf_fill_complq_config_chunk - fill chunk describing the completion queue
1779  * @rsrc: pointer to queue and vector resources
1780  * @q: completion queue to be inserted into VC chunk
1781  * @qi: pointer to the buffer containing the VC chunk
1782  */
1783 static void idpf_fill_complq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
1784 					  const struct idpf_compl_queue *q,
1785 					  struct virtchnl2_txq_info *qi)
1786 {
1787 	u32 val;
1788 
1789 	qi->queue_id = cpu_to_le32(q->q_id);
1790 	qi->model = cpu_to_le16(rsrc->txq_model);
1791 	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
1792 	qi->ring_len = cpu_to_le16(q->desc_count);
1793 	qi->dma_ring_addr = cpu_to_le64(q->dma);
1794 
1795 	if (idpf_queue_has(FLOW_SCH_EN, q))
1796 		val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
1797 	else
1798 		val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
1799 
1800 	qi->sched_mode = cpu_to_le16(val);
1801 }
1802 
1803 /**
1804  * idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
1805  * @vport_id: ID of virtual port queues are associated with
1806  * @pos: pointer to the first chunk describing the Tx queue
1807  * @pos: pointer to the first chunk describing the tx queue
1808  * @num_chunks: number of chunks in the message
1809  *
1810  * Helper function for preparing the message describing configuration of
1811  * Tx queues.
1812  *
1813  * Return: the total size of the prepared message.
1814  */
1815 static u32 idpf_prepare_cfg_txqs_msg(u32 vport_id, void *buf, const void *pos,
1816 				     u32 num_chunks)
1817 {
1818 	struct virtchnl2_config_tx_queues *ctq = buf;
1819 
1820 	ctq->vport_id = cpu_to_le32(vport_id);
1821 	ctq->num_qinfo = cpu_to_le16(num_chunks);
1822 	memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
1823 
1824 	return struct_size(ctq, qinfo, num_chunks);
1825 }
1826 
1827 /**
1828  * idpf_send_config_tx_queue_set_msg - send virtchnl config Tx queues
1829  *				       message for selected queues
1830  * @qs: set of the Tx queues to configure
1831  *
1832  * Send config queues virtchnl message for queues contained in the @qs array.
1833  * The @qs array can contain Tx queues (or completion queues) only.
1834  *
1835  * Return: 0 on success, -errno on failure.
1836  */
1837 static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
1838 {
1839 	struct virtchnl2_txq_info *qi __free(kfree) = NULL;
1840 	struct idpf_chunked_msg_params params = {
1841 		.vport_id	= qs->vport_id,
1842 		.vc_op		= VIRTCHNL2_OP_CONFIG_TX_QUEUES,
1843 		.prepare_msg	= idpf_prepare_cfg_txqs_msg,
1844 		.config_sz	= sizeof(struct virtchnl2_config_tx_queues),
1845 		.chunk_sz	= sizeof(*qi),
1846 	};
1847 
1848 	qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
1849 	if (!qi)
1850 		return -ENOMEM;
1851 
1852 	params.chunks = qi;
1853 
1854 	for (u32 i = 0; i < qs->num; i++) {
1855 		if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
1856 			idpf_fill_txq_config_chunk(qs->qv_rsrc, qs->qs[i].txq,
1857 						   &qi[params.num_chunks++]);
1858 		else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
1859 			idpf_fill_complq_config_chunk(qs->qv_rsrc,
1860 						      qs->qs[i].complq,
1861 						      &qi[params.num_chunks++]);
1862 	}
1863 
1864 	return idpf_send_chunked_msg(qs->adapter, &params);
1865 }
1866 
1867 /**
1868  * idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
1869  * @adapter: adapter pointer used to send virtchnl message
1870  * @rsrc: pointer to queue and vector resources
1871  * @vport_id: vport identifier used while preparing the virtchnl message
1872  *
1873  * Return: 0 on success, -errno on failure.
1874  */
1875 static int idpf_send_config_tx_queues_msg(struct idpf_adapter *adapter,
1876 					  struct idpf_q_vec_rsrc *rsrc,
1877 					  u32 vport_id)
1878 {
1879 	struct idpf_queue_set *qs __free(kfree) = NULL;
1880 	u32 totqs = rsrc->num_txq + rsrc->num_complq;
1881 	u32 k = 0;
1882 
1883 	qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
1884 	if (!qs)
1885 		return -ENOMEM;
1886 
1887 	/* Populate the queue info buffer with all queue context info */
1888 	for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
1889 		const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
1890 
1891 		for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
1892 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
1893 			qs->qs[k++].txq = tx_qgrp->txqs[j];
1894 		}
1895 
1896 		if (idpf_is_queue_model_split(rsrc->txq_model)) {
1897 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
1898 			qs->qs[k++].complq = tx_qgrp->complq;
1899 		}
1900 	}
1901 
1902 	/* Make sure accounting agrees */
1903 	if (k != totqs)
1904 		return -EINVAL;
1905 
1906 	return idpf_send_config_tx_queue_set_msg(qs);
1907 }
1908 
1909 /**
1910  * idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
1911  * @rsrc: pointer to queue and vector resources
1912  * @q: Rx queue to be inserted into VC chunk
1913  * @qi: pointer to the buffer containing the VC chunk
1914  */
1915 static void idpf_fill_rxq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
1916 				       struct idpf_rx_queue *q,
1917 				       struct virtchnl2_rxq_info *qi)
1918 {
1919 	const struct idpf_bufq_set *sets;
1920 
1921 	qi->queue_id = cpu_to_le32(q->q_id);
1922 	qi->model = cpu_to_le16(rsrc->rxq_model);
1923 	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
1924 	qi->ring_len = cpu_to_le16(q->desc_count);
1925 	qi->dma_ring_addr = cpu_to_le64(q->dma);
1926 	qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
1927 	qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
1928 	qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
1929 	if (idpf_queue_has(RSC_EN, q))
1930 		qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1931 
1932 	if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
1933 		qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
1934 		qi->desc_ids = cpu_to_le64(q->rxdids);
1935 
1936 		return;
1937 	}
1938 
1939 	sets = q->bufq_sets;
1940 
1941 	/* In splitq mode, RxQ buffer size should be set to that of the first
1943 	 * buffer queue associated with this RxQ.
1944 	 */
1945 	q->rx_buf_size = sets[0].bufq.rx_buf_size;
1946 	qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
1947 
1948 	qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
1949 	if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
1950 		qi->bufq2_ena = IDPF_BUFQ2_ENA;
1951 		qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
1952 	}
1953 
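	/* The header buffer size likewise comes from the first buffer queue,
	 * but header split is only advertised when it is enabled on the RxQ.
	 */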
1954 	q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
1955 
1956 	if (idpf_queue_has(HSPLIT_EN, q)) {
1957 		qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
1958 		qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
1959 	}
1960 
1961 	qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1962 }
1963 
1964 /**
1965  * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
1966  * @rsrc: pointer to queue and vector resources
1967  * @q: buffer queue to be inserted into VC chunk
1968  * @qi: pointer to the buffer containing the VC chunk
1969  */
1970 static void idpf_fill_bufq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
1971 					const struct idpf_buf_queue *q,
1972 					struct virtchnl2_rxq_info *qi)
1973 {
1974 	qi->queue_id = cpu_to_le32(q->q_id);
1975 	qi->model = cpu_to_le16(rsrc->rxq_model);
1976 	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
1977 	qi->ring_len = cpu_to_le16(q->desc_count);
1978 	qi->dma_ring_addr = cpu_to_le64(q->dma);
1979 	qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
1980 	qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
1981 	qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
1982 	qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
1983 	if (idpf_queue_has(RSC_EN, q))
1984 		qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
1985 
1986 	if (idpf_queue_has(HSPLIT_EN, q)) {
1987 		qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
1988 		qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
1989 	}
1990 }
1991 
1992 /**
1993  * idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
1994  * @vport_id: ID of virtual port queues are associated with
1995  * @buf: buffer containing the message
1996  * @pos: pointer to the first chunk describing the Rx queue
1997  * @num_chunks: number of chunks in the message
1998  *
1999  * Helper function for preparing the message describing configuration of
2000  * Rx queues.
2001  *
2002  * Return: the total size of the prepared message.
2003  */
2004 static u32 idpf_prepare_cfg_rxqs_msg(u32 vport_id, void *buf, const void *pos,
2005 				     u32 num_chunks)
2006 {
2007 	struct virtchnl2_config_rx_queues *crq = buf;
2008 
2009 	crq->vport_id = cpu_to_le32(vport_id);
2010 	crq->num_qinfo = cpu_to_le16(num_chunks);
2011 	memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
2012 
2013 	return struct_size(crq, qinfo, num_chunks);
2014 }
2015 
2016 /**
2017  * idpf_send_config_rx_queue_set_msg - send virtchnl config Rx queues message
2018  *				       for selected queues.
2019  * @qs: set of the Rx queues to configure
2020  *
2021  * Send config queues virtchnl message for queues contained in the @qs array.
2022  * The @qs array can contain Rx queues (or buffer queues) only.
2023  *
2024  * Return: 0 on success, -errno on failure.
2025  */
2026 static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
2027 {
2028 	struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
2029 	struct idpf_chunked_msg_params params = {
2030 		.vport_id	= qs->vport_id,
2031 		.vc_op		= VIRTCHNL2_OP_CONFIG_RX_QUEUES,
2032 		.prepare_msg	= idpf_prepare_cfg_rxqs_msg,
2033 		.config_sz	= sizeof(struct virtchnl2_config_rx_queues),
2034 		.chunk_sz	= sizeof(*qi),
2035 	};
2036 
2037 	qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
2038 	if (!qi)
2039 		return -ENOMEM;
2040 
2041 	params.chunks = qi;
2042 
2043 	for (u32 i = 0; i < qs->num; i++) {
2044 		if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
2045 			idpf_fill_rxq_config_chunk(qs->qv_rsrc, qs->qs[i].rxq,
2046 						   &qi[params.num_chunks++]);
2047 		else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
2048 			idpf_fill_bufq_config_chunk(qs->qv_rsrc, qs->qs[i].bufq,
2049 						    &qi[params.num_chunks++]);
2050 	}
2051 
2052 	return idpf_send_chunked_msg(qs->adapter, &params);
2053 }
2054 
2055 /**
2056  * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
2057  * @adapter: adapter pointer used to send virtchnl message
2058  * @rsrc: pointer to queue and vector resources
2059  * @vport_id: vport identifier used while preparing the virtchnl message
2060  *
2061  * Return: 0 on success, -errno on failure.
2062  */
2063 static int idpf_send_config_rx_queues_msg(struct idpf_adapter *adapter,
2064 					  struct idpf_q_vec_rsrc *rsrc,
2065 					  u32 vport_id)
2066 {
2067 	bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
2068 	struct idpf_queue_set *qs __free(kfree) = NULL;
2069 	u32 totqs = rsrc->num_rxq + rsrc->num_bufq;
2070 	u32 k = 0;
2071 
2072 	qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
2073 	if (!qs)
2074 		return -ENOMEM;
2075 
2076 	/* Populate the queue info buffer with all queue context info */
2077 	for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
2078 		const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
2079 		u32 num_rxq;
2080 
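		/* Singleq groups have no buffer queues, so jump straight to
		 * filling the Rx queue chunks.
		 */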
2081 		if (!splitq) {
2082 			num_rxq = rx_qgrp->singleq.num_rxq;
2083 			goto rxq;
2084 		}
2085 
2086 		for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
2087 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
2088 			qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
2089 		}
2090 
2091 		num_rxq = rx_qgrp->splitq.num_rxq_sets;
2092 
2093 rxq:
2094 		for (u32 j = 0; j < num_rxq; j++) {
2095 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
2096 
2097 			if (splitq)
2098 				qs->qs[k++].rxq =
2099 					&rx_qgrp->splitq.rxq_sets[j]->rxq;
2100 			else
2101 				qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
2102 		}
2103 	}
2104 
2105 	/* Make sure accounting agrees */
2106 	if (k != totqs)
2107 		return -EINVAL;
2108 
2109 	return idpf_send_config_rx_queue_set_msg(qs);
2110 }
2111 
2112 /**
2113  * idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
2114  *				 queues
2115  * @vport_id: ID of virtual port queues are associated with
2116  * @buf: buffer containing the message
2117  * @pos: pointer to the first chunk describing the queue
2118  * @num_chunks: number of chunks in the message
2119  *
2120  * Helper function for preparing the message describing queues to be enabled
2121  * or disabled.
2122  *
2123  * Return: the total size of the prepared message.
2124  */
2125 static u32 idpf_prepare_ena_dis_qs_msg(u32 vport_id, void *buf, const void *pos,
2126 				       u32 num_chunks)
2127 {
2128 	struct virtchnl2_del_ena_dis_queues *eq = buf;
2129 
2130 	eq->vport_id = cpu_to_le32(vport_id);
2131 	eq->chunks.num_chunks = cpu_to_le16(num_chunks);
2132 	memcpy(eq->chunks.chunks, pos,
2133 	       num_chunks * sizeof(*eq->chunks.chunks));
2134 
2135 	return struct_size(eq, chunks.chunks, num_chunks);
2136 }
2137 
2138 /**
2139  * idpf_send_ena_dis_queue_set_msg - send virtchnl enable or disable queues
2140  *				     message for selected queues
2141  * @qs: set of the queues to enable or disable
2142  * @en: whether to enable or disable queues
2143  *
2144  * Send enable or disable queues virtchnl message for queues contained
2145  * in the @qs array.
2146  * The @qs array can contain pointers to both Rx and Tx queues.
2147  *
2148  * Return: 0 on success, -errno on failure.
2149  */
2150 static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
2151 					   bool en)
2152 {
2153 	struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
2154 	struct idpf_chunked_msg_params params = {
2155 		.vport_id	= qs->vport_id,
2156 		.vc_op		= en ? VIRTCHNL2_OP_ENABLE_QUEUES :
2157 				       VIRTCHNL2_OP_DISABLE_QUEUES,
2158 		.prepare_msg	= idpf_prepare_ena_dis_qs_msg,
2159 		.config_sz	= sizeof(struct virtchnl2_del_ena_dis_queues),
2160 		.chunk_sz	= sizeof(*qc),
2161 		.num_chunks	= qs->num,
2162 	};
2163 
2164 	qc = kcalloc(qs->num, sizeof(*qc), GFP_KERNEL);
2165 	if (!qc)
2166 		return -ENOMEM;
2167 
2168 	params.chunks = qc;
2169 
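	/* Build one chunk per queue: each chunk starts at the queue's own ID
	 * and covers IDPF_NUMQ_PER_CHUNK queues of the given type.
	 */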
2170 	for (u32 i = 0; i < qs->num; i++) {
2171 		const struct idpf_queue_ptr *q = &qs->qs[i];
2172 		u32 qid;
2173 
2174 		qc[i].type = cpu_to_le32(q->type);
2175 		qc[i].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
2176 
2177 		switch (q->type) {
2178 		case VIRTCHNL2_QUEUE_TYPE_RX:
2179 			qid = q->rxq->q_id;
2180 			break;
2181 		case VIRTCHNL2_QUEUE_TYPE_TX:
2182 			qid = q->txq->q_id;
2183 			break;
2184 		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
2185 			qid = q->bufq->q_id;
2186 			break;
2187 		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
2188 			qid = q->complq->q_id;
2189 			break;
2190 		default:
2191 			return -EINVAL;
2192 		}
2193 
2194 		qc[i].start_queue_id = cpu_to_le32(qid);
2195 	}
2196 
2197 	return idpf_send_chunked_msg(qs->adapter, &params);
2198 }
2199 
2200 /**
2201  * idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
2202  *				  message
2203  * @adapter: adapter pointer used to send virtchnl message
2204  * @rsrc: pointer to queue and vector resources
2205  * @vport_id: vport identifier used while preparing the virtchnl message
2206  * @en: whether to enable or disable queues
2207  *
2208  * Return: 0 on success, -errno on failure.
2209  */
2210 static int idpf_send_ena_dis_queues_msg(struct idpf_adapter *adapter,
2211 					struct idpf_q_vec_rsrc *rsrc,
2212 					u32 vport_id, bool en)
2213 {
2214 	struct idpf_queue_set *qs __free(kfree) = NULL;
2215 	u32 num_txq, num_q, k = 0;
2216 	bool split;
2217 
2218 	num_txq = rsrc->num_txq + rsrc->num_complq;
2219 	num_q = num_txq + rsrc->num_rxq + rsrc->num_bufq;
2220 
2221 	qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
2222 	if (!qs)
2223 		return -ENOMEM;
2224 
2225 	split = idpf_is_queue_model_split(rsrc->txq_model);
2226 
2227 	for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
2228 		const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
2229 
2230 		for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
2231 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
2232 			qs->qs[k++].txq = tx_qgrp->txqs[j];
2233 		}
2234 
2235 		if (!split)
2236 			continue;
2237 
2238 		qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
2239 		qs->qs[k++].complq = tx_qgrp->complq;
2240 	}
2241 
2242 	if (k != num_txq)
2243 		return -EINVAL;
2244 
2245 	split = idpf_is_queue_model_split(rsrc->rxq_model);
2246 
2247 	for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
2248 		const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
2249 		u32 num_rxq;
2250 
2251 		if (split)
2252 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
2253 		else
2254 			num_rxq = rx_qgrp->singleq.num_rxq;
2255 
2256 		for (u32 j = 0; j < num_rxq; j++) {
2257 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
2258 
2259 			if (split)
2260 				qs->qs[k++].rxq =
2261 					&rx_qgrp->splitq.rxq_sets[j]->rxq;
2262 			else
2263 				qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
2264 		}
2265 
2266 		if (!split)
2267 			continue;
2268 
2269 		for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
2270 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
2271 			qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
2272 		}
2273 	}
2274 
2275 	if (k != num_q)
2276 		return -EINVAL;
2277 
2278 	return idpf_send_ena_dis_queue_set_msg(qs, en);
2279 }
2280 
2281 /**
2282  * idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
2283  *					      interrupt vectors for a queue set
2284  * @vport_id: ID of virtual port queues are associated with
2285  * @buf: buffer containing the message
2286  * @pos: pointer to the first chunk describing the vector mapping
2287  * @num_chunks: number of chunks in the message
2288  *
2289  * Helper function for preparing the message describing mapping queues to
2290  * q_vectors.
2291  *
2292  * Return: the total size of the prepared message.
2293  */
2294 static u32
2295 idpf_prep_map_unmap_queue_set_vector_msg(u32 vport_id, void *buf,
2296 					 const void *pos, u32 num_chunks)
2297 {
2298 	struct virtchnl2_queue_vector_maps *vqvm = buf;
2299 
2300 	vqvm->vport_id = cpu_to_le32(vport_id);
2301 	vqvm->num_qv_maps = cpu_to_le16(num_chunks);
2302 	memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
2303 
2304 	return struct_size(vqvm, qv_maps, num_chunks);
2305 }
2306 
2307 /**
2308  * idpf_send_map_unmap_queue_set_vector_msg - send virtchnl map or unmap
2309  *					      queue set vector message
2310  * @qs: set of the queues to map or unmap
2311  * @map: true for map and false for unmap
2312  *
2313  * Return: 0 on success, -errno on failure.
2314  */
2315 static int
2316 idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
2317 					 bool map)
2318 {
2319 	struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
2320 	struct idpf_chunked_msg_params params = {
2321 		.vport_id	= qs->vport_id,
2322 		.vc_op		= map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
2323 					VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
2324 		.prepare_msg	= idpf_prep_map_unmap_queue_set_vector_msg,
2325 		.config_sz	= sizeof(struct virtchnl2_queue_vector_maps),
2326 		.chunk_sz	= sizeof(*vqv),
2327 		.num_chunks	= qs->num,
2328 	};
2329 	bool split;
2330 
2331 	vqv = kcalloc(qs->num, sizeof(*vqv), GFP_KERNEL);
2332 	if (!vqv)
2333 		return -ENOMEM;
2334 
2335 	params.chunks = vqv;
2336 
2337 	split = idpf_is_queue_model_split(qs->qv_rsrc->txq_model);
2338 
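	/* Pick the vector servicing each queue: queues flagged NOIRQ fall
	 * back to the reserved no-IRQ vector, and in splitq mode Tx queues
	 * use the vector of their completion queue.
	 */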
2339 	for (u32 i = 0; i < qs->num; i++) {
2340 		const struct idpf_queue_ptr *q = &qs->qs[i];
2341 		const struct idpf_q_vector *vec;
2342 		u32 qid, v_idx, itr_idx;
2343 
2344 		vqv[i].queue_type = cpu_to_le32(q->type);
2345 
2346 		switch (q->type) {
2347 		case VIRTCHNL2_QUEUE_TYPE_RX:
2348 			qid = q->rxq->q_id;
2349 
2350 			if (idpf_queue_has(NOIRQ, q->rxq))
2351 				vec = NULL;
2352 			else
2353 				vec = q->rxq->q_vector;
2354 
2355 			if (vec) {
2356 				v_idx = vec->v_idx;
2357 				itr_idx = vec->rx_itr_idx;
2358 			} else {
2359 				v_idx = qs->qv_rsrc->noirq_v_idx;
2360 				itr_idx = VIRTCHNL2_ITR_IDX_0;
2361 			}
2362 			break;
2363 		case VIRTCHNL2_QUEUE_TYPE_TX:
2364 			qid = q->txq->q_id;
2365 
2366 			if (idpf_queue_has(NOIRQ, q->txq))
2367 				vec = NULL;
2368 			else if (idpf_queue_has(XDP, q->txq))
2369 				vec = q->txq->complq->q_vector;
2370 			else if (split)
2371 				vec = q->txq->txq_grp->complq->q_vector;
2372 			else
2373 				vec = q->txq->q_vector;
2374 
2375 			if (vec) {
2376 				v_idx = vec->v_idx;
2377 				itr_idx = vec->tx_itr_idx;
2378 			} else {
2379 				v_idx = qs->qv_rsrc->noirq_v_idx;
2380 				itr_idx = VIRTCHNL2_ITR_IDX_1;
2381 			}
2382 			break;
2383 		default:
2384 			return -EINVAL;
2385 		}
2386 
2387 		vqv[i].queue_id = cpu_to_le32(qid);
2388 		vqv[i].vector_id = cpu_to_le16(v_idx);
2389 		vqv[i].itr_idx = cpu_to_le32(itr_idx);
2390 	}
2391 
2392 	return idpf_send_chunked_msg(qs->adapter, &params);
2393 }
2394 
2395 /**
2396  * idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
2397  *					  vector message
2398  * @adapter: adapter pointer used to send virtchnl message
2399  * @rsrc: pointer to queue and vector resources
2400  * @vport_id: vport identifier used while preparing the virtchnl message
2401  * @map: true for map and false for unmap
2402  *
2403  * Return: 0 on success, -errno on failure.
2404  */
2405 int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter,
2406 					 struct idpf_q_vec_rsrc *rsrc,
2407 					 u32 vport_id, bool map)
2408 {
2409 	struct idpf_queue_set *qs __free(kfree) = NULL;
2410 	u32 num_q = rsrc->num_txq + rsrc->num_rxq;
2411 	u32 k = 0;
2412 
2413 	qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
2414 	if (!qs)
2415 		return -ENOMEM;
2416 
2417 	for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
2418 		const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
2419 
2420 		for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
2421 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
2422 			qs->qs[k++].txq = tx_qgrp->txqs[j];
2423 		}
2424 	}
2425 
2426 	if (k != rsrc->num_txq)
2427 		return -EINVAL;
2428 
2429 	for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
2430 		const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
2431 		u32 num_rxq;
2432 
2433 		if (idpf_is_queue_model_split(rsrc->rxq_model))
2434 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
2435 		else
2436 			num_rxq = rx_qgrp->singleq.num_rxq;
2437 
2438 		for (u32 j = 0; j < num_rxq; j++) {
2439 			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
2440 
2441 			if (idpf_is_queue_model_split(rsrc->rxq_model))
2442 				qs->qs[k++].rxq =
2443 					&rx_qgrp->splitq.rxq_sets[j]->rxq;
2444 			else
2445 				qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
2446 		}
2447 	}
2448 
2449 	if (k != num_q)
2450 		return -EINVAL;
2451 
2452 	return idpf_send_map_unmap_queue_set_vector_msg(qs, map);
2453 }
2454 
2455 /**
2456  * idpf_send_enable_queue_set_msg - send enable queues virtchnl message for
2457  *				    selected queues
2458  * @qs: set of the queues
2459  *
2460  * Send enable queues virtchnl message for queues contained in the @qs array.
2461  *
2462  * Return: 0 on success, -errno on failure.
2463  */
2464 int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs)
2465 {
2466 	return idpf_send_ena_dis_queue_set_msg(qs, true);
2467 }
2468 
2469 /**
2470  * idpf_send_disable_queue_set_msg - send disable queues virtchnl message for
2471  *				     selected queues
2472  * @qs: set of the queues
2473  *
2474  * Return: 0 on success, -errno on failure.
2475  */
2476 int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs)
2477 {
2478 	int err;
2479 
2480 	err = idpf_send_ena_dis_queue_set_msg(qs, false);
2481 	if (err)
2482 		return err;
2483 
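	/* Queue disable completes asynchronously; wait for the marker events
	 * from the device before tearing the queues down.
	 */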
2484 	return idpf_wait_for_marker_event_set(qs);
2485 }
2486 
2487 /**
2488  * idpf_send_config_queue_set_msg - send virtchnl config queues message for
2489  *				    selected queues
2490  * @qs: set of the queues
2491  *
2492  * Send config queues virtchnl message for queues contained in the @qs array.
2493  * The @qs array can contain both Rx and Tx queues.
2494  *
2495  * Return: 0 on success, -errno on failure.
2496  */
2497 int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
2498 {
2499 	int err;
2500 
2501 	err = idpf_send_config_tx_queue_set_msg(qs);
2502 	if (err)
2503 		return err;
2504 
2505 	return idpf_send_config_rx_queue_set_msg(qs);
2506 }
2507 
2508 /**
2509  * idpf_send_enable_queues_msg - send enable queues virtchnl message
2510  * @vport: Virtual port private data structure
2511  *
2512  * Will send enable queues virtchnl message.  Returns 0 on success, negative on
2513  * failure.
2514  */
2515 int idpf_send_enable_queues_msg(struct idpf_vport *vport)
2516 {
2517 	return idpf_send_ena_dis_queues_msg(vport->adapter,
2518 					    &vport->dflt_qv_rsrc,
2519 					    vport->vport_id, true);
2520 }
2521 
2522 /**
2523  * idpf_send_disable_queues_msg - send disable queues virtchnl message
2524  * @vport: Virtual port private data structure
2525  *
2526  * Will send disable queues virtchnl message.  Returns 0 on success, negative
2527  * on failure.
2528  */
2529 int idpf_send_disable_queues_msg(struct idpf_vport *vport)
2530 {
2531 	int err;
2532 
2533 	err = idpf_send_ena_dis_queues_msg(vport->adapter,
2534 					   &vport->dflt_qv_rsrc,
2535 					   vport->vport_id, false);
2536 	if (err)
2537 		return err;
2538 
2539 	return idpf_wait_for_marker_event(vport);
2540 }
2541 
2542 /**
2543  * idpf_convert_reg_to_queue_chunks - Copy queue chunk information into the
2544  * virtchnl queue chunk structure
2545  * @dchunks: Destination chunks to store data to
2546  * @schunks: Source chunks to copy data from
2547  * @num_chunks: number of chunks to copy
2548  */
2549 static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
2550 					     struct idpf_queue_id_reg_chunk *schunks,
2551 					     u16 num_chunks)
2552 {
2553 	u16 i;
2554 
2555 	for (i = 0; i < num_chunks; i++) {
2556 		dchunks[i].type = cpu_to_le32(schunks[i].type);
2557 		dchunks[i].start_queue_id = cpu_to_le32(schunks[i].start_queue_id);
2558 		dchunks[i].num_queues = cpu_to_le32(schunks[i].num_queues);
2559 	}
2560 }
2561 
2562 /**
2563  * idpf_send_delete_queues_msg - send delete queues virtchnl message
2564  * @adapter: adapter pointer used to send virtchnl message
2565  * @chunks: queue ids received over mailbox
2566  * @vport_id: vport identifier used while preparing the virtchnl message
2567  *
2568  * Return: 0 on success, negative on failure.
2569  */
2570 int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,
2571 				struct idpf_queue_id_reg_info *chunks,
2572 				u32 vport_id)
2573 {
2574 	struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
2575 	struct idpf_vc_xn_params xn_params = {};
2576 	ssize_t reply_sz;
2577 	u16 num_chunks;
2578 	int buf_size;
2579 
2580 	num_chunks = chunks->num_chunks;
2581 	buf_size = struct_size(eq, chunks.chunks, num_chunks);
2582 
2583 	eq = kzalloc(buf_size, GFP_KERNEL);
2584 	if (!eq)
2585 		return -ENOMEM;
2586 
2587 	eq->vport_id = cpu_to_le32(vport_id);
2588 	eq->chunks.num_chunks = cpu_to_le16(num_chunks);
2589 
2590 	idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->queue_chunks,
2591 					 num_chunks);
2592 
2593 	xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
2594 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2595 	xn_params.send_buf.iov_base = eq;
2596 	xn_params.send_buf.iov_len = buf_size;
2597 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2598 
2599 	return reply_sz < 0 ? reply_sz : 0;
2600 }
2601 
2602 /**
2603  * idpf_send_config_queues_msg - Send config queues virtchnl message
2604  * @adapter: adapter pointer used to send virtchnl message
2605  * @rsrc: pointer to queue and vector resources
2606  * @vport_id: vport identifier used while preparing the virtchnl message
2607  *
2608  * Return: 0 on success, negative on failure.
2609  */
2610 int idpf_send_config_queues_msg(struct idpf_adapter *adapter,
2611 				struct idpf_q_vec_rsrc *rsrc,
2612 				u32 vport_id)
2613 {
2614 	int err;
2615 
2616 	err = idpf_send_config_tx_queues_msg(adapter, rsrc, vport_id);
2617 	if (err)
2618 		return err;
2619 
2620 	return idpf_send_config_rx_queues_msg(adapter, rsrc, vport_id);
2621 }
2622 
2623 /**
2624  * idpf_send_add_queues_msg - Send virtchnl add queues message
2625  * @adapter: adapter pointer used to send virtchnl message
2626  * @vport_config: vport persistent structure to store the queue chunk info
2627  * @rsrc: pointer to queue and vector resources
2628  * @vport_id: vport identifier used while preparing the virtchnl message
2629  *
2630  * Return: 0 on success, negative on failure.
2631  */
2632 int idpf_send_add_queues_msg(struct idpf_adapter *adapter,
2633 			     struct idpf_vport_config *vport_config,
2634 			     struct idpf_q_vec_rsrc *rsrc,
2635 			     u32 vport_id)
2636 {
2637 	struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
2638 	struct idpf_vc_xn_params xn_params = {};
2639 	struct virtchnl2_add_queues aq = {};
2640 	ssize_t reply_sz;
2641 	int size;
2642 
2643 	vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2644 	if (!vc_msg)
2645 		return -ENOMEM;
2646 
2647 	aq.vport_id = cpu_to_le32(vport_id);
2648 	aq.num_tx_q = cpu_to_le16(rsrc->num_txq);
2649 	aq.num_tx_complq = cpu_to_le16(rsrc->num_complq);
2650 	aq.num_rx_q = cpu_to_le16(rsrc->num_rxq);
2651 	aq.num_rx_bufq = cpu_to_le16(rsrc->num_bufq);
2652 
2653 	xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
2654 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2655 	xn_params.send_buf.iov_base = &aq;
2656 	xn_params.send_buf.iov_len = sizeof(aq);
2657 	xn_params.recv_buf.iov_base = vc_msg;
2658 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2659 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2660 	if (reply_sz < 0)
2661 		return reply_sz;
2662 
2663 	/* compare vc_msg num queues with vport num queues */
2664 	if (le16_to_cpu(vc_msg->num_tx_q) != rsrc->num_txq ||
2665 	    le16_to_cpu(vc_msg->num_rx_q) != rsrc->num_rxq ||
2666 	    le16_to_cpu(vc_msg->num_tx_complq) != rsrc->num_complq ||
2667 	    le16_to_cpu(vc_msg->num_rx_bufq) != rsrc->num_bufq)
2668 		return -EINVAL;
2669 
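	/* Make sure the reply is large enough to hold every queue chunk
	 * before handing the chunk list over to the vport config.
	 */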
2670 	size = struct_size(vc_msg, chunks.chunks,
2671 			   le16_to_cpu(vc_msg->chunks.num_chunks));
2672 	if (reply_sz < size)
2673 		return -EIO;
2674 
2675 	return idpf_vport_init_queue_reg_chunks(vport_config, &vc_msg->chunks);
2676 }
2677 
2678 /**
2679  * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
2680  * @adapter: Driver specific private structure
2681  * @num_vectors: number of vectors to be allocated
2682  *
2683  * Returns 0 on success, negative on failure.
2684  */
2685 int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
2686 {
2687 	struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
2688 	struct idpf_vc_xn_params xn_params = {};
2689 	struct virtchnl2_alloc_vectors ac = {};
2690 	ssize_t reply_sz;
2691 	u16 num_vchunks;
2692 	int size;
2693 
2694 	ac.num_vectors = cpu_to_le16(num_vectors);
2695 
2696 	rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2697 	if (!rcvd_vec)
2698 		return -ENOMEM;
2699 
2700 	xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
2701 	xn_params.send_buf.iov_base = &ac;
2702 	xn_params.send_buf.iov_len = sizeof(ac);
2703 	xn_params.recv_buf.iov_base = rcvd_vec;
2704 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2705 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2706 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2707 	if (reply_sz < 0)
2708 		return reply_sz;
2709 
2710 	num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
2711 	size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
2712 	if (reply_sz < size)
2713 		return -EIO;
2714 
2715 	if (size > IDPF_CTLQ_MAX_BUF_LEN)
2716 		return -EINVAL;
2717 
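	/* Keep a copy of the returned vector chunks; they are needed later
	 * when distributing vector IDs and when deallocating the vectors.
	 */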
2718 	kfree(adapter->req_vec_chunks);
2719 	adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
2720 	if (!adapter->req_vec_chunks)
2721 		return -ENOMEM;
2722 
2723 	if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
2724 		kfree(adapter->req_vec_chunks);
2725 		adapter->req_vec_chunks = NULL;
2726 		return -EINVAL;
2727 	}
2728 
2729 	return 0;
2730 }
2731 
2732 /**
2733  * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
2734  * @adapter: Driver specific private structure
2735  *
2736  * Returns 0 on success, negative on failure.
2737  */
2738 int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
2739 {
2740 	struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
2741 	struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
2742 	struct idpf_vc_xn_params xn_params = {};
2743 	ssize_t reply_sz;
2744 	int buf_size;
2745 
2746 	buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
2747 
2748 	xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
2749 	xn_params.send_buf.iov_base = vcs;
2750 	xn_params.send_buf.iov_len = buf_size;
2751 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2752 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2753 	if (reply_sz < 0)
2754 		return reply_sz;
2755 
2756 	kfree(adapter->req_vec_chunks);
2757 	adapter->req_vec_chunks = NULL;
2758 
2759 	return 0;
2760 }
2761 
2762 /**
2763  * idpf_get_max_vfs - Get max number of VFs supported
2764  * @adapter: Driver specific private structure
2765  *
2766  * Returns max number of VFs
2767  */
2768 static int idpf_get_max_vfs(struct idpf_adapter *adapter)
2769 {
2770 	return le16_to_cpu(adapter->caps.max_sriov_vfs);
2771 }
2772 
2773 /**
2774  * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
2775  * @adapter: Driver specific private structure
2776  * @num_vfs: number of virtual functions to be created
2777  *
2778  * Returns 0 on success, negative on failure.
2779  */
2780 int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
2781 {
2782 	struct virtchnl2_sriov_vfs_info svi = {};
2783 	struct idpf_vc_xn_params xn_params = {};
2784 	ssize_t reply_sz;
2785 
2786 	svi.num_vfs = cpu_to_le16(num_vfs);
2787 	xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
2788 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2789 	xn_params.send_buf.iov_base = &svi;
2790 	xn_params.send_buf.iov_len = sizeof(svi);
2791 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2792 
2793 	return reply_sz < 0 ? reply_sz : 0;
2794 }
2795 
2796 /**
2797  * idpf_send_get_stats_msg - Send virtchnl get statistics message
2798  * @np: netdev private structure
2799  * @port_stats: structure to store the vport statistics
2800  *
2801  * Return: 0 on success, negative on failure.
2802  */
2803 int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,
2804 			    struct idpf_port_stats *port_stats)
2805 {
2806 	struct rtnl_link_stats64 *netstats = &np->netstats;
2807 	struct virtchnl2_vport_stats stats_msg = {};
2808 	struct idpf_vc_xn_params xn_params = {};
2809 	ssize_t reply_sz;
2810 
2812 	/* Don't send get_stats message if the vport is not up */
2813 	if (!test_bit(IDPF_VPORT_UP, np->state))
2814 		return 0;
2815 
2816 	stats_msg.vport_id = cpu_to_le32(np->vport_id);
2817 
2818 	xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
2819 	xn_params.send_buf.iov_base = &stats_msg;
2820 	xn_params.send_buf.iov_len = sizeof(stats_msg);
2821 	xn_params.recv_buf = xn_params.send_buf;
2822 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2823 
2824 	reply_sz = idpf_vc_xn_exec(np->adapter, &xn_params);
2825 	if (reply_sz < 0)
2826 		return reply_sz;
2827 	if (reply_sz < sizeof(stats_msg))
2828 		return -EIO;
2829 
2830 	spin_lock_bh(&np->stats_lock);
2831 
2832 	netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
2833 			       le64_to_cpu(stats_msg.rx_multicast) +
2834 			       le64_to_cpu(stats_msg.rx_broadcast);
2835 	netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
2836 			       le64_to_cpu(stats_msg.tx_multicast) +
2837 			       le64_to_cpu(stats_msg.tx_broadcast);
2838 	netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
2839 	netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
2840 	netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
2841 	netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
2842 	netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
2843 	netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
2844 
2845 	port_stats->vport_stats = stats_msg;
2846 
2847 	spin_unlock_bh(&np->stats_lock);
2848 
2849 	return 0;
2850 }
2851 
2852 /**
2853  * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set RSS lut message
2854  * @adapter: adapter pointer used to send virtchnl message
2855  * @rss_data: pointer to RSS key and lut info
2856  * @vport_id: vport identifier used while preparing the virtchnl message
2857  * @get: flag to get or set RSS lookup table
2858  *
2859  * When rxhash is disabled, RSS LUT will be configured with zeros.  If rxhash
2860  * is enabled, the LUT values stored in the driver's soft copy will be used to
2861  * set up the HW.
2862  *
2863  * Return: 0 on success, negative on failure.
2864  */
2865 int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,
2866 				  struct idpf_rss_data *rss_data,
2867 				  u32 vport_id, bool get)
2868 {
2869 	struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
2870 	struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
2871 	struct idpf_vc_xn_params xn_params = {};
2872 	int buf_size, lut_buf_size;
2873 	struct idpf_vport *vport;
2874 	ssize_t reply_sz;
2875 	bool rxhash_ena;
2876 	int i;
2877 
2878 	vport = idpf_vid_to_vport(adapter, vport_id);
2879 	if (!vport)
2880 		return -EINVAL;
2881 
2882 	rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
2883 
2884 	buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
2885 	rl = kzalloc(buf_size, GFP_KERNEL);
2886 	if (!rl)
2887 		return -ENOMEM;
2888 
2889 	rl->vport_id = cpu_to_le32(vport_id);
2890 
2891 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2892 	xn_params.send_buf.iov_base = rl;
2893 	xn_params.send_buf.iov_len = buf_size;
2894 
2895 	if (get) {
2896 		recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2897 		if (!recv_rl)
2898 			return -ENOMEM;
2899 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
2900 		xn_params.recv_buf.iov_base = recv_rl;
2901 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2902 	} else {
2903 		rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
2904 		for (i = 0; i < rss_data->rss_lut_size; i++)
2905 			rl->lut[i] = rxhash_ena ?
2906 				cpu_to_le32(rss_data->rss_lut[i]) : 0;
2907 
2908 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
2909 	}
2910 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2911 	if (reply_sz < 0)
2912 		return reply_sz;
2913 	if (!get)
2914 		return 0;
2915 	if (reply_sz < sizeof(struct virtchnl2_rss_lut))
2916 		return -EIO;
2917 
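	/* Validate the reply against the LUT size reported by the device and
	 * reallocate the driver's soft copy if the entry count changed.
	 */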
2918 	lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
2919 	if (reply_sz < lut_buf_size)
2920 		return -EIO;
2921 
2922 	/* size didn't change, we can reuse existing lut buf */
2923 	if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
2924 		goto do_memcpy;
2925 
2926 	rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
2927 	kfree(rss_data->rss_lut);
2928 
2929 	rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
2930 	if (!rss_data->rss_lut) {
2931 		rss_data->rss_lut_size = 0;
2932 		return -ENOMEM;
2933 	}
2934 
2935 do_memcpy:
2936 	memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
2937 
2938 	return 0;
2939 }
2940 
2941 /**
2942  * idpf_send_get_set_rss_key_msg - Send virtchnl get or set RSS key message
2943  * @adapter: adapter pointer used to send virtchnl message
2944  * @rss_data: pointer to RSS key and lut info
2945  * @vport_id: vport identifier used while preparing the virtchnl message
2946  * @get: flag to get or set RSS hash key
2947  *
2948  * Return: 0 on success, negative on failure
2949  */
2950 int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,
2951 				  struct idpf_rss_data *rss_data,
2952 				  u32 vport_id, bool get)
2953 {
2954 	struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
2955 	struct virtchnl2_rss_key *rk __free(kfree) = NULL;
2956 	struct idpf_vc_xn_params xn_params = {};
2957 	ssize_t reply_sz;
2958 	int i, buf_size;
2959 	u16 key_size;
2960 
2961 	buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
2962 	rk = kzalloc(buf_size, GFP_KERNEL);
2963 	if (!rk)
2964 		return -ENOMEM;
2965 
2966 	rk->vport_id = cpu_to_le32(vport_id);
2967 	xn_params.send_buf.iov_base = rk;
2968 	xn_params.send_buf.iov_len = buf_size;
2969 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
2970 	if (get) {
2971 		recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
2972 		if (!recv_rk)
2973 			return -ENOMEM;
2974 
2975 		xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
2976 		xn_params.recv_buf.iov_base = recv_rk;
2977 		xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
2978 	} else {
2979 		rk->key_len = cpu_to_le16(rss_data->rss_key_size);
2980 		for (i = 0; i < rss_data->rss_key_size; i++)
2981 			rk->key_flex[i] = rss_data->rss_key[i];
2982 
2983 		xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
2984 	}
2985 
2986 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
2987 	if (reply_sz < 0)
2988 		return reply_sz;
2989 	if (!get)
2990 		return 0;
2991 	if (reply_sz < sizeof(struct virtchnl2_rss_key))
2992 		return -EIO;
2993 
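	/* Never store more key material than the stack's RSS key length. */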
2994 	key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
2995 			 le16_to_cpu(recv_rk->key_len));
2996 	if (reply_sz < key_size)
2997 		return -EIO;
2998 
2999 	/* key len didn't change, reuse existing buf */
3000 	if (rss_data->rss_key_size == key_size)
3001 		goto do_memcpy;
3002 
3003 	rss_data->rss_key_size = key_size;
3004 	kfree(rss_data->rss_key);
3005 	rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
3006 	if (!rss_data->rss_key) {
3007 		rss_data->rss_key_size = 0;
3008 		return -ENOMEM;
3009 	}
3010 
3011 do_memcpy:
3012 	memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
3013 
3014 	return 0;
3015 }
3016 
3017 /**
3018  * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
3019  * @ptype: ptype lookup table
3020  * @pstate: state machine for ptype lookup table
3021  * @ipv4: true for IPv4, false for IPv6
3022  * @frag: true if the packet type is a fragment
3023  *
3024  */
3025 static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
3026 				   struct idpf_ptype_state *pstate,
3027 				   bool ipv4, bool frag)
3028 {
3029 	if (!pstate->outer_ip || !pstate->outer_frag) {
3030 		pstate->outer_ip = true;
3031 
3032 		if (ipv4)
3033 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
3034 		else
3035 			ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
3036 
3037 		if (frag) {
3038 			ptype->outer_frag = LIBETH_RX_PT_FRAG;
3039 			pstate->outer_frag = true;
3040 		}
3041 	} else {
3042 		ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
3043 		pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
3044 
3045 		if (ipv4)
3046 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
3047 		else
3048 			ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
3049 
3050 		if (frag)
3051 			ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
3052 	}
3053 }
3054 
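/**
 * idpf_finalize_ptype_lookup - finalize the parsed ptype entry
 * @ptype: ptype lookup entry to finalize
 *
 * Derive the payload layer from the protocol fields already parsed and
 * generate the libeth hash type for the entry.
 */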
3055 static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
3056 {
3057 	if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
3058 	    ptype->inner_prot)
3059 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
3060 	else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
3061 		 ptype->outer_ip)
3062 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
3063 	else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
3064 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
3065 	else
3066 		ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
3067 
3068 	libeth_rx_pt_gen_hash_type(ptype);
3069 }
3070 
3071 /**
3072  * idpf_parse_protocol_ids - parse protocol IDs for a given packet type
3073  * @ptype: packet type to parse
3074  * @rx_pt: store the parsed packet type info into
3075  */
3076 static void idpf_parse_protocol_ids(struct virtchnl2_ptype *ptype,
3077 				    struct libeth_rx_pt *rx_pt)
3078 {
3079 	struct idpf_ptype_state pstate = {};
3080 
3081 	for (u32 j = 0; j < ptype->proto_id_count; j++) {
3082 		u16 id = le16_to_cpu(ptype->proto_id[j]);
3083 
3084 		switch (id) {
3085 		case VIRTCHNL2_PROTO_HDR_GRE:
3086 			if (pstate.tunnel_state == IDPF_PTYPE_TUNNEL_IP) {
3087 				rx_pt->tunnel_type =
3088 					LIBETH_RX_PT_TUNNEL_IP_GRENAT;
3089 				pstate.tunnel_state |=
3090 					IDPF_PTYPE_TUNNEL_IP_GRENAT;
3091 			}
3092 			break;
3093 		case VIRTCHNL2_PROTO_HDR_MAC:
3094 			rx_pt->outer_ip = LIBETH_RX_PT_OUTER_L2;
3095 			if (pstate.tunnel_state == IDPF_TUN_IP_GRE) {
3096 				rx_pt->tunnel_type =
3097 					LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
3098 				pstate.tunnel_state |=
3099 					IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
3100 			}
3101 			break;
3102 		case VIRTCHNL2_PROTO_HDR_IPV4:
3103 			idpf_fill_ptype_lookup(rx_pt, &pstate, true, false);
3104 			break;
3105 		case VIRTCHNL2_PROTO_HDR_IPV6:
3106 			idpf_fill_ptype_lookup(rx_pt, &pstate, false, false);
3107 			break;
3108 		case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
3109 			idpf_fill_ptype_lookup(rx_pt, &pstate, true, true);
3110 			break;
3111 		case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
3112 			idpf_fill_ptype_lookup(rx_pt, &pstate, false, true);
3113 			break;
3114 		case VIRTCHNL2_PROTO_HDR_UDP:
3115 			rx_pt->inner_prot = LIBETH_RX_PT_INNER_UDP;
3116 			break;
3117 		case VIRTCHNL2_PROTO_HDR_TCP:
3118 			rx_pt->inner_prot = LIBETH_RX_PT_INNER_TCP;
3119 			break;
3120 		case VIRTCHNL2_PROTO_HDR_SCTP:
3121 			rx_pt->inner_prot = LIBETH_RX_PT_INNER_SCTP;
3122 			break;
3123 		case VIRTCHNL2_PROTO_HDR_ICMP:
3124 			rx_pt->inner_prot = LIBETH_RX_PT_INNER_ICMP;
3125 			break;
3126 		case VIRTCHNL2_PROTO_HDR_PAY:
3127 			rx_pt->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
3128 			break;
3129 		case VIRTCHNL2_PROTO_HDR_ICMPV6:
3130 		case VIRTCHNL2_PROTO_HDR_IPV6_EH:
3131 		case VIRTCHNL2_PROTO_HDR_PRE_MAC:
3132 		case VIRTCHNL2_PROTO_HDR_POST_MAC:
3133 		case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
3134 		case VIRTCHNL2_PROTO_HDR_SVLAN:
3135 		case VIRTCHNL2_PROTO_HDR_CVLAN:
3136 		case VIRTCHNL2_PROTO_HDR_MPLS:
3137 		case VIRTCHNL2_PROTO_HDR_MMPLS:
3138 		case VIRTCHNL2_PROTO_HDR_PTP:
3139 		case VIRTCHNL2_PROTO_HDR_CTRL:
3140 		case VIRTCHNL2_PROTO_HDR_LLDP:
3141 		case VIRTCHNL2_PROTO_HDR_ARP:
3142 		case VIRTCHNL2_PROTO_HDR_ECP:
3143 		case VIRTCHNL2_PROTO_HDR_EAPOL:
3144 		case VIRTCHNL2_PROTO_HDR_PPPOD:
3145 		case VIRTCHNL2_PROTO_HDR_PPPOE:
3146 		case VIRTCHNL2_PROTO_HDR_IGMP:
3147 		case VIRTCHNL2_PROTO_HDR_AH:
3148 		case VIRTCHNL2_PROTO_HDR_ESP:
3149 		case VIRTCHNL2_PROTO_HDR_IKE:
3150 		case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
3151 		case VIRTCHNL2_PROTO_HDR_L2TPV2:
3152 		case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
3153 		case VIRTCHNL2_PROTO_HDR_L2TPV3:
3154 		case VIRTCHNL2_PROTO_HDR_GTP:
3155 		case VIRTCHNL2_PROTO_HDR_GTP_EH:
3156 		case VIRTCHNL2_PROTO_HDR_GTPCV2:
3157 		case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
3158 		case VIRTCHNL2_PROTO_HDR_GTPU:
3159 		case VIRTCHNL2_PROTO_HDR_GTPU_UL:
3160 		case VIRTCHNL2_PROTO_HDR_GTPU_DL:
3161 		case VIRTCHNL2_PROTO_HDR_ECPRI:
3162 		case VIRTCHNL2_PROTO_HDR_VRRP:
3163 		case VIRTCHNL2_PROTO_HDR_OSPF:
3164 		case VIRTCHNL2_PROTO_HDR_TUN:
3165 		case VIRTCHNL2_PROTO_HDR_NVGRE:
3166 		case VIRTCHNL2_PROTO_HDR_VXLAN:
3167 		case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
3168 		case VIRTCHNL2_PROTO_HDR_GENEVE:
3169 		case VIRTCHNL2_PROTO_HDR_NSH:
3170 		case VIRTCHNL2_PROTO_HDR_QUIC:
3171 		case VIRTCHNL2_PROTO_HDR_PFCP:
3172 		case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
3173 		case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
3174 		case VIRTCHNL2_PROTO_HDR_RTP:
3175 		case VIRTCHNL2_PROTO_HDR_NO_PROTO:
3176 			break;
3177 		default:
3178 			break;
3179 		}
3180 	}
3181 }
3182 
3183 /**
3184  * idpf_send_get_rx_ptype_msg - Send virtchnl get RX ptype info message
3185  * @adapter: driver specific private structure
3186  *
3187  * Return: 0 on success, negative on failure.
3188  */
3189 static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter)
3190 {
3191 	struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
3192 	struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
3193 	struct libeth_rx_pt *singleq_pt_lkup __free(kfree) = NULL;
3194 	struct libeth_rx_pt *splitq_pt_lkup __free(kfree) = NULL;
3195 	struct idpf_vc_xn_params xn_params = {};
3196 	int ptypes_recvd = 0, ptype_offset;
3197 	u32 max_ptype = IDPF_RX_MAX_PTYPE;
3198 	u16 next_ptype_id = 0;
3199 	ssize_t reply_sz;
3200 
3201 	singleq_pt_lkup = kcalloc(IDPF_RX_MAX_BASE_PTYPE,
3202 				  sizeof(*singleq_pt_lkup), GFP_KERNEL);
3203 	if (!singleq_pt_lkup)
3204 		return -ENOMEM;
3205 
3206 	splitq_pt_lkup = kcalloc(max_ptype, sizeof(*splitq_pt_lkup), GFP_KERNEL);
3207 	if (!splitq_pt_lkup)
3208 		return -ENOMEM;
3209 
3210 	get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
3211 	if (!get_ptype_info)
3212 		return -ENOMEM;
3213 
3214 	ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
3215 	if (!ptype_info)
3216 		return -ENOMEM;
3217 
3218 	xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
3219 	xn_params.send_buf.iov_base = get_ptype_info;
3220 	xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
3221 	xn_params.recv_buf.iov_base = ptype_info;
3222 	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
3223 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3224 
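	/* Fetch the ptype table in windows of at most
	 * IDPF_RX_MAX_PTYPES_PER_BUF entries until all IDPF_RX_MAX_PTYPE
	 * entries are read or the device signals the end of the table.
	 */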
3225 	while (next_ptype_id < max_ptype) {
3226 		get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
3227 
3228 		if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
3229 			get_ptype_info->num_ptypes =
3230 				cpu_to_le16(max_ptype - next_ptype_id);
3231 		else
3232 			get_ptype_info->num_ptypes =
3233 				cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
3234 
3235 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3236 		if (reply_sz < 0)
3237 			return reply_sz;
3238 
3239 		ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
3240 		if (ptypes_recvd > max_ptype)
3241 			return -EINVAL;
3242 
3243 		next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
3244 				le16_to_cpu(get_ptype_info->num_ptypes);
3245 
3246 		ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
3247 
3248 		for (u16 i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
3249 			struct libeth_rx_pt rx_pt = {};
3250 			struct virtchnl2_ptype *ptype;
3251 			u16 pt_10, pt_8;
3252 
3253 			ptype = (struct virtchnl2_ptype *)
3254 					((u8 *)ptype_info + ptype_offset);
3255 
3256 			pt_10 = le16_to_cpu(ptype->ptype_id_10);
3257 			pt_8 = ptype->ptype_id_8;
3258 
3259 			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
3260 			if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
3261 				return -EINVAL;
3262 
3263 			/* 0xFFFF indicates end of ptypes */
3264 			if (pt_10 == IDPF_INVALID_PTYPE_ID)
3265 				goto out;
3266 			if (pt_10 >= max_ptype)
3267 				return -EINVAL;
3268 
3269 			idpf_parse_protocol_ids(ptype, &rx_pt);
3270 			idpf_finalize_ptype_lookup(&rx_pt);
3271 
3272 			/* For a given protocol ID stack, the ptype value might
3273 			 * vary between ptype_id_10 and ptype_id_8. So store
3274 			 * them separately for splitq and singleq. Also skip
3275 			 * the repeated ptypes in case of singleq.
3276 			 */
3277 			splitq_pt_lkup[pt_10] = rx_pt;
3278 			if (!singleq_pt_lkup[pt_8].outer_ip)
3279 				singleq_pt_lkup[pt_8] = rx_pt;
3280 		}
3281 	}
3282 
3283 out:
3284 	adapter->splitq_pt_lkup = no_free_ptr(splitq_pt_lkup);
3285 	adapter->singleq_pt_lkup = no_free_ptr(singleq_pt_lkup);
3286 
3287 	return 0;
3288 }
3289 
3290 /**
3291  * idpf_rel_rx_pt_lkup - release RX ptype lookup table
3292  * @adapter: adapter pointer to get the lookup table
3293  */
3294 static void idpf_rel_rx_pt_lkup(struct idpf_adapter *adapter)
3295 {
3296 	kfree(adapter->splitq_pt_lkup);
3297 	adapter->splitq_pt_lkup = NULL;
3298 
3299 	kfree(adapter->singleq_pt_lkup);
3300 	adapter->singleq_pt_lkup = NULL;
3301 }
3302 
3303 /**
3304  * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
3305  *				    message
3306  * @adapter: adapter pointer used to send virtchnl message
3307  * @vport_id: vport identifier used while preparing the virtchnl message
3308  * @loopback_ena: flag to enable or disable loopback
3309  *
3310  * Return: 0 on success, negative on failure.
3311  */
3312 int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,
3313 				   bool loopback_ena)
3314 {
3315 	struct idpf_vc_xn_params xn_params = {};
3316 	struct virtchnl2_loopback loopback;
3317 	ssize_t reply_sz;
3318 
3319 	loopback.vport_id = cpu_to_le32(vport_id);
3320 	loopback.enable = loopback_ena;
3321 
3322 	xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
3323 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
3324 	xn_params.send_buf.iov_base = &loopback;
3325 	xn_params.send_buf.iov_len = sizeof(loopback);
3326 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
3327 
3328 	return reply_sz < 0 ? reply_sz : 0;
3329 }
3330 
3331 /**
3332  * idpf_find_ctlq - Given a type and id, find ctlq info
3333  * @hw: hardware struct
3334  * @type: type of ctrlq to find
3335  * @id: ctlq id to find
3336  *
3337  * Returns pointer to found ctlq info struct, NULL otherwise.
3338  */
3339 static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
3340 					     enum idpf_ctlq_type type, int id)
3341 {
3342 	struct idpf_ctlq_info *cq, *tmp;
3343 
3344 	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
3345 		if (cq->q_id == id && cq->cq_type == type)
3346 			return cq;
3347 
3348 	return NULL;
3349 }
3350 
3351 /**
3352  * idpf_init_dflt_mbx - Set up default mailbox parameters and make request
3353  * @adapter: adapter info struct
3354  *
3355  * Returns 0 on success, negative otherwise
3356  */
3357 int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
3358 {
3359 	struct idpf_ctlq_create_info ctlq_info[] = {
3360 		{
3361 			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
3362 			.id = IDPF_DFLT_MBX_ID,
3363 			.len = IDPF_DFLT_MBX_Q_LEN,
3364 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
3365 		},
3366 		{
3367 			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
3368 			.id = IDPF_DFLT_MBX_ID,
3369 			.len = IDPF_DFLT_MBX_Q_LEN,
3370 			.buf_size = IDPF_CTLQ_MAX_BUF_LEN
3371 		}
3372 	};
3373 	struct idpf_hw *hw = &adapter->hw;
3374 	int err;
3375 
3376 	adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
3377 
3378 	err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
3379 	if (err)
3380 		return err;
3381 
3382 	hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
3383 				 IDPF_DFLT_MBX_ID);
3384 	hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
3385 				 IDPF_DFLT_MBX_ID);
3386 
3387 	if (!hw->asq || !hw->arq) {
3388 		idpf_ctlq_deinit(hw);
3389 
3390 		return -ENOENT;
3391 	}
3392 
3393 	adapter->state = __IDPF_VER_CHECK;
3394 
3395 	return 0;
3396 }
3397 
3398 /**
3399  * idpf_deinit_dflt_mbx - Free up the default mailbox control queues
3400  * @adapter: Driver specific private data structure
3401  */
3402 void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
3403 {
3404 	if (adapter->hw.arq && adapter->hw.asq) {
3405 		idpf_mb_clean(adapter, adapter->hw.asq);
3406 		idpf_ctlq_deinit(&adapter->hw);
3407 	}
3408 	adapter->hw.arq = NULL;
3409 	adapter->hw.asq = NULL;
3410 }
3411 
3412 /**
3413  * idpf_vport_params_buf_rel - Release memory for MailBox resources
3414  * @adapter: Driver specific private data structure
3415  *
3416  * Will release the memory holding the vport parameters received on MailBox
3417  */
3418 static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
3419 {
3420 	kfree(adapter->vport_params_recvd);
3421 	adapter->vport_params_recvd = NULL;
3422 	kfree(adapter->vport_params_reqd);
3423 	adapter->vport_params_reqd = NULL;
3424 	kfree(adapter->vport_ids);
3425 	adapter->vport_ids = NULL;
3426 }
3427 
3428 /**
3429  * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources
3430  * @adapter: Driver specific private data structure
3431  *
3432  * Allocate memory to hold the vport parameters received over the MailBox.
3433  */
3434 static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
3435 {
3436 	u16 num_max_vports = idpf_get_max_vports(adapter);
3437 
3438 	adapter->vport_params_reqd = kcalloc(num_max_vports,
3439 					     sizeof(*adapter->vport_params_reqd),
3440 					     GFP_KERNEL);
3441 	if (!adapter->vport_params_reqd)
3442 		return -ENOMEM;
3443 
3444 	adapter->vport_params_recvd = kcalloc(num_max_vports,
3445 					      sizeof(*adapter->vport_params_recvd),
3446 					      GFP_KERNEL);
3447 	if (!adapter->vport_params_recvd)
3448 		goto err_mem;
3449 
3450 	adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
3451 	if (!adapter->vport_ids)
3452 		goto err_mem;
3453 
3454 	if (adapter->vport_config)
3455 		return 0;
3456 
3457 	adapter->vport_config = kcalloc(num_max_vports,
3458 					sizeof(*adapter->vport_config),
3459 					GFP_KERNEL);
3460 	if (!adapter->vport_config)
3461 		goto err_mem;
3462 
3463 	return 0;
3464 
3465 err_mem:
3466 	idpf_vport_params_buf_rel(adapter);
3467 
3468 	return -ENOMEM;
3469 }
3470 
3471 /**
3472  * idpf_vc_core_init - Initialize state machine and get driver specific
3473  * resources
3474  * @adapter: Driver specific private structure
3475  *
3476  * This function will initialize the state machine and request all necessary
3477  * resources required by the device driver. Once the state machine is
3478  * initialized, it allocates memory to store vport specific information and
3479  * requests the required interrupts.
3480  *
3481  * Returns 0 on success, -EAGAIN if the function will be called again,
3482  * otherwise negative on failure.
3483  */
3484 int idpf_vc_core_init(struct idpf_adapter *adapter)
3485 {
3486 	int task_delay = 30;
3487 	u16 num_max_vports;
3488 	int err = 0;
3489 
3490 	if (!adapter->vcxn_mngr) {
3491 		adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
3492 		if (!adapter->vcxn_mngr) {
3493 			err = -ENOMEM;
3494 			goto init_failed;
3495 		}
3496 	}
3497 	idpf_vc_xn_init(adapter->vcxn_mngr);
3498 
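	/* Walk the init state machine: negotiate the virtchnl version first,
	 * then request device capabilities before moving on to SW init.
	 */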
3499 	while (adapter->state != __IDPF_INIT_SW) {
3500 		switch (adapter->state) {
3501 		case __IDPF_VER_CHECK:
3502 			err = idpf_send_ver_msg(adapter);
3503 			switch (err) {
3504 			case 0:
3505 				/* success, move state machine forward */
3506 				adapter->state = __IDPF_GET_CAPS;
3507 				fallthrough;
3508 			case -EAGAIN:
3509 				goto restart;
3510 			default:
3511 				/* Something bad happened, try again but only a
3512 				 * few times.
3513 				 */
3514 				goto init_failed;
3515 			}
3516 		case __IDPF_GET_CAPS:
3517 			err = idpf_send_get_caps_msg(adapter);
3518 			if (err)
3519 				goto init_failed;
3520 			adapter->state = __IDPF_INIT_SW;
3521 			break;
3522 		default:
3523 			dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
3524 				adapter->state);
3525 			err = -EINVAL;
3526 			goto init_failed;
3527 		}
3528 		break;
3529 restart:
3530 		/* Give the device enough time before proceeding further with
3531 		 * the state machine
3532 		 */
3533 		msleep(task_delay);
3534 	}
3535 
3536 	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
3537 		err = idpf_send_get_lan_memory_regions(adapter);
3538 		if (err) {
3539 			dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
3540 				err);
3541 			return -EINVAL;
3542 		}
3543 	} else {
3544 		/* Fall back to mapping the remaining regions of the entire BAR */
3545 		err = idpf_calc_remaining_mmio_regs(adapter);
3546 		if (err) {
3547 			dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
3548 				err);
3549 			return -ENOMEM;
3550 		}
3551 	}
3552 
3553 	err = idpf_map_lan_mmio_regs(adapter);
3554 	if (err) {
3555 		dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
3556 			err);
3557 		return -ENOMEM;
3558 	}
3559 
3560 	pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
3561 	num_max_vports = idpf_get_max_vports(adapter);
3562 	adapter->max_vports = num_max_vports;
3563 	adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
3564 				  GFP_KERNEL);
3565 	if (!adapter->vports)
3566 		return -ENOMEM;
3567 
3568 	if (!adapter->netdevs) {
3569 		adapter->netdevs = kcalloc(num_max_vports,
3570 					   sizeof(struct net_device *),
3571 					   GFP_KERNEL);
3572 		if (!adapter->netdevs) {
3573 			err = -ENOMEM;
3574 			goto err_netdev_alloc;
3575 		}
3576 	}
3577 
3578 	err = idpf_vport_params_buf_alloc(adapter);
3579 	if (err) {
3580 		dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
3581 			err);
3582 		goto err_netdev_alloc;
3583 	}
3584 
3585 	/* Start the mailbox task before requesting vectors. This will ensure
3586 	 * the vector information response from the mailbox is handled.
3587 	 */
3588 	queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
3589 
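	/* Stagger the service task start across PCI functions so that all
	 * functions do not poll the mailbox at the same time.
	 */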
3590 	queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
3591 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3592 
3593 	err = idpf_intr_req(adapter);
3594 	if (err) {
3595 		dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
3596 			err);
3597 		goto err_intr_req;
3598 	}
3599 
3600 	err = idpf_send_get_rx_ptype_msg(adapter);
3601 	if (err) {
3602 		dev_err(&adapter->pdev->dev, "failed to get RX ptypes: %d\n",
3603 			err);
3604 		goto intr_rel;
3605 	}
3606 
3607 	err = idpf_ptp_init(adapter);
3608 	if (err)
3609 		pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
3610 			ERR_PTR(err));
3611 
3612 	idpf_init_avail_queues(adapter);
3613 
3614 	/* Skew the delay for init tasks for each function based on fn number
3615 	 * to prevent every function from making the same call simultaneously.
3616 	 */
3617 	queue_delayed_work(adapter->init_wq, &adapter->init_task,
3618 			   msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
3619 
3620 	set_bit(IDPF_VC_CORE_INIT, adapter->flags);
3621 
3622 	return 0;
3623 
3624 intr_rel:
3625 	idpf_intr_rel(adapter);
3626 err_intr_req:
3627 	cancel_delayed_work_sync(&adapter->serv_task);
3628 	cancel_delayed_work_sync(&adapter->mbx_task);
3629 	idpf_vport_params_buf_rel(adapter);
3630 err_netdev_alloc:
3631 	kfree(adapter->vports);
3632 	adapter->vports = NULL;
3633 	return err;
3634 
3635 init_failed:
3636 	/* Don't retry if we're trying to go down, just bail. */
3637 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
3638 		return err;
3639 
3640 	if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
3641 		dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
3642 
3643 		return -EFAULT;
3644 	}
3645 	/* If we reached here, it is possible that the mailbox queue
3646 	 * initialization register writes have not taken effect yet. Retry
3647 	 * initializing the mailbox.
3648 	 */
3649 	adapter->state = __IDPF_VER_CHECK;
3650 	if (adapter->vcxn_mngr)
3651 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3652 	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
3653 	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
3654 			   msecs_to_jiffies(task_delay));
3655 
3656 	return -EAGAIN;
3657 }
3658 
3659 /**
3660  * idpf_vc_core_deinit - Device deinit routine
3661  * @adapter: Driver specific private structure
3663  */
3664 void idpf_vc_core_deinit(struct idpf_adapter *adapter)
3665 {
3666 	struct idpf_hw *hw = &adapter->hw;
3667 	bool remove_in_prog;
3668 
3669 	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
3670 		return;
3671 
3672 	/* Avoid transaction timeouts when called during reset */
3673 	remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
3674 	if (!remove_in_prog)
3675 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3676 
3677 	idpf_ptp_release(adapter);
3678 	idpf_deinit_task(adapter);
3679 	idpf_idc_deinit_core_aux_device(adapter->cdev_info);
3680 	idpf_rel_rx_pt_lkup(adapter);
3681 	idpf_intr_rel(adapter);
3682 
3683 	if (remove_in_prog)
3684 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
3685 
3686 	cancel_delayed_work_sync(&adapter->serv_task);
3687 	cancel_delayed_work_sync(&adapter->mbx_task);
3688 
3689 	idpf_vport_params_buf_rel(adapter);
3690 
3691 	kfree(hw->lan_regs);
3692 	hw->lan_regs = NULL;
3693 
3694 	kfree(adapter->vports);
3695 	adapter->vports = NULL;
3696 
3697 	clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
3698 }
3699 
3700 /**
3701  * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3702  * @vport: virtual port data struct
3703  * @rsrc: pointer to queue and vector resources
3704  *
3705  * This function requests the vector information required for the vport and
3706  * stores the vector indexes received from the 'global vector distribution'
3707  * in the vport's queue vectors array.
3708  *
3709  * Return: 0 on success, error on failure
3710  */
3711 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
3712 				 struct idpf_q_vec_rsrc *rsrc)
3713 {
3714 	struct idpf_vector_info vec_info;
3715 	int num_alloc_vecs;
3716 	u32 req;
3717 
3718 	vec_info.num_curr_vecs = rsrc->num_q_vectors;
3719 	if (vec_info.num_curr_vecs)
3720 		vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
3721 
3722 	/* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
3723 	req = max(rsrc->num_txq - vport->num_xdp_txq, rsrc->num_rxq) +
3724 	      IDPF_RESERVED_VECS;
3725 	vec_info.num_req_vecs = req;
3726 
3727 	vec_info.default_vport = vport->default_vport;
3728 	vec_info.index = vport->idx;
3729 
3730 	num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
3731 						     rsrc->q_vector_idxs,
3732 						     &vec_info);
3733 	if (num_alloc_vecs <= 0) {
3734 		dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
3735 			num_alloc_vecs);
3736 		return -EINVAL;
3737 	}
3738 
3739 	rsrc->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
3740 
3741 	return 0;
3742 }
3743 
3744 /**
3745  * idpf_vport_init - Initialize virtual port
3746  * @vport: virtual port to be initialized
3747  * @max_q: vport max queue info
3748  *
3749  * Will initialize the vport with the info received through the mailbox earlier.
3750  *
3751  * Return: 0 on success, negative on failure.
3752  */
3753 int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
3754 {
3755 	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
3756 	struct idpf_adapter *adapter = vport->adapter;
3757 	struct virtchnl2_create_vport *vport_msg;
3758 	struct idpf_vport_config *vport_config;
3759 	u16 tx_itr[] = {2, 8, 64, 128, 256};
3760 	u16 rx_itr[] = {2, 8, 32, 96, 128};
3761 	struct idpf_rss_data *rss_data;
3762 	u16 idx = vport->idx;
3763 	int err;
3764 
3765 	vport_config = adapter->vport_config[idx];
3766 	rss_data = &vport_config->user_config.rss_data;
3767 	vport_msg = adapter->vport_params_recvd[idx];
3768 
3769 	err = idpf_vport_init_queue_reg_chunks(vport_config,
3770 					       &vport_msg->chunks);
3771 	if (err)
3772 		return err;
3773 
3774 	vport_config->max_q.max_txq = max_q->max_txq;
3775 	vport_config->max_q.max_rxq = max_q->max_rxq;
3776 	vport_config->max_q.max_complq = max_q->max_complq;
3777 	vport_config->max_q.max_bufq = max_q->max_bufq;
3778 
3779 	rsrc->txq_model = le16_to_cpu(vport_msg->txq_model);
3780 	rsrc->rxq_model = le16_to_cpu(vport_msg->rxq_model);
3781 	vport->vport_type = le16_to_cpu(vport_msg->vport_type);
3782 	vport->vport_id = le32_to_cpu(vport_msg->vport_id);
3783 
3784 	rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
3785 				       le16_to_cpu(vport_msg->rss_key_size));
3786 	rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
3787 
3788 	ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
3789 	vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
3790 
3791 	/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
3792 	memcpy(vport->rx_itr_profile, rx_itr, sizeof(rx_itr));
3793 	memcpy(vport->tx_itr_profile, tx_itr, sizeof(tx_itr));
3794 
3795 	idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
3796 
3797 	idpf_vport_init_num_qs(vport, vport_msg, rsrc);
3798 	idpf_vport_calc_num_q_desc(vport, rsrc);
3799 	idpf_vport_calc_num_q_groups(rsrc);
3800 	idpf_vport_alloc_vec_indexes(vport, rsrc);
3801 
3802 	vport->crc_enable = adapter->crc_enable;
3803 
3804 	if (!(vport_msg->vport_flags &
3805 	      cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
3806 		return 0;
3807 
3808 	err = idpf_ptp_get_vport_tstamps_caps(vport);
3809 	if (err) {
3810 		/* Do not error on timestamp failure */
3811 		pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
3812 		return 0;
3813 	}
3814 
3815 	INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
3816 
3817 	return 0;
3818 }
3819 
3820 /**
3821  * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3822  * @adapter: adapter structure to get the mailbox vector id
3823  * @vecids: Array of vector ids
3824  * @num_vecids: number of vector ids
3825  * @chunks: vector ids received over mailbox
3826  *
3827  * Will initialize the mailbox vector id, which is received from the
3828  * get capabilities response, and the data queue vector ids with the ids
3829  * received as mailbox parameters.
3830  * Returns number of ids filled.
3831  */
3832 int idpf_get_vec_ids(struct idpf_adapter *adapter,
3833 		     u16 *vecids, int num_vecids,
3834 		     struct virtchnl2_vector_chunks *chunks)
3835 {
3836 	u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
3837 	int num_vecid_filled = 0;
3838 	int i, j;
3839 
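	/* The first entry is always the mailbox vector id */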
3840 	vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
3841 	num_vecid_filled++;
3842 
3843 	for (j = 0; j < num_chunks; j++) {
3844 		struct virtchnl2_vector_chunk *chunk;
3845 		u16 start_vecid, num_vec;
3846 
3847 		chunk = &chunks->vchunks[j];
3848 		num_vec = le16_to_cpu(chunk->num_vectors);
3849 		start_vecid = le16_to_cpu(chunk->start_vector_id);
3850 
3851 		for (i = 0; i < num_vec; i++) {
3852 			if ((num_vecid_filled + i) < num_vecids) {
3853 				vecids[num_vecid_filled + i] = start_vecid;
3854 				start_vecid++;
3855 			} else {
3856 				break;
3857 			}
3858 		}
3859 		num_vecid_filled = num_vecid_filled + i;
3860 	}
3861 
3862 	return num_vecid_filled;
3863 }
3864 
3865 /**
3866  * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3867  * @qids: Array of queue ids
3868  * @num_qids: number of queue ids
3869  * @q_type: type of queue
3870  * @chunks: queue ids received over mailbox
3871  *
3872  * Will initialize all queue ids with ids received as mailbox parameters.
3873  * Returns number of ids filled.
3874  */
3875 static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
3876 				    struct idpf_queue_id_reg_info *chunks)
3877 {
3878 	u16 num_chunks = chunks->num_chunks;
3879 	u32 num_q_id_filled = 0, i;
3880 	u32 start_q_id, num_q;
3881 
3882 	while (num_chunks--) {
3883 		struct idpf_queue_id_reg_chunk *chunk;
3884 
3885 		chunk = &chunks->queue_chunks[num_chunks];
3886 		if (chunk->type != q_type)
3887 			continue;
3888 
3889 		num_q = chunk->num_queues;
3890 		start_q_id = chunk->start_queue_id;
3891 
3892 		for (i = 0; i < num_q; i++) {
3893 			if ((num_q_id_filled + i) < num_qids) {
3894 				qids[num_q_id_filled + i] = start_q_id;
3895 				start_q_id++;
3896 			} else {
3897 				break;
3898 			}
3899 		}
3900 		num_q_id_filled = num_q_id_filled + i;
3901 	}
3902 
3903 	return num_q_id_filled;
3904 }
3905 
3906 /**
3907  * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3908  * @vport: virtual port for which the queue ids are initialized
3909  * @rsrc: pointer to queue and vector resources
3910  * @qids: queue ids
3911  * @num_qids: number of queue ids
3912  * @q_type: type of queue
3913  *
3914  * Will initialize all queue ids with ids received as mailbox
3915  * parameters. Returns number of queue ids initialized.
3916  */
3917 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
3918 				       struct idpf_q_vec_rsrc *rsrc,
3919 				       const u32 *qids,
3920 				       int num_qids,
3921 				       u32 q_type)
3922 {
3923 	int i, j, k = 0;
3924 
3925 	switch (q_type) {
3926 	case VIRTCHNL2_QUEUE_TYPE_TX:
3927 		for (i = 0; i < rsrc->num_txq_grp; i++) {
3928 			struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
3929 
3930 			for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
3931 				tx_qgrp->txqs[j]->q_id = qids[k];
3932 		}
3933 		break;
3934 	case VIRTCHNL2_QUEUE_TYPE_RX:
3935 		for (i = 0; i < rsrc->num_rxq_grp; i++) {
3936 			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
3937 			u16 num_rxq;
3938 
3939 			if (idpf_is_queue_model_split(rsrc->rxq_model))
3940 				num_rxq = rx_qgrp->splitq.num_rxq_sets;
3941 			else
3942 				num_rxq = rx_qgrp->singleq.num_rxq;
3943 
3944 			for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
3945 				struct idpf_rx_queue *q;
3946 
3947 				if (idpf_is_queue_model_split(rsrc->rxq_model))
3948 					q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3949 				else
3950 					q = rx_qgrp->singleq.rxqs[j];
3951 				q->q_id = qids[k];
3952 			}
3953 		}
3954 		break;
3955 	case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
3956 		for (i = 0; i < rsrc->num_txq_grp && k < num_qids; i++, k++) {
3957 			struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
3958 
3959 			tx_qgrp->complq->q_id = qids[k];
3960 		}
3961 		break;
3962 	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
3963 		for (i = 0; i < rsrc->num_rxq_grp; i++) {
3964 			struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
3965 			u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
3966 
3967 			for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
3968 				struct idpf_buf_queue *q;
3969 
3970 				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
3971 				q->q_id = qids[k];
3972 			}
3973 		}
3974 		break;
3975 	default:
3976 		break;
3977 	}
3978 
3979 	return k;
3980 }
3981 
3982 /**
3983  * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3984  * @vport: virtual port for which the queue ids are initialized
3985  * @rsrc: pointer to queue and vector resources
3986  * @chunks: queue ids received over mailbox
3987  *
3988  * Will initialize all queue ids with ids received as mailbox parameters.
3989  *
3990  * Return: 0 on success, negative if all the queues are not initialized.
3991  */
3992 int idpf_vport_queue_ids_init(struct idpf_vport *vport,
3993 			      struct idpf_q_vec_rsrc *rsrc,
3994 			      struct idpf_queue_id_reg_info *chunks)
3995 {
3996 	int num_ids, err = 0;
3997 	u16 q_type;
3998 	u32 *qids;
3999 
4000 	qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
4001 	if (!qids)
4002 		return -ENOMEM;
4003 
4004 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
4005 					   VIRTCHNL2_QUEUE_TYPE_TX,
4006 					   chunks);
4007 	if (num_ids < rsrc->num_txq) {
4008 		err = -EINVAL;
4009 		goto mem_rel;
4010 	}
4011 	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
4012 					      VIRTCHNL2_QUEUE_TYPE_TX);
4013 	if (num_ids < rsrc->num_txq) {
4014 		err = -EINVAL;
4015 		goto mem_rel;
4016 	}
4017 
4018 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
4019 					   VIRTCHNL2_QUEUE_TYPE_RX,
4020 					   chunks);
4021 	if (num_ids < rsrc->num_rxq) {
4022 		err = -EINVAL;
4023 		goto mem_rel;
4024 	}
4025 	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
4026 					      VIRTCHNL2_QUEUE_TYPE_RX);
4027 	if (num_ids < rsrc->num_rxq) {
4028 		err = -EINVAL;
4029 		goto mem_rel;
4030 	}
4031 
4032 	if (!idpf_is_queue_model_split(rsrc->txq_model))
4033 		goto check_rxq;
4034 
4035 	q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
4036 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
4037 	if (num_ids < rsrc->num_complq) {
4038 		err = -EINVAL;
4039 		goto mem_rel;
4040 	}
4041 	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
4042 					      num_ids, q_type);
4043 	if (num_ids < rsrc->num_complq) {
4044 		err = -EINVAL;
4045 		goto mem_rel;
4046 	}
4047 
4048 check_rxq:
4049 	if (!idpf_is_queue_model_split(rsrc->rxq_model))
4050 		goto mem_rel;
4051 
4052 	q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
4053 	num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
4054 	if (num_ids < rsrc->num_bufq) {
4055 		err = -EINVAL;
4056 		goto mem_rel;
4057 	}
4058 	num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
4059 					      num_ids, q_type);
4060 	if (num_ids < rsrc->num_bufq)
4061 		err = -EINVAL;
4062 
4063 mem_rel:
4064 	kfree(qids);
4065 
4066 	return err;
4067 }
4068 
4069 /**
4070  * idpf_vport_adjust_qs - Adjust to new requested queues
4071  * @vport: virtual port data struct
4072  * @rsrc: pointer to queue and vector resources
4073  *
4074  * Renegotiate queues.  Returns 0 on success, negative on failure.
4075  */
4076 int idpf_vport_adjust_qs(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
4077 {
4078 	struct virtchnl2_create_vport vport_msg;
4079 	int err;
4080 
4081 	vport_msg.txq_model = cpu_to_le16(rsrc->txq_model);
4082 	vport_msg.rxq_model = cpu_to_le16(rsrc->rxq_model);
4083 	err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
4084 				       NULL);
4085 	if (err)
4086 		return err;
4087 
4088 	idpf_vport_init_num_qs(vport, &vport_msg, rsrc);
4089 	idpf_vport_calc_num_q_groups(rsrc);
4090 
4091 	return 0;
4092 }
4093 
4094 /**
4095  * idpf_is_capability_ena - Default implementation of capability checking
4096  * @adapter: Private data struct
4097  * @all: whether all flag bits must be set (true) or any bit (false)
4098  * @field: caps field to check for flags
4099  * @flag: flag to check
4100  *
4101  * Return true if the requested capabilities are supported, false otherwise
4102  */
4103 bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
4104 			    enum idpf_cap_field field, u64 flag)
4105 {
4106 	u8 *caps = (u8 *)&adapter->caps;
4107 	u32 *cap_field;
4108 
4109 	if (!caps)
4110 		return false;
4111 
4112 	if (field == IDPF_BASE_CAPS)
4113 		return false;
4114 
4115 	cap_field = (u32 *)(caps + field);
4116 
4117 	if (all)
4118 		return (*cap_field & flag) == flag;
4119 	else
4120 		return !!(*cap_field & flag);
4121 }
4122 
4123 /**
4124  * idpf_vport_is_cap_ena - Check if vport capability is enabled
4125  * @vport: Private data struct
4126  * @flag: flag(s) to check
4127  *
4128  * Return: true if the capability is supported, false otherwise
4129  */
4130 bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
4131 {
4132 	struct virtchnl2_create_vport *vport_msg;
4133 
4134 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
4135 
4136 	return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
4137 }
4138 
4139 /**
4140  * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
4141  * @vport: Private data struct
4142  * @flow_type: flow type to check (from ethtool.h)
4143  *
4144  * Return: true if sideband filters are allowed for @flow_type, false otherwise
4145  */
4146 bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
4147 {
4148 	struct virtchnl2_create_vport *vport_msg;
4149 	__le64 caps;
4150 
4151 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
4152 	caps = vport_msg->sideband_flow_caps;
4153 
4154 	switch (flow_type) {
4155 	case TCP_V4_FLOW:
4156 		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
4157 	case UDP_V4_FLOW:
4158 		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
4159 	default:
4160 		return false;
4161 	}
4162 }
4163 
4164 /**
4165  * idpf_sideband_action_ena - Check if steering is enabled for action
4166  * @vport: Private data struct
4167  * @fsp: flow spec
4168  *
4169  * Return: true if sideband filters are allowed for @fsp, false otherwise
4170  */
4171 bool idpf_sideband_action_ena(struct idpf_vport *vport,
4172 			      struct ethtool_rx_flow_spec *fsp)
4173 {
4174 	struct virtchnl2_create_vport *vport_msg;
4175 	unsigned int supp_actions;
4176 
4177 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
4178 	supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
4179 
4180 	/* Actions Drop/Wake are not supported */
4181 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
4182 	    fsp->ring_cookie == RX_CLS_FLOW_WAKE)
4183 		return false;
4184 
4185 	return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
4186 }
4187 
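/**
 * idpf_fsteer_max_rules - Get the maximum number of flow steering rules
 * @vport: virtual port structure
 *
 * Return: maximum number of flow steering rules supported, as reported by
 * the device in the create vport response.
 */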
4188 unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
4189 {
4190 	struct virtchnl2_create_vport *vport_msg;
4191 
4192 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
4193 	return le32_to_cpu(vport_msg->flow_steer_max_rules);
4194 }
4195 
4196 /**
4197  * idpf_get_vport_id - Get vport id
4198  * @vport: virtual port structure
4199  *
4200  * Return vport id from the adapter persistent data
4201  */
4202 u32 idpf_get_vport_id(struct idpf_vport *vport)
4203 {
4204 	struct virtchnl2_create_vport *vport_msg;
4205 
4206 	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
4207 
4208 	return le32_to_cpu(vport_msg->vport_id);
4209 }
4210 
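/**
 * idpf_set_mac_type - Set the MAC address type for a filter entry
 * @default_mac_addr: vport's default MAC address
 * @mac_addr: MAC address entry to update
 *
 * Mark the entry as primary when it matches the default MAC address,
 * extra otherwise.
 */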
4211 static void idpf_set_mac_type(const u8 *default_mac_addr,
4212 			      struct virtchnl2_mac_addr *mac_addr)
4213 {
4214 	bool is_primary;
4215 
4216 	is_primary = ether_addr_equal(default_mac_addr, mac_addr->addr);
4217 	mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
4218 				      VIRTCHNL2_MAC_ADDR_EXTRA;
4219 }
4220 
4221 /**
4222  * idpf_mac_filter_async_handler - Async callback for mac filters
4223  * @adapter: private data struct
4224  * @xn: transaction for message
4225  * @ctlq_msg: received message
4226  *
4227  * In some scenarios the driver can't sleep and wait for a reply (e.g. the
4228  * stack is holding rtnl_lock) when adding a new mac filter, which makes it
4229  * difficult to deal with errors returned in the reply. The best we can
4230  * ultimately do is remove the filter from our list of mac filters and report
4231  * the error.
4232  */
4233 static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
4234 					 struct idpf_vc_xn *xn,
4235 					 const struct idpf_ctlq_msg *ctlq_msg)
4236 {
4237 	struct virtchnl2_mac_addr_list *ma_list;
4238 	struct idpf_vport_config *vport_config;
4239 	struct virtchnl2_mac_addr *mac_addr;
4240 	struct idpf_mac_filter *f, *tmp;
4241 	struct list_head *ma_list_head;
4242 	struct idpf_vport *vport;
4243 	u16 num_entries;
4244 	int i;
4245 
4246 	/* if success we're done, we're only here if something bad happened */
4247 	if (!ctlq_msg->cookie.mbx.chnl_retval)
4248 		return 0;
4249 
4250 	/* make sure at least struct is there */
4251 	if (xn->reply_sz < sizeof(*ma_list))
4252 		goto invalid_payload;
4253 
4254 	ma_list = ctlq_msg->ctx.indirect.payload->va;
4255 	mac_addr = ma_list->mac_addr_list;
4256 	num_entries = le16_to_cpu(ma_list->num_mac_addr);
4257 	/* we should have received a buffer at least this big */
4258 	if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
4259 		goto invalid_payload;
4260 
4261 	vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
4262 	if (!vport)
4263 		goto invalid_payload;
4264 
4265 	vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
4266 	ma_list_head = &vport_config->user_config.mac_filter_list;
4267 
4268 	/* We can't do much to reconcile bad filters at this point, however we
4269 	 * should at least remove them from our list one way or the other so we
4270 	 * have some idea what good filters we have.
4271 	 */
4272 	spin_lock_bh(&vport_config->mac_filter_list_lock);
4273 	list_for_each_entry_safe(f, tmp, ma_list_head, list)
4274 		for (i = 0; i < num_entries; i++)
4275 			if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
4276 				list_del(&f->list);
4277 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
4278 	dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
4279 			    xn->vc_op);
4280 
4281 	return 0;
4282 
4283 invalid_payload:
4284 	dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
4285 			    xn->vc_op, xn->reply_sz);
4286 
4287 	return -EINVAL;
4288 }
4289 
4290 /**
4291  * idpf_add_del_mac_filters - Add/del mac filters
4292  * @adapter: adapter pointer used to send virtchnl message
4293  * @vport_config: persistent vport structure to get the MAC filter list
4294  * @default_mac_addr: default MAC address to compare with
4295  * @vport_id: vport identifier used while preparing the virtchnl message
4296  * @add: Add or delete flag
4297  * @async: Don't wait for return message
4298  *
4299  * Return: 0 on success, error on failure.
4300  **/
4301 int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
4302 			     struct idpf_vport_config *vport_config,
4303 			     const u8 *default_mac_addr, u32 vport_id,
4304 			     bool add, bool async)
4305 {
4306 	struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
4307 	struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
4308 	struct idpf_vc_xn_params xn_params = {};
4309 	u32 num_msgs, total_filters = 0;
4310 	struct idpf_mac_filter *f;
4311 	ssize_t reply_sz;
4312 	int i = 0, k;
4313 
4314 	xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
4315 				VIRTCHNL2_OP_DEL_MAC_ADDR;
4316 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
4317 	xn_params.async = async;
4318 	xn_params.async_handler = idpf_mac_filter_async_handler;
4319 
4320 	spin_lock_bh(&vport_config->mac_filter_list_lock);
4321 
4322 	/* Find the number of newly added filters */
4323 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
4324 			    list) {
4325 		if (add && f->add)
4326 			total_filters++;
4327 		else if (!add && f->remove)
4328 			total_filters++;
4329 	}
4330 
4331 	if (!total_filters) {
4332 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
4333 
4334 		return 0;
4335 	}
4336 
4337 	/* Fill all the new filters into the virtchnl message */
4338 	mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
4339 			   GFP_ATOMIC);
4340 	if (!mac_addr) {
4341 		spin_unlock_bh(&vport_config->mac_filter_list_lock);
4342 
4343 		return -ENOMEM;
4344 	}
4345 
4346 	list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
4347 			    list) {
4348 		if (add && f->add) {
4349 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
4350 			idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
4351 			i++;
4352 			f->add = false;
4353 			if (i == total_filters)
4354 				break;
4355 		}
4356 		if (!add && f->remove) {
4357 			ether_addr_copy(mac_addr[i].addr, f->macaddr);
4358 			idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
4359 			i++;
4360 			f->remove = false;
4361 			if (i == total_filters)
4362 				break;
4363 		}
4364 	}
4365 
4366 	spin_unlock_bh(&vport_config->mac_filter_list_lock);
4367 
4368 	/* Chunk up the filters into multiple messages to avoid
4369 	 * sending a control queue message buffer that is too large
4370 	 */
4371 	num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
4372 
4373 	for (i = 0, k = 0; i < num_msgs; i++) {
4374 		u32 entries_size, buf_size, num_entries;
4375 
4376 		num_entries = min_t(u32, total_filters,
4377 				    IDPF_NUM_FILTERS_PER_MSG);
4378 		entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
4379 		buf_size = struct_size(ma_list, mac_addr_list, num_entries);
4380 
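		/* Allocate a buffer for the first message and for the final,
		 * smaller chunk; otherwise reuse the full-size buffer.
		 */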
4381 		if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
4382 			kfree(ma_list);
4383 			ma_list = kzalloc(buf_size, GFP_ATOMIC);
4384 			if (!ma_list)
4385 				return -ENOMEM;
4386 		} else {
4387 			memset(ma_list, 0, buf_size);
4388 		}
4389 
4390 		ma_list->vport_id = cpu_to_le32(vport_id);
4391 		ma_list->num_mac_addr = cpu_to_le16(num_entries);
4392 		memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
4393 
4394 		xn_params.send_buf.iov_base = ma_list;
4395 		xn_params.send_buf.iov_len = buf_size;
4396 		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
4397 		if (reply_sz < 0)
4398 			return reply_sz;
4399 
4400 		k += num_entries;
4401 		total_filters -= num_entries;
4402 	}
4403 
4404 	return 0;
4405 }
4406 
4407 /**
4408  * idpf_set_promiscuous - set promiscuous and send message to mailbox
4409  * @adapter: Driver specific private structure
4410  * @config_data: Vport specific config data
4411  * @vport_id: Vport identifier
4412  *
4413  * Request to enable promiscuous mode for the vport. The message is sent
4414  * asynchronously and won't wait for a response. Returns 0 on success,
4415  * negative on failure.
4416  */
4417 int idpf_set_promiscuous(struct idpf_adapter *adapter,
4418 			 struct idpf_vport_user_config_data *config_data,
4419 			 u32 vport_id)
4420 {
4421 	struct idpf_vc_xn_params xn_params = {};
4422 	struct virtchnl2_promisc_info vpi;
4423 	ssize_t reply_sz;
4424 	u16 flags = 0;
4425 
4426 	if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
4427 		flags |= VIRTCHNL2_UNICAST_PROMISC;
4428 	if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
4429 		flags |= VIRTCHNL2_MULTICAST_PROMISC;
4430 
4431 	vpi.vport_id = cpu_to_le32(vport_id);
4432 	vpi.flags = cpu_to_le16(flags);
4433 
4434 	xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
4435 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
4436 	xn_params.send_buf.iov_base = &vpi;
4437 	xn_params.send_buf.iov_len = sizeof(vpi);
4438 	/* setting promiscuous is only ever done asynchronously */
4439 	xn_params.async = true;
4440 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
4441 
4442 	return reply_sz < 0 ? reply_sz : 0;
4443 }
4444 
4445 /**
4446  * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
4447  * @cdev_info: IDC core device info pointer
4448  * @send_msg: message to send
4449  * @msg_size: size of message to send
4450  * @recv_msg: message to populate on reception of response
4451  * @recv_len: length of message copied into recv_msg or 0 on error
4452  *
4453  * Return: 0 on success or error code on failure.
4454  */
4455 int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
4456 			       u8 *send_msg, u16 msg_size,
4457 			       u8 *recv_msg, u16 *recv_len)
4458 {
4459 	struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
4460 	struct idpf_vc_xn_params xn_params = { };
4461 	ssize_t reply_sz;
4462 	u16 recv_size;
4463 
4464 	if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
4465 		return -EINVAL;
4466 
4467 	recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
4468 	*recv_len = 0;
4469 	xn_params.vc_op = VIRTCHNL2_OP_RDMA;
4470 	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
4471 	xn_params.send_buf.iov_base = send_msg;
4472 	xn_params.send_buf.iov_len = msg_size;
4473 	xn_params.recv_buf.iov_base = recv_msg;
4474 	xn_params.recv_buf.iov_len = recv_size;
4475 	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
4476 	if (reply_sz < 0)
4477 		return reply_sz;
4478 	*recv_len = reply_sz;
4479 
4480 	return 0;
4481 }
4482 EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
4483
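/*
 * Illustrative usage sketch (not part of the driver): an auxiliary RDMA
 * driver holding a valid struct iidc_rdma_core_dev_info pointer could issue
 * a synchronous virtchnl exchange roughly as follows. The request buffer
 * name and length below are hypothetical.
 *
 *	u8 resp[IDPF_CTLQ_MAX_BUF_LEN];
 *	u16 resp_len = sizeof(resp);
 *	int err;
 *
 *	err = idpf_idc_rdma_vc_send_sync(cdev_info, req_buf, req_len,
 *					 resp, &resp_len);
 *	if (!err)
 *		... reply of resp_len bytes is now in resp ...
 */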