Lines Matching +full:num +full:- +full:txq

1 // SPDX-License-Identifier: GPL-2.0-only
12 * struct idpf_vc_xn_manager - Manager for tracking transactions
26 * idpf_vid_to_vport - Translate vport id to vport pointer
39 if (adapter->vport_ids[i] == v_id) in idpf_vid_to_vport()
40 return adapter->vports[i]; in idpf_vid_to_vport()
46 * idpf_handle_event_link - Handle link event message
56 vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id)); in idpf_handle_event_link()
58 dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n", in idpf_handle_event_link()
59 v2e->vport_id); in idpf_handle_event_link()
62 np = netdev_priv(vport->netdev); in idpf_handle_event_link()
64 np->link_speed_mbps = le32_to_cpu(v2e->link_speed); in idpf_handle_event_link()
66 if (vport->link_up == v2e->link_status) in idpf_handle_event_link()
69 vport->link_up = v2e->link_status; in idpf_handle_event_link()
71 if (np->state != __IDPF_VPORT_UP) in idpf_handle_event_link()
74 if (vport->link_up) { in idpf_handle_event_link()
75 netif_tx_start_all_queues(vport->netdev); in idpf_handle_event_link()
76 netif_carrier_on(vport->netdev); in idpf_handle_event_link()
78 netif_tx_stop_all_queues(vport->netdev); in idpf_handle_event_link()
79 netif_carrier_off(vport->netdev); in idpf_handle_event_link()
84 * idpf_recv_event_msg - Receive virtchnl event message
93 int payload_size = ctlq_msg->ctx.indirect.payload->size; in idpf_recv_event_msg()
98 …dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len… in idpf_recv_event_msg()
99 ctlq_msg->cookie.mbx.chnl_opcode, in idpf_recv_event_msg()
104 v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va; in idpf_recv_event_msg()
105 event = le32_to_cpu(v2e->event); in idpf_recv_event_msg()
112 dev_err(&adapter->pdev->dev, in idpf_recv_event_msg()
119 * idpf_mb_clean - Reclaim the send mailbox queue entries
135 return -ENOMEM; in idpf_mb_clean()
137 err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg); in idpf_mb_clean()
144 dma_mem = q_msg[i]->ctx.indirect.payload; in idpf_mb_clean()
146 dma_free_coherent(&adapter->pdev->dev, dma_mem->size, in idpf_mb_clean()
147 dma_mem->va, dma_mem->pa); in idpf_mb_clean()
160 * idpf_ptp_is_mb_msg - Check if the message is PTP-related
163 * Return: true if msg is PTP-related, false otherwise.
182 * idpf_prepare_ptp_mb_msg - Prepare PTP related message
191 /* If the message is PTP-related and the secondary mailbox is available, in idpf_prepare_ptp_mb_msg()
194 if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid) in idpf_prepare_ptp_mb_msg()
197 ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv; in idpf_prepare_ptp_mb_msg()
198 ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id; in idpf_prepare_ptp_mb_msg()
199 ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id; in idpf_prepare_ptp_mb_msg()
208 * idpf_send_mb_msg - Send message over mailbox
240 return -ENOMEM; in idpf_send_mb_msg()
244 err = -ENOMEM; in idpf_send_mb_msg()
248 ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp; in idpf_send_mb_msg()
249 ctlq_msg->func_id = 0; in idpf_send_mb_msg()
253 ctlq_msg->data_len = msg_size; in idpf_send_mb_msg()
254 ctlq_msg->cookie.mbx.chnl_opcode = op; in idpf_send_mb_msg()
255 ctlq_msg->cookie.mbx.chnl_retval = 0; in idpf_send_mb_msg()
256 dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN; in idpf_send_mb_msg()
257 dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size, in idpf_send_mb_msg()
258 &dma_mem->pa, GFP_ATOMIC); in idpf_send_mb_msg()
259 if (!dma_mem->va) { in idpf_send_mb_msg()
260 err = -ENOMEM; in idpf_send_mb_msg()
266 memcpy(dma_mem->va, msg, msg_size); in idpf_send_mb_msg()
267 ctlq_msg->ctx.indirect.payload = dma_mem; in idpf_send_mb_msg()
268 ctlq_msg->ctx.sw_cookie.data = cookie; in idpf_send_mb_msg()
270 err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg); in idpf_send_mb_msg()
277 dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va, in idpf_send_mb_msg()
278 dma_mem->pa); in idpf_send_mb_msg()
295 * idpf_vc_xn_lock - Request exclusive access to vc transaction
299 raw_spin_lock(&(xn)->completed.wait.lock)
302 * idpf_vc_xn_unlock - Release exclusive access to vc transaction
306 raw_spin_unlock(&(xn)->completed.wait.lock)
309 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
315 xn->reply.iov_base = NULL; in idpf_vc_xn_release_bufs()
316 xn->reply.iov_len = 0; in idpf_vc_xn_release_bufs()
318 if (xn->state != IDPF_VC_XN_SHUTDOWN) in idpf_vc_xn_release_bufs()
319 xn->state = IDPF_VC_XN_IDLE; in idpf_vc_xn_release_bufs()
323 * idpf_vc_xn_init - Initialize virtchnl transaction object
330 spin_lock_init(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_init()
332 for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) { in idpf_vc_xn_init()
333 struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; in idpf_vc_xn_init()
335 xn->state = IDPF_VC_XN_IDLE; in idpf_vc_xn_init()
336 xn->idx = i; in idpf_vc_xn_init()
338 init_completion(&xn->completed); in idpf_vc_xn_init()
341 bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); in idpf_vc_xn_init()
345 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
348 * All waiting threads will be woken-up and their transaction aborted. Further
355 spin_lock_bh(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_shutdown()
356 bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); in idpf_vc_xn_shutdown()
357 spin_unlock_bh(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_shutdown()
359 for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) { in idpf_vc_xn_shutdown()
360 struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; in idpf_vc_xn_shutdown()
363 xn->state = IDPF_VC_XN_SHUTDOWN; in idpf_vc_xn_shutdown()
366 complete_all(&xn->completed); in idpf_vc_xn_shutdown()
371 * idpf_vc_xn_pop_free - Pop a free transaction from free list
382 spin_lock_bh(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_pop_free()
383 free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); in idpf_vc_xn_pop_free()
387 clear_bit(free_idx, vcxn_mngr->free_xn_bm); in idpf_vc_xn_pop_free()
388 xn = &vcxn_mngr->ring[free_idx]; in idpf_vc_xn_pop_free()
389 xn->salt = vcxn_mngr->salt++; in idpf_vc_xn_pop_free()
392 spin_unlock_bh(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_pop_free()
398 * idpf_vc_xn_push_free - Push a free transaction to free list
406 set_bit(xn->idx, vcxn_mngr->free_xn_bm); in idpf_vc_xn_push_free()
410 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
413 * -vc_op: virtchannel operation to send
414 * -send_buf: kvec iov for send buf and len
415 * -recv_buf: kvec iov for recv buf and len (ignored if NULL)
416 * -timeout_ms: timeout waiting for a reply (milliseconds)
417 * -async: don't wait for message reply, will lose caller context
418 * -async_handler: callback to handle async replies
427 const struct kvec *send_buf = &params->send_buf; in idpf_vc_xn_exec()
432 xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr); in idpf_vc_xn_exec()
435 return -ENOSPC; in idpf_vc_xn_exec()
438 if (xn->state == IDPF_VC_XN_SHUTDOWN) { in idpf_vc_xn_exec()
439 retval = -ENXIO; in idpf_vc_xn_exec()
441 } else if (xn->state != IDPF_VC_XN_IDLE) { in idpf_vc_xn_exec()
451 xn->idx, xn->vc_op); in idpf_vc_xn_exec()
454 xn->reply = params->recv_buf; in idpf_vc_xn_exec()
455 xn->reply_sz = 0; in idpf_vc_xn_exec()
456 xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING; in idpf_vc_xn_exec()
457 xn->vc_op = params->vc_op; in idpf_vc_xn_exec()
458 xn->async_handler = params->async_handler; in idpf_vc_xn_exec()
461 if (!params->async) in idpf_vc_xn_exec()
462 reinit_completion(&xn->completed); in idpf_vc_xn_exec()
463 cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) | in idpf_vc_xn_exec()
464 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx); in idpf_vc_xn_exec()
466 retval = idpf_send_mb_msg(adapter, params->vc_op, in idpf_vc_xn_exec()
467 send_buf->iov_len, send_buf->iov_base, in idpf_vc_xn_exec()
474 if (params->async) in idpf_vc_xn_exec()
477 wait_for_completion_timeout(&xn->completed, in idpf_vc_xn_exec()
478 msecs_to_jiffies(params->timeout_ms)); in idpf_vc_xn_exec()
483 * wait_for_completion_timeout returns. This should be non-issue in idpf_vc_xn_exec()
487 switch (xn->state) { in idpf_vc_xn_exec()
489 retval = -ENXIO; in idpf_vc_xn_exec()
492 dev_notice_ratelimited(&adapter->pdev->dev, in idpf_vc_xn_exec()
493 "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n", in idpf_vc_xn_exec()
494 params->vc_op, cookie, xn->vc_op, in idpf_vc_xn_exec()
495 xn->salt, params->timeout_ms); in idpf_vc_xn_exec()
496 retval = -ETIME; in idpf_vc_xn_exec()
499 retval = xn->reply_sz; in idpf_vc_xn_exec()
502 dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n", in idpf_vc_xn_exec()
503 params->vc_op); in idpf_vc_xn_exec()
504 retval = -EIO; in idpf_vc_xn_exec()
509 retval = -EIO; in idpf_vc_xn_exec()
514 idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); in idpf_vc_xn_exec()
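
To make the transaction flow above concrete, here is a minimal, simplified sketch of a synchronous caller; it is not taken from this file, but mirrors the pattern used by idpf_send_get_caps_msg() further down. The struct and field names follow the idpf_vc_xn_params kernel-doc above; the zeroed request and the IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC timeout are assumptions/simplifications.

	/* Illustrative only: drive one blocking virtchnl transaction. */
	static int example_get_caps(struct idpf_adapter *adapter)
	{
		struct virtchnl2_get_capabilities caps = { };
		struct idpf_vc_xn_params xn_params = {
			.vc_op = VIRTCHNL2_OP_GET_CAPS,
			.send_buf.iov_base = &caps,
			.send_buf.iov_len = sizeof(caps),
			.recv_buf.iov_base = &adapter->caps,
			.recv_buf.iov_len = sizeof(adapter->caps),
			.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, /* assumed */
		};
		ssize_t reply_sz;

		/* Blocks until idpf_vc_xn_forward_reply() completes the slot
		 * or the timeout expires; returns reply size or -errno.
		 */
		reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
		if (reply_sz < 0)
			return reply_sz;

		if (reply_sz < sizeof(adapter->caps))
			return -EIO;

		return 0;
	}
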
523 * idpf_vc_xn_forward_async - Handle async reply receives
538 if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { in idpf_vc_xn_forward_async()
539 …dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (… in idpf_vc_xn_forward_async()
540 ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); in idpf_vc_xn_forward_async()
541 xn->reply_sz = 0; in idpf_vc_xn_forward_async()
542 err = -EINVAL; in idpf_vc_xn_forward_async()
546 if (xn->async_handler) { in idpf_vc_xn_forward_async()
547 err = xn->async_handler(adapter, xn, ctlq_msg); in idpf_vc_xn_forward_async()
551 if (ctlq_msg->cookie.mbx.chnl_retval) { in idpf_vc_xn_forward_async()
552 xn->reply_sz = 0; in idpf_vc_xn_forward_async()
553 dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n", in idpf_vc_xn_forward_async()
554 ctlq_msg->cookie.mbx.chnl_opcode); in idpf_vc_xn_forward_async()
555 err = -EINVAL; in idpf_vc_xn_forward_async()
559 idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); in idpf_vc_xn_forward_async()
565 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
581 msg_info = ctlq_msg->ctx.sw_cookie.data; in idpf_vc_xn_forward_reply()
583 if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) { in idpf_vc_xn_forward_reply()
584 dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n", in idpf_vc_xn_forward_reply()
586 return -EINVAL; in idpf_vc_xn_forward_reply()
588 xn = &adapter->vcxn_mngr->ring[xn_idx]; in idpf_vc_xn_forward_reply()
591 if (xn->salt != salt) { in idpf_vc_xn_forward_reply()
592 …dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:… in idpf_vc_xn_forward_reply()
593 xn->vc_op, xn->salt, xn->state, in idpf_vc_xn_forward_reply()
594 ctlq_msg->cookie.mbx.chnl_opcode, salt); in idpf_vc_xn_forward_reply()
596 return -EINVAL; in idpf_vc_xn_forward_reply()
599 switch (xn->state) { in idpf_vc_xn_forward_reply()
604 dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n", in idpf_vc_xn_forward_reply()
605 ctlq_msg->cookie.mbx.chnl_opcode); in idpf_vc_xn_forward_reply()
606 err = -EINVAL; in idpf_vc_xn_forward_reply()
614 err = -ENXIO; in idpf_vc_xn_forward_reply()
621 dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n", in idpf_vc_xn_forward_reply()
622 ctlq_msg->cookie.mbx.chnl_opcode); in idpf_vc_xn_forward_reply()
623 err = -EBUSY; in idpf_vc_xn_forward_reply()
627 if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { in idpf_vc_xn_forward_reply()
628 …dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %… in idpf_vc_xn_forward_reply()
629 ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); in idpf_vc_xn_forward_reply()
630 xn->reply_sz = 0; in idpf_vc_xn_forward_reply()
631 xn->state = IDPF_VC_XN_COMPLETED_FAILED; in idpf_vc_xn_forward_reply()
632 err = -EINVAL; in idpf_vc_xn_forward_reply()
636 if (ctlq_msg->cookie.mbx.chnl_retval) { in idpf_vc_xn_forward_reply()
637 xn->reply_sz = 0; in idpf_vc_xn_forward_reply()
638 xn->state = IDPF_VC_XN_COMPLETED_FAILED; in idpf_vc_xn_forward_reply()
639 err = -EINVAL; in idpf_vc_xn_forward_reply()
643 if (ctlq_msg->data_len) { in idpf_vc_xn_forward_reply()
644 payload = ctlq_msg->ctx.indirect.payload->va; in idpf_vc_xn_forward_reply()
645 payload_size = ctlq_msg->data_len; in idpf_vc_xn_forward_reply()
648 xn->reply_sz = payload_size; in idpf_vc_xn_forward_reply()
649 xn->state = IDPF_VC_XN_COMPLETED_SUCCESS; in idpf_vc_xn_forward_reply()
651 if (xn->reply.iov_base && xn->reply.iov_len && payload_size) in idpf_vc_xn_forward_reply()
652 memcpy(xn->reply.iov_base, payload, in idpf_vc_xn_forward_reply()
653 min_t(size_t, xn->reply.iov_len, payload_size)); in idpf_vc_xn_forward_reply()
658 complete(&xn->completed); in idpf_vc_xn_forward_reply()
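
The cookie carried in ctlq_msg->ctx.sw_cookie.data is what lets idpf_vc_xn_forward_reply() find the right ring slot and reject stale replies. Below is a self-contained sketch of the pack/unpack; the layout (slot index in the low byte, salt in the high byte) is an assumption mirroring the driver's IDPF_VC_XN_IDX_M / IDPF_VC_XN_SALT_M masks.

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/* Assumed example layout of the 16-bit sw_cookie value */
	#define EXAMPLE_XN_IDX_M	GENMASK(7, 0)
	#define EXAMPLE_XN_SALT_M	GENMASK(15, 8)

	/* Packed into ctlq_msg->ctx.sw_cookie.data when the request is sent */
	static u16 example_pack_cookie(u8 idx, u8 salt)
	{
		return FIELD_PREP(EXAMPLE_XN_SALT_M, salt) |
		       FIELD_PREP(EXAMPLE_XN_IDX_M, idx);
	}

	/* On reply, idx selects the ring slot; a mismatched salt means the
	 * slot was recycled and the reply is stale, so it is dropped.
	 */
	static void example_unpack_cookie(u16 cookie, u8 *idx, u8 *salt)
	{
		*idx = FIELD_GET(EXAMPLE_XN_IDX_M, cookie);
		*salt = FIELD_GET(EXAMPLE_XN_SALT_M, cookie);
	}
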
664 * idpf_recv_mb_msg - Receive message over mailbox
682 err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg); in idpf_recv_mb_msg()
698 post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, in idpf_recv_mb_msg()
699 adapter->hw.arq, in idpf_recv_mb_msg()
705 dma_free_coherent(&adapter->pdev->dev, in idpf_recv_mb_msg()
706 dma_mem->size, dma_mem->va, in idpf_recv_mb_msg()
707 dma_mem->pa); in idpf_recv_mb_msg()
712 if (err == -ENXIO) in idpf_recv_mb_msg()
722 u32 num);
733 struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num) in idpf_alloc_queue_set() argument
737 qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL); in idpf_alloc_queue_set()
741 qp->vport = vport; in idpf_alloc_queue_set()
742 qp->num = num; in idpf_alloc_queue_set()
748 * idpf_send_chunked_msg - send VC message consisting of chunks
761 .vc_op = params->vc_op, in idpf_send_chunked_msg()
764 const void *pos = params->chunks; in idpf_send_chunked_msg()
767 u32 totqs = params->num_chunks; in idpf_send_chunked_msg()
769 num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz, in idpf_send_chunked_msg()
770 params->chunk_sz), totqs); in idpf_send_chunked_msg()
773 buf_sz = params->config_sz + num_chunks * params->chunk_sz; in idpf_send_chunked_msg()
776 return -ENOMEM; in idpf_send_chunked_msg()
786 if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz) in idpf_send_chunked_msg()
787 return -EINVAL; in idpf_send_chunked_msg()
789 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_chunked_msg()
793 pos += num_chunks * params->chunk_sz; in idpf_send_chunked_msg()
794 totqs -= num_chunks; in idpf_send_chunked_msg()
797 buf_sz = params->config_sz + num_chunks * params->chunk_sz; in idpf_send_chunked_msg()
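
A hedged sketch of how a caller might feed idpf_send_chunked_msg(): a fixed config header plus an array of per-queue chunks, split across as many mailbox messages as fit in IDPF_CTLQ_MAX_BUF_LEN. The field names follow the usage above and the Tx config path further down; the parameter-struct tag (idpf_chunked_msg_params) and the reuse of idpf_prepare_cfg_txqs_msg() as the fill callback are assumptions here.

	/* Illustrative only: send CONFIG_TX_QUEUES for an array of qinfo chunks */
	static int example_config_txqs(struct idpf_vport *vport,
				       struct virtchnl2_txq_info *qinfo, u32 num)
	{
		struct idpf_chunked_msg_params params = {	/* tag assumed */
			.vc_op		= VIRTCHNL2_OP_CONFIG_TX_QUEUES,
			.prepare_msg	= idpf_prepare_cfg_txqs_msg,	/* assumed */
			.config_sz	= sizeof(struct virtchnl2_config_tx_queues),
			.chunk_sz	= sizeof(*qinfo),
			.chunks		= qinfo,	/* one entry per queue */
			.num_chunks	= num,
		};

		return idpf_send_chunked_msg(vport, &params);
	}
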
804 * idpf_wait_for_marker_event_set - wait for software marker response for
808 * Return: 0 success, -errno on failure.
812 struct idpf_tx_queue *txq; in idpf_wait_for_marker_event_set() local
815 for (u32 i = 0; i < qs->num; i++) { in idpf_wait_for_marker_event_set()
816 switch (qs->qs[i].type) { in idpf_wait_for_marker_event_set()
818 txq = qs->qs[i].txq; in idpf_wait_for_marker_event_set()
820 idpf_queue_set(SW_MARKER, txq); in idpf_wait_for_marker_event_set()
821 idpf_wait_for_sw_marker_completion(txq); in idpf_wait_for_marker_event_set()
822 markers_rcvd &= !idpf_queue_has(SW_MARKER, txq); in idpf_wait_for_marker_event_set()
830 netdev_warn(qs->vport->netdev, in idpf_wait_for_marker_event_set()
832 return -ETIMEDOUT; in idpf_wait_for_marker_event_set()
839 * idpf_wait_for_marker_event - wait for software marker response
848 qs = idpf_alloc_queue_set(vport, vport->num_txq); in idpf_wait_for_marker_event()
850 return -ENOMEM; in idpf_wait_for_marker_event()
852 for (u32 i = 0; i < qs->num; i++) { in idpf_wait_for_marker_event()
853 qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_wait_for_marker_event()
854 qs->qs[i].txq = vport->txqs[i]; in idpf_wait_for_marker_event()
861 * idpf_send_ver_msg - send virtchnl version message
874 if (adapter->virt_ver_maj) { in idpf_send_ver_msg()
875 vvi.major = cpu_to_le32(adapter->virt_ver_maj); in idpf_send_ver_msg()
876 vvi.minor = cpu_to_le32(adapter->virt_ver_min); in idpf_send_ver_msg()
892 return -EIO; in idpf_send_ver_msg()
898 dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n"); in idpf_send_ver_msg()
899 return -EINVAL; in idpf_send_ver_msg()
904 dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n"); in idpf_send_ver_msg()
909 if (!adapter->virt_ver_maj && in idpf_send_ver_msg()
912 err = -EAGAIN; in idpf_send_ver_msg()
914 adapter->virt_ver_maj = major; in idpf_send_ver_msg()
915 adapter->virt_ver_min = minor; in idpf_send_ver_msg()
921 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
994 xn_params.recv_buf.iov_base = &adapter->caps; in idpf_send_get_caps_msg()
995 xn_params.recv_buf.iov_len = sizeof(adapter->caps); in idpf_send_get_caps_msg()
1001 if (reply_sz < sizeof(adapter->caps)) in idpf_send_get_caps_msg()
1002 return -EIO; in idpf_send_get_caps_msg()
1008 * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
1028 return -ENOMEM; in idpf_send_get_lan_memory_regions()
1035 num_regions = le16_to_cpu(rcvd_regions->num_memory_regions); in idpf_send_get_lan_memory_regions()
1038 return -EIO; in idpf_send_get_lan_memory_regions()
1041 return -EINVAL; in idpf_send_get_lan_memory_regions()
1043 hw = &adapter->hw; in idpf_send_get_lan_memory_regions()
1044 hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL); in idpf_send_get_lan_memory_regions()
1045 if (!hw->lan_regs) in idpf_send_get_lan_memory_regions()
1046 return -ENOMEM; in idpf_send_get_lan_memory_regions()
1049 hw->lan_regs[i].addr_len = in idpf_send_get_lan_memory_regions()
1050 le64_to_cpu(rcvd_regions->mem_reg[i].size); in idpf_send_get_lan_memory_regions()
1051 hw->lan_regs[i].addr_start = in idpf_send_get_lan_memory_regions()
1052 le64_to_cpu(rcvd_regions->mem_reg[i].start_offset); in idpf_send_get_lan_memory_regions()
1054 hw->num_lan_regs = num_regions; in idpf_send_get_lan_memory_regions()
1060 * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
1071 struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1]; in idpf_calc_remaining_mmio_regs()
1072 struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0]; in idpf_calc_remaining_mmio_regs()
1073 struct idpf_hw *hw = &adapter->hw; in idpf_calc_remaining_mmio_regs()
1075 hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING; in idpf_calc_remaining_mmio_regs()
1076 hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs), in idpf_calc_remaining_mmio_regs()
1078 if (!hw->lan_regs) in idpf_calc_remaining_mmio_regs()
1079 return -ENOMEM; in idpf_calc_remaining_mmio_regs()
1082 hw->lan_regs[0].addr_start = 0; in idpf_calc_remaining_mmio_regs()
1083 hw->lan_regs[0].addr_len = mbx_reg->start; in idpf_calc_remaining_mmio_regs()
1085 hw->lan_regs[1].addr_start = mbx_reg->end + 1; in idpf_calc_remaining_mmio_regs()
1086 hw->lan_regs[1].addr_len = rstat_reg->start - in idpf_calc_remaining_mmio_regs()
1087 hw->lan_regs[1].addr_start; in idpf_calc_remaining_mmio_regs()
1089 hw->lan_regs[2].addr_start = rstat_reg->end + 1; in idpf_calc_remaining_mmio_regs()
1090 hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) - in idpf_calc_remaining_mmio_regs()
1091 hw->lan_regs[2].addr_start; in idpf_calc_remaining_mmio_regs()
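
Worked example with hypothetical offsets: if the mailbox window (static_reg_info[0]) spans 0x1000-0x1fff, the rstat window (static_reg_info[1]) spans 0x22000-0x220ff, and BAR0 is 0x200000 bytes long, the three fallback regions come out as start 0x0 / len 0x1000, start 0x2000 / len 0x20000, and start 0x22100 / len 0x1ddf00, i.e. everything in BAR0 except the two statically mapped windows.
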
1097 * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
1104 struct pci_dev *pdev = adapter->pdev; in idpf_map_lan_mmio_regs()
1105 struct idpf_hw *hw = &adapter->hw; in idpf_map_lan_mmio_regs()
1110 for (int i = 0; i < hw->num_lan_regs; i++) { in idpf_map_lan_mmio_regs()
1114 len = hw->lan_regs[i].addr_len; in idpf_map_lan_mmio_regs()
1117 start = hw->lan_regs[i].addr_start + res_start; in idpf_map_lan_mmio_regs()
1119 hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len); in idpf_map_lan_mmio_regs()
1120 if (!hw->lan_regs[i].vaddr) { in idpf_map_lan_mmio_regs()
1122 return -ENOMEM; in idpf_map_lan_mmio_regs()
1130 * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
1144 int rule_count = le32_to_cpu(rule->count); in idpf_add_del_fsteer_filters()
1150 return -EINVAL; in idpf_add_del_fsteer_filters()
1165 * idpf_vport_alloc_max_qs - Allocate max queues for a vport
1172 struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; in idpf_vport_alloc_max_qs()
1173 struct virtchnl2_get_capabilities *caps = &adapter->caps; in idpf_vport_alloc_max_qs()
1177 mutex_lock(&adapter->queue_lock); in idpf_vport_alloc_max_qs()
1179 /* Caps are device-wide. Give each vport an equal piece */ in idpf_vport_alloc_max_qs()
1180 max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports; in idpf_vport_alloc_max_qs()
1181 max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports; in idpf_vport_alloc_max_qs()
1182 max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports; in idpf_vport_alloc_max_qs()
1183 max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports; in idpf_vport_alloc_max_qs()
1185 if (adapter->num_alloc_vports >= default_vports) { in idpf_vport_alloc_max_qs()
1200 max_q->max_rxq = max_rx_q; in idpf_vport_alloc_max_qs()
1201 max_q->max_txq = max_tx_q; in idpf_vport_alloc_max_qs()
1202 max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP; in idpf_vport_alloc_max_qs()
1203 max_q->max_complq = max_tx_q; in idpf_vport_alloc_max_qs()
1205 if (avail_queues->avail_rxq < max_q->max_rxq || in idpf_vport_alloc_max_qs()
1206 avail_queues->avail_txq < max_q->max_txq || in idpf_vport_alloc_max_qs()
1207 avail_queues->avail_bufq < max_q->max_bufq || in idpf_vport_alloc_max_qs()
1208 avail_queues->avail_complq < max_q->max_complq) { in idpf_vport_alloc_max_qs()
1209 mutex_unlock(&adapter->queue_lock); in idpf_vport_alloc_max_qs()
1211 return -EINVAL; in idpf_vport_alloc_max_qs()
1214 avail_queues->avail_rxq -= max_q->max_rxq; in idpf_vport_alloc_max_qs()
1215 avail_queues->avail_txq -= max_q->max_txq; in idpf_vport_alloc_max_qs()
1216 avail_queues->avail_bufq -= max_q->max_bufq; in idpf_vport_alloc_max_qs()
1217 avail_queues->avail_complq -= max_q->max_complq; in idpf_vport_alloc_max_qs()
1219 mutex_unlock(&adapter->queue_lock); in idpf_vport_alloc_max_qs()
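
For example (hypothetical capability values): with caps->max_rx_q = 256, caps->max_tx_q = 256 and two default vports, each vport may claim up to 128 Rx and 128 Tx queues; max_bufq then becomes 128 * IDPF_MAX_BUFQS_PER_RXQ_GRP and max_complq 128, and all four counts are debited from adapter->avail_queues under queue_lock, or the request fails with -EINVAL if not enough remain.
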
1225 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
1234 mutex_lock(&adapter->queue_lock); in idpf_vport_dealloc_max_qs()
1235 avail_queues = &adapter->avail_queues; in idpf_vport_dealloc_max_qs()
1237 avail_queues->avail_rxq += max_q->max_rxq; in idpf_vport_dealloc_max_qs()
1238 avail_queues->avail_txq += max_q->max_txq; in idpf_vport_dealloc_max_qs()
1239 avail_queues->avail_bufq += max_q->max_bufq; in idpf_vport_dealloc_max_qs()
1240 avail_queues->avail_complq += max_q->max_complq; in idpf_vport_dealloc_max_qs()
1242 mutex_unlock(&adapter->queue_lock); in idpf_vport_dealloc_max_qs()
1246 * idpf_init_avail_queues - Initialize available queues on the device
1251 struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; in idpf_init_avail_queues()
1252 struct virtchnl2_get_capabilities *caps = &adapter->caps; in idpf_init_avail_queues()
1254 avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q); in idpf_init_avail_queues()
1255 avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q); in idpf_init_avail_queues()
1256 avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq); in idpf_init_avail_queues()
1257 avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq); in idpf_init_avail_queues()
1261 * idpf_get_reg_intr_vecs - Get vector queue register offset
1275 chunks = &vport->adapter->req_vec_chunks->vchunks; in idpf_get_reg_intr_vecs()
1276 num_vchunks = le16_to_cpu(chunks->num_vchunks); in idpf_get_reg_intr_vecs()
1283 chunk = &chunks->vchunks[j]; in idpf_get_reg_intr_vecs()
1284 num_vec = le16_to_cpu(chunk->num_vectors); in idpf_get_reg_intr_vecs()
1285 reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start); in idpf_get_reg_intr_vecs()
1286 reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start); in idpf_get_reg_intr_vecs()
1287 reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing); in idpf_get_reg_intr_vecs()
1289 dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing); in idpf_get_reg_intr_vecs()
1290 itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing); in idpf_get_reg_intr_vecs()
1308 * idpf_vport_get_q_reg - Get the queue registers for the vport
1322 u16 num_chunks = le16_to_cpu(chunks->num_chunks); in idpf_vport_get_q_reg()
1326 while (num_chunks--) { in idpf_vport_get_q_reg()
1330 chunk = &chunks->chunks[num_chunks]; in idpf_vport_get_q_reg()
1331 if (le32_to_cpu(chunk->type) != q_type) in idpf_vport_get_q_reg()
1334 num_q = le32_to_cpu(chunk->num_queues); in idpf_vport_get_q_reg()
1335 reg_val = le64_to_cpu(chunk->qtail_reg_start); in idpf_vport_get_q_reg()
1338 reg_val += le32_to_cpu(chunk->qtail_reg_spacing); in idpf_vport_get_q_reg()
1346 * __idpf_queue_reg_init - initialize queue registers
1357 struct idpf_adapter *adapter = vport->adapter; in __idpf_queue_reg_init()
1362 for (i = 0; i < vport->num_txq_grp; i++) { in __idpf_queue_reg_init()
1363 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_queue_reg_init()
1365 for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++) in __idpf_queue_reg_init()
1366 tx_qgrp->txqs[j]->tail = in __idpf_queue_reg_init()
1371 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_queue_reg_init()
1372 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_queue_reg_init()
1373 u16 num_rxq = rx_qgrp->singleq.num_rxq; in __idpf_queue_reg_init()
1378 q = rx_qgrp->singleq.rxqs[j]; in __idpf_queue_reg_init()
1379 q->tail = idpf_get_reg_addr(adapter, in __idpf_queue_reg_init()
1385 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_queue_reg_init()
1386 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_queue_reg_init()
1387 u8 num_bufqs = vport->num_bufqs_per_qgrp; in __idpf_queue_reg_init()
1392 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in __idpf_queue_reg_init()
1393 q->tail = idpf_get_reg_addr(adapter, in __idpf_queue_reg_init()
1406 * idpf_queue_reg_init - initialize queue registers
1416 u16 vport_idx = vport->idx; in idpf_queue_reg_init()
1423 return -ENOMEM; in idpf_queue_reg_init()
1425 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_queue_reg_init()
1426 if (vport_config->req_qs_chunks) { in idpf_queue_reg_init()
1428 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; in idpf_queue_reg_init()
1429 chunks = &vc_aq->chunks; in idpf_queue_reg_init()
1431 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_queue_reg_init()
1432 chunks = &vport_params->chunks; in idpf_queue_reg_init()
1439 if (num_regs < vport->num_txq) { in idpf_queue_reg_init()
1440 ret = -EINVAL; in idpf_queue_reg_init()
1446 if (num_regs < vport->num_txq) { in idpf_queue_reg_init()
1447 ret = -EINVAL; in idpf_queue_reg_init()
1454 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_queue_reg_init()
1458 if (num_regs < vport->num_bufq) { in idpf_queue_reg_init()
1459 ret = -EINVAL; in idpf_queue_reg_init()
1465 if (num_regs < vport->num_bufq) { in idpf_queue_reg_init()
1466 ret = -EINVAL; in idpf_queue_reg_init()
1473 if (num_regs < vport->num_rxq) { in idpf_queue_reg_init()
1474 ret = -EINVAL; in idpf_queue_reg_init()
1480 if (num_regs < vport->num_rxq) { in idpf_queue_reg_init()
1481 ret = -EINVAL; in idpf_queue_reg_init()
1493 * idpf_send_create_vport_msg - Send virtchnl create vport message
1506 u16 idx = adapter->next_vport; in idpf_send_create_vport_msg()
1511 if (!adapter->vport_params_reqd[idx]) { in idpf_send_create_vport_msg()
1512 adapter->vport_params_reqd[idx] = kzalloc(buf_size, in idpf_send_create_vport_msg()
1514 if (!adapter->vport_params_reqd[idx]) in idpf_send_create_vport_msg()
1515 return -ENOMEM; in idpf_send_create_vport_msg()
1518 vport_msg = adapter->vport_params_reqd[idx]; in idpf_send_create_vport_msg()
1519 vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); in idpf_send_create_vport_msg()
1520 vport_msg->vport_index = cpu_to_le16(idx); in idpf_send_create_vport_msg()
1522 if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) in idpf_send_create_vport_msg()
1523 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); in idpf_send_create_vport_msg()
1525 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); in idpf_send_create_vport_msg()
1527 if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) in idpf_send_create_vport_msg()
1528 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); in idpf_send_create_vport_msg()
1530 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); in idpf_send_create_vport_msg()
1534 dev_err(&adapter->pdev->dev, "Enough queues are not available"); in idpf_send_create_vport_msg()
1539 if (!adapter->vport_params_recvd[idx]) { in idpf_send_create_vport_msg()
1540 adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, in idpf_send_create_vport_msg()
1542 if (!adapter->vport_params_recvd[idx]) { in idpf_send_create_vport_msg()
1543 err = -ENOMEM; in idpf_send_create_vport_msg()
1551 xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx]; in idpf_send_create_vport_msg()
1563 kfree(adapter->vport_params_recvd[idx]); in idpf_send_create_vport_msg()
1564 adapter->vport_params_recvd[idx] = NULL; in idpf_send_create_vport_msg()
1565 kfree(adapter->vport_params_reqd[idx]); in idpf_send_create_vport_msg()
1566 adapter->vport_params_reqd[idx] = NULL; in idpf_send_create_vport_msg()
1572 * idpf_check_supported_desc_ids - Verify we have required descriptor support
1579 struct idpf_adapter *adapter = vport->adapter; in idpf_check_supported_desc_ids()
1583 vport_msg = adapter->vport_params_recvd[vport->idx]; in idpf_check_supported_desc_ids()
1586 (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE || in idpf_check_supported_desc_ids()
1587 vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) { in idpf_check_supported_desc_ids()
1588 pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n"); in idpf_check_supported_desc_ids()
1589 return -EOPNOTSUPP; in idpf_check_supported_desc_ids()
1592 rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); in idpf_check_supported_desc_ids()
1593 tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); in idpf_check_supported_desc_ids()
1595 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_check_supported_desc_ids()
1597 dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); in idpf_check_supported_desc_ids()
1598 vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); in idpf_check_supported_desc_ids()
1602 vport->base_rxd = true; in idpf_check_supported_desc_ids()
1605 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_check_supported_desc_ids()
1609 dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n"); in idpf_check_supported_desc_ids()
1610 vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID); in idpf_check_supported_desc_ids()
1617 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
1629 v_id.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_destroy_vport_msg()
1635 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_destroy_vport_msg()
1641 * idpf_send_enable_vport_msg - Send virtchnl enable vport message
1653 v_id.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_enable_vport_msg()
1659 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_enable_vport_msg()
1665 * idpf_send_disable_vport_msg - Send virtchnl disable vport message
1677 v_id.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_disable_vport_msg()
1683 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_disable_vport_msg()
1689 * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
1700 qi->queue_id = cpu_to_le32(q->q_id); in idpf_fill_txq_config_chunk()
1701 qi->model = cpu_to_le16(vport->txq_model); in idpf_fill_txq_config_chunk()
1702 qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); in idpf_fill_txq_config_chunk()
1703 qi->ring_len = cpu_to_le16(q->desc_count); in idpf_fill_txq_config_chunk()
1704 qi->dma_ring_addr = cpu_to_le64(q->dma); in idpf_fill_txq_config_chunk()
1705 qi->relative_queue_id = cpu_to_le16(q->rel_q_id); in idpf_fill_txq_config_chunk()
1707 if (!idpf_is_queue_model_split(vport->txq_model)) { in idpf_fill_txq_config_chunk()
1708 qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); in idpf_fill_txq_config_chunk()
1713 val = q->complq->q_id; in idpf_fill_txq_config_chunk()
1715 val = q->txq_grp->complq->q_id; in idpf_fill_txq_config_chunk()
1717 qi->tx_compl_queue_id = cpu_to_le16(val); in idpf_fill_txq_config_chunk()
1724 qi->sched_mode = cpu_to_le16(val); in idpf_fill_txq_config_chunk()
1728 * idpf_fill_complq_config_chunk - fill chunk describing the completion queue
1739 qi->queue_id = cpu_to_le32(q->q_id); in idpf_fill_complq_config_chunk()
1740 qi->model = cpu_to_le16(vport->txq_model); in idpf_fill_complq_config_chunk()
1741 qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); in idpf_fill_complq_config_chunk()
1742 qi->ring_len = cpu_to_le16(q->desc_count); in idpf_fill_complq_config_chunk()
1743 qi->dma_ring_addr = cpu_to_le64(q->dma); in idpf_fill_complq_config_chunk()
1750 qi->sched_mode = cpu_to_le16(val); in idpf_fill_complq_config_chunk()
1754 * idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
1771 ctq->vport_id = cpu_to_le32(vport->vport_id); in idpf_prepare_cfg_txqs_msg()
1772 ctq->num_qinfo = cpu_to_le16(num_chunks); in idpf_prepare_cfg_txqs_msg()
1773 memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo)); in idpf_prepare_cfg_txqs_msg()
1779 * idpf_send_config_tx_queue_set_msg - send virtchnl config Tx queues
1786 * Return: 0 on success, -errno on failure.
1798 qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL); in idpf_send_config_tx_queue_set_msg()
1800 return -ENOMEM; in idpf_send_config_tx_queue_set_msg()
1804 for (u32 i = 0; i < qs->num; i++) { in idpf_send_config_tx_queue_set_msg()
1805 if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX) in idpf_send_config_tx_queue_set_msg()
1806 idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq, in idpf_send_config_tx_queue_set_msg()
1808 else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION) in idpf_send_config_tx_queue_set_msg()
1809 idpf_fill_complq_config_chunk(qs->vport, in idpf_send_config_tx_queue_set_msg()
1810 qs->qs[i].complq, in idpf_send_config_tx_queue_set_msg()
1814 return idpf_send_chunked_msg(qs->vport, &params); in idpf_send_config_tx_queue_set_msg()
1818 * idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
1821 * Return: 0 on success, -errno on failure.
1826 u32 totqs = vport->num_txq + vport->num_complq; in idpf_send_config_tx_queues_msg()
1831 return -ENOMEM; in idpf_send_config_tx_queues_msg()
1834 for (u32 i = 0; i < vport->num_txq_grp; i++) { in idpf_send_config_tx_queues_msg()
1835 const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_config_tx_queues_msg()
1837 for (u32 j = 0; j < tx_qgrp->num_txq; j++) { in idpf_send_config_tx_queues_msg()
1838 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_send_config_tx_queues_msg()
1839 qs->qs[k++].txq = tx_qgrp->txqs[j]; in idpf_send_config_tx_queues_msg()
1842 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_send_config_tx_queues_msg()
1843 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; in idpf_send_config_tx_queues_msg()
1844 qs->qs[k++].complq = tx_qgrp->complq; in idpf_send_config_tx_queues_msg()
1850 return -EINVAL; in idpf_send_config_tx_queues_msg()
1856 * idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
1867 qi->queue_id = cpu_to_le32(q->q_id); in idpf_fill_rxq_config_chunk()
1868 qi->model = cpu_to_le16(vport->rxq_model); in idpf_fill_rxq_config_chunk()
1869 qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); in idpf_fill_rxq_config_chunk()
1870 qi->ring_len = cpu_to_le16(q->desc_count); in idpf_fill_rxq_config_chunk()
1871 qi->dma_ring_addr = cpu_to_le64(q->dma); in idpf_fill_rxq_config_chunk()
1872 qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size); in idpf_fill_rxq_config_chunk()
1873 qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark); in idpf_fill_rxq_config_chunk()
1874 qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE); in idpf_fill_rxq_config_chunk()
1876 qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); in idpf_fill_rxq_config_chunk()
1878 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_fill_rxq_config_chunk()
1879 qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); in idpf_fill_rxq_config_chunk()
1880 qi->desc_ids = cpu_to_le64(q->rxdids); in idpf_fill_rxq_config_chunk()
1885 sets = q->bufq_sets; in idpf_fill_rxq_config_chunk()
1891 q->rx_buf_size = sets[0].bufq.rx_buf_size; in idpf_fill_rxq_config_chunk()
1892 qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); in idpf_fill_rxq_config_chunk()
1894 qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); in idpf_fill_rxq_config_chunk()
1895 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { in idpf_fill_rxq_config_chunk()
1896 qi->bufq2_ena = IDPF_BUFQ2_ENA; in idpf_fill_rxq_config_chunk()
1897 qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id); in idpf_fill_rxq_config_chunk()
1900 q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size; in idpf_fill_rxq_config_chunk()
1903 qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); in idpf_fill_rxq_config_chunk()
1904 qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size); in idpf_fill_rxq_config_chunk()
1907 qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); in idpf_fill_rxq_config_chunk()
1911 * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
1920 qi->queue_id = cpu_to_le32(q->q_id); in idpf_fill_bufq_config_chunk()
1921 qi->model = cpu_to_le16(vport->rxq_model); in idpf_fill_bufq_config_chunk()
1922 qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); in idpf_fill_bufq_config_chunk()
1923 qi->ring_len = cpu_to_le16(q->desc_count); in idpf_fill_bufq_config_chunk()
1924 qi->dma_ring_addr = cpu_to_le64(q->dma); in idpf_fill_bufq_config_chunk()
1925 qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); in idpf_fill_bufq_config_chunk()
1926 qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark); in idpf_fill_bufq_config_chunk()
1927 qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); in idpf_fill_bufq_config_chunk()
1928 qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE; in idpf_fill_bufq_config_chunk()
1930 qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC); in idpf_fill_bufq_config_chunk()
1933 qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); in idpf_fill_bufq_config_chunk()
1934 qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size); in idpf_fill_bufq_config_chunk()
1939 * idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
1956 crq->vport_id = cpu_to_le32(vport->vport_id); in idpf_prepare_cfg_rxqs_msg()
1957 crq->num_qinfo = cpu_to_le16(num_chunks); in idpf_prepare_cfg_rxqs_msg()
1958 memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo)); in idpf_prepare_cfg_rxqs_msg()
1964 * idpf_send_config_rx_queue_set_msg - send virtchnl config Rx queues message
1971 * Return: 0 on success, -errno on failure.
1983 qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL); in idpf_send_config_rx_queue_set_msg()
1985 return -ENOMEM; in idpf_send_config_rx_queue_set_msg()
1989 for (u32 i = 0; i < qs->num; i++) { in idpf_send_config_rx_queue_set_msg()
1990 if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX) in idpf_send_config_rx_queue_set_msg()
1991 idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq, in idpf_send_config_rx_queue_set_msg()
1993 else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER) in idpf_send_config_rx_queue_set_msg()
1994 idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq, in idpf_send_config_rx_queue_set_msg()
1998 return idpf_send_chunked_msg(qs->vport, &params); in idpf_send_config_rx_queue_set_msg()
2002 * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
2005 * Return: 0 on success, -errno on failure.
2009 bool splitq = idpf_is_queue_model_split(vport->rxq_model); in idpf_send_config_rx_queues_msg()
2011 u32 totqs = vport->num_rxq + vport->num_bufq; in idpf_send_config_rx_queues_msg()
2016 return -ENOMEM; in idpf_send_config_rx_queues_msg()
2019 for (u32 i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_config_rx_queues_msg()
2020 const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_config_rx_queues_msg()
2024 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_send_config_rx_queues_msg()
2028 for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_send_config_rx_queues_msg()
2029 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; in idpf_send_config_rx_queues_msg()
2030 qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_send_config_rx_queues_msg()
2033 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_send_config_rx_queues_msg()
2037 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; in idpf_send_config_rx_queues_msg()
2040 qs->qs[k++].rxq = in idpf_send_config_rx_queues_msg()
2041 &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_send_config_rx_queues_msg()
2043 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_config_rx_queues_msg()
2049 return -EINVAL; in idpf_send_config_rx_queues_msg()
2055 * idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
2073 eq->vport_id = cpu_to_le32(vport->vport_id); in idpf_prepare_ena_dis_qs_msg()
2074 eq->chunks.num_chunks = cpu_to_le16(num_chunks); in idpf_prepare_ena_dis_qs_msg()
2075 memcpy(eq->chunks.chunks, pos, in idpf_prepare_ena_dis_qs_msg()
2076 num_chunks * sizeof(*eq->chunks.chunks)); in idpf_prepare_ena_dis_qs_msg()
2082 * idpf_send_ena_dis_queue_set_msg - send virtchnl enable or disable queues
2091 * Return: 0 on success, -errno on failure.
2103 .num_chunks = qs->num, in idpf_send_ena_dis_queue_set_msg()
2106 qc = kcalloc(qs->num, sizeof(*qc), GFP_KERNEL); in idpf_send_ena_dis_queue_set_msg()
2108 return -ENOMEM; in idpf_send_ena_dis_queue_set_msg()
2112 for (u32 i = 0; i < qs->num; i++) { in idpf_send_ena_dis_queue_set_msg()
2113 const struct idpf_queue_ptr *q = &qs->qs[i]; in idpf_send_ena_dis_queue_set_msg()
2116 qc[i].type = cpu_to_le32(q->type); in idpf_send_ena_dis_queue_set_msg()
2119 switch (q->type) { in idpf_send_ena_dis_queue_set_msg()
2121 qid = q->rxq->q_id; in idpf_send_ena_dis_queue_set_msg()
2124 qid = q->txq->q_id; in idpf_send_ena_dis_queue_set_msg()
2127 qid = q->bufq->q_id; in idpf_send_ena_dis_queue_set_msg()
2130 qid = q->complq->q_id; in idpf_send_ena_dis_queue_set_msg()
2133 return -EINVAL; in idpf_send_ena_dis_queue_set_msg()
2139 return idpf_send_chunked_msg(qs->vport, &params); in idpf_send_ena_dis_queue_set_msg()
2143 * idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
2148 * Return: 0 on success, -errno on failure.
2156 num_txq = vport->num_txq + vport->num_complq; in idpf_send_ena_dis_queues_msg()
2157 num_q = num_txq + vport->num_rxq + vport->num_bufq; in idpf_send_ena_dis_queues_msg()
2161 return -ENOMEM; in idpf_send_ena_dis_queues_msg()
2163 split = idpf_is_queue_model_split(vport->txq_model); in idpf_send_ena_dis_queues_msg()
2165 for (u32 i = 0; i < vport->num_txq_grp; i++) { in idpf_send_ena_dis_queues_msg()
2166 const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_ena_dis_queues_msg()
2168 for (u32 j = 0; j < tx_qgrp->num_txq; j++) { in idpf_send_ena_dis_queues_msg()
2169 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_send_ena_dis_queues_msg()
2170 qs->qs[k++].txq = tx_qgrp->txqs[j]; in idpf_send_ena_dis_queues_msg()
2176 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; in idpf_send_ena_dis_queues_msg()
2177 qs->qs[k++].complq = tx_qgrp->complq; in idpf_send_ena_dis_queues_msg()
2181 return -EINVAL; in idpf_send_ena_dis_queues_msg()
2183 split = idpf_is_queue_model_split(vport->rxq_model); in idpf_send_ena_dis_queues_msg()
2185 for (u32 i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_ena_dis_queues_msg()
2186 const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_ena_dis_queues_msg()
2190 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_send_ena_dis_queues_msg()
2192 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_send_ena_dis_queues_msg()
2195 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; in idpf_send_ena_dis_queues_msg()
2198 qs->qs[k++].rxq = in idpf_send_ena_dis_queues_msg()
2199 &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_send_ena_dis_queues_msg()
2201 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_ena_dis_queues_msg()
2207 for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_send_ena_dis_queues_msg()
2208 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; in idpf_send_ena_dis_queues_msg()
2209 qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_send_ena_dis_queues_msg()
2214 return -EINVAL; in idpf_send_ena_dis_queues_msg()
2220 * idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
2239 vqvm->vport_id = cpu_to_le32(vport->vport_id); in idpf_prep_map_unmap_queue_set_vector_msg()
2240 vqvm->num_qv_maps = cpu_to_le16(num_chunks); in idpf_prep_map_unmap_queue_set_vector_msg()
2241 memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps)); in idpf_prep_map_unmap_queue_set_vector_msg()
2247 * idpf_send_map_unmap_queue_set_vector_msg - send virtchnl map or unmap
2252 * Return: 0 on success, -errno on failure.
2265 .num_chunks = qs->num, in idpf_send_map_unmap_queue_set_vector_msg()
2269 vqv = kcalloc(qs->num, sizeof(*vqv), GFP_KERNEL); in idpf_send_map_unmap_queue_set_vector_msg()
2271 return -ENOMEM; in idpf_send_map_unmap_queue_set_vector_msg()
2275 split = idpf_is_queue_model_split(qs->vport->txq_model); in idpf_send_map_unmap_queue_set_vector_msg()
2277 for (u32 i = 0; i < qs->num; i++) { in idpf_send_map_unmap_queue_set_vector_msg()
2278 const struct idpf_queue_ptr *q = &qs->qs[i]; in idpf_send_map_unmap_queue_set_vector_msg()
2282 vqv[i].queue_type = cpu_to_le32(q->type); in idpf_send_map_unmap_queue_set_vector_msg()
2284 switch (q->type) { in idpf_send_map_unmap_queue_set_vector_msg()
2286 qid = q->rxq->q_id; in idpf_send_map_unmap_queue_set_vector_msg()
2288 if (idpf_queue_has(NOIRQ, q->rxq)) in idpf_send_map_unmap_queue_set_vector_msg()
2291 vec = q->rxq->q_vector; in idpf_send_map_unmap_queue_set_vector_msg()
2294 v_idx = vec->v_idx; in idpf_send_map_unmap_queue_set_vector_msg()
2295 itr_idx = vec->rx_itr_idx; in idpf_send_map_unmap_queue_set_vector_msg()
2297 v_idx = qs->vport->noirq_v_idx; in idpf_send_map_unmap_queue_set_vector_msg()
2302 qid = q->txq->q_id; in idpf_send_map_unmap_queue_set_vector_msg()
2304 if (idpf_queue_has(NOIRQ, q->txq)) in idpf_send_map_unmap_queue_set_vector_msg()
2306 else if (idpf_queue_has(XDP, q->txq)) in idpf_send_map_unmap_queue_set_vector_msg()
2307 vec = q->txq->complq->q_vector; in idpf_send_map_unmap_queue_set_vector_msg()
2309 vec = q->txq->txq_grp->complq->q_vector; in idpf_send_map_unmap_queue_set_vector_msg()
2311 vec = q->txq->q_vector; in idpf_send_map_unmap_queue_set_vector_msg()
2314 v_idx = vec->v_idx; in idpf_send_map_unmap_queue_set_vector_msg()
2315 itr_idx = vec->tx_itr_idx; in idpf_send_map_unmap_queue_set_vector_msg()
2317 v_idx = qs->vport->noirq_v_idx; in idpf_send_map_unmap_queue_set_vector_msg()
2322 return -EINVAL; in idpf_send_map_unmap_queue_set_vector_msg()
2330 return idpf_send_chunked_msg(qs->vport, &params); in idpf_send_map_unmap_queue_set_vector_msg()
2334 * idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
2339 * Return: 0 on success, -errno on failure.
2344 u32 num_q = vport->num_txq + vport->num_rxq; in idpf_send_map_unmap_queue_vector_msg()
2349 return -ENOMEM; in idpf_send_map_unmap_queue_vector_msg()
2351 for (u32 i = 0; i < vport->num_txq_grp; i++) { in idpf_send_map_unmap_queue_vector_msg()
2352 const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_map_unmap_queue_vector_msg()
2354 for (u32 j = 0; j < tx_qgrp->num_txq; j++) { in idpf_send_map_unmap_queue_vector_msg()
2355 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_send_map_unmap_queue_vector_msg()
2356 qs->qs[k++].txq = tx_qgrp->txqs[j]; in idpf_send_map_unmap_queue_vector_msg()
2360 if (k != vport->num_txq) in idpf_send_map_unmap_queue_vector_msg()
2361 return -EINVAL; in idpf_send_map_unmap_queue_vector_msg()
2363 for (u32 i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_map_unmap_queue_vector_msg()
2364 const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_map_unmap_queue_vector_msg()
2367 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_map_unmap_queue_vector_msg()
2368 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_send_map_unmap_queue_vector_msg()
2370 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_send_map_unmap_queue_vector_msg()
2373 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; in idpf_send_map_unmap_queue_vector_msg()
2375 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_map_unmap_queue_vector_msg()
2376 qs->qs[k++].rxq = in idpf_send_map_unmap_queue_vector_msg()
2377 &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_send_map_unmap_queue_vector_msg()
2379 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_map_unmap_queue_vector_msg()
2384 return -EINVAL; in idpf_send_map_unmap_queue_vector_msg()
2390 * idpf_send_enable_queue_set_msg - send enable queues virtchnl message for
2396 * Return: 0 on success, -errno on failure.
2404 * idpf_send_disable_queue_set_msg - send disable queues virtchnl message for
2408 * Return: 0 on success, -errno on failure.
2422 * idpf_send_config_queue_set_msg - send virtchnl config queues message for
2429 * Return: 0 on success, -errno on failure.
2443 * idpf_send_enable_queues_msg - send enable queues virtchnl message
2455 * idpf_send_disable_queues_msg - send disable queues virtchnl message
2473 * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
2493 * idpf_send_delete_queues_msg - send delete queues virtchnl message
2506 u16 vport_idx = vport->idx; in idpf_send_delete_queues_msg()
2511 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_send_delete_queues_msg()
2512 if (vport_config->req_qs_chunks) { in idpf_send_delete_queues_msg()
2513 chunks = &vport_config->req_qs_chunks->chunks; in idpf_send_delete_queues_msg()
2515 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_send_delete_queues_msg()
2516 chunks = &vport_params->chunks; in idpf_send_delete_queues_msg()
2519 num_chunks = le16_to_cpu(chunks->num_chunks); in idpf_send_delete_queues_msg()
2524 return -ENOMEM; in idpf_send_delete_queues_msg()
2526 eq->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_delete_queues_msg()
2527 eq->chunks.num_chunks = cpu_to_le16(num_chunks); in idpf_send_delete_queues_msg()
2529 idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks, in idpf_send_delete_queues_msg()
2536 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_delete_queues_msg()
2542 * idpf_send_config_queues_msg - Send config queues virtchnl message
2560 * idpf_send_add_queues_msg - Send virtchnl add queues message
2577 u16 vport_idx = vport->idx; in idpf_send_add_queues_msg()
2583 return -ENOMEM; in idpf_send_add_queues_msg()
2585 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_send_add_queues_msg()
2586 kfree(vport_config->req_qs_chunks); in idpf_send_add_queues_msg()
2587 vport_config->req_qs_chunks = NULL; in idpf_send_add_queues_msg()
2589 aq.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_add_queues_msg()
2601 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_add_queues_msg()
2605 /* compare vc_msg num queues with vport num queues */ in idpf_send_add_queues_msg()
2606 if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || in idpf_send_add_queues_msg()
2607 le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || in idpf_send_add_queues_msg()
2608 le16_to_cpu(vc_msg->num_tx_complq) != num_complq || in idpf_send_add_queues_msg()
2609 le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) in idpf_send_add_queues_msg()
2610 return -EINVAL; in idpf_send_add_queues_msg()
2613 le16_to_cpu(vc_msg->chunks.num_chunks)); in idpf_send_add_queues_msg()
2615 return -EIO; in idpf_send_add_queues_msg()
2617 vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); in idpf_send_add_queues_msg()
2618 if (!vport_config->req_qs_chunks) in idpf_send_add_queues_msg()
2619 return -ENOMEM; in idpf_send_add_queues_msg()
2625 * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
2644 return -ENOMEM; in idpf_send_alloc_vectors_msg()
2656 num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); in idpf_send_alloc_vectors_msg()
2659 return -EIO; in idpf_send_alloc_vectors_msg()
2662 return -EINVAL; in idpf_send_alloc_vectors_msg()
2664 kfree(adapter->req_vec_chunks); in idpf_send_alloc_vectors_msg()
2665 adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL); in idpf_send_alloc_vectors_msg()
2666 if (!adapter->req_vec_chunks) in idpf_send_alloc_vectors_msg()
2667 return -ENOMEM; in idpf_send_alloc_vectors_msg()
2669 if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) { in idpf_send_alloc_vectors_msg()
2670 kfree(adapter->req_vec_chunks); in idpf_send_alloc_vectors_msg()
2671 adapter->req_vec_chunks = NULL; in idpf_send_alloc_vectors_msg()
2672 return -EINVAL; in idpf_send_alloc_vectors_msg()
2679 * idpf_send_dealloc_vectors_msg - Send virtchnl de allocate vectors message
2686 struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; in idpf_send_dealloc_vectors_msg()
2687 struct virtchnl2_vector_chunks *vcs = &ac->vchunks; in idpf_send_dealloc_vectors_msg()
2692 buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); in idpf_send_dealloc_vectors_msg()
2702 kfree(adapter->req_vec_chunks); in idpf_send_dealloc_vectors_msg()
2703 adapter->req_vec_chunks = NULL; in idpf_send_dealloc_vectors_msg()
2709 * idpf_get_max_vfs - Get max number of vfs supported
2716 return le16_to_cpu(adapter->caps.max_sriov_vfs); in idpf_get_max_vfs()
2720 * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
2743 * idpf_send_get_stats_msg - Send virtchnl get statistics message
2750 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); in idpf_send_get_stats_msg()
2751 struct rtnl_link_stats64 *netstats = &np->netstats; in idpf_send_get_stats_msg()
2758 if (np->state <= __IDPF_VPORT_DOWN) in idpf_send_get_stats_msg()
2761 stats_msg.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_stats_msg()
2769 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_stats_msg()
2773 return -EIO; in idpf_send_get_stats_msg()
2775 spin_lock_bh(&np->stats_lock); in idpf_send_get_stats_msg()
2777 netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) + in idpf_send_get_stats_msg()
2780 netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) + in idpf_send_get_stats_msg()
2783 netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes); in idpf_send_get_stats_msg()
2784 netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes); in idpf_send_get_stats_msg()
2785 netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors); in idpf_send_get_stats_msg()
2786 netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors); in idpf_send_get_stats_msg()
2787 netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards); in idpf_send_get_stats_msg()
2788 netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards); in idpf_send_get_stats_msg()
2790 vport->port_stats.vport_stats = stats_msg; in idpf_send_get_stats_msg()
2792 spin_unlock_bh(&np->stats_lock); in idpf_send_get_stats_msg()
2798 * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
2815 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_send_get_set_rss_lut_msg()
2816 buf_size = struct_size(rl, lut, rss_data->rss_lut_size); in idpf_send_get_set_rss_lut_msg()
2819 return -ENOMEM; in idpf_send_get_set_rss_lut_msg()
2821 rl->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_set_rss_lut_msg()
2830 return -ENOMEM; in idpf_send_get_set_rss_lut_msg()
2835 rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); in idpf_send_get_set_rss_lut_msg()
2836 for (i = 0; i < rss_data->rss_lut_size; i++) in idpf_send_get_set_rss_lut_msg()
2837 rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); in idpf_send_get_set_rss_lut_msg()
2841 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_set_rss_lut_msg()
2847 return -EIO; in idpf_send_get_set_rss_lut_msg()
2849 lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32); in idpf_send_get_set_rss_lut_msg()
2851 return -EIO; in idpf_send_get_set_rss_lut_msg()
2854 if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) in idpf_send_get_set_rss_lut_msg()
2857 rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); in idpf_send_get_set_rss_lut_msg()
2858 kfree(rss_data->rss_lut); in idpf_send_get_set_rss_lut_msg()
2860 rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); in idpf_send_get_set_rss_lut_msg()
2861 if (!rss_data->rss_lut) { in idpf_send_get_set_rss_lut_msg()
2862 rss_data->rss_lut_size = 0; in idpf_send_get_set_rss_lut_msg()
2863 return -ENOMEM; in idpf_send_get_set_rss_lut_msg()
2867 memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size); in idpf_send_get_set_rss_lut_msg()
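/* Usage note: before the "set" variant of this message goes out, the cached
 * LUT is typically filled by spreading entries round-robin across the active
 * Rx queues. A hedged sketch of such a helper; the struct and field names
 * come from the surrounding code, but the helper itself and its call site
 * are assumptions:
 */
static void example_fill_dflt_rss_lut(const struct idpf_vport *vport,
				      struct idpf_rss_data *rss_data)
{
	int i;

	for (i = 0; i < rss_data->rss_lut_size; i++)
		rss_data->rss_lut[i] = i % vport->num_rxq;
}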
2873 * idpf_send_get_set_rss_key_msg - Send virtchnl get or set RSS key message
2890 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_send_get_set_rss_key_msg()
2891 buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); in idpf_send_get_set_rss_key_msg()
2894 return -ENOMEM; in idpf_send_get_set_rss_key_msg()
2896 rk->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_set_rss_key_msg()
2903 return -ENOMEM; in idpf_send_get_set_rss_key_msg()
2909 rk->key_len = cpu_to_le16(rss_data->rss_key_size); in idpf_send_get_set_rss_key_msg()
2910 for (i = 0; i < rss_data->rss_key_size; i++) in idpf_send_get_set_rss_key_msg()
2911 rk->key_flex[i] = rss_data->rss_key[i]; in idpf_send_get_set_rss_key_msg()
2916 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_set_rss_key_msg()
2922 return -EIO; in idpf_send_get_set_rss_key_msg()
2925 le16_to_cpu(recv_rk->key_len)); in idpf_send_get_set_rss_key_msg()
2927 return -EIO; in idpf_send_get_set_rss_key_msg()
2930 if (rss_data->rss_key_size == key_size) in idpf_send_get_set_rss_key_msg()
2933 rss_data->rss_key_size = key_size; in idpf_send_get_set_rss_key_msg()
2934 kfree(rss_data->rss_key); in idpf_send_get_set_rss_key_msg()
2935 rss_data->rss_key = kzalloc(key_size, GFP_KERNEL); in idpf_send_get_set_rss_key_msg()
2936 if (!rss_data->rss_key) { in idpf_send_get_set_rss_key_msg()
2937 rss_data->rss_key_size = 0; in idpf_send_get_set_rss_key_msg()
2938 return -ENOMEM; in idpf_send_get_set_rss_key_msg()
2942 memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size); in idpf_send_get_set_rss_key_msg()
2948 * idpf_fill_ptype_lookup - Fill L3-specific fields in ptype lookup table
2959 if (!pstate->outer_ip || !pstate->outer_frag) { in idpf_fill_ptype_lookup()
2960 pstate->outer_ip = true; in idpf_fill_ptype_lookup()
2963 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; in idpf_fill_ptype_lookup()
2965 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; in idpf_fill_ptype_lookup()
2968 ptype->outer_frag = LIBETH_RX_PT_FRAG; in idpf_fill_ptype_lookup()
2969 pstate->outer_frag = true; in idpf_fill_ptype_lookup()
2972 ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; in idpf_fill_ptype_lookup()
2973 pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; in idpf_fill_ptype_lookup()
2976 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; in idpf_fill_ptype_lookup()
2978 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; in idpf_fill_ptype_lookup()
2981 ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; in idpf_fill_ptype_lookup()
2987 if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && in idpf_finalize_ptype_lookup()
2988 ptype->inner_prot) in idpf_finalize_ptype_lookup()
2989 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; in idpf_finalize_ptype_lookup()
2990 else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && in idpf_finalize_ptype_lookup()
2991 ptype->outer_ip) in idpf_finalize_ptype_lookup()
2992 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; in idpf_finalize_ptype_lookup()
2993 else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) in idpf_finalize_ptype_lookup()
2994 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; in idpf_finalize_ptype_lookup()
2996 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; in idpf_finalize_ptype_lookup()
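/* Resolution order above, spelled out: an L2 payload with a parsed inner
 * protocol is promoted to L4; an L2 payload over any outer IP header becomes
 * L3; a ptype whose outer classification is plain L2 stays L2; anything else
 * reports no payload hint. E.g. IPv4/TCP resolves to L4, a bare IPv4
 * fragment to L3, and a pure Ethernet ptype to L2.
 */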
3002 * idpf_send_get_rx_ptype_msg - Send virtchnl get RX ptype info message
3013 struct idpf_adapter *adapter = vport->adapter; in idpf_send_get_rx_ptype_msg()
3019 if (vport->rx_ptype_lkup) in idpf_send_get_rx_ptype_msg()
3022 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_get_rx_ptype_msg()
3029 return -ENOMEM; in idpf_send_get_rx_ptype_msg()
3033 return -ENOMEM; in idpf_send_get_rx_ptype_msg()
3037 return -ENOMEM; in idpf_send_get_rx_ptype_msg()
3047 get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id); in idpf_send_get_rx_ptype_msg()
3050 get_ptype_info->num_ptypes = in idpf_send_get_rx_ptype_msg()
3051 cpu_to_le16(max_ptype - next_ptype_id); in idpf_send_get_rx_ptype_msg()
3053 get_ptype_info->num_ptypes = in idpf_send_get_rx_ptype_msg()
3060 ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); in idpf_send_get_rx_ptype_msg()
3062 return -EINVAL; in idpf_send_get_rx_ptype_msg()
3064 next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) + in idpf_send_get_rx_ptype_msg()
3065 le16_to_cpu(get_ptype_info->num_ptypes); in idpf_send_get_rx_ptype_msg()
3069 for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { in idpf_send_get_rx_ptype_msg()
3079 return -EINVAL; in idpf_send_get_rx_ptype_msg()
3082 if (le16_to_cpu(ptype->ptype_id_10) == in idpf_send_get_rx_ptype_msg()
3086 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_get_rx_ptype_msg()
3087 k = le16_to_cpu(ptype->ptype_id_10); in idpf_send_get_rx_ptype_msg()
3089 k = ptype->ptype_id_8; in idpf_send_get_rx_ptype_msg()
3091 for (j = 0; j < ptype->proto_id_count; j++) { in idpf_send_get_rx_ptype_msg()
3092 id = le16_to_cpu(ptype->proto_id[j]); in idpf_send_get_rx_ptype_msg()
3212 vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); in idpf_send_get_rx_ptype_msg()
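/* no_free_ptr() hands the scope-managed (__free()-style) allocation over to
 * the vport, so the automatic cleanup does not release it on this success
 * path. A hedged sketch of how the finished table is consumed at Rx time:
 * the hardware ptype index from the descriptor looks up the decoded libeth
 * entry (illustrative helper only, not the driver's hot-path code):
 */
static struct libeth_rx_pt example_decode_ptype(const struct idpf_vport *vport,
						u16 hw_ptype)
{
	return vport->rx_ptype_lkup[hw_ptype];
}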
3218 * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
3230 loopback.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_ena_dis_loopback_msg()
3237 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_ena_dis_loopback_msg()
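/* As with the other simple senders in this file, the final return (elided in
 * this listing) typically propagates a negative reply size as the error code
 * and reports 0 otherwise.
 */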
3243 * idpf_find_ctlq - Given a type and id, find ctlq info
3255 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) in idpf_find_ctlq()
3256 if (cq->q_id == id && cq->cq_type == type) in idpf_find_ctlq()
3263 * idpf_init_dflt_mbx - Setup default mailbox parameters and make request
3284 struct idpf_hw *hw = &adapter->hw; in idpf_init_dflt_mbx()
3287 adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info); in idpf_init_dflt_mbx()
3293 hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, in idpf_init_dflt_mbx()
3295 hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, in idpf_init_dflt_mbx()
3298 if (!hw->asq || !hw->arq) { in idpf_init_dflt_mbx()
3301 return -ENOENT; in idpf_init_dflt_mbx()
3304 adapter->state = __IDPF_VER_CHECK; in idpf_init_dflt_mbx()
3310 * idpf_deinit_dflt_mbx - Free up ctlqs setup
3315 if (adapter->hw.arq && adapter->hw.asq) { in idpf_deinit_dflt_mbx()
3317 idpf_ctlq_deinit(&adapter->hw); in idpf_deinit_dflt_mbx()
3319 adapter->hw.arq = NULL; in idpf_deinit_dflt_mbx()
3320 adapter->hw.asq = NULL; in idpf_deinit_dflt_mbx()
3324 * idpf_vport_params_buf_rel - Release memory for mailbox resources
3331 kfree(adapter->vport_params_recvd); in idpf_vport_params_buf_rel()
3332 adapter->vport_params_recvd = NULL; in idpf_vport_params_buf_rel()
3333 kfree(adapter->vport_params_reqd); in idpf_vport_params_buf_rel()
3334 adapter->vport_params_reqd = NULL; in idpf_vport_params_buf_rel()
3335 kfree(adapter->vport_ids); in idpf_vport_params_buf_rel()
3336 adapter->vport_ids = NULL; in idpf_vport_params_buf_rel()
3340 * idpf_vport_params_buf_alloc - Allocate memory for mailbox resources
3349 adapter->vport_params_reqd = kcalloc(num_max_vports, in idpf_vport_params_buf_alloc()
3350 sizeof(*adapter->vport_params_reqd), in idpf_vport_params_buf_alloc()
3352 if (!adapter->vport_params_reqd) in idpf_vport_params_buf_alloc()
3353 return -ENOMEM; in idpf_vport_params_buf_alloc()
3355 adapter->vport_params_recvd = kcalloc(num_max_vports, in idpf_vport_params_buf_alloc()
3356 sizeof(*adapter->vport_params_recvd), in idpf_vport_params_buf_alloc()
3358 if (!adapter->vport_params_recvd) in idpf_vport_params_buf_alloc()
3361 adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); in idpf_vport_params_buf_alloc()
3362 if (!adapter->vport_ids) in idpf_vport_params_buf_alloc()
3365 if (adapter->vport_config) in idpf_vport_params_buf_alloc()
3368 adapter->vport_config = kcalloc(num_max_vports, in idpf_vport_params_buf_alloc()
3369 sizeof(*adapter->vport_config), in idpf_vport_params_buf_alloc()
3371 if (!adapter->vport_config) in idpf_vport_params_buf_alloc()
3379 return -ENOMEM; in idpf_vport_params_buf_alloc()
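/* Error handling (elided in this listing): a failure on any of the later
 * allocations unwinds the earlier ones via idpf_vport_params_buf_rel() above
 * before this -ENOMEM is returned.
 */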
3383 * idpf_vc_core_init - Initialize state machine and get driver specific
3392 * Returns 0 on success; -EAGAIN if the function should be called again,
3401 if (!adapter->vcxn_mngr) { in idpf_vc_core_init()
3402 adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL); in idpf_vc_core_init()
3403 if (!adapter->vcxn_mngr) { in idpf_vc_core_init()
3404 err = -ENOMEM; in idpf_vc_core_init()
3408 idpf_vc_xn_init(adapter->vcxn_mngr); in idpf_vc_core_init()
3410 while (adapter->state != __IDPF_INIT_SW) { in idpf_vc_core_init()
3411 switch (adapter->state) { in idpf_vc_core_init()
3417 adapter->state = __IDPF_GET_CAPS; in idpf_vc_core_init()
3419 case -EAGAIN: in idpf_vc_core_init()
3431 adapter->state = __IDPF_INIT_SW; in idpf_vc_core_init()
3434 dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", in idpf_vc_core_init()
3435 adapter->state); in idpf_vc_core_init()
3436 err = -EINVAL; in idpf_vc_core_init()
3450 dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n", in idpf_vc_core_init()
3452 return -EINVAL; in idpf_vc_core_init()
3458 dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n", in idpf_vc_core_init()
3460 return -ENOMEM; in idpf_vc_core_init()
3466 dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n", in idpf_vc_core_init()
3468 return -ENOMEM; in idpf_vc_core_init()
3471 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); in idpf_vc_core_init()
3473 adapter->max_vports = num_max_vports; in idpf_vc_core_init()
3474 adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports), in idpf_vc_core_init()
3476 if (!adapter->vports) in idpf_vc_core_init()
3477 return -ENOMEM; in idpf_vc_core_init()
3479 if (!adapter->netdevs) { in idpf_vc_core_init()
3480 adapter->netdevs = kcalloc(num_max_vports, in idpf_vc_core_init()
3483 if (!adapter->netdevs) { in idpf_vc_core_init()
3484 err = -ENOMEM; in idpf_vc_core_init()
3491 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", in idpf_vc_core_init()
3499 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); in idpf_vc_core_init()
3501 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, in idpf_vc_core_init()
3502 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); in idpf_vc_core_init()
3506 dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", in idpf_vc_core_init()
3513 pci_err(adapter->pdev, "PTP init failed, err=%pe\n", in idpf_vc_core_init()
3521 queue_delayed_work(adapter->init_wq, &adapter->init_task, in idpf_vc_core_init()
3522 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); in idpf_vc_core_init()
3524 set_bit(IDPF_VC_CORE_INIT, adapter->flags); in idpf_vc_core_init()
3529 cancel_delayed_work_sync(&adapter->serv_task); in idpf_vc_core_init()
3530 cancel_delayed_work_sync(&adapter->mbx_task); in idpf_vc_core_init()
3533 kfree(adapter->vports); in idpf_vc_core_init()
3534 adapter->vports = NULL; in idpf_vc_core_init()
3539 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) in idpf_vc_core_init()
3542 if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { in idpf_vc_core_init()
3543 dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); in idpf_vc_core_init()
3545 return -EFAULT; in idpf_vc_core_init()
3551 adapter->state = __IDPF_VER_CHECK; in idpf_vc_core_init()
3552 if (adapter->vcxn_mngr) in idpf_vc_core_init()
3553 idpf_vc_xn_shutdown(adapter->vcxn_mngr); in idpf_vc_core_init()
3554 set_bit(IDPF_HR_DRV_LOAD, adapter->flags); in idpf_vc_core_init()
3555 queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, in idpf_vc_core_init()
3558 return -EAGAIN; in idpf_vc_core_init()
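/* Why -EAGAIN: the mailbox may simply not be ready yet, so the state machine
 * is rewound to __IDPF_VER_CHECK and a driver-load reset is queued before
 * returning. A hedged sketch of how a caller can treat this return code
 * (illustrative only; the real retry is driven by the reset/vc_event work
 * queued above):
 */
static int example_bring_up(struct idpf_adapter *adapter)
{
	int err = idpf_vc_core_init(adapter);

	if (err == -EAGAIN)
		return 0;	/* init will be retried from the reset path */

	return err;
}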
3562 * idpf_vc_core_deinit - Device deinit routine
3570 if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) in idpf_vc_core_deinit()
3574 remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags); in idpf_vc_core_deinit()
3576 idpf_vc_xn_shutdown(adapter->vcxn_mngr); in idpf_vc_core_deinit()
3580 idpf_idc_deinit_core_aux_device(adapter->cdev_info); in idpf_vc_core_deinit()
3584 idpf_vc_xn_shutdown(adapter->vcxn_mngr); in idpf_vc_core_deinit()
3586 cancel_delayed_work_sync(&adapter->serv_task); in idpf_vc_core_deinit()
3587 cancel_delayed_work_sync(&adapter->mbx_task); in idpf_vc_core_deinit()
3591 kfree(adapter->vports); in idpf_vc_core_deinit()
3592 adapter->vports = NULL; in idpf_vc_core_deinit()
3594 clear_bit(IDPF_VC_CORE_INIT, adapter->flags); in idpf_vc_core_deinit()
3598 * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3613 vec_info.num_curr_vecs = vport->num_q_vectors; in idpf_vport_alloc_vec_indexes()
3618 req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) + in idpf_vport_alloc_vec_indexes()
3622 vec_info.default_vport = vport->default_vport; in idpf_vport_alloc_vec_indexes()
3623 vec_info.index = vport->idx; in idpf_vport_alloc_vec_indexes()
3625 num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, in idpf_vport_alloc_vec_indexes()
3626 vport->q_vector_idxs, in idpf_vport_alloc_vec_indexes()
3629 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", in idpf_vport_alloc_vec_indexes()
3631 return -EINVAL; in idpf_vport_alloc_vec_indexes()
3634 vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS; in idpf_vport_alloc_vec_indexes()
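/* The vector request above includes IDPF_RESERVED_VECS entries that are not
 * used as queue vectors, so they are subtracted again before storing the
 * per-vport queue-vector count.
 */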
3640 * idpf_vport_init - Initialize virtual port
3648 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_init()
3654 u16 idx = vport->idx; in idpf_vport_init()
3657 vport_config = adapter->vport_config[idx]; in idpf_vport_init()
3658 rss_data = &vport_config->user_config.rss_data; in idpf_vport_init()
3659 vport_msg = adapter->vport_params_recvd[idx]; in idpf_vport_init()
3661 vport_config->max_q.max_txq = max_q->max_txq; in idpf_vport_init()
3662 vport_config->max_q.max_rxq = max_q->max_rxq; in idpf_vport_init()
3663 vport_config->max_q.max_complq = max_q->max_complq; in idpf_vport_init()
3664 vport_config->max_q.max_bufq = max_q->max_bufq; in idpf_vport_init()
3666 vport->txq_model = le16_to_cpu(vport_msg->txq_model); in idpf_vport_init()
3667 vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); in idpf_vport_init()
3668 vport->vport_type = le16_to_cpu(vport_msg->vport_type); in idpf_vport_init()
3669 vport->vport_id = le32_to_cpu(vport_msg->vport_id); in idpf_vport_init()
3671 rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, in idpf_vport_init()
3672 le16_to_cpu(vport_msg->rss_key_size)); in idpf_vport_init()
3673 rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); in idpf_vport_init()
3675 ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); in idpf_vport_init()
3676 vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; in idpf_vport_init()
3679 memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); in idpf_vport_init()
3680 memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); in idpf_vport_init()
3689 vport->crc_enable = adapter->crc_enable; in idpf_vport_init()
3691 if (!(vport_msg->vport_flags & in idpf_vport_init()
3697 pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n"); in idpf_vport_init()
3701 INIT_WORK(&vport->tstamp_task, idpf_tstamp_task); in idpf_vport_init()
3705 * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3720 u16 num_chunks = le16_to_cpu(chunks->num_vchunks); in idpf_get_vec_ids()
3724 vecids[num_vecid_filled] = adapter->mb_vector.v_idx; in idpf_get_vec_ids()
3731 chunk = &chunks->vchunks[j]; in idpf_get_vec_ids()
3732 num_vec = le16_to_cpu(chunk->num_vectors); in idpf_get_vec_ids()
3733 start_vecid = le16_to_cpu(chunk->start_vector_id); in idpf_get_vec_ids()
3750 * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3762 u16 num_chunks = le16_to_cpu(chunks->num_chunks); in idpf_vport_get_queue_ids()
3766 while (num_chunks--) { in idpf_vport_get_queue_ids()
3769 chunk = &chunks->chunks[num_chunks]; in idpf_vport_get_queue_ids()
3770 if (le32_to_cpu(chunk->type) != q_type) in idpf_vport_get_queue_ids()
3773 num_q = le32_to_cpu(chunk->num_queues); in idpf_vport_get_queue_ids()
3774 start_q_id = le32_to_cpu(chunk->start_queue_id); in idpf_vport_get_queue_ids()
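/* Worked example: a chunk of the requested type with start_queue_id = 64 and
 * num_queues = 4 expands to ids 64, 65, 66 and 67 in the caller's array,
 * capped by the array length passed in (expansion loop elided in this
 * listing).
 */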
3791 * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3809 for (i = 0; i < vport->num_txq_grp; i++) { in __idpf_vport_queue_ids_init()
3810 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_vport_queue_ids_init()
3812 for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) in __idpf_vport_queue_ids_init()
3813 tx_qgrp->txqs[j]->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3817 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_vport_queue_ids_init()
3818 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_vport_queue_ids_init()
3821 if (idpf_is_queue_model_split(vport->rxq_model)) in __idpf_vport_queue_ids_init()
3822 num_rxq = rx_qgrp->splitq.num_rxq_sets; in __idpf_vport_queue_ids_init()
3824 num_rxq = rx_qgrp->singleq.num_rxq; in __idpf_vport_queue_ids_init()
3829 if (idpf_is_queue_model_split(vport->rxq_model)) in __idpf_vport_queue_ids_init()
3830 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; in __idpf_vport_queue_ids_init()
3832 q = rx_qgrp->singleq.rxqs[j]; in __idpf_vport_queue_ids_init()
3833 q->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3838 for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { in __idpf_vport_queue_ids_init()
3839 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_vport_queue_ids_init()
3841 tx_qgrp->complq->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3845 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_vport_queue_ids_init()
3846 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_vport_queue_ids_init()
3847 u8 num_bufqs = vport->num_bufqs_per_qgrp; in __idpf_vport_queue_ids_init()
3852 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in __idpf_vport_queue_ids_init()
3853 q->q_id = qids[k]; in __idpf_vport_queue_ids_init()
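/* The function returns the number of ids actually consumed (k); callers such
 * as idpf_vport_queue_ids_init() below compare that count against the
 * expected per-type queue count and treat a shortfall as -EINVAL.
 */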
3865 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3876 u16 vport_idx = vport->idx; in idpf_vport_queue_ids_init()
3881 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_vport_queue_ids_init()
3882 if (vport_config->req_qs_chunks) { in idpf_vport_queue_ids_init()
3884 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; in idpf_vport_queue_ids_init()
3885 chunks = &vc_aq->chunks; in idpf_vport_queue_ids_init()
3887 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_vport_queue_ids_init()
3888 chunks = &vport_params->chunks; in idpf_vport_queue_ids_init()
3893 return -ENOMEM; in idpf_vport_queue_ids_init()
3898 if (num_ids < vport->num_txq) { in idpf_vport_queue_ids_init()
3899 err = -EINVAL; in idpf_vport_queue_ids_init()
3904 if (num_ids < vport->num_txq) { in idpf_vport_queue_ids_init()
3905 err = -EINVAL; in idpf_vport_queue_ids_init()
3912 if (num_ids < vport->num_rxq) { in idpf_vport_queue_ids_init()
3913 err = -EINVAL; in idpf_vport_queue_ids_init()
3918 if (num_ids < vport->num_rxq) { in idpf_vport_queue_ids_init()
3919 err = -EINVAL; in idpf_vport_queue_ids_init()
3923 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_queue_ids_init()
3928 if (num_ids < vport->num_complq) { in idpf_vport_queue_ids_init()
3929 err = -EINVAL; in idpf_vport_queue_ids_init()
3933 if (num_ids < vport->num_complq) { in idpf_vport_queue_ids_init()
3934 err = -EINVAL; in idpf_vport_queue_ids_init()
3939 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_queue_ids_init()
3944 if (num_ids < vport->num_bufq) { in idpf_vport_queue_ids_init()
3945 err = -EINVAL; in idpf_vport_queue_ids_init()
3949 if (num_ids < vport->num_bufq) in idpf_vport_queue_ids_init()
3950 err = -EINVAL; in idpf_vport_queue_ids_init()
3959 * idpf_vport_adjust_qs - Adjust to new requested queues
3969 vport_msg.txq_model = cpu_to_le16(vport->txq_model); in idpf_vport_adjust_qs()
3970 vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); in idpf_vport_adjust_qs()
3971 err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, in idpf_vport_adjust_qs()
3983 * idpf_is_capability_ena - Default implementation of capability checking
3994 u8 *caps = (u8 *)&adapter->caps; in idpf_is_capability_ena()
4012 * idpf_vport_is_cap_ena - Check if vport capability is enabled
4022 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; in idpf_vport_is_cap_ena()
4024 return !!(le16_to_cpu(vport_msg->vport_flags) & flag); in idpf_vport_is_cap_ena()
4028 * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
4039 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; in idpf_sideband_flow_type_ena()
4040 caps = vport_msg->sideband_flow_caps; in idpf_sideband_flow_type_ena()
4053 * idpf_sideband_action_ena - Check if steering is enabled for action
4065 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; in idpf_sideband_action_ena()
4066 supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions); in idpf_sideband_action_ena()
4069 if (fsp->ring_cookie == RX_CLS_FLOW_DISC || in idpf_sideband_action_ena()
4070 fsp->ring_cookie == RX_CLS_FLOW_WAKE) in idpf_sideband_action_ena()
4080 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; in idpf_fsteer_max_rules()
4081 return le32_to_cpu(vport_msg->flow_steer_max_rules); in idpf_fsteer_max_rules()
4094 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; in idpf_get_vport_id()
4096 return le32_to_cpu(vport_msg->vport_id); in idpf_get_vport_id()
4104 is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr); in idpf_set_mac_type()
4105 mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY : in idpf_set_mac_type()
4110 * idpf_mac_filter_async_handler - Async callback for MAC filters
4135 if (!ctlq_msg->cookie.mbx.chnl_retval) in idpf_mac_filter_async_handler()
4139 if (xn->reply_sz < sizeof(*ma_list)) in idpf_mac_filter_async_handler()
4142 ma_list = ctlq_msg->ctx.indirect.payload->va; in idpf_mac_filter_async_handler()
4143 mac_addr = ma_list->mac_addr_list; in idpf_mac_filter_async_handler()
4144 num_entries = le16_to_cpu(ma_list->num_mac_addr); in idpf_mac_filter_async_handler()
4146 if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries)) in idpf_mac_filter_async_handler()
4149 vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id)); in idpf_mac_filter_async_handler()
4153 vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)]; in idpf_mac_filter_async_handler()
4154 ma_list_head = &vport_config->user_config.mac_filter_list; in idpf_mac_filter_async_handler()
4160 spin_lock_bh(&vport_config->mac_filter_list_lock); in idpf_mac_filter_async_handler()
4163 if (ether_addr_equal(mac_addr[i].addr, f->macaddr)) in idpf_mac_filter_async_handler()
4164 list_del(&f->list); in idpf_mac_filter_async_handler()
4165 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_mac_filter_async_handler()
4166 dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n", in idpf_mac_filter_async_handler()
4167 xn->vc_op); in idpf_mac_filter_async_handler()
4172 dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n", in idpf_mac_filter_async_handler()
4173 xn->vc_op, xn->reply_sz); in idpf_mac_filter_async_handler()
4175 return -EINVAL; in idpf_mac_filter_async_handler()
4179 * idpf_add_del_mac_filters - Add/del MAC filters
4193 struct idpf_adapter *adapter = np->adapter; in idpf_add_del_mac_filters()
4207 vport_config = adapter->vport_config[np->vport_idx]; in idpf_add_del_mac_filters()
4208 spin_lock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
4211 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, in idpf_add_del_mac_filters()
4213 if (add && f->add) in idpf_add_del_mac_filters()
4215 else if (!add && f->remove) in idpf_add_del_mac_filters()
4220 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
4229 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
4231 return -ENOMEM; in idpf_add_del_mac_filters()
4234 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, in idpf_add_del_mac_filters()
4236 if (add && f->add) { in idpf_add_del_mac_filters()
4237 ether_addr_copy(mac_addr[i].addr, f->macaddr); in idpf_add_del_mac_filters()
4240 f->add = false; in idpf_add_del_mac_filters()
4244 if (!add && f->remove) { in idpf_add_del_mac_filters()
4245 ether_addr_copy(mac_addr[i].addr, f->macaddr); in idpf_add_del_mac_filters()
4248 f->remove = false; in idpf_add_del_mac_filters()
4254 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
4273 return -ENOMEM; in idpf_add_del_mac_filters()
4278 ma_list->vport_id = cpu_to_le32(np->vport_id); in idpf_add_del_mac_filters()
4279 ma_list->num_mac_addr = cpu_to_le16(num_entries); in idpf_add_del_mac_filters()
4280 memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size); in idpf_add_del_mac_filters()
4289 total_filters -= num_entries; in idpf_add_del_mac_filters()
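/* Chunking arithmetic: with T pending filters and a per-message limit of N,
 * the loop above issues ceil(T / N) mailbox messages; each pass copies
 * num_entries = min(remaining, N) addresses starting at offset k, then
 * decrements the remaining total as seen here. E.g. 45 filters with N = 20
 * go out as 20 + 20 + 5.
 */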
4296 * idpf_set_promiscuous - Set promiscuous mode and send message to mailbox
4314 if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags)) in idpf_set_promiscuous()
4316 if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags)) in idpf_set_promiscuous()
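/* Both tests above only translate the cached user flags into the unicast /
 * multicast promiscuous bits of the outgoing virtchnl2 message; the flag
 * assignments themselves are elided in this listing.
 */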
4334 * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
4347 struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); in idpf_idc_rdma_vc_send_sync()
4353 return -EINVAL; in idpf_idc_rdma_vc_send_sync()