Lines Matching +full:num +full:- +full:rxq

1 // SPDX-License-Identifier: GPL-2.0-only
16 * enum idpf_vc_xn_state - Virtchnl transaction status
43 * struct idpf_vc_xn - Data structure representing virtchnl transactions
52 * to. May be 0-length (then NULL address permitted) if the reply data
72 * struct idpf_vc_xn_params - Parameters for executing transaction
92 * struct idpf_vc_xn_manager - Manager for tracking transactions
106 * idpf_vid_to_vport - Translate vport id to vport pointer
119 if (adapter->vport_ids[i] == v_id) in idpf_vid_to_vport()
120 return adapter->vports[i]; in idpf_vid_to_vport()
126 * idpf_handle_event_link - Handle link event message
136 vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id)); in idpf_handle_event_link()
138 dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n", in idpf_handle_event_link()
139 v2e->vport_id); in idpf_handle_event_link()
142 np = netdev_priv(vport->netdev); in idpf_handle_event_link()
144 np->link_speed_mbps = le32_to_cpu(v2e->link_speed); in idpf_handle_event_link()
146 if (vport->link_up == v2e->link_status) in idpf_handle_event_link()
149 vport->link_up = v2e->link_status; in idpf_handle_event_link()
151 if (np->state != __IDPF_VPORT_UP) in idpf_handle_event_link()
154 if (vport->link_up) { in idpf_handle_event_link()
155 netif_tx_start_all_queues(vport->netdev); in idpf_handle_event_link()
156 netif_carrier_on(vport->netdev); in idpf_handle_event_link()
158 netif_tx_stop_all_queues(vport->netdev); in idpf_handle_event_link()
159 netif_carrier_off(vport->netdev); in idpf_handle_event_link()
164 * idpf_recv_event_msg - Receive virtchnl event message
173 int payload_size = ctlq_msg->ctx.indirect.payload->size; in idpf_recv_event_msg()
178 …dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len… in idpf_recv_event_msg()
179 ctlq_msg->cookie.mbx.chnl_opcode, in idpf_recv_event_msg()
184 v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va; in idpf_recv_event_msg()
185 event = le32_to_cpu(v2e->event); in idpf_recv_event_msg()
192 dev_err(&adapter->pdev->dev, in idpf_recv_event_msg()
199 * idpf_mb_clean - Reclaim the send mailbox queue entries
215 return -ENOMEM; in idpf_mb_clean()
217 err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg); in idpf_mb_clean()
224 dma_mem = q_msg[i]->ctx.indirect.payload; in idpf_mb_clean()
226 dma_free_coherent(&adapter->pdev->dev, dma_mem->size, in idpf_mb_clean()
227 dma_mem->va, dma_mem->pa); in idpf_mb_clean()
239 * idpf_send_mb_msg - Send message over mailbox
271 return -ENOMEM; in idpf_send_mb_msg()
275 err = -ENOMEM; in idpf_send_mb_msg()
279 ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp; in idpf_send_mb_msg()
280 ctlq_msg->func_id = 0; in idpf_send_mb_msg()
281 ctlq_msg->data_len = msg_size; in idpf_send_mb_msg()
282 ctlq_msg->cookie.mbx.chnl_opcode = op; in idpf_send_mb_msg()
283 ctlq_msg->cookie.mbx.chnl_retval = 0; in idpf_send_mb_msg()
284 dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN; in idpf_send_mb_msg()
285 dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size, in idpf_send_mb_msg()
286 &dma_mem->pa, GFP_ATOMIC); in idpf_send_mb_msg()
287 if (!dma_mem->va) { in idpf_send_mb_msg()
288 err = -ENOMEM; in idpf_send_mb_msg()
294 memcpy(dma_mem->va, msg, msg_size); in idpf_send_mb_msg()
295 ctlq_msg->ctx.indirect.payload = dma_mem; in idpf_send_mb_msg()
296 ctlq_msg->ctx.sw_cookie.data = cookie; in idpf_send_mb_msg()
298 err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg); in idpf_send_mb_msg()
305 dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va, in idpf_send_mb_msg()
306 dma_mem->pa); in idpf_send_mb_msg()
323 * idpf_vc_xn_lock - Request exclusive access to vc transaction
327 raw_spin_lock(&(xn)->completed.wait.lock)
330 * idpf_vc_xn_unlock - Release exclusive access to vc transaction
334 raw_spin_unlock(&(xn)->completed.wait.lock)
337 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
343 xn->reply.iov_base = NULL; in idpf_vc_xn_release_bufs()
344 xn->reply.iov_len = 0; in idpf_vc_xn_release_bufs()
346 if (xn->state != IDPF_VC_XN_SHUTDOWN) in idpf_vc_xn_release_bufs()
347 xn->state = IDPF_VC_XN_IDLE; in idpf_vc_xn_release_bufs()
351 * idpf_vc_xn_init - Initialize virtchnl transaction object
358 spin_lock_init(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_init()
360 for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) { in idpf_vc_xn_init()
361 struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; in idpf_vc_xn_init()
363 xn->state = IDPF_VC_XN_IDLE; in idpf_vc_xn_init()
364 xn->idx = i; in idpf_vc_xn_init()
366 init_completion(&xn->completed); in idpf_vc_xn_init()
369 bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); in idpf_vc_xn_init()
373 * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
376 * All waiting threads will be woken up and their transactions aborted. Further
383 spin_lock_bh(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_shutdown()
384 bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); in idpf_vc_xn_shutdown()
385 spin_unlock_bh(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_shutdown()
387 for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) { in idpf_vc_xn_shutdown()
388 struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; in idpf_vc_xn_shutdown()
391 xn->state = IDPF_VC_XN_SHUTDOWN; in idpf_vc_xn_shutdown()
394 complete_all(&xn->completed); in idpf_vc_xn_shutdown()
399 * idpf_vc_xn_pop_free - Pop a free transaction from free list
410 spin_lock_bh(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_pop_free()
411 free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); in idpf_vc_xn_pop_free()
415 clear_bit(free_idx, vcxn_mngr->free_xn_bm); in idpf_vc_xn_pop_free()
416 xn = &vcxn_mngr->ring[free_idx]; in idpf_vc_xn_pop_free()
417 xn->salt = vcxn_mngr->salt++; in idpf_vc_xn_pop_free()
420 spin_unlock_bh(&vcxn_mngr->xn_bm_lock); in idpf_vc_xn_pop_free()
426 * idpf_vc_xn_push_free - Push a free transaction to free list
434 set_bit(xn->idx, vcxn_mngr->free_xn_bm); in idpf_vc_xn_push_free()
438 * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
441 * -vc_op: virtchnl operation to send
442 * -send_buf: kvec iov for send buf and len
443 * -recv_buf: kvec iov for recv buf and len (ignored if NULL)
444 * -timeout_ms: timeout waiting for a reply (milliseconds)
445 * -async: don't wait for message reply, will lose caller context
446 * -async_handler: callback to handle async replies
455 const struct kvec *send_buf = &params->send_buf; in idpf_vc_xn_exec()
460 xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr); in idpf_vc_xn_exec()
463 return -ENOSPC; in idpf_vc_xn_exec()
466 if (xn->state == IDPF_VC_XN_SHUTDOWN) { in idpf_vc_xn_exec()
467 retval = -ENXIO; in idpf_vc_xn_exec()
469 } else if (xn->state != IDPF_VC_XN_IDLE) { in idpf_vc_xn_exec()
479 xn->idx, xn->vc_op); in idpf_vc_xn_exec()
482 xn->reply = params->recv_buf; in idpf_vc_xn_exec()
483 xn->reply_sz = 0; in idpf_vc_xn_exec()
484 xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING; in idpf_vc_xn_exec()
485 xn->vc_op = params->vc_op; in idpf_vc_xn_exec()
486 xn->async_handler = params->async_handler; in idpf_vc_xn_exec()
489 if (!params->async) in idpf_vc_xn_exec()
490 reinit_completion(&xn->completed); in idpf_vc_xn_exec()
491 cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) | in idpf_vc_xn_exec()
492 FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx); in idpf_vc_xn_exec()
494 retval = idpf_send_mb_msg(adapter, params->vc_op, in idpf_vc_xn_exec()
495 send_buf->iov_len, send_buf->iov_base, in idpf_vc_xn_exec()
502 if (params->async) in idpf_vc_xn_exec()
505 wait_for_completion_timeout(&xn->completed, in idpf_vc_xn_exec()
506 msecs_to_jiffies(params->timeout_ms)); in idpf_vc_xn_exec()
511 * wait_for_completion_timeout returns. This should be a non-issue in idpf_vc_xn_exec()
515 switch (xn->state) { in idpf_vc_xn_exec()
517 retval = -ENXIO; in idpf_vc_xn_exec()
520 dev_notice_ratelimited(&adapter->pdev->dev, in idpf_vc_xn_exec()
521 "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n", in idpf_vc_xn_exec()
522 params->vc_op, cookie, xn->vc_op, in idpf_vc_xn_exec()
523 xn->salt, params->timeout_ms); in idpf_vc_xn_exec()
524 retval = -ETIME; in idpf_vc_xn_exec()
527 retval = xn->reply_sz; in idpf_vc_xn_exec()
530 dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n", in idpf_vc_xn_exec()
531 params->vc_op); in idpf_vc_xn_exec()
532 retval = -EIO; in idpf_vc_xn_exec()
537 retval = -EIO; in idpf_vc_xn_exec()
542 idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); in idpf_vc_xn_exec()
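
Taken together, the idpf_vc_xn_exec() lines above define the caller contract for the transaction layer: fill a struct idpf_vc_xn_params, point the send (and optionally receive) kvec at a virtchnl2 message, and treat the return value as either the reply size or a negative errno. A minimal caller sketch follows, modeled on the destroy-vport pattern that appears later in this listing; the opcode constant and the timeout value are assumptions, not taken from the matched lines.

	struct virtchnl2_vport v_id = { };
	struct idpf_vc_xn_params xn_params = { };
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;	/* assumed opcode name */
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);
	xn_params.timeout_ms = 500;			/* hypothetical timeout */
	/* recv_buf left empty: a 0-length reply (NULL address) is permitted */

	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;	/* negative errno from the transaction */
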
551 * idpf_vc_xn_forward_async - Handle async reply receives
566 if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { in idpf_vc_xn_forward_async()
567 …dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (… in idpf_vc_xn_forward_async()
568 ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); in idpf_vc_xn_forward_async()
569 xn->reply_sz = 0; in idpf_vc_xn_forward_async()
570 err = -EINVAL; in idpf_vc_xn_forward_async()
574 if (xn->async_handler) { in idpf_vc_xn_forward_async()
575 err = xn->async_handler(adapter, xn, ctlq_msg); in idpf_vc_xn_forward_async()
579 if (ctlq_msg->cookie.mbx.chnl_retval) { in idpf_vc_xn_forward_async()
580 xn->reply_sz = 0; in idpf_vc_xn_forward_async()
581 dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n", in idpf_vc_xn_forward_async()
582 ctlq_msg->cookie.mbx.chnl_opcode); in idpf_vc_xn_forward_async()
583 err = -EINVAL; in idpf_vc_xn_forward_async()
587 idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); in idpf_vc_xn_forward_async()
593 * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
609 msg_info = ctlq_msg->ctx.sw_cookie.data; in idpf_vc_xn_forward_reply()
611 if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) { in idpf_vc_xn_forward_reply()
612 dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n", in idpf_vc_xn_forward_reply()
614 return -EINVAL; in idpf_vc_xn_forward_reply()
616 xn = &adapter->vcxn_mngr->ring[xn_idx]; in idpf_vc_xn_forward_reply()
619 if (xn->salt != salt) { in idpf_vc_xn_forward_reply()
620 …dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:… in idpf_vc_xn_forward_reply()
621 xn->vc_op, xn->salt, xn->state, in idpf_vc_xn_forward_reply()
622 ctlq_msg->cookie.mbx.chnl_opcode, salt); in idpf_vc_xn_forward_reply()
624 return -EINVAL; in idpf_vc_xn_forward_reply()
627 switch (xn->state) { in idpf_vc_xn_forward_reply()
632 dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n", in idpf_vc_xn_forward_reply()
633 ctlq_msg->cookie.mbx.chnl_opcode); in idpf_vc_xn_forward_reply()
634 err = -EINVAL; in idpf_vc_xn_forward_reply()
642 err = -ENXIO; in idpf_vc_xn_forward_reply()
649 dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n", in idpf_vc_xn_forward_reply()
650 ctlq_msg->cookie.mbx.chnl_opcode); in idpf_vc_xn_forward_reply()
651 err = -EBUSY; in idpf_vc_xn_forward_reply()
655 if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { in idpf_vc_xn_forward_reply()
656 …dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %… in idpf_vc_xn_forward_reply()
657 ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); in idpf_vc_xn_forward_reply()
658 xn->reply_sz = 0; in idpf_vc_xn_forward_reply()
659 xn->state = IDPF_VC_XN_COMPLETED_FAILED; in idpf_vc_xn_forward_reply()
660 err = -EINVAL; in idpf_vc_xn_forward_reply()
664 if (ctlq_msg->cookie.mbx.chnl_retval) { in idpf_vc_xn_forward_reply()
665 xn->reply_sz = 0; in idpf_vc_xn_forward_reply()
666 xn->state = IDPF_VC_XN_COMPLETED_FAILED; in idpf_vc_xn_forward_reply()
667 err = -EINVAL; in idpf_vc_xn_forward_reply()
671 if (ctlq_msg->data_len) { in idpf_vc_xn_forward_reply()
672 payload = ctlq_msg->ctx.indirect.payload->va; in idpf_vc_xn_forward_reply()
673 payload_size = ctlq_msg->data_len; in idpf_vc_xn_forward_reply()
676 xn->reply_sz = payload_size; in idpf_vc_xn_forward_reply()
677 xn->state = IDPF_VC_XN_COMPLETED_SUCCESS; in idpf_vc_xn_forward_reply()
679 if (xn->reply.iov_base && xn->reply.iov_len && payload_size) in idpf_vc_xn_forward_reply()
680 memcpy(xn->reply.iov_base, payload, in idpf_vc_xn_forward_reply()
681 min_t(size_t, xn->reply.iov_len, payload_size)); in idpf_vc_xn_forward_reply()
686 complete(&xn->completed); in idpf_vc_xn_forward_reply()
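
The sw_cookie carried in each mailbox descriptor is what ties a reply back to its transaction: the send path packs the ring index and a per-use salt with FIELD_PREP (see the idpf_vc_xn_exec() lines above), and the reply path bounds-checks the index and compares the salt before touching the transaction. A small sketch of that round trip, assuming the extraction side uses FIELD_GET from <linux/bitfield.h> with the same IDPF_VC_XN_IDX_M/IDPF_VC_XN_SALT_M masks (those exact lines are not part of the matched set):

	/* send side: encode which ring slot and which generation of that slot */
	u16 cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
		     FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);

	/* reply side: recover both halves from the control queue message */
	u16 msg_info = ctlq_msg->ctx.sw_cookie.data;
	u16 xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
	u16 salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);

	if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring))
		return -EINVAL;		/* out-of-bounds cookie */
	if (adapter->vcxn_mngr->ring[xn_idx].salt != salt)
		return -EINVAL;		/* stale reply for a recycled slot */
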
692 * idpf_recv_mb_msg - Receive message over mailbox
710 err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg); in idpf_recv_mb_msg()
726 post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, in idpf_recv_mb_msg()
727 adapter->hw.arq, in idpf_recv_mb_msg()
733 dmam_free_coherent(&adapter->pdev->dev, in idpf_recv_mb_msg()
734 dma_mem->size, dma_mem->va, in idpf_recv_mb_msg()
735 dma_mem->pa); in idpf_recv_mb_msg()
740 if (err == -ENXIO) in idpf_recv_mb_msg()
748 * idpf_wait_for_marker_event - wait for software marker response
758 for (i = 0; i < vport->num_txq; i++) in idpf_wait_for_marker_event()
759 idpf_queue_set(SW_MARKER, vport->txqs[i]); in idpf_wait_for_marker_event()
761 event = wait_event_timeout(vport->sw_marker_wq, in idpf_wait_for_marker_event()
763 vport->flags), in idpf_wait_for_marker_event()
766 for (i = 0; i < vport->num_txq; i++) in idpf_wait_for_marker_event()
767 idpf_queue_clear(POLL_MODE, vport->txqs[i]); in idpf_wait_for_marker_event()
772 dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n"); in idpf_wait_for_marker_event()
774 return -ETIMEDOUT; in idpf_wait_for_marker_event()
778 * idpf_send_ver_msg - send virtchnl version message
791 if (adapter->virt_ver_maj) { in idpf_send_ver_msg()
792 vvi.major = cpu_to_le32(adapter->virt_ver_maj); in idpf_send_ver_msg()
793 vvi.minor = cpu_to_le32(adapter->virt_ver_min); in idpf_send_ver_msg()
809 return -EIO; in idpf_send_ver_msg()
815 dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n"); in idpf_send_ver_msg()
816 return -EINVAL; in idpf_send_ver_msg()
821 dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n"); in idpf_send_ver_msg()
826 if (!adapter->virt_ver_maj && in idpf_send_ver_msg()
829 err = -EAGAIN; in idpf_send_ver_msg()
831 adapter->virt_ver_maj = major; in idpf_send_ver_msg()
832 adapter->virt_ver_min = minor; in idpf_send_ver_msg()
838 * idpf_send_get_caps_msg - Send virtchnl get capabilities message
908 xn_params.recv_buf.iov_base = &adapter->caps; in idpf_send_get_caps_msg()
909 xn_params.recv_buf.iov_len = sizeof(adapter->caps); in idpf_send_get_caps_msg()
915 if (reply_sz < sizeof(adapter->caps)) in idpf_send_get_caps_msg()
916 return -EIO; in idpf_send_get_caps_msg()
922 * idpf_vport_alloc_max_qs - Allocate max queues for a vport
929 struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; in idpf_vport_alloc_max_qs()
930 struct virtchnl2_get_capabilities *caps = &adapter->caps; in idpf_vport_alloc_max_qs()
934 mutex_lock(&adapter->queue_lock); in idpf_vport_alloc_max_qs()
936 max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports; in idpf_vport_alloc_max_qs()
937 max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports; in idpf_vport_alloc_max_qs()
938 if (adapter->num_alloc_vports < default_vports) { in idpf_vport_alloc_max_qs()
939 max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q); in idpf_vport_alloc_max_qs()
940 max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q); in idpf_vport_alloc_max_qs()
942 max_q->max_rxq = IDPF_MIN_Q; in idpf_vport_alloc_max_qs()
943 max_q->max_txq = IDPF_MIN_Q; in idpf_vport_alloc_max_qs()
945 max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP; in idpf_vport_alloc_max_qs()
946 max_q->max_complq = max_q->max_txq; in idpf_vport_alloc_max_qs()
948 if (avail_queues->avail_rxq < max_q->max_rxq || in idpf_vport_alloc_max_qs()
949 avail_queues->avail_txq < max_q->max_txq || in idpf_vport_alloc_max_qs()
950 avail_queues->avail_bufq < max_q->max_bufq || in idpf_vport_alloc_max_qs()
951 avail_queues->avail_complq < max_q->max_complq) { in idpf_vport_alloc_max_qs()
952 mutex_unlock(&adapter->queue_lock); in idpf_vport_alloc_max_qs()
954 return -EINVAL; in idpf_vport_alloc_max_qs()
957 avail_queues->avail_rxq -= max_q->max_rxq; in idpf_vport_alloc_max_qs()
958 avail_queues->avail_txq -= max_q->max_txq; in idpf_vport_alloc_max_qs()
959 avail_queues->avail_bufq -= max_q->max_bufq; in idpf_vport_alloc_max_qs()
960 avail_queues->avail_complq -= max_q->max_complq; in idpf_vport_alloc_max_qs()
962 mutex_unlock(&adapter->queue_lock); in idpf_vport_alloc_max_qs()
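
As a concrete, purely hypothetical example of the arithmetic above: with caps->max_rx_q = 64, caps->max_tx_q = 64 and two default vports, each default vport is offered min(64 / 2, IDPF_MAX_Q) RX and TX queues, one completion queue per TX queue, and IDPF_MAX_BUFQS_PER_RXQ_GRP buffer queues per RX queue; vports created beyond the default count fall back to IDPF_MIN_Q. The request only succeeds if avail_queues still covers all four totals, which are then decremented under queue_lock (and handed back later by idpf_vport_dealloc_max_qs()).
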
968 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
977 mutex_lock(&adapter->queue_lock); in idpf_vport_dealloc_max_qs()
978 avail_queues = &adapter->avail_queues; in idpf_vport_dealloc_max_qs()
980 avail_queues->avail_rxq += max_q->max_rxq; in idpf_vport_dealloc_max_qs()
981 avail_queues->avail_txq += max_q->max_txq; in idpf_vport_dealloc_max_qs()
982 avail_queues->avail_bufq += max_q->max_bufq; in idpf_vport_dealloc_max_qs()
983 avail_queues->avail_complq += max_q->max_complq; in idpf_vport_dealloc_max_qs()
985 mutex_unlock(&adapter->queue_lock); in idpf_vport_dealloc_max_qs()
989 * idpf_init_avail_queues - Initialize available queues on the device
994 struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; in idpf_init_avail_queues()
995 struct virtchnl2_get_capabilities *caps = &adapter->caps; in idpf_init_avail_queues()
997 avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q); in idpf_init_avail_queues()
998 avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q); in idpf_init_avail_queues()
999 avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq); in idpf_init_avail_queues()
1000 avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq); in idpf_init_avail_queues()
1004 * idpf_get_reg_intr_vecs - Get vector queue register offset
1018 chunks = &vport->adapter->req_vec_chunks->vchunks; in idpf_get_reg_intr_vecs()
1019 num_vchunks = le16_to_cpu(chunks->num_vchunks); in idpf_get_reg_intr_vecs()
1026 chunk = &chunks->vchunks[j]; in idpf_get_reg_intr_vecs()
1027 num_vec = le16_to_cpu(chunk->num_vectors); in idpf_get_reg_intr_vecs()
1028 reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start); in idpf_get_reg_intr_vecs()
1029 reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start); in idpf_get_reg_intr_vecs()
1030 reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing); in idpf_get_reg_intr_vecs()
1032 dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing); in idpf_get_reg_intr_vecs()
1033 itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing); in idpf_get_reg_intr_vecs()
1051 * idpf_vport_get_q_reg - Get the queue registers for the vport
1065 u16 num_chunks = le16_to_cpu(chunks->num_chunks); in idpf_vport_get_q_reg()
1069 while (num_chunks--) { in idpf_vport_get_q_reg()
1073 chunk = &chunks->chunks[num_chunks]; in idpf_vport_get_q_reg()
1074 if (le32_to_cpu(chunk->type) != q_type) in idpf_vport_get_q_reg()
1077 num_q = le32_to_cpu(chunk->num_queues); in idpf_vport_get_q_reg()
1078 reg_val = le64_to_cpu(chunk->qtail_reg_start); in idpf_vport_get_q_reg()
1081 reg_val += le32_to_cpu(chunk->qtail_reg_spacing); in idpf_vport_get_q_reg()
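
For instance, with hypothetical chunk values of qtail_reg_start = 0x8000, qtail_reg_spacing = 0x1000 and num_queues = 4, the loop above records tail offsets 0x8000, 0x9000, 0xA000 and 0xB000 for that chunk; __idpf_queue_reg_init() later converts each recorded offset into a mapped address with idpf_get_reg_addr().
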
1089 * __idpf_queue_reg_init - initialize queue registers
1100 struct idpf_adapter *adapter = vport->adapter; in __idpf_queue_reg_init()
1105 for (i = 0; i < vport->num_txq_grp; i++) { in __idpf_queue_reg_init()
1106 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_queue_reg_init()
1108 for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++) in __idpf_queue_reg_init()
1109 tx_qgrp->txqs[j]->tail = in __idpf_queue_reg_init()
1114 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_queue_reg_init()
1115 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_queue_reg_init()
1116 u16 num_rxq = rx_qgrp->singleq.num_rxq; in __idpf_queue_reg_init()
1121 q = rx_qgrp->singleq.rxqs[j]; in __idpf_queue_reg_init()
1122 q->tail = idpf_get_reg_addr(adapter, in __idpf_queue_reg_init()
1128 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_queue_reg_init()
1129 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_queue_reg_init()
1130 u8 num_bufqs = vport->num_bufqs_per_qgrp; in __idpf_queue_reg_init()
1135 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in __idpf_queue_reg_init()
1136 q->tail = idpf_get_reg_addr(adapter, in __idpf_queue_reg_init()
1149 * idpf_queue_reg_init - initialize queue registers
1159 u16 vport_idx = vport->idx; in idpf_queue_reg_init()
1166 return -ENOMEM; in idpf_queue_reg_init()
1168 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_queue_reg_init()
1169 if (vport_config->req_qs_chunks) { in idpf_queue_reg_init()
1171 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; in idpf_queue_reg_init()
1172 chunks = &vc_aq->chunks; in idpf_queue_reg_init()
1174 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_queue_reg_init()
1175 chunks = &vport_params->chunks; in idpf_queue_reg_init()
1182 if (num_regs < vport->num_txq) { in idpf_queue_reg_init()
1183 ret = -EINVAL; in idpf_queue_reg_init()
1189 if (num_regs < vport->num_txq) { in idpf_queue_reg_init()
1190 ret = -EINVAL; in idpf_queue_reg_init()
1197 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_queue_reg_init()
1201 if (num_regs < vport->num_bufq) { in idpf_queue_reg_init()
1202 ret = -EINVAL; in idpf_queue_reg_init()
1208 if (num_regs < vport->num_bufq) { in idpf_queue_reg_init()
1209 ret = -EINVAL; in idpf_queue_reg_init()
1216 if (num_regs < vport->num_rxq) { in idpf_queue_reg_init()
1217 ret = -EINVAL; in idpf_queue_reg_init()
1223 if (num_regs < vport->num_rxq) { in idpf_queue_reg_init()
1224 ret = -EINVAL; in idpf_queue_reg_init()
1236 * idpf_send_create_vport_msg - Send virtchnl create vport message
1249 u16 idx = adapter->next_vport; in idpf_send_create_vport_msg()
1254 if (!adapter->vport_params_reqd[idx]) { in idpf_send_create_vport_msg()
1255 adapter->vport_params_reqd[idx] = kzalloc(buf_size, in idpf_send_create_vport_msg()
1257 if (!adapter->vport_params_reqd[idx]) in idpf_send_create_vport_msg()
1258 return -ENOMEM; in idpf_send_create_vport_msg()
1261 vport_msg = adapter->vport_params_reqd[idx]; in idpf_send_create_vport_msg()
1262 vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); in idpf_send_create_vport_msg()
1263 vport_msg->vport_index = cpu_to_le16(idx); in idpf_send_create_vport_msg()
1265 if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) in idpf_send_create_vport_msg()
1266 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); in idpf_send_create_vport_msg()
1268 vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); in idpf_send_create_vport_msg()
1270 if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) in idpf_send_create_vport_msg()
1271 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); in idpf_send_create_vport_msg()
1273 vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); in idpf_send_create_vport_msg()
1277 dev_err(&adapter->pdev->dev, "Enough queues are not available"); in idpf_send_create_vport_msg()
1282 if (!adapter->vport_params_recvd[idx]) { in idpf_send_create_vport_msg()
1283 adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, in idpf_send_create_vport_msg()
1285 if (!adapter->vport_params_recvd[idx]) { in idpf_send_create_vport_msg()
1286 err = -ENOMEM; in idpf_send_create_vport_msg()
1294 xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx]; in idpf_send_create_vport_msg()
1306 kfree(adapter->vport_params_recvd[idx]); in idpf_send_create_vport_msg()
1307 adapter->vport_params_recvd[idx] = NULL; in idpf_send_create_vport_msg()
1308 kfree(adapter->vport_params_reqd[idx]); in idpf_send_create_vport_msg()
1309 adapter->vport_params_reqd[idx] = NULL; in idpf_send_create_vport_msg()
1315 * idpf_check_supported_desc_ids - Verify we have required descriptor support
1322 struct idpf_adapter *adapter = vport->adapter; in idpf_check_supported_desc_ids()
1326 vport_msg = adapter->vport_params_recvd[vport->idx]; in idpf_check_supported_desc_ids()
1329 (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE || in idpf_check_supported_desc_ids()
1330 vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) { in idpf_check_supported_desc_ids()
1331 pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n"); in idpf_check_supported_desc_ids()
1332 return -EOPNOTSUPP; in idpf_check_supported_desc_ids()
1335 rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); in idpf_check_supported_desc_ids()
1336 tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); in idpf_check_supported_desc_ids()
1338 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_check_supported_desc_ids()
1340 dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); in idpf_check_supported_desc_ids()
1341 vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); in idpf_check_supported_desc_ids()
1345 vport->base_rxd = true; in idpf_check_supported_desc_ids()
1348 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_check_supported_desc_ids()
1352 dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n"); in idpf_check_supported_desc_ids()
1353 vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID); in idpf_check_supported_desc_ids()
1360 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
1372 v_id.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_destroy_vport_msg()
1378 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_destroy_vport_msg()
1384 * idpf_send_enable_vport_msg - Send virtchnl enable vport message
1396 v_id.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_enable_vport_msg()
1402 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_enable_vport_msg()
1408 * idpf_send_disable_vport_msg - Send virtchnl disable vport message
1420 v_id.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_disable_vport_msg()
1426 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_disable_vport_msg()
1432 * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
1448 totqs = vport->num_txq + vport->num_complq; in idpf_send_config_tx_queues_msg()
1451 return -ENOMEM; in idpf_send_config_tx_queues_msg()
1454 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_send_config_tx_queues_msg()
1455 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_config_tx_queues_msg()
1458 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { in idpf_send_config_tx_queues_msg()
1460 cpu_to_le32(tx_qgrp->txqs[j]->q_id); in idpf_send_config_tx_queues_msg()
1462 cpu_to_le16(vport->txq_model); in idpf_send_config_tx_queues_msg()
1466 cpu_to_le16(tx_qgrp->txqs[j]->desc_count); in idpf_send_config_tx_queues_msg()
1468 cpu_to_le64(tx_qgrp->txqs[j]->dma); in idpf_send_config_tx_queues_msg()
1469 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_send_config_tx_queues_msg()
1470 struct idpf_tx_queue *q = tx_qgrp->txqs[j]; in idpf_send_config_tx_queues_msg()
1473 cpu_to_le16(tx_qgrp->complq->q_id); in idpf_send_config_tx_queues_msg()
1488 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_send_config_tx_queues_msg()
1491 qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); in idpf_send_config_tx_queues_msg()
1492 qi[k].model = cpu_to_le16(vport->txq_model); in idpf_send_config_tx_queues_msg()
1494 qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); in idpf_send_config_tx_queues_msg()
1495 qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); in idpf_send_config_tx_queues_msg()
1497 if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq)) in idpf_send_config_tx_queues_msg()
1508 return -EINVAL; in idpf_send_config_tx_queues_msg()
1523 return -ENOMEM; in idpf_send_config_tx_queues_msg()
1530 ctq->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_config_tx_queues_msg()
1531 ctq->num_qinfo = cpu_to_le16(num_chunks); in idpf_send_config_tx_queues_msg()
1532 memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks); in idpf_send_config_tx_queues_msg()
1536 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_config_tx_queues_msg()
1541 totqs -= num_chunks; in idpf_send_config_tx_queues_msg()
1551 * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
1567 totqs = vport->num_rxq + vport->num_bufq; in idpf_send_config_rx_queues_msg()
1570 return -ENOMEM; in idpf_send_config_rx_queues_msg()
1573 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_config_rx_queues_msg()
1574 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_config_rx_queues_msg()
1578 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_config_rx_queues_msg()
1581 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { in idpf_send_config_rx_queues_msg()
1583 &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_send_config_rx_queues_msg()
1585 qi[k].queue_id = cpu_to_le32(bufq->q_id); in idpf_send_config_rx_queues_msg()
1586 qi[k].model = cpu_to_le16(vport->rxq_model); in idpf_send_config_rx_queues_msg()
1590 qi[k].ring_len = cpu_to_le16(bufq->desc_count); in idpf_send_config_rx_queues_msg()
1591 qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); in idpf_send_config_rx_queues_msg()
1592 qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); in idpf_send_config_rx_queues_msg()
1595 cpu_to_le16(bufq->rx_buffer_low_watermark); in idpf_send_config_rx_queues_msg()
1601 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_config_rx_queues_msg()
1602 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_send_config_rx_queues_msg()
1604 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_send_config_rx_queues_msg()
1608 struct idpf_rx_queue *rxq; in idpf_send_config_rx_queues_msg() local
1610 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_send_config_rx_queues_msg()
1611 rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_config_rx_queues_msg()
1615 rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_send_config_rx_queues_msg()
1616 sets = rxq->bufq_sets; in idpf_send_config_rx_queues_msg()
1618 /* In splitq mode, RXQ buffer size should be in idpf_send_config_rx_queues_msg()
1620 * associated with this RXQ. in idpf_send_config_rx_queues_msg()
1622 rxq->rx_buf_size = sets[0].bufq.rx_buf_size; in idpf_send_config_rx_queues_msg()
1625 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { in idpf_send_config_rx_queues_msg()
1631 cpu_to_le16(rxq->rx_buffer_low_watermark); in idpf_send_config_rx_queues_msg()
1635 rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size; in idpf_send_config_rx_queues_msg()
1637 if (idpf_queue_has(HSPLIT_EN, rxq)) { in idpf_send_config_rx_queues_msg()
1641 cpu_to_le16(rxq->rx_hbuf_size); in idpf_send_config_rx_queues_msg()
1645 qi[k].queue_id = cpu_to_le32(rxq->q_id); in idpf_send_config_rx_queues_msg()
1646 qi[k].model = cpu_to_le16(vport->rxq_model); in idpf_send_config_rx_queues_msg()
1648 qi[k].ring_len = cpu_to_le16(rxq->desc_count); in idpf_send_config_rx_queues_msg()
1649 qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); in idpf_send_config_rx_queues_msg()
1650 qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); in idpf_send_config_rx_queues_msg()
1651 qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size); in idpf_send_config_rx_queues_msg()
1654 qi[k].desc_ids = cpu_to_le64(rxq->rxdids); in idpf_send_config_rx_queues_msg()
1660 return -EINVAL; in idpf_send_config_rx_queues_msg()
1675 return -ENOMEM; in idpf_send_config_rx_queues_msg()
1682 crq->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_config_rx_queues_msg()
1683 crq->num_qinfo = cpu_to_le16(num_chunks); in idpf_send_config_rx_queues_msg()
1684 memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks); in idpf_send_config_rx_queues_msg()
1688 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_config_rx_queues_msg()
1693 totqs -= num_chunks; in idpf_send_config_rx_queues_msg()
1703 * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
1722 num_txq = vport->num_txq + vport->num_complq; in idpf_send_ena_dis_queues_msg()
1723 num_rxq = vport->num_rxq + vport->num_bufq; in idpf_send_ena_dis_queues_msg()
1728 return -ENOMEM; in idpf_send_ena_dis_queues_msg()
1730 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_send_ena_dis_queues_msg()
1731 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_ena_dis_queues_msg()
1733 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { in idpf_send_ena_dis_queues_msg()
1735 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); in idpf_send_ena_dis_queues_msg()
1739 if (vport->num_txq != k) in idpf_send_ena_dis_queues_msg()
1740 return -EINVAL; in idpf_send_ena_dis_queues_msg()
1742 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_send_ena_dis_queues_msg()
1745 for (i = 0; i < vport->num_txq_grp; i++, k++) { in idpf_send_ena_dis_queues_msg()
1746 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_ena_dis_queues_msg()
1749 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); in idpf_send_ena_dis_queues_msg()
1752 if (vport->num_complq != (k - vport->num_txq)) in idpf_send_ena_dis_queues_msg()
1753 return -EINVAL; in idpf_send_ena_dis_queues_msg()
1756 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_ena_dis_queues_msg()
1757 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_ena_dis_queues_msg()
1759 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_ena_dis_queues_msg()
1760 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_send_ena_dis_queues_msg()
1762 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_send_ena_dis_queues_msg()
1765 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_send_ena_dis_queues_msg()
1767 cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); in idpf_send_ena_dis_queues_msg()
1772 cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); in idpf_send_ena_dis_queues_msg()
1779 if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) in idpf_send_ena_dis_queues_msg()
1780 return -EINVAL; in idpf_send_ena_dis_queues_msg()
1782 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_ena_dis_queues_msg()
1785 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_ena_dis_queues_msg()
1786 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_ena_dis_queues_msg()
1788 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { in idpf_send_ena_dis_queues_msg()
1791 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_send_ena_dis_queues_msg()
1794 qc[k].start_queue_id = cpu_to_le32(q->q_id); in idpf_send_ena_dis_queues_msg()
1798 if (vport->num_bufq != k - (vport->num_txq + in idpf_send_ena_dis_queues_msg()
1799 vport->num_complq + in idpf_send_ena_dis_queues_msg()
1800 vport->num_rxq)) in idpf_send_ena_dis_queues_msg()
1801 return -EINVAL; in idpf_send_ena_dis_queues_msg()
1815 return -ENOMEM; in idpf_send_ena_dis_queues_msg()
1827 eq->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_ena_dis_queues_msg()
1828 eq->chunks.num_chunks = cpu_to_le16(num_chunks); in idpf_send_ena_dis_queues_msg()
1829 qcs = &eq->chunks; in idpf_send_ena_dis_queues_msg()
1830 memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks); in idpf_send_ena_dis_queues_msg()
1834 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_ena_dis_queues_msg()
1839 num_q -= num_chunks; in idpf_send_ena_dis_queues_msg()
1849 * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
1867 num_q = vport->num_txq + vport->num_rxq; in idpf_send_map_unmap_queue_vector_msg()
1872 return -ENOMEM; in idpf_send_map_unmap_queue_vector_msg()
1874 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_send_map_unmap_queue_vector_msg()
1875 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_map_unmap_queue_vector_msg()
1877 for (j = 0; j < tx_qgrp->num_txq; j++, k++) { in idpf_send_map_unmap_queue_vector_msg()
1880 vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); in idpf_send_map_unmap_queue_vector_msg()
1882 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_send_map_unmap_queue_vector_msg()
1884 cpu_to_le16(tx_qgrp->complq->q_vector->v_idx); in idpf_send_map_unmap_queue_vector_msg()
1886 cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx); in idpf_send_map_unmap_queue_vector_msg()
1889 cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx); in idpf_send_map_unmap_queue_vector_msg()
1891 cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx); in idpf_send_map_unmap_queue_vector_msg()
1896 if (vport->num_txq != k) in idpf_send_map_unmap_queue_vector_msg()
1897 return -EINVAL; in idpf_send_map_unmap_queue_vector_msg()
1899 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_map_unmap_queue_vector_msg()
1900 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_map_unmap_queue_vector_msg()
1903 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_map_unmap_queue_vector_msg()
1904 num_rxq = rx_qgrp->splitq.num_rxq_sets; in idpf_send_map_unmap_queue_vector_msg()
1906 num_rxq = rx_qgrp->singleq.num_rxq; in idpf_send_map_unmap_queue_vector_msg()
1909 struct idpf_rx_queue *rxq; in idpf_send_map_unmap_queue_vector_msg() local
1911 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_map_unmap_queue_vector_msg()
1912 rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; in idpf_send_map_unmap_queue_vector_msg()
1914 rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_map_unmap_queue_vector_msg()
1918 vqv[k].queue_id = cpu_to_le32(rxq->q_id); in idpf_send_map_unmap_queue_vector_msg()
1919 vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); in idpf_send_map_unmap_queue_vector_msg()
1920 vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); in idpf_send_map_unmap_queue_vector_msg()
1924 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_send_map_unmap_queue_vector_msg()
1925 if (vport->num_rxq != k - vport->num_complq) in idpf_send_map_unmap_queue_vector_msg()
1926 return -EINVAL; in idpf_send_map_unmap_queue_vector_msg()
1928 if (vport->num_rxq != k - vport->num_txq) in idpf_send_map_unmap_queue_vector_msg()
1929 return -EINVAL; in idpf_send_map_unmap_queue_vector_msg()
1943 return -ENOMEM; in idpf_send_map_unmap_queue_vector_msg()
1957 vqvm->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_map_unmap_queue_vector_msg()
1958 vqvm->num_qv_maps = cpu_to_le16(num_chunks); in idpf_send_map_unmap_queue_vector_msg()
1959 memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks); in idpf_send_map_unmap_queue_vector_msg()
1961 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_map_unmap_queue_vector_msg()
1966 num_q -= num_chunks; in idpf_send_map_unmap_queue_vector_msg()
1976 * idpf_send_enable_queues_msg - send enable queues virtchnl message
1988 * idpf_send_disable_queues_msg - send disable queues virtchnl message
2005 for (i = 0; i < vport->num_txq; i++) in idpf_send_disable_queues_msg()
2006 idpf_queue_set(POLL_MODE, vport->txqs[i]); in idpf_send_disable_queues_msg()
2010 for (i = 0; i < vport->num_q_vectors; i++) in idpf_send_disable_queues_msg()
2011 napi_schedule(&vport->q_vectors[i].napi); in idpf_send_disable_queues_msg()
2018 * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
2038 * idpf_send_delete_queues_msg - send delete queues virtchnl message
2051 u16 vport_idx = vport->idx; in idpf_send_delete_queues_msg()
2056 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_send_delete_queues_msg()
2057 if (vport_config->req_qs_chunks) { in idpf_send_delete_queues_msg()
2058 chunks = &vport_config->req_qs_chunks->chunks; in idpf_send_delete_queues_msg()
2060 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_send_delete_queues_msg()
2061 chunks = &vport_params->chunks; in idpf_send_delete_queues_msg()
2064 num_chunks = le16_to_cpu(chunks->num_chunks); in idpf_send_delete_queues_msg()
2069 return -ENOMEM; in idpf_send_delete_queues_msg()
2071 eq->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_delete_queues_msg()
2072 eq->chunks.num_chunks = cpu_to_le16(num_chunks); in idpf_send_delete_queues_msg()
2074 idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks, in idpf_send_delete_queues_msg()
2081 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_delete_queues_msg()
2087 * idpf_send_config_queues_msg - Send config queues virtchnl message
2105 * idpf_send_add_queues_msg - Send virtchnl add queues message
2122 u16 vport_idx = vport->idx; in idpf_send_add_queues_msg()
2128 return -ENOMEM; in idpf_send_add_queues_msg()
2130 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_send_add_queues_msg()
2131 kfree(vport_config->req_qs_chunks); in idpf_send_add_queues_msg()
2132 vport_config->req_qs_chunks = NULL; in idpf_send_add_queues_msg()
2134 aq.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_add_queues_msg()
2146 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_add_queues_msg()
2150 /* compare vc_msg num queues with vport num queues */ in idpf_send_add_queues_msg()
2151 if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || in idpf_send_add_queues_msg()
2152 le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || in idpf_send_add_queues_msg()
2153 le16_to_cpu(vc_msg->num_tx_complq) != num_complq || in idpf_send_add_queues_msg()
2154 le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) in idpf_send_add_queues_msg()
2155 return -EINVAL; in idpf_send_add_queues_msg()
2158 le16_to_cpu(vc_msg->chunks.num_chunks)); in idpf_send_add_queues_msg()
2160 return -EIO; in idpf_send_add_queues_msg()
2162 vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); in idpf_send_add_queues_msg()
2163 if (!vport_config->req_qs_chunks) in idpf_send_add_queues_msg()
2164 return -ENOMEM; in idpf_send_add_queues_msg()
2170 * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
2189 return -ENOMEM; in idpf_send_alloc_vectors_msg()
2201 num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); in idpf_send_alloc_vectors_msg()
2204 return -EIO; in idpf_send_alloc_vectors_msg()
2207 return -EINVAL; in idpf_send_alloc_vectors_msg()
2209 kfree(adapter->req_vec_chunks); in idpf_send_alloc_vectors_msg()
2210 adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL); in idpf_send_alloc_vectors_msg()
2211 if (!adapter->req_vec_chunks) in idpf_send_alloc_vectors_msg()
2212 return -ENOMEM; in idpf_send_alloc_vectors_msg()
2214 if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) { in idpf_send_alloc_vectors_msg()
2215 kfree(adapter->req_vec_chunks); in idpf_send_alloc_vectors_msg()
2216 adapter->req_vec_chunks = NULL; in idpf_send_alloc_vectors_msg()
2217 return -EINVAL; in idpf_send_alloc_vectors_msg()
2224 * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
2231 struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; in idpf_send_dealloc_vectors_msg()
2232 struct virtchnl2_vector_chunks *vcs = &ac->vchunks; in idpf_send_dealloc_vectors_msg()
2237 buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); in idpf_send_dealloc_vectors_msg()
2247 kfree(adapter->req_vec_chunks); in idpf_send_dealloc_vectors_msg()
2248 adapter->req_vec_chunks = NULL; in idpf_send_dealloc_vectors_msg()
2254 * idpf_get_max_vfs - Get max number of vfs supported
2261 return le16_to_cpu(adapter->caps.max_sriov_vfs); in idpf_get_max_vfs()
2265 * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
2288 * idpf_send_get_stats_msg - Send virtchnl get statistics message
2295 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); in idpf_send_get_stats_msg()
2296 struct rtnl_link_stats64 *netstats = &np->netstats; in idpf_send_get_stats_msg()
2303 if (np->state <= __IDPF_VPORT_DOWN) in idpf_send_get_stats_msg()
2306 stats_msg.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_stats_msg()
2314 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_stats_msg()
2318 return -EIO; in idpf_send_get_stats_msg()
2320 spin_lock_bh(&np->stats_lock); in idpf_send_get_stats_msg()
2322 netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) + in idpf_send_get_stats_msg()
2325 netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) + in idpf_send_get_stats_msg()
2328 netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes); in idpf_send_get_stats_msg()
2329 netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes); in idpf_send_get_stats_msg()
2330 netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors); in idpf_send_get_stats_msg()
2331 netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors); in idpf_send_get_stats_msg()
2332 netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards); in idpf_send_get_stats_msg()
2333 netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards); in idpf_send_get_stats_msg()
2335 vport->port_stats.vport_stats = stats_msg; in idpf_send_get_stats_msg()
2337 spin_unlock_bh(&np->stats_lock); in idpf_send_get_stats_msg()
2343 * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
2360 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_send_get_set_rss_lut_msg()
2361 buf_size = struct_size(rl, lut, rss_data->rss_lut_size); in idpf_send_get_set_rss_lut_msg()
2364 return -ENOMEM; in idpf_send_get_set_rss_lut_msg()
2366 rl->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_set_rss_lut_msg()
2375 return -ENOMEM; in idpf_send_get_set_rss_lut_msg()
2380 rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); in idpf_send_get_set_rss_lut_msg()
2381 for (i = 0; i < rss_data->rss_lut_size; i++) in idpf_send_get_set_rss_lut_msg()
2382 rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); in idpf_send_get_set_rss_lut_msg()
2386 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_set_rss_lut_msg()
2392 return -EIO; in idpf_send_get_set_rss_lut_msg()
2394 lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32); in idpf_send_get_set_rss_lut_msg()
2396 return -EIO; in idpf_send_get_set_rss_lut_msg()
2399 if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) in idpf_send_get_set_rss_lut_msg()
2402 rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); in idpf_send_get_set_rss_lut_msg()
2403 kfree(rss_data->rss_lut); in idpf_send_get_set_rss_lut_msg()
2405 rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); in idpf_send_get_set_rss_lut_msg()
2406 if (!rss_data->rss_lut) { in idpf_send_get_set_rss_lut_msg()
2407 rss_data->rss_lut_size = 0; in idpf_send_get_set_rss_lut_msg()
2408 return -ENOMEM; in idpf_send_get_set_rss_lut_msg()
2412 memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size); in idpf_send_get_set_rss_lut_msg()
2418 * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
2435 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_send_get_set_rss_key_msg()
2436 buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); in idpf_send_get_set_rss_key_msg()
2439 return -ENOMEM; in idpf_send_get_set_rss_key_msg()
2441 rk->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_set_rss_key_msg()
2448 return -ENOMEM; in idpf_send_get_set_rss_key_msg()
2454 rk->key_len = cpu_to_le16(rss_data->rss_key_size); in idpf_send_get_set_rss_key_msg()
2455 for (i = 0; i < rss_data->rss_key_size; i++) in idpf_send_get_set_rss_key_msg()
2456 rk->key_flex[i] = rss_data->rss_key[i]; in idpf_send_get_set_rss_key_msg()
2461 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_set_rss_key_msg()
2467 return -EIO; in idpf_send_get_set_rss_key_msg()
2470 le16_to_cpu(recv_rk->key_len)); in idpf_send_get_set_rss_key_msg()
2472 return -EIO; in idpf_send_get_set_rss_key_msg()
2475 if (rss_data->rss_key_size == key_size) in idpf_send_get_set_rss_key_msg()
2478 rss_data->rss_key_size = key_size; in idpf_send_get_set_rss_key_msg()
2479 kfree(rss_data->rss_key); in idpf_send_get_set_rss_key_msg()
2480 rss_data->rss_key = kzalloc(key_size, GFP_KERNEL); in idpf_send_get_set_rss_key_msg()
2481 if (!rss_data->rss_key) { in idpf_send_get_set_rss_key_msg()
2482 rss_data->rss_key_size = 0; in idpf_send_get_set_rss_key_msg()
2483 return -ENOMEM; in idpf_send_get_set_rss_key_msg()
2487 memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size); in idpf_send_get_set_rss_key_msg()
2493 * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
2504 if (!pstate->outer_ip || !pstate->outer_frag) { in idpf_fill_ptype_lookup()
2505 pstate->outer_ip = true; in idpf_fill_ptype_lookup()
2508 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; in idpf_fill_ptype_lookup()
2510 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; in idpf_fill_ptype_lookup()
2513 ptype->outer_frag = LIBETH_RX_PT_FRAG; in idpf_fill_ptype_lookup()
2514 pstate->outer_frag = true; in idpf_fill_ptype_lookup()
2517 ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; in idpf_fill_ptype_lookup()
2518 pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; in idpf_fill_ptype_lookup()
2521 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; in idpf_fill_ptype_lookup()
2523 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; in idpf_fill_ptype_lookup()
2526 ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; in idpf_fill_ptype_lookup()
2532 if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && in idpf_finalize_ptype_lookup()
2533 ptype->inner_prot) in idpf_finalize_ptype_lookup()
2534 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; in idpf_finalize_ptype_lookup()
2535 else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && in idpf_finalize_ptype_lookup()
2536 ptype->outer_ip) in idpf_finalize_ptype_lookup()
2537 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; in idpf_finalize_ptype_lookup()
2538 else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) in idpf_finalize_ptype_lookup()
2539 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; in idpf_finalize_ptype_lookup()
2541 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; in idpf_finalize_ptype_lookup()
2547 * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
2558 struct idpf_adapter *adapter = vport->adapter; in idpf_send_get_rx_ptype_msg()
2564 if (vport->rx_ptype_lkup) in idpf_send_get_rx_ptype_msg()
2567 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_get_rx_ptype_msg()
2574 return -ENOMEM; in idpf_send_get_rx_ptype_msg()
2578 return -ENOMEM; in idpf_send_get_rx_ptype_msg()
2582 return -ENOMEM; in idpf_send_get_rx_ptype_msg()
2592 get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id); in idpf_send_get_rx_ptype_msg()
2595 get_ptype_info->num_ptypes = in idpf_send_get_rx_ptype_msg()
2596 cpu_to_le16(max_ptype - next_ptype_id); in idpf_send_get_rx_ptype_msg()
2598 get_ptype_info->num_ptypes = in idpf_send_get_rx_ptype_msg()
2605 ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); in idpf_send_get_rx_ptype_msg()
2607 return -EINVAL; in idpf_send_get_rx_ptype_msg()
2609 next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) + in idpf_send_get_rx_ptype_msg()
2610 le16_to_cpu(get_ptype_info->num_ptypes); in idpf_send_get_rx_ptype_msg()
2614 for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { in idpf_send_get_rx_ptype_msg()
2624 return -EINVAL; in idpf_send_get_rx_ptype_msg()
2627 if (le16_to_cpu(ptype->ptype_id_10) == in idpf_send_get_rx_ptype_msg()
2631 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_get_rx_ptype_msg()
2632 k = le16_to_cpu(ptype->ptype_id_10); in idpf_send_get_rx_ptype_msg()
2634 k = ptype->ptype_id_8; in idpf_send_get_rx_ptype_msg()
2636 for (j = 0; j < ptype->proto_id_count; j++) { in idpf_send_get_rx_ptype_msg()
2637 id = le16_to_cpu(ptype->proto_id[j]); in idpf_send_get_rx_ptype_msg()
2757 vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); in idpf_send_get_rx_ptype_msg()
2763 * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
2775 loopback.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_ena_dis_loopback_msg()
2782 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_ena_dis_loopback_msg()
2788 * idpf_find_ctlq - Given a type and id, find ctlq info
2800 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) in idpf_find_ctlq()
2801 if (cq->q_id == id && cq->cq_type == type) in idpf_find_ctlq()
2808 * idpf_init_dflt_mbx - Setup default mailbox parameters and make request
2829 struct idpf_hw *hw = &adapter->hw; in idpf_init_dflt_mbx()
2832 adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); in idpf_init_dflt_mbx()
2838 hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, in idpf_init_dflt_mbx()
2840 hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, in idpf_init_dflt_mbx()
2843 if (!hw->asq || !hw->arq) { in idpf_init_dflt_mbx()
2846 return -ENOENT; in idpf_init_dflt_mbx()
2849 adapter->state = __IDPF_VER_CHECK; in idpf_init_dflt_mbx()
2855 * idpf_deinit_dflt_mbx - Free up ctlqs setup
2860 if (adapter->hw.arq && adapter->hw.asq) { in idpf_deinit_dflt_mbx()
2862 idpf_ctlq_deinit(&adapter->hw); in idpf_deinit_dflt_mbx()
2864 adapter->hw.arq = NULL; in idpf_deinit_dflt_mbx()
2865 adapter->hw.asq = NULL; in idpf_deinit_dflt_mbx()
2869 * idpf_vport_params_buf_rel - Release memory for MailBox resources
2876 kfree(adapter->vport_params_recvd); in idpf_vport_params_buf_rel()
2877 adapter->vport_params_recvd = NULL; in idpf_vport_params_buf_rel()
2878 kfree(adapter->vport_params_reqd); in idpf_vport_params_buf_rel()
2879 adapter->vport_params_reqd = NULL; in idpf_vport_params_buf_rel()
2880 kfree(adapter->vport_ids); in idpf_vport_params_buf_rel()
2881 adapter->vport_ids = NULL; in idpf_vport_params_buf_rel()
2885 * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources
2894 adapter->vport_params_reqd = kcalloc(num_max_vports, in idpf_vport_params_buf_alloc()
2895 sizeof(*adapter->vport_params_reqd), in idpf_vport_params_buf_alloc()
2897 if (!adapter->vport_params_reqd) in idpf_vport_params_buf_alloc()
2898 return -ENOMEM; in idpf_vport_params_buf_alloc()
2900 adapter->vport_params_recvd = kcalloc(num_max_vports, in idpf_vport_params_buf_alloc()
2901 sizeof(*adapter->vport_params_recvd), in idpf_vport_params_buf_alloc()
2903 if (!adapter->vport_params_recvd) in idpf_vport_params_buf_alloc()
2906 adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); in idpf_vport_params_buf_alloc()
2907 if (!adapter->vport_ids) in idpf_vport_params_buf_alloc()
2910 if (adapter->vport_config) in idpf_vport_params_buf_alloc()
2913 adapter->vport_config = kcalloc(num_max_vports, in idpf_vport_params_buf_alloc()
2914 sizeof(*adapter->vport_config), in idpf_vport_params_buf_alloc()
2916 if (!adapter->vport_config) in idpf_vport_params_buf_alloc()
2924 return -ENOMEM; in idpf_vport_params_buf_alloc()
2928 * idpf_vc_core_init - Initialize state machine and get driver specific
2937 * Returns 0 on success; on -EAGAIN the function will get called again,
2946 if (!adapter->vcxn_mngr) { in idpf_vc_core_init()
2947 adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL); in idpf_vc_core_init()
2948 if (!adapter->vcxn_mngr) { in idpf_vc_core_init()
2949 err = -ENOMEM; in idpf_vc_core_init()
2953 idpf_vc_xn_init(adapter->vcxn_mngr); in idpf_vc_core_init()
2955 while (adapter->state != __IDPF_INIT_SW) { in idpf_vc_core_init()
2956 switch (adapter->state) { in idpf_vc_core_init()
2962 adapter->state = __IDPF_GET_CAPS; in idpf_vc_core_init()
2964 case -EAGAIN: in idpf_vc_core_init()
2976 adapter->state = __IDPF_INIT_SW; in idpf_vc_core_init()
2979 dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", in idpf_vc_core_init()
2980 adapter->state); in idpf_vc_core_init()
2981 err = -EINVAL; in idpf_vc_core_init()
2992 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); in idpf_vc_core_init()
2994 adapter->max_vports = num_max_vports; in idpf_vc_core_init()
2995 adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports), in idpf_vc_core_init()
2997 if (!adapter->vports) in idpf_vc_core_init()
2998 return -ENOMEM; in idpf_vc_core_init()
3000 if (!adapter->netdevs) { in idpf_vc_core_init()
3001 adapter->netdevs = kcalloc(num_max_vports, in idpf_vc_core_init()
3004 if (!adapter->netdevs) { in idpf_vc_core_init()
3005 err = -ENOMEM; in idpf_vc_core_init()
3012 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", in idpf_vc_core_init()
3020 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); in idpf_vc_core_init()
3022 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, in idpf_vc_core_init()
3023 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); in idpf_vc_core_init()
3027 dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", in idpf_vc_core_init()
3037 queue_delayed_work(adapter->init_wq, &adapter->init_task, in idpf_vc_core_init()
3038 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); in idpf_vc_core_init()
3040 set_bit(IDPF_VC_CORE_INIT, adapter->flags); in idpf_vc_core_init()
3045 cancel_delayed_work_sync(&adapter->serv_task); in idpf_vc_core_init()
3046 cancel_delayed_work_sync(&adapter->mbx_task); in idpf_vc_core_init()
3049 kfree(adapter->vports); in idpf_vc_core_init()
3050 adapter->vports = NULL; in idpf_vc_core_init()
3055 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) in idpf_vc_core_init()
3058 if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { in idpf_vc_core_init()
3059 dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); in idpf_vc_core_init()
3061 return -EFAULT; in idpf_vc_core_init()
3067 adapter->state = __IDPF_VER_CHECK; in idpf_vc_core_init()
3068 if (adapter->vcxn_mngr) in idpf_vc_core_init()
3069 idpf_vc_xn_shutdown(adapter->vcxn_mngr); in idpf_vc_core_init()
3070 set_bit(IDPF_HR_DRV_LOAD, adapter->flags); in idpf_vc_core_init()
3071 queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, in idpf_vc_core_init()
3074 return -EAGAIN; in idpf_vc_core_init()
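The fragment above steps adapter->state from __IDPF_VER_CHECK through __IDPF_GET_CAPS to __IDPF_INIT_SW and returns -EAGAIN so the caller can re-queue initialization when a negotiation step is not done yet. A simplified model of that state machine, with callbacks standing in for the version and capability exchanges (names are illustrative):

#include <errno.h>

enum init_state { STATE_VER_CHECK, STATE_GET_CAPS, STATE_INIT_SW };

/* Advance the negotiation state machine.  A step that cannot complete yet
 * returns an error (e.g. -EAGAIN) so the caller can reschedule init. */
static int core_init_step(enum init_state *state,
			  int (*send_ver)(void), int (*send_caps)(void))
{
	while (*state != STATE_INIT_SW) {
		int err;

		switch (*state) {
		case STATE_VER_CHECK:
			err = send_ver();
			if (err)
				return err;	/* try the whole init again later */
			*state = STATE_GET_CAPS;
			break;
		case STATE_GET_CAPS:
			err = send_caps();
			if (err)
				return err;
			*state = STATE_INIT_SW;
			break;
		default:
			return -EINVAL;	/* device is in a bad state */
		}
	}

	return 0;
}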
3078 * idpf_vc_core_deinit - Device deinit routine
3086 if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) in idpf_vc_core_deinit()
3090 remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags); in idpf_vc_core_deinit()
3092 idpf_vc_xn_shutdown(adapter->vcxn_mngr); in idpf_vc_core_deinit()
3098 idpf_vc_xn_shutdown(adapter->vcxn_mngr); in idpf_vc_core_deinit()
3100 cancel_delayed_work_sync(&adapter->serv_task); in idpf_vc_core_deinit()
3101 cancel_delayed_work_sync(&adapter->mbx_task); in idpf_vc_core_deinit()
3105 kfree(adapter->vports); in idpf_vc_core_deinit()
3106 adapter->vports = NULL; in idpf_vc_core_deinit()
3108 clear_bit(IDPF_VC_CORE_INIT, adapter->flags); in idpf_vc_core_deinit()
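The deinit fragment shuts down the transaction manager before cancelling the service and mailbox tasks, frees the vport array, and only then clears IDPF_VC_CORE_INIT; a caller that finds the bit already clear returns immediately. A sketch of that guard-and-order, with function pointers standing in for the individual teardown steps (all names illustrative):

#include <stdbool.h>

/* Teardown is a no-op unless init previously completed; resources are
 * released in roughly the reverse order they became live. */
static void core_deinit(bool *core_init_done,
			void (*shutdown_transactions)(void),
			void (*cancel_tasks)(void),
			void (*free_vports)(void))
{
	if (!*core_init_done)
		return;

	shutdown_transactions();	/* fail anything still waiting on the mailbox */
	cancel_tasks();			/* stop workers that could queue new messages */
	free_vports();

	*core_init_done = false;
}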
3112 * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3126 vec_info.num_curr_vecs = vport->num_q_vectors; in idpf_vport_alloc_vec_indexes()
3127 vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq); in idpf_vport_alloc_vec_indexes()
3128 vec_info.default_vport = vport->default_vport; in idpf_vport_alloc_vec_indexes()
3129 vec_info.index = vport->idx; in idpf_vport_alloc_vec_indexes()
3131 num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, in idpf_vport_alloc_vec_indexes()
3132 vport->q_vector_idxs, in idpf_vport_alloc_vec_indexes()
3135 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", in idpf_vport_alloc_vec_indexes()
3137 return -EINVAL; in idpf_vport_alloc_vec_indexes()
3140 vport->num_q_vectors = num_alloc_vecs; in idpf_vport_alloc_vec_indexes()
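The fragment above requests max(num_txq, num_rxq) vectors from the adapter's shared pool and records whatever smaller count was actually granted in num_q_vectors, failing only when the distribution itself errors out. A small sketch of that request-and-accept logic, with a callback standing in for the pool (not the driver's real idpf_req_rel_vector_indexes() signature):

#include <stddef.h>

/* Request one vector per queue pair; keep whatever the shared pool grants. */
static int alloc_vec_indexes(size_t num_txq, size_t num_rxq,
			     long (*request)(size_t want),
			     size_t *num_q_vectors)
{
	size_t want = num_txq > num_rxq ? num_txq : num_rxq;
	long got = request(want);

	if (got <= 0)
		return -1;	/* vector distribution failed */

	*num_q_vectors = (size_t)got;
	return 0;
}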
3146 * idpf_vport_init - Initialize virtual port
3154 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_init()
3160 u16 idx = vport->idx; in idpf_vport_init()
3162 vport_config = adapter->vport_config[idx]; in idpf_vport_init()
3163 rss_data = &vport_config->user_config.rss_data; in idpf_vport_init()
3164 vport_msg = adapter->vport_params_recvd[idx]; in idpf_vport_init()
3166 vport_config->max_q.max_txq = max_q->max_txq; in idpf_vport_init()
3167 vport_config->max_q.max_rxq = max_q->max_rxq; in idpf_vport_init()
3168 vport_config->max_q.max_complq = max_q->max_complq; in idpf_vport_init()
3169 vport_config->max_q.max_bufq = max_q->max_bufq; in idpf_vport_init()
3171 vport->txq_model = le16_to_cpu(vport_msg->txq_model); in idpf_vport_init()
3172 vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); in idpf_vport_init()
3173 vport->vport_type = le16_to_cpu(vport_msg->vport_type); in idpf_vport_init()
3174 vport->vport_id = le32_to_cpu(vport_msg->vport_id); in idpf_vport_init()
3176 rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, in idpf_vport_init()
3177 le16_to_cpu(vport_msg->rss_key_size)); in idpf_vport_init()
3178 rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); in idpf_vport_init()
3180 ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); in idpf_vport_init()
3181 vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; in idpf_vport_init()
3184 memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); in idpf_vport_init()
3185 memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); in idpf_vport_init()
3194 vport->crc_enable = adapter->crc_enable; in idpf_vport_init()
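The vport init fragment copies little-endian fields out of the CREATE_VPORT reply into host-order vport state and subtracts the link-layer overhead from the advertised max_mtu. A userspace sketch of that unpacking; the struct layout, the field set, and the LL_OVERHEAD constant are illustrative and do not reproduce the real virtchnl2 message format:

#include <stddef.h>
#include <stdint.h>

#define LL_OVERHEAD	18	/* illustrative link-layer length reserved out of max_mtu */

struct vport_msg_wire {		/* multi-byte fields little-endian on the wire */
	uint16_t txq_model;
	uint16_t rxq_model;
	uint32_t vport_id;
	uint16_t max_mtu;
} __attribute__((packed));

struct vport_host {
	uint16_t txq_model;
	uint16_t rxq_model;
	uint32_t vport_id;
	uint16_t max_mtu;
};

static uint16_t get_le16(const uint8_t *p) { return (uint16_t)(p[0] | (p[1] << 8)); }

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Convert the wire-format reply into host byte order, field by field. */
static void vport_unpack(struct vport_host *v, const struct vport_msg_wire *m)
{
	const uint8_t *p = (const uint8_t *)m;

	v->txq_model = get_le16(p + offsetof(struct vport_msg_wire, txq_model));
	v->rxq_model = get_le16(p + offsetof(struct vport_msg_wire, rxq_model));
	v->vport_id  = get_le32(p + offsetof(struct vport_msg_wire, vport_id));
	v->max_mtu   = get_le16(p + offsetof(struct vport_msg_wire, max_mtu)) - LL_OVERHEAD;
}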
3198 * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
3213 u16 num_chunks = le16_to_cpu(chunks->num_vchunks); in idpf_get_vec_ids()
3217 vecids[num_vecid_filled] = adapter->mb_vector.v_idx; in idpf_get_vec_ids()
3224 chunk = &chunks->vchunks[j]; in idpf_get_vec_ids()
3225 num_vec = le16_to_cpu(chunk->num_vectors); in idpf_get_vec_ids()
3226 start_vecid = le16_to_cpu(chunk->start_vector_id); in idpf_get_vec_ids()
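The vector-ID fragment fills slot 0 with the mailbox vector and then expands each of the num_vchunks chunk descriptors, every chunk contributing num_vectors consecutive IDs starting at start_vector_id. A sketch of that expansion; the structs are illustrative and use host byte order where the real message uses little-endian fields:

#include <stddef.h>
#include <stdint.h>

struct vec_chunk  { uint16_t start_vector_id; uint16_t num_vectors; };
struct vec_chunks { uint16_t num_vchunks; struct vec_chunk vchunks[]; };

/* Expand chunk descriptors into a flat vector-ID array; slot 0 is reserved
 * for the mailbox vector.  Returns how many entries were filled. */
static size_t get_vec_ids(uint16_t *vecids, size_t num_vecids,
			  uint16_t mbx_vec, const struct vec_chunks *chunks)
{
	size_t filled = 0;

	if (!num_vecids)
		return 0;
	vecids[filled++] = mbx_vec;

	for (uint16_t j = 0; j < chunks->num_vchunks; j++) {
		const struct vec_chunk *c = &chunks->vchunks[j];

		for (uint16_t i = 0; i < c->num_vectors && filled < num_vecids; i++)
			vecids[filled++] = c->start_vector_id + i;
	}

	return filled;
}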
3243 * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
3255 u16 num_chunks = le16_to_cpu(chunks->num_chunks); in idpf_vport_get_queue_ids()
3259 while (num_chunks--) { in idpf_vport_get_queue_ids()
3262 chunk = &chunks->chunks[num_chunks]; in idpf_vport_get_queue_ids()
3263 if (le32_to_cpu(chunk->type) != q_type) in idpf_vport_get_queue_ids()
3266 num_q = le32_to_cpu(chunk->num_queues); in idpf_vport_get_queue_ids()
3267 start_q_id = le32_to_cpu(chunk->start_queue_id); in idpf_vport_get_queue_ids()
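The queue-ID fragment walks the same kind of chunk list, but filters on chunk type: TX, RX, completion and buffer queue chunks arrive interleaved, and only the chunks matching the requested q_type contribute IDs. A sketch of that filtered walk (structs illustrative, host byte order, forward iteration where the fragment iterates backwards):

#include <stddef.h>
#include <stdint.h>

struct q_chunk  { uint32_t type; uint32_t start_queue_id; uint32_t num_queues; };
struct q_chunks { uint16_t num_chunks; struct q_chunk chunks[]; };

/* Collect only the queue IDs whose chunk type matches q_type. */
static size_t get_queue_ids(uint32_t *qids, size_t max_qids, uint32_t q_type,
			    const struct q_chunks *chunks)
{
	size_t filled = 0;

	for (uint16_t c = 0; c < chunks->num_chunks; c++) {
		const struct q_chunk *ch = &chunks->chunks[c];

		if (ch->type != q_type)
			continue;

		for (uint32_t i = 0; i < ch->num_queues && filled < max_qids; i++)
			qids[filled++] = ch->start_queue_id + i;
	}

	return filled;
}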
3284 * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3302 for (i = 0; i < vport->num_txq_grp; i++) { in __idpf_vport_queue_ids_init()
3303 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_vport_queue_ids_init()
3305 for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) in __idpf_vport_queue_ids_init()
3306 tx_qgrp->txqs[j]->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3310 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_vport_queue_ids_init()
3311 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_vport_queue_ids_init()
3314 if (idpf_is_queue_model_split(vport->rxq_model)) in __idpf_vport_queue_ids_init()
3315 num_rxq = rx_qgrp->splitq.num_rxq_sets; in __idpf_vport_queue_ids_init()
3317 num_rxq = rx_qgrp->singleq.num_rxq; in __idpf_vport_queue_ids_init()
3322 if (idpf_is_queue_model_split(vport->rxq_model)) in __idpf_vport_queue_ids_init()
3323 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; in __idpf_vport_queue_ids_init()
3325 q = rx_qgrp->singleq.rxqs[j]; in __idpf_vport_queue_ids_init()
3326 q->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3331 for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { in __idpf_vport_queue_ids_init()
3332 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_vport_queue_ids_init()
3334 tx_qgrp->complq->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3338 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_vport_queue_ids_init()
3339 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_vport_queue_ids_init()
3340 u8 num_bufqs = vport->num_bufqs_per_qgrp; in __idpf_vport_queue_ids_init()
3345 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in __idpf_vport_queue_ids_init()
3346 q->q_id = qids[k]; in __idpf_vport_queue_ids_init()
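The fragment above distributes the flat qids[] array across the queue groups: one pass per queue type, handing out IDs group by group and queue by queue until either the groups or the IDs run out, so the caller can compare the consumed count against the expected total. A sketch of the TX-side distribution only (struct names illustrative):

#include <stddef.h>
#include <stdint.h>

struct txq      { uint32_t q_id; };
struct txq_grp  { size_t num_txq; struct txq *txqs; };

/* Hand out IDs from the flat qids[] array group by group, queue by queue.
 * Returns how many IDs were consumed so the caller can validate it. */
static size_t assign_txq_ids(struct txq_grp *grps, size_t num_grps,
			     const uint32_t *qids, size_t num_qids)
{
	size_t k = 0;

	for (size_t i = 0; i < num_grps; i++)
		for (size_t j = 0; j < grps[i].num_txq && k < num_qids; j++, k++)
			grps[i].txqs[j].q_id = qids[k];

	return k;
}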
3358 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3369 u16 vport_idx = vport->idx; in idpf_vport_queue_ids_init()
3374 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_vport_queue_ids_init()
3375 if (vport_config->req_qs_chunks) { in idpf_vport_queue_ids_init()
3377 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; in idpf_vport_queue_ids_init()
3378 chunks = &vc_aq->chunks; in idpf_vport_queue_ids_init()
3380 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_vport_queue_ids_init()
3381 chunks = &vport_params->chunks; in idpf_vport_queue_ids_init()
3386 return -ENOMEM; in idpf_vport_queue_ids_init()
3391 if (num_ids < vport->num_txq) { in idpf_vport_queue_ids_init()
3392 err = -EINVAL; in idpf_vport_queue_ids_init()
3397 if (num_ids < vport->num_txq) { in idpf_vport_queue_ids_init()
3398 err = -EINVAL; in idpf_vport_queue_ids_init()
3405 if (num_ids < vport->num_rxq) { in idpf_vport_queue_ids_init()
3406 err = -EINVAL; in idpf_vport_queue_ids_init()
3411 if (num_ids < vport->num_rxq) { in idpf_vport_queue_ids_init()
3412 err = -EINVAL; in idpf_vport_queue_ids_init()
3416 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_queue_ids_init()
3421 if (num_ids < vport->num_complq) { in idpf_vport_queue_ids_init()
3422 err = -EINVAL; in idpf_vport_queue_ids_init()
3426 if (num_ids < vport->num_complq) { in idpf_vport_queue_ids_init()
3427 err = -EINVAL; in idpf_vport_queue_ids_init()
3432 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_queue_ids_init()
3437 if (num_ids < vport->num_bufq) { in idpf_vport_queue_ids_init()
3438 err = -EINVAL; in idpf_vport_queue_ids_init()
3442 if (num_ids < vport->num_bufq) in idpf_vport_queue_ids_init()
3443 err = -EINVAL; in idpf_vport_queue_ids_init()
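The surrounding function calls the per-type helper once for each queue type and treats any shortfall (fewer IDs assigned than queues configured) as -EINVAL: TX and RX queues are always initialized, completion queues only when the TX model is split, buffer queues only when the RX model is split. A compact model of that sequencing, with a callback standing in for the per-type assignment (names illustrative):

#include <stddef.h>

enum q_type { Q_TX, Q_RX, Q_TX_COMPL, Q_RX_BUF };

struct q_counts { size_t txq, rxq, complq, bufq; };

/* assign() stands in for the per-type helper and returns how many IDs it
 * managed to hand out; any shortfall is an inconsistent control-plane reply. */
static int queue_ids_init(const struct q_counts *n, int split_txq, int split_rxq,
			  size_t (*assign)(enum q_type type))
{
	if (assign(Q_TX) < n->txq)
		return -1;
	if (assign(Q_RX) < n->rxq)
		return -1;
	if (split_txq && assign(Q_TX_COMPL) < n->complq)
		return -1;
	if (split_rxq && assign(Q_RX_BUF) < n->bufq)
		return -1;

	return 0;
}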
3452 * idpf_vport_adjust_qs - Adjust to new requested queues
3462 vport_msg.txq_model = cpu_to_le16(vport->txq_model); in idpf_vport_adjust_qs()
3463 vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); in idpf_vport_adjust_qs()
3464 err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, in idpf_vport_adjust_qs()
3476 * idpf_is_capability_ena - Default implementation of capability checking
3487 u8 *caps = (u8 *)&adapter->caps; in idpf_is_capability_ena()
3514 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; in idpf_get_vport_id()
3516 return le32_to_cpu(vport_msg->vport_id); in idpf_get_vport_id()
3520 * idpf_mac_filter_async_handler - Async callback for mac filters
3545 if (!ctlq_msg->cookie.mbx.chnl_retval) in idpf_mac_filter_async_handler()
3549 if (xn->reply_sz < sizeof(*ma_list)) in idpf_mac_filter_async_handler()
3552 ma_list = ctlq_msg->ctx.indirect.payload->va; in idpf_mac_filter_async_handler()
3553 mac_addr = ma_list->mac_addr_list; in idpf_mac_filter_async_handler()
3554 num_entries = le16_to_cpu(ma_list->num_mac_addr); in idpf_mac_filter_async_handler()
3556 if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries)) in idpf_mac_filter_async_handler()
3559 vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id)); in idpf_mac_filter_async_handler()
3563 vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)]; in idpf_mac_filter_async_handler()
3564 ma_list_head = &vport_config->user_config.mac_filter_list; in idpf_mac_filter_async_handler()
3570 spin_lock_bh(&vport_config->mac_filter_list_lock); in idpf_mac_filter_async_handler()
3573 if (ether_addr_equal(mac_addr[i].addr, f->macaddr)) in idpf_mac_filter_async_handler()
3574 list_del(&f->list); in idpf_mac_filter_async_handler()
3575 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_mac_filter_async_handler()
3576 dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n", in idpf_mac_filter_async_handler()
3577 xn->vc_op); in idpf_mac_filter_async_handler()
3582 dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n", in idpf_mac_filter_async_handler()
3583 xn->vc_op, xn->reply_sz); in idpf_mac_filter_async_handler()
3585 return -EINVAL; in idpf_mac_filter_async_handler()
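The async handler above validates the reply in two stages: the payload must be at least the fixed header size before num_mac_addr can be trusted, and then at least struct_size(ma_list, mac_addr_list, num_entries) once it is known; only a reply that passes both checks is used to roll back filters on error. A userspace sketch of the two-stage size check, with the flexible-array sizing done by hand and host byte order where the real field is little-endian:

#include <stddef.h>
#include <stdint.h>

struct mac_addr { uint8_t addr[6]; };

struct mac_addr_list {
	uint32_t vport_id;
	uint16_t num_mac_addr;
	struct mac_addr mac_addr_list[];
};

/* Returns 0 if the payload is large enough to hold the header plus the
 * number of entries it claims to carry, otherwise -1. */
static int validate_mac_reply(const void *payload, size_t reply_sz)
{
	const struct mac_addr_list *ma = payload;
	size_t needed;

	if (reply_sz < sizeof(*ma))
		return -1;	/* cannot even read num_mac_addr safely */

	needed = sizeof(*ma) +
		 (size_t)ma->num_mac_addr * sizeof(ma->mac_addr_list[0]);

	return reply_sz < needed ? -1 : 0;
}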
3589 * idpf_add_del_mac_filters - Add/del mac filters
3603 struct idpf_adapter *adapter = np->adapter; in idpf_add_del_mac_filters()
3617 vport_config = adapter->vport_config[np->vport_idx]; in idpf_add_del_mac_filters()
3618 spin_lock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
3621 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, in idpf_add_del_mac_filters()
3623 if (add && f->add) in idpf_add_del_mac_filters()
3625 else if (!add && f->remove) in idpf_add_del_mac_filters()
3630 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
3639 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
3641 return -ENOMEM; in idpf_add_del_mac_filters()
3644 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, in idpf_add_del_mac_filters()
3646 if (add && f->add) { in idpf_add_del_mac_filters()
3647 ether_addr_copy(mac_addr[i].addr, f->macaddr); in idpf_add_del_mac_filters()
3649 f->add = false; in idpf_add_del_mac_filters()
3653 if (!add && f->remove) { in idpf_add_del_mac_filters()
3654 ether_addr_copy(mac_addr[i].addr, f->macaddr); in idpf_add_del_mac_filters()
3656 f->remove = false; in idpf_add_del_mac_filters()
3662 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
3681 return -ENOMEM; in idpf_add_del_mac_filters()
3686 ma_list->vport_id = cpu_to_le32(np->vport_id); in idpf_add_del_mac_filters()
3687 ma_list->num_mac_addr = cpu_to_le16(num_entries); in idpf_add_del_mac_filters()
3688 memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size); in idpf_add_del_mac_filters()
3697 total_filters -= num_entries; in idpf_add_del_mac_filters()
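The fragment above snapshots the pending add/remove filters under the list lock and then sends them in batches, sizing each message so it fits within the mailbox buffer limit and decrementing the remaining count by num_entries per round. A sketch of that batching arithmetic; the buffer, header, and entry sizes here are illustrative, not the driver's real IDPF_CTLQ_MAX_BUF_LEN layout:

#include <stddef.h>

#define MAX_BUF_LEN	4096	/* illustrative mailbox buffer limit */
#define HDR_LEN		8	/* illustrative fixed message header */
#define ENTRY_LEN	6	/* one MAC address */

/* Calls send() once per batch with the offset and size of that batch;
 * returns the number of messages it took to drain the list. */
static size_t send_in_batches(size_t total_filters,
			      void (*send)(size_t first, size_t count))
{
	size_t max_per_msg = (MAX_BUF_LEN - HDR_LEN) / ENTRY_LEN;
	size_t sent = 0, msgs = 0;

	while (total_filters) {
		size_t n = total_filters < max_per_msg ? total_filters : max_per_msg;

		send(sent, n);
		sent += n;
		total_filters -= n;
		msgs++;
	}

	return msgs;
}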
3704 * idpf_set_promiscuous - set promiscuous and send message to mailbox
3722 if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags)) in idpf_set_promiscuous()
3724 if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags)) in idpf_set_promiscuous()