Lines Matching full:qs
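Identifier search results for qs in the idpf driver's virtchnl queue-set code; each hit gives the source line number, the matching line, and the enclosing function.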

737 	qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL);  in idpf_alloc_queue_set()
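The first hit allocates the set with struct_size(), i.e. a struct ending in a flexible array of queue pointers. A minimal sketch of the layout these matches imply (the union arrangement and the queue struct names are assumptions, not copied from the driver headers):

	struct idpf_queue_ptr {
		u32 type;			/* VIRTCHNL2_QUEUE_TYPE_* */
		union {				/* assumed: one pointer per type */
			struct idpf_tx_queue *txq;
			struct idpf_compl_queue *complq;
			struct idpf_rx_queue *rxq;
			struct idpf_buf_queue *bufq;
		};
	};

	struct idpf_queue_set {
		struct idpf_vport *vport;	/* used at hits 830, 1814, 2330 */
		u32 num;			/* number of valid qs[] entries */
		struct idpf_queue_ptr qs[];	/* sized via struct_size() above */
	};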
806 * @qs: set of the Tx queues
810 static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs) in idpf_wait_for_marker_event_set() argument
815 for (u32 i = 0; i < qs->num; i++) { in idpf_wait_for_marker_event_set()
816 switch (qs->qs[i].type) { in idpf_wait_for_marker_event_set()
818 txq = qs->qs[i].txq; in idpf_wait_for_marker_event_set()
830 netdev_warn(qs->vport->netdev, in idpf_wait_for_marker_event_set()
846 struct idpf_queue_set *qs __free(kfree) = NULL; in idpf_wait_for_marker_event()
848 qs = idpf_alloc_queue_set(vport, vport->num_txq); in idpf_wait_for_marker_event()
849 if (!qs) in idpf_wait_for_marker_event()
852 for (u32 i = 0; i < qs->num; i++) { in idpf_wait_for_marker_event()
853 qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_wait_for_marker_event()
854 qs->qs[i].txq = vport->txqs[i]; in idpf_wait_for_marker_event()
857 return idpf_wait_for_marker_event_set(qs); in idpf_wait_for_marker_event()
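Fragments 846-857 reconstruct almost the whole caller: allocate a set covering every Tx queue, tag each slot, then wait for the per-set marker events. A hedged reassembly (only the -ENOMEM return is assumed; the rest appears verbatim in the hits above):

	static int idpf_wait_for_marker_event(struct idpf_vport *vport)
	{
		struct idpf_queue_set *qs __free(kfree) = NULL;

		qs = idpf_alloc_queue_set(vport, vport->num_txq);
		if (!qs)
			return -ENOMEM;	/* assumed error code */

		for (u32 i = 0; i < qs->num; i++) {
			qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
			qs->qs[i].txq = vport->txqs[i];
		}

		return idpf_wait_for_marker_event_set(qs);
	}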
1781 * @qs: set of the Tx queues to configure
1783 * Send config queues virtchnl message for queues contained in the @qs array.
1784 * The @qs array can contain Tx queues (or completion queues) only.
1788 static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs) in idpf_send_config_tx_queue_set_msg() argument
1798 qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL); in idpf_send_config_tx_queue_set_msg()
1804 for (u32 i = 0; i < qs->num; i++) { in idpf_send_config_tx_queue_set_msg()
1805 if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX) in idpf_send_config_tx_queue_set_msg()
1806 idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq, in idpf_send_config_tx_queue_set_msg()
1808 else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION) in idpf_send_config_tx_queue_set_msg()
1809 idpf_fill_complq_config_chunk(qs->vport, in idpf_send_config_tx_queue_set_msg()
1810 qs->qs[i].complq, in idpf_send_config_tx_queue_set_msg()
1814 return idpf_send_chunked_msg(qs->vport, &params); in idpf_send_config_tx_queue_set_msg()
1825 struct idpf_queue_set *qs __free(kfree) = NULL; in idpf_send_config_tx_queues_msg()
1829 qs = idpf_alloc_queue_set(vport, totqs); in idpf_send_config_tx_queues_msg()
1830 if (!qs) in idpf_send_config_tx_queues_msg()
1838 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_send_config_tx_queues_msg()
1839 qs->qs[k++].txq = tx_qgrp->txqs[j]; in idpf_send_config_tx_queues_msg()
1843 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; in idpf_send_config_tx_queues_msg()
1844 qs->qs[k++].complq = tx_qgrp->complq; in idpf_send_config_tx_queues_msg()
1852 return idpf_send_config_tx_queue_set_msg(qs); in idpf_send_config_tx_queues_msg()
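Hits 1825-1852 show the caller building one set entry per Tx queue plus, per group, one completion-queue entry. A hedged reconstruction of that fill loop; the group iteration bounds and the split-model check come from the driver's surrounding conventions (cf. hit 2275), not from these fragments:

	for (u32 i = 0, k = 0; i < vport->num_txq_grp; i++) {
		const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

		for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
			qs->qs[k++].txq = tx_qgrp->txqs[j];
		}

		/* Completion queues exist only in the split queue model. */
		if (idpf_is_queue_model_split(vport->txq_model)) {
			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
			qs->qs[k++].complq = tx_qgrp->complq;
		}
	}

	return idpf_send_config_tx_queue_set_msg(qs);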
1966 * @qs: set of the Rx queues to configure
1968 * Send config queues virtchnl message for queues contained in the @qs array.
1969 * The @qs array can contain Rx queues (or buffer queues) only.
1973 static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs) in idpf_send_config_rx_queue_set_msg() argument
1983 qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL); in idpf_send_config_rx_queue_set_msg()
1989 for (u32 i = 0; i < qs->num; i++) { in idpf_send_config_rx_queue_set_msg()
1990 if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX) in idpf_send_config_rx_queue_set_msg()
1991 idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq, in idpf_send_config_rx_queue_set_msg()
1993 else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER) in idpf_send_config_rx_queue_set_msg()
1994 idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq, in idpf_send_config_rx_queue_set_msg()
1998 return idpf_send_chunked_msg(qs->vport, &params); in idpf_send_config_rx_queue_set_msg()
2010 struct idpf_queue_set *qs __free(kfree) = NULL; in idpf_send_config_rx_queues_msg()
2014 qs = idpf_alloc_queue_set(vport, totqs); in idpf_send_config_rx_queues_msg()
2015 if (!qs) in idpf_send_config_rx_queues_msg()
2029 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; in idpf_send_config_rx_queues_msg()
2030 qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_send_config_rx_queues_msg()
2037 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; in idpf_send_config_rx_queues_msg()
2040 qs->qs[k++].rxq = in idpf_send_config_rx_queues_msg()
2043 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_config_rx_queues_msg()
2051 return idpf_send_config_rx_queue_set_msg(qs); in idpf_send_config_rx_queues_msg()
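The Rx caller (hits 2010-2051) mirrors the Tx one: buffer queues are filled first in the split model, then the Rx queues, taken from either the split or the single-queue array. A hedged sketch of the pattern; field names such as num_bufqs_per_qgrp and rxq_sets are taken from the idpf driver at large, not from these fragments:

	for (u32 i = 0, k = 0; i < vport->num_rxq_grp; i++) {
		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		bool split = idpf_is_queue_model_split(vport->rxq_model);
		u32 num_rxq;

		if (split) {
			for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
				qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
				qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
			}
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		} else {
			num_rxq = rx_qgrp->singleq.num_rxq;
		}

		for (u32 j = 0; j < num_rxq; j++) {
			qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
			if (split)
				qs->qs[k++].rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
		}
	}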
2084 * @qs: set of the queues to enable or disable
2088 * in the @qs array.
2089 * The @qs array can contain pointers to both Rx and Tx queues.
2093 static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs, in idpf_send_ena_dis_queue_set_msg() argument
2103 .num_chunks = qs->num, in idpf_send_ena_dis_queue_set_msg()
2106 qc = kcalloc(qs->num, sizeof(*qc), GFP_KERNEL); in idpf_send_ena_dis_queue_set_msg()
2112 for (u32 i = 0; i < qs->num; i++) { in idpf_send_ena_dis_queue_set_msg()
2113 const struct idpf_queue_ptr *q = &qs->qs[i]; in idpf_send_ena_dis_queue_set_msg()
2139 return idpf_send_chunked_msg(qs->vport, &params); in idpf_send_ena_dis_queue_set_msg()
2152 struct idpf_queue_set *qs __free(kfree) = NULL; in idpf_send_ena_dis_queues_msg()
2159 qs = idpf_alloc_queue_set(vport, num_q); in idpf_send_ena_dis_queues_msg()
2160 if (!qs) in idpf_send_ena_dis_queues_msg()
2169 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_send_ena_dis_queues_msg()
2170 qs->qs[k++].txq = tx_qgrp->txqs[j]; in idpf_send_ena_dis_queues_msg()
2176 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; in idpf_send_ena_dis_queues_msg()
2177 qs->qs[k++].complq = tx_qgrp->complq; in idpf_send_ena_dis_queues_msg()
2195 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; in idpf_send_ena_dis_queues_msg()
2198 qs->qs[k++].rxq = in idpf_send_ena_dis_queues_msg()
2201 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_ena_dis_queues_msg()
2208 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; in idpf_send_ena_dis_queues_msg()
2209 qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_send_ena_dis_queues_msg()
2216 return idpf_send_ena_dis_queue_set_msg(qs, en); in idpf_send_ena_dis_queues_msg()
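Hits 2093-2139 are the per-set sender (one virtchnl chunk per entry, dispatched via idpf_send_chunked_msg()), and 2152-2216 the caller that covers every queue in the vport. As a usage illustration, a hypothetical helper (not in the driver) that toggles a single Rx queue through this API:

	/* Hypothetical: enable or disable one Rx queue via the set API. */
	static int idpf_toggle_one_rxq(struct idpf_vport *vport,
				       struct idpf_rx_queue *rxq, bool en)
	{
		struct idpf_queue_set *qs __free(kfree) = NULL;

		qs = idpf_alloc_queue_set(vport, 1);
		if (!qs)
			return -ENOMEM;

		qs->qs[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
		qs->qs[0].rxq = rxq;

		return idpf_send_ena_dis_queue_set_msg(qs, en);
	}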
2249 * @qs: set of the queues to map or unmap
2255 idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs, in idpf_send_map_unmap_queue_set_vector_msg() argument
2265 .num_chunks = qs->num, in idpf_send_map_unmap_queue_set_vector_msg()
2269 vqv = kcalloc(qs->num, sizeof(*vqv), GFP_KERNEL); in idpf_send_map_unmap_queue_set_vector_msg()
2275 split = idpf_is_queue_model_split(qs->vport->txq_model); in idpf_send_map_unmap_queue_set_vector_msg()
2277 for (u32 i = 0; i < qs->num; i++) { in idpf_send_map_unmap_queue_set_vector_msg()
2278 const struct idpf_queue_ptr *q = &qs->qs[i]; in idpf_send_map_unmap_queue_set_vector_msg()
2297 v_idx = qs->vport->noirq_v_idx; in idpf_send_map_unmap_queue_set_vector_msg()
2317 v_idx = qs->vport->noirq_v_idx; in idpf_send_map_unmap_queue_set_vector_msg()
2330 return idpf_send_chunked_msg(qs->vport, &params); in idpf_send_map_unmap_queue_set_vector_msg()
2343 struct idpf_queue_set *qs __free(kfree) = NULL; in idpf_send_map_unmap_queue_vector_msg()
2347 qs = idpf_alloc_queue_set(vport, num_q); in idpf_send_map_unmap_queue_vector_msg()
2348 if (!qs) in idpf_send_map_unmap_queue_vector_msg()
2355 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; in idpf_send_map_unmap_queue_vector_msg()
2356 qs->qs[k++].txq = tx_qgrp->txqs[j]; in idpf_send_map_unmap_queue_vector_msg()
2373 qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; in idpf_send_map_unmap_queue_vector_msg()
2376 qs->qs[k++].rxq = in idpf_send_map_unmap_queue_vector_msg()
2379 qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j]; in idpf_send_map_unmap_queue_vector_msg()
2386 return idpf_send_map_unmap_queue_set_vector_msg(qs, map); in idpf_send_map_unmap_queue_vector_msg()
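Notable in the mapping hits: lines 2297 and 2317 both fall back to qs->vport->noirq_v_idx, i.e. queues that own no interrupt vector get mapped to the vport's dedicated no-IRQ vector. The surrounding condition is not in this listing; a hedged sketch of what the fallback likely looks like for a Tx queue (the q_vector and v_idx member names are assumptions):

	if (q->txq->q_vector)
		v_idx = q->txq->q_vector->v_idx;
	else
		v_idx = qs->vport->noirq_v_idx;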
2392 * @qs: set of the queues
2394 * Send enable queues virtchnl message for queues contained in the @qs array.
2398 int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs) in idpf_send_enable_queue_set_msg() argument
2400 return idpf_send_ena_dis_queue_set_msg(qs, true); in idpf_send_enable_queue_set_msg()
2406 * @qs: set of the queues
2410 int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs) in idpf_send_disable_queue_set_msg() argument
2414 err = idpf_send_ena_dis_queue_set_msg(qs, false); in idpf_send_disable_queue_set_msg()
2418 return idpf_wait_for_marker_event_set(qs); in idpf_send_disable_queue_set_msg()
2424 * @qs: set of the queues
2426 * Send config queues virtchnl message for queues contained in the @qs array. in idpf_send_config_queue_set_msg()
2427 * The @qs array can contain both Rx and Tx queues. in idpf_send_config_queue_set_msg()
2431 int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs) in idpf_send_config_queue_set_msg() argument
2435 err = idpf_send_config_tx_queue_set_msg(qs); in idpf_send_config_queue_set_msg()
2439 return idpf_send_config_rx_queue_set_msg(qs); in idpf_send_config_queue_set_msg()
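The public wrappers at the end compose the pieces above: disable must additionally wait for the Tx marker events, and a mixed config walks Tx first, then Rx. A hedged reassembly of both from hits 2410-2439 (only the error checks between the fragments are assumed):

	int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs)
	{
		int err;

		err = idpf_send_ena_dis_queue_set_msg(qs, false);
		if (err)
			return err;

		return idpf_wait_for_marker_event_set(qs);
	}

	int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
	{
		int err;

		err = idpf_send_config_tx_queue_set_msg(qs);
		if (err)
			return err;

		return idpf_send_config_rx_queue_set_msg(qs);
	}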