Lines Matching +full:tx +full:- +full:rings +full:- +full:empty
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
12 #include <linux/dma-mapping.h>
17 #include <linux/soc/ti/k3-ringacc.h>
18 #include <linux/dma/ti-cppi5.h>
19 #include <linux/dma/k3-udma-glue.h>
21 #include "k3-udma.h"
22 #include "k3-psil-priv.h"
105 common->udmax = of_xudma_dev_get(udmax_np, NULL);
106 if (IS_ERR(common->udmax))
107 return PTR_ERR(common->udmax);
109 common->ringacc = xudma_get_ringacc(common->udmax);
110 common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
119 return -EINVAL;
122 return -EINVAL;
125 common->ep_config = psil_get_ep_config(thread_id);
126 if (IS_ERR(common->ep_config)) {
127 dev_err(common->dev,
128 "No configuration for psi-l thread 0x%04x\n",
130 return PTR_ERR(common->ep_config);
133 common->epib = common->ep_config->needs_epib;
134 common->psdata_size = common->ep_config->psd_size;
137 common->dst_thread = thread_id;
139 common->src_thread = thread_id;
154 return -EINVAL;
156 index = of_property_match_string(chn_np, "dma-names", name);
160 if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
162 return -ENOENT;
170 if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
171 dev_err(common->dev, "Invalid channel atype: %u\n",
173 ret = -EINVAL;
176 if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
177 dev_err(common->dev, "Invalid channel asel: %u\n",
179 ret = -EINVAL;
183 common->atype_asel = dma_spec.args[1];
200 return -EINVAL;
212 struct device *dev = tx_chn->common.dev;
218 tx_chn->udma_tchan_id,
219 tx_chn->common.src_thread,
220 tx_chn->common.dst_thread);
226 struct device *dev = chn->common.dev;
230 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
232 xudma_tchanrt_read(chn->udma_tchanx,
235 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
237 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
239 xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
244 const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
257 req.nav_id = tisci_rm->tisci_dev_id;
258 req.index = tx_chn->udma_tchan_id;
259 if (tx_chn->tx_pause_on_err)
261 if (tx_chn->tx_filt_einfo)
263 if (tx_chn->tx_filt_pswords)
266 if (tx_chn->tx_supr_tdpkt)
268 req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
269 req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
270 req.tx_atype = tx_chn->common.atype_asel;
272 return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
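Worth noting for the TISCI request built above: the UDMAP fetch size is programmed in 32-bit words, not bytes, which is why the byte count from cppi5_hdesc_calc_size() is shifted right by two. A minimal sketch of that arithmetic (hypothetical helper, illustrative sizes, not part of this file):

/* Illustration only: how a descriptor layout maps onto req.tx_fetch_size. */
static u32 my_drv_tx_fetch_words(void)
{
	/* EPIB present, 16 bytes of PS data, 16 bytes of SW data (example values) */
	u32 hdesc_bytes = cppi5_hdesc_calc_size(true, 16, 16);

	return hdesc_bytes >> 2;	/* TISCI expects the fetch size in 32-bit words */
}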
282 tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
283 tx_chn->common.psdata_size,
284 tx_chn->common.swdata_size);
286 if (xudma_is_pktdma(tx_chn->common.udmax))
287 tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
289 tx_chn->udma_tchan_id = -1;
291 /* request and cfg UDMAP TX channel */
292 tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
293 tx_chn->udma_tchan_id);
294 if (IS_ERR(tx_chn->udma_tchanx)) {
295 ret = PTR_ERR(tx_chn->udma_tchanx);
299 tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
301 tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
302 tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
303 dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
304 tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
305 ret = device_register(&tx_chn->common.chan_dev);
308 put_device(&tx_chn->common.chan_dev);
309 tx_chn->common.chan_dev.parent = NULL;
313 if (xudma_is_pktdma(tx_chn->common.udmax)) {
315 tx_chn->common.chan_dev.dma_coherent = true;
316 dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
320 atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
322 if (xudma_is_pktdma(tx_chn->common.udmax))
323 tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
325 tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;
327 /* request and cfg rings */
328 ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
329 tx_chn->udma_tflow_id, -1,
330 &tx_chn->ringtx,
331 &tx_chn->ringtxcq);
333 dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
337 /* Set the dma_dev for the rings to be configured */
338 cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
339 cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;
341 /* Set the ASEL value for DMA rings of PKTDMA */
342 if (xudma_is_pktdma(tx_chn->common.udmax)) {
343 cfg->tx_cfg.asel = tx_chn->common.atype_asel;
344 cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
347 ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
353 ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
359 /* request and cfg psi-l */
360 tx_chn->common.src_thread =
361 xudma_dev_get_psil_base(tx_chn->common.udmax) +
362 tx_chn->udma_tchan_id;
384 return ERR_PTR(-ENOMEM);
386 tx_chn->common.dev = dev;
387 tx_chn->common.swdata_size = cfg->swdata_size;
388 tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
389 tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
390 tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
391 tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
394 ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
395 &tx_chn->common, true);
421 return ERR_PTR(-ENOMEM);
423 tx_chn->common.dev = dev;
424 tx_chn->common.swdata_size = cfg->swdata_size;
425 tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
426 tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
427 tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
428 tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
430 ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
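The two request paths above (lookup by "dma-names" entry, or by an explicit PSI-L thread ID) take the same channel configuration. A minimal bring-up sketch for a hypothetical client driver follows; struct my_drv, the "tx0" channel name and all sizes are illustrative assumptions, and the headers needed are the ones already included at the top of this file:

/* Hypothetical client-driver context reused by the sketches below. */
struct my_drv {
	struct k3_udma_glue_tx_channel *tx_chn;
	struct k3_udma_glue_rx_channel *rx_chn;
	int tx_irq;
	int rx_irq[8];
};

static int my_drv_setup_tx(struct device *dev, struct my_drv *priv)
{
	struct k3_udma_glue_tx_channel_cfg cfg = { };

	cfg.swdata_size		= 16;			/* per-descriptor driver-private area */
	cfg.tx_cfg.size		= 256;			/* TX ring elements */
	cfg.tx_cfg.elm_size	= K3_RINGACC_RING_ELSIZE_8;
	cfg.tx_cfg.mode		= K3_RINGACC_RING_MODE_RING;
	cfg.txcq_cfg		= cfg.tx_cfg;		/* completion ring mirrors the TX ring */

	/* "tx0" must match an entry in the client's "dma-names" DT property */
	priv->tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
	if (IS_ERR(priv->tx_chn))
		return PTR_ERR(priv->tx_chn);

	priv->tx_irq = k3_udma_glue_tx_get_irq(priv->tx_chn);
	if (priv->tx_irq < 0)
		return priv->tx_irq;

	return k3_udma_glue_enable_tx_chn(priv->tx_chn);
}

The client does not fill tx_cfg.dma_dev or tx_cfg.asel: as the code above shows, the glue layer sets those itself before configuring the rings.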
448 if (tx_chn->psil_paired) {
449 xudma_navss_psil_unpair(tx_chn->common.udmax,
450 tx_chn->common.src_thread,
451 tx_chn->common.dst_thread);
452 tx_chn->psil_paired = false;
455 if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
456 xudma_tchan_put(tx_chn->common.udmax,
457 tx_chn->udma_tchanx);
459 if (tx_chn->ringtxcq)
460 k3_ringacc_ring_free(tx_chn->ringtxcq);
462 if (tx_chn->ringtx)
463 k3_ringacc_ring_free(tx_chn->ringtx);
465 if (tx_chn->common.chan_dev.parent) {
466 device_unregister(&tx_chn->common.chan_dev);
467 tx_chn->common.chan_dev.parent = NULL;
478 if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
479 return -ENOMEM;
481 ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
482 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);
484 return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
493 ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
495 atomic_inc(&tx_chn->free_pkts);
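A submit/complete sketch for the push/pop helpers above. my_drv_desc_alloc()/my_drv_desc_free() are hypothetical stand-ins for a descriptor pool (real users typically build this on the k3_cppi_desc_pool helpers); buffer mapping and error unwinding are omitted:

static int my_drv_xmit(struct my_drv *priv, dma_addr_t buf_dma, u32 len)
{
	struct cppi5_host_desc_t *desc_tx;
	dma_addr_t desc_dma;

	desc_tx = my_drv_desc_alloc(priv, &desc_dma);	/* hypothetical pool helper */
	if (!desc_tx)
		return -ENOMEM;

	cppi5_hdesc_init(desc_tx, 0, 0);
	cppi5_hdesc_set_pktlen(desc_tx, len);
	cppi5_hdesc_attach_buf(desc_tx, buf_dma, len, buf_dma, len);

	/* the glue layer sets the return (TXCQ) ring in the descriptor header
	 * and pushes the descriptor onto the TX ring */
	return k3_udma_glue_push_tx_chn(priv->tx_chn, desc_tx, desc_dma);
}

/* Completion side, typically called from the TX IRQ handler or NAPI poll. */
static void my_drv_tx_complete(struct my_drv *priv)
{
	dma_addr_t desc_dma;

	while (!k3_udma_glue_pop_tx_chn(priv->tx_chn, &desc_dma))
		my_drv_desc_free(priv, desc_dma);	/* hypothetical pool helper */
}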
505 ret = xudma_navss_psil_pair(tx_chn->common.udmax,
506 tx_chn->common.src_thread,
507 tx_chn->common.dst_thread);
509 dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
513 tx_chn->psil_paired = true;
515 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
518 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
530 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);
532 xudma_tchanrt_write(tx_chn->udma_tchanx,
536 if (tx_chn->psil_paired) {
537 xudma_navss_psil_unpair(tx_chn->common.udmax,
538 tx_chn->common.src_thread,
539 tx_chn->common.dst_thread);
540 tx_chn->psil_paired = false;
553 xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
556 val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);
559 val = xudma_tchanrt_read(tx_chn->udma_tchanx,
563 dev_err(tx_chn->common.dev, "TX tdown timeout\n");
569 val = xudma_tchanrt_read(tx_chn->udma_tchanx,
572 dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
581 struct device *dev = tx_chn->common.dev;
592 occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
593 dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);
596 ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
598 if (ret != -ENODATA)
599 dev_err(dev, "TX reset pop %d\n", ret);
605 /* reset TXCQ as it is not input for udma - expected to be empty */
606 k3_ringacc_ring_reset(tx_chn->ringtxcq);
607 k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
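The teardown and reset helpers above are normally used together when a client stops its queue: tear the channel down, drain whatever is still on the rings through the cleanup callback, then disable and release. A sketch under those assumptions, reusing the hypothetical names from the earlier examples:

static void my_drv_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct my_drv *priv = data;

	my_drv_desc_free(priv, desc_dma);	/* hypothetical pool helper */
}

static void my_drv_stop_tx(struct my_drv *priv)
{
	k3_udma_glue_tdown_tx_chn(priv->tx_chn, false);
	k3_udma_glue_reset_tx_chn(priv->tx_chn, priv, my_drv_tx_cleanup);
	k3_udma_glue_disable_tx_chn(priv->tx_chn);
	k3_udma_glue_release_tx_chn(priv->tx_chn);
}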
613 return tx_chn->common.hdesc_size;
619 return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
625 if (xudma_is_pktdma(tx_chn->common.udmax)) {
626 tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
627 tx_chn->udma_tflow_id);
629 tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
632 if (!tx_chn->virq)
633 return -ENXIO;
635 return tx_chn->virq;
642 if (xudma_is_pktdma(tx_chn->common.udmax) &&
643 (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
644 return &tx_chn->common.chan_dev;
646 return xudma_get_device(tx_chn->common.udmax);
653 if (!xudma_is_pktdma(tx_chn->common.udmax) ||
654 !tx_chn->common.atype_asel)
657 *addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
664 if (!xudma_is_pktdma(tx_chn->common.udmax) ||
665 !tx_chn->common.atype_asel)
668 *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
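On PKTDMA the ASEL value is carried in the upper bits of descriptor and buffer addresses (K3_ADDRESS_ASEL_SHIFT), so a client converts DMA addresses with the helpers above before writing them into a descriptor and converts them back on completion; on plain UDMA they are no-ops. A sketch, again with hypothetical names:

static void my_drv_attach_buf(struct my_drv *priv, struct cppi5_host_desc_t *desc_tx,
			      dma_addr_t buf_dma, u32 len)
{
	k3_udma_glue_tx_dma_to_cppi5_addr(priv->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_tx, buf_dma, len, buf_dma, len);
}

static dma_addr_t my_drv_detach_buf(struct my_drv *priv, struct cppi5_host_desc_t *desc_tx,
				    u32 *len)
{
	dma_addr_t buf_dma;

	cppi5_hdesc_get_obuf(desc_tx, &buf_dma, len);
	k3_udma_glue_tx_cppi5_to_dma_addr(priv->tx_chn, &buf_dma);

	return buf_dma;		/* now usable with dma_unmap_single() and friends */
}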
674 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
685 req.nav_id = tisci_rm->tisci_dev_id;
686 req.index = rx_chn->udma_rchan_id;
687 req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
691 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
694 if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
695 rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
699 req.flowid_start = rx_chn->flow_id_base;
700 req.flowid_cnt = rx_chn->flow_num;
703 req.rx_atype = rx_chn->common.atype_asel;
705 ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
707 dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
708 rx_chn->udma_rchan_id, ret);
716 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
718 if (IS_ERR_OR_NULL(flow->udma_rflow))
721 if (flow->ringrxfdq)
722 k3_ringacc_ring_free(flow->ringrxfdq);
724 if (flow->ringrx)
725 k3_ringacc_ring_free(flow->ringrx);
727 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
728 flow->udma_rflow = NULL;
729 rx_chn->flows_ready--;
736 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
737 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
738 struct device *dev = rx_chn->common.dev;
744 flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
745 flow->udma_rflow_id);
746 if (IS_ERR(flow->udma_rflow)) {
747 ret = PTR_ERR(flow->udma_rflow);
752 if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
753 ret = -ENODEV;
757 if (xudma_is_pktdma(rx_chn->common.udmax)) {
758 rx_ringfdq_id = flow->udma_rflow_id +
759 xudma_get_rflow_ring_offset(rx_chn->common.udmax);
762 rx_ring_id = flow_cfg->ring_rxq_id;
763 rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
766 /* request and cfg rings */
767 ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
769 &flow->ringrxfdq,
770 &flow->ringrx);
772 dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
776 /* Set the dma_dev for the rings to be configured */
777 flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
778 flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;
780 /* Set the ASEL value for DMA rings of PKTDMA */
781 if (xudma_is_pktdma(rx_chn->common.udmax)) {
782 flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
783 flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
786 ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
792 ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
798 if (rx_chn->remote) {
802 rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
803 rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
822 req.nav_id = tisci_rm->tisci_dev_id;
823 req.flow_index = flow->udma_rflow_id;
824 if (rx_chn->common.epib)
826 if (rx_chn->common.psdata_size)
828 if (flow_cfg->rx_error_handling)
833 req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
841 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
843 dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
848 rx_chn->flows_ready++;
850 flow->udma_rflow_id, rx_chn->flows_ready);
855 k3_ringacc_ring_free(flow->ringrxfdq);
856 k3_ringacc_ring_free(flow->ringrx);
859 xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
860 flow->udma_rflow = NULL;
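For reference, a sketch of the per-flow configuration a client might hand to this code (via k3_udma_glue_rx_flow_init() or cfg->def_flow_cfg). The values are illustrative assumptions; K3_RINGACC_RING_ID_ANY lets the ring accelerator pick the RX and FDQ rings:

static void my_drv_init_flow_cfg(struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	memset(flow_cfg, 0, sizeof(*flow_cfg));

	flow_cfg->rx_cfg.size		= 256;
	flow_cfg->rx_cfg.elm_size	= K3_RINGACC_RING_ELSIZE_8;
	flow_cfg->rx_cfg.mode		= K3_RINGACC_RING_MODE_RING;
	flow_cfg->rxfdq_cfg		= flow_cfg->rx_cfg;
	flow_cfg->ring_rxq_id		= K3_RINGACC_RING_ID_ANY;
	flow_cfg->ring_rxfdq0_id	= K3_RINGACC_RING_ID_ANY;
	flow_cfg->rx_error_handling	= false;
}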
867 struct device *dev = chn->common.dev;
879 chn->udma_rchan_id,
880 chn->common.src_thread,
881 chn->common.dst_thread,
882 chn->common.epib,
883 chn->common.hdesc_size,
884 chn->common.psdata_size,
885 chn->common.swdata_size,
886 chn->flow_id_base,
887 chn->flow_num);
893 struct device *dev = chn->common.dev;
898 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
900 xudma_rchanrt_read(chn->udma_rchanx,
903 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
905 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
907 xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
917 if (cfg->flow_id_use_rxchan_id)
921 if (rx_chn->flow_id_base != -1 &&
922 !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
926 ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
927 rx_chn->flow_id_base,
928 rx_chn->flow_num);
930 dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
931 rx_chn->flow_id_base, rx_chn->flow_num, ret);
934 rx_chn->flow_id_base = ret;
947 if (cfg->flow_id_num <= 0)
948 return ERR_PTR(-EINVAL);
950 if (cfg->flow_id_num != 1 &&
951 (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
952 return ERR_PTR(-EINVAL);
956 return ERR_PTR(-ENOMEM);
958 rx_chn->common.dev = dev;
959 rx_chn->common.swdata_size = cfg->swdata_size;
960 rx_chn->remote = false;
963 ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
964 &rx_chn->common, false);
968 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
969 rx_chn->common.psdata_size,
970 rx_chn->common.swdata_size);
972 ep_cfg = rx_chn->common.ep_config;
974 if (xudma_is_pktdma(rx_chn->common.udmax)) {
975 rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
976 rx_chn->single_fdq = false;
978 rx_chn->udma_rchan_id = -1;
979 rx_chn->single_fdq = true;
983 rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
984 rx_chn->udma_rchan_id);
985 if (IS_ERR(rx_chn->udma_rchanx)) {
986 ret = PTR_ERR(rx_chn->udma_rchanx);
990 rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
992 rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
993 rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
994 dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
995 rx_chn->udma_rchan_id, rx_chn->common.src_thread);
996 ret = device_register(&rx_chn->common.chan_dev);
999 put_device(&rx_chn->common.chan_dev);
1000 rx_chn->common.chan_dev.parent = NULL;
1004 if (xudma_is_pktdma(rx_chn->common.udmax)) {
1006 rx_chn->common.chan_dev.dma_coherent = true;
1007 dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
1011 if (xudma_is_pktdma(rx_chn->common.udmax)) {
1012 int flow_start = cfg->flow_id_base;
1015 if (flow_start == -1)
1016 flow_start = ep_cfg->flow_start;
1018 flow_end = flow_start + cfg->flow_id_num - 1;
1019 if (flow_start < ep_cfg->flow_start ||
1020 flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
1022 ret = -EINVAL;
1025 rx_chn->flow_id_base = flow_start;
1027 rx_chn->flow_id_base = cfg->flow_id_base;
1030 if (cfg->flow_id_use_rxchan_id)
1031 rx_chn->flow_id_base = rx_chn->udma_rchan_id;
1034 rx_chn->flow_num = cfg->flow_id_num;
1036 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
1037 sizeof(*rx_chn->flows), GFP_KERNEL);
1038 if (!rx_chn->flows) {
1039 ret = -ENOMEM;
1047 for (i = 0; i < rx_chn->flow_num; i++)
1048 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
1050 /* request and cfg psi-l */
1051 rx_chn->common.dst_thread =
1052 xudma_dev_get_psil_base(rx_chn->common.udmax) +
1053 rx_chn->udma_rchan_id;
1062 if (cfg->def_flow_cfg) {
1063 ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
1084 rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
1085 rx_chn->common.psdata_size,
1086 rx_chn->common.swdata_size);
1088 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
1089 sizeof(*rx_chn->flows), GFP_KERNEL);
1090 if (!rx_chn->flows)
1091 return -ENOMEM;
1093 rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
1094 rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
1095 dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
1096 rx_chn->common.src_thread, rx_chn->flow_id_base);
1097 ret = device_register(&rx_chn->common.chan_dev);
1100 put_device(&rx_chn->common.chan_dev);
1101 rx_chn->common.chan_dev.parent = NULL;
1105 if (xudma_is_pktdma(rx_chn->common.udmax)) {
1107 rx_chn->common.chan_dev.dma_coherent = true;
1108 dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
1110 rx_chn->single_fdq = false;
1112 rx_chn->single_fdq = true;
1119 for (i = 0; i < rx_chn->flow_num; i++)
1120 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
1134 if (cfg->flow_id_num <= 0 ||
1135 cfg->flow_id_use_rxchan_id ||
1136 cfg->def_flow_cfg ||
1137 cfg->flow_id_base < 0)
1138 return ERR_PTR(-EINVAL);
1147 return ERR_PTR(-ENOMEM);
1149 rx_chn->common.dev = dev;
1150 rx_chn->common.swdata_size = cfg->swdata_size;
1151 rx_chn->remote = true;
1152 rx_chn->udma_rchan_id = -1;
1153 rx_chn->flow_num = cfg->flow_id_num;
1154 rx_chn->flow_id_base = cfg->flow_id_base;
1155 rx_chn->psil_paired = false;
1158 ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
1159 &rx_chn->common, false);
1182 if (cfg->flow_id_num <= 0 ||
1183 cfg->flow_id_use_rxchan_id ||
1184 cfg->def_flow_cfg ||
1185 cfg->flow_id_base < 0)
1186 return ERR_PTR(-EINVAL);
1195 return ERR_PTR(-ENOMEM);
1197 rx_chn->common.dev = dev;
1198 rx_chn->common.swdata_size = cfg->swdata_size;
1199 rx_chn->remote = true;
1200 rx_chn->udma_rchan_id = -1;
1201 rx_chn->flow_num = cfg->flow_id_num;
1202 rx_chn->flow_id_base = cfg->flow_id_base;
1203 rx_chn->psil_paired = false;
1205 ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
1225 if (cfg->remote)
1236 if (IS_ERR_OR_NULL(rx_chn->common.udmax))
1239 if (rx_chn->psil_paired) {
1240 xudma_navss_psil_unpair(rx_chn->common.udmax,
1241 rx_chn->common.src_thread,
1242 rx_chn->common.dst_thread);
1243 rx_chn->psil_paired = false;
1246 for (i = 0; i < rx_chn->flow_num; i++)
1249 if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
1250 xudma_free_gp_rflow_range(rx_chn->common.udmax,
1251 rx_chn->flow_id_base,
1252 rx_chn->flow_num);
1254 if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
1255 xudma_rchan_put(rx_chn->common.udmax,
1256 rx_chn->udma_rchanx);
1258 if (rx_chn->common.chan_dev.parent) {
1259 device_unregister(&rx_chn->common.chan_dev);
1260 rx_chn->common.chan_dev.parent = NULL;
1269 if (flow_idx >= rx_chn->flow_num)
1270 return -EINVAL;
1281 if (flow_idx >= rx_chn->flow_num)
1282 return -EINVAL;
1284 flow = &rx_chn->flows[flow_idx];
1286 return k3_ringacc_get_ring_id(flow->ringrxfdq);
1292 return rx_chn->flow_id_base;
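Putting the RX side together: a hypothetical bring-up that requests the channel with several flows, lets the glue code pick the flow range (flow_id_base = -1), configures every flow (all flows must be ready before enable, per the flows_ready check below), and then enables the channel. Names and counts are illustrative:

static int my_drv_setup_rx(struct device *dev, struct my_drv *priv, int nr_flows)
{
	struct k3_udma_glue_rx_channel_cfg cfg = { };
	int i, ret;

	cfg.swdata_size	 = 16;
	cfg.flow_id_base = -1;		/* let the glue layer pick/allocate the range */
	cfg.flow_id_num	 = nr_flows;

	/* "rx" must match an entry in the client's "dma-names" DT property */
	priv->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
	if (IS_ERR(priv->rx_chn))
		return PTR_ERR(priv->rx_chn);

	for (i = 0; i < nr_flows; i++) {
		struct k3_udma_glue_rx_flow_cfg flow_cfg;

		my_drv_init_flow_cfg(&flow_cfg);	/* see the earlier flow_cfg sketch */

		ret = k3_udma_glue_rx_flow_init(priv->rx_chn, i, &flow_cfg);
		if (ret)
			return ret;

		priv->rx_irq[i] = k3_udma_glue_rx_get_irq(priv->rx_chn, i);
		if (priv->rx_irq[i] < 0)
			return priv->rx_irq[i];
	}

	/* free RX buffers are pushed to each flow's FDQ (see the push helper
	 * below) before traffic is expected */
	return k3_udma_glue_enable_rx_chn(priv->rx_chn);
}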
1299 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
1300 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
1301 struct device *dev = rx_chn->common.dev;
1307 if (!rx_chn->remote)
1308 return -EINVAL;
1310 rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
1311 rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
1321 req.nav_id = tisci_rm->tisci_dev_id;
1322 req.flow_index = flow->udma_rflow_id;
1329 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
1331 dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
1342 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
1343 const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
1344 struct device *dev = rx_chn->common.dev;
1348 if (!rx_chn->remote)
1349 return -EINVAL;
1358 req.nav_id = tisci_rm->tisci_dev_id;
1359 req.flow_index = flow->udma_rflow_id;
1366 ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
1368 dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
1380 if (rx_chn->remote)
1381 return -EINVAL;
1383 if (rx_chn->flows_ready < rx_chn->flow_num)
1384 return -EINVAL;
1386 ret = xudma_navss_psil_pair(rx_chn->common.udmax,
1387 rx_chn->common.src_thread,
1388 rx_chn->common.dst_thread);
1390 dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
1394 rx_chn->psil_paired = true;
1396 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
1399 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
1411 xudma_rchanrt_write(rx_chn->udma_rchanx,
1413 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);
1417 if (rx_chn->psil_paired) {
1418 xudma_navss_psil_unpair(rx_chn->common.udmax,
1419 rx_chn->common.src_thread,
1420 rx_chn->common.dst_thread);
1421 rx_chn->psil_paired = false;
1432 if (rx_chn->remote)
1437 xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
1440 val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);
1443 val = xudma_rchanrt_read(rx_chn->udma_rchanx,
1447 dev_err(rx_chn->common.dev, "RX tdown timeout\n");
1453 val = xudma_rchanrt_read(rx_chn->udma_rchanx,
1456 dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
1465 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1466 struct device *dev = rx_chn->common.dev;
1470 /* reset RXCQ as it is not input for udma - expected to be empty */
1471 occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
1475 if (rx_chn->single_fdq && flow_num)
1485 occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
1489 ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
1491 if (ret != -ENODATA)
1498 k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
1501 k3_ringacc_ring_reset(flow->ringrx);
1509 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1511 return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
1518 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
1520 return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
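Receive-path sketch for the two helpers above: free buffers are queued on a flow's free-descriptor queue with the push helper, and completed packets are popped from the flow's RX ring, typically from that flow's IRQ or NAPI handler. Hypothetical helpers as before:

static int my_drv_rx_refill(struct my_drv *priv, u32 flow, dma_addr_t buf_dma, u32 len)
{
	struct cppi5_host_desc_t *desc_rx;
	dma_addr_t desc_dma;

	desc_rx = my_drv_desc_alloc(priv, &desc_dma);	/* hypothetical pool helper */
	if (!desc_rx)
		return -ENOMEM;

	cppi5_hdesc_init(desc_rx, 0, 0);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, len, buf_dma, len);

	return k3_udma_glue_push_rx_chn(priv->rx_chn, flow, desc_rx, desc_dma);
}

static void my_drv_rx_poll(struct my_drv *priv, u32 flow)
{
	dma_addr_t desc_dma;

	while (!k3_udma_glue_pop_rx_chn(priv->rx_chn, flow, &desc_dma)) {
		/* translate desc_dma back to the CPU descriptor, hand the
		 * buffer up the stack, then refill the FDQ */
	}
}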
1529 flow = &rx_chn->flows[flow_num];
1531 if (xudma_is_pktdma(rx_chn->common.udmax)) {
1532 flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
1533 flow->udma_rflow_id);
1535 flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
1538 if (!flow->virq)
1539 return -ENXIO;
1541 return flow->virq;
1548 if (xudma_is_pktdma(rx_chn->common.udmax) &&
1549 (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
1550 return &rx_chn->common.chan_dev;
1552 return xudma_get_device(rx_chn->common.udmax);
1559 if (!xudma_is_pktdma(rx_chn->common.udmax) ||
1560 !rx_chn->common.atype_asel)
1563 *addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
1570 if (!xudma_is_pktdma(rx_chn->common.udmax) ||
1571 !rx_chn->common.atype_asel)
1574 *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);