xref: /linux/drivers/dma/switchtec_dma.c (revision d662a710c668a86a39ebaad334d9960a0cc776c2)
1d9587042SKelvin Cao // SPDX-License-Identifier: GPL-2.0
2d9587042SKelvin Cao /*
3d9587042SKelvin Cao  * Microchip Switchtec(tm) DMA Controller Driver
4d9587042SKelvin Cao  * Copyright (c) 2025, Kelvin Cao <kelvin.cao@microchip.com>
5d9587042SKelvin Cao  * Copyright (c) 2025, Microchip Corporation
6d9587042SKelvin Cao  */
7d9587042SKelvin Cao 
8d9587042SKelvin Cao #include <linux/bitfield.h>
9d9587042SKelvin Cao #include <linux/circ_buf.h>
10d9587042SKelvin Cao #include <linux/dmaengine.h>
11d9587042SKelvin Cao #include <linux/module.h>
12d9587042SKelvin Cao #include <linux/pci.h>
13d9587042SKelvin Cao #include <linux/delay.h>
14d9587042SKelvin Cao #include <linux/iopoll.h>
15d9587042SKelvin Cao 
16d9587042SKelvin Cao #include "dmaengine.h"
17d9587042SKelvin Cao 
18d9587042SKelvin Cao MODULE_DESCRIPTION("Switchtec PCIe Switch DMA Engine");
19d9587042SKelvin Cao MODULE_LICENSE("GPL");
20d9587042SKelvin Cao MODULE_AUTHOR("Kelvin Cao");
21d9587042SKelvin Cao 
2230eba9dfSKelvin Cao #define	SWITCHTEC_DMAC_CHAN_CTRL_OFFSET		0x1000
2330eba9dfSKelvin Cao #define	SWITCHTEC_DMAC_CHAN_CFG_STS_OFFSET	0x160000
2430eba9dfSKelvin Cao 
2530eba9dfSKelvin Cao #define SWITCHTEC_DMA_CHAN_HW_REGS_SIZE		0x1000
2630eba9dfSKelvin Cao #define SWITCHTEC_DMA_CHAN_FW_REGS_SIZE		0x80
2730eba9dfSKelvin Cao 
2830eba9dfSKelvin Cao #define SWITCHTEC_REG_CAP		0x80
2930eba9dfSKelvin Cao #define SWITCHTEC_REG_CHAN_CNT		0x84
3030eba9dfSKelvin Cao #define SWITCHTEC_REG_TAG_LIMIT		0x90
3130eba9dfSKelvin Cao #define SWITCHTEC_REG_CHAN_STS_VEC	0x94
3230eba9dfSKelvin Cao #define SWITCHTEC_REG_SE_BUF_CNT	0x98
3330eba9dfSKelvin Cao #define SWITCHTEC_REG_SE_BUF_BASE	0x9a
3430eba9dfSKelvin Cao 
35*3af11daeSKelvin Cao #define SWITCHTEC_DESC_MAX_SIZE		0x100000
36*3af11daeSKelvin Cao 
3730eba9dfSKelvin Cao #define SWITCHTEC_CHAN_CTRL_PAUSE	BIT(0)
3830eba9dfSKelvin Cao #define SWITCHTEC_CHAN_CTRL_HALT	BIT(1)
3930eba9dfSKelvin Cao #define SWITCHTEC_CHAN_CTRL_RESET	BIT(2)
4030eba9dfSKelvin Cao #define SWITCHTEC_CHAN_CTRL_ERR_PAUSE	BIT(3)
4130eba9dfSKelvin Cao 
4230eba9dfSKelvin Cao #define SWITCHTEC_CHAN_STS_PAUSED	BIT(9)
4330eba9dfSKelvin Cao #define SWITCHTEC_CHAN_STS_HALTED	BIT(10)
4430eba9dfSKelvin Cao #define SWITCHTEC_CHAN_STS_PAUSED_MASK	GENMASK(29, 13)
4530eba9dfSKelvin Cao 
46*3af11daeSKelvin Cao #define SWITCHTEC_INVALID_HFID 0xffff
47*3af11daeSKelvin Cao 
4830eba9dfSKelvin Cao #define SWITCHTEC_DMA_SQ_SIZE	SZ_32K
4930eba9dfSKelvin Cao #define SWITCHTEC_DMA_CQ_SIZE	SZ_32K
5030eba9dfSKelvin Cao 
5130eba9dfSKelvin Cao #define SWITCHTEC_DMA_RING_SIZE	SZ_32K
5230eba9dfSKelvin Cao 
/*
 * Human-readable description of channel error status bits.  Array indices
 * correspond to bit positions in the channel status register; the range
 * 13..29 matches SWITCHTEC_CHAN_STS_PAUSED_MASK (GENMASK(29, 13)).
 */
static const char * const channel_status_str[] = {
	[13] = "received a VDM with length error status",
	[14] = "received a VDM or Cpl with Unsupported Request error status",
	[15] = "received a VDM or Cpl with Completion Abort error status",
	[16] = "received a VDM with ECRC error status",
	[17] = "received a VDM with EP error status",
	[18] = "received a VDM with Reserved Cpl error status",
	[19] = "received only part of split SE CplD",
	[20] = "the ISP_DMAC detected a Completion Time Out",
	[21] = "received a Cpl with Unsupported Request status",
	[22] = "received a Cpl with Completion Abort status",
	[23] = "received a Cpl with a reserved status",
	[24] = "received a TLP with ECRC error status in its metadata",
	[25] = "received a TLP with the EP bit set in the header",
	[26] = "the ISP_DMAC tried to process a SE with an invalid Connection ID",
	[27] = "the ISP_DMAC tried to process a SE with an invalid Remote Host interrupt",
	[28] = "a reserved opcode was detected in an SE",
	[29] = "received a SE Cpl with error status",
};
7230eba9dfSKelvin Cao 
/*
 * Per-channel hardware-controlled register block, mapped at
 * SWITCHTEC_DMAC_CHAN_CTRL_OFFSET + index * SWITCHTEC_DMA_CHAN_HW_REGS_SIZE.
 * Layout (offsets assume no padding between fields):
 */
struct chan_hw_regs {
	u16 cq_head;	/* 0x0: completion queue head (driver-advanced) */
	u16 rsvd1;
	u16 sq_tail;	/* 0x4: submission queue tail (driver-advanced) */
	u16 rsvd2;
	u8 ctrl;	/* 0x8: SWITCHTEC_CHAN_CTRL_* bits (8-bit register) */
	u8 rsvd3[3];
	u16 status;	/* 0xc: SWITCHTEC_CHAN_STS_* bits */
	u16 rsvd4;
};
8330eba9dfSKelvin Cao 
8430eba9dfSKelvin Cao #define PERF_BURST_SCALE_MASK	GENMASK_U32(3,   2)
8530eba9dfSKelvin Cao #define PERF_MRRS_MASK		GENMASK_U32(6,   4)
8630eba9dfSKelvin Cao #define PERF_INTERVAL_MASK	GENMASK_U32(10,  8)
8730eba9dfSKelvin Cao #define PERF_BURST_SIZE_MASK	GENMASK_U32(14, 12)
8830eba9dfSKelvin Cao #define PERF_ARB_WEIGHT_MASK	GENMASK_U32(31, 24)
8930eba9dfSKelvin Cao 
9030eba9dfSKelvin Cao #define SE_BUF_BASE_MASK	GENMASK_U32(10,  2)
9130eba9dfSKelvin Cao #define SE_BUF_LEN_MASK		GENMASK_U32(20, 12)
9230eba9dfSKelvin Cao #define SE_THRESH_MASK		GENMASK_U32(31, 23)
9330eba9dfSKelvin Cao 
9430eba9dfSKelvin Cao #define SWITCHTEC_CHAN_ENABLE	BIT(1)
9530eba9dfSKelvin Cao 
/*
 * Per-channel firmware-controlled register block, mapped at
 * SWITCHTEC_DMAC_CHAN_CFG_STS_OFFSET + index * SWITCHTEC_DMA_CHAN_FW_REGS_SIZE.
 */
struct chan_fw_regs {
	u32 valid_en_se;	/* channel enable (SWITCHTEC_CHAN_ENABLE) and SE config */
	u32 cq_base_lo;		/* completion queue DMA base address */
	u32 cq_base_hi;
	u16 cq_size;		/* completion queue size in entries */
	u16 rsvd1;
	u32 sq_base_lo;		/* submission queue DMA base address */
	u32 sq_base_hi;
	u16 sq_size;		/* submission queue size in entries */
	u16 rsvd2;
	u32 int_vec;		/* interrupt vector for this channel */
	u32 perf_cfg;		/* PERF_*_MASK fields */
	u32 rsvd3;
	/* performance monitoring counters (read-only from driver's view —
	 * NOTE(review): assumed, the driver only reads them; confirm) */
	u32 perf_latency_selector;
	u32 perf_fetched_se_cnt_lo;
	u32 perf_fetched_se_cnt_hi;
	u32 perf_byte_cnt_lo;
	u32 perf_byte_cnt_hi;
	u32 rsvd4;
	u16 perf_se_pending;
	u16 perf_se_buf_empty;
	u32 perf_chan_idle;
	u32 perf_lat_max;
	u32 perf_lat_min;
	u32 perf_lat_last;
	/* current hardware queue positions and phase state */
	u16 sq_current;
	u16 sq_phase;
	u16 cq_current;
	u16 cq_phase;
};
12630eba9dfSKelvin Cao 
/* Per-channel driver state wrapping a dmaengine channel. */
struct switchtec_dma_chan {
	struct switchtec_dma_dev *swdma_dev;	/* owning device */
	struct dma_chan dma_chan;		/* dmaengine channel handle */
	struct chan_hw_regs __iomem *mmio_chan_hw;	/* hardware register block */
	struct chan_fw_regs __iomem *mmio_chan_fw;	/* firmware register block */

	/* Serialize hardware control register access */
	spinlock_t hw_ctrl_lock;

	/* Bottom half that reaps completions outside hard-irq context */
	struct tasklet_struct desc_task;

	/* Serialize descriptor preparation */
	spinlock_t submit_lock;
	bool ring_active;	/* new descriptors accepted when true */
	int cid;		/* next command ID to assign to an SE */

	/* Serialize completion processing */
	spinlock_t complete_lock;
	bool comp_ring_active;	/* completion reaping enabled when true */

	/* channel index and irq */
	int index;
	int irq;

	/*
	 * In driver context, head is advanced by producer while
	 * tail is advanced by consumer.
	 */

	/* the head and tail for both desc_ring and hw_sq */
	int head;
	int tail;
	int phase_tag;	/* expected CQ phase; flips each CQ wrap-around */
	struct switchtec_dma_hw_se_desc *hw_sq;	/* submission queue (DMA coherent) */
	dma_addr_t dma_addr_sq;

	/* the tail for hw_cq */
	int cq_tail;
	struct switchtec_dma_hw_ce *hw_cq;	/* completion queue (DMA coherent) */
	dma_addr_t dma_addr_cq;

	struct list_head list;

	/* software descriptors, indexed in lock-step with hw_sq entries */
	struct switchtec_dma_desc *desc_ring[SWITCHTEC_DMA_RING_SIZE];
};
17230eba9dfSKelvin Cao 
/* Per-PCI-device driver state. */
struct switchtec_dma_dev {
	struct dma_device dma_dev;	/* dmaengine device registration */
	/*
	 * RCU-protected so MMIO paths can detect device removal: all
	 * register accessors check this for NULL and bail with -ENODEV
	 * (presumably cleared on surprise removal — not visible here).
	 */
	struct pci_dev __rcu *pdev;
	void __iomem *bar;		/* mapped register BAR */

	struct switchtec_dma_chan **swdma_chans;	/* chan_cnt channels */
	int chan_cnt;
	int chan_status_irq;		/* irq for global channel status events */
};
182d9587042SKelvin Cao 
/* Operation selector for channel_op(). */
enum chan_op {
	ENABLE_CHAN,	/* set SWITCHTEC_CHAN_ENABLE in valid_en_se */
	DISABLE_CHAN,	/* clear SWITCHTEC_CHAN_ENABLE in valid_en_se */
};
18730eba9dfSKelvin Cao 
/* Hardware SE (submission element) opcodes placed in hw_se_desc.opc. */
enum switchtec_dma_opcode {
	SWITCHTEC_DMA_OPC_MEMCPY = 0,	/* memory-to-memory copy */
	SWITCHTEC_DMA_OPC_RDIMM = 0x1,	/* read immediate (per name — not used here) */
	SWITCHTEC_DMA_OPC_WRIMM = 0x2,	/* write immediate (per name — not used here) */
	SWITCHTEC_DMA_OPC_RHI = 0x6,	/* remote host interrupt (per name) */
	SWITCHTEC_DMA_OPC_NOP = 0x7,
};
19530eba9dfSKelvin Cao 
/*
 * Hardware submission element (SE) descriptor, 32 bytes, little-endian,
 * written by the driver into hw_sq and fetched by the DMA engine.
 */
struct switchtec_dma_hw_se_desc {
	u8 opc;			/* enum switchtec_dma_opcode */
	u8 ctrl;		/* SWITCHTEC_SE_* control flags */
	__le16 tlp_setting;
	__le16 rsvd1;
	__le16 cid;		/* command ID, echoed back in the completion */
	__le32 byte_cnt;	/* transfer length in bytes */
	__le32 addr_lo; /* SADDR_LO/WIADDR_LO */
	__le32 addr_hi; /* SADDR_HI/WIADDR_HI */
	__le32 daddr_lo;	/* destination address */
	__le32 daddr_hi;
	__le16 dfid;		/* destination fabric ID */
	__le16 sfid;		/* source fabric ID */
};
21030eba9dfSKelvin Cao 
211*3af11daeSKelvin Cao #define SWITCHTEC_SE_DFM		BIT(5)
212*3af11daeSKelvin Cao #define SWITCHTEC_SE_LIOF		BIT(6)
213*3af11daeSKelvin Cao #define SWITCHTEC_SE_BRR		BIT(7)
214*3af11daeSKelvin Cao #define SWITCHTEC_SE_CID_MASK		GENMASK(15, 0)
215*3af11daeSKelvin Cao 
21630eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_LEN_ERR		BIT(0)
21730eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_UR		BIT(1)
21830eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_CA		BIT(2)
21930eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_RSVD_CPL	BIT(3)
22030eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_ECRC_ERR	BIT(4)
22130eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_EP_SET		BIT(5)
22230eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_D_RD_CTO	BIT(8)
22330eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_D_RIMM_UR	BIT(9)
22430eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_D_RIMM_CA	BIT(10)
22530eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_D_RIMM_RSVD_CPL	BIT(11)
22630eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_D_ECRC		BIT(12)
22730eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_D_EP_SET	BIT(13)
22830eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_D_BAD_CONNID	BIT(14)
22930eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_D_BAD_RHI_ADDR	BIT(15)
23030eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_D_INVD_CMD	BIT(16)
23130eba9dfSKelvin Cao #define SWITCHTEC_CE_SC_MASK		GENMASK(16, 0)
23230eba9dfSKelvin Cao 
/*
 * Hardware completion element (CE), 32 bytes, little-endian, written by
 * the DMA engine into hw_cq.  phase_tag is written last by hardware and
 * is what the driver polls to detect a new entry.
 */
struct switchtec_dma_hw_ce {
	__le32 rdimm_cpl_dw0;	/* RDIMM completion data */
	__le32 rdimm_cpl_dw1;
	__le32 rsvd1;
	__le32 cpl_byte_cnt;	/* bytes actually transferred */
	__le16 sq_head;
	__le16 rsvd2;
	__le32 rsvd3;
	__le32 sts_code;	/* SWITCHTEC_CE_SC_* error bits; 0 on success */
	__le16 cid;		/* command ID of the completed SE */
	__le16 phase_tag;	/* flips each CQ wrap; compared to chan phase_tag */
};
24530eba9dfSKelvin Cao 
/* Software descriptor tracking one submitted SE. */
struct switchtec_dma_desc {
	struct dma_async_tx_descriptor txd;	/* dmaengine descriptor */
	struct switchtec_dma_hw_se_desc *hw;	/* paired entry in hw_sq */
	u32 orig_size;		/* requested length, for residue reporting */
	bool completed;		/* CE seen; pending in-order completion */
};
25230eba9dfSKelvin Cao 
25330eba9dfSKelvin Cao static int wait_for_chan_status(struct chan_hw_regs __iomem *chan_hw, u32 mask,
25430eba9dfSKelvin Cao 				bool set)
25530eba9dfSKelvin Cao {
25630eba9dfSKelvin Cao 	u32 status;
25730eba9dfSKelvin Cao 
25830eba9dfSKelvin Cao 	return readl_poll_timeout_atomic(&chan_hw->status, status,
25930eba9dfSKelvin Cao 					 (set && (status & mask)) ||
26030eba9dfSKelvin Cao 					 (!set && !(status & mask)),
26130eba9dfSKelvin Cao 					 10, 100 * USEC_PER_MSEC);
26230eba9dfSKelvin Cao }
26330eba9dfSKelvin Cao 
26430eba9dfSKelvin Cao static int halt_channel(struct switchtec_dma_chan *swdma_chan)
26530eba9dfSKelvin Cao {
26630eba9dfSKelvin Cao 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
26730eba9dfSKelvin Cao 	struct pci_dev *pdev;
26830eba9dfSKelvin Cao 	int ret;
26930eba9dfSKelvin Cao 
27030eba9dfSKelvin Cao 	rcu_read_lock();
27130eba9dfSKelvin Cao 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
27230eba9dfSKelvin Cao 	if (!pdev) {
27330eba9dfSKelvin Cao 		ret = -ENODEV;
27430eba9dfSKelvin Cao 		goto unlock_and_exit;
27530eba9dfSKelvin Cao 	}
27630eba9dfSKelvin Cao 
27730eba9dfSKelvin Cao 	spin_lock(&swdma_chan->hw_ctrl_lock);
27830eba9dfSKelvin Cao 	writeb(SWITCHTEC_CHAN_CTRL_HALT, &chan_hw->ctrl);
27930eba9dfSKelvin Cao 	ret = wait_for_chan_status(chan_hw, SWITCHTEC_CHAN_STS_HALTED, true);
28030eba9dfSKelvin Cao 	spin_unlock(&swdma_chan->hw_ctrl_lock);
28130eba9dfSKelvin Cao 
28230eba9dfSKelvin Cao unlock_and_exit:
28330eba9dfSKelvin Cao 	rcu_read_unlock();
28430eba9dfSKelvin Cao 	return ret;
28530eba9dfSKelvin Cao }
28630eba9dfSKelvin Cao 
28730eba9dfSKelvin Cao static int unhalt_channel(struct switchtec_dma_chan *swdma_chan)
28830eba9dfSKelvin Cao {
28930eba9dfSKelvin Cao 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
29030eba9dfSKelvin Cao 	struct pci_dev *pdev;
29130eba9dfSKelvin Cao 	u8 ctrl;
29230eba9dfSKelvin Cao 	int ret;
29330eba9dfSKelvin Cao 
29430eba9dfSKelvin Cao 	rcu_read_lock();
29530eba9dfSKelvin Cao 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
29630eba9dfSKelvin Cao 	if (!pdev) {
29730eba9dfSKelvin Cao 		ret = -ENODEV;
29830eba9dfSKelvin Cao 		goto unlock_and_exit;
29930eba9dfSKelvin Cao 	}
30030eba9dfSKelvin Cao 
30130eba9dfSKelvin Cao 	spin_lock(&swdma_chan->hw_ctrl_lock);
30230eba9dfSKelvin Cao 	ctrl = readb(&chan_hw->ctrl);
30330eba9dfSKelvin Cao 	ctrl &= ~SWITCHTEC_CHAN_CTRL_HALT;
30430eba9dfSKelvin Cao 	writeb(ctrl, &chan_hw->ctrl);
30530eba9dfSKelvin Cao 	ret = wait_for_chan_status(chan_hw, SWITCHTEC_CHAN_STS_HALTED, false);
30630eba9dfSKelvin Cao 	spin_unlock(&swdma_chan->hw_ctrl_lock);
30730eba9dfSKelvin Cao 
30830eba9dfSKelvin Cao unlock_and_exit:
30930eba9dfSKelvin Cao 	rcu_read_unlock();
31030eba9dfSKelvin Cao 	return ret;
31130eba9dfSKelvin Cao }
31230eba9dfSKelvin Cao 
/*
 * Read back a channel register so that preceding posted MMIO writes are
 * forced out to the device (standard PCI posted-write flush).
 */
static void flush_pci_write(struct chan_hw_regs __iomem *chan_hw)
{
	readl(&chan_hw->cq_head);
}
31730eba9dfSKelvin Cao 
31830eba9dfSKelvin Cao static int reset_channel(struct switchtec_dma_chan *swdma_chan)
31930eba9dfSKelvin Cao {
32030eba9dfSKelvin Cao 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
32130eba9dfSKelvin Cao 	struct pci_dev *pdev;
32230eba9dfSKelvin Cao 
32330eba9dfSKelvin Cao 	rcu_read_lock();
32430eba9dfSKelvin Cao 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
32530eba9dfSKelvin Cao 	if (!pdev) {
32630eba9dfSKelvin Cao 		rcu_read_unlock();
32730eba9dfSKelvin Cao 		return -ENODEV;
32830eba9dfSKelvin Cao 	}
32930eba9dfSKelvin Cao 
33030eba9dfSKelvin Cao 	spin_lock(&swdma_chan->hw_ctrl_lock);
33130eba9dfSKelvin Cao 	writel(SWITCHTEC_CHAN_CTRL_RESET | SWITCHTEC_CHAN_CTRL_ERR_PAUSE,
33230eba9dfSKelvin Cao 	       &chan_hw->ctrl);
33330eba9dfSKelvin Cao 	flush_pci_write(chan_hw);
33430eba9dfSKelvin Cao 
33530eba9dfSKelvin Cao 	udelay(1000);
33630eba9dfSKelvin Cao 
33730eba9dfSKelvin Cao 	writel(SWITCHTEC_CHAN_CTRL_ERR_PAUSE, &chan_hw->ctrl);
33830eba9dfSKelvin Cao 	spin_unlock(&swdma_chan->hw_ctrl_lock);
33930eba9dfSKelvin Cao 	flush_pci_write(chan_hw);
34030eba9dfSKelvin Cao 
34130eba9dfSKelvin Cao 	rcu_read_unlock();
34230eba9dfSKelvin Cao 	return 0;
34330eba9dfSKelvin Cao }
34430eba9dfSKelvin Cao 
34530eba9dfSKelvin Cao static int pause_reset_channel(struct switchtec_dma_chan *swdma_chan)
34630eba9dfSKelvin Cao {
34730eba9dfSKelvin Cao 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
34830eba9dfSKelvin Cao 	struct pci_dev *pdev;
34930eba9dfSKelvin Cao 
35030eba9dfSKelvin Cao 	rcu_read_lock();
35130eba9dfSKelvin Cao 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
35230eba9dfSKelvin Cao 	if (!pdev) {
35330eba9dfSKelvin Cao 		rcu_read_unlock();
35430eba9dfSKelvin Cao 		return -ENODEV;
35530eba9dfSKelvin Cao 	}
35630eba9dfSKelvin Cao 
35730eba9dfSKelvin Cao 	spin_lock(&swdma_chan->hw_ctrl_lock);
35830eba9dfSKelvin Cao 	writeb(SWITCHTEC_CHAN_CTRL_PAUSE, &chan_hw->ctrl);
35930eba9dfSKelvin Cao 	spin_unlock(&swdma_chan->hw_ctrl_lock);
36030eba9dfSKelvin Cao 
36130eba9dfSKelvin Cao 	flush_pci_write(chan_hw);
36230eba9dfSKelvin Cao 
36330eba9dfSKelvin Cao 	rcu_read_unlock();
36430eba9dfSKelvin Cao 
36530eba9dfSKelvin Cao 	/* wait 60ms to ensure no pending CEs */
36630eba9dfSKelvin Cao 	mdelay(60);
36730eba9dfSKelvin Cao 
36830eba9dfSKelvin Cao 	return reset_channel(swdma_chan);
36930eba9dfSKelvin Cao }
37030eba9dfSKelvin Cao 
37130eba9dfSKelvin Cao static int channel_op(struct switchtec_dma_chan *swdma_chan, int op)
37230eba9dfSKelvin Cao {
37330eba9dfSKelvin Cao 	struct chan_fw_regs __iomem *chan_fw = swdma_chan->mmio_chan_fw;
37430eba9dfSKelvin Cao 	struct pci_dev *pdev;
37530eba9dfSKelvin Cao 	u32 valid_en_se;
37630eba9dfSKelvin Cao 
37730eba9dfSKelvin Cao 	rcu_read_lock();
37830eba9dfSKelvin Cao 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
37930eba9dfSKelvin Cao 	if (!pdev) {
38030eba9dfSKelvin Cao 		rcu_read_unlock();
38130eba9dfSKelvin Cao 		return -ENODEV;
38230eba9dfSKelvin Cao 	}
38330eba9dfSKelvin Cao 
38430eba9dfSKelvin Cao 	valid_en_se = readl(&chan_fw->valid_en_se);
38530eba9dfSKelvin Cao 	if (op == ENABLE_CHAN)
38630eba9dfSKelvin Cao 		valid_en_se |= SWITCHTEC_CHAN_ENABLE;
38730eba9dfSKelvin Cao 	else
38830eba9dfSKelvin Cao 		valid_en_se &= ~SWITCHTEC_CHAN_ENABLE;
38930eba9dfSKelvin Cao 
39030eba9dfSKelvin Cao 	writel(valid_en_se, &chan_fw->valid_en_se);
39130eba9dfSKelvin Cao 
39230eba9dfSKelvin Cao 	rcu_read_unlock();
39330eba9dfSKelvin Cao 	return 0;
39430eba9dfSKelvin Cao }
39530eba9dfSKelvin Cao 
/* Enable the channel. Returns 0 or -ENODEV if the device is gone. */
static int enable_channel(struct switchtec_dma_chan *swdma_chan)
{
	return channel_op(swdma_chan, ENABLE_CHAN);
}
40030eba9dfSKelvin Cao 
/* Disable the channel. Returns 0 or -ENODEV if the device is gone. */
static int disable_channel(struct switchtec_dma_chan *swdma_chan)
{
	return channel_op(swdma_chan, DISABLE_CHAN);
}
40530eba9dfSKelvin Cao 
/*
 * Reap completion elements (CEs) from the hardware completion queue and
 * complete descriptors to the dmaengine core.
 *
 * Hardware may complete SEs out of order relative to submission; a
 * descriptor is first marked ->completed, and client notification only
 * happens in submission order: when the CE corresponds to the current
 * ring tail, the inner loop walks forward invoking callbacks for every
 * contiguous completed descriptor.
 */
static void
switchtec_dma_cleanup_completed(struct switchtec_dma_chan *swdma_chan)
{
	struct device *chan_dev = &swdma_chan->dma_chan.dev->device;
	struct switchtec_dma_desc *desc;
	struct switchtec_dma_hw_ce *ce;
	struct dmaengine_result res;
	int tail, cid, se_idx, i;
	__le16 phase_tag;
	u32 sts_code;
	__le32 *p;

	do {
		spin_lock_bh(&swdma_chan->complete_lock);
		if (!swdma_chan->comp_ring_active) {
			spin_unlock_bh(&swdma_chan->complete_lock);
			break;
		}

		ce = &swdma_chan->hw_cq[swdma_chan->cq_tail];
		/*
		 * phase_tag is updated by hardware, ensure the value is
		 * not from the cache
		 */
		phase_tag = smp_load_acquire(&ce->phase_tag);
		/* entry still carries the old phase: no new CE available */
		if (le16_to_cpu(phase_tag) == swdma_chan->phase_tag) {
			spin_unlock_bh(&swdma_chan->complete_lock);
			break;
		}

		/* map the CE back to its descriptor via the command ID */
		cid = le16_to_cpu(ce->cid);
		se_idx = cid & (SWITCHTEC_DMA_SQ_SIZE - 1);
		desc = swdma_chan->desc_ring[se_idx];

		tail = swdma_chan->tail;

		res.residue = desc->orig_size - le32_to_cpu(ce->cpl_byte_cnt);

		sts_code = le32_to_cpu(ce->sts_code);

		if (!(sts_code & SWITCHTEC_CE_SC_MASK)) {
			res.result = DMA_TRANS_NOERROR;
		} else {
			if (sts_code & SWITCHTEC_CE_SC_D_RD_CTO)
				res.result = DMA_TRANS_READ_FAILED;
			else
				res.result = DMA_TRANS_WRITE_FAILED;

			dev_err(chan_dev, "CID 0x%04x failed, SC 0x%08x\n", cid,
				(u32)(sts_code & SWITCHTEC_CE_SC_MASK));

			/* dump the raw CE dwords for diagnosis */
			p = (__le32 *)ce;
			for (i = 0; i < sizeof(*ce) / 4; i++) {
				dev_err(chan_dev, "CE DW%d: 0x%08x\n", i,
					le32_to_cpu(*p));
				p++;
			}
		}

		desc->completed = true;

		/* consume the CE and tell hardware via cq_head */
		swdma_chan->cq_tail++;
		swdma_chan->cq_tail &= SWITCHTEC_DMA_CQ_SIZE - 1;

		rcu_read_lock();
		if (!rcu_dereference(swdma_chan->swdma_dev->pdev)) {
			rcu_read_unlock();
			spin_unlock_bh(&swdma_chan->complete_lock);
			return;
		}
		writew(swdma_chan->cq_tail, &swdma_chan->mmio_chan_hw->cq_head);
		rcu_read_unlock();

		/* CQ wrapped: the expected phase flips */
		if (swdma_chan->cq_tail == 0)
			swdma_chan->phase_tag = !swdma_chan->phase_tag;

		/*  Out of order CE */
		if (se_idx != tail) {
			spin_unlock_bh(&swdma_chan->complete_lock);
			continue;
		}

		/* complete every contiguous finished descriptor in order */
		do {
			dma_cookie_complete(&desc->txd);
			dma_descriptor_unmap(&desc->txd);
			dmaengine_desc_get_callback_invoke(&desc->txd, &res);
			desc->txd.callback = NULL;
			desc->txd.callback_result = NULL;
			desc->completed = false;

			tail++;
			tail &= SWITCHTEC_DMA_SQ_SIZE - 1;

			/*
			 * Ensure the desc updates are visible before updating
			 * the tail index
			 */
			smp_store_release(&swdma_chan->tail, tail);
			desc = swdma_chan->desc_ring[swdma_chan->tail];
			if (!desc->completed)
				break;
		} while (CIRC_CNT(READ_ONCE(swdma_chan->head), swdma_chan->tail,
				  SWITCHTEC_DMA_SQ_SIZE));

		spin_unlock_bh(&swdma_chan->complete_lock);
	} while (1);
}
51330eba9dfSKelvin Cao 
51430eba9dfSKelvin Cao static void
51530eba9dfSKelvin Cao switchtec_dma_abort_desc(struct switchtec_dma_chan *swdma_chan, int force)
51630eba9dfSKelvin Cao {
51730eba9dfSKelvin Cao 	struct switchtec_dma_desc *desc;
51830eba9dfSKelvin Cao 	struct dmaengine_result res;
51930eba9dfSKelvin Cao 
52030eba9dfSKelvin Cao 	if (!force)
52130eba9dfSKelvin Cao 		switchtec_dma_cleanup_completed(swdma_chan);
52230eba9dfSKelvin Cao 
52330eba9dfSKelvin Cao 	spin_lock_bh(&swdma_chan->complete_lock);
52430eba9dfSKelvin Cao 
52530eba9dfSKelvin Cao 	while (CIRC_CNT(swdma_chan->head, swdma_chan->tail,
52630eba9dfSKelvin Cao 			SWITCHTEC_DMA_SQ_SIZE) >= 1) {
52730eba9dfSKelvin Cao 		desc = swdma_chan->desc_ring[swdma_chan->tail];
52830eba9dfSKelvin Cao 
52930eba9dfSKelvin Cao 		res.residue = desc->orig_size;
53030eba9dfSKelvin Cao 		res.result = DMA_TRANS_ABORTED;
53130eba9dfSKelvin Cao 
53230eba9dfSKelvin Cao 		dma_cookie_complete(&desc->txd);
53330eba9dfSKelvin Cao 		dma_descriptor_unmap(&desc->txd);
53430eba9dfSKelvin Cao 		if (!force)
53530eba9dfSKelvin Cao 			dmaengine_desc_get_callback_invoke(&desc->txd, &res);
53630eba9dfSKelvin Cao 		desc->txd.callback = NULL;
53730eba9dfSKelvin Cao 		desc->txd.callback_result = NULL;
53830eba9dfSKelvin Cao 
53930eba9dfSKelvin Cao 		swdma_chan->tail++;
54030eba9dfSKelvin Cao 		swdma_chan->tail &= SWITCHTEC_DMA_SQ_SIZE - 1;
54130eba9dfSKelvin Cao 	}
54230eba9dfSKelvin Cao 
54330eba9dfSKelvin Cao 	spin_unlock_bh(&swdma_chan->complete_lock);
54430eba9dfSKelvin Cao }
54530eba9dfSKelvin Cao 
54630eba9dfSKelvin Cao static void switchtec_dma_chan_stop(struct switchtec_dma_chan *swdma_chan)
54730eba9dfSKelvin Cao {
54830eba9dfSKelvin Cao 	int rc;
54930eba9dfSKelvin Cao 
55030eba9dfSKelvin Cao 	rc = halt_channel(swdma_chan);
55130eba9dfSKelvin Cao 	if (rc)
55230eba9dfSKelvin Cao 		return;
55330eba9dfSKelvin Cao 
55430eba9dfSKelvin Cao 	rcu_read_lock();
55530eba9dfSKelvin Cao 	if (!rcu_dereference(swdma_chan->swdma_dev->pdev)) {
55630eba9dfSKelvin Cao 		rcu_read_unlock();
55730eba9dfSKelvin Cao 		return;
55830eba9dfSKelvin Cao 	}
55930eba9dfSKelvin Cao 
56030eba9dfSKelvin Cao 	writel(0, &swdma_chan->mmio_chan_fw->sq_base_lo);
56130eba9dfSKelvin Cao 	writel(0, &swdma_chan->mmio_chan_fw->sq_base_hi);
56230eba9dfSKelvin Cao 	writel(0, &swdma_chan->mmio_chan_fw->cq_base_lo);
56330eba9dfSKelvin Cao 	writel(0, &swdma_chan->mmio_chan_fw->cq_base_hi);
56430eba9dfSKelvin Cao 
56530eba9dfSKelvin Cao 	rcu_read_unlock();
56630eba9dfSKelvin Cao }
56730eba9dfSKelvin Cao 
/*
 * dmaengine device_terminate_all callback: stop completion processing,
 * then pause and reset the channel.  Outstanding descriptors are cleaned
 * up later by switchtec_dma_synchronize().
 */
static int switchtec_dma_terminate_all(struct dma_chan *chan)
{
	struct switchtec_dma_chan *swdma_chan =
		container_of(chan, struct switchtec_dma_chan, dma_chan);

	/* stop the reaper before resetting the hardware */
	spin_lock_bh(&swdma_chan->complete_lock);
	swdma_chan->comp_ring_active = false;
	spin_unlock_bh(&swdma_chan->complete_lock);

	return pause_reset_channel(swdma_chan);
}
57930eba9dfSKelvin Cao 
58030eba9dfSKelvin Cao static void switchtec_dma_synchronize(struct dma_chan *chan)
58130eba9dfSKelvin Cao {
58230eba9dfSKelvin Cao 	struct switchtec_dma_chan *swdma_chan =
58330eba9dfSKelvin Cao 		container_of(chan, struct switchtec_dma_chan, dma_chan);
58430eba9dfSKelvin Cao 
58530eba9dfSKelvin Cao 	int rc;
58630eba9dfSKelvin Cao 
58730eba9dfSKelvin Cao 	switchtec_dma_abort_desc(swdma_chan, 1);
58830eba9dfSKelvin Cao 
58930eba9dfSKelvin Cao 	rc = enable_channel(swdma_chan);
59030eba9dfSKelvin Cao 	if (rc)
59130eba9dfSKelvin Cao 		return;
59230eba9dfSKelvin Cao 
59330eba9dfSKelvin Cao 	rc = reset_channel(swdma_chan);
59430eba9dfSKelvin Cao 	if (rc)
59530eba9dfSKelvin Cao 		return;
59630eba9dfSKelvin Cao 
59730eba9dfSKelvin Cao 	rc = unhalt_channel(swdma_chan);
59830eba9dfSKelvin Cao 	if (rc)
59930eba9dfSKelvin Cao 		return;
60030eba9dfSKelvin Cao 
60130eba9dfSKelvin Cao 	spin_lock_bh(&swdma_chan->submit_lock);
60230eba9dfSKelvin Cao 	swdma_chan->head = 0;
60330eba9dfSKelvin Cao 	spin_unlock_bh(&swdma_chan->submit_lock);
60430eba9dfSKelvin Cao 
60530eba9dfSKelvin Cao 	spin_lock_bh(&swdma_chan->complete_lock);
60630eba9dfSKelvin Cao 	swdma_chan->comp_ring_active = true;
60730eba9dfSKelvin Cao 	swdma_chan->phase_tag = 0;
60830eba9dfSKelvin Cao 	swdma_chan->tail = 0;
60930eba9dfSKelvin Cao 	swdma_chan->cq_tail = 0;
61030eba9dfSKelvin Cao 	swdma_chan->cid = 0;
61130eba9dfSKelvin Cao 	dma_cookie_init(chan);
61230eba9dfSKelvin Cao 	spin_unlock_bh(&swdma_chan->complete_lock);
61330eba9dfSKelvin Cao }
61430eba9dfSKelvin Cao 
/*
 * Common descriptor-preparation helper.  Fills the next free SE in the
 * ring with a MEMCPY operation and returns its txd.
 *
 * On success, returns with submit_lock HELD (annotated via __acquires);
 * the lock is released by the paired tx_submit.  Returns NULL (lock
 * released) if the ring is inactive or full.
 *
 * NOTE(review): the @data parameter is accepted but never used in this
 * body — presumably reserved for WRIMM-style operations; confirm.
 */
static struct dma_async_tx_descriptor *
switchtec_dma_prep_desc(struct dma_chan *c, u16 dst_fid, dma_addr_t dma_dst,
			u16 src_fid, dma_addr_t dma_src, u64 data,
			size_t len, unsigned long flags)
	__acquires(swdma_chan->submit_lock)
{
	struct switchtec_dma_chan *swdma_chan =
		container_of(c, struct switchtec_dma_chan, dma_chan);
	struct switchtec_dma_desc *desc;
	int head, tail;

	spin_lock_bh(&swdma_chan->submit_lock);

	if (!swdma_chan->ring_active)
		goto err_unlock;

	tail = READ_ONCE(swdma_chan->tail);
	head = swdma_chan->head;

	/* ring full: refuse the request */
	if (!CIRC_SPACE(head, tail, SWITCHTEC_DMA_RING_SIZE))
		goto err_unlock;

	desc = swdma_chan->desc_ring[head];

	/* both endpoints known: fabric DMA, set the DFM control bit */
	if (src_fid != SWITCHTEC_INVALID_HFID &&
	    dst_fid != SWITCHTEC_INVALID_HFID)
		desc->hw->ctrl |= SWITCHTEC_SE_DFM;

	if (flags & DMA_PREP_INTERRUPT)
		desc->hw->ctrl |= SWITCHTEC_SE_LIOF;

	if (flags & DMA_PREP_FENCE)
		desc->hw->ctrl |= SWITCHTEC_SE_BRR;

	desc->txd.flags = flags;

	desc->completed = false;
	desc->hw->opc = SWITCHTEC_DMA_OPC_MEMCPY;
	desc->hw->addr_lo = cpu_to_le32(lower_32_bits(dma_src));
	desc->hw->addr_hi = cpu_to_le32(upper_32_bits(dma_src));
	desc->hw->daddr_lo = cpu_to_le32(lower_32_bits(dma_dst));
	desc->hw->daddr_hi = cpu_to_le32(upper_32_bits(dma_dst));
	desc->hw->byte_cnt = cpu_to_le32(len);
	desc->hw->tlp_setting = 0;
	desc->hw->dfid = cpu_to_le16(dst_fid);
	desc->hw->sfid = cpu_to_le16(src_fid);
	/* assign and advance the wrapping command ID */
	swdma_chan->cid &= SWITCHTEC_SE_CID_MASK;
	desc->hw->cid = cpu_to_le16(swdma_chan->cid++);
	desc->orig_size = len;

	/* return with the lock held, it will be released in tx_submit */

	return &desc->txd;

err_unlock:
	/*
	 * Keep sparse happy by restoring an even lock count on
	 * this lock.
	 */
	__acquire(swdma_chan->submit_lock);

	spin_unlock_bh(&swdma_chan->submit_lock);
	return NULL;
}
679*3af11daeSKelvin Cao 
680*3af11daeSKelvin Cao static struct dma_async_tx_descriptor *
681*3af11daeSKelvin Cao switchtec_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dst,
682*3af11daeSKelvin Cao 			  dma_addr_t dma_src, size_t len, unsigned long flags)
683*3af11daeSKelvin Cao 	__acquires(swdma_chan->submit_lock)
684*3af11daeSKelvin Cao {
685*3af11daeSKelvin Cao 	if (len > SWITCHTEC_DESC_MAX_SIZE) {
686*3af11daeSKelvin Cao 		/*
687*3af11daeSKelvin Cao 		 * Keep sparse happy by restoring an even lock count on
688*3af11daeSKelvin Cao 		 * this lock.
689*3af11daeSKelvin Cao 		 */
690*3af11daeSKelvin Cao 		__acquire(swdma_chan->submit_lock);
691*3af11daeSKelvin Cao 		return NULL;
692*3af11daeSKelvin Cao 	}
693*3af11daeSKelvin Cao 
694*3af11daeSKelvin Cao 	return switchtec_dma_prep_desc(c, SWITCHTEC_INVALID_HFID, dma_dst,
695*3af11daeSKelvin Cao 				       SWITCHTEC_INVALID_HFID, dma_src, 0, len,
696*3af11daeSKelvin Cao 				       flags);
697*3af11daeSKelvin Cao }
698*3af11daeSKelvin Cao 
699*3af11daeSKelvin Cao static dma_cookie_t
700*3af11daeSKelvin Cao switchtec_dma_tx_submit(struct dma_async_tx_descriptor *desc)
701*3af11daeSKelvin Cao 	__releases(swdma_chan->submit_lock)
702*3af11daeSKelvin Cao {
703*3af11daeSKelvin Cao 	struct switchtec_dma_chan *swdma_chan =
704*3af11daeSKelvin Cao 		container_of(desc->chan, struct switchtec_dma_chan, dma_chan);
705*3af11daeSKelvin Cao 	dma_cookie_t cookie;
706*3af11daeSKelvin Cao 	int head;
707*3af11daeSKelvin Cao 
708*3af11daeSKelvin Cao 	head = swdma_chan->head + 1;
709*3af11daeSKelvin Cao 	head &= SWITCHTEC_DMA_RING_SIZE - 1;
710*3af11daeSKelvin Cao 
711*3af11daeSKelvin Cao 	/*
712*3af11daeSKelvin Cao 	 * Ensure the desc updates are visible before updating the head index
713*3af11daeSKelvin Cao 	 */
714*3af11daeSKelvin Cao 	smp_store_release(&swdma_chan->head, head);
715*3af11daeSKelvin Cao 
716*3af11daeSKelvin Cao 	cookie = dma_cookie_assign(desc);
717*3af11daeSKelvin Cao 
718*3af11daeSKelvin Cao 	spin_unlock_bh(&swdma_chan->submit_lock);
719*3af11daeSKelvin Cao 
720*3af11daeSKelvin Cao 	return cookie;
721*3af11daeSKelvin Cao }
722*3af11daeSKelvin Cao 
723*3af11daeSKelvin Cao static enum dma_status switchtec_dma_tx_status(struct dma_chan *chan,
724*3af11daeSKelvin Cao 		dma_cookie_t cookie, struct dma_tx_state *txstate)
725*3af11daeSKelvin Cao {
726*3af11daeSKelvin Cao 	struct switchtec_dma_chan *swdma_chan =
727*3af11daeSKelvin Cao 		container_of(chan, struct switchtec_dma_chan, dma_chan);
728*3af11daeSKelvin Cao 	enum dma_status ret;
729*3af11daeSKelvin Cao 
730*3af11daeSKelvin Cao 	ret = dma_cookie_status(chan, cookie, txstate);
731*3af11daeSKelvin Cao 	if (ret == DMA_COMPLETE)
732*3af11daeSKelvin Cao 		return ret;
733*3af11daeSKelvin Cao 
734*3af11daeSKelvin Cao 	/*
735*3af11daeSKelvin Cao 	 * For jobs where the interrupts are disabled, this is the only place
736*3af11daeSKelvin Cao 	 * to process the completions returned by the hardware. Callers that
737*3af11daeSKelvin Cao 	 * disable interrupts must call tx_status() to determine when a job
738*3af11daeSKelvin Cao 	 * is done, so it is safe to process completions here. If a job has
739*3af11daeSKelvin Cao 	 * interrupts enabled, then the completions will normally be processed
740*3af11daeSKelvin Cao 	 * in the tasklet that is triggered by the interrupt and tx_status()
741*3af11daeSKelvin Cao 	 * does not need to be called.
742*3af11daeSKelvin Cao 	 */
743*3af11daeSKelvin Cao 	switchtec_dma_cleanup_completed(swdma_chan);
744*3af11daeSKelvin Cao 
745*3af11daeSKelvin Cao 	return dma_cookie_status(chan, cookie, txstate);
746*3af11daeSKelvin Cao }
747*3af11daeSKelvin Cao 
748*3af11daeSKelvin Cao static void switchtec_dma_issue_pending(struct dma_chan *chan)
749*3af11daeSKelvin Cao {
750*3af11daeSKelvin Cao 	struct switchtec_dma_chan *swdma_chan =
751*3af11daeSKelvin Cao 		container_of(chan, struct switchtec_dma_chan, dma_chan);
752*3af11daeSKelvin Cao 	struct switchtec_dma_dev *swdma_dev = swdma_chan->swdma_dev;
753*3af11daeSKelvin Cao 
754*3af11daeSKelvin Cao 	/*
755*3af11daeSKelvin Cao 	 * The sq_tail register is actually for the head of the
756*3af11daeSKelvin Cao 	 * submisssion queue. Chip has the opposite define of head/tail
757*3af11daeSKelvin Cao 	 * to the Linux kernel.
758*3af11daeSKelvin Cao 	 */
759*3af11daeSKelvin Cao 
760*3af11daeSKelvin Cao 	rcu_read_lock();
761*3af11daeSKelvin Cao 	if (!rcu_dereference(swdma_dev->pdev)) {
762*3af11daeSKelvin Cao 		rcu_read_unlock();
763*3af11daeSKelvin Cao 		return;
764*3af11daeSKelvin Cao 	}
765*3af11daeSKelvin Cao 
766*3af11daeSKelvin Cao 	spin_lock_bh(&swdma_chan->submit_lock);
767*3af11daeSKelvin Cao 	writew(swdma_chan->head, &swdma_chan->mmio_chan_hw->sq_tail);
768*3af11daeSKelvin Cao 	spin_unlock_bh(&swdma_chan->submit_lock);
769*3af11daeSKelvin Cao 
770*3af11daeSKelvin Cao 	rcu_read_unlock();
771*3af11daeSKelvin Cao }
772*3af11daeSKelvin Cao 
773*3af11daeSKelvin Cao static int switchtec_dma_pause(struct dma_chan *chan)
774*3af11daeSKelvin Cao {
775*3af11daeSKelvin Cao 	struct switchtec_dma_chan *swdma_chan =
776*3af11daeSKelvin Cao 		container_of(chan, struct switchtec_dma_chan, dma_chan);
777*3af11daeSKelvin Cao 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
778*3af11daeSKelvin Cao 	struct pci_dev *pdev;
779*3af11daeSKelvin Cao 	int ret;
780*3af11daeSKelvin Cao 
781*3af11daeSKelvin Cao 	rcu_read_lock();
782*3af11daeSKelvin Cao 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
783*3af11daeSKelvin Cao 	if (!pdev) {
784*3af11daeSKelvin Cao 		ret = -ENODEV;
785*3af11daeSKelvin Cao 		goto unlock_and_exit;
786*3af11daeSKelvin Cao 	}
787*3af11daeSKelvin Cao 
788*3af11daeSKelvin Cao 	spin_lock(&swdma_chan->hw_ctrl_lock);
789*3af11daeSKelvin Cao 	writeb(SWITCHTEC_CHAN_CTRL_PAUSE, &chan_hw->ctrl);
790*3af11daeSKelvin Cao 	ret = wait_for_chan_status(chan_hw, SWITCHTEC_CHAN_STS_PAUSED, true);
791*3af11daeSKelvin Cao 	spin_unlock(&swdma_chan->hw_ctrl_lock);
792*3af11daeSKelvin Cao 
793*3af11daeSKelvin Cao unlock_and_exit:
794*3af11daeSKelvin Cao 	rcu_read_unlock();
795*3af11daeSKelvin Cao 	return ret;
796*3af11daeSKelvin Cao }
797*3af11daeSKelvin Cao 
798*3af11daeSKelvin Cao static int switchtec_dma_resume(struct dma_chan *chan)
799*3af11daeSKelvin Cao {
800*3af11daeSKelvin Cao 	struct switchtec_dma_chan *swdma_chan =
801*3af11daeSKelvin Cao 		container_of(chan, struct switchtec_dma_chan, dma_chan);
802*3af11daeSKelvin Cao 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
803*3af11daeSKelvin Cao 	struct pci_dev *pdev;
804*3af11daeSKelvin Cao 	int ret;
805*3af11daeSKelvin Cao 
806*3af11daeSKelvin Cao 	rcu_read_lock();
807*3af11daeSKelvin Cao 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
808*3af11daeSKelvin Cao 	if (!pdev) {
809*3af11daeSKelvin Cao 		ret = -ENODEV;
810*3af11daeSKelvin Cao 		goto unlock_and_exit;
811*3af11daeSKelvin Cao 	}
812*3af11daeSKelvin Cao 
813*3af11daeSKelvin Cao 	spin_lock(&swdma_chan->hw_ctrl_lock);
814*3af11daeSKelvin Cao 	writeb(0, &chan_hw->ctrl);
815*3af11daeSKelvin Cao 	ret = wait_for_chan_status(chan_hw, SWITCHTEC_CHAN_STS_PAUSED, false);
816*3af11daeSKelvin Cao 	spin_unlock(&swdma_chan->hw_ctrl_lock);
817*3af11daeSKelvin Cao 
818*3af11daeSKelvin Cao unlock_and_exit:
819*3af11daeSKelvin Cao 	rcu_read_unlock();
820*3af11daeSKelvin Cao 	return ret;
821*3af11daeSKelvin Cao }
822*3af11daeSKelvin Cao 
82330eba9dfSKelvin Cao static void switchtec_dma_desc_task(unsigned long data)
82430eba9dfSKelvin Cao {
82530eba9dfSKelvin Cao 	struct switchtec_dma_chan *swdma_chan = (void *)data;
82630eba9dfSKelvin Cao 
82730eba9dfSKelvin Cao 	switchtec_dma_cleanup_completed(swdma_chan);
82830eba9dfSKelvin Cao }
82930eba9dfSKelvin Cao 
83030eba9dfSKelvin Cao static irqreturn_t switchtec_dma_isr(int irq, void *chan)
83130eba9dfSKelvin Cao {
83230eba9dfSKelvin Cao 	struct switchtec_dma_chan *swdma_chan = chan;
83330eba9dfSKelvin Cao 
83430eba9dfSKelvin Cao 	if (swdma_chan->comp_ring_active)
83530eba9dfSKelvin Cao 		tasklet_schedule(&swdma_chan->desc_task);
83630eba9dfSKelvin Cao 
83730eba9dfSKelvin Cao 	return IRQ_HANDLED;
83830eba9dfSKelvin Cao }
83930eba9dfSKelvin Cao 
84030eba9dfSKelvin Cao static irqreturn_t switchtec_dma_chan_status_isr(int irq, void *dma)
84130eba9dfSKelvin Cao {
84230eba9dfSKelvin Cao 	struct switchtec_dma_dev *swdma_dev = dma;
84330eba9dfSKelvin Cao 	struct dma_device *dma_dev = &swdma_dev->dma_dev;
84430eba9dfSKelvin Cao 	struct switchtec_dma_chan *swdma_chan;
84530eba9dfSKelvin Cao 	struct chan_hw_regs __iomem *chan_hw;
84630eba9dfSKelvin Cao 	struct device *chan_dev;
84730eba9dfSKelvin Cao 	struct dma_chan *chan;
84830eba9dfSKelvin Cao 	u32 chan_status;
84930eba9dfSKelvin Cao 	int bit;
85030eba9dfSKelvin Cao 
85130eba9dfSKelvin Cao 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
85230eba9dfSKelvin Cao 		swdma_chan = container_of(chan, struct switchtec_dma_chan,
85330eba9dfSKelvin Cao 					  dma_chan);
85430eba9dfSKelvin Cao 		chan_dev = &swdma_chan->dma_chan.dev->device;
85530eba9dfSKelvin Cao 		chan_hw = swdma_chan->mmio_chan_hw;
85630eba9dfSKelvin Cao 
85730eba9dfSKelvin Cao 		rcu_read_lock();
85830eba9dfSKelvin Cao 		if (!rcu_dereference(swdma_dev->pdev)) {
85930eba9dfSKelvin Cao 			rcu_read_unlock();
86030eba9dfSKelvin Cao 			goto out;
86130eba9dfSKelvin Cao 		}
86230eba9dfSKelvin Cao 
86330eba9dfSKelvin Cao 		chan_status = readl(&chan_hw->status);
86430eba9dfSKelvin Cao 		chan_status &= SWITCHTEC_CHAN_STS_PAUSED_MASK;
86530eba9dfSKelvin Cao 		rcu_read_unlock();
86630eba9dfSKelvin Cao 
86730eba9dfSKelvin Cao 		bit = ffs(chan_status);
86830eba9dfSKelvin Cao 		if (!bit)
86930eba9dfSKelvin Cao 			dev_dbg(chan_dev, "No pause bit set.\n");
87030eba9dfSKelvin Cao 		else
87130eba9dfSKelvin Cao 			dev_err(chan_dev, "Paused, %s\n",
87230eba9dfSKelvin Cao 				channel_status_str[bit - 1]);
87330eba9dfSKelvin Cao 	}
87430eba9dfSKelvin Cao 
87530eba9dfSKelvin Cao out:
87630eba9dfSKelvin Cao 	return IRQ_HANDLED;
87730eba9dfSKelvin Cao }
87830eba9dfSKelvin Cao 
87930eba9dfSKelvin Cao static void switchtec_dma_free_desc(struct switchtec_dma_chan *swdma_chan)
88030eba9dfSKelvin Cao {
88130eba9dfSKelvin Cao 	struct switchtec_dma_dev *swdma_dev = swdma_chan->swdma_dev;
88230eba9dfSKelvin Cao 	size_t size;
88330eba9dfSKelvin Cao 	int i;
88430eba9dfSKelvin Cao 
88530eba9dfSKelvin Cao 	size = SWITCHTEC_DMA_SQ_SIZE * sizeof(*swdma_chan->hw_sq);
88630eba9dfSKelvin Cao 	if (swdma_chan->hw_sq)
88730eba9dfSKelvin Cao 		dma_free_coherent(swdma_dev->dma_dev.dev, size,
88830eba9dfSKelvin Cao 				  swdma_chan->hw_sq, swdma_chan->dma_addr_sq);
88930eba9dfSKelvin Cao 
89030eba9dfSKelvin Cao 	size = SWITCHTEC_DMA_CQ_SIZE * sizeof(*swdma_chan->hw_cq);
89130eba9dfSKelvin Cao 	if (swdma_chan->hw_cq)
89230eba9dfSKelvin Cao 		dma_free_coherent(swdma_dev->dma_dev.dev, size,
89330eba9dfSKelvin Cao 				  swdma_chan->hw_cq, swdma_chan->dma_addr_cq);
89430eba9dfSKelvin Cao 
89530eba9dfSKelvin Cao 	for (i = 0; i < SWITCHTEC_DMA_RING_SIZE; i++)
89630eba9dfSKelvin Cao 		kfree(swdma_chan->desc_ring[i]);
89730eba9dfSKelvin Cao }
89830eba9dfSKelvin Cao 
89930eba9dfSKelvin Cao static int switchtec_dma_alloc_desc(struct switchtec_dma_chan *swdma_chan)
90030eba9dfSKelvin Cao {
90130eba9dfSKelvin Cao 	struct switchtec_dma_dev *swdma_dev = swdma_chan->swdma_dev;
90230eba9dfSKelvin Cao 	struct chan_fw_regs __iomem *chan_fw = swdma_chan->mmio_chan_fw;
90330eba9dfSKelvin Cao 	struct switchtec_dma_desc *desc;
90430eba9dfSKelvin Cao 	struct pci_dev *pdev;
90530eba9dfSKelvin Cao 	size_t size;
90630eba9dfSKelvin Cao 	int rc, i;
90730eba9dfSKelvin Cao 
90830eba9dfSKelvin Cao 	swdma_chan->head = 0;
90930eba9dfSKelvin Cao 	swdma_chan->tail = 0;
91030eba9dfSKelvin Cao 	swdma_chan->cq_tail = 0;
91130eba9dfSKelvin Cao 
91230eba9dfSKelvin Cao 	size = SWITCHTEC_DMA_SQ_SIZE * sizeof(*swdma_chan->hw_sq);
91330eba9dfSKelvin Cao 	swdma_chan->hw_sq = dma_alloc_coherent(swdma_dev->dma_dev.dev, size,
91430eba9dfSKelvin Cao 					       &swdma_chan->dma_addr_sq,
91530eba9dfSKelvin Cao 					       GFP_NOWAIT);
91630eba9dfSKelvin Cao 	if (!swdma_chan->hw_sq) {
91730eba9dfSKelvin Cao 		rc = -ENOMEM;
91830eba9dfSKelvin Cao 		goto free_and_exit;
91930eba9dfSKelvin Cao 	}
92030eba9dfSKelvin Cao 
92130eba9dfSKelvin Cao 	size = SWITCHTEC_DMA_CQ_SIZE * sizeof(*swdma_chan->hw_cq);
92230eba9dfSKelvin Cao 	swdma_chan->hw_cq = dma_alloc_coherent(swdma_dev->dma_dev.dev, size,
92330eba9dfSKelvin Cao 					       &swdma_chan->dma_addr_cq,
92430eba9dfSKelvin Cao 					       GFP_NOWAIT);
92530eba9dfSKelvin Cao 	if (!swdma_chan->hw_cq) {
92630eba9dfSKelvin Cao 		rc = -ENOMEM;
92730eba9dfSKelvin Cao 		goto free_and_exit;
92830eba9dfSKelvin Cao 	}
92930eba9dfSKelvin Cao 
93030eba9dfSKelvin Cao 	/* reset host phase tag */
93130eba9dfSKelvin Cao 	swdma_chan->phase_tag = 0;
93230eba9dfSKelvin Cao 
93330eba9dfSKelvin Cao 	for (i = 0; i < SWITCHTEC_DMA_RING_SIZE; i++) {
93430eba9dfSKelvin Cao 		desc = kzalloc_obj(*desc, GFP_NOWAIT);
93530eba9dfSKelvin Cao 		if (!desc) {
93630eba9dfSKelvin Cao 			rc = -ENOMEM;
93730eba9dfSKelvin Cao 			goto free_and_exit;
93830eba9dfSKelvin Cao 		}
93930eba9dfSKelvin Cao 
94030eba9dfSKelvin Cao 		dma_async_tx_descriptor_init(&desc->txd, &swdma_chan->dma_chan);
941*3af11daeSKelvin Cao 		desc->txd.tx_submit = switchtec_dma_tx_submit;
94230eba9dfSKelvin Cao 		desc->hw = &swdma_chan->hw_sq[i];
94330eba9dfSKelvin Cao 		desc->completed = true;
94430eba9dfSKelvin Cao 
94530eba9dfSKelvin Cao 		swdma_chan->desc_ring[i] = desc;
94630eba9dfSKelvin Cao 	}
94730eba9dfSKelvin Cao 
94830eba9dfSKelvin Cao 	rcu_read_lock();
94930eba9dfSKelvin Cao 	pdev = rcu_dereference(swdma_dev->pdev);
95030eba9dfSKelvin Cao 	if (!pdev) {
95130eba9dfSKelvin Cao 		rcu_read_unlock();
95230eba9dfSKelvin Cao 		rc = -ENODEV;
95330eba9dfSKelvin Cao 		goto free_and_exit;
95430eba9dfSKelvin Cao 	}
95530eba9dfSKelvin Cao 
95630eba9dfSKelvin Cao 	/* set sq/cq */
95730eba9dfSKelvin Cao 	writel(lower_32_bits(swdma_chan->dma_addr_sq), &chan_fw->sq_base_lo);
95830eba9dfSKelvin Cao 	writel(upper_32_bits(swdma_chan->dma_addr_sq), &chan_fw->sq_base_hi);
95930eba9dfSKelvin Cao 	writel(lower_32_bits(swdma_chan->dma_addr_cq), &chan_fw->cq_base_lo);
96030eba9dfSKelvin Cao 	writel(upper_32_bits(swdma_chan->dma_addr_cq), &chan_fw->cq_base_hi);
96130eba9dfSKelvin Cao 
96230eba9dfSKelvin Cao 	writew(SWITCHTEC_DMA_SQ_SIZE, &swdma_chan->mmio_chan_fw->sq_size);
96330eba9dfSKelvin Cao 	writew(SWITCHTEC_DMA_CQ_SIZE, &swdma_chan->mmio_chan_fw->cq_size);
96430eba9dfSKelvin Cao 
96530eba9dfSKelvin Cao 	rcu_read_unlock();
96630eba9dfSKelvin Cao 	return 0;
96730eba9dfSKelvin Cao 
96830eba9dfSKelvin Cao free_and_exit:
96930eba9dfSKelvin Cao 	switchtec_dma_free_desc(swdma_chan);
97030eba9dfSKelvin Cao 	return rc;
97130eba9dfSKelvin Cao }
97230eba9dfSKelvin Cao 
97330eba9dfSKelvin Cao static int switchtec_dma_alloc_chan_resources(struct dma_chan *chan)
97430eba9dfSKelvin Cao {
97530eba9dfSKelvin Cao 	struct switchtec_dma_chan *swdma_chan =
97630eba9dfSKelvin Cao 		container_of(chan, struct switchtec_dma_chan, dma_chan);
97730eba9dfSKelvin Cao 	struct switchtec_dma_dev *swdma_dev = swdma_chan->swdma_dev;
97830eba9dfSKelvin Cao 	u32 perf_cfg;
97930eba9dfSKelvin Cao 	int rc;
98030eba9dfSKelvin Cao 
98130eba9dfSKelvin Cao 	rc = switchtec_dma_alloc_desc(swdma_chan);
98230eba9dfSKelvin Cao 	if (rc)
98330eba9dfSKelvin Cao 		return rc;
98430eba9dfSKelvin Cao 
98530eba9dfSKelvin Cao 	rc = enable_channel(swdma_chan);
98630eba9dfSKelvin Cao 	if (rc)
98730eba9dfSKelvin Cao 		return rc;
98830eba9dfSKelvin Cao 
98930eba9dfSKelvin Cao 	rc = reset_channel(swdma_chan);
99030eba9dfSKelvin Cao 	if (rc)
99130eba9dfSKelvin Cao 		return rc;
99230eba9dfSKelvin Cao 
99330eba9dfSKelvin Cao 	rc = unhalt_channel(swdma_chan);
99430eba9dfSKelvin Cao 	if (rc)
99530eba9dfSKelvin Cao 		return rc;
99630eba9dfSKelvin Cao 
99730eba9dfSKelvin Cao 	swdma_chan->ring_active = true;
99830eba9dfSKelvin Cao 	swdma_chan->comp_ring_active = true;
99930eba9dfSKelvin Cao 	swdma_chan->cid = 0;
100030eba9dfSKelvin Cao 
100130eba9dfSKelvin Cao 	dma_cookie_init(chan);
100230eba9dfSKelvin Cao 
100330eba9dfSKelvin Cao 	rcu_read_lock();
100430eba9dfSKelvin Cao 	if (!rcu_dereference(swdma_dev->pdev)) {
100530eba9dfSKelvin Cao 		rcu_read_unlock();
100630eba9dfSKelvin Cao 		return -ENODEV;
100730eba9dfSKelvin Cao 	}
100830eba9dfSKelvin Cao 
100930eba9dfSKelvin Cao 	perf_cfg = readl(&swdma_chan->mmio_chan_fw->perf_cfg);
101030eba9dfSKelvin Cao 	rcu_read_unlock();
101130eba9dfSKelvin Cao 
101230eba9dfSKelvin Cao 	dev_dbg(&chan->dev->device, "Burst Size:  0x%x\n",
101330eba9dfSKelvin Cao 		FIELD_GET(PERF_BURST_SIZE_MASK, perf_cfg));
101430eba9dfSKelvin Cao 
101530eba9dfSKelvin Cao 	dev_dbg(&chan->dev->device, "Burst Scale: 0x%x\n",
101630eba9dfSKelvin Cao 		FIELD_GET(PERF_BURST_SCALE_MASK, perf_cfg));
101730eba9dfSKelvin Cao 
101830eba9dfSKelvin Cao 	dev_dbg(&chan->dev->device, "Interval:    0x%x\n",
101930eba9dfSKelvin Cao 		FIELD_GET(PERF_INTERVAL_MASK, perf_cfg));
102030eba9dfSKelvin Cao 
102130eba9dfSKelvin Cao 	dev_dbg(&chan->dev->device, "Arb Weight:  0x%x\n",
102230eba9dfSKelvin Cao 		FIELD_GET(PERF_ARB_WEIGHT_MASK, perf_cfg));
102330eba9dfSKelvin Cao 
102430eba9dfSKelvin Cao 	dev_dbg(&chan->dev->device, "MRRS:        0x%x\n",
102530eba9dfSKelvin Cao 		FIELD_GET(PERF_MRRS_MASK, perf_cfg));
102630eba9dfSKelvin Cao 
102730eba9dfSKelvin Cao 	return SWITCHTEC_DMA_SQ_SIZE;
102830eba9dfSKelvin Cao }
102930eba9dfSKelvin Cao 
103030eba9dfSKelvin Cao static void switchtec_dma_free_chan_resources(struct dma_chan *chan)
103130eba9dfSKelvin Cao {
103230eba9dfSKelvin Cao 	struct switchtec_dma_chan *swdma_chan =
103330eba9dfSKelvin Cao 		container_of(chan, struct switchtec_dma_chan, dma_chan);
103430eba9dfSKelvin Cao 
103530eba9dfSKelvin Cao 	spin_lock_bh(&swdma_chan->submit_lock);
103630eba9dfSKelvin Cao 	swdma_chan->ring_active = false;
103730eba9dfSKelvin Cao 	spin_unlock_bh(&swdma_chan->submit_lock);
103830eba9dfSKelvin Cao 
103930eba9dfSKelvin Cao 	spin_lock_bh(&swdma_chan->complete_lock);
104030eba9dfSKelvin Cao 	swdma_chan->comp_ring_active = false;
104130eba9dfSKelvin Cao 	spin_unlock_bh(&swdma_chan->complete_lock);
104230eba9dfSKelvin Cao 
104330eba9dfSKelvin Cao 	switchtec_dma_chan_stop(swdma_chan);
104430eba9dfSKelvin Cao 	switchtec_dma_abort_desc(swdma_chan, 0);
104530eba9dfSKelvin Cao 	switchtec_dma_free_desc(swdma_chan);
104630eba9dfSKelvin Cao 
104730eba9dfSKelvin Cao 	disable_channel(swdma_chan);
104830eba9dfSKelvin Cao }
104930eba9dfSKelvin Cao 
105030eba9dfSKelvin Cao static int switchtec_dma_chan_init(struct switchtec_dma_dev *swdma_dev,
105130eba9dfSKelvin Cao 				   struct pci_dev *pdev, int i)
105230eba9dfSKelvin Cao {
105330eba9dfSKelvin Cao 	struct dma_device *dma = &swdma_dev->dma_dev;
105430eba9dfSKelvin Cao 	struct switchtec_dma_chan *swdma_chan;
105530eba9dfSKelvin Cao 	u32 valid_en_se, thresh;
105630eba9dfSKelvin Cao 	int se_buf_len, irq, rc;
105730eba9dfSKelvin Cao 	struct dma_chan *chan;
105830eba9dfSKelvin Cao 
105930eba9dfSKelvin Cao 	swdma_chan = kzalloc_obj(*swdma_chan, GFP_KERNEL);
106030eba9dfSKelvin Cao 	if (!swdma_chan)
106130eba9dfSKelvin Cao 		return -ENOMEM;
106230eba9dfSKelvin Cao 
106330eba9dfSKelvin Cao 	swdma_chan->phase_tag = 0;
106430eba9dfSKelvin Cao 	swdma_chan->index = i;
106530eba9dfSKelvin Cao 	swdma_chan->swdma_dev = swdma_dev;
106630eba9dfSKelvin Cao 
106730eba9dfSKelvin Cao 	spin_lock_init(&swdma_chan->hw_ctrl_lock);
106830eba9dfSKelvin Cao 	spin_lock_init(&swdma_chan->submit_lock);
106930eba9dfSKelvin Cao 	spin_lock_init(&swdma_chan->complete_lock);
107030eba9dfSKelvin Cao 	tasklet_init(&swdma_chan->desc_task, switchtec_dma_desc_task,
107130eba9dfSKelvin Cao 		     (unsigned long)swdma_chan);
107230eba9dfSKelvin Cao 
107330eba9dfSKelvin Cao 	swdma_chan->mmio_chan_fw =
107430eba9dfSKelvin Cao 		swdma_dev->bar + SWITCHTEC_DMAC_CHAN_CFG_STS_OFFSET +
107530eba9dfSKelvin Cao 		i * SWITCHTEC_DMA_CHAN_FW_REGS_SIZE;
107630eba9dfSKelvin Cao 	swdma_chan->mmio_chan_hw =
107730eba9dfSKelvin Cao 		swdma_dev->bar + SWITCHTEC_DMAC_CHAN_CTRL_OFFSET +
107830eba9dfSKelvin Cao 		i * SWITCHTEC_DMA_CHAN_HW_REGS_SIZE;
107930eba9dfSKelvin Cao 
108030eba9dfSKelvin Cao 	swdma_dev->swdma_chans[i] = swdma_chan;
108130eba9dfSKelvin Cao 
108230eba9dfSKelvin Cao 	rc = pause_reset_channel(swdma_chan);
108330eba9dfSKelvin Cao 	if (rc)
108430eba9dfSKelvin Cao 		goto free_and_exit;
108530eba9dfSKelvin Cao 
108630eba9dfSKelvin Cao 	/* init perf tuner */
108730eba9dfSKelvin Cao 	writel(FIELD_PREP(PERF_BURST_SCALE_MASK, 1) |
108830eba9dfSKelvin Cao 	       FIELD_PREP(PERF_MRRS_MASK, 3) |
108930eba9dfSKelvin Cao 	       FIELD_PREP(PERF_BURST_SIZE_MASK, 6) |
109030eba9dfSKelvin Cao 	       FIELD_PREP(PERF_ARB_WEIGHT_MASK, 1),
109130eba9dfSKelvin Cao 	       &swdma_chan->mmio_chan_fw->perf_cfg);
109230eba9dfSKelvin Cao 
109330eba9dfSKelvin Cao 	valid_en_se = readl(&swdma_chan->mmio_chan_fw->valid_en_se);
109430eba9dfSKelvin Cao 
109530eba9dfSKelvin Cao 	dev_dbg(&pdev->dev, "Channel %d: SE buffer base %d\n", i,
109630eba9dfSKelvin Cao 		FIELD_GET(SE_BUF_BASE_MASK, valid_en_se));
109730eba9dfSKelvin Cao 
109830eba9dfSKelvin Cao 	se_buf_len = FIELD_GET(SE_BUF_LEN_MASK, valid_en_se);
109930eba9dfSKelvin Cao 	dev_dbg(&pdev->dev, "Channel %d: SE buffer count %d\n", i, se_buf_len);
110030eba9dfSKelvin Cao 
110130eba9dfSKelvin Cao 	thresh = se_buf_len / 2;
110230eba9dfSKelvin Cao 	valid_en_se |= FIELD_GET(SE_THRESH_MASK, thresh);
110330eba9dfSKelvin Cao 	writel(valid_en_se, &swdma_chan->mmio_chan_fw->valid_en_se);
110430eba9dfSKelvin Cao 
110530eba9dfSKelvin Cao 	/* request irqs */
110630eba9dfSKelvin Cao 	irq = readl(&swdma_chan->mmio_chan_fw->int_vec);
110730eba9dfSKelvin Cao 	dev_dbg(&pdev->dev, "Channel %d: CE irq vector %d\n", i, irq);
110830eba9dfSKelvin Cao 
110930eba9dfSKelvin Cao 	rc = pci_request_irq(pdev, irq, switchtec_dma_isr, NULL, swdma_chan,
111030eba9dfSKelvin Cao 			     KBUILD_MODNAME);
111130eba9dfSKelvin Cao 	if (rc)
111230eba9dfSKelvin Cao 		goto free_and_exit;
111330eba9dfSKelvin Cao 
111430eba9dfSKelvin Cao 	swdma_chan->irq = irq;
111530eba9dfSKelvin Cao 
111630eba9dfSKelvin Cao 	chan = &swdma_chan->dma_chan;
111730eba9dfSKelvin Cao 	chan->device = dma;
111830eba9dfSKelvin Cao 	dma_cookie_init(chan);
111930eba9dfSKelvin Cao 
112030eba9dfSKelvin Cao 	list_add_tail(&chan->device_node, &dma->channels);
112130eba9dfSKelvin Cao 
112230eba9dfSKelvin Cao 	return 0;
112330eba9dfSKelvin Cao 
112430eba9dfSKelvin Cao free_and_exit:
112530eba9dfSKelvin Cao 	kfree(swdma_chan);
112630eba9dfSKelvin Cao 	return rc;
112730eba9dfSKelvin Cao }
112830eba9dfSKelvin Cao 
112930eba9dfSKelvin Cao static int switchtec_dma_chan_free(struct pci_dev *pdev,
113030eba9dfSKelvin Cao 				   struct switchtec_dma_chan *swdma_chan)
113130eba9dfSKelvin Cao {
113230eba9dfSKelvin Cao 	spin_lock_bh(&swdma_chan->submit_lock);
113330eba9dfSKelvin Cao 	swdma_chan->ring_active = false;
113430eba9dfSKelvin Cao 	spin_unlock_bh(&swdma_chan->submit_lock);
113530eba9dfSKelvin Cao 
113630eba9dfSKelvin Cao 	spin_lock_bh(&swdma_chan->complete_lock);
113730eba9dfSKelvin Cao 	swdma_chan->comp_ring_active = false;
113830eba9dfSKelvin Cao 	spin_unlock_bh(&swdma_chan->complete_lock);
113930eba9dfSKelvin Cao 
114030eba9dfSKelvin Cao 	pci_free_irq(pdev, swdma_chan->irq, swdma_chan);
114130eba9dfSKelvin Cao 	tasklet_kill(&swdma_chan->desc_task);
114230eba9dfSKelvin Cao 
114330eba9dfSKelvin Cao 	switchtec_dma_chan_stop(swdma_chan);
114430eba9dfSKelvin Cao 
114530eba9dfSKelvin Cao 	return 0;
114630eba9dfSKelvin Cao }
114730eba9dfSKelvin Cao 
114830eba9dfSKelvin Cao static int switchtec_dma_chans_release(struct pci_dev *pdev,
114930eba9dfSKelvin Cao 				       struct switchtec_dma_dev *swdma_dev)
115030eba9dfSKelvin Cao {
115130eba9dfSKelvin Cao 	int i;
115230eba9dfSKelvin Cao 
115330eba9dfSKelvin Cao 	for (i = 0; i < swdma_dev->chan_cnt; i++)
115430eba9dfSKelvin Cao 		switchtec_dma_chan_free(pdev, swdma_dev->swdma_chans[i]);
115530eba9dfSKelvin Cao 
115630eba9dfSKelvin Cao 	return 0;
115730eba9dfSKelvin Cao }
115830eba9dfSKelvin Cao 
115930eba9dfSKelvin Cao static int switchtec_dma_chans_enumerate(struct switchtec_dma_dev *swdma_dev,
116030eba9dfSKelvin Cao 					 struct pci_dev *pdev, int chan_cnt)
116130eba9dfSKelvin Cao {
116230eba9dfSKelvin Cao 	struct dma_device *dma = &swdma_dev->dma_dev;
116330eba9dfSKelvin Cao 	int base, cnt, rc, i;
116430eba9dfSKelvin Cao 
116530eba9dfSKelvin Cao 	swdma_dev->swdma_chans = kcalloc(chan_cnt, sizeof(*swdma_dev->swdma_chans),
116630eba9dfSKelvin Cao 					 GFP_KERNEL);
116730eba9dfSKelvin Cao 
116830eba9dfSKelvin Cao 	if (!swdma_dev->swdma_chans)
116930eba9dfSKelvin Cao 		return -ENOMEM;
117030eba9dfSKelvin Cao 
117130eba9dfSKelvin Cao 	base = readw(swdma_dev->bar + SWITCHTEC_REG_SE_BUF_BASE);
117230eba9dfSKelvin Cao 	cnt = readw(swdma_dev->bar + SWITCHTEC_REG_SE_BUF_CNT);
117330eba9dfSKelvin Cao 
117430eba9dfSKelvin Cao 	dev_dbg(&pdev->dev, "EP SE buffer base %d\n", base);
117530eba9dfSKelvin Cao 	dev_dbg(&pdev->dev, "EP SE buffer count %d\n", cnt);
117630eba9dfSKelvin Cao 
117730eba9dfSKelvin Cao 	INIT_LIST_HEAD(&dma->channels);
117830eba9dfSKelvin Cao 
117930eba9dfSKelvin Cao 	for (i = 0; i < chan_cnt; i++) {
118030eba9dfSKelvin Cao 		rc = switchtec_dma_chan_init(swdma_dev, pdev, i);
118130eba9dfSKelvin Cao 		if (rc) {
118230eba9dfSKelvin Cao 			dev_err(&pdev->dev, "Channel %d: init channel failed\n",
118330eba9dfSKelvin Cao 				i);
118430eba9dfSKelvin Cao 			chan_cnt = i;
118530eba9dfSKelvin Cao 			goto err_exit;
118630eba9dfSKelvin Cao 		}
118730eba9dfSKelvin Cao 	}
118830eba9dfSKelvin Cao 
118930eba9dfSKelvin Cao 	return chan_cnt;
119030eba9dfSKelvin Cao 
119130eba9dfSKelvin Cao err_exit:
119230eba9dfSKelvin Cao 	for (i = 0; i < chan_cnt; i++)
119330eba9dfSKelvin Cao 		switchtec_dma_chan_free(pdev, swdma_dev->swdma_chans[i]);
119430eba9dfSKelvin Cao 
119530eba9dfSKelvin Cao 	kfree(swdma_dev->swdma_chans);
119630eba9dfSKelvin Cao 
119730eba9dfSKelvin Cao 	return rc;
119830eba9dfSKelvin Cao }
119930eba9dfSKelvin Cao 
1200d9587042SKelvin Cao static void switchtec_dma_release(struct dma_device *dma_dev)
1201d9587042SKelvin Cao {
1202d9587042SKelvin Cao 	struct switchtec_dma_dev *swdma_dev =
1203d9587042SKelvin Cao 		container_of(dma_dev, struct switchtec_dma_dev, dma_dev);
120430eba9dfSKelvin Cao 	int i;
120530eba9dfSKelvin Cao 
120630eba9dfSKelvin Cao 	for (i = 0; i < swdma_dev->chan_cnt; i++)
120730eba9dfSKelvin Cao 		kfree(swdma_dev->swdma_chans[i]);
120830eba9dfSKelvin Cao 
120930eba9dfSKelvin Cao 	kfree(swdma_dev->swdma_chans);
1210d9587042SKelvin Cao 
1211d9587042SKelvin Cao 	put_device(dma_dev->dev);
1212d9587042SKelvin Cao 	kfree(swdma_dev);
1213d9587042SKelvin Cao }
1214d9587042SKelvin Cao 
1215d9587042SKelvin Cao static int switchtec_dma_create(struct pci_dev *pdev)
1216d9587042SKelvin Cao {
1217d9587042SKelvin Cao 	struct switchtec_dma_dev *swdma_dev;
121830eba9dfSKelvin Cao 	int chan_cnt, nr_vecs, irq, rc;
1219d9587042SKelvin Cao 	struct dma_device *dma;
1220d9587042SKelvin Cao 	struct dma_chan *chan;
1221d9587042SKelvin Cao 
1222d9587042SKelvin Cao 	/*
1223d9587042SKelvin Cao 	 * Create the switchtec dma device
1224d9587042SKelvin Cao 	 */
1225d9587042SKelvin Cao 	swdma_dev = kzalloc_obj(*swdma_dev, GFP_KERNEL);
1226d9587042SKelvin Cao 	if (!swdma_dev)
1227d9587042SKelvin Cao 		return -ENOMEM;
1228d9587042SKelvin Cao 
1229d9587042SKelvin Cao 	swdma_dev->bar = ioremap(pci_resource_start(pdev, 0),
1230d9587042SKelvin Cao 				 pci_resource_len(pdev, 0));
1231d9587042SKelvin Cao 
1232d9587042SKelvin Cao 	RCU_INIT_POINTER(swdma_dev->pdev, pdev);
1233d9587042SKelvin Cao 
1234d9587042SKelvin Cao 	nr_vecs = pci_msix_vec_count(pdev);
1235d9587042SKelvin Cao 	rc = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
1236d9587042SKelvin Cao 	if (rc < 0)
1237d9587042SKelvin Cao 		goto err_exit;
1238d9587042SKelvin Cao 
123930eba9dfSKelvin Cao 	irq = readw(swdma_dev->bar + SWITCHTEC_REG_CHAN_STS_VEC);
124030eba9dfSKelvin Cao 	pci_dbg(pdev, "Channel pause irq vector %d\n", irq);
124130eba9dfSKelvin Cao 
124230eba9dfSKelvin Cao 	rc = pci_request_irq(pdev, irq, NULL, switchtec_dma_chan_status_isr,
124330eba9dfSKelvin Cao 			     swdma_dev, KBUILD_MODNAME);
124430eba9dfSKelvin Cao 	if (rc)
124530eba9dfSKelvin Cao 		goto err_exit;
124630eba9dfSKelvin Cao 
124730eba9dfSKelvin Cao 	swdma_dev->chan_status_irq = irq;
124830eba9dfSKelvin Cao 
124930eba9dfSKelvin Cao 	chan_cnt = readl(swdma_dev->bar + SWITCHTEC_REG_CHAN_CNT);
125030eba9dfSKelvin Cao 	if (!chan_cnt) {
125130eba9dfSKelvin Cao 		pci_err(pdev, "No channel configured.\n");
125230eba9dfSKelvin Cao 		rc = -ENXIO;
125330eba9dfSKelvin Cao 		goto err_exit;
125430eba9dfSKelvin Cao 	}
125530eba9dfSKelvin Cao 
125630eba9dfSKelvin Cao 	chan_cnt = switchtec_dma_chans_enumerate(swdma_dev, pdev, chan_cnt);
125730eba9dfSKelvin Cao 	if (chan_cnt < 0) {
125830eba9dfSKelvin Cao 		pci_err(pdev, "Failed to enumerate dma channels: %d\n",
125930eba9dfSKelvin Cao 			chan_cnt);
126030eba9dfSKelvin Cao 		rc = -ENXIO;
126130eba9dfSKelvin Cao 		goto err_exit;
126230eba9dfSKelvin Cao 	}
126330eba9dfSKelvin Cao 
126430eba9dfSKelvin Cao 	swdma_dev->chan_cnt = chan_cnt;
126530eba9dfSKelvin Cao 
1266d9587042SKelvin Cao 	dma = &swdma_dev->dma_dev;
1267d9587042SKelvin Cao 	dma->copy_align = DMAENGINE_ALIGN_8_BYTES;
1268*3af11daeSKelvin Cao 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
1269*3af11daeSKelvin Cao 	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
1270d9587042SKelvin Cao 	dma->dev = get_device(&pdev->dev);
1271d9587042SKelvin Cao 
127230eba9dfSKelvin Cao 	dma->device_alloc_chan_resources = switchtec_dma_alloc_chan_resources;
127330eba9dfSKelvin Cao 	dma->device_free_chan_resources = switchtec_dma_free_chan_resources;
1274*3af11daeSKelvin Cao 	dma->device_prep_dma_memcpy = switchtec_dma_prep_memcpy;
1275*3af11daeSKelvin Cao 	dma->device_tx_status = switchtec_dma_tx_status;
1276*3af11daeSKelvin Cao 	dma->device_issue_pending = switchtec_dma_issue_pending;
1277*3af11daeSKelvin Cao 	dma->device_pause = switchtec_dma_pause;
1278*3af11daeSKelvin Cao 	dma->device_resume = switchtec_dma_resume;
127930eba9dfSKelvin Cao 	dma->device_terminate_all = switchtec_dma_terminate_all;
128030eba9dfSKelvin Cao 	dma->device_synchronize = switchtec_dma_synchronize;
1281d9587042SKelvin Cao 	dma->device_release = switchtec_dma_release;
1282d9587042SKelvin Cao 
1283d9587042SKelvin Cao 	rc = dma_async_device_register(dma);
1284d9587042SKelvin Cao 	if (rc) {
1285d9587042SKelvin Cao 		pci_err(pdev, "Failed to register dma device: %d\n", rc);
128630eba9dfSKelvin Cao 		goto err_chans_release_exit;
1287d9587042SKelvin Cao 	}
1288d9587042SKelvin Cao 
128930eba9dfSKelvin Cao 	pci_dbg(pdev, "Channel count: %d\n", chan_cnt);
129030eba9dfSKelvin Cao 
1291d9587042SKelvin Cao 	list_for_each_entry(chan, &dma->channels, device_node)
1292d9587042SKelvin Cao 		pci_dbg(pdev, "%s\n", dma_chan_name(chan));
1293d9587042SKelvin Cao 
1294d9587042SKelvin Cao 	pci_set_drvdata(pdev, swdma_dev);
1295d9587042SKelvin Cao 
1296d9587042SKelvin Cao 	return 0;
1297d9587042SKelvin Cao 
129830eba9dfSKelvin Cao err_chans_release_exit:
129930eba9dfSKelvin Cao 	switchtec_dma_chans_release(pdev, swdma_dev);
130030eba9dfSKelvin Cao 
1301d9587042SKelvin Cao err_exit:
130230eba9dfSKelvin Cao 	if (swdma_dev->chan_status_irq)
130330eba9dfSKelvin Cao 		free_irq(swdma_dev->chan_status_irq, swdma_dev);
130430eba9dfSKelvin Cao 
1305d9587042SKelvin Cao 	iounmap(swdma_dev->bar);
1306d9587042SKelvin Cao 	kfree(swdma_dev);
1307d9587042SKelvin Cao 	return rc;
1308d9587042SKelvin Cao }
1309d9587042SKelvin Cao 
1310d9587042SKelvin Cao static int switchtec_dma_probe(struct pci_dev *pdev,
1311d9587042SKelvin Cao 			       const struct pci_device_id *id)
1312d9587042SKelvin Cao {
1313d9587042SKelvin Cao 	int rc;
1314d9587042SKelvin Cao 
1315d9587042SKelvin Cao 	rc = pci_enable_device(pdev);
1316d9587042SKelvin Cao 	if (rc)
1317d9587042SKelvin Cao 		return rc;
1318d9587042SKelvin Cao 
1319d9587042SKelvin Cao 	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1320d9587042SKelvin Cao 
1321d9587042SKelvin Cao 	rc = pci_request_mem_regions(pdev, KBUILD_MODNAME);
1322d9587042SKelvin Cao 	if (rc)
1323d9587042SKelvin Cao 		goto err_disable;
1324d9587042SKelvin Cao 
1325d9587042SKelvin Cao 	pci_set_master(pdev);
1326d9587042SKelvin Cao 
1327d9587042SKelvin Cao 	rc = switchtec_dma_create(pdev);
1328d9587042SKelvin Cao 	if (rc)
1329d9587042SKelvin Cao 		goto err_free;
1330d9587042SKelvin Cao 
1331d9587042SKelvin Cao 	return 0;
1332d9587042SKelvin Cao 
1333d9587042SKelvin Cao err_free:
1334d9587042SKelvin Cao 	pci_free_irq_vectors(pdev);
1335d9587042SKelvin Cao 	pci_release_mem_regions(pdev);
1336d9587042SKelvin Cao 
1337d9587042SKelvin Cao err_disable:
1338d9587042SKelvin Cao 	pci_disable_device(pdev);
1339d9587042SKelvin Cao 
1340d9587042SKelvin Cao 	return rc;
1341d9587042SKelvin Cao }
1342d9587042SKelvin Cao 
1343d9587042SKelvin Cao static void switchtec_dma_remove(struct pci_dev *pdev)
1344d9587042SKelvin Cao {
1345d9587042SKelvin Cao 	struct switchtec_dma_dev *swdma_dev = pci_get_drvdata(pdev);
1346d9587042SKelvin Cao 
134730eba9dfSKelvin Cao 	switchtec_dma_chans_release(pdev, swdma_dev);
134830eba9dfSKelvin Cao 
1349d9587042SKelvin Cao 	rcu_assign_pointer(swdma_dev->pdev, NULL);
1350d9587042SKelvin Cao 	synchronize_rcu();
1351d9587042SKelvin Cao 
135230eba9dfSKelvin Cao 	pci_free_irq(pdev, swdma_dev->chan_status_irq, swdma_dev);
135330eba9dfSKelvin Cao 
1354d9587042SKelvin Cao 	pci_free_irq_vectors(pdev);
1355d9587042SKelvin Cao 
1356d9587042SKelvin Cao 	dma_async_device_unregister(&swdma_dev->dma_dev);
1357d9587042SKelvin Cao 
1358d9587042SKelvin Cao 	iounmap(swdma_dev->bar);
1359d9587042SKelvin Cao 	pci_release_mem_regions(pdev);
1360d9587042SKelvin Cao 	pci_disable_device(pdev);
1361d9587042SKelvin Cao }
1362d9587042SKelvin Cao 
1363d9587042SKelvin Cao /*
1364d9587042SKelvin Cao  * Also use the class code to identify the devices, as some of the
1365d9587042SKelvin Cao  * device IDs are also used for other devices with other classes by
1366d9587042SKelvin Cao  * Microsemi.
1367d9587042SKelvin Cao  */
1368d9587042SKelvin Cao #define SW_ID(vendor_id, device_id) \
1369d9587042SKelvin Cao 	{ \
1370d9587042SKelvin Cao 		.vendor     = vendor_id, \
1371d9587042SKelvin Cao 		.device     = device_id, \
1372d9587042SKelvin Cao 		.subvendor  = PCI_ANY_ID, \
1373d9587042SKelvin Cao 		.subdevice  = PCI_ANY_ID, \
1374d9587042SKelvin Cao 		.class      = PCI_CLASS_SYSTEM_OTHER << 8, \
1375d9587042SKelvin Cao 		.class_mask = 0xffffffff, \
1376d9587042SKelvin Cao 	}
1377d9587042SKelvin Cao 
1378d9587042SKelvin Cao static const struct pci_device_id switchtec_dma_pci_tbl[] = {
1379d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4000), /* PFX 100XG4 */
1380d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4084), /* PFX 84XG4 */
1381d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4068), /* PFX 68XG4 */
1382d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4052), /* PFX 52XG4 */
1383d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4036), /* PFX 36XG4 */
1384d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4028), /* PFX 28XG4 */
1385d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4100), /* PSX 100XG4 */
1386d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4184), /* PSX 84XG4 */
1387d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4168), /* PSX 68XG4 */
1388d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4152), /* PSX 52XG4 */
1389d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4136), /* PSX 36XG4 */
1390d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4128), /* PSX 28XG4 */
1391d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4352), /* PFXA 52XG4 */
1392d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4336), /* PFXA 36XG4 */
1393d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4328), /* PFXA 28XG4 */
1394d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4452), /* PSXA 52XG4 */
1395d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4436), /* PSXA 36XG4 */
1396d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4428), /* PSXA 28XG4 */
1397d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5000), /* PFX 100XG5 */
1398d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5084), /* PFX 84XG5 */
1399d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5068), /* PFX 68XG5 */
1400d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5052), /* PFX 52XG5 */
1401d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5036), /* PFX 36XG5 */
1402d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5028), /* PFX 28XG5 */
1403d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5100), /* PSX 100XG5 */
1404d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5184), /* PSX 84XG5 */
1405d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5168), /* PSX 68XG5 */
1406d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5152), /* PSX 52XG5 */
1407d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5136), /* PSX 36XG5 */
1408d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5128), /* PSX 28XG5 */
1409d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5300), /* PFXA 100XG5 */
1410d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5384), /* PFXA 84XG5 */
1411d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5368), /* PFXA 68XG5 */
1412d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5352), /* PFXA 52XG5 */
1413d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5336), /* PFXA 36XG5 */
1414d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5328), /* PFXA 28XG5 */
1415d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5400), /* PSXA 100XG5 */
1416d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5484), /* PSXA 84XG5 */
1417d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5468), /* PSXA 68XG5 */
1418d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5452), /* PSXA 52XG5 */
1419d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5436), /* PSXA 36XG5 */
1420d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5428), /* PSXA 28XG5 */
1421d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_EFAR,      0x1001), /* PCI1001 16XG4 */
1422d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_EFAR,      0x1002), /* PCI1002 16XG4 */
1423d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_EFAR,      0x1003), /* PCI1003 16XG4 */
1424d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_EFAR,      0x1004), /* PCI1004 16XG4 */
1425d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_EFAR,      0x1005), /* PCI1005 16XG4 */
1426d9587042SKelvin Cao 	SW_ID(PCI_VENDOR_ID_EFAR,      0x1006), /* PCI1006 16XG4 */
1427d9587042SKelvin Cao 	{0}
1428d9587042SKelvin Cao };
1429d9587042SKelvin Cao MODULE_DEVICE_TABLE(pci, switchtec_dma_pci_tbl);
1430d9587042SKelvin Cao 
1431d9587042SKelvin Cao static struct pci_driver switchtec_dma_pci_driver = {
1432d9587042SKelvin Cao 	.name           = KBUILD_MODNAME,
1433d9587042SKelvin Cao 	.id_table       = switchtec_dma_pci_tbl,
1434d9587042SKelvin Cao 	.probe          = switchtec_dma_probe,
1435d9587042SKelvin Cao 	.remove		= switchtec_dma_remove,
1436d9587042SKelvin Cao };
1437d9587042SKelvin Cao module_pci_driver(switchtec_dma_pci_driver);
1438