xref: /linux/drivers/dma/switchtec_dma.c (revision 30eba9df76adf1294e88214dbf9cea402fa7af37)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Microchip Switchtec(tm) DMA Controller Driver
4  * Copyright (c) 2025, Kelvin Cao <kelvin.cao@microchip.com>
5  * Copyright (c) 2025, Microchip Corporation
6  */
7 
8 #include <linux/bitfield.h>
9 #include <linux/circ_buf.h>
10 #include <linux/dmaengine.h>
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 #include <linux/delay.h>
14 #include <linux/iopoll.h>
15 
16 #include "dmaengine.h"
17 
18 MODULE_DESCRIPTION("Switchtec PCIe Switch DMA Engine");
19 MODULE_LICENSE("GPL");
20 MODULE_AUTHOR("Kelvin Cao");
21 
22 #define	SWITCHTEC_DMAC_CHAN_CTRL_OFFSET		0x1000
23 #define	SWITCHTEC_DMAC_CHAN_CFG_STS_OFFSET	0x160000
24 
25 #define SWITCHTEC_DMA_CHAN_HW_REGS_SIZE		0x1000
26 #define SWITCHTEC_DMA_CHAN_FW_REGS_SIZE		0x80
27 
28 #define SWITCHTEC_REG_CAP		0x80
29 #define SWITCHTEC_REG_CHAN_CNT		0x84
30 #define SWITCHTEC_REG_TAG_LIMIT		0x90
31 #define SWITCHTEC_REG_CHAN_STS_VEC	0x94
32 #define SWITCHTEC_REG_SE_BUF_CNT	0x98
33 #define SWITCHTEC_REG_SE_BUF_BASE	0x9a
34 
35 #define SWITCHTEC_CHAN_CTRL_PAUSE	BIT(0)
36 #define SWITCHTEC_CHAN_CTRL_HALT	BIT(1)
37 #define SWITCHTEC_CHAN_CTRL_RESET	BIT(2)
38 #define SWITCHTEC_CHAN_CTRL_ERR_PAUSE	BIT(3)
39 
40 #define SWITCHTEC_CHAN_STS_PAUSED	BIT(9)
41 #define SWITCHTEC_CHAN_STS_HALTED	BIT(10)
42 #define SWITCHTEC_CHAN_STS_PAUSED_MASK	GENMASK(29, 13)
43 
44 #define SWITCHTEC_DMA_SQ_SIZE	SZ_32K
45 #define SWITCHTEC_DMA_CQ_SIZE	SZ_32K
46 
47 #define SWITCHTEC_DMA_RING_SIZE	SZ_32K
48 
/*
 * Human-readable reasons for a channel pause, indexed by bit position
 * (13..29) within the channel status register; consumed by
 * switchtec_dma_chan_status_isr() via ffs() on the masked status.
 */
static const char * const channel_status_str[] = {
	[13] = "received a VDM with length error status",
	[14] = "received a VDM or Cpl with Unsupported Request error status",
	[15] = "received a VDM or Cpl with Completion Abort error status",
	[16] = "received a VDM with ECRC error status",
	[17] = "received a VDM with EP error status",
	[18] = "received a VDM with Reserved Cpl error status",
	[19] = "received only part of split SE CplD",
	[20] = "the ISP_DMAC detected a Completion Time Out",
	[21] = "received a Cpl with Unsupported Request status",
	[22] = "received a Cpl with Completion Abort status",
	[23] = "received a Cpl with a reserved status",
	[24] = "received a TLP with ECRC error status in its metadata",
	[25] = "received a TLP with the EP bit set in the header",
	[26] = "the ISP_DMAC tried to process a SE with an invalid Connection ID",
	[27] = "the ISP_DMAC tried to process a SE with an invalid Remote Host interrupt",
	[28] = "a reserved opcode was detected in an SE",
	[29] = "received a SE Cpl with error status",
};
68 
/*
 * Per-channel hardware control/status register block, mapped at
 * SWITCHTEC_DMAC_CHAN_CTRL_OFFSET + index * SWITCHTEC_DMA_CHAN_HW_REGS_SIZE.
 */
struct chan_hw_regs {
	u16 cq_head;	/* CQ head index; also read to flush posted writes */
	u16 rsvd1;
	u16 sq_tail;
	u16 rsvd2;
	u8 ctrl;	/* SWITCHTEC_CHAN_CTRL_* bits (8-bit register) */
	u8 rsvd3[3];
	u16 status;	/* SWITCHTEC_CHAN_STS_* bits */
	u16 rsvd4;
};
79 
80 #define PERF_BURST_SCALE_MASK	GENMASK_U32(3,   2)
81 #define PERF_MRRS_MASK		GENMASK_U32(6,   4)
82 #define PERF_INTERVAL_MASK	GENMASK_U32(10,  8)
83 #define PERF_BURST_SIZE_MASK	GENMASK_U32(14, 12)
84 #define PERF_ARB_WEIGHT_MASK	GENMASK_U32(31, 24)
85 
86 #define SE_BUF_BASE_MASK	GENMASK_U32(10,  2)
87 #define SE_BUF_LEN_MASK		GENMASK_U32(20, 12)
88 #define SE_THRESH_MASK		GENMASK_U32(31, 23)
89 
90 #define SWITCHTEC_CHAN_ENABLE	BIT(1)
91 
/*
 * Per-channel firmware configuration/status register block, mapped at
 * SWITCHTEC_DMAC_CHAN_CFG_STS_OFFSET + index * SWITCHTEC_DMA_CHAN_FW_REGS_SIZE.
 */
struct chan_fw_regs {
	u32 valid_en_se;	/* enable bit + SE buffer base/len/threshold fields */
	u32 cq_base_lo;		/* CQ DMA base address (low/high halves) */
	u32 cq_base_hi;
	u16 cq_size;		/* CQ entry count */
	u16 rsvd1;
	u32 sq_base_lo;		/* SQ DMA base address (low/high halves) */
	u32 sq_base_hi;
	u16 sq_size;		/* SQ entry count */
	u16 rsvd2;
	u32 int_vec;		/* MSI-X vector used for CE notification */
	u32 perf_cfg;		/* PERF_* tuning fields */
	u32 rsvd3;
	/* performance monitoring counters below */
	u32 perf_latency_selector;
	u32 perf_fetched_se_cnt_lo;
	u32 perf_fetched_se_cnt_hi;
	u32 perf_byte_cnt_lo;
	u32 perf_byte_cnt_hi;
	u32 rsvd4;
	u16 perf_se_pending;
	u16 perf_se_buf_empty;
	u32 perf_chan_idle;
	u32 perf_lat_max;
	u32 perf_lat_min;
	u32 perf_lat_last;
	u16 sq_current;
	u16 sq_phase;
	u16 cq_current;
	u16 cq_phase;
};
122 
/* Driver state for one DMA channel, embedding the dmaengine channel. */
struct switchtec_dma_chan {
	struct switchtec_dma_dev *swdma_dev;
	struct dma_chan dma_chan;
	struct chan_hw_regs __iomem *mmio_chan_hw;	/* control/status regs */
	struct chan_fw_regs __iomem *mmio_chan_fw;	/* config/perf regs */

	/* Serialize hardware control register access */
	spinlock_t hw_ctrl_lock;

	/* bottom half that reaps completions (switchtec_dma_desc_task) */
	struct tasklet_struct desc_task;

	/* Serialize descriptor preparation */
	spinlock_t submit_lock;
	bool ring_active;	/* submissions allowed */
	int cid;		/* next connection ID to assign */

	/* Serialize completion processing */
	spinlock_t complete_lock;
	bool comp_ring_active;	/* completion reaping allowed */

	/* channel index and irq */
	int index;
	int irq;

	/*
	 * In driver context, head is advanced by producer while
	 * tail is advanced by consumer.
	 */

	/* the head and tail for both desc_ring and hw_sq */
	int head;
	int tail;
	int phase_tag;		/* expected CQ phase; flips on CQ wrap */
	struct switchtec_dma_hw_se_desc *hw_sq;	/* coherent SQ ring */
	dma_addr_t dma_addr_sq;

	/* the tail for hw_cq */
	int cq_tail;
	struct switchtec_dma_hw_ce *hw_cq;	/* coherent CQ ring */
	dma_addr_t dma_addr_cq;

	struct list_head list;

	/* software descriptors, parallel to hw_sq entries */
	struct switchtec_dma_desc *desc_ring[SWITCHTEC_DMA_RING_SIZE];
};
168 
/*
 * Per-device state.  @pdev is RCU-protected: it is cleared in
 * switchtec_dma_remove() and checked under rcu_read_lock() before any
 * MMIO access so a removed device is never touched.
 */
struct switchtec_dma_dev {
	struct dma_device dma_dev;
	struct pci_dev __rcu *pdev;
	void __iomem *bar;	/* BAR 0 mapping */

	struct switchtec_dma_chan **swdma_chans;
	int chan_cnt;
	int chan_status_irq;	/* vector for the channel-pause status irq */
};
178 
/* Operation selector for channel_op(). */
enum chan_op {
	ENABLE_CHAN,
	DISABLE_CHAN,
};
183 
/* SE opcodes written into switchtec_dma_hw_se_desc.opc. */
enum switchtec_dma_opcode {
	SWITCHTEC_DMA_OPC_MEMCPY = 0,
	SWITCHTEC_DMA_OPC_RDIMM = 0x1,
	SWITCHTEC_DMA_OPC_WRIMM = 0x2,
	SWITCHTEC_DMA_OPC_RHI = 0x6,
	SWITCHTEC_DMA_OPC_NOP = 0x7,
};
191 
/*
 * Hardware submission element (SE) descriptor, little-endian layout
 * consumed by the DMA engine from the SQ ring.
 */
struct switchtec_dma_hw_se_desc {
	u8 opc;			/* enum switchtec_dma_opcode */
	u8 ctrl;
	__le16 tlp_setting;
	__le16 rsvd1;
	__le16 cid;		/* connection ID, echoed back in the CE */
	__le32 byte_cnt;
	__le32 addr_lo; /* SADDR_LO/WIADDR_LO */
	__le32 addr_hi; /* SADDR_HI/WIADDR_HI */
	__le32 daddr_lo;
	__le32 daddr_hi;
	__le16 dfid;
	__le16 sfid;
};
206 
207 #define SWITCHTEC_CE_SC_LEN_ERR		BIT(0)
208 #define SWITCHTEC_CE_SC_UR		BIT(1)
209 #define SWITCHTEC_CE_SC_CA		BIT(2)
210 #define SWITCHTEC_CE_SC_RSVD_CPL	BIT(3)
211 #define SWITCHTEC_CE_SC_ECRC_ERR	BIT(4)
212 #define SWITCHTEC_CE_SC_EP_SET		BIT(5)
213 #define SWITCHTEC_CE_SC_D_RD_CTO	BIT(8)
214 #define SWITCHTEC_CE_SC_D_RIMM_UR	BIT(9)
215 #define SWITCHTEC_CE_SC_D_RIMM_CA	BIT(10)
216 #define SWITCHTEC_CE_SC_D_RIMM_RSVD_CPL	BIT(11)
217 #define SWITCHTEC_CE_SC_D_ECRC		BIT(12)
218 #define SWITCHTEC_CE_SC_D_EP_SET	BIT(13)
219 #define SWITCHTEC_CE_SC_D_BAD_CONNID	BIT(14)
220 #define SWITCHTEC_CE_SC_D_BAD_RHI_ADDR	BIT(15)
221 #define SWITCHTEC_CE_SC_D_INVD_CMD	BIT(16)
222 #define SWITCHTEC_CE_SC_MASK		GENMASK(16, 0)
223 
/*
 * Hardware completion element (CE) written by the engine into the CQ
 * ring.  Validity is signalled by @phase_tag differing from the
 * driver's current phase.
 */
struct switchtec_dma_hw_ce {
	__le32 rdimm_cpl_dw0;
	__le32 rdimm_cpl_dw1;
	__le32 rsvd1;
	__le32 cpl_byte_cnt;	/* bytes actually transferred */
	__le16 sq_head;
	__le16 rsvd2;
	__le32 rsvd3;
	__le32 sts_code;	/* SWITCHTEC_CE_SC_* error bits; 0 on success */
	__le16 cid;		/* connection ID from the originating SE */
	__le16 phase_tag;	/* written last by hardware */
};
236 
/* Software descriptor wrapping one SE slot in the SQ ring. */
struct switchtec_dma_desc {
	struct dma_async_tx_descriptor txd;
	struct switchtec_dma_hw_se_desc *hw;	/* backing SQ entry */
	u32 orig_size;		/* requested length, for residue reporting */
	bool completed;		/* CE seen; may be retired when tail reaches it */
};
243 
244 static int wait_for_chan_status(struct chan_hw_regs __iomem *chan_hw, u32 mask,
245 				bool set)
246 {
247 	u32 status;
248 
249 	return readl_poll_timeout_atomic(&chan_hw->status, status,
250 					 (set && (status & mask)) ||
251 					 (!set && !(status & mask)),
252 					 10, 100 * USEC_PER_MSEC);
253 }
254 
255 static int halt_channel(struct switchtec_dma_chan *swdma_chan)
256 {
257 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
258 	struct pci_dev *pdev;
259 	int ret;
260 
261 	rcu_read_lock();
262 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
263 	if (!pdev) {
264 		ret = -ENODEV;
265 		goto unlock_and_exit;
266 	}
267 
268 	spin_lock(&swdma_chan->hw_ctrl_lock);
269 	writeb(SWITCHTEC_CHAN_CTRL_HALT, &chan_hw->ctrl);
270 	ret = wait_for_chan_status(chan_hw, SWITCHTEC_CHAN_STS_HALTED, true);
271 	spin_unlock(&swdma_chan->hw_ctrl_lock);
272 
273 unlock_and_exit:
274 	rcu_read_unlock();
275 	return ret;
276 }
277 
278 static int unhalt_channel(struct switchtec_dma_chan *swdma_chan)
279 {
280 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
281 	struct pci_dev *pdev;
282 	u8 ctrl;
283 	int ret;
284 
285 	rcu_read_lock();
286 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
287 	if (!pdev) {
288 		ret = -ENODEV;
289 		goto unlock_and_exit;
290 	}
291 
292 	spin_lock(&swdma_chan->hw_ctrl_lock);
293 	ctrl = readb(&chan_hw->ctrl);
294 	ctrl &= ~SWITCHTEC_CHAN_CTRL_HALT;
295 	writeb(ctrl, &chan_hw->ctrl);
296 	ret = wait_for_chan_status(chan_hw, SWITCHTEC_CHAN_STS_HALTED, false);
297 	spin_unlock(&swdma_chan->hw_ctrl_lock);
298 
299 unlock_and_exit:
300 	rcu_read_unlock();
301 	return ret;
302 }
303 
/*
 * Force posted MMIO writes to reach the device by issuing a read from
 * the same register block.
 */
static void flush_pci_write(struct chan_hw_regs __iomem *chan_hw)
{
	readl(&chan_hw->cq_head);
}
308 
309 static int reset_channel(struct switchtec_dma_chan *swdma_chan)
310 {
311 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
312 	struct pci_dev *pdev;
313 
314 	rcu_read_lock();
315 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
316 	if (!pdev) {
317 		rcu_read_unlock();
318 		return -ENODEV;
319 	}
320 
321 	spin_lock(&swdma_chan->hw_ctrl_lock);
322 	writel(SWITCHTEC_CHAN_CTRL_RESET | SWITCHTEC_CHAN_CTRL_ERR_PAUSE,
323 	       &chan_hw->ctrl);
324 	flush_pci_write(chan_hw);
325 
326 	udelay(1000);
327 
328 	writel(SWITCHTEC_CHAN_CTRL_ERR_PAUSE, &chan_hw->ctrl);
329 	spin_unlock(&swdma_chan->hw_ctrl_lock);
330 	flush_pci_write(chan_hw);
331 
332 	rcu_read_unlock();
333 	return 0;
334 }
335 
336 static int pause_reset_channel(struct switchtec_dma_chan *swdma_chan)
337 {
338 	struct chan_hw_regs __iomem *chan_hw = swdma_chan->mmio_chan_hw;
339 	struct pci_dev *pdev;
340 
341 	rcu_read_lock();
342 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
343 	if (!pdev) {
344 		rcu_read_unlock();
345 		return -ENODEV;
346 	}
347 
348 	spin_lock(&swdma_chan->hw_ctrl_lock);
349 	writeb(SWITCHTEC_CHAN_CTRL_PAUSE, &chan_hw->ctrl);
350 	spin_unlock(&swdma_chan->hw_ctrl_lock);
351 
352 	flush_pci_write(chan_hw);
353 
354 	rcu_read_unlock();
355 
356 	/* wait 60ms to ensure no pending CEs */
357 	mdelay(60);
358 
359 	return reset_channel(swdma_chan);
360 }
361 
362 static int channel_op(struct switchtec_dma_chan *swdma_chan, int op)
363 {
364 	struct chan_fw_regs __iomem *chan_fw = swdma_chan->mmio_chan_fw;
365 	struct pci_dev *pdev;
366 	u32 valid_en_se;
367 
368 	rcu_read_lock();
369 	pdev = rcu_dereference(swdma_chan->swdma_dev->pdev);
370 	if (!pdev) {
371 		rcu_read_unlock();
372 		return -ENODEV;
373 	}
374 
375 	valid_en_se = readl(&chan_fw->valid_en_se);
376 	if (op == ENABLE_CHAN)
377 		valid_en_se |= SWITCHTEC_CHAN_ENABLE;
378 	else
379 		valid_en_se &= ~SWITCHTEC_CHAN_ENABLE;
380 
381 	writel(valid_en_se, &chan_fw->valid_en_se);
382 
383 	rcu_read_unlock();
384 	return 0;
385 }
386 
/* Enable the channel (sets SWITCHTEC_CHAN_ENABLE in valid_en_se). */
static int enable_channel(struct switchtec_dma_chan *swdma_chan)
{
	return channel_op(swdma_chan, ENABLE_CHAN);
}
391 
/* Disable the channel (clears SWITCHTEC_CHAN_ENABLE in valid_en_se). */
static int disable_channel(struct switchtec_dma_chan *swdma_chan)
{
	return channel_op(swdma_chan, DISABLE_CHAN);
}
396 
/*
 * Reap hardware completion elements (CEs) from the CQ ring and retire
 * the corresponding submitted descriptors.
 *
 * A CE is valid when its phase_tag differs from the driver's current
 * phase_tag; the phase flips each time cq_tail wraps.  CEs may arrive
 * out of order with respect to the SQ, so a descriptor is first marked
 * ->completed, and the SQ tail only advances over a contiguous run of
 * completed entries.  Invoked from the per-channel tasklet and from
 * switchtec_dma_abort_desc(); serialized by complete_lock, which is
 * taken and dropped once per CE so other work can interleave.
 */
static void
switchtec_dma_cleanup_completed(struct switchtec_dma_chan *swdma_chan)
{
	struct device *chan_dev = &swdma_chan->dma_chan.dev->device;
	struct switchtec_dma_desc *desc;
	struct switchtec_dma_hw_ce *ce;
	struct dmaengine_result res;
	int tail, cid, se_idx, i;
	__le16 phase_tag;
	u32 sts_code;
	__le32 *p;

	do {
		spin_lock_bh(&swdma_chan->complete_lock);
		if (!swdma_chan->comp_ring_active) {
			spin_unlock_bh(&swdma_chan->complete_lock);
			break;
		}

		ce = &swdma_chan->hw_cq[swdma_chan->cq_tail];
		/*
		 * phase_tag is updated by hardware, ensure the value is
		 * not from the cache
		 */
		phase_tag = smp_load_acquire(&ce->phase_tag);
		if (le16_to_cpu(phase_tag) == swdma_chan->phase_tag) {
			/* no new CE to consume */
			spin_unlock_bh(&swdma_chan->complete_lock);
			break;
		}

		/* low bits of the CID index the SE/descriptor rings */
		cid = le16_to_cpu(ce->cid);
		se_idx = cid & (SWITCHTEC_DMA_SQ_SIZE - 1);
		desc = swdma_chan->desc_ring[se_idx];

		tail = swdma_chan->tail;

		res.residue = desc->orig_size - le32_to_cpu(ce->cpl_byte_cnt);

		sts_code = le32_to_cpu(ce->sts_code);

		if (!(sts_code & SWITCHTEC_CE_SC_MASK)) {
			res.result = DMA_TRANS_NOERROR;
		} else {
			if (sts_code & SWITCHTEC_CE_SC_D_RD_CTO)
				res.result = DMA_TRANS_READ_FAILED;
			else
				res.result = DMA_TRANS_WRITE_FAILED;

			dev_err(chan_dev, "CID 0x%04x failed, SC 0x%08x\n", cid,
				(u32)(sts_code & SWITCHTEC_CE_SC_MASK));

			/* dump the raw CE dwords for diagnosis */
			p = (__le32 *)ce;
			for (i = 0; i < sizeof(*ce) / 4; i++) {
				dev_err(chan_dev, "CE DW%d: 0x%08x\n", i,
					le32_to_cpu(*p));
				p++;
			}
		}

		desc->completed = true;

		/* consume the CE and report the new head to hardware */
		swdma_chan->cq_tail++;
		swdma_chan->cq_tail &= SWITCHTEC_DMA_CQ_SIZE - 1;

		rcu_read_lock();
		if (!rcu_dereference(swdma_chan->swdma_dev->pdev)) {
			rcu_read_unlock();
			spin_unlock_bh(&swdma_chan->complete_lock);
			return;
		}
		writew(swdma_chan->cq_tail, &swdma_chan->mmio_chan_hw->cq_head);
		rcu_read_unlock();

		/* phase flips on every CQ wrap */
		if (swdma_chan->cq_tail == 0)
			swdma_chan->phase_tag = !swdma_chan->phase_tag;

		/*  Out of order CE */
		if (se_idx != tail) {
			spin_unlock_bh(&swdma_chan->complete_lock);
			continue;
		}

		/* retire the contiguous run of completed descriptors */
		do {
			dma_cookie_complete(&desc->txd);
			dma_descriptor_unmap(&desc->txd);
			dmaengine_desc_get_callback_invoke(&desc->txd, &res);
			desc->txd.callback = NULL;
			desc->txd.callback_result = NULL;
			desc->completed = false;

			tail++;
			tail &= SWITCHTEC_DMA_SQ_SIZE - 1;

			/*
			 * Ensure the desc updates are visible before updating
			 * the tail index
			 */
			smp_store_release(&swdma_chan->tail, tail);
			desc = swdma_chan->desc_ring[swdma_chan->tail];
			if (!desc->completed)
				break;
		} while (CIRC_CNT(READ_ONCE(swdma_chan->head), swdma_chan->tail,
				  SWITCHTEC_DMA_SQ_SIZE));

		spin_unlock_bh(&swdma_chan->complete_lock);
	} while (1);
}
504 
/*
 * Complete every outstanding descriptor with DMA_TRANS_ABORTED.
 *
 * @force: when 0, first reap any CEs that did complete normally and
 *         invoke client callbacks for the aborted remainder; when
 *         non-zero, skip both and just drain the ring (used from
 *         switchtec_dma_synchronize()).
 */
static void
switchtec_dma_abort_desc(struct switchtec_dma_chan *swdma_chan, int force)
{
	struct switchtec_dma_desc *desc;
	struct dmaengine_result res;

	if (!force)
		switchtec_dma_cleanup_completed(swdma_chan);

	spin_lock_bh(&swdma_chan->complete_lock);

	/* walk tail up to head, aborting each pending descriptor */
	while (CIRC_CNT(swdma_chan->head, swdma_chan->tail,
			SWITCHTEC_DMA_SQ_SIZE) >= 1) {
		desc = swdma_chan->desc_ring[swdma_chan->tail];

		res.residue = desc->orig_size;
		res.result = DMA_TRANS_ABORTED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		if (!force)
			dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		swdma_chan->tail++;
		swdma_chan->tail &= SWITCHTEC_DMA_SQ_SIZE - 1;
	}

	spin_unlock_bh(&swdma_chan->complete_lock);
}
536 
537 static void switchtec_dma_chan_stop(struct switchtec_dma_chan *swdma_chan)
538 {
539 	int rc;
540 
541 	rc = halt_channel(swdma_chan);
542 	if (rc)
543 		return;
544 
545 	rcu_read_lock();
546 	if (!rcu_dereference(swdma_chan->swdma_dev->pdev)) {
547 		rcu_read_unlock();
548 		return;
549 	}
550 
551 	writel(0, &swdma_chan->mmio_chan_fw->sq_base_lo);
552 	writel(0, &swdma_chan->mmio_chan_fw->sq_base_hi);
553 	writel(0, &swdma_chan->mmio_chan_fw->cq_base_lo);
554 	writel(0, &swdma_chan->mmio_chan_fw->cq_base_hi);
555 
556 	rcu_read_unlock();
557 }
558 
/*
 * dmaengine device_terminate_all hook: stop completion processing,
 * then pause and reset the channel.  Outstanding descriptors are
 * reclaimed later by switchtec_dma_synchronize().
 */
static int switchtec_dma_terminate_all(struct dma_chan *chan)
{
	struct switchtec_dma_chan *swdma_chan =
		container_of(chan, struct switchtec_dma_chan, dma_chan);

	spin_lock_bh(&swdma_chan->complete_lock);
	swdma_chan->comp_ring_active = false;
	spin_unlock_bh(&swdma_chan->complete_lock);

	return pause_reset_channel(swdma_chan);
}
570 
/*
 * dmaengine device_synchronize hook: forcibly abort all outstanding
 * descriptors (no callbacks), bring the channel back up
 * (enable -> reset -> unhalt), and reinitialize all ring indices and
 * the cookie state to an empty channel.
 */
static void switchtec_dma_synchronize(struct dma_chan *chan)
{
	struct switchtec_dma_chan *swdma_chan =
		container_of(chan, struct switchtec_dma_chan, dma_chan);

	int rc;

	switchtec_dma_abort_desc(swdma_chan, 1);

	rc = enable_channel(swdma_chan);
	if (rc)
		return;

	rc = reset_channel(swdma_chan);
	if (rc)
		return;

	rc = unhalt_channel(swdma_chan);
	if (rc)
		return;

	/* restart both rings from index 0 with phase tag 0 */
	spin_lock_bh(&swdma_chan->submit_lock);
	swdma_chan->head = 0;
	spin_unlock_bh(&swdma_chan->submit_lock);

	spin_lock_bh(&swdma_chan->complete_lock);
	swdma_chan->comp_ring_active = true;
	swdma_chan->phase_tag = 0;
	swdma_chan->tail = 0;
	swdma_chan->cq_tail = 0;
	swdma_chan->cid = 0;
	dma_cookie_init(chan);
	spin_unlock_bh(&swdma_chan->complete_lock);
}
605 
/* Tasklet body: reap completed descriptors outside hard-irq context. */
static void switchtec_dma_desc_task(unsigned long data)
{
	struct switchtec_dma_chan *swdma_chan = (void *)data;

	switchtec_dma_cleanup_completed(swdma_chan);
}
612 
613 static irqreturn_t switchtec_dma_isr(int irq, void *chan)
614 {
615 	struct switchtec_dma_chan *swdma_chan = chan;
616 
617 	if (swdma_chan->comp_ring_active)
618 		tasklet_schedule(&swdma_chan->desc_task);
619 
620 	return IRQ_HANDLED;
621 }
622 
/*
 * Device-wide channel-pause status interrupt: scan every channel's
 * status register and log the reason for any error pause, using the
 * lowest set bit in the SWITCHTEC_CHAN_STS_PAUSED_MASK range as an
 * index into channel_status_str[].
 */
static irqreturn_t switchtec_dma_chan_status_isr(int irq, void *dma)
{
	struct switchtec_dma_dev *swdma_dev = dma;
	struct dma_device *dma_dev = &swdma_dev->dma_dev;
	struct switchtec_dma_chan *swdma_chan;
	struct chan_hw_regs __iomem *chan_hw;
	struct device *chan_dev;
	struct dma_chan *chan;
	u32 chan_status;
	int bit;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		swdma_chan = container_of(chan, struct switchtec_dma_chan,
					  dma_chan);
		chan_dev = &swdma_chan->dma_chan.dev->device;
		chan_hw = swdma_chan->mmio_chan_hw;

		rcu_read_lock();
		if (!rcu_dereference(swdma_dev->pdev)) {
			/* device gone: abandon the scan entirely */
			rcu_read_unlock();
			goto out;
		}

		chan_status = readl(&chan_hw->status);
		chan_status &= SWITCHTEC_CHAN_STS_PAUSED_MASK;
		rcu_read_unlock();

		/* ffs() is 1-based; 0 means no pause-reason bit set */
		bit = ffs(chan_status);
		if (!bit)
			dev_dbg(chan_dev, "No pause bit set.\n");
		else
			dev_err(chan_dev, "Paused, %s\n",
				channel_status_str[bit - 1]);
	}

out:
	return IRQ_HANDLED;
}
661 
662 static void switchtec_dma_free_desc(struct switchtec_dma_chan *swdma_chan)
663 {
664 	struct switchtec_dma_dev *swdma_dev = swdma_chan->swdma_dev;
665 	size_t size;
666 	int i;
667 
668 	size = SWITCHTEC_DMA_SQ_SIZE * sizeof(*swdma_chan->hw_sq);
669 	if (swdma_chan->hw_sq)
670 		dma_free_coherent(swdma_dev->dma_dev.dev, size,
671 				  swdma_chan->hw_sq, swdma_chan->dma_addr_sq);
672 
673 	size = SWITCHTEC_DMA_CQ_SIZE * sizeof(*swdma_chan->hw_cq);
674 	if (swdma_chan->hw_cq)
675 		dma_free_coherent(swdma_dev->dma_dev.dev, size,
676 				  swdma_chan->hw_cq, swdma_chan->dma_addr_cq);
677 
678 	for (i = 0; i < SWITCHTEC_DMA_RING_SIZE; i++)
679 		kfree(swdma_chan->desc_ring[i]);
680 }
681 
/*
 * Allocate the coherent SQ/CQ rings and the software descriptor ring,
 * then program the ring base addresses and sizes into the channel's
 * firmware registers.  On any failure everything allocated so far is
 * released via switchtec_dma_free_desc().
 */
static int switchtec_dma_alloc_desc(struct switchtec_dma_chan *swdma_chan)
{
	struct switchtec_dma_dev *swdma_dev = swdma_chan->swdma_dev;
	struct chan_fw_regs __iomem *chan_fw = swdma_chan->mmio_chan_fw;
	struct switchtec_dma_desc *desc;
	struct pci_dev *pdev;
	size_t size;
	int rc, i;

	swdma_chan->head = 0;
	swdma_chan->tail = 0;
	swdma_chan->cq_tail = 0;

	size = SWITCHTEC_DMA_SQ_SIZE * sizeof(*swdma_chan->hw_sq);
	swdma_chan->hw_sq = dma_alloc_coherent(swdma_dev->dma_dev.dev, size,
					       &swdma_chan->dma_addr_sq,
					       GFP_NOWAIT);
	if (!swdma_chan->hw_sq) {
		rc = -ENOMEM;
		goto free_and_exit;
	}

	size = SWITCHTEC_DMA_CQ_SIZE * sizeof(*swdma_chan->hw_cq);
	swdma_chan->hw_cq = dma_alloc_coherent(swdma_dev->dma_dev.dev, size,
					       &swdma_chan->dma_addr_cq,
					       GFP_NOWAIT);
	if (!swdma_chan->hw_cq) {
		rc = -ENOMEM;
		goto free_and_exit;
	}

	/* reset host phase tag */
	swdma_chan->phase_tag = 0;

	for (i = 0; i < SWITCHTEC_DMA_RING_SIZE; i++) {
		desc = kzalloc_obj(*desc, GFP_NOWAIT);
		if (!desc) {
			rc = -ENOMEM;
			goto free_and_exit;
		}

		dma_async_tx_descriptor_init(&desc->txd, &swdma_chan->dma_chan);
		desc->hw = &swdma_chan->hw_sq[i];
		/* starts "completed" so the reaper can advance over it */
		desc->completed = true;

		swdma_chan->desc_ring[i] = desc;
	}

	rcu_read_lock();
	pdev = rcu_dereference(swdma_dev->pdev);
	if (!pdev) {
		rcu_read_unlock();
		rc = -ENODEV;
		goto free_and_exit;
	}

	/* set sq/cq */
	writel(lower_32_bits(swdma_chan->dma_addr_sq), &chan_fw->sq_base_lo);
	writel(upper_32_bits(swdma_chan->dma_addr_sq), &chan_fw->sq_base_hi);
	writel(lower_32_bits(swdma_chan->dma_addr_cq), &chan_fw->cq_base_lo);
	writel(upper_32_bits(swdma_chan->dma_addr_cq), &chan_fw->cq_base_hi);

	writew(SWITCHTEC_DMA_SQ_SIZE, &swdma_chan->mmio_chan_fw->sq_size);
	writew(SWITCHTEC_DMA_CQ_SIZE, &swdma_chan->mmio_chan_fw->cq_size);

	rcu_read_unlock();
	return 0;

free_and_exit:
	switchtec_dma_free_desc(swdma_chan);
	return rc;
}
754 
/*
 * dmaengine device_alloc_chan_resources hook: allocate the rings,
 * bring the channel up (enable -> reset -> unhalt), activate both
 * ring directions, and log the current performance configuration.
 *
 * Returns the number of usable descriptors (the SQ depth) on success,
 * or a negative errno.
 */
static int switchtec_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct switchtec_dma_chan *swdma_chan =
		container_of(chan, struct switchtec_dma_chan, dma_chan);
	struct switchtec_dma_dev *swdma_dev = swdma_chan->swdma_dev;
	u32 perf_cfg;
	int rc;

	rc = switchtec_dma_alloc_desc(swdma_chan);
	if (rc)
		return rc;

	rc = enable_channel(swdma_chan);
	if (rc)
		return rc;

	rc = reset_channel(swdma_chan);
	if (rc)
		return rc;

	rc = unhalt_channel(swdma_chan);
	if (rc)
		return rc;

	swdma_chan->ring_active = true;
	swdma_chan->comp_ring_active = true;
	swdma_chan->cid = 0;

	dma_cookie_init(chan);

	rcu_read_lock();
	if (!rcu_dereference(swdma_dev->pdev)) {
		rcu_read_unlock();
		return -ENODEV;
	}

	perf_cfg = readl(&swdma_chan->mmio_chan_fw->perf_cfg);
	rcu_read_unlock();

	dev_dbg(&chan->dev->device, "Burst Size:  0x%x\n",
		FIELD_GET(PERF_BURST_SIZE_MASK, perf_cfg));

	dev_dbg(&chan->dev->device, "Burst Scale: 0x%x\n",
		FIELD_GET(PERF_BURST_SCALE_MASK, perf_cfg));

	dev_dbg(&chan->dev->device, "Interval:    0x%x\n",
		FIELD_GET(PERF_INTERVAL_MASK, perf_cfg));

	dev_dbg(&chan->dev->device, "Arb Weight:  0x%x\n",
		FIELD_GET(PERF_ARB_WEIGHT_MASK, perf_cfg));

	dev_dbg(&chan->dev->device, "MRRS:        0x%x\n",
		FIELD_GET(PERF_MRRS_MASK, perf_cfg));

	return SWITCHTEC_DMA_SQ_SIZE;
}
811 
/*
 * dmaengine device_free_chan_resources hook: deactivate both ring
 * directions first (so ISR/tasklet become no-ops), then stop the
 * hardware, abort remaining descriptors (callbacks still invoked),
 * free the rings, and finally disable the channel.
 */
static void switchtec_dma_free_chan_resources(struct dma_chan *chan)
{
	struct switchtec_dma_chan *swdma_chan =
		container_of(chan, struct switchtec_dma_chan, dma_chan);

	spin_lock_bh(&swdma_chan->submit_lock);
	swdma_chan->ring_active = false;
	spin_unlock_bh(&swdma_chan->submit_lock);

	spin_lock_bh(&swdma_chan->complete_lock);
	swdma_chan->comp_ring_active = false;
	spin_unlock_bh(&swdma_chan->complete_lock);

	switchtec_dma_chan_stop(swdma_chan);
	switchtec_dma_abort_desc(swdma_chan, 0);
	switchtec_dma_free_desc(swdma_chan);

	disable_channel(swdma_chan);
}
831 
832 static int switchtec_dma_chan_init(struct switchtec_dma_dev *swdma_dev,
833 				   struct pci_dev *pdev, int i)
834 {
835 	struct dma_device *dma = &swdma_dev->dma_dev;
836 	struct switchtec_dma_chan *swdma_chan;
837 	u32 valid_en_se, thresh;
838 	int se_buf_len, irq, rc;
839 	struct dma_chan *chan;
840 
841 	swdma_chan = kzalloc_obj(*swdma_chan, GFP_KERNEL);
842 	if (!swdma_chan)
843 		return -ENOMEM;
844 
845 	swdma_chan->phase_tag = 0;
846 	swdma_chan->index = i;
847 	swdma_chan->swdma_dev = swdma_dev;
848 
849 	spin_lock_init(&swdma_chan->hw_ctrl_lock);
850 	spin_lock_init(&swdma_chan->submit_lock);
851 	spin_lock_init(&swdma_chan->complete_lock);
852 	tasklet_init(&swdma_chan->desc_task, switchtec_dma_desc_task,
853 		     (unsigned long)swdma_chan);
854 
855 	swdma_chan->mmio_chan_fw =
856 		swdma_dev->bar + SWITCHTEC_DMAC_CHAN_CFG_STS_OFFSET +
857 		i * SWITCHTEC_DMA_CHAN_FW_REGS_SIZE;
858 	swdma_chan->mmio_chan_hw =
859 		swdma_dev->bar + SWITCHTEC_DMAC_CHAN_CTRL_OFFSET +
860 		i * SWITCHTEC_DMA_CHAN_HW_REGS_SIZE;
861 
862 	swdma_dev->swdma_chans[i] = swdma_chan;
863 
864 	rc = pause_reset_channel(swdma_chan);
865 	if (rc)
866 		goto free_and_exit;
867 
868 	/* init perf tuner */
869 	writel(FIELD_PREP(PERF_BURST_SCALE_MASK, 1) |
870 	       FIELD_PREP(PERF_MRRS_MASK, 3) |
871 	       FIELD_PREP(PERF_BURST_SIZE_MASK, 6) |
872 	       FIELD_PREP(PERF_ARB_WEIGHT_MASK, 1),
873 	       &swdma_chan->mmio_chan_fw->perf_cfg);
874 
875 	valid_en_se = readl(&swdma_chan->mmio_chan_fw->valid_en_se);
876 
877 	dev_dbg(&pdev->dev, "Channel %d: SE buffer base %d\n", i,
878 		FIELD_GET(SE_BUF_BASE_MASK, valid_en_se));
879 
880 	se_buf_len = FIELD_GET(SE_BUF_LEN_MASK, valid_en_se);
881 	dev_dbg(&pdev->dev, "Channel %d: SE buffer count %d\n", i, se_buf_len);
882 
883 	thresh = se_buf_len / 2;
884 	valid_en_se |= FIELD_GET(SE_THRESH_MASK, thresh);
885 	writel(valid_en_se, &swdma_chan->mmio_chan_fw->valid_en_se);
886 
887 	/* request irqs */
888 	irq = readl(&swdma_chan->mmio_chan_fw->int_vec);
889 	dev_dbg(&pdev->dev, "Channel %d: CE irq vector %d\n", i, irq);
890 
891 	rc = pci_request_irq(pdev, irq, switchtec_dma_isr, NULL, swdma_chan,
892 			     KBUILD_MODNAME);
893 	if (rc)
894 		goto free_and_exit;
895 
896 	swdma_chan->irq = irq;
897 
898 	chan = &swdma_chan->dma_chan;
899 	chan->device = dma;
900 	dma_cookie_init(chan);
901 
902 	list_add_tail(&chan->device_node, &dma->channels);
903 
904 	return 0;
905 
906 free_and_exit:
907 	kfree(swdma_chan);
908 	return rc;
909 }
910 
/*
 * Quiesce one channel: deactivate both ring directions, release its
 * irq, kill the completion tasklet, and halt the hardware.  Does NOT
 * free the channel memory (that happens in switchtec_dma_release()).
 * Always returns 0.
 */
static int switchtec_dma_chan_free(struct pci_dev *pdev,
				   struct switchtec_dma_chan *swdma_chan)
{
	spin_lock_bh(&swdma_chan->submit_lock);
	swdma_chan->ring_active = false;
	spin_unlock_bh(&swdma_chan->submit_lock);

	spin_lock_bh(&swdma_chan->complete_lock);
	swdma_chan->comp_ring_active = false;
	spin_unlock_bh(&swdma_chan->complete_lock);

	/* free the irq before killing the tasklet so it can't re-schedule */
	pci_free_irq(pdev, swdma_chan->irq, swdma_chan);
	tasklet_kill(&swdma_chan->desc_task);

	switchtec_dma_chan_stop(swdma_chan);

	return 0;
}
929 
/* Quiesce every enumerated channel.  Always returns 0. */
static int switchtec_dma_chans_release(struct pci_dev *pdev,
				       struct switchtec_dma_dev *swdma_dev)
{
	int i;

	for (i = 0; i < swdma_dev->chan_cnt; i++)
		switchtec_dma_chan_free(pdev, swdma_dev->swdma_chans[i]);

	return 0;
}
940 
941 static int switchtec_dma_chans_enumerate(struct switchtec_dma_dev *swdma_dev,
942 					 struct pci_dev *pdev, int chan_cnt)
943 {
944 	struct dma_device *dma = &swdma_dev->dma_dev;
945 	int base, cnt, rc, i;
946 
947 	swdma_dev->swdma_chans = kcalloc(chan_cnt, sizeof(*swdma_dev->swdma_chans),
948 					 GFP_KERNEL);
949 
950 	if (!swdma_dev->swdma_chans)
951 		return -ENOMEM;
952 
953 	base = readw(swdma_dev->bar + SWITCHTEC_REG_SE_BUF_BASE);
954 	cnt = readw(swdma_dev->bar + SWITCHTEC_REG_SE_BUF_CNT);
955 
956 	dev_dbg(&pdev->dev, "EP SE buffer base %d\n", base);
957 	dev_dbg(&pdev->dev, "EP SE buffer count %d\n", cnt);
958 
959 	INIT_LIST_HEAD(&dma->channels);
960 
961 	for (i = 0; i < chan_cnt; i++) {
962 		rc = switchtec_dma_chan_init(swdma_dev, pdev, i);
963 		if (rc) {
964 			dev_err(&pdev->dev, "Channel %d: init channel failed\n",
965 				i);
966 			chan_cnt = i;
967 			goto err_exit;
968 		}
969 	}
970 
971 	return chan_cnt;
972 
973 err_exit:
974 	for (i = 0; i < chan_cnt; i++)
975 		switchtec_dma_chan_free(pdev, swdma_dev->swdma_chans[i]);
976 
977 	kfree(swdma_dev->swdma_chans);
978 
979 	return rc;
980 }
981 
/*
 * dma_device release callback, invoked once the last reference to the
 * dmaengine device is dropped: free all channel structs, the channel
 * array, the device reference taken in switchtec_dma_create(), and
 * the device state itself.
 */
static void switchtec_dma_release(struct dma_device *dma_dev)
{
	struct switchtec_dma_dev *swdma_dev =
		container_of(dma_dev, struct switchtec_dma_dev, dma_dev);
	int i;

	for (i = 0; i < swdma_dev->chan_cnt; i++)
		kfree(swdma_dev->swdma_chans[i]);

	kfree(swdma_dev->swdma_chans);

	put_device(dma_dev->dev);
	kfree(swdma_dev);
}
996 
997 static int switchtec_dma_create(struct pci_dev *pdev)
998 {
999 	struct switchtec_dma_dev *swdma_dev;
1000 	int chan_cnt, nr_vecs, irq, rc;
1001 	struct dma_device *dma;
1002 	struct dma_chan *chan;
1003 
1004 	/*
1005 	 * Create the switchtec dma device
1006 	 */
1007 	swdma_dev = kzalloc_obj(*swdma_dev, GFP_KERNEL);
1008 	if (!swdma_dev)
1009 		return -ENOMEM;
1010 
1011 	swdma_dev->bar = ioremap(pci_resource_start(pdev, 0),
1012 				 pci_resource_len(pdev, 0));
1013 
1014 	RCU_INIT_POINTER(swdma_dev->pdev, pdev);
1015 
1016 	nr_vecs = pci_msix_vec_count(pdev);
1017 	rc = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
1018 	if (rc < 0)
1019 		goto err_exit;
1020 
1021 	irq = readw(swdma_dev->bar + SWITCHTEC_REG_CHAN_STS_VEC);
1022 	pci_dbg(pdev, "Channel pause irq vector %d\n", irq);
1023 
1024 	rc = pci_request_irq(pdev, irq, NULL, switchtec_dma_chan_status_isr,
1025 			     swdma_dev, KBUILD_MODNAME);
1026 	if (rc)
1027 		goto err_exit;
1028 
1029 	swdma_dev->chan_status_irq = irq;
1030 
1031 	chan_cnt = readl(swdma_dev->bar + SWITCHTEC_REG_CHAN_CNT);
1032 	if (!chan_cnt) {
1033 		pci_err(pdev, "No channel configured.\n");
1034 		rc = -ENXIO;
1035 		goto err_exit;
1036 	}
1037 
1038 	chan_cnt = switchtec_dma_chans_enumerate(swdma_dev, pdev, chan_cnt);
1039 	if (chan_cnt < 0) {
1040 		pci_err(pdev, "Failed to enumerate dma channels: %d\n",
1041 			chan_cnt);
1042 		rc = -ENXIO;
1043 		goto err_exit;
1044 	}
1045 
1046 	swdma_dev->chan_cnt = chan_cnt;
1047 
1048 	dma = &swdma_dev->dma_dev;
1049 	dma->copy_align = DMAENGINE_ALIGN_8_BYTES;
1050 	dma->dev = get_device(&pdev->dev);
1051 
1052 	dma->device_alloc_chan_resources = switchtec_dma_alloc_chan_resources;
1053 	dma->device_free_chan_resources = switchtec_dma_free_chan_resources;
1054 	dma->device_terminate_all = switchtec_dma_terminate_all;
1055 	dma->device_synchronize = switchtec_dma_synchronize;
1056 	dma->device_release = switchtec_dma_release;
1057 
1058 	rc = dma_async_device_register(dma);
1059 	if (rc) {
1060 		pci_err(pdev, "Failed to register dma device: %d\n", rc);
1061 		goto err_chans_release_exit;
1062 	}
1063 
1064 	pci_dbg(pdev, "Channel count: %d\n", chan_cnt);
1065 
1066 	list_for_each_entry(chan, &dma->channels, device_node)
1067 		pci_dbg(pdev, "%s\n", dma_chan_name(chan));
1068 
1069 	pci_set_drvdata(pdev, swdma_dev);
1070 
1071 	return 0;
1072 
1073 err_chans_release_exit:
1074 	switchtec_dma_chans_release(pdev, swdma_dev);
1075 
1076 err_exit:
1077 	if (swdma_dev->chan_status_irq)
1078 		free_irq(swdma_dev->chan_status_irq, swdma_dev);
1079 
1080 	iounmap(swdma_dev->bar);
1081 	kfree(swdma_dev);
1082 	return rc;
1083 }
1084 
1085 static int switchtec_dma_probe(struct pci_dev *pdev,
1086 			       const struct pci_device_id *id)
1087 {
1088 	int rc;
1089 
1090 	rc = pci_enable_device(pdev);
1091 	if (rc)
1092 		return rc;
1093 
1094 	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1095 
1096 	rc = pci_request_mem_regions(pdev, KBUILD_MODNAME);
1097 	if (rc)
1098 		goto err_disable;
1099 
1100 	pci_set_master(pdev);
1101 
1102 	rc = switchtec_dma_create(pdev);
1103 	if (rc)
1104 		goto err_free;
1105 
1106 	return 0;
1107 
1108 err_free:
1109 	pci_free_irq_vectors(pdev);
1110 	pci_release_mem_regions(pdev);
1111 
1112 err_disable:
1113 	pci_disable_device(pdev);
1114 
1115 	return rc;
1116 }
1117 
/*
 * Remove callback: tear down in the reverse order of probe/create.
 * The sequence below is order-sensitive — channels are quiesced first,
 * then in-flight RCU readers of the pdev pointer are drained before
 * the interrupts and PCI resources go away.
 */
static void switchtec_dma_remove(struct pci_dev *pdev)
{
	struct switchtec_dma_dev *swdma_dev = pci_get_drvdata(pdev);

	/* Stop and release all DMA channels before touching anything else. */
	switchtec_dma_chans_release(pdev, swdma_dev);

	/*
	 * Clear the RCU-protected pdev pointer and wait for every reader
	 * to finish, so no concurrent path can dereference the PCI device
	 * while it is dismantled below.
	 */
	rcu_assign_pointer(swdma_dev->pdev, NULL);
	synchronize_rcu();

	/* Free the channel-status interrupt, then all MSI/MSI-X vectors. */
	pci_free_irq(pdev, swdma_dev->chan_status_irq, swdma_dev);

	pci_free_irq_vectors(pdev);

	/*
	 * Unregister from the dmaengine core. swdma_dev itself is not
	 * freed here — presumably the device_release callback
	 * (switchtec_dma_release) owns that; TODO(review): confirm.
	 */
	dma_async_device_unregister(&swdma_dev->dma_dev);

	iounmap(swdma_dev->bar);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
1137 
1138 /*
1139  * Also use the class code to identify the devices, as some of the
1140  * device IDs are also used for other devices with other classes by
1141  * Microsemi.
1142  */
1143 #define SW_ID(vendor_id, device_id) \
1144 	{ \
1145 		.vendor     = vendor_id, \
1146 		.device     = device_id, \
1147 		.subvendor  = PCI_ANY_ID, \
1148 		.subdevice  = PCI_ANY_ID, \
1149 		.class      = PCI_CLASS_SYSTEM_OTHER << 8, \
1150 		.class_mask = 0xffffffff, \
1151 	}
1152 
/*
 * Supported Switchtec parts (XG4 and XG5 families per the per-entry
 * comments, plus EFAR-branded PCI100x devices). Each entry also
 * matches the PCI class code via SW_ID — see the comment on that macro.
 */
static const struct pci_device_id switchtec_dma_pci_tbl[] = {
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4000), /* PFX 100XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4084), /* PFX 84XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4068), /* PFX 68XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4052), /* PFX 52XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4036), /* PFX 36XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4028), /* PFX 28XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4100), /* PSX 100XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4184), /* PSX 84XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4168), /* PSX 68XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4152), /* PSX 52XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4136), /* PSX 36XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4128), /* PSX 28XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4352), /* PFXA 52XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4336), /* PFXA 36XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4328), /* PFXA 28XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4452), /* PSXA 52XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4436), /* PSXA 36XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x4428), /* PSXA 28XG4 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5000), /* PFX 100XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5084), /* PFX 84XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5068), /* PFX 68XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5052), /* PFX 52XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5036), /* PFX 36XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5028), /* PFX 28XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5100), /* PSX 100XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5184), /* PSX 84XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5168), /* PSX 68XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5152), /* PSX 52XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5136), /* PSX 36XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5128), /* PSX 28XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5300), /* PFXA 100XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5384), /* PFXA 84XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5368), /* PFXA 68XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5352), /* PFXA 52XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5336), /* PFXA 36XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5328), /* PFXA 28XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5400), /* PSXA 100XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5484), /* PSXA 84XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5468), /* PSXA 68XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5452), /* PSXA 52XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5436), /* PSXA 36XG5 */
	SW_ID(PCI_VENDOR_ID_MICROSEMI, 0x5428), /* PSXA 28XG5 */
	SW_ID(PCI_VENDOR_ID_EFAR,      0x1001), /* PCI1001 16XG4 */
	SW_ID(PCI_VENDOR_ID_EFAR,      0x1002), /* PCI1002 16XG4 */
	SW_ID(PCI_VENDOR_ID_EFAR,      0x1003), /* PCI1003 16XG4 */
	SW_ID(PCI_VENDOR_ID_EFAR,      0x1004), /* PCI1004 16XG4 */
	SW_ID(PCI_VENDOR_ID_EFAR,      0x1005), /* PCI1005 16XG4 */
	SW_ID(PCI_VENDOR_ID_EFAR,      0x1006), /* PCI1006 16XG4 */
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_dma_pci_tbl);
1205 
1206 static struct pci_driver switchtec_dma_pci_driver = {
1207 	.name           = KBUILD_MODNAME,
1208 	.id_table       = switchtec_dma_pci_tbl,
1209 	.probe          = switchtec_dma_probe,
1210 	.remove		= switchtec_dma_remove,
1211 };
1212 module_pci_driver(switchtec_dma_pci_driver);
1213