xref: /linux/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c (revision b7d3826c2ed6c3e626e7ae796c5df2c0d2551c6a)
1 /*
2  * Copyright (c) 2015-2016 Quantenna Communications, Inc.
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License
7  * as published by the Free Software Foundation; either version 2
8  * of the License, or (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16 
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/firmware.h>
20 #include <linux/pci.h>
21 #include <linux/vmalloc.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/completion.h>
26 #include <linux/crc32.h>
27 #include <linux/spinlock.h>
28 #include <linux/circ_buf.h>
29 #include <linux/log2.h>
30 
31 #include "pcie_priv.h"
32 #include "pearl_pcie_regs.h"
33 #include "pearl_pcie_ipc.h"
34 #include "qtn_hw_ids.h"
35 #include "core.h"
36 #include "bus.h"
37 #include "shm_ipc.h"
38 #include "debug.h"
39 
40 static bool use_msi = true;
41 module_param(use_msi, bool, 0644);
42 MODULE_PARM_DESC(use_msi, "set to 0 to use legacy interrupt");
43 
44 static unsigned int tx_bd_size_param = 32;
45 module_param(tx_bd_size_param, uint, 0644);
46 MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");
47 
48 static unsigned int rx_bd_size_param = 256;
49 module_param(rx_bd_size_param, uint, 0644);
50 MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");
51 
52 static u8 flashboot = 1;
53 module_param(flashboot, byte, 0644);
54 MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");
55 
56 #define DRV_NAME	"qtnfmac_pearl_pcie"
57 
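/*
 * Boot data area (BDA): control structure exposed by the card through the
 * PCIe endpoint memory BAR. Host and card use the bda_rc_state/bda_ep_state
 * words for the firmware download handshake, and the two 4 KB-aligned
 * shm_ipc regions at the end carry the control-path messages.
 */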
58 struct qtnf_pearl_bda {
59 	__le16 bda_len;
60 	__le16 bda_version;
61 	__le32 bda_pci_endian;
62 	__le32 bda_ep_state;
63 	__le32 bda_rc_state;
64 	__le32 bda_dma_mask;
65 	__le32 bda_msi_addr;
66 	__le32 bda_flashsz;
67 	u8 bda_boardname[PCIE_BDA_NAMELEN];
68 	__le32 bda_rc_msi_enabled;
69 	u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
70 	__le32 bda_dsbw_start_index;
71 	__le32 bda_dsbw_end_index;
72 	__le32 bda_dsbw_total_bytes;
73 	__le32 bda_rc_tx_bd_base;
74 	__le32 bda_rc_tx_bd_num;
75 	u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
76 	struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
77 	struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
78 } __packed;
79 
80 struct qtnf_pearl_tx_bd {
81 	__le32 addr;
82 	__le32 addr_h;
83 	__le32 info;
84 	__le32 info_h;
85 } __packed;
86 
87 struct qtnf_pearl_rx_bd {
88 	__le32 addr;
89 	__le32 addr_h;
90 	__le32 info;
91 	__le32 info_h;
92 	__le32 next_ptr;
93 	__le32 next_ptr_h;
94 } __packed;
95 
96 struct qtnf_pearl_fw_hdr {
97 	u8 boardflg[8];
98 	__le32 fwsize;
99 	__le32 seqnum;
100 	__le32 type;
101 	__le32 pktlen;
102 	__le32 crc;
103 } __packed;
104 
105 struct qtnf_pcie_pearl_state {
106 	struct qtnf_pcie_bus_priv base;
107 
108 	/* lock for irq configuration changes */
109 	spinlock_t irq_lock;
110 
111 	struct qtnf_pearl_bda __iomem *bda;
112 	void __iomem *pcie_reg_base;
113 
114 	struct qtnf_pearl_tx_bd *tx_bd_vbase;
115 	dma_addr_t tx_bd_pbase;
116 
117 	struct qtnf_pearl_rx_bd *rx_bd_vbase;
118 	dma_addr_t rx_bd_pbase;
119 
120 	dma_addr_t bd_table_paddr;
121 	void *bd_table_vaddr;
122 	u32 bd_table_len;
123 	u32 pcie_irq_mask;
124 	u32 pcie_irq_rx_count;
125 	u32 pcie_irq_tx_count;
126 	u32 pcie_irq_uf_count;
127 };
128 
129 static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
130 {
131 	unsigned long flags;
132 
133 	spin_lock_irqsave(&ps->irq_lock, flags);
134 	ps->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
135 	spin_unlock_irqrestore(&ps->irq_lock, flags);
136 }
137 
138 static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
139 {
140 	unsigned long flags;
141 
142 	spin_lock_irqsave(&ps->irq_lock, flags);
143 	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
144 	spin_unlock_irqrestore(&ps->irq_lock, flags);
145 }
146 
147 static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
148 {
149 	unsigned long flags;
150 
151 	spin_lock_irqsave(&ps->irq_lock, flags);
152 	writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base));
153 	spin_unlock_irqrestore(&ps->irq_lock, flags);
154 }
155 
156 static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
157 {
158 	unsigned long flags;
159 
160 	spin_lock_irqsave(&ps->irq_lock, flags);
161 	ps->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
162 	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
163 	spin_unlock_irqrestore(&ps->irq_lock, flags);
164 }
165 
166 static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
167 {
168 	unsigned long flags;
169 
170 	spin_lock_irqsave(&ps->irq_lock, flags);
171 	ps->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
172 	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
173 	spin_unlock_irqrestore(&ps->irq_lock, flags);
174 }
175 
176 static inline void qtnf_en_txdone_irq(struct qtnf_pcie_pearl_state *ps)
177 {
178 	unsigned long flags;
179 
180 	spin_lock_irqsave(&ps->irq_lock, flags);
181 	ps->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
182 	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
183 	spin_unlock_irqrestore(&ps->irq_lock, flags);
184 }
185 
186 static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_pearl_state *ps)
187 {
188 	unsigned long flags;
189 
190 	spin_lock_irqsave(&ps->irq_lock, flags);
191 	ps->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
192 	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
193 	spin_unlock_irqrestore(&ps->irq_lock, flags);
194 }
195 
196 static void qtnf_deassert_intx(struct qtnf_pcie_pearl_state *ps)
197 {
198 	void __iomem *reg = ps->base.sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
199 	u32 cfg;
200 
201 	cfg = readl(reg);
202 	cfg &= ~PEARL_ASSERT_INTX;
203 	qtnf_non_posted_write(cfg, reg);
204 }
205 
206 static void qtnf_pearl_reset_ep(struct qtnf_pcie_pearl_state *ps)
207 {
208 	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
209 	void __iomem *reg = ps->base.sysctl_bar +
210 			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
211 
212 	qtnf_non_posted_write(data, reg);
213 	msleep(QTN_EP_RESET_WAIT_MS);
214 	pci_restore_state(ps->base.pdev);
215 }
216 
217 static void qtnf_pcie_pearl_ipc_gen_ep_int(void *arg)
218 {
219 	const struct qtnf_pcie_pearl_state *ps = arg;
220 	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
221 	void __iomem *reg = ps->base.sysctl_bar +
222 			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
223 
224 	qtnf_non_posted_write(data, reg);
225 }
226 
227 static int qtnf_is_state(__le32 __iomem *reg, u32 state)
228 {
229 	u32 s = readl(reg);
230 
231 	return s & state;
232 }
233 
234 static void qtnf_set_state(__le32 __iomem *reg, u32 state)
235 {
236 	u32 s = readl(reg);
237 
238 	qtnf_non_posted_write(state | s, reg);
239 }
240 
241 static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
242 {
243 	u32 s = readl(reg);
244 
245 	qtnf_non_posted_write(s & ~state, reg);
246 }
247 
248 static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
249 {
250 	u32 timeout = 0;
251 
252 	while (qtnf_is_state(reg, state) == 0) {
253 		usleep_range(1000, 1200);
254 		if (++timeout > delay_in_ms)
255 			return -1;
256 	}
257 
258 	return 0;
259 }
260 
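/*
 * Allocate a single coherent DMA region holding both descriptor rings:
 * tx_bd_num TX descriptors followed by rx_bd_num RX descriptors. The RX ring
 * base and element size are then programmed into the HDP "TX host queue"
 * registers; the HDP register names follow the endpoint's perspective, so
 * the host RX ring is the card's TX queue.
 */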
261 static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
262 {
263 	struct qtnf_pcie_bus_priv *priv = &ps->base;
264 	dma_addr_t paddr;
265 	void *vaddr;
266 	int len;
267 
268 	len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
269 		priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
270 
271 	vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
272 	if (!vaddr)
273 		return -ENOMEM;
274 
275 	/* tx bd */
276 
277 	memset(vaddr, 0, len);
278 
279 	ps->bd_table_vaddr = vaddr;
280 	ps->bd_table_paddr = paddr;
281 	ps->bd_table_len = len;
282 
283 	ps->tx_bd_vbase = vaddr;
284 	ps->tx_bd_pbase = paddr;
285 
286 	pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
287 
288 	priv->tx_bd_r_index = 0;
289 	priv->tx_bd_w_index = 0;
290 
291 	/* rx bd */
292 
293 	vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
294 	paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
295 
296 	ps->rx_bd_vbase = vaddr;
297 	ps->rx_bd_pbase = paddr;
298 
299 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
300 	writel(QTN_HOST_HI32(paddr),
301 	       PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
302 #endif
303 	writel(QTN_HOST_LO32(paddr),
304 	       PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
305 	writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
306 	       PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));
307 
308 	pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
309 
310 	return 0;
311 }
312 
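/*
 * Attach a fresh skb to RX descriptor 'index': allocate the buffer, map it
 * for device-to-host DMA, record the bus address in the descriptor, then
 * publish the buffer to the card via the HHBM buffer pointer registers and
 * the queue write pointer.
 */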
313 static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
314 {
315 	struct qtnf_pcie_bus_priv *priv = &ps->base;
316 	struct qtnf_pearl_rx_bd *rxbd;
317 	struct sk_buff *skb;
318 	dma_addr_t paddr;
319 
320 	skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
321 	if (!skb) {
322 		priv->rx_skb[index] = NULL;
323 		return -ENOMEM;
324 	}
325 
326 	priv->rx_skb[index] = skb;
327 	rxbd = &ps->rx_bd_vbase[index];
328 
329 	paddr = pci_map_single(priv->pdev, skb->data,
330 			       SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
331 	if (pci_dma_mapping_error(priv->pdev, paddr)) {
332 		pr_err("skb DMA mapping error: %pad\n", &paddr);
333 		return -ENOMEM;
334 	}
335 
336 	/* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
337 	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
338 	rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
339 	rxbd->info = 0x0;
340 
341 	priv->rx_bd_w_index = index;
342 
343 	/* sync up all descriptor updates */
344 	wmb();
345 
346 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
347 	writel(QTN_HOST_HI32(paddr),
348 	       PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
349 #endif
350 	writel(QTN_HOST_LO32(paddr),
351 	       PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));
352 
353 	writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
354 	return 0;
355 }
356 
357 static int pearl_alloc_rx_buffers(struct qtnf_pcie_pearl_state *ps)
358 {
359 	u16 i;
360 	int ret = 0;
361 
362 	memset(ps->rx_bd_vbase, 0x0,
363 	       ps->base.rx_bd_num * sizeof(struct qtnf_pearl_rx_bd));
364 
365 	for (i = 0; i < ps->base.rx_bd_num; i++) {
366 		ret = pearl_skb2rbd_attach(ps, i);
367 		if (ret)
368 			break;
369 	}
370 
371 	return ret;
372 }
373 
374 /* all rx/tx activity should have ceased before calling this function */
375 static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps)
376 {
377 	struct qtnf_pcie_bus_priv *priv = &ps->base;
378 	struct qtnf_pearl_tx_bd *txbd;
379 	struct qtnf_pearl_rx_bd *rxbd;
380 	struct sk_buff *skb;
381 	dma_addr_t paddr;
382 	int i;
383 
384 	/* free rx buffers */
385 	for (i = 0; i < priv->rx_bd_num; i++) {
386 		if (priv->rx_skb && priv->rx_skb[i]) {
387 			rxbd = &ps->rx_bd_vbase[i];
388 			skb = priv->rx_skb[i];
389 			paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
390 					      le32_to_cpu(rxbd->addr));
391 			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
392 					 PCI_DMA_FROMDEVICE);
393 			dev_kfree_skb_any(skb);
394 			priv->rx_skb[i] = NULL;
395 		}
396 	}
397 
398 	/* free tx buffers */
399 	for (i = 0; i < priv->tx_bd_num; i++) {
400 		if (priv->tx_skb && priv->tx_skb[i]) {
401 			txbd = &ps->tx_bd_vbase[i];
402 			skb = priv->tx_skb[i];
403 			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
404 					      le32_to_cpu(txbd->addr));
405 			pci_unmap_single(priv->pdev, paddr, skb->len,
406 					 PCI_DMA_TODEVICE);
407 			dev_kfree_skb_any(skb);
408 			priv->tx_skb[i] = NULL;
409 		}
410 	}
411 }
412 
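/*
 * Soft-reset the HHBM block, enable 64-bit buffer addresses when dma_addr_t
 * is 64 bit, and program the RX buffer queue limit to the RX ring depth.
 */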
413 static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
414 {
415 	u32 val;
416 
417 	val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
418 	val |= HHBM_CONFIG_SOFT_RESET;
419 	writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
420 	usleep_range(50, 100);
421 	val &= ~HHBM_CONFIG_SOFT_RESET;
422 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
423 	val |= HHBM_64BIT;
424 #endif
425 	writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
426 	writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));
427 
428 	return 0;
429 }
430 
431 static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps)
432 {
433 	struct qtnf_pcie_bus_priv *priv = &ps->base;
434 	int ret;
435 	u32 val;
436 
437 	priv->tx_bd_num = tx_bd_size_param;
438 	priv->rx_bd_num = rx_bd_size_param;
439 	priv->rx_bd_w_index = 0;
440 	priv->rx_bd_r_index = 0;
441 
442 	if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
443 		pr_err("tx_bd_size_param %u is not power of two\n",
444 		       priv->tx_bd_num);
445 		return -EINVAL;
446 	}
447 
448 	val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
449 	if (val > PCIE_HHBM_MAX_SIZE) {
450 		pr_err("tx_bd_size_param %u is too large\n",
451 		       priv->tx_bd_num);
452 		return -EINVAL;
453 	}
454 
455 	if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
456 		pr_err("rx_bd_size_param %u is not power of two\n",
457 		       priv->rx_bd_num);
458 		return -EINVAL;
459 	}
460 
461 	val = priv->rx_bd_num * sizeof(dma_addr_t);
462 	if (val > PCIE_HHBM_MAX_SIZE) {
463 		pr_err("rx_bd_size_param %u is too large\n",
464 		       priv->rx_bd_num);
465 		return -EINVAL;
466 	}
467 
468 	ret = pearl_hhbm_init(ps);
469 	if (ret) {
470 		pr_err("failed to init h/w queues\n");
471 		return ret;
472 	}
473 
474 	ret = qtnf_pcie_alloc_skb_array(priv);
475 	if (ret) {
476 		pr_err("failed to allocate skb array\n");
477 		return ret;
478 	}
479 
480 	ret = pearl_alloc_bd_table(ps);
481 	if (ret) {
482 		pr_err("failed to allocate bd table\n");
483 		return ret;
484 	}
485 
486 	ret = pearl_alloc_rx_buffers(ps);
487 	if (ret) {
488 		pr_err("failed to allocate rx buffers\n");
489 		return ret;
490 	}
491 
492 	return ret;
493 }
494 
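/*
 * Reclaim completed TX descriptors: the card reports its consumer index via
 * PCIE_HDP_RX0DMA_CNT; every skb up to that index is unmapped, accounted and
 * freed, and previously stopped netdev queues are woken up again.
 */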
495 static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
496 {
497 	struct qtnf_pcie_bus_priv *priv = &ps->base;
498 	struct qtnf_pearl_tx_bd *txbd;
499 	struct sk_buff *skb;
500 	unsigned long flags;
501 	dma_addr_t paddr;
502 	u32 tx_done_index;
503 	int count = 0;
504 	int i;
505 
506 	spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
507 
508 	tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
509 			& (priv->tx_bd_num - 1);
510 
511 	i = priv->tx_bd_r_index;
512 
513 	while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
514 		skb = priv->tx_skb[i];
515 		if (likely(skb)) {
516 			txbd = &ps->tx_bd_vbase[i];
517 			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
518 					      le32_to_cpu(txbd->addr));
519 			pci_unmap_single(priv->pdev, paddr, skb->len,
520 					 PCI_DMA_TODEVICE);
521 
522 			if (skb->dev) {
523 				qtnf_update_tx_stats(skb->dev, skb);
524 				if (unlikely(priv->tx_stopped)) {
525 					qtnf_wake_all_queues(skb->dev);
526 					priv->tx_stopped = 0;
527 				}
528 			}
529 
530 			dev_kfree_skb_any(skb);
531 		}
532 
533 		priv->tx_skb[i] = NULL;
534 		count++;
535 
536 		if (++i >= priv->tx_bd_num)
537 			i = 0;
538 	}
539 
540 	priv->tx_reclaim_done += count;
541 	priv->tx_reclaim_req++;
542 	priv->tx_bd_r_index = i;
543 
544 	spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
545 }
546 
547 static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
548 {
549 	struct qtnf_pcie_bus_priv *priv = &ps->base;
550 
551 	if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
552 			priv->tx_bd_num)) {
553 		qtnf_pearl_data_tx_reclaim(ps);
554 
555 		if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
556 				priv->tx_bd_num)) {
557 			pr_warn_ratelimited("reclaim full Tx queue\n");
558 			priv->tx_full_count++;
559 			return 0;
560 		}
561 	}
562 
563 	return 1;
564 }
565 
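/*
 * Data-path transmit: map the skb, fill the next TX descriptor and hand the
 * descriptor's bus address to the endpoint through the HDP host write
 * descriptor registers. NETDEV_TX_BUSY is returned only when the ring is
 * still full after a reclaim attempt; a mapping failure drops the skb and
 * reports NETDEV_TX_OK.
 */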
566 static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
567 {
568 	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
569 	struct qtnf_pcie_bus_priv *priv = &ps->base;
570 	dma_addr_t txbd_paddr, skb_paddr;
571 	struct qtnf_pearl_tx_bd *txbd;
572 	unsigned long flags;
573 	int len, i;
574 	u32 info;
575 	int ret = 0;
576 
577 	spin_lock_irqsave(&priv->tx_lock, flags);
578 
579 	if (!qtnf_tx_queue_ready(ps)) {
580 		if (skb->dev) {
581 			netif_tx_stop_all_queues(skb->dev);
582 			priv->tx_stopped = 1;
583 		}
584 
585 		spin_unlock_irqrestore(&priv->tx_lock, flags);
586 		return NETDEV_TX_BUSY;
587 	}
588 
589 	i = priv->tx_bd_w_index;
590 	priv->tx_skb[i] = skb;
591 	len = skb->len;
592 
593 	skb_paddr = pci_map_single(priv->pdev, skb->data,
594 				   skb->len, PCI_DMA_TODEVICE);
595 	if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
596 		pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
597 		ret = -ENOMEM;
598 		goto tx_done;
599 	}
600 
601 	txbd = &ps->tx_bd_vbase[i];
602 	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
603 	txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));
604 
605 	info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
606 	txbd->info = cpu_to_le32(info);
607 
608 	/* sync up all descriptor updates before passing them to EP */
609 	dma_wmb();
610 
611 	/* write new TX descriptor to PCIE_RX_FIFO on EP */
612 	txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);
613 
614 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
615 	writel(QTN_HOST_HI32(txbd_paddr),
616 	       PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
617 #endif
618 	writel(QTN_HOST_LO32(txbd_paddr),
619 	       PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));
620 
621 	if (++i >= priv->tx_bd_num)
622 		i = 0;
623 
624 	priv->tx_bd_w_index = i;
625 
626 tx_done:
627 	if (ret && skb) {
628 		pr_err_ratelimited("drop skb\n");
629 		if (skb->dev)
630 			skb->dev->stats.tx_dropped++;
631 		dev_kfree_skb_any(skb);
632 	}
633 
634 	priv->tx_done_count++;
635 	spin_unlock_irqrestore(&priv->tx_lock, flags);
636 
637 	qtnf_pearl_data_tx_reclaim(ps);
638 
639 	return NETDEV_TX_OK;
640 }
641 
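/*
 * Single interrupt handler for both the shm_ipc control-path doorbells and
 * the HDP data path. RX work is deferred to NAPI and TX reclaim to a
 * tasklet, with the corresponding interrupt source masked until the deferred
 * work re-enables it. All status bits are cleared unconditionally as a
 * hardware workaround (see the comment at the end of the handler).
 */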
642 static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
643 {
644 	struct qtnf_bus *bus = (struct qtnf_bus *)data;
645 	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
646 	struct qtnf_pcie_bus_priv *priv = &ps->base;
647 	u32 status;
648 
649 	priv->pcie_irq_count++;
650 	status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
651 
652 	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
653 	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
654 
655 	if (!(status & ps->pcie_irq_mask))
656 		goto irq_done;
657 
658 	if (status & PCIE_HDP_INT_RX_BITS)
659 		ps->pcie_irq_rx_count++;
660 
661 	if (status & PCIE_HDP_INT_TX_BITS)
662 		ps->pcie_irq_tx_count++;
663 
664 	if (status & PCIE_HDP_INT_HHBM_UF)
665 		ps->pcie_irq_uf_count++;
666 
667 	if (status & PCIE_HDP_INT_RX_BITS) {
668 		qtnf_dis_rxdone_irq(ps);
669 		napi_schedule(&bus->mux_napi);
670 	}
671 
672 	if (status & PCIE_HDP_INT_TX_BITS) {
673 		qtnf_dis_txdone_irq(ps);
674 		tasklet_hi_schedule(&priv->reclaim_tq);
675 	}
676 
677 irq_done:
678 	/* H/W workaround: clean all bits, not only enabled */
679 	qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
680 
681 	if (!priv->msi_enabled)
682 		qtnf_deassert_intx(ps);
683 
684 	return IRQ_HANDLED;
685 }
686 
687 static int qtnf_rx_data_ready(struct qtnf_pcie_pearl_state *ps)
688 {
689 	u16 index = ps->base.rx_bd_r_index;
690 	struct qtnf_pearl_rx_bd *rxbd;
691 	u32 descw;
692 
693 	rxbd = &ps->rx_bd_vbase[index];
694 	descw = le32_to_cpu(rxbd->info);
695 
696 	if (descw & QTN_TXDONE_MASK)
697 		return 1;
698 
699 	return 0;
700 }
701 
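/*
 * NAPI poll: consume RX descriptors completed by the card, hand the skbs to
 * qtnf_classify_skb()/napi_gro_receive(), refill the ring with new buffers,
 * and re-enable the RX done interrupt once the budget is not exhausted.
 */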
702 static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
703 {
704 	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
705 	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
706 	struct qtnf_pcie_bus_priv *priv = &ps->base;
707 	struct net_device *ndev = NULL;
708 	struct sk_buff *skb = NULL;
709 	int processed = 0;
710 	struct qtnf_pearl_rx_bd *rxbd;
711 	dma_addr_t skb_paddr;
712 	int consume;
713 	u32 descw;
714 	u32 psize;
715 	u16 r_idx;
716 	u16 w_idx;
717 	int ret;
718 
719 	while (processed < budget) {
720 		if (!qtnf_rx_data_ready(ps))
721 			goto rx_out;
722 
723 		r_idx = priv->rx_bd_r_index;
724 		rxbd = &ps->rx_bd_vbase[r_idx];
725 		descw = le32_to_cpu(rxbd->info);
726 
727 		skb = priv->rx_skb[r_idx];
728 		psize = QTN_GET_LEN(descw);
729 		consume = 1;
730 
731 		if (!(descw & QTN_TXDONE_MASK)) {
732 			pr_warn("skip invalid rxbd[%d]\n", r_idx);
733 			consume = 0;
734 		}
735 
736 		if (!skb) {
737 			pr_warn("skip missing rx_skb[%d]\n", r_idx);
738 			consume = 0;
739 		}
740 
741 		if (skb && (skb_tailroom(skb) <  psize)) {
742 			pr_err("skip packet with invalid length: %u > %u\n",
743 			       psize, skb_tailroom(skb));
744 			consume = 0;
745 		}
746 
747 		if (skb) {
748 			skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
749 						  le32_to_cpu(rxbd->addr));
750 			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
751 					 PCI_DMA_FROMDEVICE);
752 		}
753 
754 		if (consume) {
755 			skb_put(skb, psize);
756 			ndev = qtnf_classify_skb(bus, skb);
757 			if (likely(ndev)) {
758 				qtnf_update_rx_stats(ndev, skb);
759 				skb->protocol = eth_type_trans(skb, ndev);
760 				napi_gro_receive(napi, skb);
761 			} else {
762 				pr_debug("drop untagged skb\n");
763 				bus->mux_dev.stats.rx_dropped++;
764 				dev_kfree_skb_any(skb);
765 			}
766 		} else {
767 			if (skb) {
768 				bus->mux_dev.stats.rx_dropped++;
769 				dev_kfree_skb_any(skb);
770 			}
771 		}
772 
773 		priv->rx_skb[r_idx] = NULL;
774 		if (++r_idx >= priv->rx_bd_num)
775 			r_idx = 0;
776 
777 		priv->rx_bd_r_index = r_idx;
778 
779 		/* replace the processed buffer with a new one */
780 		w_idx = priv->rx_bd_w_index;
781 		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
782 				  priv->rx_bd_num) > 0) {
783 			if (++w_idx >= priv->rx_bd_num)
784 				w_idx = 0;
785 
786 			ret = pearl_skb2rbd_attach(ps, w_idx);
787 			if (ret) {
788 				pr_err("failed to allocate new rx_skb[%d]\n",
789 				       w_idx);
790 				break;
791 			}
792 		}
793 
794 		processed++;
795 	}
796 
797 rx_out:
798 	if (processed < budget) {
799 		napi_complete(napi);
800 		qtnf_en_rxdone_irq(ps);
801 	}
802 
803 	return processed;
804 }
805 
806 static void
807 qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
808 {
809 	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
810 
811 	tasklet_hi_schedule(&ps->base.reclaim_tq);
812 }
813 
814 static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
815 {
816 	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
817 
818 	qtnf_enable_hdp_irqs(ps);
819 	napi_enable(&bus->mux_napi);
820 }
821 
822 static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
823 {
824 	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
825 
826 	napi_disable(&bus->mux_napi);
827 	qtnf_disable_hdp_irqs(ps);
828 }
829 
830 static const struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
831 	/* control path methods */
832 	.control_tx	= qtnf_pcie_control_tx,
833 
834 	/* data path methods */
835 	.data_tx		= qtnf_pcie_data_tx,
836 	.data_tx_timeout	= qtnf_pcie_data_tx_timeout,
837 	.data_rx_start		= qtnf_pcie_data_rx_start,
838 	.data_rx_stop		= qtnf_pcie_data_rx_stop,
839 };
840 
841 static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
842 {
843 	struct qtnf_bus *bus = dev_get_drvdata(s->private);
844 	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
845 	u32 reg = readl(PCIE_HDP_INT_EN(ps->pcie_reg_base));
846 	u32 status;
847 
848 	seq_printf(s, "pcie_irq_count(%u)\n", ps->base.pcie_irq_count);
849 	seq_printf(s, "pcie_irq_tx_count(%u)\n", ps->pcie_irq_tx_count);
850 	status = reg &  PCIE_HDP_INT_TX_BITS;
851 	seq_printf(s, "pcie_irq_tx_status(%s)\n",
852 		   (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
853 	seq_printf(s, "pcie_irq_rx_count(%u)\n", ps->pcie_irq_rx_count);
854 	status = reg &  PCIE_HDP_INT_RX_BITS;
855 	seq_printf(s, "pcie_irq_rx_status(%s)\n",
856 		   (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
857 	seq_printf(s, "pcie_irq_uf_count(%u)\n", ps->pcie_irq_uf_count);
858 	status = reg &  PCIE_HDP_INT_HHBM_UF;
859 	seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
860 		   (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");
861 
862 	return 0;
863 }
864 
865 static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
866 {
867 	struct qtnf_bus *bus = dev_get_drvdata(s->private);
868 	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
869 	struct qtnf_pcie_bus_priv *priv = &ps->base;
870 
871 	seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
872 	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
873 	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
874 	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
875 
876 	seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
877 	seq_printf(s, "tx_bd_p_index(%u)\n",
878 		   readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
879 			& (priv->tx_bd_num - 1));
880 	seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
881 	seq_printf(s, "tx queue len(%u)\n",
882 		   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
883 			    priv->tx_bd_num));
884 
885 	seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
886 	seq_printf(s, "rx_bd_p_index(%u)\n",
887 		   readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
888 			& (priv->rx_bd_num - 1));
889 	seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
890 	seq_printf(s, "rx alloc queue len(%u)\n",
891 		   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
892 			      priv->rx_bd_num));
893 
894 	return 0;
895 }
896 
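/*
 * Firmware download helpers: the image is split into QTN_PCIE_FW_BUFSZ sized
 * frames, each prefixed with a qtnf_pearl_fw_hdr carrying the sequence
 * number, a DBEGIN/DSUB/DEND chunk type and an inverted CRC32 of the
 * payload, and pushed to the card through the regular data TX path.
 */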
897 static int qtnf_ep_fw_send(struct pci_dev *pdev, uint32_t size,
898 			   int blk, const u8 *pblk, const u8 *fw)
899 {
900 	struct qtnf_bus *bus = pci_get_drvdata(pdev);
901 
902 	struct qtnf_pearl_fw_hdr *hdr;
903 	u8 *pdata;
904 
905 	int hds = sizeof(*hdr);
906 	struct sk_buff *skb = NULL;
907 	int len = 0;
908 	int ret;
909 
910 	skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
911 	if (!skb)
912 		return -ENOMEM;
913 
914 	skb->len = QTN_PCIE_FW_BUFSZ;
915 	skb->dev = NULL;
916 
917 	hdr = (struct qtnf_pearl_fw_hdr *)skb->data;
918 	memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
919 	hdr->fwsize = cpu_to_le32(size);
920 	hdr->seqnum = cpu_to_le32(blk);
921 
922 	if (blk)
923 		hdr->type = cpu_to_le32(QTN_FW_DSUB);
924 	else
925 		hdr->type = cpu_to_le32(QTN_FW_DBEGIN);
926 
927 	pdata = skb->data + hds;
928 
929 	len = QTN_PCIE_FW_BUFSZ - hds;
930 	if (pblk >= (fw + size - len)) {
931 		len = fw + size - pblk;
932 		hdr->type = cpu_to_le32(QTN_FW_DEND);
933 	}
934 
935 	hdr->pktlen = cpu_to_le32(len);
936 	memcpy(pdata, pblk, len);
937 	hdr->crc = cpu_to_le32(~crc32(0, pdata, len));
938 
939 	ret = qtnf_pcie_data_tx(bus, skb);
940 
941 	return (ret == NETDEV_TX_OK) ? len : 0;
942 }
943 
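/*
 * After each window of blocks (as defined by QTN_PCIE_FW_DLMASK) and after
 * the final block, the host raises QTN_RC_FW_SYNC and waits for the endpoint
 * to acknowledge with QTN_EP_FW_SYNC; if the endpoint flags QTN_EP_FW_RETRY
 * instead, the block counter and data pointer are rewound and the last
 * window is re-sent.
 */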
944 static int
945 qtnf_ep_fw_load(struct qtnf_pcie_pearl_state *ps, const u8 *fw, u32 fw_size)
946 {
947 	int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pearl_fw_hdr);
948 	int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
949 	const u8 *pblk = fw;
950 	int threshold = 0;
951 	int blk = 0;
952 	int len;
953 
954 	pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);
955 
956 	while (blk < blk_count) {
957 		if (++threshold > 10000) {
958 			pr_err("FW upload failed: too many retries\n");
959 			return -ETIMEDOUT;
960 		}
961 
962 		len = qtnf_ep_fw_send(ps->base.pdev, fw_size, blk, pblk, fw);
963 		if (len <= 0)
964 			continue;
965 
966 		if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
967 		    (blk == (blk_count - 1))) {
968 			qtnf_set_state(&ps->bda->bda_rc_state,
969 				       QTN_RC_FW_SYNC);
970 			if (qtnf_poll_state(&ps->bda->bda_ep_state,
971 					    QTN_EP_FW_SYNC,
972 					    QTN_FW_DL_TIMEOUT_MS)) {
973 				pr_err("FW upload failed: SYNC timed out\n");
974 				return -ETIMEDOUT;
975 			}
976 
977 			qtnf_clear_state(&ps->bda->bda_ep_state,
978 					 QTN_EP_FW_SYNC);
979 
980 			if (qtnf_is_state(&ps->bda->bda_ep_state,
981 					  QTN_EP_FW_RETRY)) {
982 				if (blk == (blk_count - 1)) {
983 					int last_round =
984 						blk_count & QTN_PCIE_FW_DLMASK;
985 					blk -= last_round;
986 					pblk -= ((last_round - 1) *
987 						blk_size + len);
988 				} else {
989 					blk -= QTN_PCIE_FW_DLMASK;
990 					pblk -= QTN_PCIE_FW_DLMASK * blk_size;
991 				}
992 
993 				qtnf_clear_state(&ps->bda->bda_ep_state,
994 						 QTN_EP_FW_RETRY);
995 
996 				pr_warn("FW upload retry: block #%d\n", blk);
997 				continue;
998 			}
999 
1000 			qtnf_pearl_data_tx_reclaim(ps);
1001 		}
1002 
1003 		pblk += len;
1004 		blk++;
1005 	}
1006 
1007 	pr_debug("FW upload completed: totally sent %d blocks\n", blk);
1008 	return 0;
1009 }
1010 
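/*
 * Deferred firmware bringup, run from the bus fw_work: either tell the card
 * to boot from flash (flashboot != 0) or upload QTN_PCI_PEARL_FW_NAME
 * obtained via request_firmware(), then poll the endpoint state until the
 * QLINK control path is ready and register the debugfs entries on success.
 */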
1011 static void qtnf_pearl_fw_work_handler(struct work_struct *work)
1012 {
1013 	struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
1014 	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
1015 	struct pci_dev *pdev = ps->base.pdev;
1016 	const struct firmware *fw;
1017 	int ret;
1018 	u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
1019 	const char *fwname = QTN_PCI_PEARL_FW_NAME;
1020 	bool fw_boot_success = false;
1021 
1022 	if (flashboot) {
1023 		state |= QTN_RC_FW_FLASHBOOT;
1024 	} else {
1025 		ret = request_firmware(&fw, fwname, &pdev->dev);
1026 		if (ret < 0) {
1027 			pr_err("failed to get firmware %s\n", fwname);
1028 			goto fw_load_exit;
1029 		}
1030 	}
1031 
1032 	qtnf_set_state(&ps->bda->bda_rc_state, state);
1033 
1034 	if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
1035 			    QTN_FW_DL_TIMEOUT_MS)) {
1036 		pr_err("card is not ready\n");
1037 
1038 		if (!flashboot)
1039 			release_firmware(fw);
1040 
1041 		goto fw_load_exit;
1042 	}
1043 
1044 	qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY);
1045 
1046 	if (flashboot) {
1047 		pr_info("booting firmware from flash\n");
1048 
1049 	} else {
1050 		pr_info("starting firmware upload: %s\n", fwname);
1051 
1052 		ret = qtnf_ep_fw_load(ps, fw->data, fw->size);
1053 		release_firmware(fw);
1054 		if (ret) {
1055 			pr_err("firmware upload error\n");
1056 			goto fw_load_exit;
1057 		}
1058 	}
1059 
1060 	if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_DONE,
1061 			    QTN_FW_DL_TIMEOUT_MS)) {
1062 		pr_err("firmware bringup timed out\n");
1063 		goto fw_load_exit;
1064 	}
1065 
1066 	pr_info("firmware is up and running\n");
1067 
1068 	if (qtnf_poll_state(&ps->bda->bda_ep_state,
1069 			    QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
1070 		pr_err("firmware runtime failure\n");
1071 		goto fw_load_exit;
1072 	}
1073 
1074 	fw_boot_success = true;
1075 
1076 fw_load_exit:
1077 	qtnf_pcie_fw_boot_done(bus, fw_boot_success, DRV_NAME);
1078 
1079 	if (fw_boot_success) {
1080 		qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
1081 		qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
1082 	}
1083 }
1084 
1085 static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
1086 {
1087 	struct qtnf_pcie_pearl_state *ps = (void *)data;
1088 
1089 	qtnf_pearl_data_tx_reclaim(ps);
1090 	qtnf_en_txdone_irq(ps);
1091 }
1092 
1093 static int qtnf_pearl_check_chip_id(struct qtnf_pcie_pearl_state *ps)
1094 {
1095 	unsigned int chipid;
1096 
1097 	chipid = qtnf_chip_id_get(ps->base.sysctl_bar);
1098 
1099 	switch (chipid) {
1100 	case QTN_CHIP_ID_PEARL:
1101 	case QTN_CHIP_ID_PEARL_B:
1102 	case QTN_CHIP_ID_PEARL_C:
1103 		pr_info("chip ID is 0x%x\n", chipid);
1104 		break;
1105 	default:
1106 		pr_err("incorrect chip ID 0x%x\n", chipid);
1107 		return -ENODEV;
1108 	}
1109 
1110 	return 0;
1111 }
1112 
1113 static int qtnf_pcie_pearl_probe(struct pci_dev *pdev,
1114 				 const struct pci_device_id *id)
1115 {
1116 	struct qtnf_shm_ipc_int ipc_int;
1117 	struct qtnf_pcie_pearl_state *ps;
1118 	struct qtnf_bus *bus;
1119 	int ret;
1120 	u64 dma_mask;
1121 
1122 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1123 	dma_mask = DMA_BIT_MASK(64);
1124 #else
1125 	dma_mask = DMA_BIT_MASK(32);
1126 #endif
1127 
1128 	ret = qtnf_pcie_probe(pdev, sizeof(*ps), &qtnf_pcie_pearl_bus_ops,
1129 			      dma_mask, use_msi);
1130 	if (ret)
1131 		return ret;
1132 
1133 	bus = pci_get_drvdata(pdev);
1134 	ps = get_bus_priv(bus);
1135 
1136 	spin_lock_init(&ps->irq_lock);
1137 
1138 	tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
1139 		     (unsigned long)ps);
1140 	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
1141 		       qtnf_pcie_pearl_rx_poll, 10);
1142 	INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);
1143 
1144 	ps->pcie_reg_base = ps->base.dmareg_bar;
1145 	ps->bda = ps->base.epmem_bar;
1146 	writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
1147 
1148 	ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
1149 	ipc_int.arg = ps;
1150 	qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
1151 			       &ps->bda->bda_shm_reg2, &ipc_int);
1152 
1153 	ret = qtnf_pearl_check_chip_id(ps);
1154 	if (ret)
1155 		goto error;
1156 
1157 	ret = qtnf_pcie_pearl_init_xfer(ps);
1158 	if (ret) {
1159 		pr_err("PCIE xfer init failed\n");
1160 		goto error;
1161 	}
1162 
1163 	/* init default irq settings */
1164 	qtnf_init_hdp_irqs(ps);
1165 
1166 	/* start with disabled irqs */
1167 	qtnf_disable_hdp_irqs(ps);
1168 
1169 	ret = devm_request_irq(&pdev->dev, pdev->irq,
1170 			       &qtnf_pcie_pearl_interrupt, 0,
1171 			       "qtnf_pcie_irq", (void *)bus);
1172 	if (ret) {
1173 		pr_err("failed to request pcie irq %d\n", pdev->irq);
1174 		goto err_xfer;
1175 	}
1176 
1177 	qtnf_pcie_bringup_fw_async(bus);
1178 
1179 	return 0;
1180 
1181 err_xfer:
1182 	qtnf_pearl_free_xfer_buffers(ps);
1183 error:
1184 	qtnf_pcie_remove(bus, &ps->base);
1185 
1186 	return ret;
1187 }
1188 
1189 static void qtnf_pcie_pearl_remove(struct pci_dev *pdev)
1190 {
1191 	struct qtnf_pcie_pearl_state *ps;
1192 	struct qtnf_bus *bus;
1193 
1194 	bus = pci_get_drvdata(pdev);
1195 	if (!bus)
1196 		return;
1197 
1198 	ps = get_bus_priv(bus);
1199 
1200 	qtnf_pcie_remove(bus, &ps->base);
1201 	qtnf_pearl_reset_ep(ps);
1202 	qtnf_pearl_free_xfer_buffers(ps);
1203 }
1204 
1205 #ifdef CONFIG_PM_SLEEP
1206 static int qtnf_pcie_pearl_suspend(struct device *dev)
1207 {
1208 	return -EOPNOTSUPP;
1209 }
1210 
1211 static int qtnf_pcie_pearl_resume(struct device *dev)
1212 {
1213 	return 0;
1214 }
1215 #endif /* CONFIG_PM_SLEEP */
1216 
1217 #ifdef CONFIG_PM_SLEEP
1218 /* Power Management Hooks */
1219 static SIMPLE_DEV_PM_OPS(qtnf_pcie_pearl_pm_ops, qtnf_pcie_pearl_suspend,
1220 			 qtnf_pcie_pearl_resume);
1221 #endif
1222 
1223 static const struct pci_device_id qtnf_pcie_devid_table[] = {
1224 	{
1225 		PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
1226 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1227 	},
1228 	{ },
1229 };
1230 
1231 MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
1232 
1233 static struct pci_driver qtnf_pcie_pearl_drv_data = {
1234 	.name = DRV_NAME,
1235 	.id_table = qtnf_pcie_devid_table,
1236 	.probe = qtnf_pcie_pearl_probe,
1237 	.remove = qtnf_pcie_pearl_remove,
1238 #ifdef CONFIG_PM_SLEEP
1239 	.driver = {
1240 		.pm = &qtnf_pcie_pearl_pm_ops,
1241 	},
1242 #endif
1243 };
1244 
1245 static int __init qtnf_pcie_pearl_register(void)
1246 {
1247 	pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
1248 	return pci_register_driver(&qtnf_pcie_pearl_drv_data);
1249 }
1250 
1251 static void __exit qtnf_pcie_pearl_exit(void)
1252 {
1253 	pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
1254 	pci_unregister_driver(&qtnf_pcie_pearl_drv_data);
1255 }
1256 
1257 module_init(qtnf_pcie_pearl_register);
1258 module_exit(qtnf_pcie_pearl_exit);
1259 
1260 MODULE_AUTHOR("Quantenna Communications");
1261 MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
1262 MODULE_LICENSE("GPL");
1263