xref: /linux/drivers/net/wireless/ath/ath10k/pci.c (revision d91517839e5d95adc0cf4b28caa7af62a71de526)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
23 
24 #include "core.h"
25 #include "debug.h"
26 
27 #include "targaddrs.h"
28 #include "bmi.h"
29 
30 #include "hif.h"
31 #include "htc.h"
32 
33 #include "ce.h"
34 #include "pci.h"
35 
36 enum ath10k_pci_irq_mode {
37 	ATH10K_PCI_IRQ_AUTO = 0,
38 	ATH10K_PCI_IRQ_LEGACY = 1,
39 	ATH10K_PCI_IRQ_MSI = 2,
40 };
41 
42 static unsigned int ath10k_target_ps;
43 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
44 
45 module_param(ath10k_target_ps, uint, 0644);
46 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
47 
48 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
49 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
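/* Usage note (illustrative): both knobs above are ordinary module
 * parameters, so they can be set at load time, for example:
 *
 *	modprobe ath10k_pci irq_mode=1
 *
 * to force legacy (shared) interrupts, or irq_mode=2 to force MSI.
 * Similarly, ath10k_target_ps=1 would enable the SoC power-save option.
 */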
50 
51 #define QCA988X_2_0_DEVICE_ID	(0x003c)
52 
53 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
54 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
55 	{0}
56 };
57 
58 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
59 				       u32 *data);
60 
61 static void ath10k_pci_process_ce(struct ath10k *ar);
62 static int ath10k_pci_post_rx(struct ath10k *ar);
63 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
64 					     int num);
65 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
66 static void ath10k_pci_stop_ce(struct ath10k *ar);
67 static int ath10k_pci_device_reset(struct ath10k *ar);
68 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
69 static int ath10k_pci_init_irq(struct ath10k *ar);
70 static int ath10k_pci_deinit_irq(struct ath10k *ar);
71 static int ath10k_pci_request_irq(struct ath10k *ar);
72 static void ath10k_pci_free_irq(struct ath10k *ar);
73 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
74 			       struct ath10k_ce_pipe *rx_pipe,
75 			       struct bmi_xfer *xfer);
76 static void ath10k_pci_cleanup_ce(struct ath10k *ar);
77 
78 static const struct ce_attr host_ce_config_wlan[] = {
79 	/* CE0: host->target HTC control and raw streams */
80 	{
81 		.flags = CE_ATTR_FLAGS,
82 		.src_nentries = 16,
83 		.src_sz_max = 256,
84 		.dest_nentries = 0,
85 	},
86 
87 	/* CE1: target->host HTT + HTC control */
88 	{
89 		.flags = CE_ATTR_FLAGS,
90 		.src_nentries = 0,
91 		.src_sz_max = 512,
92 		.dest_nentries = 512,
93 	},
94 
95 	/* CE2: target->host WMI */
96 	{
97 		.flags = CE_ATTR_FLAGS,
98 		.src_nentries = 0,
99 		.src_sz_max = 2048,
100 		.dest_nentries = 32,
101 	},
102 
103 	/* CE3: host->target WMI */
104 	{
105 		.flags = CE_ATTR_FLAGS,
106 		.src_nentries = 32,
107 		.src_sz_max = 2048,
108 		.dest_nentries = 0,
109 	},
110 
111 	/* CE4: host->target HTT */
112 	{
113 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
114 		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
115 		.src_sz_max = 256,
116 		.dest_nentries = 0,
117 	},
118 
119 	/* CE5: unused */
120 	{
121 		.flags = CE_ATTR_FLAGS,
122 		.src_nentries = 0,
123 		.src_sz_max = 0,
124 		.dest_nentries = 0,
125 	},
126 
127 	/* CE6: target autonomous hif_memcpy */
128 	{
129 		.flags = CE_ATTR_FLAGS,
130 		.src_nentries = 0,
131 		.src_sz_max = 0,
132 		.dest_nentries = 0,
133 	},
134 
135 	/* CE7: ce_diag, the Diagnostic Window */
136 	{
137 		.flags = CE_ATTR_FLAGS,
138 		.src_nentries = 2,
139 		.src_sz_max = DIAG_TRANSFER_LIMIT,
140 		.dest_nentries = 2,
141 	},
142 };
143 
144 /* Target firmware's Copy Engine configuration. */
145 static const struct ce_pipe_config target_ce_config_wlan[] = {
146 	/* CE0: host->target HTC control and raw streams */
147 	{
148 		.pipenum = 0,
149 		.pipedir = PIPEDIR_OUT,
150 		.nentries = 32,
151 		.nbytes_max = 256,
152 		.flags = CE_ATTR_FLAGS,
153 		.reserved = 0,
154 	},
155 
156 	/* CE1: target->host HTT + HTC control */
157 	{
158 		.pipenum = 1,
159 		.pipedir = PIPEDIR_IN,
160 		.nentries = 32,
161 		.nbytes_max = 512,
162 		.flags = CE_ATTR_FLAGS,
163 		.reserved = 0,
164 	},
165 
166 	/* CE2: target->host WMI */
167 	{
168 		.pipenum = 2,
169 		.pipedir = PIPEDIR_IN,
170 		.nentries = 32,
171 		.nbytes_max = 2048,
172 		.flags = CE_ATTR_FLAGS,
173 		.reserved = 0,
174 	},
175 
176 	/* CE3: host->target WMI */
177 	{
178 		.pipenum = 3,
179 		.pipedir = PIPEDIR_OUT,
180 		.nentries = 32,
181 		.nbytes_max = 2048,
182 		.flags = CE_ATTR_FLAGS,
183 		.reserved = 0,
184 	},
185 
186 	/* CE4: host->target HTT */
187 	{
188 		.pipenum = 4,
189 		.pipedir = PIPEDIR_OUT,
190 		.nentries = 256,
191 		.nbytes_max = 256,
192 		.flags = CE_ATTR_FLAGS,
193 		.reserved = 0,
194 	},
195 
196 	/* NB: 50% of src nentries, since tx has 2 frags */
197 
198 	/* CE5: unused */
199 	{
200 		.pipenum = 5,
201 		.pipedir = PIPEDIR_OUT,
202 		.nentries = 32,
203 		.nbytes_max = 2048,
204 		.flags = CE_ATTR_FLAGS,
205 		.reserved = 0,
206 	},
207 
208 	/* CE6: Reserved for target autonomous hif_memcpy */
209 	{
210 		.pipenum = 6,
211 		.pipedir = PIPEDIR_INOUT,
212 		.nentries = 32,
213 		.nbytes_max = 4096,
214 		.flags = CE_ATTR_FLAGS,
215 		.reserved = 0,
216 	},
217 
218 	/* CE7 used only by Host */
219 };
220 
221 static bool ath10k_pci_irq_pending(struct ath10k *ar)
222 {
223 	u32 cause;
224 
225 	/* Check if the shared legacy irq is for us */
226 	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
227 				  PCIE_INTR_CAUSE_ADDRESS);
228 	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
229 		return true;
230 
231 	return false;
232 }
233 
234 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
235 {
236 	/* IMPORTANT: INTR_CLR register has to be set after
237 	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
238 	 * really cleared. */
239 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
240 			   0);
241 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
242 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
243 
244 	/* IMPORTANT: this extra read transaction is required to
245 	 * flush the posted write buffer. */
246 	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
247 				 PCIE_INTR_ENABLE_ADDRESS);
248 }
249 
250 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
251 {
252 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
253 			   PCIE_INTR_ENABLE_ADDRESS,
254 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
255 
256 	/* IMPORTANT: this extra read transaction is required to
257 	 * flush the posted write buffer. */
258 	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
259 				 PCIE_INTR_ENABLE_ADDRESS);
260 }
261 
262 static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
263 {
264 	struct ath10k *ar = arg;
265 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
266 
267 	if (ar_pci->num_msi_intrs == 0) {
268 		if (!ath10k_pci_irq_pending(ar))
269 			return IRQ_NONE;
270 
271 		ath10k_pci_disable_and_clear_legacy_irq(ar);
272 	}
273 
274 	tasklet_schedule(&ar_pci->early_irq_tasklet);
275 
276 	return IRQ_HANDLED;
277 }
278 
279 static int ath10k_pci_request_early_irq(struct ath10k *ar)
280 {
281 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
282 	int ret;
283 
284 	/* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
285 	 * first interrupt from the irq vector is triggered in all cases for FW
286 	 * indication/errors */
287 	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
288 			  IRQF_SHARED, "ath10k_pci (early)", ar);
289 	if (ret) {
290 		ath10k_warn("failed to request early irq: %d\n", ret);
291 		return ret;
292 	}
293 
294 	return 0;
295 }
296 
297 static void ath10k_pci_free_early_irq(struct ath10k *ar)
298 {
299 	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
300 }
301 
302 /*
303  * Diagnostic read/write access is provided for startup/config/debug usage.
304  * Caller must guarantee proper alignment, when applicable, and single user
305  * at any moment.
306  */
307 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
308 				    int nbytes)
309 {
310 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
311 	int ret = 0;
312 	u32 buf;
313 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
314 	unsigned int id;
315 	unsigned int flags;
316 	struct ath10k_ce_pipe *ce_diag;
317 	/* Host buffer address in CE space */
318 	u32 ce_data;
319 	dma_addr_t ce_data_base = 0;
320 	void *data_buf = NULL;
321 	int i;
322 
323 	/*
324 	 * This code cannot handle reads to non-memory space. Redirect to the
325 	 * register read fn but preserve the multi word read capability of
326 	 * this fn
327 	 */
328 	if (address < DRAM_BASE_ADDRESS) {
329 		if (!IS_ALIGNED(address, 4) ||
330 		    !IS_ALIGNED((unsigned long)data, 4))
331 			return -EIO;
332 
333 		while ((nbytes >= 4) &&  ((ret = ath10k_pci_diag_read_access(
334 					   ar, address, (u32 *)data)) == 0)) {
335 			nbytes -= sizeof(u32);
336 			address += sizeof(u32);
337 			data += sizeof(u32);
338 		}
339 		return ret;
340 	}
341 
342 	ce_diag = ar_pci->ce_diag;
343 
344 	/*
345 	 * Allocate a temporary bounce buffer to hold caller's data
346 	 * to be DMA'ed from Target. This guarantees
347 	 *   1) 4-byte alignment
348 	 *   2) Buffer in DMA-able space
349 	 */
350 	orig_nbytes = nbytes;
351 	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
352 							 orig_nbytes,
353 							 &ce_data_base);
354 
355 	if (!data_buf) {
356 		ret = -ENOMEM;
357 		goto done;
358 	}
359 	memset(data_buf, 0, orig_nbytes);
360 
361 	remaining_bytes = orig_nbytes;
362 	ce_data = ce_data_base;
363 	while (remaining_bytes) {
364 		nbytes = min_t(unsigned int, remaining_bytes,
365 			       DIAG_TRANSFER_LIMIT);
366 
367 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
368 		if (ret != 0)
369 			goto done;
370 
371 		/* Request CE to send from Target(!) address to Host buffer */
372 		/*
373 		 * The address supplied by the caller is in the
374 		 * Target CPU virtual address space.
375 		 *
376 		 * In order to use this address with the diagnostic CE,
377 		 * convert it from Target CPU virtual address space
378 		 * to CE address space
379 		 */
380 		ath10k_pci_wake(ar);
381 		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
382 						     address);
383 		ath10k_pci_sleep(ar);
384 
385 		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
386 				 0);
387 		if (ret)
388 			goto done;
389 
390 		i = 0;
391 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
392 						     &completed_nbytes,
393 						     &id) != 0) {
394 			mdelay(1);
395 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
396 				ret = -EBUSY;
397 				goto done;
398 			}
399 		}
400 
401 		if (nbytes != completed_nbytes) {
402 			ret = -EIO;
403 			goto done;
404 		}
405 
406 		if (buf != (u32) address) {
407 			ret = -EIO;
408 			goto done;
409 		}
410 
411 		i = 0;
412 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
413 						     &completed_nbytes,
414 						     &id, &flags) != 0) {
415 			mdelay(1);
416 
417 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
418 				ret = -EBUSY;
419 				goto done;
420 			}
421 		}
422 
423 		if (nbytes != completed_nbytes) {
424 			ret = -EIO;
425 			goto done;
426 		}
427 
428 		if (buf != ce_data) {
429 			ret = -EIO;
430 			goto done;
431 		}
432 
433 		remaining_bytes -= nbytes;
434 		address += nbytes;
435 		ce_data += nbytes;
436 	}
437 
438 done:
439 	if (ret == 0) {
440 		/* Copy data from allocated DMA buf to caller's buf */
441 		WARN_ON_ONCE(orig_nbytes & 3);
442 		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
443 			((u32 *)data)[i] =
444 				__le32_to_cpu(((__le32 *)data_buf)[i]);
445 		}
446 	} else
447 		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
448 			   __func__, address);
449 
450 	if (data_buf)
451 		pci_free_consistent(ar_pci->pdev, orig_nbytes,
452 				    data_buf, ce_data_base);
453 
454 	return ret;
455 }
456 
457 /* Read 4-byte aligned data from Target memory or register */
458 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
459 				       u32 *data)
460 {
461 	/* Assume range doesn't cross this boundary */
462 	if (address >= DRAM_BASE_ADDRESS)
463 		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
464 
465 	ath10k_pci_wake(ar);
466 	*data = ath10k_pci_read32(ar, address);
467 	ath10k_pci_sleep(ar);
468 	return 0;
469 }
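/* Illustrative sketch (hypothetical caller, assuming a placeholder offset):
 * reading a single 32-bit word from target RAM through the diagnostic
 * window could look like the following. Real callers derive addresses with
 * host_interest_item_address(), as ath10k_pci_hif_dump_area() does below.
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = ath10k_pci_diag_read_access(ar, DRAM_BASE_ADDRESS + 0x1000,
 *					  &val);
 *	if (ret)
 *		ath10k_warn("diag read failed: %d\n", ret);
 */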
470 
471 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
472 				     const void *data, int nbytes)
473 {
474 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
475 	int ret = 0;
476 	u32 buf;
477 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
478 	unsigned int id;
479 	unsigned int flags;
480 	struct ath10k_ce_pipe *ce_diag;
481 	void *data_buf = NULL;
482 	u32 ce_data;	/* Host buffer address in CE space */
483 	dma_addr_t ce_data_base = 0;
484 	int i;
485 
486 	ce_diag = ar_pci->ce_diag;
487 
488 	/*
489 	 * Allocate a temporary bounce buffer to hold caller's data
490 	 * to be DMA'ed to Target. This guarantees
491 	 *   1) 4-byte alignment
492 	 *   2) Buffer in DMA-able space
493 	 */
494 	orig_nbytes = nbytes;
495 	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
496 							 orig_nbytes,
497 							 &ce_data_base);
498 	if (!data_buf) {
499 		ret = -ENOMEM;
500 		goto done;
501 	}
502 
503 	/* Copy caller's data to allocated DMA buf */
504 	WARN_ON_ONCE(orig_nbytes & 3);
505 	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
506 		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
507 
508 	/*
509 	 * The address supplied by the caller is in the
510 	 * Target CPU virtual address space.
511 	 *
512 	 * In order to use this address with the diagnostic CE,
513 	 * convert it from
514 	 *    Target CPU virtual address space
515 	 * to
516 	 *    CE address space
517 	 */
518 	ath10k_pci_wake(ar);
519 	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
520 	ath10k_pci_sleep(ar);
521 
522 	remaining_bytes = orig_nbytes;
523 	ce_data = ce_data_base;
524 	while (remaining_bytes) {
525 		/* FIXME: check cast */
526 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
527 
528 		/* Set up to receive directly into Target(!) address */
529 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
530 		if (ret != 0)
531 			goto done;
532 
533 		/*
534 		 * Request CE to send caller-supplied data that
535 		 * was copied to bounce buffer to Target(!) address.
536 		 */
537 		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
538 				     nbytes, 0, 0);
539 		if (ret != 0)
540 			goto done;
541 
542 		i = 0;
543 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
544 						     &completed_nbytes,
545 						     &id) != 0) {
546 			mdelay(1);
547 
548 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
549 				ret = -EBUSY;
550 				goto done;
551 			}
552 		}
553 
554 		if (nbytes != completed_nbytes) {
555 			ret = -EIO;
556 			goto done;
557 		}
558 
559 		if (buf != ce_data) {
560 			ret = -EIO;
561 			goto done;
562 		}
563 
564 		i = 0;
565 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
566 						     &completed_nbytes,
567 						     &id, &flags) != 0) {
568 			mdelay(1);
569 
570 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
571 				ret = -EBUSY;
572 				goto done;
573 			}
574 		}
575 
576 		if (nbytes != completed_nbytes) {
577 			ret = -EIO;
578 			goto done;
579 		}
580 
581 		if (buf != address) {
582 			ret = -EIO;
583 			goto done;
584 		}
585 
586 		remaining_bytes -= nbytes;
587 		address += nbytes;
588 		ce_data += nbytes;
589 	}
590 
591 done:
592 	if (data_buf) {
593 		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
594 				    ce_data_base);
595 	}
596 
597 	if (ret != 0)
598 		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
599 			   address);
600 
601 	return ret;
602 }
603 
604 /* Write 4B data to Target memory or register */
605 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
606 					u32 data)
607 {
608 	/* Assume range doesn't cross this boundary */
609 	if (address >= DRAM_BASE_ADDRESS)
610 		return ath10k_pci_diag_write_mem(ar, address, &data,
611 						 sizeof(u32));
612 
613 	ath10k_pci_wake(ar);
614 	ath10k_pci_write32(ar, address, data);
615 	ath10k_pci_sleep(ar);
616 	return 0;
617 }
618 
619 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
620 {
621 	void __iomem *mem = ath10k_pci_priv(ar)->mem;
622 	u32 val;
623 	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
624 		       RTC_STATE_ADDRESS);
625 	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
626 }
627 
628 int ath10k_do_pci_wake(struct ath10k *ar)
629 {
630 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
631 	void __iomem *pci_addr = ar_pci->mem;
632 	int tot_delay = 0;
633 	int curr_delay = 5;
634 
635 	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
636 		/* Force AWAKE */
637 		iowrite32(PCIE_SOC_WAKE_V_MASK,
638 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
639 			  PCIE_SOC_WAKE_ADDRESS);
640 	}
641 	atomic_inc(&ar_pci->keep_awake_count);
642 
643 	if (ar_pci->verified_awake)
644 		return 0;
645 
646 	for (;;) {
647 		if (ath10k_pci_target_is_awake(ar)) {
648 			ar_pci->verified_awake = true;
649 			return 0;
650 		}
651 
652 		if (tot_delay > PCIE_WAKE_TIMEOUT) {
653 			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
654 				    PCIE_WAKE_TIMEOUT,
655 				    atomic_read(&ar_pci->keep_awake_count));
656 			return -ETIMEDOUT;
657 		}
658 
659 		udelay(curr_delay);
660 		tot_delay += curr_delay;
661 
662 		if (curr_delay < 50)
663 			curr_delay += 5;
664 	}
665 }
666 
667 void ath10k_do_pci_sleep(struct ath10k *ar)
668 {
669 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
670 	void __iomem *pci_addr = ar_pci->mem;
671 
672 	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
673 		/* Allow sleep */
674 		ar_pci->verified_awake = false;
675 		iowrite32(PCIE_SOC_WAKE_RESET,
676 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
677 			  PCIE_SOC_WAKE_ADDRESS);
678 	}
679 }
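/* Illustrative sketch: keep_awake_count is a reference count, so wake and
 * sleep calls must stay balanced around any register access while SoC
 * power save is in use. The pattern used throughout this file is:
 *
 *	ath10k_pci_wake(ar);
 *	val = ath10k_pci_read32(ar, offset);	// offset is a placeholder
 *	ath10k_pci_sleep(ar);
 *
 * See e.g. ath10k_pci_diag_read_access() above for a real instance.
 */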
680 
681 /*
682  * FIXME: Handle OOM properly.
683  */
684 static inline
685 struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
686 {
687 	struct ath10k_pci_compl *compl = NULL;
688 
689 	spin_lock_bh(&pipe_info->pipe_lock);
690 	if (list_empty(&pipe_info->compl_free)) {
691 		ath10k_warn("Completion buffers are full\n");
692 		goto exit;
693 	}
694 	compl = list_first_entry(&pipe_info->compl_free,
695 				 struct ath10k_pci_compl, list);
696 	list_del(&compl->list);
697 exit:
698 	spin_unlock_bh(&pipe_info->pipe_lock);
699 	return compl;
700 }
701 
702 /* Called by lower (CE) layer when a send to Target completes. */
703 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
704 {
705 	struct ath10k *ar = ce_state->ar;
706 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
707 	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
708 	struct ath10k_pci_compl *compl;
709 	void *transfer_context;
710 	u32 ce_data;
711 	unsigned int nbytes;
712 	unsigned int transfer_id;
713 
714 	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
715 					     &ce_data, &nbytes,
716 					     &transfer_id) == 0) {
717 		compl = get_free_compl(pipe_info);
718 		if (!compl)
719 			break;
720 
721 		compl->state = ATH10K_PCI_COMPL_SEND;
722 		compl->ce_state = ce_state;
723 		compl->pipe_info = pipe_info;
724 		compl->skb = transfer_context;
725 		compl->nbytes = nbytes;
726 		compl->transfer_id = transfer_id;
727 		compl->flags = 0;
728 
729 		/*
730 		 * Add the completion to the processing queue.
731 		 */
732 		spin_lock_bh(&ar_pci->compl_lock);
733 		list_add_tail(&compl->list, &ar_pci->compl_process);
734 		spin_unlock_bh(&ar_pci->compl_lock);
735 	}
736 
737 	ath10k_pci_process_ce(ar);
738 }
739 
740 /* Called by lower (CE) layer when data is received from the Target. */
741 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
742 {
743 	struct ath10k *ar = ce_state->ar;
744 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
745 	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
746 	struct ath10k_pci_compl *compl;
747 	struct sk_buff *skb;
748 	void *transfer_context;
749 	u32 ce_data;
750 	unsigned int nbytes;
751 	unsigned int transfer_id;
752 	unsigned int flags;
753 
754 	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
755 					     &ce_data, &nbytes, &transfer_id,
756 					     &flags) == 0) {
757 		compl = get_free_compl(pipe_info);
758 		if (!compl)
759 			break;
760 
761 		compl->state = ATH10K_PCI_COMPL_RECV;
762 		compl->ce_state = ce_state;
763 		compl->pipe_info = pipe_info;
764 		compl->skb = transfer_context;
765 		compl->nbytes = nbytes;
766 		compl->transfer_id = transfer_id;
767 		compl->flags = flags;
768 
769 		skb = transfer_context;
770 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
771 				 skb->len + skb_tailroom(skb),
772 				 DMA_FROM_DEVICE);
773 		/*
774 		 * Add the completion to the processing queue.
775 		 */
776 		spin_lock_bh(&ar_pci->compl_lock);
777 		list_add_tail(&compl->list, &ar_pci->compl_process);
778 		spin_unlock_bh(&ar_pci->compl_lock);
779 	}
780 
781 	ath10k_pci_process_ce(ar);
782 }
783 
784 /* Send the first nbytes bytes of the buffer */
785 static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
786 				    unsigned int transfer_id,
787 				    unsigned int bytes, struct sk_buff *nbuf)
788 {
789 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
790 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
791 	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
792 	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
793 	unsigned int len;
794 	u32 flags = 0;
795 	int ret;
796 
797 	len = min(bytes, nbuf->len);
798 	bytes -= len;
799 
800 	if (len & 3)
801 		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
802 
803 	ath10k_dbg(ATH10K_DBG_PCI,
804 		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
805 		   nbuf->data, (unsigned long long) skb_cb->paddr,
806 		   nbuf->len, len);
807 	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
808 			"ath10k tx: data: ",
809 			nbuf->data, nbuf->len);
810 
811 	ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
812 			     flags);
813 	if (ret)
814 		ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
815 
816 	return ret;
817 }
818 
819 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
820 {
821 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
822 	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
823 }
824 
825 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
826 {
827 	u32 reg_dump_area = 0;
828 	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
829 	u32 host_addr;
830 	int ret;
831 	u32 i;
832 
833 	ath10k_err("firmware crashed!\n");
834 	ath10k_err("hardware name %s version 0x%x\n",
835 		   ar->hw_params.name, ar->target_version);
836 	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
837 		   ar->fw_version_minor, ar->fw_version_release,
838 		   ar->fw_version_build);
839 
840 	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
841 	ret = ath10k_pci_diag_read_mem(ar, host_addr,
842 				       &reg_dump_area, sizeof(u32));
843 	if (ret) {
844 		ath10k_err("failed to read FW dump area address: %d\n", ret);
845 		return;
846 	}
847 
848 	ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);
849 
850 	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
851 				       &reg_dump_values[0],
852 				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
853 	if (ret != 0) {
854 		ath10k_err("failed to read FW dump area: %d\n", ret);
855 		return;
856 	}
857 
858 	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
859 
860 	ath10k_err("target register dump\n");
861 	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
862 		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
863 			   i,
864 			   reg_dump_values[i],
865 			   reg_dump_values[i + 1],
866 			   reg_dump_values[i + 2],
867 			   reg_dump_values[i + 3]);
868 
869 	queue_work(ar->workqueue, &ar->restart_work);
870 }
871 
872 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
873 					       int force)
874 {
875 	if (!force) {
876 		int resources;
877 		/*
878 		 * Decide whether to actually poll for completions, or just
879 		 * wait for a later chance.
880 		 * If there seem to be plenty of resources left, then just wait
881 		 * since checking involves reading a CE register, which is a
882 		 * relatively expensive operation.
883 		 */
884 		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
885 
886 		/*
887 		 * If at least 50% of the total resources are still available,
888 		 * don't bother checking again yet.
889 		 */
890 		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
891 			return;
892 	}
893 	ath10k_ce_per_engine_service(ar, pipe);
894 }
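/* Worked example (illustrative): for CE3 (host->target WMI) the host
 * configuration above sets src_nentries = 32, so the early-return
 * threshold is 32 >> 1 = 16. With force == 0, the relatively expensive
 * ath10k_ce_per_engine_service() call is made only once 16 or fewer
 * send slots remain free on that pipe.
 */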
895 
896 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
897 					 struct ath10k_hif_cb *callbacks)
898 {
899 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
900 
901 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
902 
903 	memcpy(&ar_pci->msg_callbacks_current, callbacks,
904 	       sizeof(ar_pci->msg_callbacks_current));
905 }
906 
907 static int ath10k_pci_alloc_compl(struct ath10k *ar)
908 {
909 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
910 	const struct ce_attr *attr;
911 	struct ath10k_pci_pipe *pipe_info;
912 	struct ath10k_pci_compl *compl;
913 	int i, pipe_num, completions;
914 
915 	spin_lock_init(&ar_pci->compl_lock);
916 	INIT_LIST_HEAD(&ar_pci->compl_process);
917 
918 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
919 		pipe_info = &ar_pci->pipe_info[pipe_num];
920 
921 		spin_lock_init(&pipe_info->pipe_lock);
922 		INIT_LIST_HEAD(&pipe_info->compl_free);
923 
924 		/* Handle Diagnostic CE specially */
925 		if (pipe_info->ce_hdl == ar_pci->ce_diag)
926 			continue;
927 
928 		attr = &host_ce_config_wlan[pipe_num];
929 		completions = 0;
930 
931 		if (attr->src_nentries)
932 			completions += attr->src_nentries;
933 
934 		if (attr->dest_nentries)
935 			completions += attr->dest_nentries;
936 
937 		for (i = 0; i < completions; i++) {
938 			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
939 			if (!compl) {
940 				ath10k_warn("No memory for completion state\n");
941 				ath10k_pci_cleanup_ce(ar);
942 				return -ENOMEM;
943 			}
944 
945 			compl->state = ATH10K_PCI_COMPL_FREE;
946 			list_add_tail(&compl->list, &pipe_info->compl_free);
947 		}
948 	}
949 
950 	return 0;
951 }
952 
953 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
954 {
955 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
956 	const struct ce_attr *attr;
957 	struct ath10k_pci_pipe *pipe_info;
958 	int pipe_num, disable_interrupts;
959 
960 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
961 		pipe_info = &ar_pci->pipe_info[pipe_num];
962 
963 		/* Handle Diagnostic CE specially */
964 		if (pipe_info->ce_hdl == ar_pci->ce_diag)
965 			continue;
966 
967 		attr = &host_ce_config_wlan[pipe_num];
968 
969 		if (attr->src_nentries) {
970 			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
971 			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
972 						   ath10k_pci_ce_send_done,
973 						   disable_interrupts);
974 		}
975 
976 		if (attr->dest_nentries)
977 			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
978 						   ath10k_pci_ce_recv_data);
979 	}
980 
981 	return 0;
982 }
983 
984 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
985 {
986 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
987 	int i;
988 
989 	tasklet_kill(&ar_pci->intr_tq);
990 	tasklet_kill(&ar_pci->msi_fw_err);
991 	tasklet_kill(&ar_pci->early_irq_tasklet);
992 
993 	for (i = 0; i < CE_COUNT; i++)
994 		tasklet_kill(&ar_pci->pipe_info[i].intr);
995 }
996 
997 static void ath10k_pci_stop_ce(struct ath10k *ar)
998 {
999 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1000 	struct ath10k_pci_compl *compl;
1001 	struct sk_buff *skb;
1002 
1003 	/* Mark pending completions as aborted, so that upper layers free up
1004 	 * their associated resources */
1005 	spin_lock_bh(&ar_pci->compl_lock);
1006 	list_for_each_entry(compl, &ar_pci->compl_process, list) {
1007 		skb = compl->skb;
1008 		ATH10K_SKB_CB(skb)->is_aborted = true;
1009 	}
1010 	spin_unlock_bh(&ar_pci->compl_lock);
1011 }
1012 
1013 static void ath10k_pci_cleanup_ce(struct ath10k *ar)
1014 {
1015 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1016 	struct ath10k_pci_compl *compl, *tmp;
1017 	struct ath10k_pci_pipe *pipe_info;
1018 	struct sk_buff *netbuf;
1019 	int pipe_num;
1020 
1021 	/* Free pending completions. */
1022 	spin_lock_bh(&ar_pci->compl_lock);
1023 	if (!list_empty(&ar_pci->compl_process))
1024 		ath10k_warn("pending completions still present! possible memory leaks.\n");
1025 
1026 	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
1027 		list_del(&compl->list);
1028 		netbuf = compl->skb;
1029 		dev_kfree_skb_any(netbuf);
1030 		kfree(compl);
1031 	}
1032 	spin_unlock_bh(&ar_pci->compl_lock);
1033 
1034 	/* Free unused completions for each pipe. */
1035 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1036 		pipe_info = &ar_pci->pipe_info[pipe_num];
1037 
1038 		spin_lock_bh(&pipe_info->pipe_lock);
1039 		list_for_each_entry_safe(compl, tmp,
1040 					 &pipe_info->compl_free, list) {
1041 			list_del(&compl->list);
1042 			kfree(compl);
1043 		}
1044 		spin_unlock_bh(&pipe_info->pipe_lock);
1045 	}
1046 }
1047 
1048 static void ath10k_pci_process_ce(struct ath10k *ar)
1049 {
1050 	struct ath10k_pci *ar_pci = ar->hif.priv;
1051 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
1052 	struct ath10k_pci_compl *compl;
1053 	struct sk_buff *skb;
1054 	unsigned int nbytes;
1055 	int ret, send_done = 0;
1056 
1057 	/* Upper layers aren't ready to handle tx/rx completions in parallel so
1058 	 * we must serialize all completion processing. */
1059 
1060 	spin_lock_bh(&ar_pci->compl_lock);
1061 	if (ar_pci->compl_processing) {
1062 		spin_unlock_bh(&ar_pci->compl_lock);
1063 		return;
1064 	}
1065 	ar_pci->compl_processing = true;
1066 	spin_unlock_bh(&ar_pci->compl_lock);
1067 
1068 	for (;;) {
1069 		spin_lock_bh(&ar_pci->compl_lock);
1070 		if (list_empty(&ar_pci->compl_process)) {
1071 			spin_unlock_bh(&ar_pci->compl_lock);
1072 			break;
1073 		}
1074 		compl = list_first_entry(&ar_pci->compl_process,
1075 					 struct ath10k_pci_compl, list);
1076 		list_del(&compl->list);
1077 		spin_unlock_bh(&ar_pci->compl_lock);
1078 
1079 		switch (compl->state) {
1080 		case ATH10K_PCI_COMPL_SEND:
1081 			cb->tx_completion(ar,
1082 					  compl->skb,
1083 					  compl->transfer_id);
1084 			send_done = 1;
1085 			break;
1086 		case ATH10K_PCI_COMPL_RECV:
1087 			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
1088 			if (ret) {
1089 				ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1090 					    compl->pipe_info->pipe_num, ret);
1091 				break;
1092 			}
1093 
1094 			skb = compl->skb;
1095 			nbytes = compl->nbytes;
1096 
1097 			ath10k_dbg(ATH10K_DBG_PCI,
1098 				   "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
1099 				   skb, nbytes);
1100 			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
1101 					"ath10k rx: ", skb->data, nbytes);
1102 
1103 			if (skb->len + skb_tailroom(skb) >= nbytes) {
1104 				skb_trim(skb, 0);
1105 				skb_put(skb, nbytes);
1106 				cb->rx_completion(ar, skb,
1107 						  compl->pipe_info->pipe_num);
1108 			} else {
1109 				ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
1110 					    nbytes,
1111 					    skb->len + skb_tailroom(skb));
1112 			}
1113 			break;
1114 		case ATH10K_PCI_COMPL_FREE:
1115 			ath10k_warn("free completion cannot be processed\n");
1116 			break;
1117 		default:
1118 			ath10k_warn("invalid completion state (%d)\n",
1119 				    compl->state);
1120 			break;
1121 		}
1122 
1123 		compl->state = ATH10K_PCI_COMPL_FREE;
1124 
1125 		/*
1126 		 * Add completion back to the pipe's free list.
1127 		 */
1128 		spin_lock_bh(&compl->pipe_info->pipe_lock);
1129 		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
1130 		spin_unlock_bh(&compl->pipe_info->pipe_lock);
1131 	}
1132 
1133 	spin_lock_bh(&ar_pci->compl_lock);
1134 	ar_pci->compl_processing = false;
1135 	spin_unlock_bh(&ar_pci->compl_lock);
1136 }
1137 
1138 /* TODO - temporary mapping while we have too few CE's */
1139 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
1140 					      u16 service_id, u8 *ul_pipe,
1141 					      u8 *dl_pipe, int *ul_is_polled,
1142 					      int *dl_is_polled)
1143 {
1144 	int ret = 0;
1145 
1146 	/* polling for received messages not supported */
1147 	*dl_is_polled = 0;
1148 
1149 	switch (service_id) {
1150 	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
1151 		/*
1152 		 * Host->target HTT gets its own pipe, so it can be polled
1153 		 * while other pipes are interrupt driven.
1154 		 */
1155 		*ul_pipe = 4;
1156 		/*
1157 		 * Use the same target->host pipe for HTC ctrl, HTC raw
1158 		 * streams, and HTT.
1159 		 */
1160 		*dl_pipe = 1;
1161 		break;
1162 
1163 	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
1164 	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
1165 		/*
1166 		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
1167 		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
1168 		 * WMI services.  So, if another CE is needed, change
1169 		 * this to *ul_pipe = 3, which frees up CE 0.
1170 		 */
1171 		/* *ul_pipe = 3; */
1172 		*ul_pipe = 0;
1173 		*dl_pipe = 1;
1174 		break;
1175 
1176 	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1177 	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1178 	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1179 	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1180 
1181 	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1182 		*ul_pipe = 3;
1183 		*dl_pipe = 2;
1184 		break;
1185 
1186 		/* pipe 5 unused   */
1187 		/* pipe 6 reserved */
1188 		/* pipe 7 reserved */
1189 
1190 	default:
1191 		ret = -1;
1192 		break;
1193 	}
1194 	*ul_is_polled =
1195 		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1196 
1197 	return ret;
1198 }
1199 
1200 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1201 						u8 *ul_pipe, u8 *dl_pipe)
1202 {
1203 	int ul_is_polled, dl_is_polled;
1204 
1205 	(void)ath10k_pci_hif_map_service_to_pipe(ar,
1206 						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1207 						 ul_pipe,
1208 						 dl_pipe,
1209 						 &ul_is_polled,
1210 						 &dl_is_polled);
1211 }
1212 
1213 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1214 				   int num)
1215 {
1216 	struct ath10k *ar = pipe_info->hif_ce_state;
1217 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1218 	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1219 	struct sk_buff *skb;
1220 	dma_addr_t ce_data;
1221 	int i, ret = 0;
1222 
1223 	if (pipe_info->buf_sz == 0)
1224 		return 0;
1225 
1226 	for (i = 0; i < num; i++) {
1227 		skb = dev_alloc_skb(pipe_info->buf_sz);
1228 		if (!skb) {
1229 			ath10k_warn("failed to allocate skbuff for pipe %d\n",
1230 				    pipe_info->pipe_num);
1231 			ret = -ENOMEM;
1232 			goto err;
1233 		}
1234 
1235 		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1236 
1237 		ce_data = dma_map_single(ar->dev, skb->data,
1238 					 skb->len + skb_tailroom(skb),
1239 					 DMA_FROM_DEVICE);
1240 
1241 		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1242 			ath10k_warn("failed to DMA map sk_buff\n");
1243 			dev_kfree_skb_any(skb);
1244 			ret = -EIO;
1245 			goto err;
1246 		}
1247 
1248 		ATH10K_SKB_CB(skb)->paddr = ce_data;
1249 
1250 		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1251 					       pipe_info->buf_sz,
1252 					       PCI_DMA_FROMDEVICE);
1253 
1254 		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1255 						 ce_data);
1256 		if (ret) {
1257 			ath10k_warn("failed to enqueue to pipe %d: %d\n",
1258 				    pipe_info->pipe_num, ret);
1259 			goto err;
1260 		}
1261 	}
1262 
1263 	return ret;
1264 
1265 err:
1266 	ath10k_pci_rx_pipe_cleanup(pipe_info);
1267 	return ret;
1268 }
1269 
1270 static int ath10k_pci_post_rx(struct ath10k *ar)
1271 {
1272 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1273 	struct ath10k_pci_pipe *pipe_info;
1274 	const struct ce_attr *attr;
1275 	int pipe_num, ret = 0;
1276 
1277 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1278 		pipe_info = &ar_pci->pipe_info[pipe_num];
1279 		attr = &host_ce_config_wlan[pipe_num];
1280 
1281 		if (attr->dest_nentries == 0)
1282 			continue;
1283 
1284 		ret = ath10k_pci_post_rx_pipe(pipe_info,
1285 					      attr->dest_nentries - 1);
1286 		if (ret) {
1287 			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1288 				    pipe_num, ret);
1289 
1290 			for (; pipe_num >= 0; pipe_num--) {
1291 				pipe_info = &ar_pci->pipe_info[pipe_num];
1292 				ath10k_pci_rx_pipe_cleanup(pipe_info);
1293 			}
1294 			return ret;
1295 		}
1296 	}
1297 
1298 	return 0;
1299 }
1300 
1301 static int ath10k_pci_hif_start(struct ath10k *ar)
1302 {
1303 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1304 	int ret, ret_early;
1305 
1306 	ath10k_pci_free_early_irq(ar);
1307 	ath10k_pci_kill_tasklet(ar);
1308 
1309 	ret = ath10k_pci_alloc_compl(ar);
1310 	if (ret) {
1311 		ath10k_warn("failed to allocate CE completions: %d\n", ret);
1312 		goto err_early_irq;
1313 	}
1314 
1315 	ret = ath10k_pci_request_irq(ar);
1316 	if (ret) {
1317 		ath10k_warn("failed to request irqs: %d\n",
1318 			    ret);
1319 		goto err_free_compl;
1320 	}
1321 
1322 	ret = ath10k_pci_setup_ce_irq(ar);
1323 	if (ret) {
1324 		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
1325 		goto err_stop;
1326 	}
1327 
1328 	/* Post buffers once to start things off. */
1329 	ret = ath10k_pci_post_rx(ar);
1330 	if (ret) {
1331 		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1332 			    ret);
1333 		goto err_stop;
1334 	}
1335 
1336 	ar_pci->started = 1;
1337 	return 0;
1338 
1339 err_stop:
1340 	ath10k_ce_disable_interrupts(ar);
1341 	ath10k_pci_free_irq(ar);
1342 	ath10k_pci_kill_tasklet(ar);
1343 	ath10k_pci_stop_ce(ar);
1344 	ath10k_pci_process_ce(ar);
1345 err_free_compl:
1346 	ath10k_pci_cleanup_ce(ar);
1347 err_early_irq:
1348 	/* Though there should be no interrupts (device was reset)
1349 	 * power_down() expects the early IRQ to be installed as per the
1350 	 * driver lifecycle. */
1351 	ret_early = ath10k_pci_request_early_irq(ar);
1352 	if (ret_early)
1353 		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
1354 
1355 	return ret;
1356 }
1357 
1358 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1359 {
1360 	struct ath10k *ar;
1361 	struct ath10k_pci *ar_pci;
1362 	struct ath10k_ce_pipe *ce_hdl;
1363 	u32 buf_sz;
1364 	struct sk_buff *netbuf;
1365 	u32 ce_data;
1366 
1367 	buf_sz = pipe_info->buf_sz;
1368 
1369 	/* Unused Copy Engine */
1370 	if (buf_sz == 0)
1371 		return;
1372 
1373 	ar = pipe_info->hif_ce_state;
1374 	ar_pci = ath10k_pci_priv(ar);
1375 
1376 	if (!ar_pci->started)
1377 		return;
1378 
1379 	ce_hdl = pipe_info->ce_hdl;
1380 
1381 	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1382 					  &ce_data) == 0) {
1383 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1384 				 netbuf->len + skb_tailroom(netbuf),
1385 				 DMA_FROM_DEVICE);
1386 		dev_kfree_skb_any(netbuf);
1387 	}
1388 }
1389 
1390 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1391 {
1392 	struct ath10k *ar;
1393 	struct ath10k_pci *ar_pci;
1394 	struct ath10k_ce_pipe *ce_hdl;
1395 	struct sk_buff *netbuf;
1396 	u32 ce_data;
1397 	unsigned int nbytes;
1398 	unsigned int id;
1399 	u32 buf_sz;
1400 
1401 	buf_sz = pipe_info->buf_sz;
1402 
1403 	/* Unused Copy Engine */
1404 	if (buf_sz == 0)
1405 		return;
1406 
1407 	ar = pipe_info->hif_ce_state;
1408 	ar_pci = ath10k_pci_priv(ar);
1409 
1410 	if (!ar_pci->started)
1411 		return;
1412 
1413 	ce_hdl = pipe_info->ce_hdl;
1414 
1415 	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1416 					  &ce_data, &nbytes, &id) == 0) {
1417 		/*
1418 		 * Indicate the completion to the higher layer to free
1419 		 * the buffer
1420 		 */
1421 
1422 		if (!netbuf) {
1423 			ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
1424 				    ce_hdl->id);
1425 			continue;
1426 		}
1427 
1428 		ATH10K_SKB_CB(netbuf)->is_aborted = true;
1429 		ar_pci->msg_callbacks_current.tx_completion(ar,
1430 							    netbuf,
1431 							    id);
1432 	}
1433 }
1434 
1435 /*
1436  * Cleanup residual buffers for device shutdown:
1437  *    buffers that were enqueued for receive
1438  *    buffers that were to be sent
1439  * Note: Buffers that had completed but which were
1440  * not yet processed are on a completion queue. They
1441  * are handled when the completion thread shuts down.
1442  */
1443 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1444 {
1445 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1446 	int pipe_num;
1447 
1448 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1449 		struct ath10k_pci_pipe *pipe_info;
1450 
1451 		pipe_info = &ar_pci->pipe_info[pipe_num];
1452 		ath10k_pci_rx_pipe_cleanup(pipe_info);
1453 		ath10k_pci_tx_pipe_cleanup(pipe_info);
1454 	}
1455 }
1456 
1457 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1458 {
1459 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1460 	struct ath10k_pci_pipe *pipe_info;
1461 	int pipe_num;
1462 
1463 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1464 		pipe_info = &ar_pci->pipe_info[pipe_num];
1465 		if (pipe_info->ce_hdl) {
1466 			ath10k_ce_deinit(pipe_info->ce_hdl);
1467 			pipe_info->ce_hdl = NULL;
1468 			pipe_info->buf_sz = 0;
1469 		}
1470 	}
1471 }
1472 
1473 static void ath10k_pci_hif_stop(struct ath10k *ar)
1474 {
1475 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1476 	int ret;
1477 
1478 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1479 
1480 	ret = ath10k_ce_disable_interrupts(ar);
1481 	if (ret)
1482 		ath10k_warn("failed to disable CE interrupts: %d\n", ret);
1483 
1484 	ath10k_pci_free_irq(ar);
1485 	ath10k_pci_kill_tasklet(ar);
1486 	ath10k_pci_stop_ce(ar);
1487 
1488 	ret = ath10k_pci_request_early_irq(ar);
1489 	if (ret)
1490 		ath10k_warn("failed to re-enable early irq: %d\n", ret);
1491 
1492 	/* At this point, asynchronous threads are stopped, the target should
1493 	 * not DMA nor interrupt. We process the leftovers and then free
1494 	 * everything else up. */
1495 
1496 	ath10k_pci_process_ce(ar);
1497 	ath10k_pci_cleanup_ce(ar);
1498 	ath10k_pci_buffer_cleanup(ar);
1499 
1500 	/* Make sure the device won't access any structures on the host by
1501 	 * resetting it. The device was fed with PCI CE ringbuffer
1502 	 * configuration during init. If ringbuffers are freed and the device
1503 	 * were to access them this could lead to memory corruption on the
1504 	 * host. */
1505 	ath10k_pci_device_reset(ar);
1506 
1507 	ar_pci->started = 0;
1508 }
1509 
1510 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1511 					   void *req, u32 req_len,
1512 					   void *resp, u32 *resp_len)
1513 {
1514 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1515 	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1516 	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1517 	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1518 	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1519 	dma_addr_t req_paddr = 0;
1520 	dma_addr_t resp_paddr = 0;
1521 	struct bmi_xfer xfer = {};
1522 	void *treq, *tresp = NULL;
1523 	int ret = 0;
1524 
1525 	might_sleep();
1526 
1527 	if (resp && !resp_len)
1528 		return -EINVAL;
1529 
1530 	if (resp && resp_len && *resp_len == 0)
1531 		return -EINVAL;
1532 
1533 	treq = kmemdup(req, req_len, GFP_KERNEL);
1534 	if (!treq)
1535 		return -ENOMEM;
1536 
1537 	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1538 	ret = dma_mapping_error(ar->dev, req_paddr);
1539 	if (ret)
1540 		goto err_dma;
1541 
1542 	if (resp && resp_len) {
1543 		tresp = kzalloc(*resp_len, GFP_KERNEL);
1544 		if (!tresp) {
1545 			ret = -ENOMEM;
1546 			goto err_req;
1547 		}
1548 
1549 		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1550 					    DMA_FROM_DEVICE);
1551 		ret = dma_mapping_error(ar->dev, resp_paddr);
1552 		if (ret)
1553 			goto err_req;
1554 
1555 		xfer.wait_for_resp = true;
1556 		xfer.resp_len = 0;
1557 
1558 		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1559 	}
1560 
1561 	init_completion(&xfer.done);
1562 
1563 	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1564 	if (ret)
1565 		goto err_resp;
1566 
1567 	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1568 	if (ret) {
1569 		u32 unused_buffer;
1570 		unsigned int unused_nbytes;
1571 		unsigned int unused_id;
1572 
1573 		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1574 					   &unused_nbytes, &unused_id);
1575 	} else {
1576 		/* the transfer completed in time; make sure ret is 0 */
1577 		ret = 0;
1578 	}
1579 
1580 err_resp:
1581 	if (resp) {
1582 		u32 unused_buffer;
1583 
1584 		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1585 		dma_unmap_single(ar->dev, resp_paddr,
1586 				 *resp_len, DMA_FROM_DEVICE);
1587 	}
1588 err_req:
1589 	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1590 
1591 	if (ret == 0 && resp_len) {
1592 		*resp_len = min(*resp_len, xfer.resp_len);
1593 		memcpy(resp, tresp, xfer.resp_len);
1594 	}
1595 err_dma:
1596 	kfree(treq);
1597 	kfree(tresp);
1598 
1599 	return ret;
1600 }
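/* Illustrative sketch (hypothetical caller): this helper is normally
 * driven by the BMI layer (bmi.c) through the HIF ops, but a
 * request/response exchange would look roughly like the following.
 * The buffers are placeholders, not a real BMI command layout.
 *
 *	u8 req[16] = {};
 *	u8 resp[64];
 *	u32 resp_len = sizeof(resp);
 *	int ret;
 *
 *	ret = ath10k_pci_hif_exchange_bmi_msg(ar, req, sizeof(req),
 *					      resp, &resp_len);
 *	if (ret)
 *		ath10k_warn("bmi exchange failed: %d\n", ret);
 */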
1601 
1602 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1603 {
1604 	struct bmi_xfer *xfer;
1605 	u32 ce_data;
1606 	unsigned int nbytes;
1607 	unsigned int transfer_id;
1608 
1609 	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1610 					  &nbytes, &transfer_id))
1611 		return;
1612 
1613 	if (xfer->wait_for_resp)
1614 		return;
1615 
1616 	complete(&xfer->done);
1617 }
1618 
1619 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1620 {
1621 	struct bmi_xfer *xfer;
1622 	u32 ce_data;
1623 	unsigned int nbytes;
1624 	unsigned int transfer_id;
1625 	unsigned int flags;
1626 
1627 	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1628 					  &nbytes, &transfer_id, &flags))
1629 		return;
1630 
1631 	if (!xfer->wait_for_resp) {
1632 		ath10k_warn("unexpected: BMI data received; ignoring\n");
1633 		return;
1634 	}
1635 
1636 	xfer->resp_len = nbytes;
1637 	complete(&xfer->done);
1638 }
1639 
1640 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1641 			       struct ath10k_ce_pipe *rx_pipe,
1642 			       struct bmi_xfer *xfer)
1643 {
1644 	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1645 
1646 	while (time_before_eq(jiffies, timeout)) {
1647 		ath10k_pci_bmi_send_done(tx_pipe);
1648 		ath10k_pci_bmi_recv_data(rx_pipe);
1649 
1650 		if (completion_done(&xfer->done))
1651 			return 0;
1652 
1653 		schedule();
1654 	}
1655 
1656 	return -ETIMEDOUT;
1657 }
1658 
1659 /*
1660  * Map from service/endpoint to Copy Engine.
1661  * This table is derived from the CE_PCI TABLE, above.
1662  * It is passed to the Target at startup for use by firmware.
1663  */
1664 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1665 	{
1666 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1667 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1668 		 3,
1669 	},
1670 	{
1671 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1672 		 PIPEDIR_IN,		/* in = DL = target -> host */
1673 		 2,
1674 	},
1675 	{
1676 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1677 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1678 		 3,
1679 	},
1680 	{
1681 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1682 		 PIPEDIR_IN,		/* in = DL = target -> host */
1683 		 2,
1684 	},
1685 	{
1686 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1687 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1688 		 3,
1689 	},
1690 	{
1691 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1692 		 PIPEDIR_IN,		/* in = DL = target -> host */
1693 		 2,
1694 	},
1695 	{
1696 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1697 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1698 		 3,
1699 	},
1700 	{
1701 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1702 		 PIPEDIR_IN,		/* in = DL = target -> host */
1703 		 2,
1704 	},
1705 	{
1706 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1707 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1708 		 3,
1709 	},
1710 	{
1711 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1712 		 PIPEDIR_IN,		/* in = DL = target -> host */
1713 		 2,
1714 	},
1715 	{
1716 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1717 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1718 		 0,		/* could be moved to 3 (share with WMI) */
1719 	},
1720 	{
1721 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1722 		 PIPEDIR_IN,		/* in = DL = target -> host */
1723 		 1,
1724 	},
1725 	{
1726 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1727 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1728 		 0,
1729 	},
1730 	{
1731 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1732 		 PIPEDIR_IN,		/* in = DL = target -> host */
1733 		 1,
1734 	},
1735 	{
1736 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1737 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1738 		 4,
1739 	},
1740 	{
1741 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1742 		 PIPEDIR_IN,		/* in = DL = target -> host */
1743 		 1,
1744 	},
1745 
1746 	/* (Additions here) */
1747 
1748 	{				/* Must be last */
1749 		 0,
1750 		 0,
1751 		 0,
1752 	},
1753 };
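/* Illustrative sketch: firmware walks this table to resolve a service to a
 * CE pipe. A host-side equivalent, assuming the zero-filled terminator
 * entry and field names taken from the initializer order above
 * (service_id, pipedir, pipenum), could look like this; the mapping
 * actually used on the host side is ath10k_pci_hif_map_service_to_pipe().
 *
 *	static int ath10k_pci_lookup_ce(u16 service_id, u32 pipedir,
 *					u32 *pipenum)
 *	{
 *		const struct service_to_pipe *e;
 *
 *		for (e = target_service_to_ce_map_wlan;
 *		     e->service_id != 0; e++) {
 *			if (e->service_id == service_id &&
 *			    e->pipedir == pipedir) {
 *				*pipenum = e->pipenum;
 *				return 0;
 *			}
 *		}
 *
 *		return -ENOENT;
 *	}
 */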
1754 
1755 /*
1756  * Send an interrupt to the device to wake up the Target CPU
1757  * so it has an opportunity to notice any changed state.
1758  */
1759 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1760 {
1761 	int ret;
1762 	u32 core_ctrl;
1763 
1764 	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1765 					      CORE_CTRL_ADDRESS,
1766 					  &core_ctrl);
1767 	if (ret) {
1768 		ath10k_warn("failed to read core_ctrl: %d\n", ret);
1769 		return ret;
1770 	}
1771 
1772 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1773 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1774 
1775 	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1776 					       CORE_CTRL_ADDRESS,
1777 					   core_ctrl);
1778 	if (ret) {
1779 		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1780 			    ret);
1781 		return ret;
1782 	}
1783 
1784 	return 0;
1785 }
1786 
1787 static int ath10k_pci_init_config(struct ath10k *ar)
1788 {
1789 	u32 interconnect_targ_addr;
1790 	u32 pcie_state_targ_addr = 0;
1791 	u32 pipe_cfg_targ_addr = 0;
1792 	u32 svc_to_pipe_map = 0;
1793 	u32 pcie_config_flags = 0;
1794 	u32 ealloc_value;
1795 	u32 ealloc_targ_addr;
1796 	u32 flag2_value;
1797 	u32 flag2_targ_addr;
1798 	int ret = 0;
1799 
1800 	/* Download to Target the CE Config and the service-to-CE map */
1801 	interconnect_targ_addr =
1802 		host_interest_item_address(HI_ITEM(hi_interconnect_state));
1803 
1804 	/* Supply Target-side CE configuration */
1805 	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1806 					  &pcie_state_targ_addr);
1807 	if (ret != 0) {
1808 		ath10k_err("Failed to get pcie state addr: %d\n", ret);
1809 		return ret;
1810 	}
1811 
1812 	if (pcie_state_targ_addr == 0) {
1813 		ret = -EIO;
1814 		ath10k_err("Invalid pcie state addr\n");
1815 		return ret;
1816 	}
1817 
1818 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1819 					  offsetof(struct pcie_state,
1820 						   pipe_cfg_addr),
1821 					  &pipe_cfg_targ_addr);
1822 	if (ret != 0) {
1823 		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1824 		return ret;
1825 	}
1826 
1827 	if (pipe_cfg_targ_addr == 0) {
1828 		ret = -EIO;
1829 		ath10k_err("Invalid pipe cfg addr\n");
1830 		return ret;
1831 	}
1832 
1833 	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1834 				 target_ce_config_wlan,
1835 				 sizeof(target_ce_config_wlan));
1836 
1837 	if (ret != 0) {
1838 		ath10k_err("Failed to write pipe cfg: %d\n", ret);
1839 		return ret;
1840 	}
1841 
1842 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1843 					  offsetof(struct pcie_state,
1844 						   svc_to_pipe_map),
1845 					  &svc_to_pipe_map);
1846 	if (ret != 0) {
1847 		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1848 		return ret;
1849 	}
1850 
1851 	if (svc_to_pipe_map == 0) {
1852 		ret = -EIO;
1853 		ath10k_err("Invalid svc_to_pipe map\n");
1854 		return ret;
1855 	}
1856 
1857 	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1858 				 target_service_to_ce_map_wlan,
1859 				 sizeof(target_service_to_ce_map_wlan));
1860 	if (ret != 0) {
1861 		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1862 		return ret;
1863 	}
1864 
1865 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1866 					  offsetof(struct pcie_state,
1867 						   config_flags),
1868 					  &pcie_config_flags);
1869 	if (ret != 0) {
1870 		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1871 		return ret;
1872 	}
1873 
1874 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1875 
1876 	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1877 				 offsetof(struct pcie_state, config_flags),
1878 				 &pcie_config_flags,
1879 				 sizeof(pcie_config_flags));
1880 	if (ret != 0) {
1881 		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1882 		return ret;
1883 	}
1884 
1885 	/* configure early allocation */
1886 	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1887 
1888 	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1889 	if (ret != 0) {
1890 		ath10k_err("Failed to get early alloc val: %d\n", ret);
1891 		return ret;
1892 	}
1893 
1894 	/* first bank is switched to IRAM */
1895 	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1896 			 HI_EARLY_ALLOC_MAGIC_MASK);
1897 	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1898 			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1899 
1900 	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1901 	if (ret != 0) {
1902 		ath10k_err("Failed to set early alloc val: %d\n", ret);
1903 		return ret;
1904 	}
1905 
1906 	/* Tell Target to proceed with initialization */
1907 	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1908 
1909 	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1910 	if (ret != 0) {
1911 		ath10k_err("Failed to get option val: %d\n", ret);
1912 		return ret;
1913 	}
1914 
1915 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1916 
1917 	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1918 	if (ret != 0) {
1919 		ath10k_err("Failed to set option val: %d\n", ret);
1920 		return ret;
1921 	}
1922 
1923 	return 0;
1924 }
1925 
1926 
1927 
1928 static int ath10k_pci_ce_init(struct ath10k *ar)
1929 {
1930 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1931 	struct ath10k_pci_pipe *pipe_info;
1932 	const struct ce_attr *attr;
1933 	int pipe_num;
1934 
1935 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1936 		pipe_info = &ar_pci->pipe_info[pipe_num];
1937 		pipe_info->pipe_num = pipe_num;
1938 		pipe_info->hif_ce_state = ar;
1939 		attr = &host_ce_config_wlan[pipe_num];
1940 
1941 		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1942 		if (pipe_info->ce_hdl == NULL) {
1943 			ath10k_err("failed to initialize CE for pipe: %d\n",
1944 				   pipe_num);
1945 
1946 			/* It is safe to call it here. It checks if ce_hdl is
1947 			 * valid for each pipe */
1948 			ath10k_pci_ce_deinit(ar);
1949 			return -1;
1950 		}
1951 
1952 		if (pipe_num == CE_COUNT - 1) {
1953 			/*
1954 			 * Reserve the ultimate CE for
1955 			 * diagnostic Window support
1956 			 */
1957 			ar_pci->ce_diag = pipe_info->ce_hdl;
1958 			continue;
1959 		}
1960 
1961 		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1962 	}
1963 
1964 	return 0;
1965 }
1966 
1967 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1968 {
1969 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1970 	u32 fw_indicator_address, fw_indicator;
1971 
1972 	ath10k_pci_wake(ar);
1973 
1974 	fw_indicator_address = ar_pci->fw_indicator_address;
1975 	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1976 
1977 	if (fw_indicator & FW_IND_EVENT_PENDING) {
1978 		/* ACK: clear Target-side pending event */
1979 		ath10k_pci_write32(ar, fw_indicator_address,
1980 				   fw_indicator & ~FW_IND_EVENT_PENDING);
1981 
1982 		if (ar_pci->started) {
1983 			ath10k_pci_hif_dump_area(ar);
1984 		} else {
1985 			/*
1986 			 * Probable Target failure before we're prepared
1987 			 * to handle it.  Generally unexpected.
1988 			 */
1989 			ath10k_warn("early firmware event indicated\n");
1990 		}
1991 	}
1992 
1993 	ath10k_pci_sleep(ar);
1994 }
1995 
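/*
 * Power-up sequence: cold reset the target, optionally keep the SoC awake,
 * bring up the Copy Engines, set up (early) interrupts, wait for the
 * firmware to signal FW_IND_INITIALIZED, push the pipe/service
 * configuration and finally wake the target CPU.
 */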
1996 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1997 {
1998 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1999 	const char *irq_mode;
2000 	int ret;
2001 
2002 	/*
2003 	 * Bring the target up cleanly.
2004 	 *
2005 	 * The target may be in an undefined state with an AUX-powered Target
2006 	 * and a Host in WoW mode. If the Host crashes, loses power, or is
2007 	 * restarted (without unloading the driver) then the Target is left
2008 	 * (aux) powered and running. On a subsequent driver load, the Target
2009 	 * is in an unexpected state. We try to catch that here in order to
2010 	 * reset the Target and retry the probe.
2011 	 */
2012 	ret = ath10k_pci_device_reset(ar);
2013 	if (ret) {
2014 		ath10k_err("failed to reset target: %d\n", ret);
2015 		goto err;
2016 	}
2017 
2018 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2019 		/* Force AWAKE forever */
2020 		ath10k_do_pci_wake(ar);
2021 
2022 	ret = ath10k_pci_ce_init(ar);
2023 	if (ret) {
2024 		ath10k_err("failed to initialize CE: %d\n", ret);
2025 		goto err_ps;
2026 	}
2027 
2028 	ret = ath10k_ce_disable_interrupts(ar);
2029 	if (ret) {
2030 		ath10k_err("failed to disable CE interrupts: %d\n", ret);
2031 		goto err_ce;
2032 	}
2033 
2034 	ret = ath10k_pci_init_irq(ar);
2035 	if (ret) {
2036 		ath10k_err("failed to init irqs: %d\n", ret);
2037 		goto err_ce;
2038 	}
2039 
2040 	ret = ath10k_pci_request_early_irq(ar);
2041 	if (ret) {
2042 		ath10k_err("failed to request early irq: %d\n", ret);
2043 		goto err_deinit_irq;
2044 	}
2045 
2046 	ret = ath10k_pci_wait_for_target_init(ar);
2047 	if (ret) {
2048 		ath10k_err("failed to wait for target to init: %d\n", ret);
2049 		goto err_free_early_irq;
2050 	}
2051 
2052 	ret = ath10k_pci_init_config(ar);
2053 	if (ret) {
2054 		ath10k_err("failed to setup init config: %d\n", ret);
2055 		goto err_free_early_irq;
2056 	}
2057 
2058 	ret = ath10k_pci_wake_target_cpu(ar);
2059 	if (ret) {
2060 		ath10k_err("could not wake up target CPU: %d\n", ret);
2061 		goto err_free_early_irq;
2062 	}
2063 
2064 	if (ar_pci->num_msi_intrs > 1)
2065 		irq_mode = "MSI-X";
2066 	else if (ar_pci->num_msi_intrs == 1)
2067 		irq_mode = "MSI";
2068 	else
2069 		irq_mode = "legacy";
2070 
2071 	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2072 		ath10k_info("pci irq %s\n", irq_mode);
2073 
2074 	return 0;
2075 
2076 err_free_early_irq:
2077 	ath10k_pci_free_early_irq(ar);
2078 err_deinit_irq:
2079 	ath10k_pci_deinit_irq(ar);
2080 err_ce:
2081 	ath10k_pci_ce_deinit(ar);
2082 	ath10k_pci_device_reset(ar);
2083 err_ps:
2084 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2085 		ath10k_do_pci_sleep(ar);
2086 err:
2087 	return ret;
2088 }
2089 
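/*
 * Undo hif_power_up: release interrupts, cold reset the target, tear down
 * the Copy Engines and let the SoC sleep again.
 */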
2090 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2091 {
2092 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2093 
2094 	ath10k_pci_free_early_irq(ar);
2095 	ath10k_pci_kill_tasklet(ar);
2096 	ath10k_pci_deinit_irq(ar);
2097 	ath10k_pci_device_reset(ar);
2098 
2099 	ath10k_pci_ce_deinit(ar);
2100 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2101 		ath10k_do_pci_sleep(ar);
2102 }
2103 
2104 #ifdef CONFIG_PM
2105 
2106 #define ATH10K_PCI_PM_CONTROL 0x44
2107 
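/*
 * Note: offset 0x44 is assumed to be this device's PCI power management
 * control/status register (PMCSR); writing 0x3 to the low byte requests
 * D3hot on suspend and clearing it restores D0 on resume.
 */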
2108 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2109 {
2110 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2111 	struct pci_dev *pdev = ar_pci->pdev;
2112 	u32 val;
2113 
2114 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2115 
2116 	if ((val & 0x000000ff) != 0x3) {
2117 		pci_save_state(pdev);
2118 		pci_disable_device(pdev);
2119 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2120 				       (val & 0xffffff00) | 0x03);
2121 	}
2122 
2123 	return 0;
2124 }
2125 
2126 static int ath10k_pci_hif_resume(struct ath10k *ar)
2127 {
2128 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2129 	struct pci_dev *pdev = ar_pci->pdev;
2130 	u32 val;
2131 
2132 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2133 
2134 	if ((val & 0x000000ff) != 0) {
2135 		pci_restore_state(pdev);
2136 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2137 				       val & 0xffffff00);
2138 		/*
2139 		 * Suspend/Resume resets the PCI configuration space,
2140 		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2141 		 * to keep PCI Tx retries from interfering with C3 CPU state
2142 		 */
2143 		pci_read_config_dword(pdev, 0x40, &val);
2144 
2145 		if ((val & 0x0000ff00) != 0)
2146 			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2147 	}
2148 
2149 	return 0;
2150 }
2151 #endif
2152 
2153 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2154 	.send_head		= ath10k_pci_hif_send_head,
2155 	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
2156 	.start			= ath10k_pci_hif_start,
2157 	.stop			= ath10k_pci_hif_stop,
2158 	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
2159 	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
2160 	.send_complete_check	= ath10k_pci_hif_send_complete_check,
2161 	.set_callbacks		= ath10k_pci_hif_set_callbacks,
2162 	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
2163 	.power_up		= ath10k_pci_hif_power_up,
2164 	.power_down		= ath10k_pci_hif_power_down,
2165 #ifdef CONFIG_PM
2166 	.suspend		= ath10k_pci_hif_suspend,
2167 	.resume			= ath10k_pci_hif_resume,
2168 #endif
2169 };
2170 
2171 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2172 {
2173 	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2174 	struct ath10k_pci *ar_pci = pipe->ar_pci;
2175 
2176 	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2177 }
2178 
2179 static void ath10k_msi_err_tasklet(unsigned long data)
2180 {
2181 	struct ath10k *ar = (struct ath10k *)data;
2182 
2183 	ath10k_pci_fw_interrupt_handler(ar);
2184 }
2185 
2186 /*
2187  * Handler for a per-engine interrupt on a PARTICULAR CE.
2188  * This is used in cases where each CE has a private MSI interrupt.
2189  */
2190 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2191 {
2192 	struct ath10k *ar = arg;
2193 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2194 	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2195 
2196 	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2197 		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2198 		return IRQ_HANDLED;
2199 	}
2200 
2201 	/*
2202 	 * NOTE: We are able to derive ce_id from irq because we
2203 	 * use a one-to-one mapping for CEs 0..5.
2204 	 * CEs 6 & 7 do not use interrupts at all.
2205 	 *
2206 	 * This mapping must be kept in sync with the mapping
2207 	 * used by firmware.
2208 	 */
2209 	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2210 	return IRQ_HANDLED;
2211 }
2212 
2213 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2214 {
2215 	struct ath10k *ar = arg;
2216 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2217 
2218 	tasklet_schedule(&ar_pci->msi_fw_err);
2219 	return IRQ_HANDLED;
2220 }
2221 
2222 /*
2223  * Top-level interrupt handler for all PCI interrupts from a Target.
2224  * When a block of MSI interrupts is allocated, this top-level handler
2225  * is not used; instead, we directly call the correct sub-handler.
2226  */
2227 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2228 {
2229 	struct ath10k *ar = arg;
2230 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2231 
2232 	if (ar_pci->num_msi_intrs == 0) {
2233 		if (!ath10k_pci_irq_pending(ar))
2234 			return IRQ_NONE;
2235 
2236 		ath10k_pci_disable_and_clear_legacy_irq(ar);
2237 	}
2238 
2239 	tasklet_schedule(&ar_pci->intr_tq);
2240 
2241 	return IRQ_HANDLED;
2242 }
2243 
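/*
 * Early (pre-start) interrupt path: only acknowledge a pending firmware
 * event and warn, since crash dump structures are not yet available.
 */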
2244 static void ath10k_pci_early_irq_tasklet(unsigned long data)
2245 {
2246 	struct ath10k *ar = (struct ath10k *)data;
2247 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2248 	u32 fw_ind;
2249 	int ret;
2250 
2251 	ret = ath10k_pci_wake(ar);
2252 	if (ret) {
2253 		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2254 			    ret);
2255 		return;
2256 	}
2257 
2258 	fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
2259 	if (fw_ind & FW_IND_EVENT_PENDING) {
2260 		ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
2261 				   fw_ind & ~FW_IND_EVENT_PENDING);
2262 
2263 		/* Some structures are unavailable during early boot or at
2264 		 * driver teardown, so just print that the device has crashed. */
2265 		ath10k_warn("device crashed - no diagnostics available\n");
2266 	}
2267 
2268 	ath10k_pci_sleep(ar);
2269 	ath10k_pci_enable_legacy_irq(ar);
2270 }
2271 
2272 static void ath10k_pci_tasklet(unsigned long data)
2273 {
2274 	struct ath10k *ar = (struct ath10k *)data;
2275 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2276 
2277 	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2278 	ath10k_ce_per_engine_service_any(ar);
2279 
2280 	/* Re-enable legacy irq that was disabled in the irq handler */
2281 	if (ar_pci->num_msi_intrs == 0)
2282 		ath10k_pci_enable_legacy_irq(ar);
2283 }
2284 
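/*
 * MSI-X style layout: vector MSI_ASSIGN_FW services firmware error
 * indications while vectors MSI_ASSIGN_CE_INITIAL..MSI_ASSIGN_CE_MAX map
 * one-to-one onto the interrupting Copy Engines.
 */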
2285 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2286 {
2287 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2288 	int ret, i;
2289 
2290 	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2291 			  ath10k_pci_msi_fw_handler,
2292 			  IRQF_SHARED, "ath10k_pci", ar);
2293 	if (ret) {
2294 		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2295 			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2296 		return ret;
2297 	}
2298 
2299 	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2300 		ret = request_irq(ar_pci->pdev->irq + i,
2301 				  ath10k_pci_per_engine_handler,
2302 				  IRQF_SHARED, "ath10k_pci", ar);
2303 		if (ret) {
2304 			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2305 				    ar_pci->pdev->irq + i, ret);
2306 
2307 			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2308 				free_irq(ar_pci->pdev->irq + i, ar);
2309 
2310 			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2311 			return ret;
2312 		}
2313 	}
2314 
2315 	return 0;
2316 }
2317 
2318 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2319 {
2320 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2321 	int ret;
2322 
2323 	ret = request_irq(ar_pci->pdev->irq,
2324 			  ath10k_pci_interrupt_handler,
2325 			  IRQF_SHARED, "ath10k_pci", ar);
2326 	if (ret) {
2327 		ath10k_warn("failed to request MSI irq %d: %d\n",
2328 			    ar_pci->pdev->irq, ret);
2329 		return ret;
2330 	}
2331 
2332 	return 0;
2333 }
2334 
2335 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2336 {
2337 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2338 	int ret;
2339 
2340 	ret = request_irq(ar_pci->pdev->irq,
2341 			  ath10k_pci_interrupt_handler,
2342 			  IRQF_SHARED, "ath10k_pci", ar);
2343 	if (ret) {
2344 		ath10k_warn("failed to request legacy irq %d: %d\n",
2345 			    ar_pci->pdev->irq, ret);
2346 		return ret;
2347 	}
2348 
2349 	return 0;
2350 }
2351 
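/*
 * num_msi_intrs selects the handler set: 0 = shared legacy interrupt,
 * 1 = single MSI, MSI_NUM_REQUEST = one vector per interrupt source.
 */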
2352 static int ath10k_pci_request_irq(struct ath10k *ar)
2353 {
2354 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2355 
2356 	switch (ar_pci->num_msi_intrs) {
2357 	case 0:
2358 		return ath10k_pci_request_irq_legacy(ar);
2359 	case 1:
2360 		return ath10k_pci_request_irq_msi(ar);
2361 	case MSI_NUM_REQUEST:
2362 		return ath10k_pci_request_irq_msix(ar);
2363 	}
2364 
2365 	ath10k_warn("unknown irq configuration upon request\n");
2366 	return -EINVAL;
2367 }
2368 
2369 static void ath10k_pci_free_irq(struct ath10k *ar)
2370 {
2371 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2372 	int i;
2373 
2374 	/* There's at least one interrupt regardless of whether it's legacy INTR,
2375 	 * MSI or MSI-X */
2376 	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2377 		free_irq(ar_pci->pdev->irq + i, ar);
2378 }
2379 
2380 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2381 {
2382 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2383 	int i;
2384 
2385 	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2386 	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2387 		     (unsigned long)ar);
2388 	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2389 		     (unsigned long)ar);
2390 
2391 	for (i = 0; i < CE_COUNT; i++) {
2392 		ar_pci->pipe_info[i].ar_pci = ar_pci;
2393 		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2394 			     (unsigned long)&ar_pci->pipe_info[i]);
2395 	}
2396 }
2397 
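/*
 * Interrupt setup order: try a full MSI block (reported as MSI-X) when the
 * hardware supports it, fall back to a single MSI and finally to the
 * shared legacy interrupt, which also requires enabling the target-side
 * interrupt sources.
 */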
2398 static int ath10k_pci_init_irq(struct ath10k *ar)
2399 {
2400 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2401 	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2402 				       ar_pci->features);
2403 	int ret;
2404 
2405 	ath10k_pci_init_irq_tasklets(ar);
2406 
2407 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2408 	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2409 		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
2410 
2411 	/* Try MSI-X */
2412 	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2413 		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2414 		ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
2415 		if (ret == 0)
2416 			return 0;
2417 		if (ret > 0)
2418 			pci_disable_msi(ar_pci->pdev);
2419 
2420 		/* fall-through */
2421 	}
2422 
2423 	/* Try MSI */
2424 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2425 		ar_pci->num_msi_intrs = 1;
2426 		ret = pci_enable_msi(ar_pci->pdev);
2427 		if (ret == 0)
2428 			return 0;
2429 
2430 		/* fall-through */
2431 	}
2432 
2433 	/* Try legacy irq
2434 	 *
2435 	 * A potential race occurs here: the CORE_BASE write
2436 	 * depends on the target correctly decoding the AXI address, but
2437 	 * the host won't know when the target writes BAR to CORE_CTRL.
2438 	 * This write might get lost if the target has NOT yet written BAR.
2439 	 * For now, mitigate the race by repeating the write in the
2440 	 * synchronization check below. */
2441 	ar_pci->num_msi_intrs = 0;
2442 
2443 	ret = ath10k_pci_wake(ar);
2444 	if (ret) {
2445 		ath10k_warn("failed to wake target: %d\n", ret);
2446 		return ret;
2447 	}
2448 
2449 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2450 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2451 	ath10k_pci_sleep(ar);
2452 
2453 	return 0;
2454 }
2455 
2456 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2457 {
2458 	int ret;
2459 
2460 	ret = ath10k_pci_wake(ar);
2461 	if (ret) {
2462 		ath10k_warn("failed to wake target: %d\n", ret);
2463 		return ret;
2464 	}
2465 
2466 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2467 			   0);
2468 	ath10k_pci_sleep(ar);
2469 
2470 	return 0;
2471 }
2472 
2473 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2474 {
2475 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2476 
2477 	switch (ar_pci->num_msi_intrs) {
2478 	case 0:
2479 		return ath10k_pci_deinit_irq_legacy(ar);
2480 	case 1:
2481 		/* fall-through */
2482 	case MSI_NUM_REQUEST:
2483 		pci_disable_msi(ar_pci->pdev);
2484 		return 0;
2485 	}
2486 
2487 	ath10k_warn("unknown irq configuration upon deinit\n");
2488 	return -EINVAL;
2489 }
2490 
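/*
 * Poll the firmware indicator for FW_IND_INITIALIZED in 10 ms steps for up
 * to 3 seconds; in legacy interrupt mode the PCIE_INTR_ENABLE write is
 * repeated every iteration to work around the race described above.
 */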
2491 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2492 {
2493 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2494 	int wait_limit = 300; /* 3 sec */
2495 	int ret;
2496 
2497 	ret = ath10k_pci_wake(ar);
2498 	if (ret) {
2499 		ath10k_err("failed to wake up target: %d\n", ret);
2500 		return ret;
2501 	}
2502 
2503 	while (wait_limit-- &&
2504 	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2505 		 FW_IND_INITIALIZED)) {
2506 		if (ar_pci->num_msi_intrs == 0)
2507 			/* Fix potential race by repeating CORE_BASE writes */
2508 			iowrite32(PCIE_INTR_FIRMWARE_MASK |
2509 				  PCIE_INTR_CE_MASK_ALL,
2510 				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2511 						 PCIE_INTR_ENABLE_ADDRESS));
2512 		mdelay(10);
2513 	}
2514 
2515 	if (wait_limit < 0) {
2516 		ath10k_err("target stalled\n");
2517 		ret = -EIO;
2518 		goto out;
2519 	}
2520 
2521 out:
2522 	ath10k_pci_sleep(ar);
2523 	return ret;
2524 }
2525 
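/*
 * Cold reset: assert bit 0 of SOC_GLOBAL_RESET, wait for RTC_STATE to
 * report the cold reset state, then deassert and wait for it to clear.
 */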
2526 static int ath10k_pci_device_reset(struct ath10k *ar)
2527 {
2528 	int i, ret;
2529 	u32 val;
2530 
2531 	ret = ath10k_do_pci_wake(ar);
2532 	if (ret) {
2533 		ath10k_err("failed to wake up target: %d\n",
2534 			   ret);
2535 		return ret;
2536 	}
2537 
2538 	/* Put Target, including PCIe, into RESET. */
2539 	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2540 	val |= 1;
2541 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2542 
2543 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2544 		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2545 					  RTC_STATE_COLD_RESET_MASK)
2546 			break;
2547 		msleep(1);
2548 	}
2549 
2550 	/* Pull Target, including PCIe, out of RESET. */
2551 	val &= ~1;
2552 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2553 
2554 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2555 		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2556 					    RTC_STATE_COLD_RESET_MASK))
2557 			break;
2558 		msleep(1);
2559 	}
2560 
2561 	ath10k_do_pci_sleep(ar);
2562 	return 0;
2563 }
2564 
2565 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2566 {
2567 	int i;
2568 
2569 	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2570 		if (!test_bit(i, ar_pci->features))
2571 			continue;
2572 
2573 		switch (i) {
2574 		case ATH10K_PCI_FEATURE_MSI_X:
2575 			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2576 			break;
2577 		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2578 			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2579 			break;
2580 		}
2581 	}
2582 }
2583 
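/*
 * Probe: allocate per-device state, create the ath10k core, claim BAR_NUM,
 * restrict DMA to 32 bits, map the SoC registers, read the chip id and
 * register with the core.
 */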
2584 static int ath10k_pci_probe(struct pci_dev *pdev,
2585 			    const struct pci_device_id *pci_dev)
2586 {
2587 	void __iomem *mem;
2588 	int ret = 0;
2589 	struct ath10k *ar;
2590 	struct ath10k_pci *ar_pci;
2591 	u32 lcr_val, chip_id;
2592 
2593 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2594 
2595 	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2596 	if (ar_pci == NULL)
2597 		return -ENOMEM;
2598 
2599 	ar_pci->pdev = pdev;
2600 	ar_pci->dev = &pdev->dev;
2601 
2602 	switch (pci_dev->device) {
2603 	case QCA988X_2_0_DEVICE_ID:
2604 		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2605 		break;
2606 	default:
2607 		ret = -ENODEV;
2608 		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2609 		goto err_ar_pci;
2610 	}
2611 
2612 	if (ath10k_target_ps)
2613 		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2614 
2615 	ath10k_pci_dump_features(ar_pci);
2616 
2617 	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2618 	if (!ar) {
2619 		ath10k_err("failed to create driver core\n");
2620 		ret = -EINVAL;
2621 		goto err_ar_pci;
2622 	}
2623 
2624 	ar_pci->ar = ar;
2625 	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2626 	atomic_set(&ar_pci->keep_awake_count, 0);
2627 
2628 	pci_set_drvdata(pdev, ar);
2629 
2630 	/*
2631 	 * Without any knowledge of the Host, the Target may have been reset or
2632 	 * power cycled and its Config Space may no longer reflect the PCI
2633 	 * address space that was assigned earlier by the PCI infrastructure.
2634 	 * Refresh it now.
2635 	 */
2636 	ret = pci_assign_resource(pdev, BAR_NUM);
2637 	if (ret) {
2638 		ath10k_err("failed to assign PCI space: %d\n", ret);
2639 		goto err_ar;
2640 	}
2641 
2642 	ret = pci_enable_device(pdev);
2643 	if (ret) {
2644 		ath10k_err("failed to enable PCI device: %d\n", ret);
2645 		goto err_ar;
2646 	}
2647 
2648 	/* Request MMIO resources */
2649 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2650 	if (ret) {
2651 		ath10k_err("failed to request MMIO region: %d\n", ret);
2652 		goto err_device;
2653 	}
2654 
2655 	/*
2656 	 * Target structures have a limit of 32-bit DMA pointers.
2657 	 * DMA pointers can be wider than 32 bits by default on some systems.
2658 	 */
2659 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2660 	if (ret) {
2661 		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2662 		goto err_region;
2663 	}
2664 
2665 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2666 	if (ret) {
2667 		ath10k_err("failed to set consistent DMA mask to 32-bit: %d\n", ret);
2668 		goto err_region;
2669 	}
2670 
2671 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2672 	pci_set_master(pdev);
2673 
2674 	/*
2675 	 * Temporary FIX: disable ASPM
2676 	 * Will be removed after the OTP is programmed
2677 	 */
2678 	pci_read_config_dword(pdev, 0x80, &lcr_val);
2679 	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2680 
2681 	/* Arrange for access to Target SoC registers. */
2682 	mem = pci_iomap(pdev, BAR_NUM, 0);
2683 	if (!mem) {
2684 		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2685 		ret = -EIO;
2686 		goto err_master;
2687 	}
2688 
2689 	ar_pci->mem = mem;
2690 
2691 	spin_lock_init(&ar_pci->ce_lock);
2692 
2693 	ret = ath10k_do_pci_wake(ar);
2694 	if (ret) {
2695 		ath10k_err("failed to wake target to read chip id: %d\n", ret);
2696 		goto err_iomap;
2697 	}
2698 
2699 	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2700 
2701 	ath10k_do_pci_sleep(ar);
2702 
2703 	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2704 
2705 	ret = ath10k_core_register(ar, chip_id);
2706 	if (ret) {
2707 		ath10k_err("failed to register driver core: %d\n", ret);
2708 		goto err_iomap;
2709 	}
2710 
2711 	return 0;
2712 
2713 err_iomap:
2714 	pci_iounmap(pdev, mem);
2715 err_master:
2716 	pci_clear_master(pdev);
2717 err_region:
2718 	pci_release_region(pdev, BAR_NUM);
2719 err_device:
2720 	pci_disable_device(pdev);
2721 err_ar:
2722 	ath10k_core_destroy(ar);
2723 err_ar_pci:
2724 	/* call HIF PCI free here */
2725 	kfree(ar_pci);
2726 
2727 	return ret;
2728 }
2729 
2730 static void ath10k_pci_remove(struct pci_dev *pdev)
2731 {
2732 	struct ath10k *ar = pci_get_drvdata(pdev);
2733 	struct ath10k_pci *ar_pci;
2734 
2735 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2736 
2737 	if (!ar)
2738 		return;
2739 
2740 	ar_pci = ath10k_pci_priv(ar);
2741 
2742 	if (!ar_pci)
2743 		return;
2744 
2745 	tasklet_kill(&ar_pci->msi_fw_err);
2746 
2747 	ath10k_core_unregister(ar);
2748 
2749 	pci_iounmap(pdev, ar_pci->mem);
2750 	pci_release_region(pdev, BAR_NUM);
2751 	pci_clear_master(pdev);
2752 	pci_disable_device(pdev);
2753 
2754 	ath10k_core_destroy(ar);
2755 	kfree(ar_pci);
2756 }
2757 
2758 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2759 
2760 static struct pci_driver ath10k_pci_driver = {
2761 	.name = "ath10k_pci",
2762 	.id_table = ath10k_pci_id_table,
2763 	.probe = ath10k_pci_probe,
2764 	.remove = ath10k_pci_remove,
2765 };
2766 
2767 static int __init ath10k_pci_init(void)
2768 {
2769 	int ret;
2770 
2771 	ret = pci_register_driver(&ath10k_pci_driver);
2772 	if (ret)
2773 		ath10k_err("failed to register PCI driver: %d\n", ret);
2774 
2775 	return ret;
2776 }
2777 module_init(ath10k_pci_init);
2778 
2779 static void __exit ath10k_pci_exit(void)
2780 {
2781 	pci_unregister_driver(&ath10k_pci_driver);
2782 }
2783 
2784 module_exit(ath10k_pci_exit);
2785 
2786 MODULE_AUTHOR("Qualcomm Atheros");
2787 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2788 MODULE_LICENSE("Dual BSD/GPL");
2789 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2790 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2791 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2792