1da8fa4e3SBjoern A. Zeeb // SPDX-License-Identifier: ISC
2da8fa4e3SBjoern A. Zeeb /*
3da8fa4e3SBjoern A. Zeeb * Copyright (c) 2005-2011 Atheros Communications Inc.
4da8fa4e3SBjoern A. Zeeb * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5da8fa4e3SBjoern A. Zeeb */
6da8fa4e3SBjoern A. Zeeb
7da8fa4e3SBjoern A. Zeeb #if defined(__FreeBSD__)
8da8fa4e3SBjoern A. Zeeb #define LINUXKPI_PARAM_PREFIX ath10k_pci_
9da8fa4e3SBjoern A. Zeeb #endif
10da8fa4e3SBjoern A. Zeeb
11da8fa4e3SBjoern A. Zeeb #include <linux/pci.h>
12da8fa4e3SBjoern A. Zeeb #include <linux/module.h>
13da8fa4e3SBjoern A. Zeeb #include <linux/interrupt.h>
14da8fa4e3SBjoern A. Zeeb #include <linux/spinlock.h>
15da8fa4e3SBjoern A. Zeeb #include <linux/bitops.h>
16da8fa4e3SBjoern A. Zeeb #if defined(__FreeBSD__)
17da8fa4e3SBjoern A. Zeeb #include <linux/delay.h>
18*c8e7f78aSBjoern A. Zeeb #include <sys/rman.h>
19da8fa4e3SBjoern A. Zeeb #endif
20da8fa4e3SBjoern A. Zeeb
21da8fa4e3SBjoern A. Zeeb #include "core.h"
22da8fa4e3SBjoern A. Zeeb #include "debug.h"
23da8fa4e3SBjoern A. Zeeb #include "coredump.h"
24da8fa4e3SBjoern A. Zeeb
25da8fa4e3SBjoern A. Zeeb #include "targaddrs.h"
26da8fa4e3SBjoern A. Zeeb #include "bmi.h"
27da8fa4e3SBjoern A. Zeeb
28da8fa4e3SBjoern A. Zeeb #include "hif.h"
29da8fa4e3SBjoern A. Zeeb #include "htc.h"
30da8fa4e3SBjoern A. Zeeb
31da8fa4e3SBjoern A. Zeeb #include "ce.h"
32da8fa4e3SBjoern A. Zeeb #include "pci.h"
33da8fa4e3SBjoern A. Zeeb
/* Reset policy selected via the "reset_mode" module parameter. */
enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,	/* driver picks the reset type */
	ATH10K_PCI_RESET_WARM_ONLY = 1,	/* restrict to warm resets */
};

/* IRQ and reset mode knobs, exposed as writable module parameters. */
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
47da8fa4e3SBjoern A. Zeeb
/* how long to wait for the target to initialise, in ms */
49da8fa4e3SBjoern A. Zeeb #define ATH10K_PCI_TARGET_WAIT 3000
50da8fa4e3SBjoern A. Zeeb #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
51da8fa4e3SBjoern A. Zeeb
52da8fa4e3SBjoern A. Zeeb /* Maximum number of bytes that can be handled atomically by
53da8fa4e3SBjoern A. Zeeb * diag read and write.
54da8fa4e3SBjoern A. Zeeb */
55da8fa4e3SBjoern A. Zeeb #define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
56da8fa4e3SBjoern A. Zeeb
57da8fa4e3SBjoern A. Zeeb #define QCA99X0_PCIE_BAR0_START_REG 0x81030
58da8fa4e3SBjoern A. Zeeb #define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c
59da8fa4e3SBjoern A. Zeeb #define QCA99X0_CPU_MEM_DATA_REG 0x4d010
60da8fa4e3SBjoern A. Zeeb
/* PCI device IDs this driver binds to.  Chip-revision filtering is done
 * separately via ath10k_pci_supp_chips[] below.
 */
static const struct pci_device_id ath10k_pci_id_table[] = {
	/* PCI-E QCA988X V2 (Ubiquiti branded) */
	{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },

	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
	{0} /* terminator */
};
75da8fa4e3SBjoern A. Zeeb
/* Supported (PCI device id, chip revision) combinations. */
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

	{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },

	{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },

	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },

	{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};
107da8fa4e3SBjoern A. Zeeb
/* Forward declarations: reset/IRQ plumbing, the BMI transfer wait helper,
 * and the Copy Engine send/receive callbacks referenced by the CE tables
 * below.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
126da8fa4e3SBjoern A. Zeeb
/* Host-side Copy Engine attributes, one entry per CE pipe (CE0-CE11).
 * src_* describes the host->target (send) ring, dest_* the target->host
 * (receive) ring; a zero entry count means the direction is unused.
 */
static const struct ce_attr pci_host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE4: host->target HTT (per-transfer interrupts disabled) */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htt_tx_cb,
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window (polled, no interrupts) */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
231da8fa4e3SBjoern A. Zeeb
/* Target firmware's Copy Engine configuration.  Sent to the target at
 * startup; all fields are little-endian as expected by the firmware.
 */
static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 target->host pktlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* It is not necessary to send target wlan configuration for CE10 &
	 * CE11 as these CEs are not actively used in target.
	 */
};
340da8fa4e3SBjoern A. Zeeb
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 * Each triple is (service id, pipe direction, pipe number), little-endian.
 */
static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
436da8fa4e3SBjoern A. Zeeb
/*
 * Check whether the target SoC reports itself fully awake by reading the
 * RTC state register over MMIO.  Pure register read: takes no locks and
 * does not touch the wake refcount or the cached ps_awake flag.
 */
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
#if defined(__linux__)
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);
#elif defined(__FreeBSD__)
	/* On FreeBSD ar_pci->mem holds a bus-space resource rather than a
	 * mapped pointer, so use bus_read_4() with the register offset.
	 */
	u32 val = bus_read_4((struct resource *)ar_pci->mem, PCIE_LOCAL_BASE_ADDRESS +
	    RTC_STATE_ADDRESS);
#endif

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
450da8fa4e3SBjoern A. Zeeb
/*
 * Initiate a target wakeup by writing PCIE_SOC_WAKE_V_MASK to the
 * SOC_WAKE register.  Caller must hold ps_lock.  This only requests the
 * wakeup; completion is polled separately (see ath10k_pci_wake_wait()).
 */
static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

#if defined(__linux__)
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
#elif defined(__FreeBSD__)
	bus_write_4((struct resource *)ar_pci->mem,
	    PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
	    PCIE_SOC_WAKE_V_MASK);
#endif
}
470da8fa4e3SBjoern A. Zeeb
/*
 * Allow the target to go back to sleep by resetting the SOC_WAKE
 * register, and clear the cached awake state.  Caller must hold ps_lock.
 */
static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

#if defined(__linux__)
	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
#elif defined(__FreeBSD__)
	bus_write_4((struct resource *)ar_pci->mem,
	    PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
	    PCIE_SOC_WAKE_RESET);
#endif
	ar_pci->ps_awake = false;
}
491da8fa4e3SBjoern A. Zeeb
ath10k_pci_wake_wait(struct ath10k * ar)492da8fa4e3SBjoern A. Zeeb static int ath10k_pci_wake_wait(struct ath10k *ar)
493da8fa4e3SBjoern A. Zeeb {
494da8fa4e3SBjoern A. Zeeb int tot_delay = 0;
495da8fa4e3SBjoern A. Zeeb int curr_delay = 5;
496da8fa4e3SBjoern A. Zeeb
497da8fa4e3SBjoern A. Zeeb while (tot_delay < PCIE_WAKE_TIMEOUT) {
498da8fa4e3SBjoern A. Zeeb if (ath10k_pci_is_awake(ar)) {
499da8fa4e3SBjoern A. Zeeb if (tot_delay > PCIE_WAKE_LATE_US)
500da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
501da8fa4e3SBjoern A. Zeeb tot_delay / 1000);
502da8fa4e3SBjoern A. Zeeb return 0;
503da8fa4e3SBjoern A. Zeeb }
504da8fa4e3SBjoern A. Zeeb
505da8fa4e3SBjoern A. Zeeb udelay(curr_delay);
506da8fa4e3SBjoern A. Zeeb tot_delay += curr_delay;
507da8fa4e3SBjoern A. Zeeb
508da8fa4e3SBjoern A. Zeeb if (curr_delay < 50)
509da8fa4e3SBjoern A. Zeeb curr_delay += 5;
510da8fa4e3SBjoern A. Zeeb }
511da8fa4e3SBjoern A. Zeeb
512da8fa4e3SBjoern A. Zeeb return -ETIMEDOUT;
513da8fa4e3SBjoern A. Zeeb }
514da8fa4e3SBjoern A. Zeeb
/*
 * Unconditionally wake the target.  Used when runtime power save is
 * disabled: if ar_pci->pci_ps is set this is a no-op returning 0, since
 * the refcounted ath10k_pci_wake() path manages wakeups in that case.
 *
 * Returns 0 on success, -ETIMEDOUT if the device never reported awake.
 */
static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	/* Only poke the hardware if we believe it is asleep. */
	if (!ar_pci->ps_awake) {
#if defined(__linux__)
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
#elif defined(__FreeBSD__)
		bus_write_4((struct resource *)ar_pci->mem,
		    PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
		    PCIE_SOC_WAKE_V_MASK);
#endif

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}
546da8fa4e3SBjoern A. Zeeb
/*
 * Unconditionally put the target to sleep, bypassing the wake refcount
 * and the grace-period timer used by the refcounted sleep path.
 */
static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

#if defined(__linux__)
	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
#elif defined(__FreeBSD__)
	bus_write_4((struct resource *)ar_pci->mem,
	    PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
	    PCIE_SOC_WAKE_RESET);
#endif
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
567da8fa4e3SBjoern A. Zeeb
/*
 * Take a power-save wake reference.  No-op (returns 0) when pci_ps is
 * disabled.  The hardware is actually woken only when the cached
 * ps_awake flag says it is asleep; the refcount is bumped on success.
 *
 * Returns 0 on success, -ETIMEDOUT if the wakeup poll timed out.
 */
static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		/* Refcount overflow would break the sleep accounting. */
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}
602da8fa4e3SBjoern A. Zeeb
/*
 * Drop a power-save wake reference.  The hardware is not put to sleep
 * immediately; instead the grace-period timer is (re)armed, and
 * ath10k_pci_ps_timer() performs the actual sleep if the refcount is
 * still zero when it fires.  No-op when pci_ps is disabled.
 */
static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0)
		return;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* Unbalanced sleep would underflow the refcount; bail out loudly. */
	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
627da8fa4e3SBjoern A. Zeeb
/*
 * Power-save grace-period timer callback.  Fires some time after the
 * last wake reference was dropped; if no new references appeared in
 * the meantime, put the device back to sleep.
 */
static void ath10k_pci_ps_timer(struct timer_list *t)
{
	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
	struct ath10k *ar = ar_pci->ar;
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* A wake reference was re-taken before the timer fired: stay awake. */
	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
647da8fa4e3SBjoern A. Zeeb
/*
 * Put the device to sleep synchronously, bypassing the grace period.
 *
 * Cancels the pending grace-period timer and sleeps the device
 * immediately.  Expects all wake references to have been dropped
 * already (WARN otherwise).  With power-save disabled, a forced sleep
 * is issued instead since the refcount machinery is unused.
 */
static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0) {
		ath10k_pci_force_sleep(ar);
		return;
	}

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
665da8fa4e3SBjoern A. Zeeb
/*
 * MMIO 32-bit write for the PCI bus backend.
 *
 * Bounds-checks the offset against the mapped BAR length, wakes the
 * target (power-save aware), performs the write using the OS-specific
 * accessor, and drops the wake reference.  On an out-of-bounds offset
 * or wake failure the write is logged and silently dropped.
 */
static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

#if defined(__linux__)
	iowrite32(value, ar_pci->mem + offset);
#elif defined(__FreeBSD__)
	/* On FreeBSD ar_pci->mem holds a struct resource, not a raw pointer. */
	bus_write_4((struct resource *)ar_pci->mem, offset, value);
#endif
	ath10k_pci_sleep(ar);
}
691da8fa4e3SBjoern A. Zeeb
/*
 * MMIO 32-bit read for the PCI bus backend.
 *
 * Bounds-checks the offset against the mapped BAR length, wakes the
 * target, reads via the OS-specific accessor and drops the wake
 * reference.  Returns 0 for an out-of-bounds offset and 0xffffffff
 * (all-ones, the conventional "device gone" pattern) on wake failure.
 */
static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

#if defined(__linux__)
	val = ioread32(ar_pci->mem + offset);
#elif defined(__FreeBSD__)
	/* On FreeBSD ar_pci->mem holds a struct resource, not a raw pointer. */
	val = bus_read_4((struct resource *)ar_pci->mem, offset);
#endif
	ath10k_pci_sleep(ar);

	return val;
}
720da8fa4e3SBjoern A. Zeeb
/* Dispatch a 32-bit register write through the bus-specific ops table. */
inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce_state = ath10k_ce_priv(ar);

	ce_state->bus_ops->write32(ar, offset, value);
}
727da8fa4e3SBjoern A. Zeeb
/* Dispatch a 32-bit register read through the bus-specific ops table. */
inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce_state = ath10k_ce_priv(ar);

	return ce_state->bus_ops->read32(ar, offset);
}
734da8fa4e3SBjoern A. Zeeb
/* Read a register relative to the RTC SoC register base. */
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	u32 offset = RTC_SOC_BASE_ADDRESS + addr;

	return ath10k_pci_read32(ar, offset);
}
739da8fa4e3SBjoern A. Zeeb
/* Write a register relative to the RTC SoC register base. */
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	u32 offset = RTC_SOC_BASE_ADDRESS + addr;

	ath10k_pci_write32(ar, offset, val);
}
744da8fa4e3SBjoern A. Zeeb
/* Read a register relative to the PCIe local register base. */
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	u32 offset = PCIE_LOCAL_BASE_ADDRESS + addr;

	return ath10k_pci_read32(ar, offset);
}
749da8fa4e3SBjoern A. Zeeb
/* Write a register relative to the PCIe local register base. */
void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	u32 offset = PCIE_LOCAL_BASE_ADDRESS + addr;

	ath10k_pci_write32(ar, offset, val);
}
754da8fa4e3SBjoern A. Zeeb
ath10k_pci_irq_pending(struct ath10k * ar)755da8fa4e3SBjoern A. Zeeb bool ath10k_pci_irq_pending(struct ath10k *ar)
756da8fa4e3SBjoern A. Zeeb {
757da8fa4e3SBjoern A. Zeeb u32 cause;
758da8fa4e3SBjoern A. Zeeb
759da8fa4e3SBjoern A. Zeeb /* Check if the shared legacy irq is for us */
760da8fa4e3SBjoern A. Zeeb cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
761da8fa4e3SBjoern A. Zeeb PCIE_INTR_CAUSE_ADDRESS);
762da8fa4e3SBjoern A. Zeeb if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
763da8fa4e3SBjoern A. Zeeb return true;
764da8fa4e3SBjoern A. Zeeb
765da8fa4e3SBjoern A. Zeeb return false;
766da8fa4e3SBjoern A. Zeeb }
767da8fa4e3SBjoern A. Zeeb
/*
 * Mask and acknowledge all legacy interrupt sources (firmware + CEs).
 * The register write order below is mandated by the hardware; do not
 * reorder.
 */
void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared.
	 */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
785da8fa4e3SBjoern A. Zeeb
/* Unmask all legacy interrupt sources (firmware + copy engines). */
void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
798da8fa4e3SBjoern A. Zeeb
ath10k_pci_get_irq_method(struct ath10k * ar)799da8fa4e3SBjoern A. Zeeb static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
800da8fa4e3SBjoern A. Zeeb {
801da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
802da8fa4e3SBjoern A. Zeeb
803da8fa4e3SBjoern A. Zeeb if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
804da8fa4e3SBjoern A. Zeeb return "msi";
805da8fa4e3SBjoern A. Zeeb
806da8fa4e3SBjoern A. Zeeb return "legacy";
807da8fa4e3SBjoern A. Zeeb }
808da8fa4e3SBjoern A. Zeeb
/*
 * Allocate one RX skb, DMA-map it and post it to the pipe's copy
 * engine under ce_lock.
 *
 * Returns 0 on success, -ENOMEM if allocation fails, -EIO on a DMA
 * mapping error, or the CE error (e.g. -ENOSPC when the ring is full).
 * On any failure the skb is unmapped (if needed) and freed.
 */
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	/* Hardware expects 4-byte aligned RX buffers. */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* Stash the DMA address in the skb cb for unmapping on completion. */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
847da8fa4e3SBjoern A. Zeeb
/*
 * Replenish the RX ring of one pipe with as many buffers as the copy
 * engine reports free.  On a post failure other than -ENOSPC, arm the
 * retry timer so replenishing is attempted again later.
 *
 * NOTE(review): the loop condition "num >= 0" attempts one more post
 * than the reported free count and relies on the CE returning -ENOSPC
 * to terminate - looks intentional, but TODO confirm against upstream.
 */
static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	/* Pipes with zero-sized buffers do not receive. */
	if (pipe->buf_sz == 0)
		return;

	/* Pipe has no destination ring (TX-only pipe). */
	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);

	while (num >= 0) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			/* Ring full - the expected loop terminator. */
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
		num--;
	}
}
879da8fa4e3SBjoern A. Zeeb
ath10k_pci_rx_post(struct ath10k * ar)880da8fa4e3SBjoern A. Zeeb void ath10k_pci_rx_post(struct ath10k *ar)
881da8fa4e3SBjoern A. Zeeb {
882da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
883da8fa4e3SBjoern A. Zeeb int i;
884da8fa4e3SBjoern A. Zeeb
885da8fa4e3SBjoern A. Zeeb for (i = 0; i < CE_COUNT; i++)
886da8fa4e3SBjoern A. Zeeb ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
887da8fa4e3SBjoern A. Zeeb }
888da8fa4e3SBjoern A. Zeeb
ath10k_pci_rx_replenish_retry(struct timer_list * t)889da8fa4e3SBjoern A. Zeeb void ath10k_pci_rx_replenish_retry(struct timer_list *t)
890da8fa4e3SBjoern A. Zeeb {
891da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
892da8fa4e3SBjoern A. Zeeb struct ath10k *ar = ar_pci->ar;
893da8fa4e3SBjoern A. Zeeb
894da8fa4e3SBjoern A. Zeeb ath10k_pci_rx_post(ar);
895da8fa4e3SBjoern A. Zeeb }
896da8fa4e3SBjoern A. Zeeb
/*
 * QCA988x: translate a target CPU virtual address to CE address space.
 * The low 20 bits pass through as a region offset; the window bits are
 * taken from CORE_CTRL and placed at bit 21, with bit 20 always set.
 */
static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 region = addr & 0xfffff;
	u32 window;

	window = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				       CORE_CTRL_ADDRESS);
	window = (window & 0x7ff) << 21;

	return window | 0x100000 | region;
}
906da8fa4e3SBjoern A. Zeeb
907da8fa4e3SBjoern A. Zeeb /* Refactor from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
908da8fa4e3SBjoern A. Zeeb * Support to access target space below 1M for qca6174 and qca9377.
909da8fa4e3SBjoern A. Zeeb * If target space is below 1M, the bit[20] of converted CE addr is 0.
910da8fa4e3SBjoern A. Zeeb * Otherwise bit[20] of converted CE addr is 1.
911da8fa4e3SBjoern A. Zeeb */
/*
 * QCA6174/QCA9377: as the QCA988x translation, except bit 20 of the
 * converted address reflects whether the target address is at or
 * above 1M (see the comment block above this function).
 */
static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 region = addr & 0xfffff;
	u32 window;

	window = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				       CORE_CTRL_ADDRESS);
	window = (window & 0x7ff) << 21;

	if (addr >= 0x100000)
		window |= 0x100000;

	return window | region;
}
921da8fa4e3SBjoern A. Zeeb
/*
 * QCA99x0: translate a target CPU virtual address to CE address space
 * using the PCIe BAR register as the window, with bit 20 always set.
 */
static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 region = addr & 0xfffff;
	u32 window;

	window = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);

	return window | 0x100000 | region;
}
930da8fa4e3SBjoern A. Zeeb
/*
 * Translate a target CPU virtual address to CE address space via the
 * chip-specific hook installed at probe time.
 *
 * NOTE(review): when the hook is missing this returns -ENOTSUPP
 * implicitly converted to u32 (the return type cannot carry an error
 * out of band); the WARN_ON_ONCE is what surfaces the misconfiguration.
 */
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
		return -ENOTSUPP;

	return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
940da8fa4e3SBjoern A. Zeeb
941da8fa4e3SBjoern A. Zeeb /*
942da8fa4e3SBjoern A. Zeeb * Diagnostic read/write access is provided for startup/config/debug usage.
943da8fa4e3SBjoern A. Zeeb * Caller must guarantee proper alignment, when applicable, and single user
944da8fa4e3SBjoern A. Zeeb * at any moment.
945da8fa4e3SBjoern A. Zeeb */
946da8fa4e3SBjoern A. Zeeb #if defined(__linux__)
ath10k_pci_diag_read_mem(struct ath10k * ar,u32 address,void * data,int nbytes)947da8fa4e3SBjoern A. Zeeb static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
948da8fa4e3SBjoern A. Zeeb #elif defined(__FreeBSD__)
949da8fa4e3SBjoern A. Zeeb static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, u8 *data,
950da8fa4e3SBjoern A. Zeeb #endif
951da8fa4e3SBjoern A. Zeeb int nbytes)
952da8fa4e3SBjoern A. Zeeb {
953da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
954da8fa4e3SBjoern A. Zeeb int ret = 0;
955da8fa4e3SBjoern A. Zeeb u32 *buf;
956da8fa4e3SBjoern A. Zeeb unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
957da8fa4e3SBjoern A. Zeeb struct ath10k_ce_pipe *ce_diag;
958da8fa4e3SBjoern A. Zeeb /* Host buffer address in CE space */
959da8fa4e3SBjoern A. Zeeb u32 ce_data;
960da8fa4e3SBjoern A. Zeeb dma_addr_t ce_data_base = 0;
961da8fa4e3SBjoern A. Zeeb void *data_buf;
962da8fa4e3SBjoern A. Zeeb int i;
963da8fa4e3SBjoern A. Zeeb
964da8fa4e3SBjoern A. Zeeb mutex_lock(&ar_pci->ce_diag_mutex);
965da8fa4e3SBjoern A. Zeeb ce_diag = ar_pci->ce_diag;
966da8fa4e3SBjoern A. Zeeb
967da8fa4e3SBjoern A. Zeeb /*
968da8fa4e3SBjoern A. Zeeb * Allocate a temporary bounce buffer to hold caller's data
969da8fa4e3SBjoern A. Zeeb * to be DMA'ed from Target. This guarantees
970da8fa4e3SBjoern A. Zeeb * 1) 4-byte alignment
971da8fa4e3SBjoern A. Zeeb * 2) Buffer in DMA-able space
972da8fa4e3SBjoern A. Zeeb */
973da8fa4e3SBjoern A. Zeeb alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
974da8fa4e3SBjoern A. Zeeb
975da8fa4e3SBjoern A. Zeeb data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
976da8fa4e3SBjoern A. Zeeb GFP_ATOMIC);
977da8fa4e3SBjoern A. Zeeb if (!data_buf) {
978da8fa4e3SBjoern A. Zeeb ret = -ENOMEM;
979da8fa4e3SBjoern A. Zeeb goto done;
980da8fa4e3SBjoern A. Zeeb }
981da8fa4e3SBjoern A. Zeeb
982da8fa4e3SBjoern A. Zeeb /* The address supplied by the caller is in the
983da8fa4e3SBjoern A. Zeeb * Target CPU virtual address space.
984da8fa4e3SBjoern A. Zeeb *
985da8fa4e3SBjoern A. Zeeb * In order to use this address with the diagnostic CE,
986da8fa4e3SBjoern A. Zeeb * convert it from Target CPU virtual address space
987da8fa4e3SBjoern A. Zeeb * to CE address space
988da8fa4e3SBjoern A. Zeeb */
989da8fa4e3SBjoern A. Zeeb address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
990da8fa4e3SBjoern A. Zeeb
991da8fa4e3SBjoern A. Zeeb remaining_bytes = nbytes;
992da8fa4e3SBjoern A. Zeeb ce_data = ce_data_base;
993da8fa4e3SBjoern A. Zeeb while (remaining_bytes) {
994da8fa4e3SBjoern A. Zeeb nbytes = min_t(unsigned int, remaining_bytes,
995da8fa4e3SBjoern A. Zeeb DIAG_TRANSFER_LIMIT);
996da8fa4e3SBjoern A. Zeeb
997da8fa4e3SBjoern A. Zeeb ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
998da8fa4e3SBjoern A. Zeeb if (ret != 0)
999da8fa4e3SBjoern A. Zeeb goto done;
1000da8fa4e3SBjoern A. Zeeb
1001da8fa4e3SBjoern A. Zeeb /* Request CE to send from Target(!) address to Host buffer */
1002da8fa4e3SBjoern A. Zeeb ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
1003da8fa4e3SBjoern A. Zeeb if (ret)
1004da8fa4e3SBjoern A. Zeeb goto done;
1005da8fa4e3SBjoern A. Zeeb
1006da8fa4e3SBjoern A. Zeeb i = 0;
1007da8fa4e3SBjoern A. Zeeb while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
1008da8fa4e3SBjoern A. Zeeb udelay(DIAG_ACCESS_CE_WAIT_US);
1009da8fa4e3SBjoern A. Zeeb i += DIAG_ACCESS_CE_WAIT_US;
1010da8fa4e3SBjoern A. Zeeb
1011da8fa4e3SBjoern A. Zeeb if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1012da8fa4e3SBjoern A. Zeeb ret = -EBUSY;
1013da8fa4e3SBjoern A. Zeeb goto done;
1014da8fa4e3SBjoern A. Zeeb }
1015da8fa4e3SBjoern A. Zeeb }
1016da8fa4e3SBjoern A. Zeeb
1017da8fa4e3SBjoern A. Zeeb i = 0;
1018da8fa4e3SBjoern A. Zeeb while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
1019da8fa4e3SBjoern A. Zeeb &completed_nbytes) != 0) {
1020da8fa4e3SBjoern A. Zeeb udelay(DIAG_ACCESS_CE_WAIT_US);
1021da8fa4e3SBjoern A. Zeeb i += DIAG_ACCESS_CE_WAIT_US;
1022da8fa4e3SBjoern A. Zeeb
1023da8fa4e3SBjoern A. Zeeb if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1024da8fa4e3SBjoern A. Zeeb ret = -EBUSY;
1025da8fa4e3SBjoern A. Zeeb goto done;
1026da8fa4e3SBjoern A. Zeeb }
1027da8fa4e3SBjoern A. Zeeb }
1028da8fa4e3SBjoern A. Zeeb
1029da8fa4e3SBjoern A. Zeeb if (nbytes != completed_nbytes) {
1030da8fa4e3SBjoern A. Zeeb ret = -EIO;
1031da8fa4e3SBjoern A. Zeeb goto done;
1032da8fa4e3SBjoern A. Zeeb }
1033da8fa4e3SBjoern A. Zeeb
1034da8fa4e3SBjoern A. Zeeb if (*buf != ce_data) {
1035da8fa4e3SBjoern A. Zeeb ret = -EIO;
1036da8fa4e3SBjoern A. Zeeb goto done;
1037da8fa4e3SBjoern A. Zeeb }
1038da8fa4e3SBjoern A. Zeeb
1039da8fa4e3SBjoern A. Zeeb remaining_bytes -= nbytes;
1040da8fa4e3SBjoern A. Zeeb memcpy(data, data_buf, nbytes);
1041da8fa4e3SBjoern A. Zeeb
1042da8fa4e3SBjoern A. Zeeb address += nbytes;
1043da8fa4e3SBjoern A. Zeeb data += nbytes;
1044da8fa4e3SBjoern A. Zeeb }
1045da8fa4e3SBjoern A. Zeeb
1046da8fa4e3SBjoern A. Zeeb done:
1047da8fa4e3SBjoern A. Zeeb
1048da8fa4e3SBjoern A. Zeeb if (data_buf)
1049da8fa4e3SBjoern A. Zeeb dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1050da8fa4e3SBjoern A. Zeeb ce_data_base);
1051da8fa4e3SBjoern A. Zeeb
1052da8fa4e3SBjoern A. Zeeb mutex_unlock(&ar_pci->ce_diag_mutex);
1053da8fa4e3SBjoern A. Zeeb
1054da8fa4e3SBjoern A. Zeeb return ret;
1055da8fa4e3SBjoern A. Zeeb }
1056da8fa4e3SBjoern A. Zeeb
/*
 * Read one 32-bit little-endian word from target memory into *value
 * (converted to host byte order).  Returns 0 or a negative errno; on
 * error *value still receives the (zero-initialized) conversion.
 */
static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

#if defined(__linux__)
	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
#elif defined(__FreeBSD__)
	ret = ath10k_pci_diag_read_mem(ar, address, (u8 *)&val, sizeof(val));
#endif
	*value = __le32_to_cpu(val);

	return ret;
}
1071da8fa4e3SBjoern A. Zeeb
/*
 * Read "len" bytes of firmware memory whose address is stored in the
 * host-interest item "src": first fetch the target pointer from the
 * host-interest area, then diag-read the memory it refers to into
 * "dest".  Returns 0 on success or a negative errno.
 */
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 hi_addr;
	u32 target_addr;
	int ret;

	hi_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, hi_addr, &target_addr);
	if (ret) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, target_addr, dest, len);
	if (ret) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    target_addr, len, ret);
		return ret;
	}

	return 0;
}
1096da8fa4e3SBjoern A. Zeeb
1097da8fa4e3SBjoern A. Zeeb #define ath10k_pci_diag_read_hi(ar, dest, src, len) \
1098da8fa4e3SBjoern A. Zeeb __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
1099da8fa4e3SBjoern A. Zeeb
/*
 * Diagnostic write of target memory via the dedicated diagnostic copy
 * engine - mirror image of ath10k_pci_diag_read_mem().
 *
 * Copies nbytes from the caller's buffer to target address "address"
 * in chunks of at most DIAG_TRANSFER_LIMIT, bouncing each chunk
 * through a DMA-coherent buffer.  Serialized by ce_diag_mutex.
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY on CE
 * timeout, -EIO on a CE consistency failure).
 */
int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
#if defined(__linux__)
			      const void *data, int nbytes)
#elif defined(__FreeBSD__)
			      const void *_d, int nbytes)
#endif
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf;
	dma_addr_t ce_data_base = 0;
	int i;
#if defined(__FreeBSD__)
	/* FreeBSD: use a byte pointer so "data += nbytes" is well-defined. */
	const u8 *data = _d;
#endif

	mutex_lock(&ar_pci->ce_diag_mutex);
	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
				      GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 * Target CPU virtual address space
	 * to
	 * CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = nbytes;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Copy caller's data to allocated DMA buf */
		memcpy(data_buf, data, nbytes);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
		if (ret != 0)
			goto done;

		/* Busy-poll for send completion, bounded by a timeout. */
		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		/* Busy-poll for receive completion, bounded by a timeout. */
		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
						     &completed_nbytes) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		/* Sanity: the CE must have delivered the whole chunk... */
		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		/* ...to the target address we posted. */
		if (*buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	mutex_unlock(&ar_pci->ce_diag_mutex);

	return ret;
}
1222da8fa4e3SBjoern A. Zeeb
/* Write a single 32-bit word to target memory through the diag window. */
static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 tmp;

	tmp = __cpu_to_le32(value);
	return ath10k_pci_diag_write_mem(ar, address, &tmp, sizeof(tmp));
}
1229da8fa4e3SBjoern A. Zeeb
1230da8fa4e3SBjoern A. Zeeb /* Called by lower (CE) layer when a send to Target completes. */
ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe * ce_state)1231da8fa4e3SBjoern A. Zeeb static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1232da8fa4e3SBjoern A. Zeeb {
1233da8fa4e3SBjoern A. Zeeb struct ath10k *ar = ce_state->ar;
1234da8fa4e3SBjoern A. Zeeb struct sk_buff_head list;
1235da8fa4e3SBjoern A. Zeeb struct sk_buff *skb;
1236da8fa4e3SBjoern A. Zeeb
1237da8fa4e3SBjoern A. Zeeb __skb_queue_head_init(&list);
1238da8fa4e3SBjoern A. Zeeb while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1239da8fa4e3SBjoern A. Zeeb /* no need to call tx completion for NULL pointers */
1240da8fa4e3SBjoern A. Zeeb if (skb == NULL)
1241da8fa4e3SBjoern A. Zeeb continue;
1242da8fa4e3SBjoern A. Zeeb
1243da8fa4e3SBjoern A. Zeeb __skb_queue_tail(&list, skb);
1244da8fa4e3SBjoern A. Zeeb }
1245da8fa4e3SBjoern A. Zeeb
1246da8fa4e3SBjoern A. Zeeb while ((skb = __skb_dequeue(&list)))
1247da8fa4e3SBjoern A. Zeeb ath10k_htc_tx_completion_handler(ar, skb);
1248da8fa4e3SBjoern A. Zeeb }
1249da8fa4e3SBjoern A. Zeeb
/* Drain completed RX buffers from a CE pipe, unmap each skb and hand it
 * to @callback, then replenish the pipe with fresh RX buffers.
 */
static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	/* Collect all completed buffers first; callbacks run afterwards so
	 * the CE completion ring is fully drained without re-entering it.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		/* The buffer was mapped for its full tailroom when posted,
		 * so unmap the same length regardless of received size.
		 */
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			/* Completion reports more data than the buffer can
			 * hold; drop it rather than overrun the skb.
			 */
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	/* Re-post RX buffers to replace the ones just consumed */
	ath10k_pci_rx_post_pipe(pipe_info);
}
1292da8fa4e3SBjoern A. Zeeb
/* HTT (CE5) RX path: like ath10k_pci_process_rx_cb() but the skbs stay
 * DMA-mapped and are recycled back to the device after @callback instead
 * of being freed and re-allocated.
 */
static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

	/* No need to acquire ce_lock for CE5, since this is the only place CE5
	 * is processed other than init and deinit. Before releasing CE5
	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
						    &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);

		if (unlikely(max_nbytes < nbytes)) {
			/* Oversized completion: skip the buffer (note it is
			 * not queued, so it is not recycled below either).
			 */
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			continue;
		}

		/* Buffer stays mapped; just hand ownership to the CPU */
		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					max_nbytes, DMA_FROM_DEVICE);
		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	nentries = skb_queue_len(&list);
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		/* The callback may consume part of the skb; restore it to
		 * an empty state before handing it back to the device.
		 */
		orig_len = skb->len;
		callback(ar, skb);
		skb_push(skb, orig_len - skb->len);
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

		/*let device gain the buffer again*/
		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					   skb->len + skb_tailroom(skb),
					   DMA_FROM_DEVICE);
	}
	/* Advance the RX write index by the number of recycled buffers */
	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}
1349da8fa4e3SBjoern A. Zeeb
/* Called by lower (CE) layer when data is received from the Target.
 * Delivers completed RX buffers to the generic HTC layer.
 */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
1355da8fa4e3SBjoern A. Zeeb
/* HTC RX callback variant that also services CE4 before delivering. */
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
1365da8fa4e3SBjoern A. Zeeb
/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* Route pktlog buffers to the HTT pktlog completion handler */
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}
1374da8fa4e3SBjoern A. Zeeb
1375da8fa4e3SBjoern A. Zeeb /* Called by lower (CE) layer when a send to HTT Target completes. */
ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe * ce_state)1376da8fa4e3SBjoern A. Zeeb static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1377da8fa4e3SBjoern A. Zeeb {
1378da8fa4e3SBjoern A. Zeeb struct ath10k *ar = ce_state->ar;
1379da8fa4e3SBjoern A. Zeeb struct sk_buff *skb;
1380da8fa4e3SBjoern A. Zeeb
1381da8fa4e3SBjoern A. Zeeb while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1382da8fa4e3SBjoern A. Zeeb /* no need to call tx completion for NULL pointers */
1383da8fa4e3SBjoern A. Zeeb if (!skb)
1384da8fa4e3SBjoern A. Zeeb continue;
1385da8fa4e3SBjoern A. Zeeb
1386da8fa4e3SBjoern A. Zeeb dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1387da8fa4e3SBjoern A. Zeeb skb->len, DMA_TO_DEVICE);
1388da8fa4e3SBjoern A. Zeeb ath10k_htt_hif_tx_complete(ar, skb);
1389da8fa4e3SBjoern A. Zeeb }
1390da8fa4e3SBjoern A. Zeeb }
1391da8fa4e3SBjoern A. Zeeb
/* Strip the HTC header and deliver an HTT target->host message. */
static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
1397da8fa4e3SBjoern A. Zeeb
/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	/* Use the zero-copy HTT path that recycles buffers to the device */
	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}
1408da8fa4e3SBjoern A. Zeeb
/* Queue a scatter/gather list onto a CE source ring. All items but the
 * last are submitted with CE_SEND_FLAG_GATHER so the hardware treats the
 * list as a single transfer; on failure every already-queued entry is
 * reverted. Returns 0 on success or a negative errno.
 */
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
			 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* Fail up front unless the ring has room for the whole list */
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	/* All items except the last carry the GATHER flag */
	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items -1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
#if defined(__linux__)
		   "pci tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);
#elif defined(__FreeBSD__)
		   "pci tx item %d paddr %pad len %d n_items %d pipe_id %u\n",
		   i, &items[i].paddr, items[i].len, n_items, pipe_id);
	/*
	 * XXX-BZ specific debug; the DELAY makes things work for one chipset.
	 * There's likely a race somewhere (here or LinuxKPI).
	 */
	if (n_items == 1 && items[i].len == 140) {
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);
		dump_stack();
		DELAY(500);
	}
#endif
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	/* Final item closes the gather sequence (no GATHER flag) */
	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);
	return 0;

err:
	/* Unwind the i entries already queued on the source ring */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}
1493da8fa4e3SBjoern A. Zeeb
/* HIF entry point: read target memory through the diagnostic window. */
int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
			     size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}
1499da8fa4e3SBjoern A. Zeeb
ath10k_pci_hif_get_free_queue_number(struct ath10k * ar,u8 pipe)1500da8fa4e3SBjoern A. Zeeb u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1501da8fa4e3SBjoern A. Zeeb {
1502da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1503da8fa4e3SBjoern A. Zeeb
1504da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1505da8fa4e3SBjoern A. Zeeb
1506da8fa4e3SBjoern A. Zeeb return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1507da8fa4e3SBjoern A. Zeeb }
1508da8fa4e3SBjoern A. Zeeb
/* Read the firmware failure-state registers through the diag window,
 * log them, and (if @crash_data is set) copy them into the coredump.
 */
static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->dump_mutex);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	/* The log loop below prints four words per line */
	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	/* Registers are stored little-endian in the crash data */
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}
1542da8fa4e3SBjoern A. Zeeb
/* Dump one memory region section-by-section into @buf.
 *
 * Gaps before and between sections are filled with ATH10K_MAGIC_NOT_COPIED
 * so the output keeps the region's layout. Returns the number of bytes
 * written to @buf (0 on bad input; never negative).
 */
static int ath10k_pci_dump_memory_section(struct ath10k *ar,
					  const struct ath10k_mem_region *mem_region,
					  u8 *buf, size_t buf_len)
{
	const struct ath10k_mem_section *cur_section, *next_section;
	unsigned int count, section_size, skip_size;
	int ret, i, j;

	if (!mem_region || !buf)
		return 0;

	cur_section = &mem_region->section_table.sections[0];

	if (mem_region->start > cur_section->start) {
		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
			    mem_region->start, cur_section->start);
		return 0;
	}

	skip_size = cur_section->start - mem_region->start;

	/* fill the gap between the first register section and register
	 * start address
	 * NOTE(review): assumes buf_len >= skip_size (region tables are
	 * expected to guarantee this) — confirm against the callers.
	 */
	for (i = 0; i < skip_size; i++) {
		*buf = ATH10K_MAGIC_NOT_COPIED;
		buf++;
	}

	count = 0;

	for (i = 0; cur_section != NULL; i++) {
		/* Validate the section bounds before subtracting: the old
		 * `section_size <= 0` test on an unsigned value could never
		 * catch a section whose end lies before its start (the
		 * subtraction would wrap to a huge positive size).
		 */
		if (cur_section->end <= cur_section->start) {
			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
				    cur_section->start,
				    cur_section->end);
			break;
		}

		section_size = cur_section->end - cur_section->start;

		if ((i + 1) == mem_region->section_table.size) {
			/* last section */
			next_section = NULL;
			skip_size = 0;
		} else {
			next_section = cur_section + 1;

			/* Sections must be sorted and non-overlapping */
			if (cur_section->end > next_section->start) {
				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
					    next_section->start,
					    cur_section->end);
				break;
			}

			skip_size = next_section->start - cur_section->end;
		}

		if (buf_len < (skip_size + section_size)) {
			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
			break;
		}

		buf_len -= skip_size + section_size;

		/* read section to dest memory */
		ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
					       buf, section_size);
		if (ret) {
			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
				    cur_section->start, ret);
			break;
		}

		buf += section_size;
		count += section_size;

		/* fill in the gap between this section and the next */
		for (j = 0; j < skip_size; j++) {
			*buf = ATH10K_MAGIC_NOT_COPIED;
			buf++;
		}

		count += skip_size;

		if (!next_section)
			/* this was the last section */
			break;

		cur_section = next_section;
	}

	return count;
}
1637da8fa4e3SBjoern A. Zeeb
/* Switch the target's RAM bank config (DRAM vs IRAM) and verify it. */
static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
{
	u32 readback;

	/* Program the requested bank configuration ... */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   FW_RAM_CONFIG_ADDRESS, config);

	/* ... then read it back to confirm the write took effect */
	readback = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				     FW_RAM_CONFIG_ADDRESS);
	if (readback != config) {
		ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
			    readback, config);
		return -EIO;
	}

	return 0;
}
1655da8fa4e3SBjoern A. Zeeb
/* Always returns the length */
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
				       const struct ath10k_mem_region *region,
				       u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 base_addr, i;

	/* The BAR0 start register provides the base for SRAM addressing */
#if defined(__linux__)
	base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
#elif defined(__FreeBSD__)
	base_addr = bus_read_4((struct resource *)ar_pci->mem, QCA99X0_PCIE_BAR0_START_REG);
#endif
	base_addr += region->start;

	/* Word-by-word indirect access: write the address register, then
	 * read the word back from the data register.
	 */
	for (i = 0; i < region->len; i += 4) {
#if defined(__linux__)
		iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
#elif defined(__FreeBSD__)
		bus_write_4((struct resource *)ar_pci->mem, QCA99X0_CPU_MEM_ADDR_REG, base_addr + i);
		*(u32 *)(buf + i) = bus_read_4((struct resource *)ar_pci->mem, QCA99X0_CPU_MEM_DATA_REG);
#endif
	}

	return region->len;
}
1683da8fa4e3SBjoern A. Zeeb
/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
				      const struct ath10k_mem_region *region,
				      u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 i;
	int ret;

	mutex_lock(&ar->conf_mutex);
	/* Register space is only accessible while the target is on */
	if (ar->state != ATH10K_STATE_ON) {
		ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
		ret = -EIO;
		goto done;
	}

	/* NOTE(review): assumes region->len is a multiple of 4; a
	 * non-aligned length would write up to 3 bytes past @buf —
	 * confirm against the region tables.
	 */
	for (i = 0; i < region->len; i += 4)
#if defined(__linux__)
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
#elif defined(__FreeBSD__)
		*(u32 *)(buf + i) = bus_read_4((struct resource *)ar_pci->mem, region->start + i);
#endif

	ret = region->len;
done:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}
1712da8fa4e3SBjoern A. Zeeb
1713da8fa4e3SBjoern A. Zeeb /* if an error happened returns < 0, otherwise the length */
ath10k_pci_dump_memory_generic(struct ath10k * ar,const struct ath10k_mem_region * current_region,u8 * buf)1714da8fa4e3SBjoern A. Zeeb static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
1715da8fa4e3SBjoern A. Zeeb const struct ath10k_mem_region *current_region,
1716da8fa4e3SBjoern A. Zeeb u8 *buf)
1717da8fa4e3SBjoern A. Zeeb {
1718da8fa4e3SBjoern A. Zeeb int ret;
1719da8fa4e3SBjoern A. Zeeb
1720da8fa4e3SBjoern A. Zeeb if (current_region->section_table.size > 0)
1721da8fa4e3SBjoern A. Zeeb /* Copy each section individually. */
1722da8fa4e3SBjoern A. Zeeb return ath10k_pci_dump_memory_section(ar,
1723da8fa4e3SBjoern A. Zeeb current_region,
1724da8fa4e3SBjoern A. Zeeb buf,
1725da8fa4e3SBjoern A. Zeeb current_region->len);
1726da8fa4e3SBjoern A. Zeeb
1727da8fa4e3SBjoern A. Zeeb /* No individiual memory sections defined so we can
1728da8fa4e3SBjoern A. Zeeb * copy the entire memory region.
1729da8fa4e3SBjoern A. Zeeb */
1730da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_diag_read_mem(ar,
1731da8fa4e3SBjoern A. Zeeb current_region->start,
1732da8fa4e3SBjoern A. Zeeb buf,
1733da8fa4e3SBjoern A. Zeeb current_region->len);
1734da8fa4e3SBjoern A. Zeeb if (ret) {
1735da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
1736da8fa4e3SBjoern A. Zeeb current_region->name, ret);
1737da8fa4e3SBjoern A. Zeeb return ret;
1738da8fa4e3SBjoern A. Zeeb }
1739da8fa4e3SBjoern A. Zeeb
1740da8fa4e3SBjoern A. Zeeb return current_region->len;
1741da8fa4e3SBjoern A. Zeeb }
1742da8fa4e3SBjoern A. Zeeb
ath10k_pci_dump_memory(struct ath10k * ar,struct ath10k_fw_crash_data * crash_data)1743da8fa4e3SBjoern A. Zeeb static void ath10k_pci_dump_memory(struct ath10k *ar,
1744da8fa4e3SBjoern A. Zeeb struct ath10k_fw_crash_data *crash_data)
1745da8fa4e3SBjoern A. Zeeb {
1746da8fa4e3SBjoern A. Zeeb const struct ath10k_hw_mem_layout *mem_layout;
1747da8fa4e3SBjoern A. Zeeb const struct ath10k_mem_region *current_region;
1748da8fa4e3SBjoern A. Zeeb struct ath10k_dump_ram_data_hdr *hdr;
1749da8fa4e3SBjoern A. Zeeb u32 count, shift;
1750da8fa4e3SBjoern A. Zeeb size_t buf_len;
1751da8fa4e3SBjoern A. Zeeb int ret, i;
1752da8fa4e3SBjoern A. Zeeb u8 *buf;
1753da8fa4e3SBjoern A. Zeeb
1754da8fa4e3SBjoern A. Zeeb lockdep_assert_held(&ar->dump_mutex);
1755da8fa4e3SBjoern A. Zeeb
1756da8fa4e3SBjoern A. Zeeb if (!crash_data)
1757da8fa4e3SBjoern A. Zeeb return;
1758da8fa4e3SBjoern A. Zeeb
1759da8fa4e3SBjoern A. Zeeb mem_layout = ath10k_coredump_get_mem_layout(ar);
1760da8fa4e3SBjoern A. Zeeb if (!mem_layout)
1761da8fa4e3SBjoern A. Zeeb return;
1762da8fa4e3SBjoern A. Zeeb
1763da8fa4e3SBjoern A. Zeeb current_region = &mem_layout->region_table.regions[0];
1764da8fa4e3SBjoern A. Zeeb
1765da8fa4e3SBjoern A. Zeeb buf = crash_data->ramdump_buf;
1766da8fa4e3SBjoern A. Zeeb buf_len = crash_data->ramdump_buf_len;
1767da8fa4e3SBjoern A. Zeeb
1768da8fa4e3SBjoern A. Zeeb memset(buf, 0, buf_len);
1769da8fa4e3SBjoern A. Zeeb
1770da8fa4e3SBjoern A. Zeeb for (i = 0; i < mem_layout->region_table.size; i++) {
1771da8fa4e3SBjoern A. Zeeb count = 0;
1772da8fa4e3SBjoern A. Zeeb
1773da8fa4e3SBjoern A. Zeeb if (current_region->len > buf_len) {
1774da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
1775da8fa4e3SBjoern A. Zeeb current_region->name,
1776da8fa4e3SBjoern A. Zeeb current_region->len,
1777da8fa4e3SBjoern A. Zeeb buf_len);
1778da8fa4e3SBjoern A. Zeeb break;
1779da8fa4e3SBjoern A. Zeeb }
1780da8fa4e3SBjoern A. Zeeb
1781da8fa4e3SBjoern A. Zeeb /* To get IRAM dump, the host driver needs to switch target
1782da8fa4e3SBjoern A. Zeeb * ram config from DRAM to IRAM.
1783da8fa4e3SBjoern A. Zeeb */
1784da8fa4e3SBjoern A. Zeeb if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
1785da8fa4e3SBjoern A. Zeeb current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
1786da8fa4e3SBjoern A. Zeeb shift = current_region->start >> 20;
1787da8fa4e3SBjoern A. Zeeb
1788da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_set_ram_config(ar, shift);
1789da8fa4e3SBjoern A. Zeeb if (ret) {
1790da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
1791da8fa4e3SBjoern A. Zeeb current_region->name, ret);
1792da8fa4e3SBjoern A. Zeeb break;
1793da8fa4e3SBjoern A. Zeeb }
1794da8fa4e3SBjoern A. Zeeb }
1795da8fa4e3SBjoern A. Zeeb
1796da8fa4e3SBjoern A. Zeeb /* Reserve space for the header. */
1797da8fa4e3SBjoern A. Zeeb hdr = (void *)buf;
1798da8fa4e3SBjoern A. Zeeb buf += sizeof(*hdr);
1799da8fa4e3SBjoern A. Zeeb buf_len -= sizeof(*hdr);
1800da8fa4e3SBjoern A. Zeeb
1801da8fa4e3SBjoern A. Zeeb switch (current_region->type) {
1802da8fa4e3SBjoern A. Zeeb case ATH10K_MEM_REGION_TYPE_IOSRAM:
1803da8fa4e3SBjoern A. Zeeb count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1804da8fa4e3SBjoern A. Zeeb break;
1805da8fa4e3SBjoern A. Zeeb case ATH10K_MEM_REGION_TYPE_IOREG:
1806da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1807da8fa4e3SBjoern A. Zeeb if (ret < 0)
1808da8fa4e3SBjoern A. Zeeb break;
1809da8fa4e3SBjoern A. Zeeb
1810da8fa4e3SBjoern A. Zeeb count = ret;
1811da8fa4e3SBjoern A. Zeeb break;
1812da8fa4e3SBjoern A. Zeeb default:
1813da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1814da8fa4e3SBjoern A. Zeeb if (ret < 0)
1815da8fa4e3SBjoern A. Zeeb break;
1816da8fa4e3SBjoern A. Zeeb
1817da8fa4e3SBjoern A. Zeeb count = ret;
1818da8fa4e3SBjoern A. Zeeb break;
1819da8fa4e3SBjoern A. Zeeb }
1820da8fa4e3SBjoern A. Zeeb
1821da8fa4e3SBjoern A. Zeeb hdr->region_type = cpu_to_le32(current_region->type);
1822da8fa4e3SBjoern A. Zeeb hdr->start = cpu_to_le32(current_region->start);
1823da8fa4e3SBjoern A. Zeeb hdr->length = cpu_to_le32(count);
1824da8fa4e3SBjoern A. Zeeb
1825da8fa4e3SBjoern A. Zeeb if (count == 0)
1826da8fa4e3SBjoern A. Zeeb /* Note: the header remains, just with zero length. */
1827da8fa4e3SBjoern A. Zeeb break;
1828da8fa4e3SBjoern A. Zeeb
1829da8fa4e3SBjoern A. Zeeb buf += count;
1830da8fa4e3SBjoern A. Zeeb buf_len -= count;
1831da8fa4e3SBjoern A. Zeeb
1832da8fa4e3SBjoern A. Zeeb current_region++;
1833da8fa4e3SBjoern A. Zeeb }
1834da8fa4e3SBjoern A. Zeeb }
1835da8fa4e3SBjoern A. Zeeb
ath10k_pci_fw_dump_work(struct work_struct * work)1836da8fa4e3SBjoern A. Zeeb static void ath10k_pci_fw_dump_work(struct work_struct *work)
1837da8fa4e3SBjoern A. Zeeb {
1838da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
1839da8fa4e3SBjoern A. Zeeb dump_work);
1840da8fa4e3SBjoern A. Zeeb struct ath10k_fw_crash_data *crash_data;
1841da8fa4e3SBjoern A. Zeeb struct ath10k *ar = ar_pci->ar;
1842da8fa4e3SBjoern A. Zeeb char guid[UUID_STRING_LEN + 1];
1843da8fa4e3SBjoern A. Zeeb
1844da8fa4e3SBjoern A. Zeeb mutex_lock(&ar->dump_mutex);
1845da8fa4e3SBjoern A. Zeeb
1846da8fa4e3SBjoern A. Zeeb spin_lock_bh(&ar->data_lock);
1847da8fa4e3SBjoern A. Zeeb ar->stats.fw_crash_counter++;
1848da8fa4e3SBjoern A. Zeeb spin_unlock_bh(&ar->data_lock);
1849da8fa4e3SBjoern A. Zeeb
1850da8fa4e3SBjoern A. Zeeb crash_data = ath10k_coredump_new(ar);
1851da8fa4e3SBjoern A. Zeeb
1852da8fa4e3SBjoern A. Zeeb if (crash_data)
1853da8fa4e3SBjoern A. Zeeb scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1854da8fa4e3SBjoern A. Zeeb else
1855da8fa4e3SBjoern A. Zeeb scnprintf(guid, sizeof(guid), "n/a");
1856da8fa4e3SBjoern A. Zeeb
1857da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1858da8fa4e3SBjoern A. Zeeb ath10k_print_driver_info(ar);
1859da8fa4e3SBjoern A. Zeeb ath10k_pci_dump_registers(ar, crash_data);
1860da8fa4e3SBjoern A. Zeeb ath10k_ce_dump_registers(ar, crash_data);
1861da8fa4e3SBjoern A. Zeeb ath10k_pci_dump_memory(ar, crash_data);
1862da8fa4e3SBjoern A. Zeeb
1863da8fa4e3SBjoern A. Zeeb mutex_unlock(&ar->dump_mutex);
1864da8fa4e3SBjoern A. Zeeb
1865da8fa4e3SBjoern A. Zeeb ath10k_core_start_recovery(ar);
1866da8fa4e3SBjoern A. Zeeb }
1867da8fa4e3SBjoern A. Zeeb
ath10k_pci_fw_crashed_dump(struct ath10k * ar)1868da8fa4e3SBjoern A. Zeeb static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1869da8fa4e3SBjoern A. Zeeb {
1870da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1871da8fa4e3SBjoern A. Zeeb
1872da8fa4e3SBjoern A. Zeeb queue_work(ar->workqueue, &ar_pci->dump_work);
1873da8fa4e3SBjoern A. Zeeb }
1874da8fa4e3SBjoern A. Zeeb
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					int force)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	/* Polling for completions means reading a CE register, which is a
	 * relatively expensive operation.  Unless the caller forces a check,
	 * skip it while at least 50% of the pipe's send entries are still
	 * free and wait for a later chance instead.
	 */
	if (!force) {
		int free_slots;

		free_slots = ath10k_pci_hif_get_free_queue_number(ar, pipe);
		if (free_slots > (ar_pci->attr[pipe].src_nentries >> 1))
			return;
	}

	ath10k_ce_per_engine_service(ar, pipe);
}
1902da8fa4e3SBjoern A. Zeeb
ath10k_pci_rx_retry_sync(struct ath10k * ar)1903da8fa4e3SBjoern A. Zeeb static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1904da8fa4e3SBjoern A. Zeeb {
1905da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1906da8fa4e3SBjoern A. Zeeb
1907da8fa4e3SBjoern A. Zeeb del_timer_sync(&ar_pci->rx_post_retry);
1908da8fa4e3SBjoern A. Zeeb }
1909da8fa4e3SBjoern A. Zeeb
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
				       u8 *ul_pipe, u8 *dl_pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	bool found_ul = false, found_dl = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	/* Walk the service<->CE map looking for entries that mention
	 * service_id and record the pipe number for each direction an
	 * entry covers.  A duplicate direction triggers a WARN but the
	 * later entry still wins, matching the table-driven intent.
	 */
	for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
		const struct ce_service_to_pipe *map = &ar_pci->serv_to_pipe[i];
		u32 dir, num;

		if (__le32_to_cpu(map->service_id) != service_id)
			continue;

		dir = __le32_to_cpu(map->pipedir);
		num = __le32_to_cpu(map->pipenum);

		if (dir == PIPEDIR_IN || dir == PIPEDIR_INOUT) {
			WARN_ON(found_dl);
			*dl_pipe = num;
			found_dl = true;
		}

		if (dir == PIPEDIR_OUT || dir == PIPEDIR_INOUT) {
			WARN_ON(found_ul);
			*ul_pipe = num;
			found_ul = true;
		}
	}

	/* Both directions must be resolved for the mapping to be usable. */
	if (!found_ul || !found_dl)
		return -ENOENT;

	return 0;
}
1955da8fa4e3SBjoern A. Zeeb
void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
				     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	/* The control service is expected to always be present in the
	 * service map, so the lookup result is deliberately ignored.
	 */
	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
1965da8fa4e3SBjoern A. Zeeb
ath10k_pci_irq_msi_fw_mask(struct ath10k * ar)1966da8fa4e3SBjoern A. Zeeb void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1967da8fa4e3SBjoern A. Zeeb {
1968da8fa4e3SBjoern A. Zeeb u32 val;
1969da8fa4e3SBjoern A. Zeeb
1970da8fa4e3SBjoern A. Zeeb switch (ar->hw_rev) {
1971da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA988X:
1972da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA9887:
1973da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA6174:
1974da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA9377:
1975da8fa4e3SBjoern A. Zeeb val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1976da8fa4e3SBjoern A. Zeeb CORE_CTRL_ADDRESS);
1977da8fa4e3SBjoern A. Zeeb val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1978da8fa4e3SBjoern A. Zeeb ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1979da8fa4e3SBjoern A. Zeeb CORE_CTRL_ADDRESS, val);
1980da8fa4e3SBjoern A. Zeeb break;
1981da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA99X0:
1982da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA9984:
1983da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA9888:
1984da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA4019:
1985da8fa4e3SBjoern A. Zeeb /* TODO: Find appropriate register configuration for QCA99X0
1986da8fa4e3SBjoern A. Zeeb * to mask irq/MSI.
1987da8fa4e3SBjoern A. Zeeb */
1988da8fa4e3SBjoern A. Zeeb break;
1989da8fa4e3SBjoern A. Zeeb case ATH10K_HW_WCN3990:
1990da8fa4e3SBjoern A. Zeeb break;
1991da8fa4e3SBjoern A. Zeeb }
1992da8fa4e3SBjoern A. Zeeb }
1993da8fa4e3SBjoern A. Zeeb
ath10k_pci_irq_msi_fw_unmask(struct ath10k * ar)1994da8fa4e3SBjoern A. Zeeb static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1995da8fa4e3SBjoern A. Zeeb {
1996da8fa4e3SBjoern A. Zeeb u32 val;
1997da8fa4e3SBjoern A. Zeeb
1998da8fa4e3SBjoern A. Zeeb switch (ar->hw_rev) {
1999da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA988X:
2000da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA9887:
2001da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA6174:
2002da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA9377:
2003da8fa4e3SBjoern A. Zeeb val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
2004da8fa4e3SBjoern A. Zeeb CORE_CTRL_ADDRESS);
2005da8fa4e3SBjoern A. Zeeb val |= CORE_CTRL_PCIE_REG_31_MASK;
2006da8fa4e3SBjoern A. Zeeb ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2007da8fa4e3SBjoern A. Zeeb CORE_CTRL_ADDRESS, val);
2008da8fa4e3SBjoern A. Zeeb break;
2009da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA99X0:
2010da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA9984:
2011da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA9888:
2012da8fa4e3SBjoern A. Zeeb case ATH10K_HW_QCA4019:
2013da8fa4e3SBjoern A. Zeeb /* TODO: Find appropriate register configuration for QCA99X0
2014da8fa4e3SBjoern A. Zeeb * to unmask irq/MSI.
2015da8fa4e3SBjoern A. Zeeb */
2016da8fa4e3SBjoern A. Zeeb break;
2017da8fa4e3SBjoern A. Zeeb case ATH10K_HW_WCN3990:
2018da8fa4e3SBjoern A. Zeeb break;
2019da8fa4e3SBjoern A. Zeeb }
2020da8fa4e3SBjoern A. Zeeb }
2021da8fa4e3SBjoern A. Zeeb
/* Quiesce all interrupt sources: copy-engine interrupts, the shared
 * legacy interrupt line, and the MSI firmware indication.
 */
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}
2028da8fa4e3SBjoern A. Zeeb
ath10k_pci_irq_sync(struct ath10k * ar)2029da8fa4e3SBjoern A. Zeeb static void ath10k_pci_irq_sync(struct ath10k *ar)
2030da8fa4e3SBjoern A. Zeeb {
2031da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2032da8fa4e3SBjoern A. Zeeb
2033da8fa4e3SBjoern A. Zeeb synchronize_irq(ar_pci->pdev->irq);
2034da8fa4e3SBjoern A. Zeeb }
2035da8fa4e3SBjoern A. Zeeb
/* Re-arm all interrupt sources (mirror of ath10k_pci_irq_disable):
 * copy-engine interrupts, the legacy interrupt line, and the MSI
 * firmware indication.
 */
static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}
2042da8fa4e3SBjoern A. Zeeb
/* HIF start callback: bring the datapath up.  Enables NAPI, unmasks
 * interrupts, posts rx buffers and restores the PCIe link control
 * register (presumably saved during probe/power-up — confirm where
 * ar_pci->link_ctl is captured).  Always returns 0.
 */
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_core_napi_enable(ar);

	/* Interrupts must be live before rx buffers are posted. */
	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}
2059da8fa4e3SBjoern A. Zeeb
ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe * pci_pipe)2060da8fa4e3SBjoern A. Zeeb static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2061da8fa4e3SBjoern A. Zeeb {
2062da8fa4e3SBjoern A. Zeeb struct ath10k *ar;
2063da8fa4e3SBjoern A. Zeeb struct ath10k_ce_pipe *ce_pipe;
2064da8fa4e3SBjoern A. Zeeb struct ath10k_ce_ring *ce_ring;
2065da8fa4e3SBjoern A. Zeeb struct sk_buff *skb;
2066da8fa4e3SBjoern A. Zeeb int i;
2067da8fa4e3SBjoern A. Zeeb
2068da8fa4e3SBjoern A. Zeeb ar = pci_pipe->hif_ce_state;
2069da8fa4e3SBjoern A. Zeeb ce_pipe = pci_pipe->ce_hdl;
2070da8fa4e3SBjoern A. Zeeb ce_ring = ce_pipe->dest_ring;
2071da8fa4e3SBjoern A. Zeeb
2072da8fa4e3SBjoern A. Zeeb if (!ce_ring)
2073da8fa4e3SBjoern A. Zeeb return;
2074da8fa4e3SBjoern A. Zeeb
2075da8fa4e3SBjoern A. Zeeb if (!pci_pipe->buf_sz)
2076da8fa4e3SBjoern A. Zeeb return;
2077da8fa4e3SBjoern A. Zeeb
2078da8fa4e3SBjoern A. Zeeb for (i = 0; i < ce_ring->nentries; i++) {
2079da8fa4e3SBjoern A. Zeeb skb = ce_ring->per_transfer_context[i];
2080da8fa4e3SBjoern A. Zeeb if (!skb)
2081da8fa4e3SBjoern A. Zeeb continue;
2082da8fa4e3SBjoern A. Zeeb
2083da8fa4e3SBjoern A. Zeeb ce_ring->per_transfer_context[i] = NULL;
2084da8fa4e3SBjoern A. Zeeb
2085da8fa4e3SBjoern A. Zeeb dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
2086da8fa4e3SBjoern A. Zeeb skb->len + skb_tailroom(skb),
2087da8fa4e3SBjoern A. Zeeb DMA_FROM_DEVICE);
2088da8fa4e3SBjoern A. Zeeb dev_kfree_skb_any(skb);
2089da8fa4e3SBjoern A. Zeeb }
2090da8fa4e3SBjoern A. Zeeb }
2091da8fa4e3SBjoern A. Zeeb
ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe * pci_pipe)2092da8fa4e3SBjoern A. Zeeb static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2093da8fa4e3SBjoern A. Zeeb {
2094da8fa4e3SBjoern A. Zeeb struct ath10k *ar;
2095da8fa4e3SBjoern A. Zeeb struct ath10k_ce_pipe *ce_pipe;
2096da8fa4e3SBjoern A. Zeeb struct ath10k_ce_ring *ce_ring;
2097da8fa4e3SBjoern A. Zeeb struct sk_buff *skb;
2098da8fa4e3SBjoern A. Zeeb int i;
2099da8fa4e3SBjoern A. Zeeb
2100da8fa4e3SBjoern A. Zeeb ar = pci_pipe->hif_ce_state;
2101da8fa4e3SBjoern A. Zeeb ce_pipe = pci_pipe->ce_hdl;
2102da8fa4e3SBjoern A. Zeeb ce_ring = ce_pipe->src_ring;
2103da8fa4e3SBjoern A. Zeeb
2104da8fa4e3SBjoern A. Zeeb if (!ce_ring)
2105da8fa4e3SBjoern A. Zeeb return;
2106da8fa4e3SBjoern A. Zeeb
2107da8fa4e3SBjoern A. Zeeb if (!pci_pipe->buf_sz)
2108da8fa4e3SBjoern A. Zeeb return;
2109da8fa4e3SBjoern A. Zeeb
2110da8fa4e3SBjoern A. Zeeb for (i = 0; i < ce_ring->nentries; i++) {
2111da8fa4e3SBjoern A. Zeeb skb = ce_ring->per_transfer_context[i];
2112da8fa4e3SBjoern A. Zeeb if (!skb)
2113da8fa4e3SBjoern A. Zeeb continue;
2114da8fa4e3SBjoern A. Zeeb
2115da8fa4e3SBjoern A. Zeeb ce_ring->per_transfer_context[i] = NULL;
2116da8fa4e3SBjoern A. Zeeb
2117da8fa4e3SBjoern A. Zeeb ath10k_htc_tx_completion_handler(ar, skb);
2118da8fa4e3SBjoern A. Zeeb }
2119da8fa4e3SBjoern A. Zeeb }
2120da8fa4e3SBjoern A. Zeeb
2121da8fa4e3SBjoern A. Zeeb /*
2122da8fa4e3SBjoern A. Zeeb * Cleanup residual buffers for device shutdown:
2123da8fa4e3SBjoern A. Zeeb * buffers that were enqueued for receive
2124da8fa4e3SBjoern A. Zeeb * buffers that were to be sent
2125da8fa4e3SBjoern A. Zeeb * Note: Buffers that had completed but which were
2126da8fa4e3SBjoern A. Zeeb * not yet processed are on a completion queue. They
2127da8fa4e3SBjoern A. Zeeb * are handled when the completion thread shuts down.
2128da8fa4e3SBjoern A. Zeeb */
ath10k_pci_buffer_cleanup(struct ath10k * ar)2129da8fa4e3SBjoern A. Zeeb static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
2130da8fa4e3SBjoern A. Zeeb {
2131da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2132da8fa4e3SBjoern A. Zeeb int pipe_num;
2133da8fa4e3SBjoern A. Zeeb
2134da8fa4e3SBjoern A. Zeeb for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
2135da8fa4e3SBjoern A. Zeeb struct ath10k_pci_pipe *pipe_info;
2136da8fa4e3SBjoern A. Zeeb
2137da8fa4e3SBjoern A. Zeeb pipe_info = &ar_pci->pipe_info[pipe_num];
2138da8fa4e3SBjoern A. Zeeb ath10k_pci_rx_pipe_cleanup(pipe_info);
2139da8fa4e3SBjoern A. Zeeb ath10k_pci_tx_pipe_cleanup(pipe_info);
2140da8fa4e3SBjoern A. Zeeb }
2141da8fa4e3SBjoern A. Zeeb }
2142da8fa4e3SBjoern A. Zeeb
ath10k_pci_ce_deinit(struct ath10k * ar)2143da8fa4e3SBjoern A. Zeeb void ath10k_pci_ce_deinit(struct ath10k *ar)
2144da8fa4e3SBjoern A. Zeeb {
2145da8fa4e3SBjoern A. Zeeb int i;
2146da8fa4e3SBjoern A. Zeeb
2147da8fa4e3SBjoern A. Zeeb for (i = 0; i < CE_COUNT; i++)
2148da8fa4e3SBjoern A. Zeeb ath10k_ce_deinit_pipe(ar, i);
2149da8fa4e3SBjoern A. Zeeb }
2150da8fa4e3SBjoern A. Zeeb
/* Flush the PCI datapath: stop the rx-post retry timer, then reclaim
 * all buffers still sitting on the CE rings.
 */
void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	ath10k_pci_buffer_cleanup(ar);
}
2156da8fa4e3SBjoern A. Zeeb
/* HIF stop callback: quiesce interrupts and NAPI, reset the chip to
 * stop device-initiated DMA, then flush all residual buffers.  The
 * ordering below is deliberate and must not be changed.
 */
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);

	ath10k_core_napi_sync_disable(ar);

	/* A coredump may still be queued; wait for it to finish. */
	cancel_work_sync(&ar_pci->dump_work);

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possible corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_flush(ar);

	/* Sanity check: no power-save wake references should remain. */
	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
2190da8fa4e3SBjoern A. Zeeb
/* Exchange one BMI (Boot-loader Messaging Interface) request/response
 * pair with the target over the dedicated BMI copy-engine pipes.
 *
 * The request is copied into a DMA-able bounce buffer (treq) and sent
 * on the tx pipe; if a response is expected, a second bounce buffer
 * (tresp) is posted on the rx pipe first.  The function then polls
 * both pipes until the transfer completes or times out.
 *
 * @req/@req_len: request message (may not be NULL per callers — confirm).
 * @resp/@resp_len: optional response buffer; on success *resp_len is
 *	updated to the number of bytes actually received.
 * Returns 0 on success or a negative errno (-EINVAL, -ENOMEM, -EIO,
 * -ETIMEDOUT).
 */
int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
				    void *req, u32 req_len,
				    void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	/* A response buffer without a length pointer (or with a zero
	 * length) is a caller bug.
	 */
	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	/* Bounce the request so it can be DMA-mapped safely. */
	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		/* Post the rx buffer before sending so the response
		 * cannot arrive with nowhere to land.
		 */
		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
	if (ret) {
		/* Timed out: pull the unsent descriptor back off the ring. */
		dma_addr_t unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		dma_addr_t unused_buffer;

		/* Revoke the posted rx buffer before unmapping it. */
		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	/* Copy out the (possibly truncated) response on success. */
	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, *resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
2284da8fa4e3SBjoern A. Zeeb
ath10k_pci_bmi_send_done(struct ath10k_ce_pipe * ce_state)2285da8fa4e3SBjoern A. Zeeb static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
2286da8fa4e3SBjoern A. Zeeb {
2287da8fa4e3SBjoern A. Zeeb struct bmi_xfer *xfer;
2288da8fa4e3SBjoern A. Zeeb
2289da8fa4e3SBjoern A. Zeeb if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
2290da8fa4e3SBjoern A. Zeeb return;
2291da8fa4e3SBjoern A. Zeeb
2292da8fa4e3SBjoern A. Zeeb xfer->tx_done = true;
2293da8fa4e3SBjoern A. Zeeb }
2294da8fa4e3SBjoern A. Zeeb
ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe * ce_state)2295da8fa4e3SBjoern A. Zeeb static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
2296da8fa4e3SBjoern A. Zeeb {
2297da8fa4e3SBjoern A. Zeeb struct ath10k *ar = ce_state->ar;
2298da8fa4e3SBjoern A. Zeeb struct bmi_xfer *xfer;
2299da8fa4e3SBjoern A. Zeeb unsigned int nbytes;
2300da8fa4e3SBjoern A. Zeeb
2301da8fa4e3SBjoern A. Zeeb if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
2302da8fa4e3SBjoern A. Zeeb &nbytes))
2303da8fa4e3SBjoern A. Zeeb return;
2304da8fa4e3SBjoern A. Zeeb
2305da8fa4e3SBjoern A. Zeeb if (WARN_ON_ONCE(!xfer))
2306da8fa4e3SBjoern A. Zeeb return;
2307da8fa4e3SBjoern A. Zeeb
2308da8fa4e3SBjoern A. Zeeb if (!xfer->wait_for_resp) {
2309da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
2310da8fa4e3SBjoern A. Zeeb return;
2311da8fa4e3SBjoern A. Zeeb }
2312da8fa4e3SBjoern A. Zeeb
2313da8fa4e3SBjoern A. Zeeb xfer->resp_len = nbytes;
2314da8fa4e3SBjoern A. Zeeb xfer->rx_done = true;
2315da8fa4e3SBjoern A. Zeeb }
2316da8fa4e3SBjoern A. Zeeb
/* Busy-poll both BMI pipes until the transfer in *xfer completes or
 * BMI_COMMUNICATION_TIMEOUT_HZ elapses.  Returns 0 on completion or
 * -ETIMEDOUT.  BMI runs before interrupts are usable, hence polling.
 */
static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	unsigned long started = jiffies;
	unsigned long dur;
	int ret;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		/* Done when the send completed and, if a response was
		 * expected, the receive completed too.
		 */
		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
			ret = 0;
			goto out;
		}

#if defined(__linux__)
		schedule();
#elif defined(__FreeBSD__)
		/* Using LinuxKPI we'll hang for-ever as there's no wake_up */
		kern_yield(PRI_USER);
#endif
	}

	ret = -ETIMEDOUT;

out:
	dur = jiffies - started;
	if (dur > HZ)
		ath10k_dbg(ar, ATH10K_DBG_BMI,
			   "bmi cmd took %lu jiffies hz %d ret %d\n",
			   dur, HZ, ret);
	return ret;
}
2354da8fa4e3SBjoern A. Zeeb
2355da8fa4e3SBjoern A. Zeeb /*
2356da8fa4e3SBjoern A. Zeeb * Send an interrupt to the device to wake up the Target CPU
2357da8fa4e3SBjoern A. Zeeb * so it has an opportunity to notice any changed state.
2358da8fa4e3SBjoern A. Zeeb */
ath10k_pci_wake_target_cpu(struct ath10k * ar)2359da8fa4e3SBjoern A. Zeeb static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
2360da8fa4e3SBjoern A. Zeeb {
2361da8fa4e3SBjoern A. Zeeb u32 addr, val;
2362da8fa4e3SBjoern A. Zeeb
2363da8fa4e3SBjoern A. Zeeb addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
2364da8fa4e3SBjoern A. Zeeb val = ath10k_pci_read32(ar, addr);
2365da8fa4e3SBjoern A. Zeeb val |= CORE_CTRL_CPU_INTR_MASK;
2366da8fa4e3SBjoern A. Zeeb ath10k_pci_write32(ar, addr, val);
2367da8fa4e3SBjoern A. Zeeb
2368da8fa4e3SBjoern A. Zeeb return 0;
2369da8fa4e3SBjoern A. Zeeb }
2370da8fa4e3SBjoern A. Zeeb
ath10k_pci_get_num_banks(struct ath10k * ar)2371da8fa4e3SBjoern A. Zeeb static int ath10k_pci_get_num_banks(struct ath10k *ar)
2372da8fa4e3SBjoern A. Zeeb {
2373da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2374da8fa4e3SBjoern A. Zeeb
2375da8fa4e3SBjoern A. Zeeb switch (ar_pci->pdev->device) {
2376da8fa4e3SBjoern A. Zeeb case QCA988X_2_0_DEVICE_ID_UBNT:
2377da8fa4e3SBjoern A. Zeeb case QCA988X_2_0_DEVICE_ID:
2378da8fa4e3SBjoern A. Zeeb case QCA99X0_2_0_DEVICE_ID:
2379da8fa4e3SBjoern A. Zeeb case QCA9888_2_0_DEVICE_ID:
2380da8fa4e3SBjoern A. Zeeb case QCA9984_1_0_DEVICE_ID:
2381da8fa4e3SBjoern A. Zeeb case QCA9887_1_0_DEVICE_ID:
2382da8fa4e3SBjoern A. Zeeb return 1;
2383da8fa4e3SBjoern A. Zeeb case QCA6164_2_1_DEVICE_ID:
2384da8fa4e3SBjoern A. Zeeb case QCA6174_2_1_DEVICE_ID:
2385da8fa4e3SBjoern A. Zeeb switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
2386da8fa4e3SBjoern A. Zeeb case QCA6174_HW_1_0_CHIP_ID_REV:
2387da8fa4e3SBjoern A. Zeeb case QCA6174_HW_1_1_CHIP_ID_REV:
2388da8fa4e3SBjoern A. Zeeb case QCA6174_HW_2_1_CHIP_ID_REV:
2389da8fa4e3SBjoern A. Zeeb case QCA6174_HW_2_2_CHIP_ID_REV:
2390da8fa4e3SBjoern A. Zeeb return 3;
2391da8fa4e3SBjoern A. Zeeb case QCA6174_HW_1_3_CHIP_ID_REV:
2392da8fa4e3SBjoern A. Zeeb return 2;
2393da8fa4e3SBjoern A. Zeeb case QCA6174_HW_3_0_CHIP_ID_REV:
2394da8fa4e3SBjoern A. Zeeb case QCA6174_HW_3_1_CHIP_ID_REV:
2395da8fa4e3SBjoern A. Zeeb case QCA6174_HW_3_2_CHIP_ID_REV:
2396da8fa4e3SBjoern A. Zeeb return 9;
2397da8fa4e3SBjoern A. Zeeb }
2398da8fa4e3SBjoern A. Zeeb break;
2399da8fa4e3SBjoern A. Zeeb case QCA9377_1_0_DEVICE_ID:
2400da8fa4e3SBjoern A. Zeeb return 9;
2401da8fa4e3SBjoern A. Zeeb }
2402da8fa4e3SBjoern A. Zeeb
2403da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "unknown number of banks, assuming 1\n");
2404da8fa4e3SBjoern A. Zeeb return 1;
2405da8fa4e3SBjoern A. Zeeb }
2406da8fa4e3SBjoern A. Zeeb
ath10k_bus_get_num_banks(struct ath10k * ar)2407da8fa4e3SBjoern A. Zeeb static int ath10k_bus_get_num_banks(struct ath10k *ar)
2408da8fa4e3SBjoern A. Zeeb {
2409da8fa4e3SBjoern A. Zeeb struct ath10k_ce *ce = ath10k_ce_priv(ar);
2410da8fa4e3SBjoern A. Zeeb
2411da8fa4e3SBjoern A. Zeeb return ce->bus_ops->get_num_banks(ar);
2412da8fa4e3SBjoern A. Zeeb }
2413da8fa4e3SBjoern A. Zeeb
/* Push the host's copy-engine configuration into the target over the
 * diagnostic window and tell the firmware that early configuration is
 * done.  The sequence is strictly ordered: each target address is
 * discovered by a diag read before it is written.  Returns 0 on
 * success or a negative errno from the failing diag access.
 */
int ath10k_pci_init_config(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
	    host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	/* hi_interconnect_state holds a pointer to the target's
	 * struct pcie_state; fetch it first.
	 */
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	/* A zero pointer means the target has not published its state yet. */
	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	/* Write the per-pipe CE configuration table into target memory. */
	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					ar_pci->pipe_config,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	/* Write the service-id -> copy-engine routing table. */
	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					ar_pci->serv_to_pipe,
					sizeof(pci_target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	/* Keep PCIe L1 disabled in the target's config flags. */
	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	/* OR in the magic cookie plus the bus-specific IRAM bank count. */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
2555da8fa4e3SBjoern A. Zeeb
ath10k_pci_override_ce_config(struct ath10k * ar)2556da8fa4e3SBjoern A. Zeeb static void ath10k_pci_override_ce_config(struct ath10k *ar)
2557da8fa4e3SBjoern A. Zeeb {
2558da8fa4e3SBjoern A. Zeeb struct ce_attr *attr;
2559da8fa4e3SBjoern A. Zeeb struct ce_pipe_config *config;
2560da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2561da8fa4e3SBjoern A. Zeeb
2562da8fa4e3SBjoern A. Zeeb /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2563da8fa4e3SBjoern A. Zeeb * since it is currently used for other feature.
2564da8fa4e3SBjoern A. Zeeb */
2565da8fa4e3SBjoern A. Zeeb
2566da8fa4e3SBjoern A. Zeeb /* Override Host's Copy Engine 5 configuration */
2567da8fa4e3SBjoern A. Zeeb attr = &ar_pci->attr[5];
2568da8fa4e3SBjoern A. Zeeb attr->src_sz_max = 0;
2569da8fa4e3SBjoern A. Zeeb attr->dest_nentries = 0;
2570da8fa4e3SBjoern A. Zeeb
2571da8fa4e3SBjoern A. Zeeb /* Override Target firmware's Copy Engine configuration */
2572da8fa4e3SBjoern A. Zeeb config = &ar_pci->pipe_config[5];
2573da8fa4e3SBjoern A. Zeeb config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2574da8fa4e3SBjoern A. Zeeb config->nbytes_max = __cpu_to_le32(2048);
2575da8fa4e3SBjoern A. Zeeb
2576da8fa4e3SBjoern A. Zeeb /* Map from service/endpoint to Copy Engine */
2577da8fa4e3SBjoern A. Zeeb ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
2578da8fa4e3SBjoern A. Zeeb }
2579da8fa4e3SBjoern A. Zeeb
ath10k_pci_alloc_pipes(struct ath10k * ar)2580da8fa4e3SBjoern A. Zeeb int ath10k_pci_alloc_pipes(struct ath10k *ar)
2581da8fa4e3SBjoern A. Zeeb {
2582da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2583da8fa4e3SBjoern A. Zeeb struct ath10k_pci_pipe *pipe;
2584da8fa4e3SBjoern A. Zeeb struct ath10k_ce *ce = ath10k_ce_priv(ar);
2585da8fa4e3SBjoern A. Zeeb int i, ret;
2586da8fa4e3SBjoern A. Zeeb
2587da8fa4e3SBjoern A. Zeeb for (i = 0; i < CE_COUNT; i++) {
2588da8fa4e3SBjoern A. Zeeb pipe = &ar_pci->pipe_info[i];
2589da8fa4e3SBjoern A. Zeeb pipe->ce_hdl = &ce->ce_states[i];
2590da8fa4e3SBjoern A. Zeeb pipe->pipe_num = i;
2591da8fa4e3SBjoern A. Zeeb pipe->hif_ce_state = ar;
2592da8fa4e3SBjoern A. Zeeb
2593da8fa4e3SBjoern A. Zeeb ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
2594da8fa4e3SBjoern A. Zeeb if (ret) {
2595da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2596da8fa4e3SBjoern A. Zeeb i, ret);
2597da8fa4e3SBjoern A. Zeeb return ret;
2598da8fa4e3SBjoern A. Zeeb }
2599da8fa4e3SBjoern A. Zeeb
2600da8fa4e3SBjoern A. Zeeb /* Last CE is Diagnostic Window */
2601da8fa4e3SBjoern A. Zeeb if (i == CE_DIAG_PIPE) {
2602da8fa4e3SBjoern A. Zeeb ar_pci->ce_diag = pipe->ce_hdl;
2603da8fa4e3SBjoern A. Zeeb continue;
2604da8fa4e3SBjoern A. Zeeb }
2605da8fa4e3SBjoern A. Zeeb
2606da8fa4e3SBjoern A. Zeeb pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
2607da8fa4e3SBjoern A. Zeeb }
2608da8fa4e3SBjoern A. Zeeb
2609da8fa4e3SBjoern A. Zeeb return 0;
2610da8fa4e3SBjoern A. Zeeb }
2611da8fa4e3SBjoern A. Zeeb
ath10k_pci_free_pipes(struct ath10k * ar)2612da8fa4e3SBjoern A. Zeeb void ath10k_pci_free_pipes(struct ath10k *ar)
2613da8fa4e3SBjoern A. Zeeb {
2614da8fa4e3SBjoern A. Zeeb int i;
2615da8fa4e3SBjoern A. Zeeb
2616da8fa4e3SBjoern A. Zeeb for (i = 0; i < CE_COUNT; i++)
2617da8fa4e3SBjoern A. Zeeb ath10k_ce_free_pipe(ar, i);
2618da8fa4e3SBjoern A. Zeeb }
2619da8fa4e3SBjoern A. Zeeb
ath10k_pci_init_pipes(struct ath10k * ar)2620da8fa4e3SBjoern A. Zeeb int ath10k_pci_init_pipes(struct ath10k *ar)
2621da8fa4e3SBjoern A. Zeeb {
2622da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2623da8fa4e3SBjoern A. Zeeb int i, ret;
2624da8fa4e3SBjoern A. Zeeb
2625da8fa4e3SBjoern A. Zeeb for (i = 0; i < CE_COUNT; i++) {
2626da8fa4e3SBjoern A. Zeeb ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
2627da8fa4e3SBjoern A. Zeeb if (ret) {
2628da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2629da8fa4e3SBjoern A. Zeeb i, ret);
2630da8fa4e3SBjoern A. Zeeb return ret;
2631da8fa4e3SBjoern A. Zeeb }
2632da8fa4e3SBjoern A. Zeeb }
2633da8fa4e3SBjoern A. Zeeb
2634da8fa4e3SBjoern A. Zeeb return 0;
2635da8fa4e3SBjoern A. Zeeb }
2636da8fa4e3SBjoern A. Zeeb
ath10k_pci_has_fw_crashed(struct ath10k * ar)2637da8fa4e3SBjoern A. Zeeb static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2638da8fa4e3SBjoern A. Zeeb {
2639da8fa4e3SBjoern A. Zeeb return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2640da8fa4e3SBjoern A. Zeeb FW_IND_EVENT_PENDING;
2641da8fa4e3SBjoern A. Zeeb }
2642da8fa4e3SBjoern A. Zeeb
ath10k_pci_fw_crashed_clear(struct ath10k * ar)2643da8fa4e3SBjoern A. Zeeb static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2644da8fa4e3SBjoern A. Zeeb {
2645da8fa4e3SBjoern A. Zeeb u32 val;
2646da8fa4e3SBjoern A. Zeeb
2647da8fa4e3SBjoern A. Zeeb val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2648da8fa4e3SBjoern A. Zeeb val &= ~FW_IND_EVENT_PENDING;
2649da8fa4e3SBjoern A. Zeeb ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2650da8fa4e3SBjoern A. Zeeb }
2651da8fa4e3SBjoern A. Zeeb
ath10k_pci_has_device_gone(struct ath10k * ar)2652da8fa4e3SBjoern A. Zeeb static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2653da8fa4e3SBjoern A. Zeeb {
2654da8fa4e3SBjoern A. Zeeb u32 val;
2655da8fa4e3SBjoern A. Zeeb
2656da8fa4e3SBjoern A. Zeeb val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2657da8fa4e3SBjoern A. Zeeb return (val == 0xffffffff);
2658da8fa4e3SBjoern A. Zeeb }
2659da8fa4e3SBjoern A. Zeeb
2660da8fa4e3SBjoern A. Zeeb /* this function effectively clears target memory controller assert line */
ath10k_pci_warm_reset_si0(struct ath10k * ar)2661da8fa4e3SBjoern A. Zeeb static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2662da8fa4e3SBjoern A. Zeeb {
2663da8fa4e3SBjoern A. Zeeb u32 val;
2664da8fa4e3SBjoern A. Zeeb
2665da8fa4e3SBjoern A. Zeeb val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2666da8fa4e3SBjoern A. Zeeb ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2667da8fa4e3SBjoern A. Zeeb val | SOC_RESET_CONTROL_SI0_RST_MASK);
2668da8fa4e3SBjoern A. Zeeb val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2669da8fa4e3SBjoern A. Zeeb
2670da8fa4e3SBjoern A. Zeeb msleep(10);
2671da8fa4e3SBjoern A. Zeeb
2672da8fa4e3SBjoern A. Zeeb val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2673da8fa4e3SBjoern A. Zeeb ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2674da8fa4e3SBjoern A. Zeeb val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2675da8fa4e3SBjoern A. Zeeb val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2676da8fa4e3SBjoern A. Zeeb
2677da8fa4e3SBjoern A. Zeeb msleep(10);
2678da8fa4e3SBjoern A. Zeeb }
2679da8fa4e3SBjoern A. Zeeb
/* Put the target CPU into warm reset.  The FW indicator is cleared
 * first so stale crash/init flags don't survive the reset.
 */
static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	/* Set the CPU warm-reset bit; note it is not cleared here. */
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}
2690da8fa4e3SBjoern A. Zeeb
/* Pulse the copy-engine reset bit: assert, hold for 10 ms, deassert. */
static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	/* Restore from the pre-assert snapshot with the CE bit cleared. */
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}
2703da8fa4e3SBjoern A. Zeeb
/* Disable the low-frequency timer (clear its enable bit) so it cannot
 * fire while the warm reset sequence is in progress.
 */
static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
			       val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
2712da8fa4e3SBjoern A. Zeeb
/* Perform a full warm reset of the target: quiesce the CPU, reset the
 * copy engines, reset the CPU again and wait for firmware to signal
 * init.  The step order below is deliberate and must not be changed.
 * Returns 0 on success or the error from the final target-init wait.
 */
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	/* Statistics counter is shared; update under data_lock. */
	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
	 * were to access copy engine while host performs copy engine reset
	 * then it is possible for the device to confuse pci-e controller to
	 * the point of bringing host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	/* First wait is best-effort; its result is intentionally ignored. */
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}
2750da8fa4e3SBjoern A. Zeeb
/* QCA99X0 "soft" reset: same as the chip reset, but with interrupts
 * disabled first.
 */
static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
	ath10k_pci_irq_disable(ar);
	return ath10k_pci_qca99x0_chip_reset(ar);
}
2756da8fa4e3SBjoern A. Zeeb
ath10k_pci_safe_chip_reset(struct ath10k * ar)2757da8fa4e3SBjoern A. Zeeb static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2758da8fa4e3SBjoern A. Zeeb {
2759da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2760da8fa4e3SBjoern A. Zeeb
2761da8fa4e3SBjoern A. Zeeb if (!ar_pci->pci_soft_reset)
2762da8fa4e3SBjoern A. Zeeb return -ENOTSUPP;
2763da8fa4e3SBjoern A. Zeeb
2764da8fa4e3SBjoern A. Zeeb return ar_pci->pci_soft_reset(ar);
2765da8fa4e3SBjoern A. Zeeb }
2766da8fa4e3SBjoern A. Zeeb
/* QCA988x chip reset: try warm reset a few times (verifying each
 * attempt by poking a host-interest register through the copy engine),
 * then fall back to cold reset unless the module parameter forbids it.
 * Returns 0 on success or a negative errno.
 */
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		/* Sanity read: only the success of the diag access matters,
		 * the value itself is discarded.
		 */
		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	/* All warm-reset attempts failed; cold reset is the last resort. */
	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}
2840da8fa4e3SBjoern A. Zeeb
/* QCA6174 chip reset: this hardware needs a cold reset followed by a
 * warm reset (see FIXME below); either step failing aborts the
 * sequence.  Returns 0 on success or a negative errno.
 */
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}
2872da8fa4e3SBjoern A. Zeeb
/* QCA99X0 chip reset: cold reset only, then wait for the target to
 * signal it has initialized.  Returns 0 on success or a negative errno.
 */
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}
2896da8fa4e3SBjoern A. Zeeb
ath10k_pci_chip_reset(struct ath10k * ar)2897da8fa4e3SBjoern A. Zeeb static int ath10k_pci_chip_reset(struct ath10k *ar)
2898da8fa4e3SBjoern A. Zeeb {
2899da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2900da8fa4e3SBjoern A. Zeeb
2901da8fa4e3SBjoern A. Zeeb if (WARN_ON(!ar_pci->pci_hard_reset))
2902da8fa4e3SBjoern A. Zeeb return -ENOTSUPP;
2903da8fa4e3SBjoern A. Zeeb
2904da8fa4e3SBjoern A. Zeeb return ar_pci->pci_hard_reset(ar);
2905da8fa4e3SBjoern A. Zeeb }
2906da8fa4e3SBjoern A. Zeeb
/* HIF power-up: disable ASPM (saving the previous link control value
 * for later restore), reset the chip, bring up the copy engines, push
 * the target configuration and wake the target CPU.  Uses goto-based
 * unwinding on failure.  Returns 0 on success or a negative errno.
 * Note: fw_mode is currently unused here.
 */
static int ath10k_pci_hif_power_up(struct ath10k *ar,
				   enum ath10k_firmware_mode fw_mode)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	/* Save link control state, then clear the ASPM bits for the
	 * duration of operation.
	 */
	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		/* If firmware crashed during the reset, collect a dump
		 * before bailing out.
		 */
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}
2968da8fa4e3SBjoern A. Zeeb
/* HIF power-down: intentionally a no-op apart from the debug message;
 * see the comment below for why no reset is issued here.
 */
void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */
}
2977da8fa4e3SBjoern A. Zeeb
/* HIF-level suspend hook; intentionally empty (see comment). */
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* Nothing to do; the important stuff is in the driver suspend. */
	return 0;
}
2983da8fa4e3SBjoern A. Zeeb
2984da8fa4e3SBjoern A. Zeeb #ifdef CONFIG_PM
/* Driver suspend: make sure the device is actually asleep before the
 * system suspends.  Always returns 0.
 */
static int ath10k_pci_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}
2996da8fa4e3SBjoern A. Zeeb #endif
2997da8fa4e3SBjoern A. Zeeb
/* HIF-level resume hook; intentionally empty (see comment). */
static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	/* Nothing to do; the important stuff is in the driver resume. */
	return 0;
}
3003da8fa4e3SBjoern A. Zeeb
3004da8fa4e3SBjoern A. Zeeb #ifdef CONFIG_PM
ath10k_pci_resume(struct ath10k * ar)3005da8fa4e3SBjoern A. Zeeb static int ath10k_pci_resume(struct ath10k *ar)
3006da8fa4e3SBjoern A. Zeeb {
3007da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3008da8fa4e3SBjoern A. Zeeb struct pci_dev *pdev = ar_pci->pdev;
3009da8fa4e3SBjoern A. Zeeb u32 val;
3010da8fa4e3SBjoern A. Zeeb int ret = 0;
3011da8fa4e3SBjoern A. Zeeb
3012da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_force_wake(ar);
3013da8fa4e3SBjoern A. Zeeb if (ret) {
3014da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to wake up target: %d\n", ret);
3015da8fa4e3SBjoern A. Zeeb return ret;
3016da8fa4e3SBjoern A. Zeeb }
3017da8fa4e3SBjoern A. Zeeb
3018da8fa4e3SBjoern A. Zeeb /* Suspend/Resume resets the PCI configuration space, so we have to
3019da8fa4e3SBjoern A. Zeeb * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
3020da8fa4e3SBjoern A. Zeeb * from interfering with C3 CPU state. pci_restore_state won't help
3021da8fa4e3SBjoern A. Zeeb * here since it only restores the first 64 bytes pci config header.
3022da8fa4e3SBjoern A. Zeeb */
3023da8fa4e3SBjoern A. Zeeb pci_read_config_dword(pdev, 0x40, &val);
3024da8fa4e3SBjoern A. Zeeb if ((val & 0x0000ff00) != 0)
3025da8fa4e3SBjoern A. Zeeb pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
3026da8fa4e3SBjoern A. Zeeb
3027da8fa4e3SBjoern A. Zeeb return ret;
3028da8fa4e3SBjoern A. Zeeb }
3029da8fa4e3SBjoern A. Zeeb #endif
3030da8fa4e3SBjoern A. Zeeb
ath10k_pci_validate_cal(void * data,size_t size)3031da8fa4e3SBjoern A. Zeeb static bool ath10k_pci_validate_cal(void *data, size_t size)
3032da8fa4e3SBjoern A. Zeeb {
3033da8fa4e3SBjoern A. Zeeb __le16 *cal_words = data;
3034da8fa4e3SBjoern A. Zeeb u16 checksum = 0;
3035da8fa4e3SBjoern A. Zeeb size_t i;
3036da8fa4e3SBjoern A. Zeeb
3037da8fa4e3SBjoern A. Zeeb if (size % 2 != 0)
3038da8fa4e3SBjoern A. Zeeb return false;
3039da8fa4e3SBjoern A. Zeeb
3040da8fa4e3SBjoern A. Zeeb for (i = 0; i < size / 2; i++)
3041da8fa4e3SBjoern A. Zeeb checksum ^= le16_to_cpu(cal_words[i]);
3042da8fa4e3SBjoern A. Zeeb
3043da8fa4e3SBjoern A. Zeeb return checksum == 0xffff;
3044da8fa4e3SBjoern A. Zeeb }
3045da8fa4e3SBjoern A. Zeeb
/* Prepare the QCA9887 for calibration EEPROM access: enable the SI
 * (serial interface) clock, route the SDA/CLK GPIO pins, and program
 * the SI controller for I2C operation. The write order follows the
 * hardware bring-up sequence; do not reorder.
 */
static void ath10k_pci_enable_eeprom(struct ath10k *ar)
{
	/* Enable SI clock */
	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);

	/* Configure GPIOs for I2C operation */
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
			      GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	/* NOTE(review): W1TS register name suggests write-1-to-set enable
	 * of the SI clock GPIO pin — confirm against QCA9887 register docs.
	 */
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS +
			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);

	/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
	ath10k_pci_write32(ar,
			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
			   SM(1, SI_CONFIG_ERR_INT) |
			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
			   SM(1, SI_CONFIG_I2C) |
			   SM(1, SI_CONFIG_POS_SAMPLE) |
			   SM(1, SI_CONFIG_INACTIVE_DATA) |
			   SM(1, SI_CONFIG_INACTIVE_CLK) |
			   SM(8, SI_CONFIG_DIVIDER));
}
3081da8fa4e3SBjoern A. Zeeb
/* Read one byte from the device EEPROM at @addr via the SI controller,
 * storing it in @out.
 *
 * Returns 0 on success, -ETIMEDOUT if the SI transaction does not
 * complete within ~1 second, or -EIO if the controller reports an error.
 */
static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
{
	u32 reg;
	int wait_limit;

	/* set device select byte and for the read operation */
	reg = QCA9887_EEPROM_SELECT_READ |
	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);

	/* write transmit data, transfer length, and START bit */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
			   SM(4, SI_CS_TX_CNT));

	/* wait max 1 sec (100000 iterations * 10us) */
	wait_limit = 100000;

	/* wait for SI_CS_DONE_INT */
	do {
		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
		if (MS(reg, SI_CS_DONE_INT))
			break;

		wait_limit--;
		udelay(10);
	} while (wait_limit > 0);

	if (!MS(reg, SI_CS_DONE_INT)) {
		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
			   addr);
		return -ETIMEDOUT;
	}

	/* clear SI_CS_DONE_INT (write back the value just read) */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);

	if (MS(reg, SI_CS_DONE_ERR)) {
		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
		return -EIO;
	}

	/* extract receive data (low byte of the RX data register) */
	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
	*out = reg;

	return 0;
}
3131da8fa4e3SBjoern A. Zeeb
/* Fetch the full calibration blob from the QCA9887 EEPROM.
 *
 * On success *data points to a kmalloc'd buffer of *data_len bytes;
 * ownership transfers to the caller, who must kfree() it.
 *
 * Returns 0 on success, -EOPNOTSUPP on non-QCA9887 hardware, -ENOMEM on
 * allocation failure, the ath10k_pci_read_eeprom() error (-ETIMEDOUT or
 * -EIO) on a read failure, or -EINVAL if the checksum does not validate.
 *
 * Fix vs. previous version: read errors were collapsed into -EINVAL,
 * hiding whether the failure was a timeout, a bus error, or bad data;
 * the underlying error code is now propagated.
 */
static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
					   size_t *data_len)
{
	u8 *caldata = NULL;
	size_t calsize, i;
	int ret;

	if (!QCA_REV_9887(ar))
		return -EOPNOTSUPP;

	calsize = ar->hw_params.cal_data_len;
	caldata = kmalloc(calsize, GFP_KERNEL);
	if (!caldata)
		return -ENOMEM;

	ath10k_pci_enable_eeprom(ar);

	/* The EEPROM is read one byte at a time. */
	for (i = 0; i < calsize; i++) {
		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
		if (ret)
			goto err_free;
	}

	if (!ath10k_pci_validate_cal(caldata, calsize)) {
		ret = -EINVAL;
		goto err_free;
	}

	*data = caldata;
	*data_len = calsize;

	return 0;

err_free:
	kfree(caldata);

	return ret;
}
3168da8fa4e3SBjoern A. Zeeb
3169da8fa4e3SBjoern A. Zeeb static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
3170da8fa4e3SBjoern A. Zeeb .tx_sg = ath10k_pci_hif_tx_sg,
3171da8fa4e3SBjoern A. Zeeb .diag_read = ath10k_pci_hif_diag_read,
3172da8fa4e3SBjoern A. Zeeb .diag_write = ath10k_pci_diag_write_mem,
3173da8fa4e3SBjoern A. Zeeb .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
3174da8fa4e3SBjoern A. Zeeb .start = ath10k_pci_hif_start,
3175da8fa4e3SBjoern A. Zeeb .stop = ath10k_pci_hif_stop,
3176da8fa4e3SBjoern A. Zeeb .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
3177da8fa4e3SBjoern A. Zeeb .get_default_pipe = ath10k_pci_hif_get_default_pipe,
3178da8fa4e3SBjoern A. Zeeb .send_complete_check = ath10k_pci_hif_send_complete_check,
3179da8fa4e3SBjoern A. Zeeb .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
3180da8fa4e3SBjoern A. Zeeb .power_up = ath10k_pci_hif_power_up,
3181da8fa4e3SBjoern A. Zeeb .power_down = ath10k_pci_hif_power_down,
3182da8fa4e3SBjoern A. Zeeb .read32 = ath10k_pci_read32,
3183da8fa4e3SBjoern A. Zeeb .write32 = ath10k_pci_write32,
3184da8fa4e3SBjoern A. Zeeb .suspend = ath10k_pci_hif_suspend,
3185da8fa4e3SBjoern A. Zeeb .resume = ath10k_pci_hif_resume,
3186da8fa4e3SBjoern A. Zeeb .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
3187da8fa4e3SBjoern A. Zeeb };
3188da8fa4e3SBjoern A. Zeeb
3189da8fa4e3SBjoern A. Zeeb /*
3190da8fa4e3SBjoern A. Zeeb * Top-level interrupt handler for all PCI interrupts from a Target.
3191da8fa4e3SBjoern A. Zeeb * When a block of MSI interrupts is allocated, this top-level handler
3192da8fa4e3SBjoern A. Zeeb * is not used; instead, we directly call the correct sub-handler.
3193da8fa4e3SBjoern A. Zeeb */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Bail out early if the device has been removed from the bus. */
	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	/* The target may be asleep; wake it before touching registers. */
	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	/* With a shared legacy line the interrupt may belong to another
	 * device; only claim it if this device actually asserted it.
	 */
	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
	    !ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	/* Mask further interrupts, then defer all servicing to NAPI poll,
	 * which re-enables them when done (see ath10k_pci_napi_poll).
	 */
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
3219da8fa4e3SBjoern A. Zeeb
/* NAPI poll callback: services copy-engine completions and HTT tx/rx,
 * bounded by @budget. Returns the amount of work done; when less than
 * @budget, NAPI is completed and device interrupts are re-enabled.
 */
static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	/* On firmware crash, dump state and stop polling entirely;
	 * interrupts are deliberately left masked.
	 */
	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete_done(ctx, done);
		/* In case of MSI, it is possible that interrupts are received
		 * while NAPI poll is inprogress. So pending interrupts that are
		 * received after processing all copy engine pipes by NAPI poll
		 * will not be handled again. This is causing failure to
		 * complete boot sequence in x86 platform. So before enabling
		 * interrupts safer to check for pending interrupts for
		 * immediate servicing.
		 */
		if (ath10k_ce_interrupt_summary(ar)) {
			/* Work arrived between the service pass and
			 * napi_complete_done; poll again without
			 * re-enabling interrupts.
			 */
			napi_reschedule(ctx);
			goto out;
		}
		ath10k_pci_enable_legacy_irq(ar);
		ath10k_pci_irq_msi_fw_unmask(ar);
	}

out:
	return done;
}
3257da8fa4e3SBjoern A. Zeeb
ath10k_pci_request_irq_msi(struct ath10k * ar)3258da8fa4e3SBjoern A. Zeeb static int ath10k_pci_request_irq_msi(struct ath10k *ar)
3259da8fa4e3SBjoern A. Zeeb {
3260da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3261da8fa4e3SBjoern A. Zeeb int ret;
3262da8fa4e3SBjoern A. Zeeb
3263da8fa4e3SBjoern A. Zeeb ret = request_irq(ar_pci->pdev->irq,
3264da8fa4e3SBjoern A. Zeeb ath10k_pci_interrupt_handler,
3265da8fa4e3SBjoern A. Zeeb IRQF_SHARED, "ath10k_pci", ar);
3266da8fa4e3SBjoern A. Zeeb if (ret) {
3267da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
3268da8fa4e3SBjoern A. Zeeb ar_pci->pdev->irq, ret);
3269da8fa4e3SBjoern A. Zeeb return ret;
3270da8fa4e3SBjoern A. Zeeb }
3271da8fa4e3SBjoern A. Zeeb
3272da8fa4e3SBjoern A. Zeeb return 0;
3273da8fa4e3SBjoern A. Zeeb }
3274da8fa4e3SBjoern A. Zeeb
ath10k_pci_request_irq_legacy(struct ath10k * ar)3275da8fa4e3SBjoern A. Zeeb static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
3276da8fa4e3SBjoern A. Zeeb {
3277da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3278da8fa4e3SBjoern A. Zeeb int ret;
3279da8fa4e3SBjoern A. Zeeb
3280da8fa4e3SBjoern A. Zeeb ret = request_irq(ar_pci->pdev->irq,
3281da8fa4e3SBjoern A. Zeeb ath10k_pci_interrupt_handler,
3282da8fa4e3SBjoern A. Zeeb IRQF_SHARED, "ath10k_pci", ar);
3283da8fa4e3SBjoern A. Zeeb if (ret) {
3284da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
3285da8fa4e3SBjoern A. Zeeb ar_pci->pdev->irq, ret);
3286da8fa4e3SBjoern A. Zeeb return ret;
3287da8fa4e3SBjoern A. Zeeb }
3288da8fa4e3SBjoern A. Zeeb
3289da8fa4e3SBjoern A. Zeeb return 0;
3290da8fa4e3SBjoern A. Zeeb }
3291da8fa4e3SBjoern A. Zeeb
ath10k_pci_request_irq(struct ath10k * ar)3292da8fa4e3SBjoern A. Zeeb static int ath10k_pci_request_irq(struct ath10k *ar)
3293da8fa4e3SBjoern A. Zeeb {
3294da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3295da8fa4e3SBjoern A. Zeeb
3296da8fa4e3SBjoern A. Zeeb switch (ar_pci->oper_irq_mode) {
3297da8fa4e3SBjoern A. Zeeb case ATH10K_PCI_IRQ_LEGACY:
3298da8fa4e3SBjoern A. Zeeb return ath10k_pci_request_irq_legacy(ar);
3299da8fa4e3SBjoern A. Zeeb case ATH10K_PCI_IRQ_MSI:
3300da8fa4e3SBjoern A. Zeeb return ath10k_pci_request_irq_msi(ar);
3301da8fa4e3SBjoern A. Zeeb default:
3302da8fa4e3SBjoern A. Zeeb return -EINVAL;
3303da8fa4e3SBjoern A. Zeeb }
3304da8fa4e3SBjoern A. Zeeb }
3305da8fa4e3SBjoern A. Zeeb
ath10k_pci_free_irq(struct ath10k * ar)3306da8fa4e3SBjoern A. Zeeb static void ath10k_pci_free_irq(struct ath10k *ar)
3307da8fa4e3SBjoern A. Zeeb {
3308da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3309da8fa4e3SBjoern A. Zeeb
3310da8fa4e3SBjoern A. Zeeb free_irq(ar_pci->pdev->irq, ar);
3311da8fa4e3SBjoern A. Zeeb }
3312da8fa4e3SBjoern A. Zeeb
/* Register the NAPI poll handler that services CE/HTT completions. */
void ath10k_pci_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
}
3317da8fa4e3SBjoern A. Zeeb
/* Pick and set up the interrupt mode: try MSI first (unless the module
 * parameter forces legacy), then fall back to legacy INTx. Records the
 * chosen mode in ar_pci->oper_irq_mode. Always returns 0 because the
 * legacy fallback cannot fail here.
 */
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_napi(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* MSI failed, try legacy irq next */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}
3355da8fa4e3SBjoern A. Zeeb
/* Disable all legacy interrupt sources by clearing the PCIe interrupt
 * enable register (the inverse of the write in ath10k_pci_init_irq).
 */
static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}
3361da8fa4e3SBjoern A. Zeeb
ath10k_pci_deinit_irq(struct ath10k * ar)3362da8fa4e3SBjoern A. Zeeb static int ath10k_pci_deinit_irq(struct ath10k *ar)
3363da8fa4e3SBjoern A. Zeeb {
3364da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3365da8fa4e3SBjoern A. Zeeb
3366da8fa4e3SBjoern A. Zeeb switch (ar_pci->oper_irq_mode) {
3367da8fa4e3SBjoern A. Zeeb case ATH10K_PCI_IRQ_LEGACY:
3368da8fa4e3SBjoern A. Zeeb ath10k_pci_deinit_irq_legacy(ar);
3369da8fa4e3SBjoern A. Zeeb break;
3370da8fa4e3SBjoern A. Zeeb default:
3371da8fa4e3SBjoern A. Zeeb pci_disable_msi(ar_pci->pdev);
3372da8fa4e3SBjoern A. Zeeb break;
3373da8fa4e3SBjoern A. Zeeb }
3374da8fa4e3SBjoern A. Zeeb
3375da8fa4e3SBjoern A. Zeeb return 0;
3376da8fa4e3SBjoern A. Zeeb }
3377da8fa4e3SBjoern A. Zeeb
ath10k_pci_wait_for_target_init(struct ath10k * ar)3378da8fa4e3SBjoern A. Zeeb int ath10k_pci_wait_for_target_init(struct ath10k *ar)
3379da8fa4e3SBjoern A. Zeeb {
3380da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3381da8fa4e3SBjoern A. Zeeb unsigned long timeout;
3382da8fa4e3SBjoern A. Zeeb u32 val;
3383da8fa4e3SBjoern A. Zeeb
3384da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
3385da8fa4e3SBjoern A. Zeeb
3386da8fa4e3SBjoern A. Zeeb timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
3387da8fa4e3SBjoern A. Zeeb
3388da8fa4e3SBjoern A. Zeeb do {
3389da8fa4e3SBjoern A. Zeeb val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
3390da8fa4e3SBjoern A. Zeeb
3391da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
3392da8fa4e3SBjoern A. Zeeb val);
3393da8fa4e3SBjoern A. Zeeb
3394da8fa4e3SBjoern A. Zeeb /* target should never return this */
3395da8fa4e3SBjoern A. Zeeb if (val == 0xffffffff)
3396da8fa4e3SBjoern A. Zeeb continue;
3397da8fa4e3SBjoern A. Zeeb
3398da8fa4e3SBjoern A. Zeeb /* the device has crashed so don't bother trying anymore */
3399da8fa4e3SBjoern A. Zeeb if (val & FW_IND_EVENT_PENDING)
3400da8fa4e3SBjoern A. Zeeb break;
3401da8fa4e3SBjoern A. Zeeb
3402da8fa4e3SBjoern A. Zeeb if (val & FW_IND_INITIALIZED)
3403da8fa4e3SBjoern A. Zeeb break;
3404da8fa4e3SBjoern A. Zeeb
3405da8fa4e3SBjoern A. Zeeb if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
3406da8fa4e3SBjoern A. Zeeb /* Fix potential race by repeating CORE_BASE writes */
3407da8fa4e3SBjoern A. Zeeb ath10k_pci_enable_legacy_irq(ar);
3408da8fa4e3SBjoern A. Zeeb
3409da8fa4e3SBjoern A. Zeeb mdelay(10);
3410da8fa4e3SBjoern A. Zeeb } while (time_before(jiffies, timeout));
3411da8fa4e3SBjoern A. Zeeb
3412da8fa4e3SBjoern A. Zeeb ath10k_pci_disable_and_clear_legacy_irq(ar);
3413da8fa4e3SBjoern A. Zeeb ath10k_pci_irq_msi_fw_mask(ar);
3414da8fa4e3SBjoern A. Zeeb
3415da8fa4e3SBjoern A. Zeeb if (val == 0xffffffff) {
3416da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to read device register, device is gone\n");
3417da8fa4e3SBjoern A. Zeeb return -EIO;
3418da8fa4e3SBjoern A. Zeeb }
3419da8fa4e3SBjoern A. Zeeb
3420da8fa4e3SBjoern A. Zeeb if (val & FW_IND_EVENT_PENDING) {
3421da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "device has crashed during init\n");
3422da8fa4e3SBjoern A. Zeeb return -ECOMM;
3423da8fa4e3SBjoern A. Zeeb }
3424da8fa4e3SBjoern A. Zeeb
3425da8fa4e3SBjoern A. Zeeb if (!(val & FW_IND_INITIALIZED)) {
3426da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
3427da8fa4e3SBjoern A. Zeeb val);
3428da8fa4e3SBjoern A. Zeeb return -ETIMEDOUT;
3429da8fa4e3SBjoern A. Zeeb }
3430da8fa4e3SBjoern A. Zeeb
3431da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
3432da8fa4e3SBjoern A. Zeeb return 0;
3433da8fa4e3SBjoern A. Zeeb }
3434da8fa4e3SBjoern A. Zeeb
/* Perform a full (cold) reset of the target by toggling bit 0 of
 * SOC_GLOBAL_RESET, with settle delays on either side. Always returns 0.
 */
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	/* data_lock protects the statistics counters. */
	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	/* After writing into SOC_GLOBAL_RESET to put device into
	 * reset and pulling out of reset pcie may not be stable
	 * for any immediate pcie register access and cause bus error,
	 * add delay before any pcie access request to fix this issue.
	 */
	msleep(20);

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	msleep(20);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}
3469da8fa4e3SBjoern A. Zeeb
/* Claim the PCI device: enable it, reserve and iomap its register BAR,
 * and configure 32-bit DMA. On failure, everything acquired so far is
 * released via the goto-cleanup chain. Paired with ath10k_pci_release.
 */
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	pci_set_master(pdev);

#if defined(__FreeBSD__)
	/* FreeBSD linuxkpi: route register access through bus functions. */
	linuxkpi_pcim_want_to_use_bus_functions(pdev);
#endif

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_region;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
	return 0;

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}
3524da8fa4e3SBjoern A. Zeeb
/* Undo ath10k_pci_claim: unmap the BAR, release the region, and disable
 * the device — in the reverse order of acquisition.
 */
static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_disable_device(pdev);
}
3534da8fa4e3SBjoern A. Zeeb
ath10k_pci_chip_is_supported(u32 dev_id,u32 chip_id)3535da8fa4e3SBjoern A. Zeeb static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3536da8fa4e3SBjoern A. Zeeb {
3537da8fa4e3SBjoern A. Zeeb const struct ath10k_pci_supp_chip *supp_chip;
3538da8fa4e3SBjoern A. Zeeb int i;
3539da8fa4e3SBjoern A. Zeeb u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3540da8fa4e3SBjoern A. Zeeb
3541da8fa4e3SBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3542da8fa4e3SBjoern A. Zeeb supp_chip = &ath10k_pci_supp_chips[i];
3543da8fa4e3SBjoern A. Zeeb
3544da8fa4e3SBjoern A. Zeeb if (supp_chip->dev_id == dev_id &&
3545da8fa4e3SBjoern A. Zeeb supp_chip->rev_id == rev_id)
3546da8fa4e3SBjoern A. Zeeb return true;
3547da8fa4e3SBjoern A. Zeeb }
3548da8fa4e3SBjoern A. Zeeb
3549da8fa4e3SBjoern A. Zeeb return false;
3550da8fa4e3SBjoern A. Zeeb }
3551da8fa4e3SBjoern A. Zeeb
ath10k_pci_setup_resource(struct ath10k * ar)3552da8fa4e3SBjoern A. Zeeb int ath10k_pci_setup_resource(struct ath10k *ar)
3553da8fa4e3SBjoern A. Zeeb {
3554da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3555da8fa4e3SBjoern A. Zeeb struct ath10k_ce *ce = ath10k_ce_priv(ar);
3556da8fa4e3SBjoern A. Zeeb int ret;
3557da8fa4e3SBjoern A. Zeeb
3558da8fa4e3SBjoern A. Zeeb spin_lock_init(&ce->ce_lock);
3559da8fa4e3SBjoern A. Zeeb spin_lock_init(&ar_pci->ps_lock);
3560da8fa4e3SBjoern A. Zeeb mutex_init(&ar_pci->ce_diag_mutex);
3561da8fa4e3SBjoern A. Zeeb
3562da8fa4e3SBjoern A. Zeeb INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
3563da8fa4e3SBjoern A. Zeeb
3564da8fa4e3SBjoern A. Zeeb timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
3565da8fa4e3SBjoern A. Zeeb
3566da8fa4e3SBjoern A. Zeeb ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
3567da8fa4e3SBjoern A. Zeeb sizeof(pci_host_ce_config_wlan),
3568da8fa4e3SBjoern A. Zeeb GFP_KERNEL);
3569da8fa4e3SBjoern A. Zeeb if (!ar_pci->attr)
3570da8fa4e3SBjoern A. Zeeb return -ENOMEM;
3571da8fa4e3SBjoern A. Zeeb
3572da8fa4e3SBjoern A. Zeeb ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
3573da8fa4e3SBjoern A. Zeeb sizeof(pci_target_ce_config_wlan),
3574da8fa4e3SBjoern A. Zeeb GFP_KERNEL);
3575da8fa4e3SBjoern A. Zeeb if (!ar_pci->pipe_config) {
3576da8fa4e3SBjoern A. Zeeb ret = -ENOMEM;
3577da8fa4e3SBjoern A. Zeeb goto err_free_attr;
3578da8fa4e3SBjoern A. Zeeb }
3579da8fa4e3SBjoern A. Zeeb
3580da8fa4e3SBjoern A. Zeeb ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
3581da8fa4e3SBjoern A. Zeeb sizeof(pci_target_service_to_ce_map_wlan),
3582da8fa4e3SBjoern A. Zeeb GFP_KERNEL);
3583da8fa4e3SBjoern A. Zeeb if (!ar_pci->serv_to_pipe) {
3584da8fa4e3SBjoern A. Zeeb ret = -ENOMEM;
3585da8fa4e3SBjoern A. Zeeb goto err_free_pipe_config;
3586da8fa4e3SBjoern A. Zeeb }
3587da8fa4e3SBjoern A. Zeeb
3588da8fa4e3SBjoern A. Zeeb if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
3589da8fa4e3SBjoern A. Zeeb ath10k_pci_override_ce_config(ar);
3590da8fa4e3SBjoern A. Zeeb
3591da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_alloc_pipes(ar);
3592da8fa4e3SBjoern A. Zeeb if (ret) {
3593da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3594da8fa4e3SBjoern A. Zeeb ret);
3595da8fa4e3SBjoern A. Zeeb goto err_free_serv_to_pipe;
3596da8fa4e3SBjoern A. Zeeb }
3597da8fa4e3SBjoern A. Zeeb
3598da8fa4e3SBjoern A. Zeeb return 0;
3599da8fa4e3SBjoern A. Zeeb
3600da8fa4e3SBjoern A. Zeeb err_free_serv_to_pipe:
3601da8fa4e3SBjoern A. Zeeb kfree(ar_pci->serv_to_pipe);
3602da8fa4e3SBjoern A. Zeeb err_free_pipe_config:
3603da8fa4e3SBjoern A. Zeeb kfree(ar_pci->pipe_config);
3604da8fa4e3SBjoern A. Zeeb err_free_attr:
3605da8fa4e3SBjoern A. Zeeb kfree(ar_pci->attr);
3606da8fa4e3SBjoern A. Zeeb return ret;
3607da8fa4e3SBjoern A. Zeeb }
3608da8fa4e3SBjoern A. Zeeb
/* Undo ath10k_pci_setup_resource(): quiesce deferred RX work, remove the
 * NAPI context, shut down the copy engines and free the CE pipe memory,
 * then release the kmemdup'd CE attribute/pipe/service tables.
 *
 * The order matters: RX work and NAPI must be stopped before the CE state
 * they reference is torn down.
 *
 * NOTE(review): the freed ar_pci pointers are not set to NULL here, so a
 * second free of the same ath10k_pci instance would be a double free —
 * callers must not invoke this twice.
 */
void ath10k_pci_release_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_rx_retry_sync(ar);
	netif_napi_del(&ar->napi);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
	kfree(ar_pci->attr);
	kfree(ar_pci->pipe_config);
	kfree(ar_pci->serv_to_pipe);
}
3621da8fa4e3SBjoern A. Zeeb
/* Bus accessor callbacks handed to the shared copy-engine code so it can
 * reach device registers through the PCI MMIO helpers. */
static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
	.read32 = ath10k_bus_pci_read32,
	.write32 = ath10k_bus_pci_write32,
	.get_num_banks = ath10k_pci_get_num_banks,
};
3627da8fa4e3SBjoern A. Zeeb
ath10k_pci_probe(struct pci_dev * pdev,const struct pci_device_id * pci_dev)3628da8fa4e3SBjoern A. Zeeb static int ath10k_pci_probe(struct pci_dev *pdev,
3629da8fa4e3SBjoern A. Zeeb const struct pci_device_id *pci_dev)
3630da8fa4e3SBjoern A. Zeeb {
3631da8fa4e3SBjoern A. Zeeb int ret = 0;
3632da8fa4e3SBjoern A. Zeeb struct ath10k *ar;
3633da8fa4e3SBjoern A. Zeeb struct ath10k_pci *ar_pci;
3634da8fa4e3SBjoern A. Zeeb enum ath10k_hw_rev hw_rev;
3635da8fa4e3SBjoern A. Zeeb struct ath10k_bus_params bus_params = {};
3636da8fa4e3SBjoern A. Zeeb bool pci_ps, is_qca988x = false;
3637da8fa4e3SBjoern A. Zeeb int (*pci_soft_reset)(struct ath10k *ar);
3638da8fa4e3SBjoern A. Zeeb int (*pci_hard_reset)(struct ath10k *ar);
3639da8fa4e3SBjoern A. Zeeb u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
3640da8fa4e3SBjoern A. Zeeb
3641da8fa4e3SBjoern A. Zeeb switch (pci_dev->device) {
3642da8fa4e3SBjoern A. Zeeb case QCA988X_2_0_DEVICE_ID_UBNT:
3643da8fa4e3SBjoern A. Zeeb case QCA988X_2_0_DEVICE_ID:
3644da8fa4e3SBjoern A. Zeeb hw_rev = ATH10K_HW_QCA988X;
3645da8fa4e3SBjoern A. Zeeb pci_ps = false;
3646da8fa4e3SBjoern A. Zeeb is_qca988x = true;
3647da8fa4e3SBjoern A. Zeeb pci_soft_reset = ath10k_pci_warm_reset;
3648da8fa4e3SBjoern A. Zeeb pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3649da8fa4e3SBjoern A. Zeeb targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3650da8fa4e3SBjoern A. Zeeb break;
3651da8fa4e3SBjoern A. Zeeb case QCA9887_1_0_DEVICE_ID:
3652da8fa4e3SBjoern A. Zeeb hw_rev = ATH10K_HW_QCA9887;
3653da8fa4e3SBjoern A. Zeeb pci_ps = false;
3654da8fa4e3SBjoern A. Zeeb pci_soft_reset = ath10k_pci_warm_reset;
3655da8fa4e3SBjoern A. Zeeb pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3656da8fa4e3SBjoern A. Zeeb targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3657da8fa4e3SBjoern A. Zeeb break;
3658da8fa4e3SBjoern A. Zeeb case QCA6164_2_1_DEVICE_ID:
3659da8fa4e3SBjoern A. Zeeb case QCA6174_2_1_DEVICE_ID:
3660da8fa4e3SBjoern A. Zeeb hw_rev = ATH10K_HW_QCA6174;
3661da8fa4e3SBjoern A. Zeeb pci_ps = true;
3662da8fa4e3SBjoern A. Zeeb pci_soft_reset = ath10k_pci_warm_reset;
3663da8fa4e3SBjoern A. Zeeb pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3664da8fa4e3SBjoern A. Zeeb targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3665da8fa4e3SBjoern A. Zeeb break;
3666da8fa4e3SBjoern A. Zeeb case QCA99X0_2_0_DEVICE_ID:
3667da8fa4e3SBjoern A. Zeeb hw_rev = ATH10K_HW_QCA99X0;
3668da8fa4e3SBjoern A. Zeeb pci_ps = false;
3669da8fa4e3SBjoern A. Zeeb pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3670da8fa4e3SBjoern A. Zeeb pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3671da8fa4e3SBjoern A. Zeeb targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3672da8fa4e3SBjoern A. Zeeb break;
3673da8fa4e3SBjoern A. Zeeb case QCA9984_1_0_DEVICE_ID:
3674da8fa4e3SBjoern A. Zeeb hw_rev = ATH10K_HW_QCA9984;
3675da8fa4e3SBjoern A. Zeeb pci_ps = false;
3676da8fa4e3SBjoern A. Zeeb pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3677da8fa4e3SBjoern A. Zeeb pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3678da8fa4e3SBjoern A. Zeeb targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3679da8fa4e3SBjoern A. Zeeb break;
3680da8fa4e3SBjoern A. Zeeb case QCA9888_2_0_DEVICE_ID:
3681da8fa4e3SBjoern A. Zeeb hw_rev = ATH10K_HW_QCA9888;
3682da8fa4e3SBjoern A. Zeeb pci_ps = false;
3683da8fa4e3SBjoern A. Zeeb pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3684da8fa4e3SBjoern A. Zeeb pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3685da8fa4e3SBjoern A. Zeeb targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3686da8fa4e3SBjoern A. Zeeb break;
3687da8fa4e3SBjoern A. Zeeb case QCA9377_1_0_DEVICE_ID:
3688da8fa4e3SBjoern A. Zeeb hw_rev = ATH10K_HW_QCA9377;
3689da8fa4e3SBjoern A. Zeeb pci_ps = true;
3690da8fa4e3SBjoern A. Zeeb pci_soft_reset = ath10k_pci_warm_reset;
3691da8fa4e3SBjoern A. Zeeb pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3692da8fa4e3SBjoern A. Zeeb targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3693da8fa4e3SBjoern A. Zeeb break;
3694da8fa4e3SBjoern A. Zeeb default:
3695da8fa4e3SBjoern A. Zeeb WARN_ON(1);
3696da8fa4e3SBjoern A. Zeeb return -ENOTSUPP;
3697da8fa4e3SBjoern A. Zeeb }
3698da8fa4e3SBjoern A. Zeeb
3699da8fa4e3SBjoern A. Zeeb ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3700da8fa4e3SBjoern A. Zeeb hw_rev, &ath10k_pci_hif_ops);
3701da8fa4e3SBjoern A. Zeeb if (!ar) {
3702da8fa4e3SBjoern A. Zeeb dev_err(&pdev->dev, "failed to allocate core\n");
3703da8fa4e3SBjoern A. Zeeb return -ENOMEM;
3704da8fa4e3SBjoern A. Zeeb }
3705da8fa4e3SBjoern A. Zeeb
3706da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3707da8fa4e3SBjoern A. Zeeb pdev->vendor, pdev->device,
3708da8fa4e3SBjoern A. Zeeb pdev->subsystem_vendor, pdev->subsystem_device);
3709da8fa4e3SBjoern A. Zeeb
3710da8fa4e3SBjoern A. Zeeb ar_pci = ath10k_pci_priv(ar);
3711da8fa4e3SBjoern A. Zeeb ar_pci->pdev = pdev;
3712da8fa4e3SBjoern A. Zeeb ar_pci->dev = &pdev->dev;
3713da8fa4e3SBjoern A. Zeeb ar_pci->ar = ar;
3714da8fa4e3SBjoern A. Zeeb ar->dev_id = pci_dev->device;
3715da8fa4e3SBjoern A. Zeeb ar_pci->pci_ps = pci_ps;
3716da8fa4e3SBjoern A. Zeeb ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
3717da8fa4e3SBjoern A. Zeeb ar_pci->pci_soft_reset = pci_soft_reset;
3718da8fa4e3SBjoern A. Zeeb ar_pci->pci_hard_reset = pci_hard_reset;
3719da8fa4e3SBjoern A. Zeeb ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
3720da8fa4e3SBjoern A. Zeeb ar->ce_priv = &ar_pci->ce;
3721da8fa4e3SBjoern A. Zeeb
3722da8fa4e3SBjoern A. Zeeb ar->id.vendor = pdev->vendor;
3723da8fa4e3SBjoern A. Zeeb ar->id.device = pdev->device;
3724da8fa4e3SBjoern A. Zeeb ar->id.subsystem_vendor = pdev->subsystem_vendor;
3725da8fa4e3SBjoern A. Zeeb ar->id.subsystem_device = pdev->subsystem_device;
3726da8fa4e3SBjoern A. Zeeb
3727da8fa4e3SBjoern A. Zeeb timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
3728da8fa4e3SBjoern A. Zeeb
3729da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_setup_resource(ar);
3730da8fa4e3SBjoern A. Zeeb if (ret) {
3731da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to setup resource: %d\n", ret);
3732da8fa4e3SBjoern A. Zeeb goto err_core_destroy;
3733da8fa4e3SBjoern A. Zeeb }
3734da8fa4e3SBjoern A. Zeeb
3735da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_claim(ar);
3736da8fa4e3SBjoern A. Zeeb if (ret) {
3737da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to claim device: %d\n", ret);
3738da8fa4e3SBjoern A. Zeeb goto err_free_pipes;
3739da8fa4e3SBjoern A. Zeeb }
3740da8fa4e3SBjoern A. Zeeb
3741da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_force_wake(ar);
3742da8fa4e3SBjoern A. Zeeb if (ret) {
3743da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to wake up device : %d\n", ret);
3744da8fa4e3SBjoern A. Zeeb goto err_sleep;
3745da8fa4e3SBjoern A. Zeeb }
3746da8fa4e3SBjoern A. Zeeb
3747da8fa4e3SBjoern A. Zeeb ath10k_pci_ce_deinit(ar);
3748da8fa4e3SBjoern A. Zeeb ath10k_pci_irq_disable(ar);
3749da8fa4e3SBjoern A. Zeeb
3750da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_init_irq(ar);
3751da8fa4e3SBjoern A. Zeeb if (ret) {
3752da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to init irqs: %d\n", ret);
3753da8fa4e3SBjoern A. Zeeb goto err_sleep;
3754da8fa4e3SBjoern A. Zeeb }
3755da8fa4e3SBjoern A. Zeeb
3756da8fa4e3SBjoern A. Zeeb ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3757da8fa4e3SBjoern A. Zeeb ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3758da8fa4e3SBjoern A. Zeeb ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3759da8fa4e3SBjoern A. Zeeb
3760da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_request_irq(ar);
3761da8fa4e3SBjoern A. Zeeb if (ret) {
3762da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3763da8fa4e3SBjoern A. Zeeb goto err_deinit_irq;
3764da8fa4e3SBjoern A. Zeeb }
3765da8fa4e3SBjoern A. Zeeb
3766da8fa4e3SBjoern A. Zeeb bus_params.dev_type = ATH10K_DEV_TYPE_LL;
3767da8fa4e3SBjoern A. Zeeb bus_params.link_can_suspend = true;
3768da8fa4e3SBjoern A. Zeeb /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
3769da8fa4e3SBjoern A. Zeeb * fall off the bus during chip_reset. These chips have the same pci
3770da8fa4e3SBjoern A. Zeeb * device id as the QCA9880 BR4A or 2R4E. So that's why the check.
3771da8fa4e3SBjoern A. Zeeb */
3772da8fa4e3SBjoern A. Zeeb if (is_qca988x) {
3773da8fa4e3SBjoern A. Zeeb bus_params.chip_id =
3774da8fa4e3SBjoern A. Zeeb ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3775da8fa4e3SBjoern A. Zeeb if (bus_params.chip_id != 0xffffffff) {
3776da8fa4e3SBjoern A. Zeeb if (!ath10k_pci_chip_is_supported(pdev->device,
3777da8fa4e3SBjoern A. Zeeb bus_params.chip_id)) {
3778da8fa4e3SBjoern A. Zeeb ret = -ENODEV;
3779da8fa4e3SBjoern A. Zeeb goto err_unsupported;
3780da8fa4e3SBjoern A. Zeeb }
3781da8fa4e3SBjoern A. Zeeb }
3782da8fa4e3SBjoern A. Zeeb }
3783da8fa4e3SBjoern A. Zeeb
3784da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_chip_reset(ar);
3785da8fa4e3SBjoern A. Zeeb if (ret) {
3786da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to reset chip: %d\n", ret);
3787da8fa4e3SBjoern A. Zeeb goto err_free_irq;
3788da8fa4e3SBjoern A. Zeeb }
3789da8fa4e3SBjoern A. Zeeb
3790da8fa4e3SBjoern A. Zeeb bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3791da8fa4e3SBjoern A. Zeeb if (bus_params.chip_id == 0xffffffff) {
3792da8fa4e3SBjoern A. Zeeb ret = -ENODEV;
3793da8fa4e3SBjoern A. Zeeb goto err_unsupported;
3794da8fa4e3SBjoern A. Zeeb }
3795da8fa4e3SBjoern A. Zeeb
3796da8fa4e3SBjoern A. Zeeb if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
3797da8fa4e3SBjoern A. Zeeb ret = -ENODEV;
3798da8fa4e3SBjoern A. Zeeb goto err_unsupported;
3799da8fa4e3SBjoern A. Zeeb }
3800da8fa4e3SBjoern A. Zeeb
3801da8fa4e3SBjoern A. Zeeb ret = ath10k_core_register(ar, &bus_params);
3802da8fa4e3SBjoern A. Zeeb if (ret) {
3803da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "failed to register driver core: %d\n", ret);
3804da8fa4e3SBjoern A. Zeeb goto err_free_irq;
3805da8fa4e3SBjoern A. Zeeb }
3806da8fa4e3SBjoern A. Zeeb
3807da8fa4e3SBjoern A. Zeeb return 0;
3808da8fa4e3SBjoern A. Zeeb
3809da8fa4e3SBjoern A. Zeeb err_unsupported:
3810da8fa4e3SBjoern A. Zeeb ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3811da8fa4e3SBjoern A. Zeeb pdev->device, bus_params.chip_id);
3812da8fa4e3SBjoern A. Zeeb
3813da8fa4e3SBjoern A. Zeeb err_free_irq:
3814da8fa4e3SBjoern A. Zeeb ath10k_pci_free_irq(ar);
3815da8fa4e3SBjoern A. Zeeb
3816da8fa4e3SBjoern A. Zeeb err_deinit_irq:
3817da8fa4e3SBjoern A. Zeeb ath10k_pci_release_resource(ar);
3818da8fa4e3SBjoern A. Zeeb
3819da8fa4e3SBjoern A. Zeeb err_sleep:
3820da8fa4e3SBjoern A. Zeeb ath10k_pci_sleep_sync(ar);
3821da8fa4e3SBjoern A. Zeeb ath10k_pci_release(ar);
3822da8fa4e3SBjoern A. Zeeb
3823da8fa4e3SBjoern A. Zeeb err_free_pipes:
3824da8fa4e3SBjoern A. Zeeb ath10k_pci_free_pipes(ar);
3825da8fa4e3SBjoern A. Zeeb
3826da8fa4e3SBjoern A. Zeeb err_core_destroy:
3827da8fa4e3SBjoern A. Zeeb ath10k_core_destroy(ar);
3828da8fa4e3SBjoern A. Zeeb
3829da8fa4e3SBjoern A. Zeeb return ret;
3830da8fa4e3SBjoern A. Zeeb }
3831da8fa4e3SBjoern A. Zeeb
ath10k_pci_remove(struct pci_dev * pdev)3832da8fa4e3SBjoern A. Zeeb static void ath10k_pci_remove(struct pci_dev *pdev)
3833da8fa4e3SBjoern A. Zeeb {
3834da8fa4e3SBjoern A. Zeeb struct ath10k *ar = pci_get_drvdata(pdev);
3835da8fa4e3SBjoern A. Zeeb
3836da8fa4e3SBjoern A. Zeeb ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3837da8fa4e3SBjoern A. Zeeb
3838da8fa4e3SBjoern A. Zeeb if (!ar)
3839da8fa4e3SBjoern A. Zeeb return;
3840da8fa4e3SBjoern A. Zeeb
3841da8fa4e3SBjoern A. Zeeb ath10k_core_unregister(ar);
3842da8fa4e3SBjoern A. Zeeb ath10k_pci_free_irq(ar);
3843da8fa4e3SBjoern A. Zeeb ath10k_pci_deinit_irq(ar);
3844da8fa4e3SBjoern A. Zeeb ath10k_pci_release_resource(ar);
3845da8fa4e3SBjoern A. Zeeb ath10k_pci_sleep_sync(ar);
3846da8fa4e3SBjoern A. Zeeb ath10k_pci_release(ar);
3847da8fa4e3SBjoern A. Zeeb ath10k_core_destroy(ar);
3848da8fa4e3SBjoern A. Zeeb }
3849da8fa4e3SBjoern A. Zeeb
3850da8fa4e3SBjoern A. Zeeb MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3851da8fa4e3SBjoern A. Zeeb
3852da8fa4e3SBjoern A. Zeeb #ifdef CONFIG_PM
ath10k_pci_pm_suspend(struct device * dev)3853da8fa4e3SBjoern A. Zeeb static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
3854da8fa4e3SBjoern A. Zeeb {
3855da8fa4e3SBjoern A. Zeeb struct ath10k *ar = dev_get_drvdata(dev);
3856da8fa4e3SBjoern A. Zeeb int ret;
3857da8fa4e3SBjoern A. Zeeb
3858da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_suspend(ar);
3859da8fa4e3SBjoern A. Zeeb if (ret)
3860da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
3861da8fa4e3SBjoern A. Zeeb
3862da8fa4e3SBjoern A. Zeeb return ret;
3863da8fa4e3SBjoern A. Zeeb }
3864da8fa4e3SBjoern A. Zeeb
ath10k_pci_pm_resume(struct device * dev)3865da8fa4e3SBjoern A. Zeeb static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
3866da8fa4e3SBjoern A. Zeeb {
3867da8fa4e3SBjoern A. Zeeb struct ath10k *ar = dev_get_drvdata(dev);
3868da8fa4e3SBjoern A. Zeeb int ret;
3869da8fa4e3SBjoern A. Zeeb
3870da8fa4e3SBjoern A. Zeeb ret = ath10k_pci_resume(ar);
3871da8fa4e3SBjoern A. Zeeb if (ret)
3872da8fa4e3SBjoern A. Zeeb ath10k_warn(ar, "failed to resume hif: %d\n", ret);
3873da8fa4e3SBjoern A. Zeeb
3874da8fa4e3SBjoern A. Zeeb return ret;
3875da8fa4e3SBjoern A. Zeeb }
3876da8fa4e3SBjoern A. Zeeb
/* System sleep PM ops: plain suspend/resume handlers only. */
static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
			 ath10k_pci_pm_suspend,
			 ath10k_pci_pm_resume);
3880da8fa4e3SBjoern A. Zeeb #endif
3881da8fa4e3SBjoern A. Zeeb
/* PCI driver glue: device-id table plus probe/remove and (optional) PM
 * callbacks.  The FreeBSD-only fields feed the linuxkpi PCI shim. */
static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
#ifdef CONFIG_PM
	.driver.pm = &ath10k_pci_pm_ops,
#endif
#if defined(__FreeBSD__)
	.bsddriver.name = KBUILD_MODNAME,
	/* Allow a possible native driver to attach. */
	.bsd_probe_return = (BUS_PROBE_DEFAULT - 1),
#endif
};
3896da8fa4e3SBjoern A. Zeeb
ath10k_pci_init(void)3897da8fa4e3SBjoern A. Zeeb static int __init ath10k_pci_init(void)
3898da8fa4e3SBjoern A. Zeeb {
389907724ba6SBjoern A. Zeeb int ret1, ret2;
3900da8fa4e3SBjoern A. Zeeb
390107724ba6SBjoern A. Zeeb ret1 = pci_register_driver(&ath10k_pci_driver);
390207724ba6SBjoern A. Zeeb if (ret1)
3903da8fa4e3SBjoern A. Zeeb printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
390407724ba6SBjoern A. Zeeb ret1);
3905da8fa4e3SBjoern A. Zeeb
390607724ba6SBjoern A. Zeeb ret2 = ath10k_ahb_init();
390707724ba6SBjoern A. Zeeb if (ret2)
390807724ba6SBjoern A. Zeeb printk(KERN_ERR "ahb init failed: %d\n", ret2);
3909da8fa4e3SBjoern A. Zeeb
391007724ba6SBjoern A. Zeeb if (ret1 && ret2)
391107724ba6SBjoern A. Zeeb return ret1;
391207724ba6SBjoern A. Zeeb
391307724ba6SBjoern A. Zeeb /* registered to at least one bus */
391407724ba6SBjoern A. Zeeb return 0;
3915da8fa4e3SBjoern A. Zeeb }
3916da8fa4e3SBjoern A. Zeeb module_init(ath10k_pci_init);
3917da8fa4e3SBjoern A. Zeeb
/* Module exit: unregister from both buses, mirroring ath10k_pci_init().
 * NOTE(review): both calls are made unconditionally even if the matching
 * registration in init failed — presumably the unregister paths tolerate
 * that; confirm against the linuxkpi/ahb implementations.
 */
static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}
3923da8fa4e3SBjoern A. Zeeb
3924da8fa4e3SBjoern A. Zeeb module_exit(ath10k_pci_exit);
3925da8fa4e3SBjoern A. Zeeb
3926da8fa4e3SBjoern A. Zeeb MODULE_AUTHOR("Qualcomm Atheros");
3927da8fa4e3SBjoern A. Zeeb MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
3928da8fa4e3SBjoern A. Zeeb MODULE_LICENSE("Dual BSD/GPL");
3929da8fa4e3SBjoern A. Zeeb #if defined(__FreeBSD__)
3930da8fa4e3SBjoern A. Zeeb MODULE_VERSION(ath10k_pci, 1);
3931da8fa4e3SBjoern A. Zeeb MODULE_DEPEND(ath10k_pci, linuxkpi, 1, 1, 1);
3932da8fa4e3SBjoern A. Zeeb MODULE_DEPEND(ath10k_pci, linuxkpi_wlan, 1, 1, 1);
3933da8fa4e3SBjoern A. Zeeb MODULE_DEPEND(ath10k_pci, athk_common, 1, 1, 1);
3934da8fa4e3SBjoern A. Zeeb #ifdef CONFIG_ATH10K_DEBUGFS
3935da8fa4e3SBjoern A. Zeeb MODULE_DEPEND(ath10k_pci, debugfs, 1, 1, 1);
3936da8fa4e3SBjoern A. Zeeb #endif
3937da8fa4e3SBjoern A. Zeeb #endif
3938da8fa4e3SBjoern A. Zeeb
3939da8fa4e3SBjoern A. Zeeb /* QCA988x 2.0 firmware files */
3940da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3941da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3942da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3943da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3944da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3945da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3946da8fa4e3SBjoern A. Zeeb
3947da8fa4e3SBjoern A. Zeeb /* QCA9887 1.0 firmware files */
3948da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3949da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3950da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3951da8fa4e3SBjoern A. Zeeb
3952da8fa4e3SBjoern A. Zeeb /* QCA6174 2.1 firmware files */
3953da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3954da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3955da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3956da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3957da8fa4e3SBjoern A. Zeeb
3958da8fa4e3SBjoern A. Zeeb /* QCA6174 3.1 firmware files */
3959da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3960da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3961da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3962da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
3963da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3964da8fa4e3SBjoern A. Zeeb
3965da8fa4e3SBjoern A. Zeeb /* QCA9377 1.0 firmware files */
3966da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3967da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3968da8fa4e3SBjoern A. Zeeb MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
3969