xref: /linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c (revision 2b64b2ed277ff23e785fbdb65098ee7e1252d64f)
1 /* Copyright (c) 2014 Broadcom Corporation
2  *
3  * Permission to use, copy, modify, and/or distribute this software for any
4  * purpose with or without fee is hereby granted, provided that the above
5  * copyright notice and this permission notice appear in all copies.
6  *
7  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
10  * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/firmware.h>
19 #include <linux/pci.h>
20 #include <linux/vmalloc.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/bcma/bcma.h>
24 #include <linux/sched.h>
25 #include <asm/unaligned.h>
26 
27 #include <soc.h>
28 #include <chipcommon.h>
29 #include <brcmu_utils.h>
30 #include <brcmu_wifi.h>
31 #include <brcm_hw_ids.h>
32 
33 /* Custom brcmf_err() that takes bus arg and passes it further */
34 #define brcmf_err(bus, fmt, ...)					\
35 	do {								\
36 		if (IS_ENABLED(CONFIG_BRCMDBG) ||			\
37 		    IS_ENABLED(CONFIG_BRCM_TRACING) ||			\
38 		    net_ratelimit())					\
39 			__brcmf_err(bus, __func__, fmt, ##__VA_ARGS__);	\
40 	} while (0)
41 
42 #include "debug.h"
43 #include "bus.h"
44 #include "commonring.h"
45 #include "msgbuf.h"
46 #include "pcie.h"
47 #include "firmware.h"
48 #include "chip.h"
49 #include "core.h"
50 #include "common.h"
51 
52 
53 enum brcmf_pcie_state {
54 	BRCMFMAC_PCIE_STATE_DOWN,
55 	BRCMFMAC_PCIE_STATE_UP
56 };
57 
58 BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
59 BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
60 BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
61 BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
62 BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
63 BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
64 BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
65 BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
66 BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
67 BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
68 BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
69 BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
70 
71 static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
72 	BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
73 	BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
74 	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
75 	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
76 	BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
77 	BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
78 	BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
79 	BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
80 	BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
81 	BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
82 	BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
83 	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
84 	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
85 	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
86 	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
87 	BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
88 	BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
89 };
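
/* Illustrative sketch, not part of the driver: each BRCMF_FW_ENTRY above pairs
 * a chip ID with a bitmask of chip revisions, and the firmware code in
 * firmware.c picks the entry whose mask has the bit for the running revision
 * set. Assuming the mapping fields are named chipid, revmask and fw_base (as
 * in firmware.h), the lookup amounts to:
 *
 *	for (i = 0; i < ARRAY_SIZE(brcmf_pcie_fwnames); i++)
 *		if (chip == brcmf_pcie_fwnames[i].chipid &&
 *		    BIT(chiprev) & brcmf_pcie_fwnames[i].revmask)
 *			return brcmf_pcie_fwnames[i].fw_base;
 *
 * For example, BRCM_CC_4350_CHIP_ID revisions 0-7 map to brcmfmac4350c2-pcie
 * and revisions 8-31 map to brcmfmac4350-pcie.
 */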
90 
91 #define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */
92 
93 #define BRCMF_PCIE_REG_MAP_SIZE			(32 * 1024)
94 
95 /* backplane address space accessed by BAR0 */
96 #define	BRCMF_PCIE_BAR0_WINDOW			0x80
97 #define BRCMF_PCIE_BAR0_REG_SIZE		0x1000
98 #define	BRCMF_PCIE_BAR0_WRAPPERBASE		0x70
99 
100 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET	0x1000
101 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET	0x2000
102 
103 #define BRCMF_PCIE_ARMCR4REG_BANKIDX		0x40
104 #define BRCMF_PCIE_ARMCR4REG_BANKPDA		0x4C
105 
106 #define BRCMF_PCIE_REG_INTSTATUS		0x90
107 #define BRCMF_PCIE_REG_INTMASK			0x94
108 #define BRCMF_PCIE_REG_SBMBX			0x98
109 
110 #define BRCMF_PCIE_REG_LINK_STATUS_CTRL		0xBC
111 
112 #define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
113 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
114 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
115 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR		0x120
116 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA		0x124
117 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0	0x140
118 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1	0x144
119 
120 #define BRCMF_PCIE2_INTA			0x01
121 #define BRCMF_PCIE2_INTB			0x02
122 
123 #define BRCMF_PCIE_INT_0			0x01
124 #define BRCMF_PCIE_INT_1			0x02
125 #define BRCMF_PCIE_INT_DEF			(BRCMF_PCIE_INT_0 | \
126 						 BRCMF_PCIE_INT_1)
127 
128 #define BRCMF_PCIE_MB_INT_FN0_0			0x0100
129 #define BRCMF_PCIE_MB_INT_FN0_1			0x0200
130 #define	BRCMF_PCIE_MB_INT_D2H0_DB0		0x10000
131 #define	BRCMF_PCIE_MB_INT_D2H0_DB1		0x20000
132 #define	BRCMF_PCIE_MB_INT_D2H1_DB0		0x40000
133 #define	BRCMF_PCIE_MB_INT_D2H1_DB1		0x80000
134 #define	BRCMF_PCIE_MB_INT_D2H2_DB0		0x100000
135 #define	BRCMF_PCIE_MB_INT_D2H2_DB1		0x200000
136 #define	BRCMF_PCIE_MB_INT_D2H3_DB0		0x400000
137 #define	BRCMF_PCIE_MB_INT_D2H3_DB1		0x800000
138 
139 #define BRCMF_PCIE_MB_INT_D2H_DB		(BRCMF_PCIE_MB_INT_D2H0_DB0 | \
140 						 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
141 						 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
142 						 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
143 						 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
144 						 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
145 						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
146 						 BRCMF_PCIE_MB_INT_D2H3_DB1)
147 
148 #define BRCMF_PCIE_SHARED_VERSION_7		7
149 #define BRCMF_PCIE_MIN_SHARED_VERSION		5
150 #define BRCMF_PCIE_MAX_SHARED_VERSION		BRCMF_PCIE_SHARED_VERSION_7
151 #define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
152 #define BRCMF_PCIE_SHARED_DMA_INDEX		0x10000
153 #define BRCMF_PCIE_SHARED_DMA_2B_IDX		0x100000
154 #define BRCMF_PCIE_SHARED_HOSTRDY_DB1		0x10000000
155 
156 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
157 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000
158 
159 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
160 #define BRCMF_SHARED_RING_BASE_OFFSET		52
161 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
162 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET	20
163 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
164 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
165 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET	48
166 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
167 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
168 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
169 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68
170 
171 #define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
172 #define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
173 #define BRCMF_RING_H2D_RING_MEM_OFFSET		4
174 #define BRCMF_RING_H2D_RING_STATE_OFFSET	8
175 
176 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET		8
177 #define BRCMF_RING_MAX_ITEM_OFFSET		4
178 #define BRCMF_RING_LEN_ITEMS_OFFSET		6
179 #define BRCMF_RING_MEM_SZ			16
180 #define BRCMF_RING_STATE_SZ			8
181 
182 #define BRCMF_DEF_MAX_RXBUFPOST			255
183 
184 #define BRCMF_CONSOLE_BUFADDR_OFFSET		8
185 #define BRCMF_CONSOLE_BUFSIZE_OFFSET		12
186 #define BRCMF_CONSOLE_WRITEIDX_OFFSET		16
187 
188 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN		8
189 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN		1024
190 
191 #define BRCMF_D2H_DEV_D3_ACK			0x00000001
192 #define BRCMF_D2H_DEV_DS_ENTER_REQ		0x00000002
193 #define BRCMF_D2H_DEV_DS_EXIT_NOTE		0x00000004
194 #define BRCMF_D2H_DEV_FWHALT			0x10000000
195 
196 #define BRCMF_H2D_HOST_D3_INFORM		0x00000001
197 #define BRCMF_H2D_HOST_DS_ACK			0x00000002
198 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE		0x00000008
199 #define BRCMF_H2D_HOST_D0_INFORM		0x00000010
200 
201 #define BRCMF_PCIE_MBDATA_TIMEOUT		msecs_to_jiffies(2000)
202 
203 #define BRCMF_PCIE_CFGREG_STATUS_CMD		0x4
204 #define BRCMF_PCIE_CFGREG_PM_CSR		0x4C
205 #define BRCMF_PCIE_CFGREG_MSI_CAP		0x58
206 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L		0x5C
207 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H		0x60
208 #define BRCMF_PCIE_CFGREG_MSI_DATA		0x64
209 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
210 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
211 #define BRCMF_PCIE_CFGREG_RBAR_CTRL		0x228
212 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
213 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
214 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
215 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3
216 
217 /* Magic number at a magic location to find RAM size */
218 #define BRCMF_RAMSIZE_MAGIC			0x534d4152	/* SMAR */
219 #define BRCMF_RAMSIZE_OFFSET			0x6c
220 
221 
222 struct brcmf_pcie_console {
223 	u32 base_addr;
224 	u32 buf_addr;
225 	u32 bufsize;
226 	u32 read_idx;
227 	u8 log_str[256];
228 	u8 log_idx;
229 };
230 
231 struct brcmf_pcie_shared_info {
232 	u32 tcm_base_address;
233 	u32 flags;
234 	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
235 	struct brcmf_pcie_ringbuf *flowrings;
236 	u16 max_rxbufpost;
237 	u16 max_flowrings;
238 	u16 max_submissionrings;
239 	u16 max_completionrings;
240 	u32 rx_dataoffset;
241 	u32 htod_mb_data_addr;
242 	u32 dtoh_mb_data_addr;
243 	u32 ring_info_addr;
244 	struct brcmf_pcie_console console;
245 	void *scratch;
246 	dma_addr_t scratch_dmahandle;
247 	void *ringupd;
248 	dma_addr_t ringupd_dmahandle;
249 	u8 version;
250 };
251 
252 struct brcmf_pcie_core_info {
253 	u32 base;
254 	u32 wrapbase;
255 };
256 
257 struct brcmf_pciedev_info {
258 	enum brcmf_pcie_state state;
259 	bool in_irq;
260 	struct pci_dev *pdev;
261 	char fw_name[BRCMF_FW_NAME_LEN];
262 	char nvram_name[BRCMF_FW_NAME_LEN];
263 	void __iomem *regs;
264 	void __iomem *tcm;
265 	u32 ram_base;
266 	u32 ram_size;
267 	struct brcmf_chip *ci;
268 	u32 coreid;
269 	struct brcmf_pcie_shared_info shared;
270 	wait_queue_head_t mbdata_resp_wait;
271 	bool mbdata_completed;
272 	bool irq_allocated;
273 	bool wowl_enabled;
274 	u8 dma_idx_sz;
275 	void *idxbuf;
276 	u32 idxbuf_sz;
277 	dma_addr_t idxbuf_dmahandle;
278 	u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
279 	void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
280 			  u16 value);
281 	struct brcmf_mp_device *settings;
282 };
283 
284 struct brcmf_pcie_ringbuf {
285 	struct brcmf_commonring commonring;
286 	dma_addr_t dma_handle;
287 	u32 w_idx_addr;
288 	u32 r_idx_addr;
289 	struct brcmf_pciedev_info *devinfo;
290 	u8 id;
291 };
292 
293 /**
294  * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
295  *
296  * @ringmem: dongle memory pointer to ring memory location
297  * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
298  * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
299  * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
300  * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
301  * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
302  * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
303  * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
304  * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
305  * @max_flowrings: maximum number of tx flow rings supported.
306  * @max_submissionrings: maximum number of submission rings (h2d) supported.
307  * @max_completionrings: maximum number of completion rings (d2h) supported.
308  */
309 struct brcmf_pcie_dhi_ringinfo {
310 	__le32			ringmem;
311 	__le32			h2d_w_idx_ptr;
312 	__le32			h2d_r_idx_ptr;
313 	__le32			d2h_w_idx_ptr;
314 	__le32			d2h_r_idx_ptr;
315 	struct msgbuf_buf_addr	h2d_w_idx_hostaddr;
316 	struct msgbuf_buf_addr	h2d_r_idx_hostaddr;
317 	struct msgbuf_buf_addr	d2h_w_idx_hostaddr;
318 	struct msgbuf_buf_addr	d2h_r_idx_hostaddr;
319 	__le16			max_flowrings;
320 	__le16			max_submissionrings;
321 	__le16			max_completionrings;
322 };
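
/* When the firmware advertises BRCMF_PCIE_SHARED_DMA_INDEX, the *_hostaddr
 * fields above are filled in by brcmf_pcie_init_ringbuffers() so the dongle
 * can DMA the ring indices straight into one host buffer. A sketch of that
 * buffer layout for an index size of sz bytes (2 or 4):
 *
 *	[h2d write: max_submissionrings * sz][h2d read: max_submissionrings * sz]
 *	[d2h write: max_completionrings * sz][d2h read: max_completionrings * sz]
 */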
323 
324 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
325 	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
326 	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
327 	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
328 	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
329 	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
330 };
331 
332 static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
333 	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
334 	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
335 	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
336 	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
337 	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
338 };
339 
340 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
341 	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
342 	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
343 	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
344 	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
345 	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
346 };
347 
348 
349 static u32
350 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
351 {
352 	void __iomem *address = devinfo->regs + reg_offset;
353 
354 	return (ioread32(address));
355 }
356 
357 
358 static void
359 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
360 		       u32 value)
361 {
362 	void __iomem *address = devinfo->regs + reg_offset;
363 
364 	iowrite32(value, address);
365 }
366 
367 
368 static u8
369 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
370 {
371 	void __iomem *address = devinfo->tcm + mem_offset;
372 
373 	return (ioread8(address));
374 }
375 
376 
377 static u16
378 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
379 {
380 	void __iomem *address = devinfo->tcm + mem_offset;
381 
382 	return (ioread16(address));
383 }
384 
385 
386 static void
387 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
388 		       u16 value)
389 {
390 	void __iomem *address = devinfo->tcm + mem_offset;
391 
392 	iowrite16(value, address);
393 }
394 
395 
396 static u16
397 brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
398 {
399 	u16 *address = devinfo->idxbuf + mem_offset;
400 
401 	return (*(address));
402 }
403 
404 
405 static void
406 brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
407 		     u16 value)
408 {
409 	u16 *address = devinfo->idxbuf + mem_offset;
410 
411 	*(address) = value;
412 }
413 
414 
415 static u32
416 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
417 {
418 	void __iomem *address = devinfo->tcm + mem_offset;
419 
420 	return (ioread32(address));
421 }
422 
423 
424 static void
425 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
426 		       u32 value)
427 {
428 	void __iomem *address = devinfo->tcm + mem_offset;
429 
430 	iowrite32(value, address);
431 }
432 
433 
434 static u32
435 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
436 {
437 	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
438 
439 	return (ioread32(addr));
440 }
441 
442 
443 static void
444 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
445 		       u32 value)
446 {
447 	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
448 
449 	iowrite32(value, addr);
450 }
451 
452 
453 static void
454 brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
455 			  void *srcaddr, u32 len)
456 {
457 	void __iomem *address = devinfo->tcm + mem_offset;
458 	__le32 *src32;
459 	__le16 *src16;
460 	u8 *src8;
461 
462 	if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
463 		if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
464 			src8 = (u8 *)srcaddr;
465 			while (len) {
466 				iowrite8(*src8, address);
467 				address++;
468 				src8++;
469 				len--;
470 			}
471 		} else {
472 			len = len / 2;
473 			src16 = (__le16 *)srcaddr;
474 			while (len) {
475 				iowrite16(le16_to_cpu(*src16), address);
476 				address += 2;
477 				src16++;
478 				len--;
479 			}
480 		}
481 	} else {
482 		len = len / 4;
483 		src32 = (__le32 *)srcaddr;
484 		while (len) {
485 			iowrite32(le32_to_cpu(*src32), address);
486 			address += 4;
487 			src32++;
488 			len--;
489 		}
490 	}
491 }
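
/* Usage sketch, illustrative only: the firmware download path below streams
 * the image into device RAM through this helper and the BAR1 (TCM) mapping:
 *
 *	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
 *				  (void *)fw->data, fw->size);
 *
 * The helper falls back from 32-bit to 16-bit or 8-bit MMIO accesses based on
 * the bit tests on the addresses and length above.
 */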
492 
493 
494 static void
495 brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
496 			  void *dstaddr, u32 len)
497 {
498 	void __iomem *address = devinfo->tcm + mem_offset;
499 	__le32 *dst32;
500 	__le16 *dst16;
501 	u8 *dst8;
502 
503 	if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
504 		if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
505 			dst8 = (u8 *)dstaddr;
506 			while (len) {
507 				*dst8 = ioread8(address);
508 				address++;
509 				dst8++;
510 				len--;
511 			}
512 		} else {
513 			len = len / 2;
514 			dst16 = (__le16 *)dstaddr;
515 			while (len) {
516 				*dst16 = cpu_to_le16(ioread16(address));
517 				address += 2;
518 				dst16++;
519 				len--;
520 			}
521 		}
522 	} else {
523 		len = len / 4;
524 		dst32 = (__le32 *)dstaddr;
525 		while (len) {
526 			*dst32 = cpu_to_le32(ioread32(address));
527 			address += 4;
528 			dst32++;
529 			len--;
530 		}
531 	}
532 }
533 
534 
535 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
536 		CHIPCREGOFFS(reg), value)
537 
538 
539 static void
540 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
541 {
542 	const struct pci_dev *pdev = devinfo->pdev;
543 	struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
544 	struct brcmf_core *core;
545 	u32 bar0_win;
546 
547 	core = brcmf_chip_get_core(devinfo->ci, coreid);
548 	if (core) {
549 		bar0_win = core->base;
550 		pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
551 		if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
552 					  &bar0_win) == 0) {
553 			if (bar0_win != core->base) {
554 				bar0_win = core->base;
555 				pci_write_config_dword(pdev,
556 						       BRCMF_PCIE_BAR0_WINDOW,
557 						       bar0_win);
558 			}
559 		}
560 	} else {
561 		brcmf_err(bus, "Unsupported core selected %x\n", coreid);
562 	}
563 }
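
/* Selecting a core slides the BAR0 window: the core's backplane base address
 * is written to config register BRCMF_PCIE_BAR0_WINDOW (0x80), after which
 * accesses through devinfo->regs land in that core's register space. The
 * read-back and rewrite above is a defensive retry in case the first config
 * write did not stick.
 */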
564 
565 
566 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
567 {
568 	struct brcmf_core *core;
569 	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
570 			     BRCMF_PCIE_CFGREG_PM_CSR,
571 			     BRCMF_PCIE_CFGREG_MSI_CAP,
572 			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
573 			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
574 			     BRCMF_PCIE_CFGREG_MSI_DATA,
575 			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
576 			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
577 			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
578 			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
579 			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
580 	u32 i;
581 	u32 val;
582 	u32 lsc;
583 
584 	if (!devinfo->ci)
585 		return;
586 
587 	/* Disable ASPM */
588 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
589 	pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
590 			      &lsc);
591 	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
592 	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
593 			       val);
594 
595 	/* Watchdog reset */
596 	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
597 	WRITECC32(devinfo, watchdog, 4);
598 	msleep(100);
599 
600 	/* Restore ASPM */
601 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
602 	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
603 			       lsc);
604 
605 	core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
606 	if (core->rev <= 13) {
607 		for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
608 			brcmf_pcie_write_reg32(devinfo,
609 					       BRCMF_PCIE_PCIE2REG_CONFIGADDR,
610 					       cfg_offset[i]);
611 			val = brcmf_pcie_read_reg32(devinfo,
612 				BRCMF_PCIE_PCIE2REG_CONFIGDATA);
613 			brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
614 				  cfg_offset[i], val);
615 			brcmf_pcie_write_reg32(devinfo,
616 					       BRCMF_PCIE_PCIE2REG_CONFIGDATA,
617 					       val);
618 		}
619 	}
620 }
621 
622 
623 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
624 {
625 	u32 config;
626 
627 	/* BAR1 window may not be sized properly */
628 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
629 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
630 	config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
631 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
632 
633 	device_wakeup_enable(&devinfo->pdev->dev);
634 }
635 
636 
637 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
638 {
639 	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
640 		brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
641 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
642 				       5);
643 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
644 				       0);
645 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
646 				       7);
647 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
648 				       0);
649 	}
650 	return 0;
651 }
652 
653 
654 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
655 					  u32 resetintr)
656 {
657 	struct brcmf_core *core;
658 
659 	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
660 		core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
661 		brcmf_chip_resetcore(core, 0, 0, 0);
662 	}
663 
664 	if (!brcmf_chip_set_active(devinfo->ci, resetintr))
665 		return -EINVAL;
666 	return 0;
667 }
668 
669 
670 static int
671 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
672 {
673 	struct brcmf_pcie_shared_info *shared;
674 	u32 addr;
675 	u32 cur_htod_mb_data;
676 	u32 i;
677 
678 	shared = &devinfo->shared;
679 	addr = shared->htod_mb_data_addr;
680 	cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
681 
682 	if (cur_htod_mb_data != 0)
683 		brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
684 			  cur_htod_mb_data);
685 
686 	i = 0;
687 	while (cur_htod_mb_data != 0) {
688 		msleep(10);
689 		i++;
690 		if (i > 100)
691 			return -EIO;
692 		cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
693 	}
694 
695 	brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
696 	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
697 	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
698 
699 	return 0;
700 }
701 
702 
703 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
704 {
705 	struct brcmf_pcie_shared_info *shared;
706 	u32 addr;
707 	u32 dtoh_mb_data;
708 
709 	shared = &devinfo->shared;
710 	addr = shared->dtoh_mb_data_addr;
711 	dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
712 
713 	if (!dtoh_mb_data)
714 		return;
715 
716 	brcmf_pcie_write_tcm32(devinfo, addr, 0);
717 
718 	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
719 	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ)  {
720 		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
721 		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
722 		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
723 	}
724 	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
725 		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
726 	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
727 		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
728 		devinfo->mbdata_completed = true;
729 		wake_up(&devinfo->mbdata_resp_wait);
730 	}
731 	if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
732 		brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
733 		brcmf_dev_coredump(&devinfo->pdev->dev);
734 	}
735 }
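
/* D2H mailbox values handled above, in short: a deep sleep request is
 * acknowledged immediately with BRCMF_H2D_HOST_DS_ACK, a D3 ack completes the
 * mailbox handshake and wakes any waiter on mbdata_resp_wait, and a firmware
 * halt triggers a coredump of the device memory.
 */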
736 
737 
738 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
739 {
740 	struct brcmf_pcie_shared_info *shared;
741 	struct brcmf_pcie_console *console;
742 	u32 addr;
743 
744 	shared = &devinfo->shared;
745 	console = &shared->console;
746 	addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
747 	console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
748 
749 	addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
750 	console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
751 	addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
752 	console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
753 
754 	brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
755 		  console->base_addr, console->buf_addr, console->bufsize);
756 }
757 
758 
759 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
760 {
761 	struct brcmf_pcie_console *console;
762 	u32 addr;
763 	u8 ch;
764 	u32 newidx;
765 
766 	if (!BRCMF_FWCON_ON())
767 		return;
768 
769 	console = &devinfo->shared.console;
770 	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
771 	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
772 	while (newidx != console->read_idx) {
773 		addr = console->buf_addr + console->read_idx;
774 		ch = brcmf_pcie_read_tcm8(devinfo, addr);
775 		console->read_idx++;
776 		if (console->read_idx == console->bufsize)
777 			console->read_idx = 0;
778 		if (ch == '\r')
779 			continue;
780 		console->log_str[console->log_idx] = ch;
781 		console->log_idx++;
782 		if ((ch != '\n') &&
783 		    (console->log_idx == (sizeof(console->log_str) - 2))) {
784 			ch = '\n';
785 			console->log_str[console->log_idx] = ch;
786 			console->log_idx++;
787 		}
788 		if (ch == '\n') {
789 			console->log_str[console->log_idx] = 0;
790 			pr_debug("CONSOLE: %s", console->log_str);
791 			console->log_idx = 0;
792 		}
793 	}
794 }
795 
796 
797 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
798 {
799 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
800 }
801 
802 
803 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
804 {
805 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
806 			       BRCMF_PCIE_MB_INT_D2H_DB |
807 			       BRCMF_PCIE_MB_INT_FN0_0 |
808 			       BRCMF_PCIE_MB_INT_FN0_1);
809 }
810 
811 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
812 {
813 	if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
814 		brcmf_pcie_write_reg32(devinfo,
815 				       BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
816 }
817 
818 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
819 {
820 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
821 
822 	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
823 		brcmf_pcie_intr_disable(devinfo);
824 		brcmf_dbg(PCIE, "Enter\n");
825 		return IRQ_WAKE_THREAD;
826 	}
827 	return IRQ_NONE;
828 }
829 
830 
831 static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
832 {
833 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
834 	u32 status;
835 
836 	devinfo->in_irq = true;
837 	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
838 	brcmf_dbg(PCIE, "Enter %x\n", status);
839 	if (status) {
840 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
841 				       status);
842 		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
843 			      BRCMF_PCIE_MB_INT_FN0_1))
844 			brcmf_pcie_handle_mb_data(devinfo);
845 		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
846 			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
847 				brcmf_proto_msgbuf_rx_trigger(
848 							&devinfo->pdev->dev);
849 		}
850 	}
851 	brcmf_pcie_bus_console_read(devinfo);
852 	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
853 		brcmf_pcie_intr_enable(devinfo);
854 	devinfo->in_irq = false;
855 	return IRQ_HANDLED;
856 }
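
/* Interrupt handling is split in two stages: brcmf_pcie_quick_check_isr()
 * runs in hard-IRQ context, masks further mailbox interrupts and defers to
 * the thread above, which acks BRCMF_PCIE_PCIE2REG_MAILBOXINT, dispatches
 * mailbox data and D2H doorbells, drains the firmware console and re-enables
 * interrupts while the bus is up.
 */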
857 
858 
859 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
860 {
861 	struct pci_dev *pdev = devinfo->pdev;
862 	struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
863 
864 	brcmf_pcie_intr_disable(devinfo);
865 
866 	brcmf_dbg(PCIE, "Enter\n");
867 
868 	pci_enable_msi(pdev);
869 	if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
870 				 brcmf_pcie_isr_thread, IRQF_SHARED,
871 				 "brcmf_pcie_intr", devinfo)) {
872 		pci_disable_msi(pdev);
873 		brcmf_err(bus, "Failed to request IRQ %d\n", pdev->irq);
874 		return -EIO;
875 	}
876 	devinfo->irq_allocated = true;
877 	return 0;
878 }
879 
880 
881 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
882 {
883 	struct pci_dev *pdev = devinfo->pdev;
884 	struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
885 	u32 status;
886 	u32 count;
887 
888 	if (!devinfo->irq_allocated)
889 		return;
890 
891 	brcmf_pcie_intr_disable(devinfo);
892 	free_irq(pdev->irq, devinfo);
893 	pci_disable_msi(pdev);
894 
895 	msleep(50);
896 	count = 0;
897 	while ((devinfo->in_irq) && (count < 20)) {
898 		msleep(50);
899 		count++;
900 	}
901 	if (devinfo->in_irq)
902 		brcmf_err(bus, "Still in IRQ (processing) !!!\n");
903 
904 	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
905 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
906 
907 	devinfo->irq_allocated = false;
908 }
909 
910 
911 static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
912 {
913 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
914 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
915 	struct brcmf_commonring *commonring = &ring->commonring;
916 
917 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
918 		return -EIO;
919 
920 	brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
921 		  commonring->w_ptr, ring->id);
922 
923 	devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
924 
925 	return 0;
926 }
927 
928 
929 static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
930 {
931 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
932 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
933 	struct brcmf_commonring *commonring = &ring->commonring;
934 
935 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
936 		return -EIO;
937 
938 	brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
939 		  commonring->r_ptr, ring->id);
940 
941 	devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
942 
943 	return 0;
944 }
945 
946 
947 static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
948 {
949 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
950 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
951 
952 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
953 		return -EIO;
954 
955 	brcmf_dbg(PCIE, "RING !\n");
956 	/* Any arbitrary value will do, let's use 1 */
957 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
958 
959 	return 0;
960 }
961 
962 
963 static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
964 {
965 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
966 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
967 	struct brcmf_commonring *commonring = &ring->commonring;
968 
969 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
970 		return -EIO;
971 
972 	commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
973 
974 	brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
975 		  commonring->w_ptr, ring->id);
976 
977 	return 0;
978 }
979 
980 
981 static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
982 {
983 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
984 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
985 	struct brcmf_commonring *commonring = &ring->commonring;
986 
987 	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
988 		return -EIO;
989 
990 	commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
991 
992 	brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
993 		  commonring->r_ptr, ring->id);
994 
995 	return 0;
996 }
997 
998 
999 static void *
1000 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
1001 				     u32 size, u32 tcm_dma_phys_addr,
1002 				     dma_addr_t *dma_handle)
1003 {
1004 	void *ring;
1005 	u64 address;
1006 
1007 	ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
1008 				  GFP_KERNEL);
1009 	if (!ring)
1010 		return NULL;
1011 
1012 	address = (u64)*dma_handle;
1013 	brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
1014 			       address & 0xffffffff);
1015 	brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
1016 
1017 	memset(ring, 0, size);
1018 
1019 	return (ring);
1020 }
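
/* The 64-bit DMA handle is split into two 32-bit TCM words: the low word at
 * tcm_dma_phys_addr and the high word at tcm_dma_phys_addr + 4. Worked
 * example, illustrative only: a handle of 0x0000000123456000 is written as
 * 0x23456000 followed by 0x00000001, which is how the dongle learns the host
 * address of the ring buffer.
 */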
1021 
1022 
1023 static struct brcmf_pcie_ringbuf *
1024 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
1025 			      u32 tcm_ring_phys_addr)
1026 {
1027 	void *dma_buf;
1028 	dma_addr_t dma_handle;
1029 	struct brcmf_pcie_ringbuf *ring;
1030 	u32 size;
1031 	u32 addr;
1032 	const u32 *ring_itemsize_array;
1033 
1034 	if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
1035 		ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
1036 	else
1037 		ring_itemsize_array = brcmf_ring_itemsize;
1038 
1039 	size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
1040 	dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
1041 			tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
1042 			&dma_handle);
1043 	if (!dma_buf)
1044 		return NULL;
1045 
1046 	addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
1047 	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
1048 	addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
1049 	brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);
1050 
1051 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1052 	if (!ring) {
1053 		dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1054 				  dma_handle);
1055 		return NULL;
1056 	}
1057 	brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1058 				ring_itemsize_array[ring_id], dma_buf);
1059 	ring->dma_handle = dma_handle;
1060 	ring->devinfo = devinfo;
1061 	brcmf_commonring_register_cb(&ring->commonring,
1062 				     brcmf_pcie_ring_mb_ring_bell,
1063 				     brcmf_pcie_ring_mb_update_rptr,
1064 				     brcmf_pcie_ring_mb_update_wptr,
1065 				     brcmf_pcie_ring_mb_write_rptr,
1066 				     brcmf_pcie_ring_mb_write_wptr, ring);
1067 
1068 	return (ring);
1069 }
1070 
1071 
1072 static void brcmf_pcie_release_ringbuffer(struct device *dev,
1073 					  struct brcmf_pcie_ringbuf *ring)
1074 {
1075 	void *dma_buf;
1076 	u32 size;
1077 
1078 	if (!ring)
1079 		return;
1080 
1081 	dma_buf = ring->commonring.buf_addr;
1082 	if (dma_buf) {
1083 		size = ring->commonring.depth * ring->commonring.item_len;
1084 		dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1085 	}
1086 	kfree(ring);
1087 }
1088 
1089 
1090 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1091 {
1092 	u32 i;
1093 
1094 	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1095 		brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1096 					      devinfo->shared.commonrings[i]);
1097 		devinfo->shared.commonrings[i] = NULL;
1098 	}
1099 	kfree(devinfo->shared.flowrings);
1100 	devinfo->shared.flowrings = NULL;
1101 	if (devinfo->idxbuf) {
1102 		dma_free_coherent(&devinfo->pdev->dev,
1103 				  devinfo->idxbuf_sz,
1104 				  devinfo->idxbuf,
1105 				  devinfo->idxbuf_dmahandle);
1106 		devinfo->idxbuf = NULL;
1107 	}
1108 }
1109 
1110 
1111 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1112 {
1113 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1114 	struct brcmf_pcie_ringbuf *ring;
1115 	struct brcmf_pcie_ringbuf *rings;
1116 	u32 d2h_w_idx_ptr;
1117 	u32 d2h_r_idx_ptr;
1118 	u32 h2d_w_idx_ptr;
1119 	u32 h2d_r_idx_ptr;
1120 	u32 ring_mem_ptr;
1121 	u32 i;
1122 	u64 address;
1123 	u32 bufsz;
1124 	u8 idx_offset;
1125 	struct brcmf_pcie_dhi_ringinfo ringinfo;
1126 	u16 max_flowrings;
1127 	u16 max_submissionrings;
1128 	u16 max_completionrings;
1129 
1130 	memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
1131 		      sizeof(ringinfo));
1132 	if (devinfo->shared.version >= 6) {
1133 		max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
1134 		max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
1135 		max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
1136 	} else {
1137 		max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
1138 		max_flowrings = max_submissionrings -
1139 				BRCMF_NROF_H2D_COMMON_MSGRINGS;
1140 		max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
1141 	}
1142 
1143 	if (devinfo->dma_idx_sz != 0) {
1144 		bufsz = (max_submissionrings + max_completionrings) *
1145 			devinfo->dma_idx_sz * 2;
1146 		devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
1147 						     &devinfo->idxbuf_dmahandle,
1148 						     GFP_KERNEL);
1149 		if (!devinfo->idxbuf)
1150 			devinfo->dma_idx_sz = 0;
1151 	}
1152 
1153 	if (devinfo->dma_idx_sz == 0) {
1154 		d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
1155 		d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
1156 		h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
1157 		h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
1158 		idx_offset = sizeof(u32);
1159 		devinfo->write_ptr = brcmf_pcie_write_tcm16;
1160 		devinfo->read_ptr = brcmf_pcie_read_tcm16;
1161 		brcmf_dbg(PCIE, "Using TCM indices\n");
1162 	} else {
1163 		memset(devinfo->idxbuf, 0, bufsz);
1164 		devinfo->idxbuf_sz = bufsz;
1165 		idx_offset = devinfo->dma_idx_sz;
1166 		devinfo->write_ptr = brcmf_pcie_write_idx;
1167 		devinfo->read_ptr = brcmf_pcie_read_idx;
1168 
1169 		h2d_w_idx_ptr = 0;
1170 		address = (u64)devinfo->idxbuf_dmahandle;
1171 		ringinfo.h2d_w_idx_hostaddr.low_addr =
1172 			cpu_to_le32(address & 0xffffffff);
1173 		ringinfo.h2d_w_idx_hostaddr.high_addr =
1174 			cpu_to_le32(address >> 32);
1175 
1176 		h2d_r_idx_ptr = h2d_w_idx_ptr +
1177 				max_submissionrings * idx_offset;
1178 		address += max_submissionrings * idx_offset;
1179 		ringinfo.h2d_r_idx_hostaddr.low_addr =
1180 			cpu_to_le32(address & 0xffffffff);
1181 		ringinfo.h2d_r_idx_hostaddr.high_addr =
1182 			cpu_to_le32(address >> 32);
1183 
1184 		d2h_w_idx_ptr = h2d_r_idx_ptr +
1185 				max_submissionrings * idx_offset;
1186 		address += max_submissionrings * idx_offset;
1187 		ringinfo.d2h_w_idx_hostaddr.low_addr =
1188 			cpu_to_le32(address & 0xffffffff);
1189 		ringinfo.d2h_w_idx_hostaddr.high_addr =
1190 			cpu_to_le32(address >> 32);
1191 
1192 		d2h_r_idx_ptr = d2h_w_idx_ptr +
1193 				max_completionrings * idx_offset;
1194 		address += max_completionrings * idx_offset;
1195 		ringinfo.d2h_r_idx_hostaddr.low_addr =
1196 			cpu_to_le32(address & 0xffffffff);
1197 		ringinfo.d2h_r_idx_hostaddr.high_addr =
1198 			cpu_to_le32(address >> 32);
1199 
1200 		memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
1201 			    &ringinfo, sizeof(ringinfo));
1202 		brcmf_dbg(PCIE, "Using host memory indices\n");
1203 	}
1204 
1205 	ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
1206 
1207 	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1208 		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1209 		if (!ring)
1210 			goto fail;
1211 		ring->w_idx_addr = h2d_w_idx_ptr;
1212 		ring->r_idx_addr = h2d_r_idx_ptr;
1213 		ring->id = i;
1214 		devinfo->shared.commonrings[i] = ring;
1215 
1216 		h2d_w_idx_ptr += idx_offset;
1217 		h2d_r_idx_ptr += idx_offset;
1218 		ring_mem_ptr += BRCMF_RING_MEM_SZ;
1219 	}
1220 
1221 	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1222 	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1223 		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1224 		if (!ring)
1225 			goto fail;
1226 		ring->w_idx_addr = d2h_w_idx_ptr;
1227 		ring->r_idx_addr = d2h_r_idx_ptr;
1228 		ring->id = i;
1229 		devinfo->shared.commonrings[i] = ring;
1230 
1231 		d2h_w_idx_ptr += idx_offset;
1232 		d2h_r_idx_ptr += idx_offset;
1233 		ring_mem_ptr += BRCMF_RING_MEM_SZ;
1234 	}
1235 
1236 	devinfo->shared.max_flowrings = max_flowrings;
1237 	devinfo->shared.max_submissionrings = max_submissionrings;
1238 	devinfo->shared.max_completionrings = max_completionrings;
1239 	rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
1240 	if (!rings)
1241 		goto fail;
1242 
1243 	brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
1244 
1245 	for (i = 0; i < max_flowrings; i++) {
1246 		ring = &rings[i];
1247 		ring->devinfo = devinfo;
1248 		ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1249 		brcmf_commonring_register_cb(&ring->commonring,
1250 					     brcmf_pcie_ring_mb_ring_bell,
1251 					     brcmf_pcie_ring_mb_update_rptr,
1252 					     brcmf_pcie_ring_mb_update_wptr,
1253 					     brcmf_pcie_ring_mb_write_rptr,
1254 					     brcmf_pcie_ring_mb_write_wptr,
1255 					     ring);
1256 		ring->w_idx_addr = h2d_w_idx_ptr;
1257 		ring->r_idx_addr = h2d_r_idx_ptr;
1258 		h2d_w_idx_ptr += idx_offset;
1259 		h2d_r_idx_ptr += idx_offset;
1260 	}
1261 	devinfo->shared.flowrings = rings;
1262 
1263 	return 0;
1264 
1265 fail:
1266 	brcmf_err(bus, "Allocating ring buffers failed\n");
1267 	brcmf_pcie_release_ringbuffers(devinfo);
1268 	return -ENOMEM;
1269 }
1270 
1271 
1272 static void
1273 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1274 {
1275 	if (devinfo->shared.scratch)
1276 		dma_free_coherent(&devinfo->pdev->dev,
1277 				  BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1278 				  devinfo->shared.scratch,
1279 				  devinfo->shared.scratch_dmahandle);
1280 	if (devinfo->shared.ringupd)
1281 		dma_free_coherent(&devinfo->pdev->dev,
1282 				  BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1283 				  devinfo->shared.ringupd,
1284 				  devinfo->shared.ringupd_dmahandle);
1285 }
1286 
1287 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1288 {
1289 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1290 	u64 address;
1291 	u32 addr;
1292 
1293 	devinfo->shared.scratch =
1294 		dma_alloc_coherent(&devinfo->pdev->dev,
1295 				   BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1296 				   &devinfo->shared.scratch_dmahandle,
1297 				   GFP_KERNEL);
1298 	if (!devinfo->shared.scratch)
1299 		goto fail;
1300 
1301 	addr = devinfo->shared.tcm_base_address +
1302 	       BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1303 	address = (u64)devinfo->shared.scratch_dmahandle;
1304 	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1305 	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1306 	addr = devinfo->shared.tcm_base_address +
1307 	       BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1308 	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1309 
1310 	devinfo->shared.ringupd =
1311 		dma_alloc_coherent(&devinfo->pdev->dev,
1312 				   BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1313 				   &devinfo->shared.ringupd_dmahandle,
1314 				   GFP_KERNEL);
1315 	if (!devinfo->shared.ringupd)
1316 		goto fail;
1317 
1318 	addr = devinfo->shared.tcm_base_address +
1319 	       BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1320 	address = (u64)devinfo->shared.ringupd_dmahandle;
1321 	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1322 	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1323 	addr = devinfo->shared.tcm_base_address +
1324 	       BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1325 	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1326 	return 0;
1327 
1328 fail:
1329 	brcmf_err(bus, "Allocating scratch buffers failed\n");
1330 	brcmf_pcie_release_scratchbuffers(devinfo);
1331 	return -ENOMEM;
1332 }
1333 
1334 
1335 static void brcmf_pcie_down(struct device *dev)
1336 {
1337 }
1338 
1339 
1340 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1341 {
1342 	return 0;
1343 }
1344 
1345 
1346 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1347 				uint len)
1348 {
1349 	return 0;
1350 }
1351 
1352 
1353 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1354 				uint len)
1355 {
1356 	return 0;
1357 }
1358 
1359 
1360 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1361 {
1362 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1363 	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1364 	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1365 
1366 	brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1367 	devinfo->wowl_enabled = enabled;
1368 }
1369 
1370 
1371 static size_t brcmf_pcie_get_ramsize(struct device *dev)
1372 {
1373 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1374 	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1375 	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1376 
1377 	return devinfo->ci->ramsize - devinfo->ci->srsize;
1378 }
1379 
1380 
1381 static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
1382 {
1383 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1384 	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1385 	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1386 
1387 	brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
1388 	brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
1389 	return 0;
1390 }
1391 
1392 static
1393 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
1394 {
1395 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1396 	struct brcmf_fw_request *fwreq;
1397 	struct brcmf_fw_name fwnames[] = {
1398 		{ ext, fw_name },
1399 	};
1400 
1401 	fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
1402 				       brcmf_pcie_fwnames,
1403 				       ARRAY_SIZE(brcmf_pcie_fwnames),
1404 				       fwnames, ARRAY_SIZE(fwnames));
1405 	if (!fwreq)
1406 		return -ENOMEM;
1407 
1408 	kfree(fwreq);
1409 	return 0;
1410 }
1411 
1412 static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1413 	.txdata = brcmf_pcie_tx,
1414 	.stop = brcmf_pcie_down,
1415 	.txctl = brcmf_pcie_tx_ctlpkt,
1416 	.rxctl = brcmf_pcie_rx_ctlpkt,
1417 	.wowl_config = brcmf_pcie_wowl_config,
1418 	.get_ramsize = brcmf_pcie_get_ramsize,
1419 	.get_memdump = brcmf_pcie_get_memdump,
1420 	.get_fwname = brcmf_pcie_get_fwname,
1421 };
1422 
1423 
1424 static void
1425 brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
1426 			  u32 data_len)
1427 {
1428 	__le32 *field;
1429 	u32 newsize;
1430 
1431 	if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
1432 		return;
1433 
1434 	field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
1435 	if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
1436 		return;
1437 	field++;
1438 	newsize = le32_to_cpup(field);
1439 
1440 	brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
1441 		  newsize);
1442 	devinfo->ci->ramsize = newsize;
1443 }
1444 
1445 
1446 static int
1447 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1448 			       u32 sharedram_addr)
1449 {
1450 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1451 	struct brcmf_pcie_shared_info *shared;
1452 	u32 addr;
1453 
1454 	shared = &devinfo->shared;
1455 	shared->tcm_base_address = sharedram_addr;
1456 
1457 	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1458 	shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
1459 	brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
1460 	if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1461 	    (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1462 		brcmf_err(bus, "Unsupported PCIE version %d\n",
1463 			  shared->version);
1464 		return -EINVAL;
1465 	}
1466 
1467 	/* check whether the firmware supports DMA indices */
1468 	if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
1469 		if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
1470 			devinfo->dma_idx_sz = sizeof(u16);
1471 		else
1472 			devinfo->dma_idx_sz = sizeof(u32);
1473 	}
1474 
1475 	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1476 	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1477 	if (shared->max_rxbufpost == 0)
1478 		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1479 
1480 	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1481 	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1482 
1483 	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1484 	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1485 
1486 	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1487 	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1488 
1489 	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1490 	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1491 
1492 	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1493 		  shared->max_rxbufpost, shared->rx_dataoffset);
1494 
1495 	brcmf_pcie_bus_console_init(devinfo);
1496 
1497 	return 0;
1498 }
1499 
1500 
1501 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1502 					const struct firmware *fw, void *nvram,
1503 					u32 nvram_len)
1504 {
1505 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1506 	u32 sharedram_addr;
1507 	u32 sharedram_addr_written;
1508 	u32 loop_counter;
1509 	int err;
1510 	u32 address;
1511 	u32 resetintr;
1512 
1513 	brcmf_dbg(PCIE, "Halt ARM.\n");
1514 	err = brcmf_pcie_enter_download_state(devinfo);
1515 	if (err)
1516 		return err;
1517 
1518 	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1519 	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
1520 				  (void *)fw->data, fw->size);
1521 
1522 	resetintr = get_unaligned_le32(fw->data);
1523 	release_firmware(fw);
1524 
1525 	/* Clear the last 4 bytes of RAM; the firmware writes the shared
1526 	 * area address there, which identifies when the FW is running.
1527 	 */
1528 	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1529 
1530 	if (nvram) {
1531 		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1532 		address = devinfo->ci->rambase + devinfo->ci->ramsize -
1533 			  nvram_len;
1534 		brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
1535 		brcmf_fw_nvram_free(nvram);
1536 	} else {
1537 		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1538 			  devinfo->nvram_name);
1539 	}
1540 
1541 	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1542 						       devinfo->ci->ramsize -
1543 						       4);
1544 	brcmf_dbg(PCIE, "Bring ARM in running state\n");
1545 	err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1546 	if (err)
1547 		return err;
1548 
1549 	brcmf_dbg(PCIE, "Wait for FW init\n");
1550 	sharedram_addr = sharedram_addr_written;
1551 	loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1552 	while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1553 		msleep(50);
1554 		sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1555 						       devinfo->ci->ramsize -
1556 						       4);
1557 		loop_counter--;
1558 	}
1559 	if (sharedram_addr == sharedram_addr_written) {
1560 		brcmf_err(bus, "FW failed to initialize\n");
1561 		return -ENODEV;
1562 	}
1563 	if (sharedram_addr < devinfo->ci->rambase ||
1564 	    sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
1565 		brcmf_err(bus, "Invalid shared RAM address 0x%08x\n",
1566 			  sharedram_addr);
1567 		return -ENODEV;
1568 	}
1569 	brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1570 
1571 	return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1572 }
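
/* The download handshake above, in short: halt the ARM, copy the firmware to
 * rambase, clear the last word of RAM, place NVRAM at the top of RAM when
 * available, restart the ARM with the reset vector taken from the first
 * firmware word, then poll that last RAM word in 50 ms steps (up to
 * BRCMF_PCIE_FW_UP_TIMEOUT) until the firmware replaces it with the address
 * of the shared area.
 */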
1573 
1574 
1575 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1576 {
1577 	struct pci_dev *pdev = devinfo->pdev;
1578 	struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
1579 	int err;
1580 	phys_addr_t  bar0_addr, bar1_addr;
1581 	ulong bar1_size;
1582 
1583 	err = pci_enable_device(pdev);
1584 	if (err) {
1585 		brcmf_err(bus, "pci_enable_device failed err=%d\n", err);
1586 		return err;
1587 	}
1588 
1589 	pci_set_master(pdev);
1590 
1591 	/* Bar-0 mapped address */
1592 	bar0_addr = pci_resource_start(pdev, 0);
1593 	/* Bar-1 mapped address */
1594 	bar1_addr = pci_resource_start(pdev, 2);
1595 	/* read Bar-1 mapped memory range */
1596 	bar1_size = pci_resource_len(pdev, 2);
1597 	if ((bar1_size == 0) || (bar1_addr == 0)) {
1598 		brcmf_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1599 			  bar1_size, (unsigned long long)bar1_addr);
1600 		return -EINVAL;
1601 	}
1602 
1603 	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1604 	devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
1605 
1606 	if (!devinfo->regs || !devinfo->tcm) {
1607 		brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
1608 			  devinfo->tcm);
1609 		return -EINVAL;
1610 	}
1611 	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1612 		  devinfo->regs, (unsigned long long)bar0_addr);
1613 	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
1614 		  devinfo->tcm, (unsigned long long)bar1_addr,
1615 		  (unsigned int)bar1_size);
1616 
1617 	return 0;
1618 }
1619 
1620 
1621 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1622 {
1623 	if (devinfo->tcm)
1624 		iounmap(devinfo->tcm);
1625 	if (devinfo->regs)
1626 		iounmap(devinfo->regs);
1627 
1628 	pci_disable_device(devinfo->pdev);
1629 }
1630 
1631 
1632 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1633 {
1634 	u32 ret_addr;
1635 
1636 	ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1637 	addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1638 	pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1639 
1640 	return ret_addr;
1641 }
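
/* Worked example, illustrative only: for addr 0x18004008 the BAR0 window is
 * programmed to 0x18004000 and 0x008 is returned, so the subsequent
 * read32/write32 through devinfo->regs hits backplane address 0x18004008.
 * The window is aligned to BRCMF_PCIE_BAR0_REG_SIZE (0x1000).
 */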
1642 
1643 
1644 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1645 {
1646 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1647 
1648 	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1649 	return brcmf_pcie_read_reg32(devinfo, addr);
1650 }
1651 
1652 
1653 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1654 {
1655 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1656 
1657 	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1658 	brcmf_pcie_write_reg32(devinfo, addr, value);
1659 }
1660 
1661 
1662 static int brcmf_pcie_buscoreprep(void *ctx)
1663 {
1664 	return brcmf_pcie_get_resource(ctx);
1665 }
1666 
1667 
1668 static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
1669 {
1670 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1671 	u32 val;
1672 
1673 	devinfo->ci = chip;
1674 	brcmf_pcie_reset_device(devinfo);
1675 
1676 	val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
1677 	if (val != 0xffffffff)
1678 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
1679 				       val);
1680 
1681 	return 0;
1682 }
1683 
1684 
1685 static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
1686 					u32 rstvec)
1687 {
1688 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1689 
1690 	brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
1691 }
1692 
1693 
1694 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
1695 	.prepare = brcmf_pcie_buscoreprep,
1696 	.reset = brcmf_pcie_buscore_reset,
1697 	.activate = brcmf_pcie_buscore_activate,
1698 	.read32 = brcmf_pcie_buscore_read32,
1699 	.write32 = brcmf_pcie_buscore_write32,
1700 };
1701 
1702 #define BRCMF_PCIE_FW_CODE	0
1703 #define BRCMF_PCIE_FW_NVRAM	1
1704 
1705 static void brcmf_pcie_setup(struct device *dev, int ret,
1706 			     struct brcmf_fw_request *fwreq)
1707 {
1708 	const struct firmware *fw;
1709 	void *nvram;
1710 	struct brcmf_bus *bus;
1711 	struct brcmf_pciedev *pcie_bus_dev;
1712 	struct brcmf_pciedev_info *devinfo;
1713 	struct brcmf_commonring **flowrings;
1714 	u32 i, nvram_len;
1715 
1716 	/* check firmware loading result */
1717 	if (ret)
1718 		goto fail;
1719 
1720 	bus = dev_get_drvdata(dev);
1721 	pcie_bus_dev = bus->bus_priv.pcie;
1722 	devinfo = pcie_bus_dev->devinfo;
1723 	brcmf_pcie_attach(devinfo);
1724 
1725 	fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
1726 	nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
1727 	nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
1728 	kfree(fwreq);
1729 
1730 	/* Some firmware images define the device memory size inside the
1731 	 * firmware itself, because part of the device memory is shared and
1732 	 * the division is determined by the firmware. Parse the firmware and
1733 	 * adjust the chip memory size now.
1734 	 */
1735 	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);
1736 
1737 	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
1738 	if (ret)
1739 		goto fail;
1740 
1741 	devinfo->state = BRCMFMAC_PCIE_STATE_UP;
1742 
1743 	ret = brcmf_pcie_init_ringbuffers(devinfo);
1744 	if (ret)
1745 		goto fail;
1746 
1747 	ret = brcmf_pcie_init_scratchbuffers(devinfo);
1748 	if (ret)
1749 		goto fail;
1750 
1751 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1752 	ret = brcmf_pcie_request_irq(devinfo);
1753 	if (ret)
1754 		goto fail;
1755 
1756 	/* Hook the commonrings into the bus structure. */
1757 	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
1758 		bus->msgbuf->commonrings[i] =
1759 				&devinfo->shared.commonrings[i]->commonring;
1760 
1761 	flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
1762 			    GFP_KERNEL);
1763 	if (!flowrings)
1764 		goto fail;
1765 
1766 	for (i = 0; i < devinfo->shared.max_flowrings; i++)
1767 		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
1768 	bus->msgbuf->flowrings = flowrings;
1769 
1770 	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
1771 	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
1772 	bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;
1773 
1774 	init_waitqueue_head(&devinfo->mbdata_resp_wait);
1775 
1776 	brcmf_pcie_intr_enable(devinfo);
1777 	brcmf_pcie_hostready(devinfo);
1778 	if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0)
1779 		return;
1780 
1781 	brcmf_pcie_bus_console_read(devinfo);
1782 
1783 fail:
1784 	device_release_driver(dev);
1785 }
1786 
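/* Build the firmware request for this chip: a mandatory firmware binary
 * (.bin) and an optional NVRAM file (.txt), selected by chip id and
 * revision, board type and PCI domain/bus number.
 */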
1787 static struct brcmf_fw_request *
1788 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
1789 {
1790 	struct brcmf_fw_request *fwreq;
1791 	struct brcmf_fw_name fwnames[] = {
1792 		{ ".bin", devinfo->fw_name },
1793 		{ ".txt", devinfo->nvram_name },
1794 	};
1795 
1796 	fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
1797 				       brcmf_pcie_fwnames,
1798 				       ARRAY_SIZE(brcmf_pcie_fwnames),
1799 				       fwnames, ARRAY_SIZE(fwnames));
1800 	if (!fwreq)
1801 		return NULL;
1802 
1803 	fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
1804 	fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
1805 	fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
1806 	fwreq->board_type = devinfo->settings->board_type;
1807 	/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
1808 	fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
1809 	fwreq->bus_nr = devinfo->pdev->bus->number;
1810 
1811 	return fwreq;
1812 }
1813 
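/* PCI probe: allocate the per-device and bus structures, attach the chip,
 * and kick off the asynchronous firmware request which completes in
 * brcmf_pcie_setup().
 */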
1814 static int
1815 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1816 {
1817 	int ret;
1818 	struct brcmf_fw_request *fwreq;
1819 	struct brcmf_pciedev_info *devinfo;
1820 	struct brcmf_pciedev *pcie_bus_dev;
1821 	struct brcmf_bus *bus;
1822 
1823 	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
1824 
1825 	ret = -ENOMEM;
1826 	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
1827 	if (devinfo == NULL)
1828 		return ret;
1829 
1830 	devinfo->pdev = pdev;
1831 	pcie_bus_dev = NULL;
1832 	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
1833 	if (IS_ERR(devinfo->ci)) {
1834 		ret = PTR_ERR(devinfo->ci);
1835 		devinfo->ci = NULL;
1836 		goto fail;
1837 	}
1838 
1839 	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
1840 	if (pcie_bus_dev == NULL) {
1841 		ret = -ENOMEM;
1842 		goto fail;
1843 	}
1844 
1845 	devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
1846 						   BRCMF_BUSTYPE_PCIE,
1847 						   devinfo->ci->chip,
1848 						   devinfo->ci->chiprev);
1849 	if (!devinfo->settings) {
1850 		ret = -ENOMEM;
1851 		goto fail;
1852 	}
1853 
1854 	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
1855 	if (!bus) {
1856 		ret = -ENOMEM;
1857 		goto fail;
1858 	}
1859 	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
1860 	if (!bus->msgbuf) {
1861 		ret = -ENOMEM;
1862 		kfree(bus);
1863 		goto fail;
1864 	}
1865 
1866 	/* hook it all together. */
1867 	pcie_bus_dev->devinfo = devinfo;
1868 	pcie_bus_dev->bus = bus;
1869 	bus->dev = &pdev->dev;
1870 	bus->bus_priv.pcie = pcie_bus_dev;
1871 	bus->ops = &brcmf_pcie_bus_ops;
1872 	bus->proto_type = BRCMF_PROTO_MSGBUF;
1873 	bus->chip = devinfo->coreid;
1874 	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
1875 	dev_set_drvdata(&pdev->dev, bus);
1876 
1877 	fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1878 	if (!fwreq) {
1879 		ret = -ENOMEM;
1880 		goto fail_bus;
1881 	}
1882 
1883 	ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
1884 	if (ret < 0) {
1885 		kfree(fwreq);
1886 		goto fail_bus;
1887 	}
1888 	return 0;
1889 
1890 fail_bus:
1891 	kfree(bus->msgbuf);
1892 	kfree(bus);
1893 fail:
1894 	brcmf_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device);
1895 	brcmf_pcie_release_resource(devinfo);
1896 	if (devinfo->ci)
1897 		brcmf_chip_detach(devinfo->ci);
1898 	if (devinfo->settings)
1899 		brcmf_release_module_param(devinfo->settings);
1900 	kfree(pcie_bus_dev);
1901 	kfree(devinfo);
1902 	return ret;
1903 }
1904 
1905 
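/* PCI remove: bring the bus down, detach the higher layers and release
 * the interrupt, ring buffers, scratch buffers and PCIe resources.
 */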
1906 static void
1907 brcmf_pcie_remove(struct pci_dev *pdev)
1908 {
1909 	struct brcmf_pciedev_info *devinfo;
1910 	struct brcmf_bus *bus;
1911 
1912 	brcmf_dbg(PCIE, "Enter\n");
1913 
1914 	bus = dev_get_drvdata(&pdev->dev);
1915 	if (bus == NULL)
1916 		return;
1917 
1918 	devinfo = bus->bus_priv.pcie->devinfo;
1919 
1920 	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
1921 	if (devinfo->ci)
1922 		brcmf_pcie_intr_disable(devinfo);
1923 
1924 	brcmf_detach(&pdev->dev);
1925 
1926 	kfree(bus->bus_priv.pcie);
1927 	kfree(bus->msgbuf->flowrings);
1928 	kfree(bus->msgbuf);
1929 	kfree(bus);
1930 
1931 	brcmf_pcie_release_irq(devinfo);
1932 	brcmf_pcie_release_scratchbuffers(devinfo);
1933 	brcmf_pcie_release_ringbuffers(devinfo);
1934 	brcmf_pcie_reset_device(devinfo);
1935 	brcmf_pcie_release_resource(devinfo);
1936 
1937 	if (devinfo->ci)
1938 		brcmf_chip_detach(devinfo->ci);
1939 	if (devinfo->settings)
1940 		brcmf_release_module_param(devinfo->settings);
1941 
1942 	kfree(devinfo);
1943 	dev_set_drvdata(&pdev->dev, NULL);
1944 }
1945 
1946 
1947 #ifdef CONFIG_PM
1948 
1949 
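/* Suspend handler: bring the bus down and inform the firmware of the D3
 * transition, waiting (with timeout) for its mailbox acknowledgement.
 */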
1950 static int brcmf_pcie_pm_enter_D3(struct device *dev)
1951 {
1952 	struct brcmf_pciedev_info *devinfo;
1953 	struct brcmf_bus *bus;
1954 
1955 	brcmf_dbg(PCIE, "Enter\n");
1956 
1957 	bus = dev_get_drvdata(dev);
1958 	devinfo = bus->bus_priv.pcie->devinfo;
1959 
1960 	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
1961 
1962 	devinfo->mbdata_completed = false;
1963 	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
1964 
1965 	wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
1966 			   BRCMF_PCIE_MBDATA_TIMEOUT);
1967 	if (!devinfo->mbdata_completed) {
1968 		brcmf_err(bus, "Timeout on response for entering D3 substate\n");
1969 		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
1970 		return -EIO;
1971 	}
1972 
1973 	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
1974 
1975 	return 0;
1976 }
1977 
1978 
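/* Resume handler: if the device kept its state (interrupt mask still set)
 * perform a hot resume, otherwise tear the device down and re-probe it.
 */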
1979 static int brcmf_pcie_pm_leave_D3(struct device *dev)
1980 {
1981 	struct brcmf_pciedev_info *devinfo;
1982 	struct brcmf_bus *bus;
1983 	struct pci_dev *pdev;
1984 	int err;
1985 
1986 	brcmf_dbg(PCIE, "Enter\n");
1987 
1988 	bus = dev_get_drvdata(dev);
1989 	devinfo = bus->bus_priv.pcie->devinfo;
1990 	brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
1991 
1992 	/* Check if the device is still up and running; if so, we are ready */
1993 	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
1994 		brcmf_dbg(PCIE, "Try to wakeup device....\n");
1995 		if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
1996 			goto cleanup;
1997 		brcmf_dbg(PCIE, "Hot resume, continue....\n");
1998 		devinfo->state = BRCMFMAC_PCIE_STATE_UP;
1999 		brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
2000 		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
2001 		brcmf_pcie_intr_enable(devinfo);
2002 		brcmf_pcie_hostready(devinfo);
2003 		return 0;
2004 	}
2005 
2006 cleanup:
2007 	brcmf_chip_detach(devinfo->ci);
2008 	devinfo->ci = NULL;
2009 	pdev = devinfo->pdev;
2010 	brcmf_pcie_remove(pdev);
2011 
2012 	err = brcmf_pcie_probe(pdev, NULL);
2013 	if (err)
2014 		brcmf_err(bus, "probe after resume failed, err=%d\n", err);
2015 
2016 	return err;
2017 }
2018 
2019 
2020 static const struct dev_pm_ops brcmf_pciedrvr_pm = {
2021 	.suspend = brcmf_pcie_pm_enter_D3,
2022 	.resume = brcmf_pcie_pm_leave_D3,
2023 	.freeze = brcmf_pcie_pm_enter_D3,
2024 	.restore = brcmf_pcie_pm_leave_D3,
2025 };
2026 
2027 
2028 #endif /* CONFIG_PM */
2029 
2030 
2031 #define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2032 	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2033 #define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev)	{ \
2034 	BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2035 	subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2036 
2037 static const struct pci_device_id brcmf_pcie_devid_table[] = {
2038 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
2039 	BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
2040 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
2041 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
2042 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
2043 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
2044 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
2045 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
2046 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
2047 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
2048 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
2049 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
2050 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
2051 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
2052 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
2053 	BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
2054 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
2055 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
2056 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
2057 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
2058 	{ /* end: all zeroes */ }
2059 };
2060 
2061 
2062 MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
2063 
2064 
2065 static struct pci_driver brcmf_pciedrvr = {
2066 	.node = {},
2067 	.name = KBUILD_MODNAME,
2068 	.id_table = brcmf_pcie_devid_table,
2069 	.probe = brcmf_pcie_probe,
2070 	.remove = brcmf_pcie_remove,
2071 #ifdef CONFIG_PM
2072 	.driver.pm = &brcmf_pciedrvr_pm,
2073 #endif
2074 	.driver.coredump = brcmf_dev_coredump,
2075 };
2076 
2077 
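/* Register the PCIe driver with the PCI core. */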
2078 void brcmf_pcie_register(void)
2079 {
2080 	int err;
2081 
2082 	brcmf_dbg(PCIE, "Enter\n");
2083 	err = pci_register_driver(&brcmf_pciedrvr);
2084 	if (err)
2085 		brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
2086 			  err);
2087 }
2088 
2089 
2090 void brcmf_pcie_exit(void)
2091 {
2092 	brcmf_dbg(PCIE, "Enter\n");
2093 	pci_unregister_driver(&brcmf_pciedrvr);
2094 }
2095