xref: /linux/drivers/bluetooth/hci_bcm4377.c (revision 3e7819886281e077e82006fe4804b0d6b0f5643b)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3  * Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe
4  *
5  * Copyright (C) The Asahi Linux Contributors
6  */
7 
8 #include <linux/async.h>
9 #include <linux/bitfield.h>
10 #include <linux/completion.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmi.h>
13 #include <linux/firmware.h>
14 #include <linux/module.h>
15 #include <linux/msi.h>
16 #include <linux/of.h>
17 #include <linux/pci.h>
18 #include <linux/printk.h>
19 
20 #include <asm/unaligned.h>
21 
22 #include <net/bluetooth/bluetooth.h>
23 #include <net/bluetooth/hci_core.h>
24 
25 enum bcm4377_chip {
26 	BCM4377 = 0,
27 	BCM4378,
28 	BCM4387,
29 };
30 
31 #define BCM4377_DEVICE_ID 0x5fa0
32 #define BCM4378_DEVICE_ID 0x5f69
33 #define BCM4387_DEVICE_ID 0x5f71
34 
35 #define BCM4377_TIMEOUT 1000
36 
37 /*
38  * These devices only support DMA transactions inside a 32bit window
39  * (possibly to avoid 64 bit arithmetic). The window size cannot exceed
40  * 0xffffffff but is always aligned down to the previous 0x200 byte boundary
41  * which effectively limits the window to [start, start+0xfffffe00].
42  * We just limit the DMA window to [0, 0xfffffe00] to make sure we don't
43  * run into this limitation.
44  */
45 #define BCM4377_DMA_MASK 0xfffffe00
46 
47 #define BCM4377_PCIECFG_BAR0_WINDOW1	   0x80
48 #define BCM4377_PCIECFG_BAR0_WINDOW2	   0x70
49 #define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1 0x74
50 #define BCM4377_PCIECFG_BAR0_CORE2_WINDOW2 0x78
51 #define BCM4377_PCIECFG_BAR2_WINDOW	   0x84
52 
53 #define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT 0x18011000
54 #define BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT	   0x19000000
55 
56 #define BCM4377_PCIECFG_SUBSYSTEM_CTRL 0x88
57 
58 #define BCM4377_BAR0_FW_DOORBELL 0x140
59 #define BCM4377_BAR0_RTI_CONTROL 0x144
60 
61 #define BCM4377_BAR0_SLEEP_CONTROL	      0x150
62 #define BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE  0
63 #define BCM4377_BAR0_SLEEP_CONTROL_AWAKE      2
64 #define BCM4377_BAR0_SLEEP_CONTROL_QUIESCE    3
65 
66 #define BCM4377_BAR0_DOORBELL	    0x174
67 #define BCM4377_BAR0_DOORBELL_VALUE GENMASK(31, 16)
68 #define BCM4377_BAR0_DOORBELL_IDX   GENMASK(15, 8)
69 #define BCM4377_BAR0_DOORBELL_RING  BIT(5)
70 
71 #define BCM4377_BAR0_HOST_WINDOW_LO   0x590
72 #define BCM4377_BAR0_HOST_WINDOW_HI   0x594
73 #define BCM4377_BAR0_HOST_WINDOW_SIZE 0x598
74 
75 #define BCM4377_BAR2_BOOTSTAGE 0x200454
76 
77 #define BCM4377_BAR2_FW_LO   0x200478
78 #define BCM4377_BAR2_FW_HI   0x20047c
79 #define BCM4377_BAR2_FW_SIZE 0x200480
80 
81 #define BCM4377_BAR2_CONTEXT_ADDR_LO 0x20048c
82 #define BCM4377_BAR2_CONTEXT_ADDR_HI 0x200450
83 
84 #define BCM4377_BAR2_RTI_STATUS	     0x20045c
85 #define BCM4377_BAR2_RTI_WINDOW_LO   0x200494
86 #define BCM4377_BAR2_RTI_WINDOW_HI   0x200498
87 #define BCM4377_BAR2_RTI_WINDOW_SIZE 0x20049c
88 
89 #define BCM4377_OTP_SIZE	  0xe0
90 #define BCM4377_OTP_SYS_VENDOR	  0x15
91 #define BCM4377_OTP_CIS		  0x80
92 #define BCM4377_OTP_VENDOR_HDR	  0x00000008
93 #define BCM4377_OTP_MAX_PARAM_LEN 16
94 
95 #define BCM4377_N_TRANSFER_RINGS   9
96 #define BCM4377_N_COMPLETION_RINGS 6
97 
98 #define BCM4377_MAX_RING_SIZE 256
99 
100 #define BCM4377_MSGID_GENERATION GENMASK(15, 8)
101 #define BCM4377_MSGID_ID	 GENMASK(7, 0)
102 
103 #define BCM4377_RING_N_ENTRIES 128
104 
105 #define BCM4377_CONTROL_MSG_SIZE		   0x34
106 #define BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE (4 * 0xff)
107 
108 #define MAX_ACL_PAYLOAD_SIZE   (HCI_MAX_FRAME_SIZE + HCI_ACL_HDR_SIZE)
109 #define MAX_SCO_PAYLOAD_SIZE   (HCI_MAX_SCO_SIZE + HCI_SCO_HDR_SIZE)
110 #define MAX_EVENT_PAYLOAD_SIZE (HCI_MAX_EVENT_SIZE + HCI_EVENT_HDR_SIZE)
111 
112 enum bcm4377_otp_params_type {
113 	BCM4377_OTP_BOARD_PARAMS,
114 	BCM4377_OTP_CHIP_PARAMS
115 };
116 
117 enum bcm4377_transfer_ring_id {
118 	BCM4377_XFER_RING_CONTROL = 0,
119 	BCM4377_XFER_RING_HCI_H2D = 1,
120 	BCM4377_XFER_RING_HCI_D2H = 2,
121 	BCM4377_XFER_RING_SCO_H2D = 3,
122 	BCM4377_XFER_RING_SCO_D2H = 4,
123 	BCM4377_XFER_RING_ACL_H2D = 5,
124 	BCM4377_XFER_RING_ACL_D2H = 6,
125 };
126 
127 enum bcm4377_completion_ring_id {
128 	BCM4377_ACK_RING_CONTROL = 0,
129 	BCM4377_ACK_RING_HCI_ACL = 1,
130 	BCM4377_EVENT_RING_HCI_ACL = 2,
131 	BCM4377_ACK_RING_SCO = 3,
132 	BCM4377_EVENT_RING_SCO = 4,
133 };
134 
135 enum bcm4377_doorbell {
136 	BCM4377_DOORBELL_CONTROL = 0,
137 	BCM4377_DOORBELL_HCI_H2D = 1,
138 	BCM4377_DOORBELL_HCI_D2H = 2,
139 	BCM4377_DOORBELL_ACL_H2D = 3,
140 	BCM4377_DOORBELL_ACL_D2H = 4,
141 	BCM4377_DOORBELL_SCO = 6,
142 };
143 
144 /*
145  * Transfer ring entry
146  *
147  * flags: Flags to indicate if the payload is appended or mapped
148  * len: Payload length
149  * payload: Optional payload DMA address
150  * id: Message id to recognize the answer in the completion ring entry
151  */
struct bcm4377_xfer_ring_entry {
#define BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED	 BIT(0)
#define BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER BIT(1)
	u8 flags;	/* BCM4377_XFER_RING_FLAG_* */
	__le16 len;	/* payload length in bytes */
	u8 _unk0;
	__le64 payload;	/* payload DMA address; only valid with PAYLOAD_MAPPED */
	__le16 id;	/* message id echoed back in the completion ring entry */
	u8 _unk1[2];
} __packed;
/* layout is fixed by the firmware interface */
static_assert(sizeof(struct bcm4377_xfer_ring_entry) == 0x10);
163 
164 /*
165  * Completion ring entry
166  *
167  * flags: Flags to indicate if the payload is appended or mapped. If the payload
168  *        is mapped it can be found in the buffer of the corresponding transfer
169  *        ring message.
170  * ring_id: Transfer ring ID which required this message
171  * msg_id: Message ID specified in transfer ring entry
172  * len: Payload length
173  */
174 struct bcm4377_completion_ring_entry {
175 	u8 flags;
176 	u8 _unk0;
177 	__le16 ring_id;
178 	__le16 msg_id;
179 	__le32 len;
180 	u8 _unk1[6];
181 } __packed;
182 static_assert(sizeof(struct bcm4377_completion_ring_entry) == 0x10);
183 
184 enum bcm4377_control_message_type {
185 	BCM4377_CONTROL_MSG_CREATE_XFER_RING = 1,
186 	BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING = 2,
187 	BCM4377_CONTROL_MSG_DESTROY_XFER_RING = 3,
188 	BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING = 4,
189 };
190 
191 /*
192  * Control message used to create a completion ring
193  *
194  * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING
195  * header_size: Unknown, but probably reserved space in front of the entry
196  * footer_size: Number of 32 bit words reserved for payloads after the entry
197  * id/id_again: Completion ring index
198  * ring_iova: DMA address of the ring buffer
199  * n_elements: Number of elements inside the ring buffer
200  * msi: MSI index, doesn't work for all rings though and should be zero
201  * intmod_delay: Unknown delay
202  * intmod_bytes: Unknown
203  */
204 struct bcm4377_create_completion_ring_msg {
205 	u8 msg_type;
206 	u8 header_size;
207 	u8 footer_size;
208 	u8 _unk0;
209 	__le16 id;
210 	__le16 id_again;
211 	__le64 ring_iova;
212 	__le16 n_elements;
213 	__le32 unk;
214 	u8 _unk1[6];
215 	__le16 msi;
216 	__le16 intmod_delay;
217 	__le32 intmod_bytes;
218 	__le16 _unk2;
219 	__le32 _unk3;
220 	u8 _unk4[10];
221 } __packed;
222 static_assert(sizeof(struct bcm4377_create_completion_ring_msg) ==
223 	      BCM4377_CONTROL_MSG_SIZE);
224 
225 /*
226  * Control ring message used to destroy a completion ring
227  *
228  * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING
229  * ring_id: Completion ring to be destroyed
230  */
231 struct bcm4377_destroy_completion_ring_msg {
232 	u8 msg_type;
233 	u8 _pad0;
234 	__le16 ring_id;
235 	u8 _pad1[48];
236 } __packed;
237 static_assert(sizeof(struct bcm4377_destroy_completion_ring_msg) ==
238 	      BCM4377_CONTROL_MSG_SIZE);
239 
240 /*
241  * Control message used to create a transfer ring
242  *
243  * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_XFER_RING
244  * header_size: Number of 32 bit words reserved for unknown content before the
245  *              entry
246  * footer_size: Number of 32 bit words reserved for payloads after the entry
247  * ring_id/ring_id_again: Transfer ring index
248  * ring_iova: DMA address of the ring buffer
249  * n_elements: Number of elements inside the ring buffer
250  * completion_ring_id: Completion ring index for acknowledgements and events
251  * doorbell: Doorbell index used to notify device of new entries
252  * flags: Transfer ring flags
253  *          - virtual: set if there is no associated shared memory and only the
254  *                     corresponding completion ring is used
255  *          - sync: only set for the SCO rings
256  */
257 struct bcm4377_create_transfer_ring_msg {
258 	u8 msg_type;
259 	u8 header_size;
260 	u8 footer_size;
261 	u8 _unk0;
262 	__le16 ring_id;
263 	__le16 ring_id_again;
264 	__le64 ring_iova;
265 	u8 _unk1[8];
266 	__le16 n_elements;
267 	__le16 completion_ring_id;
268 	__le16 doorbell;
269 #define BCM4377_XFER_RING_FLAG_VIRTUAL BIT(7)
270 #define BCM4377_XFER_RING_FLAG_SYNC    BIT(8)
271 	__le16 flags;
272 	u8 _unk2[20];
273 } __packed;
274 static_assert(sizeof(struct bcm4377_create_transfer_ring_msg) ==
275 	      BCM4377_CONTROL_MSG_SIZE);
276 
277 /*
278  * Control ring message used to destroy a transfer ring
279  *
280  * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_XFER_RING
281  * ring_id: Transfer ring to be destroyed
282  */
283 struct bcm4377_destroy_transfer_ring_msg {
284 	u8 msg_type;
285 	u8 _pad0;
286 	__le16 ring_id;
287 	u8 _pad1[48];
288 } __packed;
289 static_assert(sizeof(struct bcm4377_destroy_transfer_ring_msg) ==
290 	      BCM4377_CONTROL_MSG_SIZE);
291 
292 /*
293  * "Converged IPC" context struct used to make the device aware of all other
294  * shared memory structures. A pointer to this structure is configured inside a
295  * MMIO register.
296  *
297  * version: Protocol version, must be 2.
298  * size: Size of this structure, must be 0x68.
299  * enabled_caps: Enabled capabilities. Unknown bitfield but should be 2.
300  * peripheral_info_addr: DMA address for a 0x20 buffer to which the device will
301  *                       write unknown contents
302  * {completion,xfer}_ring_{tails,heads}_addr: DMA pointers to ring heads/tails
303  * n_completion_rings: Number of completion rings, the firmware only works if
304  *                     this is set to BCM4377_N_COMPLETION_RINGS.
305  * n_xfer_rings: Number of transfer rings, the firmware only works if
306  *               this is set to BCM4377_N_TRANSFER_RINGS.
307  * control_completion_ring_addr: Control completion ring buffer DMA address
308  * control_xfer_ring_addr: Control transfer ring buffer DMA address
309  * control_xfer_ring_n_entries: Number of control transfer ring entries
310  * control_completion_ring_n_entries: Number of control completion ring entries
311  * control_xfer_ring_doorbell: Control transfer ring doorbell
312  * control_completion_ring_doorbell: Control completion ring doorbell,
313  *                                   must be set to 0xffff
 * control_xfer_ring_msi: Control transfer ring MSI index, must be 0
315  * control_completion_ring_msi: Control completion ring MSI index, must be 0.
316  * control_xfer_ring_header_size: Number of 32 bit words reserved in front of
317  *                                every control transfer ring entry
318  * control_xfer_ring_footer_size: Number of 32 bit words reserved after every
319  *                                control transfer ring entry
320  * control_completion_ring_header_size: Number of 32 bit words reserved in front
321  *                                      of every control completion ring entry
322  * control_completion_ring_footer_size: Number of 32 bit words reserved after
323  *                                      every control completion ring entry
324  * scratch_pad: Optional scratch pad DMA address
325  * scratch_pad_size: Scratch pad size
326  */
327 struct bcm4377_context {
328 	__le16 version;
329 	__le16 size;
330 	__le32 enabled_caps;
331 
332 	__le64 peripheral_info_addr;
333 
334 	/* ring heads and tails */
335 	__le64 completion_ring_heads_addr;
336 	__le64 xfer_ring_tails_addr;
337 	__le64 completion_ring_tails_addr;
338 	__le64 xfer_ring_heads_addr;
339 	__le16 n_completion_rings;
340 	__le16 n_xfer_rings;
341 
342 	/* control ring configuration */
343 	__le64 control_completion_ring_addr;
344 	__le64 control_xfer_ring_addr;
345 	__le16 control_xfer_ring_n_entries;
346 	__le16 control_completion_ring_n_entries;
347 	__le16 control_xfer_ring_doorbell;
348 	__le16 control_completion_ring_doorbell;
349 	__le16 control_xfer_ring_msi;
350 	__le16 control_completion_ring_msi;
351 	u8 control_xfer_ring_header_size;
352 	u8 control_xfer_ring_footer_size;
353 	u8 control_completion_ring_header_size;
354 	u8 control_completion_ring_footer_size;
355 
356 	__le16 _unk0;
357 	__le16 _unk1;
358 
359 	__le64 scratch_pad;
360 	__le32 scratch_pad_size;
361 
362 	__le32 _unk3;
363 } __packed;
364 static_assert(sizeof(struct bcm4377_context) == 0x68);
365 
366 #define BCM4378_CALIBRATION_CHUNK_SIZE 0xe6
367 struct bcm4378_hci_send_calibration_cmd {
368 	u8 unk;
369 	__le16 blocks_left;
370 	u8 data[BCM4378_CALIBRATION_CHUNK_SIZE];
371 } __packed;
372 
373 #define BCM4378_PTB_CHUNK_SIZE 0xcf
374 struct bcm4378_hci_send_ptb_cmd {
375 	__le16 blocks_left;
376 	u8 data[BCM4378_PTB_CHUNK_SIZE];
377 } __packed;
378 
379 /*
380  * Shared memory structure used to store the ring head and tail pointers.
381  */
382 struct bcm4377_ring_state {
383 	__le16 completion_ring_head[BCM4377_N_COMPLETION_RINGS];
384 	__le16 completion_ring_tail[BCM4377_N_COMPLETION_RINGS];
385 	__le16 xfer_ring_head[BCM4377_N_TRANSFER_RINGS];
386 	__le16 xfer_ring_tail[BCM4377_N_TRANSFER_RINGS];
387 };
388 
389 /*
390  * A transfer ring can be used in two configurations:
391  *  1) Send control or HCI messages to the device which are then acknowledged
392  *     in the corresponding completion ring
393  *  2) Receiving HCI frames from the devices. In this case the transfer ring
394  *     itself contains empty messages that are acknowledged once data is
395  *     available from the device. If the payloads fit inside the footers
396  *     of the completion ring the transfer ring can be configured to be
397  *     virtual such that it has no ring buffer.
398  *
399  * ring_id: ring index hardcoded in the firmware
400  * doorbell: doorbell index to notify device of new entries
401  * payload_size: optional in-place payload size
402  * mapped_payload_size: optional out-of-place payload size
403  * completion_ring: index of corresponding completion ring
404  * n_entries: number of entries inside this ring
405  * generation: ring generation; incremented on hci_open to detect stale messages
406  * sync: set to true for SCO rings
407  * virtual: set to true if this ring has no entries and is just required to
408  *          setup a corresponding completion ring for device->host messages
409  * d2h_buffers_only: set to true if this ring is only used to provide large
410  *                   buffers used by device->host messages in the completion
411  *                   ring
412  * allow_wait: allow to wait for messages to be acknowledged
413  * enabled: true once the ring has been created and can be used
414  * ring: ring buffer for entries (struct bcm4377_xfer_ring_entry)
415  * ring_dma: DMA address for ring entry buffer
416  * payloads: payload buffer for mapped_payload_size payloads
 * payloads_dma: DMA address for payload buffer
418  * events: pointer to array of completions if waiting is allowed
419  * msgids: bitmap to keep track of used message ids
 * lock: Spinlock to protect access to ring structures used in the irq handler
421  */
422 struct bcm4377_transfer_ring {
423 	enum bcm4377_transfer_ring_id ring_id;
424 	enum bcm4377_doorbell doorbell;
425 	size_t payload_size;
426 	size_t mapped_payload_size;
427 	u8 completion_ring;
428 	u16 n_entries;
429 	u8 generation;
430 
431 	bool sync;
432 	bool virtual;
433 	bool d2h_buffers_only;
434 	bool allow_wait;
435 	bool enabled;
436 
437 	void *ring;
438 	dma_addr_t ring_dma;
439 
440 	void *payloads;
441 	dma_addr_t payloads_dma;
442 
443 	struct completion **events;
444 	DECLARE_BITMAP(msgids, BCM4377_MAX_RING_SIZE);
445 	spinlock_t lock;
446 };
447 
448 /*
 * A completion ring can be used either to acknowledge messages sent in
450  * the corresponding transfer ring or to receive messages associated with the
451  * transfer ring. When used to receive messages the transfer ring either
452  * has no ring buffer and is only advanced ("virtual transfer ring") or it
453  * only contains empty DMA buffers to be used for the payloads.
454  *
455  * ring_id: completion ring id, hardcoded in firmware
456  * payload_size: optional payload size after each entry
457  * delay: unknown delay
458  * n_entries: number of entries in this ring
459  * enabled: true once the ring has been created and can be used
460  * ring: ring buffer for entries (struct bcm4377_completion_ring_entry)
461  * ring_dma: DMA address of ring buffer
462  * transfer_rings: bitmap of corresponding transfer ring ids
463  */
464 struct bcm4377_completion_ring {
465 	enum bcm4377_completion_ring_id ring_id;
466 	u16 payload_size;
467 	u16 delay;
468 	u16 n_entries;
469 	bool enabled;
470 
471 	void *ring;
472 	dma_addr_t ring_dma;
473 
474 	unsigned long transfer_rings;
475 };
476 
477 struct bcm4377_data;
478 
479 /*
480  * Chip-specific configuration struct
481  *
482  * id: Chip id (e.g. 0x4377 for BCM4377)
483  * otp_offset: Offset to the start of the OTP inside BAR0
484  * bar0_window1: Backplane address mapped to the first window in BAR0
485  * bar0_window2: Backplane address mapped to the second window in BAR0
486  * bar0_core2_window2: Optional backplane address mapped to the second core's
487  *                     second window in BAR0
488  * has_bar0_core2_window2: Set to true if this chip requires the second core's
489  *                         second window to be configured
490  * clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the
491  *                                     vendor-specific subsystem control
492  *                                     register has to be cleared
493  * disable_aspm: Set to true if ASPM must be disabled due to hardware errata
494  * broken_ext_scan: Set to true if the chip erroneously claims to support
495  *                  extended scanning
496  * broken_mws_transport_config: Set to true if the chip erroneously claims to
497  *                              support MWS Transport Configuration
498  * broken_le_ext_adv_report_phy: Set to true if this chip stuffs flags inside
499  *                               reserved bits of Primary/Secondary_PHY inside
500  *                               LE Extended Advertising Report events which
501  *                               have to be ignored
502  * send_calibration: Optional callback to send calibration data
503  * send_ptb: Callback to send "PTB" regulatory/calibration data
504  */
505 struct bcm4377_hw {
506 	unsigned int id;
507 
508 	u32 otp_offset;
509 
510 	u32 bar0_window1;
511 	u32 bar0_window2;
512 	u32 bar0_core2_window2;
513 
514 	unsigned long has_bar0_core2_window2 : 1;
515 	unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1;
516 	unsigned long disable_aspm : 1;
517 	unsigned long broken_ext_scan : 1;
518 	unsigned long broken_mws_transport_config : 1;
519 	unsigned long broken_le_coded : 1;
520 	unsigned long broken_le_ext_adv_report_phy : 1;
521 
522 	int (*send_calibration)(struct bcm4377_data *bcm4377);
523 	int (*send_ptb)(struct bcm4377_data *bcm4377,
524 			const struct firmware *fw);
525 };
526 
527 static const struct bcm4377_hw bcm4377_hw_variants[];
528 static const struct dmi_system_id bcm4377_dmi_board_table[];
529 
530 /*
531  * Private struct associated with each device containing global state
532  *
533  * pdev: Pointer to associated struct pci_dev
 * hdev: Pointer to associated struct hci_dev
535  * bar0: iomem pointing to BAR0
536  * bar1: iomem pointing to BAR2
537  * bootstage: Current value of the bootstage
538  * rti_status: Current "RTI" status value
539  * hw: Pointer to chip-specific struct bcm4377_hw
540  * taurus_cal_blob: "Taurus" calibration blob used for some chips
541  * taurus_cal_size: "Taurus" calibration blob size
542  * taurus_beamforming_cal_blob: "Taurus" beamforming calibration blob used for
543  *                              some chips
544  * taurus_beamforming_cal_size: "Taurus" beamforming calibration blob size
545  * stepping: Chip stepping read from OTP; used for firmware selection
546  * vendor: Antenna vendor read from OTP; used for firmware selection
547  * board_type: Board type from FDT or DMI match; used for firmware selection
548  * event: Event for changed bootstage or rti_status; used for booting firmware
549  * ctx: "Converged IPC" context
550  * ctx_dma: "Converged IPC" context DMA address
551  * ring_state: Shared memory buffer containing ring head and tail indexes
552  * ring_state_dma: DMA address for ring_state
553  * {control,hci_acl,sco}_ack_ring: Completion rings used to acknowledge messages
554  * {hci_acl,sco}_event_ring: Completion rings used for device->host messages
555  * control_h2d_ring: Transfer ring used for control messages
556  * {hci,sco,acl}_h2d_ring: Transfer ring used to transfer HCI frames
557  * {hci,sco,acl}_d2h_ring: Transfer ring used to receive HCI frames in the
558  *                         corresponding completion ring
559  */
560 struct bcm4377_data {
561 	struct pci_dev *pdev;
562 	struct hci_dev *hdev;
563 
564 	void __iomem *bar0;
565 	void __iomem *bar2;
566 
567 	u32 bootstage;
568 	u32 rti_status;
569 
570 	const struct bcm4377_hw *hw;
571 
572 	const void *taurus_cal_blob;
573 	int taurus_cal_size;
574 	const void *taurus_beamforming_cal_blob;
575 	int taurus_beamforming_cal_size;
576 
577 	char stepping[BCM4377_OTP_MAX_PARAM_LEN];
578 	char vendor[BCM4377_OTP_MAX_PARAM_LEN];
579 	const char *board_type;
580 
581 	struct completion event;
582 
583 	struct bcm4377_context *ctx;
584 	dma_addr_t ctx_dma;
585 
586 	struct bcm4377_ring_state *ring_state;
587 	dma_addr_t ring_state_dma;
588 
589 	/*
590 	 * The HCI and ACL rings have to be merged because this structure is
591 	 * hardcoded in the firmware.
592 	 */
593 	struct bcm4377_completion_ring control_ack_ring;
594 	struct bcm4377_completion_ring hci_acl_ack_ring;
595 	struct bcm4377_completion_ring hci_acl_event_ring;
596 	struct bcm4377_completion_ring sco_ack_ring;
597 	struct bcm4377_completion_ring sco_event_ring;
598 
599 	struct bcm4377_transfer_ring control_h2d_ring;
600 	struct bcm4377_transfer_ring hci_h2d_ring;
601 	struct bcm4377_transfer_ring hci_d2h_ring;
602 	struct bcm4377_transfer_ring sco_h2d_ring;
603 	struct bcm4377_transfer_ring sco_d2h_ring;
604 	struct bcm4377_transfer_ring acl_h2d_ring;
605 	struct bcm4377_transfer_ring acl_d2h_ring;
606 };
607 
608 static void bcm4377_ring_doorbell(struct bcm4377_data *bcm4377, u8 doorbell,
609 				  u16 val)
610 {
611 	u32 db = 0;
612 
613 	db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_VALUE, val);
614 	db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_IDX, doorbell);
615 	db |= BCM4377_BAR0_DOORBELL_RING;
616 
617 	dev_dbg(&bcm4377->pdev->dev, "write %d to doorbell #%d (0x%x)\n", val,
618 		doorbell, db);
619 	iowrite32(db, bcm4377->bar0 + BCM4377_BAR0_DOORBELL);
620 }
621 
622 static int bcm4377_extract_msgid(struct bcm4377_data *bcm4377,
623 				 struct bcm4377_transfer_ring *ring,
624 				 u16 raw_msgid, u8 *msgid)
625 {
626 	u8 generation = FIELD_GET(BCM4377_MSGID_GENERATION, raw_msgid);
627 	*msgid = FIELD_GET(BCM4377_MSGID_ID, raw_msgid);
628 
629 	if (generation != ring->generation) {
630 		dev_warn(
631 			&bcm4377->pdev->dev,
632 			"invalid message generation %d should be %d in entry for ring %d\n",
633 			generation, ring->generation, ring->ring_id);
634 		return -EINVAL;
635 	}
636 
637 	if (*msgid >= ring->n_entries) {
638 		dev_warn(&bcm4377->pdev->dev,
639 			 "invalid message id in entry for ring %d: %d > %d\n",
640 			 ring->ring_id, *msgid, ring->n_entries);
641 		return -EINVAL;
642 	}
643 
644 	return 0;
645 }
646 
/*
 * Handle a device->host HCI frame delivered through a completion ring.
 *
 * The payload either sits directly behind the completion ring entry (in
 * which case @payload already points at it) or, for transfer rings that only
 * provide large DMA buffers (d2h_buffers_only) and entries flagged as
 * PAYLOAD_MAPPED, inside the transfer ring payload buffer selected by the
 * message id encoded in @raw_msgid.
 *
 * Note that the transfer ring head is advanced and the doorbell is rung even
 * on the error paths ("out" label) so the device always gets the consumed
 * slot handed back.
 */
static void bcm4377_handle_event(struct bcm4377_data *bcm4377,
				 struct bcm4377_transfer_ring *ring,
				 u16 raw_msgid, u8 entry_flags, u8 type,
				 void *payload, size_t len)
{
	struct sk_buff *skb;
	u16 head;
	u8 msgid;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->enabled) {
		dev_warn(&bcm4377->pdev->dev,
			 "event for disabled transfer ring %d\n",
			 ring->ring_id);
		goto out;
	}

	/* payload lives in the mapped per-msgid buffer, not in the footer */
	if (ring->d2h_buffers_only &&
	    entry_flags & BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED) {
		if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid))
			goto out;

		if (len > ring->mapped_payload_size) {
			dev_warn(
				&bcm4377->pdev->dev,
				"invalid payload len in event for ring %d: %zu > %zu\n",
				ring->ring_id, len, ring->mapped_payload_size);
			goto out;
		}

		payload = ring->payloads + msgid * ring->mapped_payload_size;
	}

	/* GFP_ATOMIC: called from the irq handler with a spinlock held */
	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		goto out;

	memcpy(skb_put(skb, len), payload, len);
	hci_skb_pkt_type(skb) = type;
	hci_recv_frame(bcm4377->hdev, skb);

out:
	/* hand the slot back to the device regardless of success */
	head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]);
	head = (head + 1) % ring->n_entries;
	bcm4377->ring_state->xfer_ring_head[ring->ring_id] = cpu_to_le16(head);

	bcm4377_ring_doorbell(bcm4377, ring->doorbell, head);

	spin_unlock_irqrestore(&ring->lock, flags);
}
698 
699 static void bcm4377_handle_ack(struct bcm4377_data *bcm4377,
700 			       struct bcm4377_transfer_ring *ring,
701 			       u16 raw_msgid)
702 {
703 	unsigned long flags;
704 	u8 msgid;
705 
706 	spin_lock_irqsave(&ring->lock, flags);
707 
708 	if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid))
709 		goto unlock;
710 
711 	if (!test_bit(msgid, ring->msgids)) {
712 		dev_warn(
713 			&bcm4377->pdev->dev,
714 			"invalid message id in ack for ring %d: %d is not used\n",
715 			ring->ring_id, msgid);
716 		goto unlock;
717 	}
718 
719 	if (ring->allow_wait && ring->events[msgid]) {
720 		complete(ring->events[msgid]);
721 		ring->events[msgid] = NULL;
722 	}
723 
724 	bitmap_release_region(ring->msgids, msgid, 0);
725 
726 unlock:
727 	spin_unlock_irqrestore(&ring->lock, flags);
728 }
729 
730 static void bcm4377_handle_completion(struct bcm4377_data *bcm4377,
731 				      struct bcm4377_completion_ring *ring,
732 				      u16 pos)
733 {
734 	struct bcm4377_completion_ring_entry *entry;
735 	u16 msg_id, transfer_ring;
736 	size_t entry_size, data_len;
737 	void *data;
738 
739 	if (pos >= ring->n_entries) {
740 		dev_warn(&bcm4377->pdev->dev,
741 			 "invalid offset %d for completion ring %d\n", pos,
742 			 ring->ring_id);
743 		return;
744 	}
745 
746 	entry_size = sizeof(*entry) + ring->payload_size;
747 	entry = ring->ring + pos * entry_size;
748 	data = ring->ring + pos * entry_size + sizeof(*entry);
749 	data_len = le32_to_cpu(entry->len);
750 	msg_id = le16_to_cpu(entry->msg_id);
751 	transfer_ring = le16_to_cpu(entry->ring_id);
752 
753 	if ((ring->transfer_rings & BIT(transfer_ring)) == 0) {
754 		dev_warn(
755 			&bcm4377->pdev->dev,
756 			"invalid entry at offset %d for transfer ring %d in completion ring %d\n",
757 			pos, transfer_ring, ring->ring_id);
758 		return;
759 	}
760 
761 	dev_dbg(&bcm4377->pdev->dev,
762 		"entry in completion ring %d for transfer ring %d with msg_id %d\n",
763 		ring->ring_id, transfer_ring, msg_id);
764 
765 	switch (transfer_ring) {
766 	case BCM4377_XFER_RING_CONTROL:
767 		bcm4377_handle_ack(bcm4377, &bcm4377->control_h2d_ring, msg_id);
768 		break;
769 	case BCM4377_XFER_RING_HCI_H2D:
770 		bcm4377_handle_ack(bcm4377, &bcm4377->hci_h2d_ring, msg_id);
771 		break;
772 	case BCM4377_XFER_RING_SCO_H2D:
773 		bcm4377_handle_ack(bcm4377, &bcm4377->sco_h2d_ring, msg_id);
774 		break;
775 	case BCM4377_XFER_RING_ACL_H2D:
776 		bcm4377_handle_ack(bcm4377, &bcm4377->acl_h2d_ring, msg_id);
777 		break;
778 
779 	case BCM4377_XFER_RING_HCI_D2H:
780 		bcm4377_handle_event(bcm4377, &bcm4377->hci_d2h_ring, msg_id,
781 				     entry->flags, HCI_EVENT_PKT, data,
782 				     data_len);
783 		break;
784 	case BCM4377_XFER_RING_SCO_D2H:
785 		bcm4377_handle_event(bcm4377, &bcm4377->sco_d2h_ring, msg_id,
786 				     entry->flags, HCI_SCODATA_PKT, data,
787 				     data_len);
788 		break;
789 	case BCM4377_XFER_RING_ACL_D2H:
790 		bcm4377_handle_event(bcm4377, &bcm4377->acl_d2h_ring, msg_id,
791 				     entry->flags, HCI_ACLDATA_PKT, data,
792 				     data_len);
793 		break;
794 
795 	default:
796 		dev_warn(
797 			&bcm4377->pdev->dev,
798 			"entry in completion ring %d for unknown transfer ring %d with msg_id %d\n",
799 			ring->ring_id, transfer_ring, msg_id);
800 	}
801 }
802 
/*
 * Drain all pending entries from a completion ring.
 *
 * The device publishes new entries by advancing the head index in the shared
 * ring_state buffer; the host consumes entries and advances the matching
 * tail index, which the device reads to know how far the host has caught up.
 */
static void bcm4377_poll_completion_ring(struct bcm4377_data *bcm4377,
					 struct bcm4377_completion_ring *ring)
{
	u16 tail;
	__le16 *heads = bcm4377->ring_state->completion_ring_head;
	__le16 *tails = bcm4377->ring_state->completion_ring_tail;

	if (!ring->enabled)
		return;

	tail = le16_to_cpu(tails[ring->ring_id]);
	dev_dbg(&bcm4377->pdev->dev,
		"completion ring #%d: head: %d, tail: %d\n", ring->ring_id,
		le16_to_cpu(heads[ring->ring_id]), tail);

	/* READ_ONCE: the device updates the head concurrently via DMA */
	while (tail != le16_to_cpu(READ_ONCE(heads[ring->ring_id]))) {
		/*
		 * ensure the CPU doesn't speculate through the comparison.
		 * otherwise it might already read the (empty) queue entry
		 * before the updated head has been loaded and checked.
		 */
		dma_rmb();

		bcm4377_handle_completion(bcm4377, ring, tail);

		tail = (tail + 1) % ring->n_entries;
		tails[ring->ring_id] = cpu_to_le16(tail);
	}
}
832 
833 static irqreturn_t bcm4377_irq(int irq, void *data)
834 {
835 	struct bcm4377_data *bcm4377 = data;
836 	u32 bootstage, rti_status;
837 
838 	bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
839 	rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
840 
841 	if (bootstage != bcm4377->bootstage ||
842 	    rti_status != bcm4377->rti_status) {
843 		dev_dbg(&bcm4377->pdev->dev,
844 			"bootstage = %d -> %d, rti state = %d -> %d\n",
845 			bcm4377->bootstage, bootstage, bcm4377->rti_status,
846 			rti_status);
847 		complete(&bcm4377->event);
848 		bcm4377->bootstage = bootstage;
849 		bcm4377->rti_status = rti_status;
850 	}
851 
852 	if (rti_status > 2)
853 		dev_err(&bcm4377->pdev->dev, "RTI status is %d\n", rti_status);
854 
855 	bcm4377_poll_completion_ring(bcm4377, &bcm4377->control_ack_ring);
856 	bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
857 	bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);
858 	bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
859 	bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_event_ring);
860 
861 	return IRQ_HANDLED;
862 }
863 
/*
 * Queue one message of @len bytes onto a host->device transfer ring.
 *
 * Small payloads are copied into the in-ring footer; larger ones go into the
 * per-msgid mapped payload buffer and are referenced by IOVA. When @wait is
 * true the call blocks (interruptibly, bounded by BCM4377_TIMEOUT) until the
 * device acknowledges the message on the associated completion ring.
 *
 * Returns 0 on success, -EINVAL on invalid arguments or a full ring,
 * -ETIMEDOUT when the device does not ack in time, or -ERESTARTSYS if the
 * wait was interrupted.
 */
static int bcm4377_enqueue(struct bcm4377_data *bcm4377,
			   struct bcm4377_transfer_ring *ring, void *data,
			   size_t len, bool wait)
{
	unsigned long flags;
	struct bcm4377_xfer_ring_entry *entry;
	void *payload;
	size_t offset;
	u16 head, tail, new_head;
	u16 raw_msgid;
	int ret, msgid;
	DECLARE_COMPLETION_ONSTACK(event);

	/* the payload must fit either the footer or a mapped buffer */
	if (len > ring->payload_size && len > ring->mapped_payload_size) {
		dev_warn(
			&bcm4377->pdev->dev,
			"payload len %zu is too large for ring %d (max is %zu or %zu)\n",
			len, ring->ring_id, ring->payload_size,
			ring->mapped_payload_size);
		return -EINVAL;
	}
	/* only rings set up for waiting track per-msgid completions */
	if (wait && !ring->allow_wait)
		return -EINVAL;
	/* virtual rings have no host-side backing to enqueue into */
	if (ring->virtual)
		return -EINVAL;

	spin_lock_irqsave(&ring->lock, flags);

	head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]);
	tail = le16_to_cpu(bcm4377->ring_state->xfer_ring_tail[ring->ring_id]);

	new_head = (head + 1) % ring->n_entries;

	/* head catching up to tail means no free slot is left */
	if (new_head == tail) {
		dev_warn(&bcm4377->pdev->dev,
			 "can't send message because ring %d is full\n",
			 ring->ring_id);
		ret = -EINVAL;
		goto out;
	}

	msgid = bitmap_find_free_region(ring->msgids, ring->n_entries, 0);
	if (msgid < 0) {
		dev_warn(&bcm4377->pdev->dev,
			 "can't find message id for ring %d\n", ring->ring_id);
		ret = -EINVAL;
		goto out;
	}

	/* message ids carry the ring generation to detect stale acks */
	raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION, ring->generation);
	raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, msgid);

	offset = head * (sizeof(*entry) + ring->payload_size);
	entry = ring->ring + offset;

	memset(entry, 0, sizeof(*entry));
	entry->id = cpu_to_le16(raw_msgid);
	entry->len = cpu_to_le16(len);

	if (len <= ring->payload_size) {
		/* payload travels inline, directly after the entry header */
		entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER;
		payload = ring->ring + offset + sizeof(*entry);
	} else {
		/* payload goes into the DMA buffer reserved for this msgid */
		entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED;
		entry->payload = cpu_to_le64(ring->payloads_dma +
					     msgid * ring->mapped_payload_size);
		payload = ring->payloads + msgid * ring->mapped_payload_size;
	}

	memcpy(payload, data, len);

	if (wait)
		ring->events[msgid] = &event;

	/*
	 * The 4377 chips stop responding to any commands as soon as they
	 * have been idle for a while. Poking the sleep control register here
	 * makes them come alive again.
	 */
	iowrite32(BCM4377_BAR0_SLEEP_CONTROL_AWAKE,
		  bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL);

	dev_dbg(&bcm4377->pdev->dev,
		"updating head for transfer queue #%d to %d\n", ring->ring_id,
		new_head);
	bcm4377->ring_state->xfer_ring_head[ring->ring_id] =
		cpu_to_le16(new_head);

	/* sync rings are picked up by the device without a doorbell */
	if (!ring->sync)
		bcm4377_ring_doorbell(bcm4377, ring->doorbell, new_head);
	ret = 0;

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	if (ret == 0 && wait) {
		ret = wait_for_completion_interruptible_timeout(
			&event, BCM4377_TIMEOUT);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret > 0)
			ret = 0;

		/* always detach the on-stack completion before returning */
		spin_lock_irqsave(&ring->lock, flags);
		ring->events[msgid] = NULL;
		spin_unlock_irqrestore(&ring->lock, flags);
	}

	return ret;
}
974 
975 static int bcm4377_create_completion_ring(struct bcm4377_data *bcm4377,
976 					  struct bcm4377_completion_ring *ring)
977 {
978 	struct bcm4377_create_completion_ring_msg msg;
979 	int ret;
980 
981 	if (ring->enabled) {
982 		dev_warn(&bcm4377->pdev->dev,
983 			 "completion ring %d already enabled\n", ring->ring_id);
984 		return 0;
985 	}
986 
987 	memset(ring->ring, 0,
988 	       ring->n_entries * (sizeof(struct bcm4377_completion_ring_entry) +
989 				  ring->payload_size));
990 	memset(&msg, 0, sizeof(msg));
991 	msg.msg_type = BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING;
992 	msg.id = cpu_to_le16(ring->ring_id);
993 	msg.id_again = cpu_to_le16(ring->ring_id);
994 	msg.ring_iova = cpu_to_le64(ring->ring_dma);
995 	msg.n_elements = cpu_to_le16(ring->n_entries);
996 	msg.intmod_bytes = cpu_to_le32(0xffffffff);
997 	msg.unk = cpu_to_le32(0xffffffff);
998 	msg.intmod_delay = cpu_to_le16(ring->delay);
999 	msg.footer_size = ring->payload_size / 4;
1000 
1001 	ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
1002 			      sizeof(msg), true);
1003 	if (!ret)
1004 		ring->enabled = true;
1005 
1006 	return ret;
1007 }
1008 
1009 static int bcm4377_destroy_completion_ring(struct bcm4377_data *bcm4377,
1010 					   struct bcm4377_completion_ring *ring)
1011 {
1012 	struct bcm4377_destroy_completion_ring_msg msg;
1013 	int ret;
1014 
1015 	memset(&msg, 0, sizeof(msg));
1016 	msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING;
1017 	msg.ring_id = cpu_to_le16(ring->ring_id);
1018 
1019 	ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
1020 			      sizeof(msg), true);
1021 	if (ret)
1022 		dev_warn(&bcm4377->pdev->dev,
1023 			 "failed to destroy completion ring %d\n",
1024 			 ring->ring_id);
1025 
1026 	ring->enabled = false;
1027 	return ret;
1028 }
1029 
/*
 * Ask the firmware to bring up transfer ring @ring and (re)initialize the
 * host-side ring state. For device->host buffer rings the entries are
 * pre-populated with mapped payload buffers, and device->host rings are
 * primed with a few messages so the device can start acknowledging.
 *
 * Returns 0 on success or a negative error from bcm4377_enqueue.
 */
static int bcm4377_create_transfer_ring(struct bcm4377_data *bcm4377,
					struct bcm4377_transfer_ring *ring)
{
	struct bcm4377_create_transfer_ring_msg msg;
	u16 flags = 0;
	int ret, i;
	unsigned long spinlock_flags;

	if (ring->virtual)
		flags |= BCM4377_XFER_RING_FLAG_VIRTUAL;
	if (ring->sync)
		flags |= BCM4377_XFER_RING_FLAG_SYNC;

	spin_lock_irqsave(&ring->lock, spinlock_flags);
	memset(&msg, 0, sizeof(msg));
	msg.msg_type = BCM4377_CONTROL_MSG_CREATE_XFER_RING;
	msg.ring_id = cpu_to_le16(ring->ring_id);
	msg.ring_id_again = cpu_to_le16(ring->ring_id);
	msg.ring_iova = cpu_to_le64(ring->ring_dma);
	msg.n_elements = cpu_to_le16(ring->n_entries);
	msg.completion_ring_id = cpu_to_le16(ring->completion_ring);
	msg.doorbell = cpu_to_le16(ring->doorbell);
	msg.flags = cpu_to_le16(flags);
	msg.footer_size = ring->payload_size / 4;

	/* reset head/tail and bump the generation encoded into message ids */
	bcm4377->ring_state->xfer_ring_head[ring->ring_id] = 0;
	bcm4377->ring_state->xfer_ring_tail[ring->ring_id] = 0;
	ring->generation++;
	spin_unlock_irqrestore(&ring->lock, spinlock_flags);

	ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
			      sizeof(msg), true);

	spin_lock_irqsave(&ring->lock, spinlock_flags);

	/* fill every entry with a mapped, device-writable payload buffer */
	if (ring->d2h_buffers_only) {
		for (i = 0; i < ring->n_entries; ++i) {
			struct bcm4377_xfer_ring_entry *entry =
				ring->ring + i * sizeof(*entry);
			u16 raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION,
						   ring->generation);
			raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, i);

			memset(entry, 0, sizeof(*entry));
			entry->id = cpu_to_le16(raw_msgid);
			entry->len = cpu_to_le16(ring->mapped_payload_size);
			entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED;
			entry->payload =
				cpu_to_le64(ring->payloads_dma +
					    i * ring->mapped_payload_size);
		}
	}

	/*
	 * send some messages if this is a device->host ring to allow the device
	 * to reply by acknowledging them in the completion ring
	 */
	if (ring->virtual || ring->d2h_buffers_only) {
		bcm4377->ring_state->xfer_ring_head[ring->ring_id] =
			cpu_to_le16(0xf);
		bcm4377_ring_doorbell(bcm4377, ring->doorbell, 0xf);
	}

	ring->enabled = true;
	spin_unlock_irqrestore(&ring->lock, spinlock_flags);

	return ret;
}
1098 
1099 static int bcm4377_destroy_transfer_ring(struct bcm4377_data *bcm4377,
1100 					 struct bcm4377_transfer_ring *ring)
1101 {
1102 	struct bcm4377_destroy_transfer_ring_msg msg;
1103 	int ret;
1104 
1105 	memset(&msg, 0, sizeof(msg));
1106 	msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_XFER_RING;
1107 	msg.ring_id = cpu_to_le16(ring->ring_id);
1108 
1109 	ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
1110 			      sizeof(msg), true);
1111 	if (ret)
1112 		dev_warn(&bcm4377->pdev->dev,
1113 			 "failed to destroy transfer ring %d\n", ring->ring_id);
1114 
1115 	ring->enabled = false;
1116 	return ret;
1117 }
1118 
1119 static int __bcm4378_send_calibration_chunk(struct bcm4377_data *bcm4377,
1120 					    const void *data, size_t data_len,
1121 					    u16 blocks_left)
1122 {
1123 	struct bcm4378_hci_send_calibration_cmd cmd;
1124 	struct sk_buff *skb;
1125 
1126 	if (data_len > sizeof(cmd.data))
1127 		return -EINVAL;
1128 
1129 	memset(&cmd, 0, sizeof(cmd));
1130 	cmd.unk = 0x03;
1131 	cmd.blocks_left = cpu_to_le16(blocks_left);
1132 	memcpy(cmd.data, data, data_len);
1133 
1134 	skb = __hci_cmd_sync(bcm4377->hdev, 0xfd97, sizeof(cmd), &cmd,
1135 			     HCI_INIT_TIMEOUT);
1136 	if (IS_ERR(skb))
1137 		return PTR_ERR(skb);
1138 
1139 	kfree_skb(skb);
1140 	return 0;
1141 }
1142 
1143 static int __bcm4378_send_calibration(struct bcm4377_data *bcm4377,
1144 				      const void *data, size_t data_size)
1145 {
1146 	int ret;
1147 	size_t i, left, transfer_len;
1148 	size_t blocks =
1149 		DIV_ROUND_UP(data_size, (size_t)BCM4378_CALIBRATION_CHUNK_SIZE);
1150 
1151 	if (!data) {
1152 		dev_err(&bcm4377->pdev->dev,
1153 			"no calibration data available.\n");
1154 		return -ENOENT;
1155 	}
1156 
1157 	for (i = 0, left = data_size; i < blocks; ++i, left -= transfer_len) {
1158 		transfer_len =
1159 			min_t(size_t, left, BCM4378_CALIBRATION_CHUNK_SIZE);
1160 
1161 		ret = __bcm4378_send_calibration_chunk(
1162 			bcm4377, data + i * BCM4378_CALIBRATION_CHUNK_SIZE,
1163 			transfer_len, blocks - i - 1);
1164 		if (ret) {
1165 			dev_err(&bcm4377->pdev->dev,
1166 				"send calibration chunk failed with %d\n", ret);
1167 			return ret;
1168 		}
1169 	}
1170 
1171 	return 0;
1172 }
1173 
1174 static int bcm4378_send_calibration(struct bcm4377_data *bcm4377)
1175 {
1176 	if ((strcmp(bcm4377->stepping, "b1") == 0) ||
1177 	    strcmp(bcm4377->stepping, "b3") == 0)
1178 		return __bcm4378_send_calibration(
1179 			bcm4377, bcm4377->taurus_beamforming_cal_blob,
1180 			bcm4377->taurus_beamforming_cal_size);
1181 	else
1182 		return __bcm4378_send_calibration(bcm4377,
1183 						  bcm4377->taurus_cal_blob,
1184 						  bcm4377->taurus_cal_size);
1185 }
1186 
1187 static int bcm4387_send_calibration(struct bcm4377_data *bcm4377)
1188 {
1189 	if (strcmp(bcm4377->stepping, "c2") == 0)
1190 		return __bcm4378_send_calibration(
1191 			bcm4377, bcm4377->taurus_beamforming_cal_blob,
1192 			bcm4377->taurus_beamforming_cal_size);
1193 	else
1194 		return __bcm4378_send_calibration(bcm4377,
1195 						  bcm4377->taurus_cal_blob,
1196 						  bcm4377->taurus_cal_size);
1197 }
1198 
1199 static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377,
1200 						   const char *suffix)
1201 {
1202 	const struct firmware *fw;
1203 	char name0[64], name1[64];
1204 	int ret;
1205 
1206 	snprintf(name0, sizeof(name0), "brcm/brcmbt%04x%s-%s-%s.%s",
1207 		 bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type,
1208 		 bcm4377->vendor, suffix);
1209 	snprintf(name1, sizeof(name1), "brcm/brcmbt%04x%s-%s.%s",
1210 		 bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type,
1211 		 suffix);
1212 	dev_dbg(&bcm4377->pdev->dev, "Trying to load firmware: '%s' or '%s'\n",
1213 		name0, name1);
1214 
1215 	ret = firmware_request_nowarn(&fw, name0, &bcm4377->pdev->dev);
1216 	if (!ret)
1217 		return fw;
1218 	ret = firmware_request_nowarn(&fw, name1, &bcm4377->pdev->dev);
1219 	if (!ret)
1220 		return fw;
1221 
1222 	dev_err(&bcm4377->pdev->dev,
1223 		"Unable to load firmware; tried '%s' and '%s'\n", name0, name1);
1224 	return NULL;
1225 }
1226 
1227 static int bcm4377_send_ptb(struct bcm4377_data *bcm4377,
1228 			    const struct firmware *fw)
1229 {
1230 	struct sk_buff *skb;
1231 
1232 	skb = __hci_cmd_sync(bcm4377->hdev, 0xfd98, fw->size, fw->data,
1233 			     HCI_INIT_TIMEOUT);
1234 	/*
1235 	 * This command seems to always fail on more recent firmware versions
1236 	 * (even in traces taken from the macOS driver). It's unclear why this
1237 	 * happens but because the PTB file contains calibration and/or
1238 	 * regulatory data and may be required on older firmware we still try to
1239 	 * send it here just in case and just ignore if it fails.
1240 	 */
1241 	if (!IS_ERR(skb))
1242 		kfree_skb(skb);
1243 	return 0;
1244 }
1245 
1246 static int bcm4378_send_ptb_chunk(struct bcm4377_data *bcm4377,
1247 				  const void *data, size_t data_len,
1248 				  u16 blocks_left)
1249 {
1250 	struct bcm4378_hci_send_ptb_cmd cmd;
1251 	struct sk_buff *skb;
1252 
1253 	if (data_len > BCM4378_PTB_CHUNK_SIZE)
1254 		return -EINVAL;
1255 
1256 	memset(&cmd, 0, sizeof(cmd));
1257 	cmd.blocks_left = cpu_to_le16(blocks_left);
1258 	memcpy(cmd.data, data, data_len);
1259 
1260 	skb = __hci_cmd_sync(bcm4377->hdev, 0xfe0d, sizeof(cmd), &cmd,
1261 			     HCI_INIT_TIMEOUT);
1262 	if (IS_ERR(skb))
1263 		return PTR_ERR(skb);
1264 
1265 	kfree_skb(skb);
1266 	return 0;
1267 }
1268 
1269 static int bcm4378_send_ptb(struct bcm4377_data *bcm4377,
1270 			    const struct firmware *fw)
1271 {
1272 	size_t chunks = DIV_ROUND_UP(fw->size, (size_t)BCM4378_PTB_CHUNK_SIZE);
1273 	size_t i, left, transfer_len;
1274 	int ret;
1275 
1276 	for (i = 0, left = fw->size; i < chunks; ++i, left -= transfer_len) {
1277 		transfer_len = min_t(size_t, left, BCM4378_PTB_CHUNK_SIZE);
1278 
1279 		dev_dbg(&bcm4377->pdev->dev, "sending ptb chunk %zu/%zu\n",
1280 			i + 1, chunks);
1281 		ret = bcm4378_send_ptb_chunk(
1282 			bcm4377, fw->data + i * BCM4378_PTB_CHUNK_SIZE,
1283 			transfer_len, chunks - i - 1);
1284 		if (ret) {
1285 			dev_err(&bcm4377->pdev->dev,
1286 				"sending ptb chunk %zu failed (%d)", i, ret);
1287 			return ret;
1288 		}
1289 	}
1290 
1291 	return 0;
1292 }
1293 
1294 static int bcm4377_hci_open(struct hci_dev *hdev)
1295 {
1296 	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
1297 	int ret;
1298 
1299 	dev_dbg(&bcm4377->pdev->dev, "creating rings\n");
1300 
1301 	ret = bcm4377_create_completion_ring(bcm4377,
1302 					     &bcm4377->hci_acl_ack_ring);
1303 	if (ret)
1304 		return ret;
1305 	ret = bcm4377_create_completion_ring(bcm4377,
1306 					     &bcm4377->hci_acl_event_ring);
1307 	if (ret)
1308 		goto destroy_hci_acl_ack;
1309 	ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
1310 	if (ret)
1311 		goto destroy_hci_acl_event;
1312 	ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_event_ring);
1313 	if (ret)
1314 		goto destroy_sco_ack;
1315 	dev_dbg(&bcm4377->pdev->dev,
1316 		"all completion rings successfully created!\n");
1317 
1318 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
1319 	if (ret)
1320 		goto destroy_sco_event;
1321 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
1322 	if (ret)
1323 		goto destroy_hci_h2d;
1324 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
1325 	if (ret)
1326 		goto destroy_hci_d2h;
1327 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
1328 	if (ret)
1329 		goto destroy_sco_h2d;
1330 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
1331 	if (ret)
1332 		goto destroy_sco_d2h;
1333 	ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
1334 	if (ret)
1335 		goto destroy_acl_h2d;
1336 	dev_dbg(&bcm4377->pdev->dev,
1337 		"all transfer rings successfully created!\n");
1338 
1339 	return 0;
1340 
1341 destroy_acl_h2d:
1342 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
1343 destroy_sco_d2h:
1344 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
1345 destroy_sco_h2d:
1346 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
1347 destroy_hci_d2h:
1348 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
1349 destroy_hci_h2d:
1350 	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
1351 destroy_sco_event:
1352 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring);
1353 destroy_sco_ack:
1354 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
1355 destroy_hci_acl_event:
1356 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
1357 destroy_hci_acl_ack:
1358 	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);
1359 
1360 	dev_err(&bcm4377->pdev->dev, "Creating rings failed with %d\n", ret);
1361 	return ret;
1362 }
1363 
/*
 * hci_dev close callback: tear down every transfer and completion ring
 * created in bcm4377_hci_open, in reverse creation order (transfer rings
 * first so nothing references a completion ring being destroyed).
 * Always returns 0.
 */
static int bcm4377_hci_close(struct hci_dev *hdev)
{
	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);

	dev_dbg(&bcm4377->pdev->dev, "destroying rings in hci_close\n");

	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);

	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring);
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);

	return 0;
}
1384 
1385 static bool bcm4377_is_valid_bdaddr(struct bcm4377_data *bcm4377,
1386 				    bdaddr_t *addr)
1387 {
1388 	if (addr->b[0] != 0x93)
1389 		return true;
1390 	if (addr->b[1] != 0x76)
1391 		return true;
1392 	if (addr->b[2] != 0x00)
1393 		return true;
1394 	if (addr->b[4] != (bcm4377->hw->id & 0xff))
1395 		return true;
1396 	if (addr->b[5] != (bcm4377->hw->id >> 8))
1397 		return true;
1398 	return false;
1399 }
1400 
1401 static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
1402 {
1403 	struct hci_rp_read_bd_addr *bda;
1404 	struct sk_buff *skb;
1405 
1406 	skb = __hci_cmd_sync(bcm4377->hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
1407 			     HCI_INIT_TIMEOUT);
1408 	if (IS_ERR(skb)) {
1409 		int err = PTR_ERR(skb);
1410 
1411 		dev_err(&bcm4377->pdev->dev, "HCI_OP_READ_BD_ADDR failed (%d)",
1412 			err);
1413 		return err;
1414 	}
1415 
1416 	if (skb->len != sizeof(*bda)) {
1417 		dev_err(&bcm4377->pdev->dev,
1418 			"HCI_OP_READ_BD_ADDR reply length invalid");
1419 		kfree_skb(skb);
1420 		return -EIO;
1421 	}
1422 
1423 	bda = (struct hci_rp_read_bd_addr *)skb->data;
1424 	if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
1425 		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
1426 
1427 	kfree_skb(skb);
1428 	return 0;
1429 }
1430 
1431 static int bcm4377_hci_setup(struct hci_dev *hdev)
1432 {
1433 	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
1434 	const struct firmware *fw;
1435 	int ret;
1436 
1437 	if (bcm4377->hw->send_calibration) {
1438 		ret = bcm4377->hw->send_calibration(bcm4377);
1439 		if (ret)
1440 			return ret;
1441 	}
1442 
1443 	fw = bcm4377_request_blob(bcm4377, "ptb");
1444 	if (!fw) {
1445 		dev_err(&bcm4377->pdev->dev, "failed to load PTB data");
1446 		return -ENOENT;
1447 	}
1448 
1449 	ret = bcm4377->hw->send_ptb(bcm4377, fw);
1450 	release_firmware(fw);
1451 	if (ret)
1452 		return ret;
1453 
1454 	return bcm4377_check_bdaddr(bcm4377);
1455 }
1456 
1457 static int bcm4377_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1458 {
1459 	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
1460 	struct bcm4377_transfer_ring *ring;
1461 	int ret;
1462 
1463 	switch (hci_skb_pkt_type(skb)) {
1464 	case HCI_COMMAND_PKT:
1465 		hdev->stat.cmd_tx++;
1466 		ring = &bcm4377->hci_h2d_ring;
1467 		break;
1468 
1469 	case HCI_ACLDATA_PKT:
1470 		hdev->stat.acl_tx++;
1471 		ring = &bcm4377->acl_h2d_ring;
1472 		break;
1473 
1474 	case HCI_SCODATA_PKT:
1475 		hdev->stat.sco_tx++;
1476 		ring = &bcm4377->sco_h2d_ring;
1477 		break;
1478 
1479 	default:
1480 		return -EILSEQ;
1481 	}
1482 
1483 	ret = bcm4377_enqueue(bcm4377, ring, skb->data, skb->len, false);
1484 	if (ret < 0) {
1485 		hdev->stat.err_tx++;
1486 		return ret;
1487 	}
1488 
1489 	hdev->stat.byte_tx += skb->len;
1490 	kfree_skb(skb);
1491 	return ret;
1492 }
1493 
1494 static int bcm4377_hci_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
1495 {
1496 	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
1497 	struct sk_buff *skb;
1498 	int err;
1499 
1500 	skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
1501 	if (IS_ERR(skb)) {
1502 		err = PTR_ERR(skb);
1503 		dev_err(&bcm4377->pdev->dev,
1504 			"Change address command failed (%d)", err);
1505 		return err;
1506 	}
1507 	kfree_skb(skb);
1508 
1509 	return 0;
1510 }
1511 
/*
 * Validate the configuration of @ring and allocate its devres-managed DMA
 * backing: the ring itself, the optional per-msgid completion pointers
 * (allow_wait) and the optional mapped payload buffers. Virtual rings get
 * no host-side allocations at all.
 *
 * Returns 0 on success, -EINVAL for inconsistent configuration, -ENOMEM
 * on allocation failure. Partial allocations are released by devres.
 */
static int bcm4377_alloc_transfer_ring(struct bcm4377_data *bcm4377,
				       struct bcm4377_transfer_ring *ring)
{
	size_t entry_size;

	spin_lock_init(&ring->lock);
	/* payload sizes are passed to the device in 4-byte units */
	ring->payload_size = ALIGN(ring->payload_size, 4);
	ring->mapped_payload_size = ALIGN(ring->mapped_payload_size, 4);

	if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE)
		return -EINVAL;
	if (ring->n_entries > BCM4377_MAX_RING_SIZE)
		return -EINVAL;
	/* virtual rings can't be enqueued to, so waiting makes no sense */
	if (ring->virtual && ring->allow_wait)
		return -EINVAL;

	/* d2h buffer rings must use mapped payloads exclusively */
	if (ring->d2h_buffers_only) {
		if (ring->virtual)
			return -EINVAL;
		if (ring->payload_size)
			return -EINVAL;
		if (!ring->mapped_payload_size)
			return -EINVAL;
	}
	/* virtual rings exist only on the device; nothing to allocate */
	if (ring->virtual)
		return 0;

	/* each entry is a header followed by its in-place footer payload */
	entry_size =
		ring->payload_size + sizeof(struct bcm4377_xfer_ring_entry);
	ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev,
					 ring->n_entries * entry_size,
					 &ring->ring_dma, GFP_KERNEL);
	if (!ring->ring)
		return -ENOMEM;

	if (ring->allow_wait) {
		ring->events = devm_kcalloc(&bcm4377->pdev->dev,
					    ring->n_entries,
					    sizeof(*ring->events), GFP_KERNEL);
		if (!ring->events)
			return -ENOMEM;
	}

	if (ring->mapped_payload_size) {
		ring->payloads = dmam_alloc_coherent(
			&bcm4377->pdev->dev,
			ring->n_entries * ring->mapped_payload_size,
			&ring->payloads_dma, GFP_KERNEL);
		if (!ring->payloads)
			return -ENOMEM;
	}

	return 0;
}
1566 
1567 static int bcm4377_alloc_completion_ring(struct bcm4377_data *bcm4377,
1568 					 struct bcm4377_completion_ring *ring)
1569 {
1570 	size_t entry_size;
1571 
1572 	ring->payload_size = ALIGN(ring->payload_size, 4);
1573 	if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE)
1574 		return -EINVAL;
1575 	if (ring->n_entries > BCM4377_MAX_RING_SIZE)
1576 		return -EINVAL;
1577 
1578 	entry_size = ring->payload_size +
1579 		     sizeof(struct bcm4377_completion_ring_entry);
1580 
1581 	ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev,
1582 					 ring->n_entries * entry_size,
1583 					 &ring->ring_dma, GFP_KERNEL);
1584 	if (!ring->ring)
1585 		return -ENOMEM;
1586 	return 0;
1587 }
1588 
/*
 * Allocate and fill the shared context structure the firmware reads at
 * boot: the ring-state head/tail arrays and the control transfer/completion
 * ring descriptions. All buffers are devres-managed DMA-coherent memory.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int bcm4377_init_context(struct bcm4377_data *bcm4377)
{
	struct device *dev = &bcm4377->pdev->dev;
	dma_addr_t peripheral_info_dma;

	bcm4377->ctx = dmam_alloc_coherent(dev, sizeof(*bcm4377->ctx),
					   &bcm4377->ctx_dma, GFP_KERNEL);
	if (!bcm4377->ctx)
		return -ENOMEM;
	memset(bcm4377->ctx, 0, sizeof(*bcm4377->ctx));

	/* shared head/tail indices for all transfer and completion rings */
	bcm4377->ring_state =
		dmam_alloc_coherent(dev, sizeof(*bcm4377->ring_state),
				    &bcm4377->ring_state_dma, GFP_KERNEL);
	if (!bcm4377->ring_state)
		return -ENOMEM;
	memset(bcm4377->ring_state, 0, sizeof(*bcm4377->ring_state));

	bcm4377->ctx->version = cpu_to_le16(1);
	bcm4377->ctx->size = cpu_to_le16(sizeof(*bcm4377->ctx));
	bcm4377->ctx->enabled_caps = cpu_to_le32(2);

	/*
	 * The BT device will write 0x20 bytes of data to this buffer but
	 * the exact contents are unknown. It only needs to exist for BT
	 * to work such that we can just allocate and then ignore it.
	 */
	if (!dmam_alloc_coherent(&bcm4377->pdev->dev, 0x20,
				 &peripheral_info_dma, GFP_KERNEL))
		return -ENOMEM;
	bcm4377->ctx->peripheral_info_addr = cpu_to_le64(peripheral_info_dma);

	/* point the device at the head/tail arrays inside ring_state */
	bcm4377->ctx->xfer_ring_heads_addr = cpu_to_le64(
		bcm4377->ring_state_dma +
		offsetof(struct bcm4377_ring_state, xfer_ring_head));
	bcm4377->ctx->xfer_ring_tails_addr = cpu_to_le64(
		bcm4377->ring_state_dma +
		offsetof(struct bcm4377_ring_state, xfer_ring_tail));
	bcm4377->ctx->completion_ring_heads_addr = cpu_to_le64(
		bcm4377->ring_state_dma +
		offsetof(struct bcm4377_ring_state, completion_ring_head));
	bcm4377->ctx->completion_ring_tails_addr = cpu_to_le64(
		bcm4377->ring_state_dma +
		offsetof(struct bcm4377_ring_state, completion_ring_tail));

	bcm4377->ctx->n_completion_rings =
		cpu_to_le16(BCM4377_N_COMPLETION_RINGS);
	bcm4377->ctx->n_xfer_rings = cpu_to_le16(BCM4377_N_TRANSFER_RINGS);

	/* the control completion ring is set up directly in the context */
	bcm4377->ctx->control_completion_ring_addr =
		cpu_to_le64(bcm4377->control_ack_ring.ring_dma);
	bcm4377->ctx->control_completion_ring_n_entries =
		cpu_to_le16(bcm4377->control_ack_ring.n_entries);
	bcm4377->ctx->control_completion_ring_doorbell = cpu_to_le16(0xffff);
	bcm4377->ctx->control_completion_ring_msi = 0;
	bcm4377->ctx->control_completion_ring_header_size = 0;
	bcm4377->ctx->control_completion_ring_footer_size = 0;

	/* ...and so is the control host->device transfer ring */
	bcm4377->ctx->control_xfer_ring_addr =
		cpu_to_le64(bcm4377->control_h2d_ring.ring_dma);
	bcm4377->ctx->control_xfer_ring_n_entries =
		cpu_to_le16(bcm4377->control_h2d_ring.n_entries);
	bcm4377->ctx->control_xfer_ring_doorbell =
		cpu_to_le16(bcm4377->control_h2d_ring.doorbell);
	bcm4377->ctx->control_xfer_ring_msi = 0;
	bcm4377->ctx->control_xfer_ring_header_size = 0;
	bcm4377->ctx->control_xfer_ring_footer_size =
		bcm4377->control_h2d_ring.payload_size / 4;

	dev_dbg(&bcm4377->pdev->dev, "context initialized at IOVA %pad",
		&bcm4377->ctx_dma);

	return 0;
}
1663 
/*
 * Configure and allocate every transfer and completion ring used by the
 * driver. Called once from probe; all allocations are devres-managed, so
 * no explicit cleanup is required on error.
 *
 * Returns 0 on success or a negative error from the ring allocators.
 */
static int bcm4377_prepare_rings(struct bcm4377_data *bcm4377)
{
	int ret;

	/*
	 * Even though many of these settings appear to be configurable
	 * when sending the "create ring" messages most of these are
	 * actually hardcoded in some (and quite possibly all) firmware versions
	 * and changing them on the host has no effect.
	 * Specifically, this applies to at least the doorbells, the transfer
	 * and completion ring ids and their mapping (e.g. both HCI and ACL
	 * entries will always be queued in completion rings 1 and 2 no matter
	 * what we configure here).
	 */
	bcm4377->control_ack_ring.ring_id = BCM4377_ACK_RING_CONTROL;
	bcm4377->control_ack_ring.n_entries = 32;
	bcm4377->control_ack_ring.transfer_rings =
		BIT(BCM4377_XFER_RING_CONTROL);

	bcm4377->hci_acl_ack_ring.ring_id = BCM4377_ACK_RING_HCI_ACL;
	bcm4377->hci_acl_ack_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES;
	bcm4377->hci_acl_ack_ring.transfer_rings =
		BIT(BCM4377_XFER_RING_HCI_H2D) | BIT(BCM4377_XFER_RING_ACL_H2D);
	bcm4377->hci_acl_ack_ring.delay = 1000;

	/*
	 * A payload size of MAX_EVENT_PAYLOAD_SIZE is enough here since large
	 * ACL packets will be transmitted inside buffers mapped via
	 * acl_d2h_ring anyway.
	 */
	bcm4377->hci_acl_event_ring.ring_id = BCM4377_EVENT_RING_HCI_ACL;
	bcm4377->hci_acl_event_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE;
	bcm4377->hci_acl_event_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES;
	bcm4377->hci_acl_event_ring.transfer_rings =
		BIT(BCM4377_XFER_RING_HCI_D2H) | BIT(BCM4377_XFER_RING_ACL_D2H);
	bcm4377->hci_acl_event_ring.delay = 1000;

	bcm4377->sco_ack_ring.ring_id = BCM4377_ACK_RING_SCO;
	bcm4377->sco_ack_ring.n_entries = BCM4377_RING_N_ENTRIES;
	bcm4377->sco_ack_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_H2D);

	bcm4377->sco_event_ring.ring_id = BCM4377_EVENT_RING_SCO;
	bcm4377->sco_event_ring.payload_size = MAX_SCO_PAYLOAD_SIZE;
	bcm4377->sco_event_ring.n_entries = BCM4377_RING_N_ENTRIES;
	bcm4377->sco_event_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_D2H);

	/* control ring: carries create/destroy-ring messages, waitable */
	bcm4377->control_h2d_ring.ring_id = BCM4377_XFER_RING_CONTROL;
	bcm4377->control_h2d_ring.doorbell = BCM4377_DOORBELL_CONTROL;
	bcm4377->control_h2d_ring.payload_size = BCM4377_CONTROL_MSG_SIZE;
	bcm4377->control_h2d_ring.completion_ring = BCM4377_ACK_RING_CONTROL;
	bcm4377->control_h2d_ring.allow_wait = true;
	bcm4377->control_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;

	bcm4377->hci_h2d_ring.ring_id = BCM4377_XFER_RING_HCI_H2D;
	bcm4377->hci_h2d_ring.doorbell = BCM4377_DOORBELL_HCI_H2D;
	bcm4377->hci_h2d_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE;
	bcm4377->hci_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL;
	bcm4377->hci_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;

	bcm4377->hci_d2h_ring.ring_id = BCM4377_XFER_RING_HCI_D2H;
	bcm4377->hci_d2h_ring.doorbell = BCM4377_DOORBELL_HCI_D2H;
	bcm4377->hci_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL;
	bcm4377->hci_d2h_ring.virtual = true;
	bcm4377->hci_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES;

	bcm4377->sco_h2d_ring.ring_id = BCM4377_XFER_RING_SCO_H2D;
	bcm4377->sco_h2d_ring.doorbell = BCM4377_DOORBELL_SCO;
	bcm4377->sco_h2d_ring.payload_size = MAX_SCO_PAYLOAD_SIZE;
	bcm4377->sco_h2d_ring.completion_ring = BCM4377_ACK_RING_SCO;
	bcm4377->sco_h2d_ring.sync = true;
	bcm4377->sco_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;

	bcm4377->sco_d2h_ring.ring_id = BCM4377_XFER_RING_SCO_D2H;
	bcm4377->sco_d2h_ring.doorbell = BCM4377_DOORBELL_SCO;
	bcm4377->sco_d2h_ring.completion_ring = BCM4377_EVENT_RING_SCO;
	bcm4377->sco_d2h_ring.virtual = true;
	bcm4377->sco_d2h_ring.sync = true;
	bcm4377->sco_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES;

	/*
	 * This ring has to use mapped_payload_size because the largest ACL
	 * packet doesn't fit inside the largest possible footer
	 */
	bcm4377->acl_h2d_ring.ring_id = BCM4377_XFER_RING_ACL_H2D;
	bcm4377->acl_h2d_ring.doorbell = BCM4377_DOORBELL_ACL_H2D;
	bcm4377->acl_h2d_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE;
	bcm4377->acl_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL;
	bcm4377->acl_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;

	/*
	 * This ring only contains empty buffers to be used by incoming
	 * ACL packets that do not fit inside the footer of hci_acl_event_ring
	 */
	bcm4377->acl_d2h_ring.ring_id = BCM4377_XFER_RING_ACL_D2H;
	bcm4377->acl_d2h_ring.doorbell = BCM4377_DOORBELL_ACL_D2H;
	bcm4377->acl_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL;
	bcm4377->acl_d2h_ring.d2h_buffers_only = true;
	bcm4377->acl_d2h_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE;
	bcm4377->acl_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES;

	/*
	 * no need for any cleanup since this is only called from _probe
	 * and only devres-managed allocations are used
	 */
	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->control_h2d_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_completion_ring(bcm4377,
					    &bcm4377->control_ack_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_completion_ring(bcm4377,
					    &bcm4377->hci_acl_ack_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_completion_ring(bcm4377,
					    &bcm4377->hci_acl_event_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
	if (ret)
		return ret;
	ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_event_ring);
	if (ret)
		return ret;

	dev_dbg(&bcm4377->pdev->dev, "all rings allocated and prepared\n");

	return 0;
}
1813 
1814 static int bcm4377_boot(struct bcm4377_data *bcm4377)
1815 {
1816 	const struct firmware *fw;
1817 	void *bfr;
1818 	dma_addr_t fw_dma;
1819 	int ret = 0;
1820 	u32 bootstage, rti_status;
1821 
1822 	bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
1823 	rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
1824 
1825 	if (bootstage != 0) {
1826 		dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n",
1827 			bootstage);
1828 		return -EINVAL;
1829 	}
1830 
1831 	if (rti_status != 0) {
1832 		dev_err(&bcm4377->pdev->dev, "RTI status is %d and not 0\n",
1833 			rti_status);
1834 		return -EINVAL;
1835 	}
1836 
1837 	fw = bcm4377_request_blob(bcm4377, "bin");
1838 	if (!fw) {
1839 		dev_err(&bcm4377->pdev->dev, "Failed to load firmware\n");
1840 		return -ENOENT;
1841 	}
1842 
1843 	bfr = dma_alloc_coherent(&bcm4377->pdev->dev, fw->size, &fw_dma,
1844 				 GFP_KERNEL);
1845 	if (!bfr) {
1846 		ret = -ENOMEM;
1847 		goto out_release_fw;
1848 	}
1849 
1850 	memcpy(bfr, fw->data, fw->size);
1851 
1852 	iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_LO);
1853 	iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_HI);
1854 	iowrite32(BCM4377_DMA_MASK,
1855 		  bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE);
1856 
1857 	iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO);
1858 	iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI);
1859 	iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE);
1860 	iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL);
1861 
1862 	dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n");
1863 
1864 	ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
1865 							BCM4377_TIMEOUT);
1866 	if (ret == 0) {
1867 		ret = -ETIMEDOUT;
1868 		goto out_dma_free;
1869 	} else if (ret < 0) {
1870 		goto out_dma_free;
1871 	}
1872 
1873 	if (bcm4377->bootstage != 2) {
1874 		dev_err(&bcm4377->pdev->dev, "boostage %d != 2\n",
1875 			bcm4377->bootstage);
1876 		ret = -ENXIO;
1877 		goto out_dma_free;
1878 	}
1879 
1880 	dev_dbg(&bcm4377->pdev->dev, "firmware has booted (stage = %x)\n",
1881 		bcm4377->bootstage);
1882 	ret = 0;
1883 
1884 out_dma_free:
1885 	dma_free_coherent(&bcm4377->pdev->dev, fw->size, bfr, fw_dma);
1886 out_release_fw:
1887 	release_firmware(fw);
1888 	return ret;
1889 }
1890 
/*
 * bcm4377_setup_rti() - advance the chip's RTI state machine to state 2
 *
 * RTI is first moved to state 1; the DMA window is then re-opened and the
 * "Converged IPC" context structure is handed to the chip before requesting
 * state 2. Each acked state shows up in bcm4377->rti_status (updated
 * elsewhere, presumably by the IRQ handler, before bcm4377->event is
 * completed). Once state 2 is reached the control ring can be used.
 *
 * Returns 0 on success or a negative errno.
 */
static int bcm4377_setup_rti(struct bcm4377_data *bcm4377)
{
	int ret;

	dev_dbg(&bcm4377->pdev->dev, "starting RTI\n");
	iowrite32(1, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);

	ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
							BCM4377_TIMEOUT);
	if (ret == 0) {
		dev_err(&bcm4377->pdev->dev,
			"timed out while waiting for RTI to transition to state 1");
		return -ETIMEDOUT;
	} else if (ret < 0) {
		return ret;
	}

	if (bcm4377->rti_status != 1) {
		dev_err(&bcm4377->pdev->dev, "RTI did not ack state 1 (%d)\n",
			bcm4377->rti_status);
		return -ENODEV;
	}
	dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n");

	/* allow access to the entire IOVA space again */
	iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO);
	iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI);
	iowrite32(BCM4377_DMA_MASK,
		  bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE);

	/* setup "Converged IPC" context */
	iowrite32(lower_32_bits(bcm4377->ctx_dma),
		  bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO);
	iowrite32(upper_32_bits(bcm4377->ctx_dma),
		  bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI);
	iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);

	ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
							BCM4377_TIMEOUT);
	if (ret == 0) {
		dev_err(&bcm4377->pdev->dev,
			"timed out while waiting for RTI to transition to state 2");
		return -ETIMEDOUT;
	} else if (ret < 0) {
		return ret;
	}

	if (bcm4377->rti_status != 2) {
		dev_err(&bcm4377->pdev->dev, "RTI did not ack state 2 (%d)\n",
			bcm4377->rti_status);
		return -ENODEV;
	}

	dev_dbg(&bcm4377->pdev->dev,
		"RTI is in state 2; control ring is ready\n");
	/* completions on the control ring may be processed from now on */
	bcm4377->control_ack_ring.enabled = true;

	return 0;
}
1950 
1951 static int bcm4377_parse_otp_board_params(struct bcm4377_data *bcm4377,
1952 					  char tag, const char *val, size_t len)
1953 {
1954 	if (tag != 'V')
1955 		return 0;
1956 	if (len >= sizeof(bcm4377->vendor))
1957 		return -EINVAL;
1958 
1959 	strscpy(bcm4377->vendor, val, len + 1);
1960 	return 0;
1961 }
1962 
1963 static int bcm4377_parse_otp_chip_params(struct bcm4377_data *bcm4377, char tag,
1964 					 const char *val, size_t len)
1965 {
1966 	size_t idx = 0;
1967 
1968 	if (tag != 's')
1969 		return 0;
1970 	if (len >= sizeof(bcm4377->stepping))
1971 		return -EINVAL;
1972 
1973 	while (len != 0) {
1974 		bcm4377->stepping[idx] = tolower(val[idx]);
1975 		if (val[idx] == '\0')
1976 			return 0;
1977 
1978 		idx++;
1979 		len--;
1980 	}
1981 
1982 	bcm4377->stepping[idx] = '\0';
1983 	return 0;
1984 }
1985 
1986 static int bcm4377_parse_otp_str(struct bcm4377_data *bcm4377, const u8 *str,
1987 				 enum bcm4377_otp_params_type type)
1988 {
1989 	const char *p;
1990 	int ret;
1991 
1992 	p = skip_spaces(str);
1993 	while (*p) {
1994 		char tag = *p++;
1995 		const char *end;
1996 		size_t len;
1997 
1998 		if (*p++ != '=') /* implicit NUL check */
1999 			return -EINVAL;
2000 
2001 		/* *p might be NUL here, if so end == p and len == 0 */
2002 		end = strchrnul(p, ' ');
2003 		len = end - p;
2004 
2005 		/* leave 1 byte for NUL in destination string */
2006 		if (len > (BCM4377_OTP_MAX_PARAM_LEN - 1))
2007 			return -EINVAL;
2008 
2009 		switch (type) {
2010 		case BCM4377_OTP_BOARD_PARAMS:
2011 			ret = bcm4377_parse_otp_board_params(bcm4377, tag, p,
2012 							     len);
2013 			break;
2014 		case BCM4377_OTP_CHIP_PARAMS:
2015 			ret = bcm4377_parse_otp_chip_params(bcm4377, tag, p,
2016 							    len);
2017 			break;
2018 		default:
2019 			ret = -EINVAL;
2020 			break;
2021 		}
2022 
2023 		if (ret)
2024 			return ret;
2025 
2026 		/* Skip to next arg, if any */
2027 		p = skip_spaces(end);
2028 	}
2029 
2030 	return 0;
2031 }
2032 
/*
 * Parse a BCM4377_OTP_SYS_VENDOR entry: a 4-byte header followed by two
 * consecutive NUL-terminated strings, chip params (stepping) first and
 * board params (vendor) second. Both strings are bounds-checked against
 * @size before being handed to bcm4377_parse_otp_str().
 *
 * Returns 0 on success, -EINVAL on a malformed entry or if stepping or
 * vendor could not be extracted.
 */
static int bcm4377_parse_otp_sys_vendor(struct bcm4377_data *bcm4377, u8 *otp,
					size_t size)
{
	int idx = 4;
	const char *chip_params;
	const char *board_params;
	int ret;

	/* 4-byte header and two empty strings */
	if (size < 6)
		return -EINVAL;

	if (get_unaligned_le32(otp) != BCM4377_OTP_VENDOR_HDR)
		return -EINVAL;

	chip_params = &otp[idx];

	/* Skip first string, including terminator */
	idx += strnlen(chip_params, size - idx) + 1;
	/* if the first string was unterminated, idx is now > size */
	if (idx >= size)
		return -EINVAL;

	board_params = &otp[idx];

	/* Skip to terminator of second string */
	idx += strnlen(board_params, size - idx);
	/* idx == size here means the second string is unterminated */
	if (idx >= size)
		return -EINVAL;

	/* At this point both strings are guaranteed NUL-terminated */
	dev_dbg(&bcm4377->pdev->dev,
		"OTP: chip_params='%s' board_params='%s'\n", chip_params,
		board_params);

	ret = bcm4377_parse_otp_str(bcm4377, chip_params,
				    BCM4377_OTP_CHIP_PARAMS);
	if (ret)
		return ret;

	ret = bcm4377_parse_otp_str(bcm4377, board_params,
				    BCM4377_OTP_BOARD_PARAMS);
	if (ret)
		return ret;

	/* both parsers must have produced a non-empty result */
	if (!bcm4377->stepping[0] || !bcm4377->vendor[0])
		return -EINVAL;

	dev_dbg(&bcm4377->pdev->dev, "OTP: stepping=%s, vendor=%s\n",
		bcm4377->stepping, bcm4377->vendor);
	return 0;
}
2084 
2085 static int bcm4377_parse_otp(struct bcm4377_data *bcm4377)
2086 {
2087 	u8 *otp;
2088 	int i;
2089 	int ret = -ENOENT;
2090 
2091 	otp = kzalloc(BCM4377_OTP_SIZE, GFP_KERNEL);
2092 	if (!otp)
2093 		return -ENOMEM;
2094 
2095 	for (i = 0; i < BCM4377_OTP_SIZE; ++i)
2096 		otp[i] = ioread8(bcm4377->bar0 + bcm4377->hw->otp_offset + i);
2097 
2098 	i = 0;
2099 	while (i < (BCM4377_OTP_SIZE - 1)) {
2100 		u8 type = otp[i];
2101 		u8 length = otp[i + 1];
2102 
2103 		if (type == 0)
2104 			break;
2105 
2106 		if ((i + 2 + length) > BCM4377_OTP_SIZE)
2107 			break;
2108 
2109 		switch (type) {
2110 		case BCM4377_OTP_SYS_VENDOR:
2111 			dev_dbg(&bcm4377->pdev->dev,
2112 				"OTP @ 0x%x (%d): SYS_VENDOR", i, length);
2113 			ret = bcm4377_parse_otp_sys_vendor(bcm4377, &otp[i + 2],
2114 							   length);
2115 			break;
2116 		case BCM4377_OTP_CIS:
2117 			dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): CIS", i,
2118 				length);
2119 			break;
2120 		default:
2121 			dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): unknown",
2122 				i, length);
2123 			break;
2124 		}
2125 
2126 		i += 2 + length;
2127 	}
2128 
2129 	kfree(otp);
2130 	return ret;
2131 }
2132 
/*
 * bcm4377_init_cfg() - program the chip's PCIe config space windows
 *
 * Writes the per-variant BAR0 window addresses and the fixed BAR0 core2 /
 * BAR2 window defaults so that the register blocks used by this driver
 * are reachable through the BARs, then adjusts the subsystem control
 * register (bit 16 is always set; bit 19 is cleared on variants that
 * require it).
 *
 * Returns 0 on success or the first failing config space access' errno.
 */
static int bcm4377_init_cfg(struct bcm4377_data *bcm4377)
{
	int ret;
	u32 ctrl;

	ret = pci_write_config_dword(bcm4377->pdev,
				     BCM4377_PCIECFG_BAR0_WINDOW1,
				     bcm4377->hw->bar0_window1);
	if (ret)
		return ret;

	ret = pci_write_config_dword(bcm4377->pdev,
				     BCM4377_PCIECFG_BAR0_WINDOW2,
				     bcm4377->hw->bar0_window2);
	if (ret)
		return ret;

	ret = pci_write_config_dword(
		bcm4377->pdev, BCM4377_PCIECFG_BAR0_CORE2_WINDOW1,
		BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT);
	if (ret)
		return ret;

	/* only some variants have (and need) the second core2 window */
	if (bcm4377->hw->has_bar0_core2_window2) {
		ret = pci_write_config_dword(bcm4377->pdev,
					     BCM4377_PCIECFG_BAR0_CORE2_WINDOW2,
					     bcm4377->hw->bar0_core2_window2);
		if (ret)
			return ret;
	}

	ret = pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_BAR2_WINDOW,
				     BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT);
	if (ret)
		return ret;

	/* read-modify-write the subsystem control register */
	ret = pci_read_config_dword(bcm4377->pdev,
				    BCM4377_PCIECFG_SUBSYSTEM_CTRL, &ctrl);
	if (ret)
		return ret;

	if (bcm4377->hw->clear_pciecfg_subsystem_ctrl_bit19)
		ctrl &= ~BIT(19);
	ctrl |= BIT(16);

	return pci_write_config_dword(bcm4377->pdev,
				      BCM4377_PCIECFG_SUBSYSTEM_CTRL, ctrl);
}
2181 
2182 static int bcm4377_probe_dmi(struct bcm4377_data *bcm4377)
2183 {
2184 	const struct dmi_system_id *board_type_dmi_id;
2185 
2186 	board_type_dmi_id = dmi_first_match(bcm4377_dmi_board_table);
2187 	if (board_type_dmi_id && board_type_dmi_id->driver_data) {
2188 		bcm4377->board_type = board_type_dmi_id->driver_data;
2189 		dev_dbg(&bcm4377->pdev->dev,
2190 			"found board type via DMI match: %s\n",
2191 			bcm4377->board_type);
2192 	}
2193 
2194 	return 0;
2195 }
2196 
2197 static int bcm4377_probe_of(struct bcm4377_data *bcm4377)
2198 {
2199 	struct device_node *np = bcm4377->pdev->dev.of_node;
2200 	int ret;
2201 
2202 	if (!np)
2203 		return 0;
2204 
2205 	ret = of_property_read_string(np, "brcm,board-type",
2206 				      &bcm4377->board_type);
2207 	if (ret) {
2208 		dev_err(&bcm4377->pdev->dev, "no brcm,board-type property\n");
2209 		return ret;
2210 	}
2211 
2212 	bcm4377->taurus_beamforming_cal_blob =
2213 		of_get_property(np, "brcm,taurus-bf-cal-blob",
2214 				&bcm4377->taurus_beamforming_cal_size);
2215 	if (!bcm4377->taurus_beamforming_cal_blob) {
2216 		dev_err(&bcm4377->pdev->dev,
2217 			"no brcm,taurus-bf-cal-blob property\n");
2218 		return -ENOENT;
2219 	}
2220 	bcm4377->taurus_cal_blob = of_get_property(np, "brcm,taurus-cal-blob",
2221 						   &bcm4377->taurus_cal_size);
2222 	if (!bcm4377->taurus_cal_blob) {
2223 		dev_err(&bcm4377->pdev->dev,
2224 			"no brcm,taurus-cal-blob property\n");
2225 		return -ENOENT;
2226 	}
2227 
2228 	return 0;
2229 }
2230 
/*
 * Disable ASPM for this device, both through the PCI core and by clearing
 * the link control bits directly as a fallback.
 */
static void bcm4377_disable_aspm(struct bcm4377_data *bcm4377)
{
	pci_disable_link_state(bcm4377->pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);

	/*
	 * pci_disable_link_state can fail if either CONFIG_PCIEASPM is disabled
	 * or if the BIOS hasn't handed over control to us. We must *always*
	 * disable ASPM for this device due to hardware errata though.
	 */
	pcie_capability_clear_word(bcm4377->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);
}
2244 
/* devres action: release the MSI vectors allocated during probe */
static void bcm4377_pci_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}
2249 
/* devres action: free the HCI device allocated during probe */
static void bcm4377_hci_free_dev(void *data)
{
	hci_free_dev(data);
}
2254 
/* devres action: unregister the HCI device registered during probe */
static void bcm4377_hci_unregister_dev(void *data)
{
	hci_unregister_dev(data);
}
2259 
/*
 * bcm4377_probe() - bring up a BCM4377/4378/4387 PCIe Bluetooth device
 *
 * All allocations and registrations below are devres-managed or covered
 * by devm actions, so error paths simply return and no explicit remove
 * callback is needed.
 */
static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct bcm4377_data *bcm4377;
	struct hci_dev *hdev;
	int ret, irq;

	/* see the comment above BCM4377_DMA_MASK for the unusual mask */
	ret = dma_set_mask_and_coherent(&pdev->dev, BCM4377_DMA_MASK);
	if (ret)
		return ret;

	bcm4377 = devm_kzalloc(&pdev->dev, sizeof(*bcm4377), GFP_KERNEL);
	if (!bcm4377)
		return -ENOMEM;

	bcm4377->pdev = pdev;
	/* driver_data is the enum bcm4377_chip index set in the devid table */
	bcm4377->hw = &bcm4377_hw_variants[id->driver_data];
	init_completion(&bcm4377->event);

	ret = bcm4377_prepare_rings(bcm4377);
	if (ret)
		return ret;

	ret = bcm4377_init_context(bcm4377);
	if (ret)
		return ret;

	/* the board type comes from either DMI or the devicetree */
	ret = bcm4377_probe_dmi(bcm4377);
	if (ret)
		return ret;
	ret = bcm4377_probe_of(bcm4377);
	if (ret)
		return ret;
	if (!bcm4377->board_type) {
		dev_err(&pdev->dev, "unable to determine board type\n");
		return -ENODEV;
	}

	if (bcm4377->hw->disable_aspm)
		bcm4377_disable_aspm(bcm4377);

	/* an FLR failure is only a warning; the chip may still come up */
	ret = pci_reset_function_locked(pdev);
	if (ret)
		dev_warn(
			&pdev->dev,
			"function level reset failed with %d; trying to continue anyway\n",
			ret);

	/*
	 * If this number is too low and we try to access any BAR too
	 * early the device will crash. Experiments have shown that
	 * approximately 50 msec is the minimum amount we have to wait.
	 * Let's double that to be safe.
	 */
	msleep(100);

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	/* config-space windows must be set up before touching the BARs */
	ret = bcm4377_init_cfg(bcm4377);
	if (ret)
		return ret;

	bcm4377->bar0 = pcim_iomap(pdev, 0, 0);
	if (!bcm4377->bar0)
		return -EBUSY;
	bcm4377->bar2 = pcim_iomap(pdev, 2, 0);
	if (!bcm4377->bar2)
		return -EBUSY;

	ret = bcm4377_parse_otp(bcm4377);
	if (ret) {
		dev_err(&pdev->dev, "Reading OTP failed with %d\n", ret);
		return ret;
	}

	/*
	 * Legacy interrupts result in an IRQ storm because we don't know where
	 * the interrupt mask and status registers for these chips are.
	 * MSIs are acked automatically instead.
	 */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0)
		return -ENODEV;
	ret = devm_add_action_or_reset(&pdev->dev, bcm4377_pci_free_irq_vectors,
				       pdev);
	if (ret)
		return ret;

	irq = pci_irq_vector(pdev, 0);
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(&pdev->dev, irq, bcm4377_irq, 0, "bcm4377",
			       bcm4377);
	if (ret)
		return ret;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;
	ret = devm_add_action_or_reset(&pdev->dev, bcm4377_hci_free_dev, hdev);
	if (ret)
		return ret;

	bcm4377->hdev = hdev;

	hdev->bus = HCI_PCI;
	hdev->open = bcm4377_hci_open;
	hdev->close = bcm4377_hci_close;
	hdev->send = bcm4377_hci_send_frame;
	hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
	hdev->setup = bcm4377_hci_setup;

	/* apply per-variant firmware quirks */
	if (bcm4377->hw->broken_mws_transport_config)
		set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
	if (bcm4377->hw->broken_ext_scan)
		set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
	if (bcm4377->hw->broken_le_coded)
		set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
	if (bcm4377->hw->broken_le_ext_adv_report_phy)
		set_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, &hdev->quirks);

	pci_set_drvdata(pdev, bcm4377);
	hci_set_drvdata(hdev, bcm4377);
	SET_HCIDEV_DEV(hdev, &pdev->dev);

	/* upload firmware and bring up the IPC rings before registering */
	ret = bcm4377_boot(bcm4377);
	if (ret)
		return ret;

	ret = bcm4377_setup_rti(bcm4377);
	if (ret)
		return ret;

	ret = hci_register_dev(hdev);
	if (ret)
		return ret;
	return devm_add_action_or_reset(&pdev->dev, bcm4377_hci_unregister_dev,
					hdev);
}
2402 
2403 static int bcm4377_suspend(struct pci_dev *pdev, pm_message_t state)
2404 {
2405 	struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev);
2406 	int ret;
2407 
2408 	ret = hci_suspend_dev(bcm4377->hdev);
2409 	if (ret)
2410 		return ret;
2411 
2412 	iowrite32(BCM4377_BAR0_SLEEP_CONTROL_QUIESCE,
2413 		  bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL);
2414 
2415 	return 0;
2416 }
2417 
/*
 * Legacy PCI resume hook: bring the firmware out of its quiesced state,
 * then resume the HCI device.
 */
static int bcm4377_resume(struct pci_dev *pdev)
{
	struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev);

	iowrite32(BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE,
		  bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL);

	return hci_resume_dev(bcm4377->hdev);
}
2427 
/*
 * DMI matches mapping Apple machines without a usable devicetree to their
 * board type string; driver_data is consumed by bcm4377_probe_dmi().
 */
static const struct dmi_system_id bcm4377_dmi_board_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"),
		},
		.driver_data = "apple,formosa",
	},
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro15,4"),
		},
		.driver_data = "apple,formosa",
	},
	{
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,3"),
		},
		.driver_data = "apple,formosa",
	},
	{}
};
2452 
/*
 * Per-variant configuration: BAR window addresses, OTP offset, quirk
 * flags and firmware upload hooks. Indexed by enum bcm4377_chip, which
 * bcm4377_probe() gets from the devid table's driver_data.
 */
static const struct bcm4377_hw bcm4377_hw_variants[] = {
	[BCM4377] = {
		.id = 0x4377,
		.otp_offset = 0x4120,
		.bar0_window1 = 0x1800b000,
		.bar0_window2 = 0x1810c000,
		.disable_aspm = true,
		.broken_ext_scan = true,
		.send_ptb = bcm4377_send_ptb,
	},

	[BCM4378] = {
		.id = 0x4378,
		.otp_offset = 0x4120,
		.bar0_window1 = 0x18002000,
		.bar0_window2 = 0x1810a000,
		.bar0_core2_window2 = 0x18107000,
		.has_bar0_core2_window2 = true,
		.broken_mws_transport_config = true,
		.broken_le_coded = true,
		.send_calibration = bcm4378_send_calibration,
		.send_ptb = bcm4378_send_ptb,
	},

	[BCM4387] = {
		.id = 0x4387,
		.otp_offset = 0x413c,
		.bar0_window1 = 0x18002000,
		.bar0_window2 = 0x18109000,
		.bar0_core2_window2 = 0x18106000,
		.has_bar0_core2_window2 = true,
		.clear_pciecfg_subsystem_ctrl_bit19 = true,
		.broken_mws_transport_config = true,
		.broken_le_coded = true,
		.broken_le_ext_adv_report_phy = true,
		.send_calibration = bcm4387_send_calibration,
		.send_ptb = bcm4378_send_ptb,
	},
};
2492 
/*
 * Expand to a pci_device_id entry for chip `id`; driver_data carries the
 * enum bcm4377_chip value used to index bcm4377_hw_variants.
 */
#define BCM4377_DEVID_ENTRY(id)                                             \
	{                                                                   \
		PCI_VENDOR_ID_BROADCOM, BCM##id##_DEVICE_ID, PCI_ANY_ID,    \
			PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, \
			BCM##id                                             \
	}
2499 
/* PCI IDs of all supported chips; exported for module autoloading */
static const struct pci_device_id bcm4377_devid_table[] = {
	BCM4377_DEVID_ENTRY(4377),
	BCM4377_DEVID_ENTRY(4378),
	BCM4377_DEVID_ENTRY(4387),
	{},
};
MODULE_DEVICE_TABLE(pci, bcm4377_devid_table);
2507 
static struct pci_driver bcm4377_pci_driver = {
	.name = "hci_bcm4377",
	.id_table = bcm4377_devid_table,
	.probe = bcm4377_probe,
	/* NOTE(review): legacy PCI PM callbacks; dev_pm_ops is the modern
	 * interface — confirm before converting. No .remove: everything in
	 * probe is devres-managed.
	 */
	.suspend = bcm4377_suspend,
	.resume = bcm4377_resume,
};
module_pci_driver(bcm4377_pci_driver);
2516 
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices");
MODULE_LICENSE("Dual MIT/GPL");
/* wildcards cover the per-board/per-stepping firmware file variants */
MODULE_FIRMWARE("brcm/brcmbt4377*.bin");
MODULE_FIRMWARE("brcm/brcmbt4377*.ptb");
MODULE_FIRMWARE("brcm/brcmbt4378*.bin");
MODULE_FIRMWARE("brcm/brcmbt4378*.ptb");
MODULE_FIRMWARE("brcm/brcmbt4387*.bin");
MODULE_FIRMWARE("brcm/brcmbt4387*.ptb");
2526