1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Bluetooth support for Intel PCIe devices
5 *
6 * Copyright (C) 2024 Intel Corporation
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/firmware.h>
12 #include <linux/pci.h>
13 #include <linux/wait.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16
17 #include <linux/unaligned.h>
18
19 #include <net/bluetooth/bluetooth.h>
20 #include <net/bluetooth/hci_core.h>
21
22 #include "btintel.h"
23 #include "btintel_pcie.h"
24
25 #define VERSION "0.1"
26
27 #define BTINTEL_PCI_DEVICE(dev, subdev) \
28 .vendor = PCI_VENDOR_ID_INTEL, \
29 .device = (dev), \
30 .subvendor = PCI_ANY_ID, \
31 .subdevice = (subdev), \
32 .driver_data = 0
33
34 #define POLL_INTERVAL_US 10
35
36 /* Intel Bluetooth PCIe device id table */
37 static const struct pci_device_id btintel_pcie_table[] = {
38 { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
39 { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
40 { 0 }
41 };
42 MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
43
44 /* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
45 #define BTINTEL_PCIE_HCI_TYPE_LEN 4
46 #define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
47 #define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
48 #define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
49 #define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
50 #define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
51
52 #define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
53
54 #define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
55 #define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
56
57 #define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
58 #define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
59
62 #define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
63 #define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
64
65 /* Alive interrupt context */
66 enum {
67 BTINTEL_PCIE_ROM,
68 BTINTEL_PCIE_FW_DL,
69 BTINTEL_PCIE_HCI_RESET,
70 BTINTEL_PCIE_INTEL_HCI_RESET1,
71 BTINTEL_PCIE_INTEL_HCI_RESET2,
72 BTINTEL_PCIE_D0,
73 BTINTEL_PCIE_D3
74 };
75
76 /* Structure for dbgc fragment buffer
77 * @buf_addr_lsb: LSB of the buffer's physical address
78 * @buf_addr_msb: MSB of the buffer's physical address
79 * @buf_size: Total size of the buffer
80 */
81 struct btintel_pcie_dbgc_ctxt_buf {
82 u32 buf_addr_lsb;
83 u32 buf_addr_msb;
84 u32 buf_size;
85 };
86
87 /* Structure for dbgc fragment
88 * @magic_num: 0XA5A5A5A5
89 * @ver: For Driver-FW compatibility
90 * @total_size: Total size of the payload debug info
91 * @num_buf: Num of allocated debug bufs
92 * @bufs: All buffer's addresses and sizes
93 */
94 struct btintel_pcie_dbgc_ctxt {
95 u32 magic_num;
96 u32 ver;
97 u32 total_size;
98 u32 num_buf;
99 struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
100 };
101
/* This function initializes the memory for DBGC buffers and formats the
 * DBGC fragment which consists of header info and DBGC buffer's LSB, MSB and
 * size as the payload
 */
static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
107 {
108 struct btintel_pcie_dbgc_ctxt db_frag;
109 struct data_buf *buf;
110 int i;
111
112 data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
113 data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
114 sizeof(*buf), GFP_KERNEL);
115 if (!data->dbgc.bufs)
116 return -ENOMEM;
117
118 data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
119 data->dbgc.count *
120 BTINTEL_PCIE_DBGC_BUFFER_SIZE,
121 &data->dbgc.buf_p_addr,
122 GFP_KERNEL | __GFP_NOWARN);
123 if (!data->dbgc.buf_v_addr)
124 return -ENOMEM;
125
126 data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
127 sizeof(struct btintel_pcie_dbgc_ctxt),
128 &data->dbgc.frag_p_addr,
129 GFP_KERNEL | __GFP_NOWARN);
130 if (!data->dbgc.frag_v_addr)
131 return -ENOMEM;
132
133 data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
134
135 db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
136 db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
137 db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
138 db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
139
140 for (i = 0; i < data->dbgc.count; i++) {
141 buf = &data->dbgc.bufs[i];
142 buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
143 buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
144 db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
145 db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
146 db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
147 }
148
149 memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
150 return 0;
151 }
152
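/* Log the head and tail indices of the transfer (TR) and completion (CR)
 * rings for the given queue.
 */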
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
				     u16 queue_num)
155 {
156 bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u",
157 queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
158 ia->tr_hia[queue_num], ia->tr_tia[queue_num],
159 ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
160 }
161
static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
				   u16 index)
164 {
165 bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
166 index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
167 }
168
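/* Walk back from the given MSI-X entry to the first entry of the
 * msix_entries[] array and use container_of() to recover the driver
 * private data that embeds it.
 */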
static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
170 {
171 u8 queue = entry->entry;
172 struct msix_entry *entries = entry - queue;
173
174 return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
175 }
176
177 /* Set the doorbell for TXQ to notify the device that @index (actually index-1)
178 * of the TFD is updated and ready to transmit.
179 */
static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
181 {
182 u32 val;
183
184 val = index;
185 val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
186
187 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
188 }
189
/* Copy the data to the next (@tfd_index) data buffer and update the TFD
 * (transfer descriptor) with the data length and the DMA address of the
 * data buffer.
 */
static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
				    struct sk_buff *skb)
195 {
196 struct data_buf *buf;
197 struct tfd *tfd;
198
199 tfd = &txq->tfds[tfd_index];
200 memset(tfd, 0, sizeof(*tfd));
201
202 buf = &txq->bufs[tfd_index];
203
204 tfd->size = skb->len;
205 tfd->addr = buf->data_p_addr;
206
207 /* Copy the outgoing data to DMA buffer */
208 memcpy(buf->data, skb->data, tfd->size);
209 }
210
static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
212 {
213 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
214 u16 cr_hia, cr_tia;
215 u32 reg, mbox_reg;
216 struct sk_buff *skb;
217 u8 buf[80];
218
219 skb = alloc_skb(1024, GFP_ATOMIC);
220 if (!skb)
221 return;
222
223 snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
224 bt_dev_dbg(hdev, "%s", buf);
225 skb_put_data(skb, buf, strlen(buf));
226
227 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
228 snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
229 bt_dev_dbg(hdev, "%s", buf);
230 skb_put_data(skb, buf, strlen(buf));
231 data->boot_stage_cache = reg;
232
233 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
234 snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
235 skb_put_data(skb, buf, strlen(buf));
236 bt_dev_dbg(hdev, "%s", buf);
237
238 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
239 snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
240 skb_put_data(skb, buf, strlen(buf));
241 bt_dev_dbg(hdev, "%s", buf);
242
243 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
244 snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
245 skb_put_data(skb, buf, strlen(buf));
246 bt_dev_dbg(hdev, "%s", buf);
247
/* Read the mailbox status and registers */
249 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
250 snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
251 skb_put_data(skb, buf, strlen(buf));
252 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
253 mbox_reg = btintel_pcie_rd_reg32(data,
254 BTINTEL_PCIE_CSR_MBOX_1_REG);
255 snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
256 skb_put_data(skb, buf, strlen(buf));
257 bt_dev_dbg(hdev, "%s", buf);
258 }
259
260 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
261 mbox_reg = btintel_pcie_rd_reg32(data,
262 BTINTEL_PCIE_CSR_MBOX_2_REG);
263 snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
264 skb_put_data(skb, buf, strlen(buf));
265 bt_dev_dbg(hdev, "%s", buf);
266 }
267
268 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
269 mbox_reg = btintel_pcie_rd_reg32(data,
270 BTINTEL_PCIE_CSR_MBOX_3_REG);
271 snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
272 skb_put_data(skb, buf, strlen(buf));
273 bt_dev_dbg(hdev, "%s", buf);
274 }
275
276 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
277 mbox_reg = btintel_pcie_rd_reg32(data,
278 BTINTEL_PCIE_CSR_MBOX_4_REG);
279 snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
280 skb_put_data(skb, buf, strlen(buf));
281 bt_dev_dbg(hdev, "%s", buf);
282 }
283
284 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
285 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
286 snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
287 skb_put_data(skb, buf, strlen(buf));
288 bt_dev_dbg(hdev, "%s", buf);
289
290 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
291 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
292 snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
293 skb_put_data(skb, buf, strlen(buf));
294 bt_dev_dbg(hdev, "%s", buf);
295 snprintf(buf, sizeof(buf), "--------------------------------");
296 bt_dev_dbg(hdev, "%s", buf);
297
298 hci_recv_diag(hdev, skb);
299 }
300
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
				  struct sk_buff *skb)
303 {
304 int ret;
305 u16 tfd_index;
306 struct txq *txq = &data->txq;
307
308 tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
309
310 if (tfd_index > txq->count)
311 return -ERANGE;
312
/* Prepare for TX. It updates the TFD with the length of data and
 * address of the DMA buffer, and copies the data to the DMA buffer
 */
316 btintel_pcie_prepare_tx(txq, tfd_index, skb);
317
318 tfd_index = (tfd_index + 1) % txq->count;
319 data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;
320
321 /* Arm wait event condition */
322 data->tx_wait_done = false;
323
324 /* Set the doorbell to notify the device */
325 btintel_pcie_set_tx_db(data, tfd_index);
326
327 /* Wait for the complete interrupt - URBD0 */
328 ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
329 msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
330 if (!ret) {
331 bt_dev_err(data->hdev, "tx completion timeout");
332 btintel_pcie_dump_debug_registers(data->hdev);
333 return -ETIME;
334 }
335
336 return 0;
337 }
338
339 /* Set the doorbell for RXQ to notify the device that @index (actually index-1)
340 * is available to receive the data
341 */
static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
343 {
344 u32 val;
345
346 val = index;
347 val |= (BTINTEL_PCIE_RX_DB_VEC << 16);
348
349 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
350 }
351
352 /* Update the FRBD (free buffer descriptor) with the @frbd_index and the
353 * DMA address of the free buffer.
354 */
static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
356 {
357 struct data_buf *buf;
358 struct frbd *frbd;
359
360 /* Get the buffer of the FRBD for DMA */
361 buf = &rxq->bufs[frbd_index];
362
363 frbd = &rxq->frbds[frbd_index];
364 memset(frbd, 0, sizeof(*frbd));
365
366 /* Update FRBD */
367 frbd->tag = frbd_index;
368 frbd->addr = buf->data_p_addr;
369 }
370
static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
372 {
373 u16 frbd_index;
374 struct rxq *rxq = &data->rxq;
375
376 frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];
377
378 if (frbd_index > rxq->count)
379 return -ERANGE;
380
381 /* Prepare for RX submit. It updates the FRBD with the address of DMA
382 * buffer
383 */
384 btintel_pcie_prepare_rx(rxq, frbd_index);
385
386 frbd_index = (frbd_index + 1) % rxq->count;
387 data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
388 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
389
390 /* Set the doorbell to notify the device */
391 btintel_pcie_set_rx_db(data, frbd_index);
392
393 return 0;
394 }
395
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
397 {
398 int i, ret;
399 struct rxq *rxq = &data->rxq;
400
/* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around a
 * hardware issue that leads to a race condition in the firmware.
 */
404
405 for (i = 0; i < rxq->count - 3; i++) {
406 ret = btintel_pcie_submit_rx(data);
407 if (ret)
408 return ret;
409 }
410
411 return 0;
412 }
413
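/* Reset all head and tail index arrays (TR/CR) for both queues so the
 * rings restart from a known state.
 */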
static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
415 {
416 memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
417 memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
418 memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
419 memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
420 }
421
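/* Perform a shared hardware reset of the BT function: disconnect the bus
 * master, wait for the disconnect to take effect, assert SW_RESET and
 * finally check that the boot stage register reads back as 0.
 */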
static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
423 {
424 u32 reg;
425 int retry = 3;
426
427 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
428
429 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
430 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
431 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
432 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
433
434 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
435
436 do {
437 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
438 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
439 break;
440 usleep_range(10000, 12000);
441
442 } while (--retry > 0);
443 usleep_range(10000, 12000);
444
445 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
446
447 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
448 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
449 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
450 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
451 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
452 usleep_range(10000, 12000);
453
454 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
455 bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
456
457 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
458
/* If the shared hardware reset succeeds, the boot stage register will
 * read as 0
 */
462 return reg == 0 ? 0 : -ENODEV;
463 }
464
static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
466 {
467 u32 reg;
468
469 /* Set MAC_INIT bit to start primary bootloader */
470 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
471 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
472 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
473 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
474 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
475 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
476 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
477 }
478
static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
480 {
481 struct sk_buff *skb;
482 int err;
483
484 skb = alloc_skb(size, GFP_ATOMIC);
485 if (!skb)
486 return -ENOMEM;
487
488 skb_put_data(skb, data, size);
489 err = hci_devcd_append(hdev, skb);
490 if (err) {
491 bt_dev_err(hdev, "Failed to append data in the coredump");
492 return err;
493 }
494
495 return 0;
496 }
497
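/* Request MAC access from the device and poll until the hardware reports
 * that access has been granted, or give up after a few retries.
 */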
static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
499 {
500 u32 reg;
501 int retry = 15;
502
503 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
504
505 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
506 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
507 if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
508 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
509
510 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
511
512 do {
513 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
514 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
515 return 0;
/* Need a delay here for the Target Access hardware to settle down */
517 usleep_range(1000, 1200);
518
519 } while (--retry > 0);
520
521 return -ETIME;
522 }
523
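/* Clear the MAC access request, stop-MAC-access and crystal clock request
 * bits if they are currently set.
 */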
static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
525 {
526 u32 reg;
527
528 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
529
530 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
531 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
532
533 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
534 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
535
536 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
537 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
538
539 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
540 }
541
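/* Append a single TLV (type, length, value) entry to the devcoredump skb */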
static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
				  void *data, int size)
544 {
545 struct intel_tlv *tlv;
546
547 tlv = skb_put(skb, sizeof(*tlv) + size);
548 tlv->type = type;
549 tlv->len = size;
550 memcpy(tlv->val, data, tlv->len);
551 }
552
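/* Build the devcoredump: read the debug buffer write pointer from the
 * device, emit a TLV based header describing the dump and then append the
 * contents of all DBGC DRAM buffers.
 */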
static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
554 {
555 u32 offset, prev_size, wr_ptr_status, dump_size, i;
556 struct btintel_pcie_dbgc *dbgc = &data->dbgc;
557 u8 buf_idx, dump_time_len, fw_build;
558 struct hci_dev *hdev = data->hdev;
559 struct intel_tlv *tlv;
560 struct timespec64 now;
561 struct sk_buff *skb;
562 struct tm tm_now;
563 char buf[256];
564 u16 hdr_len;
565 int ret;
566
567 wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
568 offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
569
570 buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
571 if (buf_idx > dbgc->count) {
572 bt_dev_warn(hdev, "Buffer index is invalid");
573 return -EINVAL;
574 }
575
576 prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
577 if (prev_size + offset >= prev_size)
578 data->dmp_hdr.write_ptr = prev_size + offset;
579 else
580 return -EINVAL;
581
582 ktime_get_real_ts64(&now);
583 time64_to_tm(now.tv_sec, 0, &tm_now);
584 dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
585 tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
586 tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
587
588 fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
589 "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
590 2000 + (data->dmp_hdr.fw_timestamp >> 8),
591 data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
592 data->dmp_hdr.fw_build_num);
593
594 hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
595 sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
596 sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
597 sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
598 sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
599 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
600 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
601 sizeof(*tlv) + dump_time_len +
602 sizeof(*tlv) + fw_build;
603
604 dump_size = hdr_len + sizeof(hdr_len);
605
606 skb = alloc_skb(dump_size, GFP_KERNEL);
607 if (!skb)
608 return -ENOMEM;
609
610 /* Add debug buffers data length to dump size */
611 dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
612
613 ret = hci_devcd_init(hdev, dump_size);
614 if (ret) {
615 bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
616 kfree_skb(skb);
617 return ret;
618 }
619
620 skb_put_data(skb, &hdr_len, sizeof(hdr_len));
621
622 btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
623 sizeof(data->dmp_hdr.cnvi_bt));
624
625 btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
626 sizeof(data->dmp_hdr.write_ptr));
627
628 data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
629 BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
630
631 btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
632 sizeof(data->dmp_hdr.wrap_ctr));
633
634 btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
635 sizeof(data->dmp_hdr.trigger_reason));
636
637 btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
638 sizeof(data->dmp_hdr.fw_git_sha1));
639
640 btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
641 sizeof(data->dmp_hdr.cnvr_top));
642
643 btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
644 sizeof(data->dmp_hdr.cnvi_top));
645
646 btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
647
648 btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
649
650 ret = hci_devcd_append(hdev, skb);
651 if (ret)
652 goto exit_err;
653
654 for (i = 0; i < dbgc->count; i++) {
655 ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
656 BTINTEL_PCIE_DBGC_BUFFER_SIZE);
657 if (ret)
658 break;
659 }
660
661 exit_err:
662 hci_devcd_complete(hdev);
663 return ret;
664 }
665
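/* Acquire MAC access, dump the DBGC DRAM buffers into a devcoredump and
 * release MAC access again.
 */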
static void btintel_pcie_dump_traces(struct hci_dev *hdev)
667 {
668 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
669 int ret = 0;
670
671 ret = btintel_pcie_get_mac_access(data);
672 if (ret) {
673 bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
674 return;
675 }
676
677 ret = btintel_pcie_read_dram_buffers(data);
678
679 btintel_pcie_release_mac_access(data);
680
681 if (ret)
682 bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
683 }
684
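/* Fill the devcoredump header with controller, firmware and driver
 * information. The first two bytes hold the length of the header itself.
 */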
static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
686 {
687 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
688 u16 len = skb->len;
689 u16 *hdrlen_ptr;
690 char buf[80];
691
692 hdrlen_ptr = skb_put_zero(skb, sizeof(len));
693
694 snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
695 INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
696 skb_put_data(skb, buf, strlen(buf));
697
698 snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
699 data->dmp_hdr.fw_build_num);
700 skb_put_data(skb, buf, strlen(buf));
701
702 snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
703 skb_put_data(skb, buf, strlen(buf));
704
705 snprintf(buf, sizeof(buf), "Vendor: Intel\n");
706 skb_put_data(skb, buf, strlen(buf));
707
708 *hdrlen_ptr = skb->len - len;
709 }
710
static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
712 {
713 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
714
715 switch (state) {
716 case HCI_DEVCOREDUMP_IDLE:
717 data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
718 break;
719 case HCI_DEVCOREDUMP_ACTIVE:
720 data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
721 break;
722 case HCI_DEVCOREDUMP_TIMEOUT:
723 case HCI_DEVCOREDUMP_ABORT:
724 case HCI_DEVCOREDUMP_DONE:
725 data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
726 break;
727 }
728 }
729
/* This function enables the BT function by setting the BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT
 * bit in the BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and waits for the MSI-X interrupt with
 * BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
 * Then the host reads the firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
 * from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
 */
static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
737 {
738 int err;
739 u32 reg;
740
741 data->gp0_received = false;
742
743 /* Update the DMA address of CI struct to CSR */
744 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
745 data->ci_p_addr & 0xffffffff);
746 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
747 (u64)data->ci_p_addr >> 32);
748
/* Reset the cached value of the boot stage. It is updated by the MSI-X
 * gp0 interrupt handler.
 */
752 data->boot_stage_cache = 0x0;
753
754 /* Set MAC_INIT bit to start primary bootloader */
755 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
756 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
757 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
758 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
759 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
760 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
761
762 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
763
764 /* MAC is ready. Enable BT FUNC */
765 btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
766 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
767
768 btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
769
770 /* wait for interrupt from the device after booting up to primary
771 * bootloader.
772 */
773 data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
774 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
775 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
776 if (!err)
777 return -ETIME;
778
779 /* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */
780 if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
781 return -ENODEV;
782
783 return 0;
784 }
785
static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
787 {
788 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
789 }
790
static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
792 {
793 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
794 !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
795 }
796
static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
798 {
799 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
800 }
801
static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
803 {
804 return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
805 }
806
static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
					u32 dxstate)
809 {
810 bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
811 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
812 }
813
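/* Translate the alive interrupt context into a human readable string for
 * debug logging.
 */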
static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
815 {
816 switch (alive_intr_ctxt) {
817 case BTINTEL_PCIE_ROM:
818 return "rom";
819 case BTINTEL_PCIE_FW_DL:
820 return "fw_dl";
821 case BTINTEL_PCIE_D0:
822 return "d0";
823 case BTINTEL_PCIE_D3:
824 return "d3";
825 case BTINTEL_PCIE_HCI_RESET:
826 return "hci_reset";
827 case BTINTEL_PCIE_INTEL_HCI_RESET1:
828 return "intel_reset1";
829 case BTINTEL_PCIE_INTEL_HCI_RESET2:
830 return "intel_reset2";
831 default:
832 return "unknown";
833 }
834 }
835
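/* Read @len bytes of device memory at @dev_addr into @buf, 32 bits at a
 * time, while holding MAC access.
 */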
static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
					void *buf, u32 dev_addr, int len)
838 {
839 int err;
840 u32 *val = buf;
841
842 /* Get device mac access */
843 err = btintel_pcie_get_mac_access(data);
844 if (err) {
845 bt_dev_err(data->hdev, "Failed to get mac access %d", err);
846 return err;
847 }
848
849 for (; len > 0; len -= 4, dev_addr += 4, val++)
850 *val = btintel_pcie_rd_dev_mem(data, dev_addr);
851
852 btintel_pcie_release_mac_access(data);
853
854 return 0;
855 }
856
static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
858 {
859 return (data->boot_stage_cache &
860 BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
861 (data->boot_stage_cache &
862 BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
863 }
864
static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
866 {
867 return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
868 (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
869 }
870
static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
872 {
873 bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
874 btintel_pcie_dump_debug_registers(data->hdev);
875 }
876
877 /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
878 * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
879 */
static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
881 {
882 bool submit_rx, signal_waitq;
883 u32 reg, old_ctxt;
884
/* This interrupt is raised for three different causes and it is not easy
 * to know which one triggered it. So, compare each register value with
 * the cached value and update the cache before waking up the queue.
 */
889 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
890 if (reg != data->boot_stage_cache)
891 data->boot_stage_cache = reg;
892
893 bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
894 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
895 data->boot_stage_cache, reg);
896 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
897 if (reg != data->img_resp_cache)
898 data->img_resp_cache = reg;
899
900 if (btintel_pcie_in_error(data)) {
901 bt_dev_err(data->hdev, "Controller in error state");
902 btintel_pcie_dump_debug_registers(data->hdev);
903 return;
904 }
905
906 if (btintel_pcie_in_lockdown(data)) {
907 bt_dev_err(data->hdev, "Controller in lockdown state");
908 btintel_pcie_dump_debug_registers(data->hdev);
909 return;
910 }
911
912 data->gp0_received = true;
913
914 old_ctxt = data->alive_intr_ctxt;
915 submit_rx = false;
916 signal_waitq = false;
917
918 switch (data->alive_intr_ctxt) {
919 case BTINTEL_PCIE_ROM:
920 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
921 signal_waitq = true;
922 break;
923 case BTINTEL_PCIE_FW_DL:
924 /* Error case is already handled. Ideally control shall not
925 * reach here
926 */
927 break;
928 case BTINTEL_PCIE_INTEL_HCI_RESET1:
929 if (btintel_pcie_in_op(data)) {
930 submit_rx = true;
931 break;
932 }
933
934 if (btintel_pcie_in_iml(data)) {
935 submit_rx = true;
936 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
937 break;
938 }
939 break;
940 case BTINTEL_PCIE_INTEL_HCI_RESET2:
941 if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
942 btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
943 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
944 }
945 break;
946 case BTINTEL_PCIE_D0:
947 if (btintel_pcie_in_d3(data)) {
948 data->alive_intr_ctxt = BTINTEL_PCIE_D3;
949 signal_waitq = true;
950 break;
951 }
952 break;
953 case BTINTEL_PCIE_D3:
954 if (btintel_pcie_in_d0(data)) {
955 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
956 submit_rx = true;
957 signal_waitq = true;
958 break;
959 }
960 break;
961 case BTINTEL_PCIE_HCI_RESET:
962 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
963 submit_rx = true;
964 signal_waitq = true;
965 break;
966 default:
967 bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
968 data->alive_intr_ctxt);
969 break;
970 }
971
972 if (submit_rx) {
973 btintel_pcie_reset_ia(data);
974 btintel_pcie_start_rx(data);
975 }
976
977 if (signal_waitq) {
978 bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
979 wake_up(&data->gp0_wait_q);
980 }
981
982 if (old_ctxt != data->alive_intr_ctxt)
983 bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
984 btintel_pcie_alivectxt_state2str(old_ctxt),
985 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
986 }
987
/* This function handles the MSI-X interrupt for rx queue 0 which is for TX
 */
static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
991 {
992 u16 cr_tia, cr_hia;
993 struct txq *txq;
994 struct urbd0 *urbd0;
995
996 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
997 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
998
999 if (cr_tia == cr_hia)
1000 return;
1001
1002 txq = &data->txq;
1003
1004 while (cr_tia != cr_hia) {
1005 data->tx_wait_done = true;
1006 wake_up(&data->tx_wait_q);
1007
1008 urbd0 = &txq->urbd0s[cr_tia];
1009
1010 if (urbd0->tfd_index > txq->count)
1011 return;
1012
1013 cr_tia = (cr_tia + 1) % txq->count;
1014 data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
1015 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
1016 }
1017 }
1018
static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
1020 {
1021 struct hci_event_hdr *hdr = (void *)skb->data;
1022 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
1023
1024 if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
1025 hdr->plen > 0) {
1026 const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
1027 unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
1028
1029 if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
1030 switch (skb->data[2]) {
1031 case 0x02:
1032 /* When switching to the operational firmware
1033 * the device sends a vendor specific event
1034 * indicating that the bootup completed.
1035 */
1036 btintel_bootup(hdev, ptr, len);
1037
1038 /* If bootup event is from operational image,
1039 * driver needs to write sleep control register to
1040 * move into D0 state
1041 */
1042 if (btintel_pcie_in_op(data)) {
1043 btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
1044 data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
1045 kfree_skb(skb);
1046 return 0;
1047 }
1048
1049 if (btintel_pcie_in_iml(data)) {
1050 /* In case of IML, there is no concept
1051 * of D0 transition. Just mimic as if
1052 * IML moved to D0 by clearing INTEL_WAIT_FOR_D0
1053 * bit and waking up the task waiting on
1054 * INTEL_WAIT_FOR_D0. This is required
1055 * as intel_boot() is common function for
1056 * both IML and OP image loading.
1057 */
1058 if (btintel_test_and_clear_flag(data->hdev,
1059 INTEL_WAIT_FOR_D0))
1060 btintel_wake_up_flag(data->hdev,
1061 INTEL_WAIT_FOR_D0);
1062 }
1063 kfree_skb(skb);
1064 return 0;
1065 case 0x06:
1066 /* When the firmware loading completes the
1067 * device sends out a vendor specific event
1068 * indicating the result of the firmware
1069 * loading.
1070 */
1071 btintel_secure_send_result(hdev, ptr, len);
1072 kfree_skb(skb);
1073 return 0;
1074 }
1075 }
1076
/* This is a debug event that comes from IML and OP image when it
 * starts execution. There is no need to pass this event to the stack.
 */
1080 if (skb->data[2] == 0x97) {
1081 hci_recv_diag(hdev, skb);
1082 return 0;
1083 }
1084 }
1085
1086 return hci_recv_frame(hdev, skb);
1087 }

/* Process the received rx data
 * It checks the frame header to identify the data type, creates an skb
 * and passes it to the HCI layer
 */
static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
				   struct sk_buff *skb)
1094 {
1095 int ret;
1096 u8 pkt_type;
1097 u16 plen;
1098 u32 pcie_pkt_type;
1099 void *pdata;
1100 struct hci_dev *hdev = data->hdev;
1101
1102 spin_lock(&data->hci_rx_lock);
1103
/* The first 4 bytes indicate the Intel PCIe specific packet type */
1105 pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
1106 if (!pdata) {
1107 bt_dev_err(hdev, "Corrupted packet received");
1108 ret = -EILSEQ;
1109 goto exit_error;
1110 }
1111
1112 pcie_pkt_type = get_unaligned_le32(pdata);
1113
1114 switch (pcie_pkt_type) {
1115 case BTINTEL_PCIE_HCI_ACL_PKT:
1116 if (skb->len >= HCI_ACL_HDR_SIZE) {
1117 plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
1118 pkt_type = HCI_ACLDATA_PKT;
1119 } else {
1120 bt_dev_err(hdev, "ACL packet is too short");
1121 ret = -EILSEQ;
1122 goto exit_error;
1123 }
1124 break;
1125
1126 case BTINTEL_PCIE_HCI_SCO_PKT:
1127 if (skb->len >= HCI_SCO_HDR_SIZE) {
1128 plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
1129 pkt_type = HCI_SCODATA_PKT;
1130 } else {
1131 bt_dev_err(hdev, "SCO packet is too short");
1132 ret = -EILSEQ;
1133 goto exit_error;
1134 }
1135 break;
1136
1137 case BTINTEL_PCIE_HCI_EVT_PKT:
1138 if (skb->len >= HCI_EVENT_HDR_SIZE) {
1139 plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
1140 pkt_type = HCI_EVENT_PKT;
1141 } else {
1142 bt_dev_err(hdev, "Event packet is too short");
1143 ret = -EILSEQ;
1144 goto exit_error;
1145 }
1146 break;
1147
1148 case BTINTEL_PCIE_HCI_ISO_PKT:
1149 if (skb->len >= HCI_ISO_HDR_SIZE) {
1150 plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
1151 pkt_type = HCI_ISODATA_PKT;
1152 } else {
1153 bt_dev_err(hdev, "ISO packet is too short");
1154 ret = -EILSEQ;
1155 goto exit_error;
1156 }
1157 break;
1158
1159 default:
1160 bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
1161 pcie_pkt_type);
1162 ret = -EINVAL;
1163 goto exit_error;
1164 }
1165
1166 if (skb->len < plen) {
1167 bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
1168 pkt_type);
1169 ret = -EILSEQ;
1170 goto exit_error;
1171 }
1172
1173 bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
1174
1175 hci_skb_pkt_type(skb) = pkt_type;
1176 hdev->stat.byte_rx += plen;
1177 skb_trim(skb, plen);
1178
1179 if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
1180 ret = btintel_pcie_recv_event(hdev, skb);
1181 else
1182 ret = hci_recv_frame(hdev, skb);
1183 skb = NULL; /* skb is freed in the callee */
1184
1185 exit_error:
1186 if (skb)
1187 kfree_skb(skb);
1188
1189 if (ret)
1190 hdev->stat.err_rx++;
1191
1192 spin_unlock(&data->hci_rx_lock);
1193
1194 return ret;
1195 }
1196
static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
1198 {
1199 int len, err, offset, pending;
1200 struct sk_buff *skb;
1201 u8 *buf, prefix[64];
1202 u32 addr, val;
1203 u16 pkt_len;
1204
1205 struct tlv {
1206 u8 type;
1207 __le16 len;
1208 u8 val[];
1209 } __packed;
1210
1211 struct tlv *tlv;
1212
1213 switch (data->dmp_hdr.cnvi_top & 0xfff) {
1214 case BTINTEL_CNVI_BLAZARI:
1215 case BTINTEL_CNVI_BLAZARIW:
1216 /* only from step B0 onwards */
1217 if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
1218 return;
1219 len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
1220 addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
1221 break;
1222 case BTINTEL_CNVI_SCP:
1223 len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
1224 addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
1225 break;
1226 default:
1227 bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
1228 return;
1229 }
1230
1231 buf = kzalloc(len, GFP_KERNEL);
1232 if (!buf)
1233 goto exit_on_error;
1234
1235 btintel_pcie_mac_init(data);
1236
1237 err = btintel_pcie_read_device_mem(data, buf, addr, len);
1238 if (err)
1239 goto exit_on_error;
1240
1241 val = get_unaligned_le32(buf);
1242 if (val != BTINTEL_PCIE_MAGIC_NUM) {
1243 bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
1244 val);
1245 goto exit_on_error;
1246 }
1247
1248 snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
1249
1250 offset = 4;
1251 do {
1252 pending = len - offset;
1253 if (pending < sizeof(*tlv))
1254 break;
1255 tlv = (struct tlv *)(buf + offset);
1256
1257 /* If type == 0, then there are no more TLVs to be parsed */
1258 if (!tlv->type) {
1259 bt_dev_dbg(data->hdev, "Invalid TLV type 0");
1260 break;
1261 }
1262 pkt_len = le16_to_cpu(tlv->len);
1263 offset += sizeof(*tlv);
1264 pending = len - offset;
1265 if (pkt_len > pending)
1266 break;
1267
1268 offset += pkt_len;
1269
1270 /* Only TLVs of type == 1 are HCI events, no need to process other
1271 * TLVs
1272 */
1273 if (tlv->type != 1)
1274 continue;
1275
1276 bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
1277 if (pkt_len > HCI_MAX_EVENT_SIZE)
1278 break;
1279 skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
1280 if (!skb)
1281 goto exit_on_error;
1282 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1283 skb_put_data(skb, tlv->val, pkt_len);
1284
1285 /* copy Intel specific pcie packet type */
1286 val = BTINTEL_PCIE_HCI_EVT_PKT;
1287 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
1288 BTINTEL_PCIE_HCI_TYPE_LEN);
1289
1290 print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
1291 tlv->val, pkt_len, false);
1292
1293 btintel_pcie_recv_frame(data, skb);
1294 } while (offset < len);
1295
1296 exit_on_error:
1297 kfree(buf);
1298 }
1299
static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
1301 {
1302 bt_dev_err(data->hdev, "Received hw exception interrupt");
1303
1304 if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
1305 return;
1306
1307 if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
1308 return;
1309
1310 /* Trigger device core dump when there is HW exception */
1311 if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
1312 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
1313
1314 queue_work(data->workqueue, &data->rx_work);
1315 }
1316
static void btintel_pcie_rx_work(struct work_struct *work)
1318 {
1319 struct btintel_pcie_data *data = container_of(work,
1320 struct btintel_pcie_data, rx_work);
1321 struct sk_buff *skb;
1322
1323 if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike USB products, the controller will not send a hardware
 * exception event on exception. Instead the controller writes the
 * hardware exception event to device memory along with optional
 * debug events, raises the MSI-X interrupt and halts. The driver
 * shall read the exception event from device memory and pass it to
 * the stack for further processing.
 */
1331 btintel_pcie_read_hwexp(data);
1332 clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
1333 }
1334
1335 if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
1336 btintel_pcie_dump_traces(data->hdev);
1337 clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
1338 }
1339
1340 /* Process the sk_buf in queue and send to the HCI layer */
1341 while ((skb = skb_dequeue(&data->rx_skb_q))) {
1342 btintel_pcie_recv_frame(data, skb);
1343 }
1344 }
1345
1346 /* create sk_buff with data and save it to queue and start RX work */
static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
				       void *buf)
1349 {
1350 int ret, len;
1351 struct rfh_hdr *rfh_hdr;
1352 struct sk_buff *skb;
1353
1354 rfh_hdr = buf;
1355
1356 len = rfh_hdr->packet_len;
1357 if (len <= 0) {
1358 ret = -EINVAL;
1359 goto resubmit;
1360 }
1361
1362 /* Remove RFH header */
1363 buf += sizeof(*rfh_hdr);
1364
1365 skb = alloc_skb(len, GFP_ATOMIC);
1366 if (!skb)
1367 goto resubmit;
1368
1369 skb_put_data(skb, buf, len);
1370 skb_queue_tail(&data->rx_skb_q, skb);
1371 queue_work(data->workqueue, &data->rx_work);
1372
1373 resubmit:
1374 ret = btintel_pcie_submit_rx(data);
1375
1376 return ret;
1377 }
1378
1379 /* Handles the MSI-X interrupt for rx queue 1 which is for RX */
static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
1381 {
1382 u16 cr_hia, cr_tia;
1383 struct rxq *rxq;
1384 struct urbd1 *urbd1;
1385 struct data_buf *buf;
1386 int ret;
1387 struct hci_dev *hdev = data->hdev;
1388
1389 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
1390 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1391
1392 bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
1393
1394 /* Check CR_TIA and CR_HIA for change */
1395 if (cr_tia == cr_hia)
1396 return;
1397
1398 rxq = &data->rxq;
1399
/* The firmware may send multiple CDs in a single MSI-X interrupt and
 * all received CDs need to be processed here.
 */
1403 while (cr_tia != cr_hia) {
1404 urbd1 = &rxq->urbd1s[cr_tia];
1405 ipc_print_urbd1(data->hdev, urbd1, cr_tia);
1406
1407 buf = &rxq->bufs[urbd1->frbd_tag];
1408 if (!buf) {
1409 bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
1410 urbd1->frbd_tag);
1411 return;
1412 }
1413
1414 ret = btintel_pcie_submit_rx_work(data, urbd1->status,
1415 buf->data);
1416 if (ret) {
1417 bt_dev_err(hdev, "RXQ: failed to submit rx request");
1418 return;
1419 }
1420
1421 cr_tia = (cr_tia + 1) % rxq->count;
1422 data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
1423 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
1424 }
1425 }
1426
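/* Hard IRQ handler: defer all the work to the threaded handler */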
static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
1428 {
1429 return IRQ_WAKE_THREAD;
1430 }
1431
static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
1433 {
1434 return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1435 }
1436
static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
1438 {
1439 return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1440 }
1441
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
1443 {
1444 struct msix_entry *entry = dev_id;
1445 struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
1446 u32 intr_fh, intr_hw;
1447
1448 spin_lock(&data->irq_lock);
1449 intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
1450 intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);
1451
/* Clear the cause registers to avoid handling the same cause again */
1453 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
1454 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
1455 spin_unlock(&data->irq_lock);
1456
1457 if (unlikely(!(intr_fh | intr_hw))) {
1458 /* Ignore interrupt, inta == 0 */
1459 return IRQ_NONE;
1460 }
1461
/* This interrupt is raised when there is a hardware exception */
1463 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
1464 btintel_pcie_msix_hw_exp_handler(data);
1465
1466 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
1467 btintel_pcie_msix_gp1_handler(data);
1468
1469 /* This interrupt is triggered by the firmware after updating
1470 * boot_stage register and image_response register
1471 */
1472 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
1473 btintel_pcie_msix_gp0_handler(data);
1474
1475 /* For TX */
1476 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
1477 btintel_pcie_msix_tx_handle(data);
1478 if (!btintel_pcie_is_rxq_empty(data))
1479 btintel_pcie_msix_rx_handle(data);
1480 }
1481
1482 /* For RX */
1483 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
1484 btintel_pcie_msix_rx_handle(data);
1485 if (!btintel_pcie_is_txackq_empty(data))
1486 btintel_pcie_msix_tx_handle(data);
1487 }
1488
1489 /*
1490 * Before sending the interrupt the HW disables it to prevent a nested
1491 * interrupt. This is done by writing 1 to the corresponding bit in
1492 * the mask register. After handling the interrupt, it should be
1493 * re-enabled by clearing this bit. This register is defined as write 1
1494 * clear (W1C) register, meaning that it's cleared by writing 1
1495 * to the bit.
1496 */
1497 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
1498 BIT(entry->entry));
1499
1500 return IRQ_HANDLED;
1501 }
1502
1503 /* This function requests the irq for MSI-X and registers the handlers per irq.
1504 * Currently, it requests only 1 irq for all interrupt causes.
1505 */
static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
1507 {
1508 int err;
1509 int num_irqs, i;
1510
1511 for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
1512 data->msix_entries[i].entry = i;
1513
1514 num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
1515 BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
1516 if (num_irqs < 0)
1517 return num_irqs;
1518
1519 data->alloc_vecs = num_irqs;
1520 data->msix_enabled = 1;
1521 data->def_irq = 0;
1522
1523 /* setup irq handler */
1524 for (i = 0; i < data->alloc_vecs; i++) {
1525 struct msix_entry *msix_entry;
1526
1527 msix_entry = &data->msix_entries[i];
1528 msix_entry->vector = pci_irq_vector(data->pdev, i);
1529
1530 err = devm_request_threaded_irq(&data->pdev->dev,
1531 msix_entry->vector,
1532 btintel_pcie_msix_isr,
1533 btintel_pcie_irq_msix_handler,
1534 IRQF_SHARED,
1535 KBUILD_MODNAME,
1536 msix_entry);
1537 if (err) {
1538 pci_free_irq_vectors(data->pdev);
1539 data->alloc_vecs = 0;
1540 return err;
1541 }
1542 }
1543 return 0;
1544 }
1545
1546 struct btintel_pcie_causes_list {
1547 u32 cause;
1548 u32 mask_reg;
1549 u8 cause_num;
1550 };
1551
1552 static struct btintel_pcie_causes_list causes_list[] = {
1553 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
1554 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
1555 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
1556 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
1557 };
1558
1559 /* This function configures the interrupt masks for both HW_INT_CAUSES and
1560 * FH_INT_CAUSES which are meaningful to us.
1561 *
 * After resetting the BT function via PCIe FLR or FUNC_CTRL reset, the driver
 * needs to call this function again since the masks are reset to 0xFFFFFFFF
 * after reset.
 */
static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
1567 {
1568 int i;
1569 int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;
1570
1571 /* Set Non Auto Clear Cause */
1572 for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
1573 btintel_pcie_wr_reg8(data,
1574 BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
1575 val);
1576 btintel_pcie_clr_reg_bits(data,
1577 causes_list[i].mask_reg,
1578 causes_list[i].cause);
1579 }
1580
1581 /* Save the initial interrupt mask */
1582 data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
1583 data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
1584 }
1585
static int btintel_pcie_config_pcie(struct pci_dev *pdev,
				    struct btintel_pcie_data *data)
1588 {
1589 int err;
1590
1591 err = pcim_enable_device(pdev);
1592 if (err)
1593 return err;
1594
1595 pci_set_master(pdev);
1596
1597 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1598 if (err) {
1599 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1600 if (err)
1601 return err;
1602 }
1603
1604 data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
1605 if (IS_ERR(data->base_addr))
1606 return PTR_ERR(data->base_addr);
1607
1608 err = btintel_pcie_setup_irq(data);
1609 if (err)
1610 return err;
1611
1612 /* Configure MSI-X with causes list */
1613 btintel_pcie_config_msix(data);
1614
1615 return 0;
1616 }
1617
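/* Populate the context information (CI) structure shared with the firmware:
 * index array addresses, descriptor queue addresses and counts, doorbell
 * vectors and the DBGC fragment location.
 */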
static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
				 struct ctx_info *ci)
1620 {
1621 ci->version = 0x1;
1622 ci->size = sizeof(*ci);
1623 ci->config = 0x0000;
1624 ci->addr_cr_hia = data->ia.cr_hia_p_addr;
1625 ci->addr_tr_tia = data->ia.tr_tia_p_addr;
1626 ci->addr_cr_tia = data->ia.cr_tia_p_addr;
1627 ci->addr_tr_hia = data->ia.tr_hia_p_addr;
1628 ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
1629 ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
1630 ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
1631 ci->addr_tfdq = data->txq.tfds_p_addr;
1632 ci->num_tfdq = data->txq.count;
1633 ci->num_urbdq0 = data->txq.count;
1634 ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
1635 ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
1636 ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
1637 ci->addr_frbdq = data->rxq.frbds_p_addr;
1638 ci->num_frbdq = data->rxq.count;
1639 ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1640 ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
1641 ci->num_urbdq1 = data->rxq.count;
1642 ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1643
1644 ci->dbg_output_mode = 0x01;
1645 ci->dbgc_addr = data->dbgc.frag_p_addr;
1646 ci->dbgc_size = data->dbgc.frag_size;
1647 ci->dbg_preset = 0x00;
1648 }
1649
static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
				       struct txq *txq)
1652 {
1653 /* Free data buffers first */
1654 dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1655 txq->buf_v_addr, txq->buf_p_addr);
1656 kfree(txq->bufs);
1657 }
1658
static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
				       struct txq *txq)
1661 {
1662 int i;
1663 struct data_buf *buf;
1664
1665 /* Allocate the same number of buffers as the descriptor */
1666 txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL);
1667 if (!txq->bufs)
1668 return -ENOMEM;
1669
1670 /* Allocate full chunk of data buffer for DMA first and do indexing and
1671 * initialization next, so it can be freed easily
1672 */
1673 txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1674 txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1675 &txq->buf_p_addr,
1676 GFP_KERNEL | __GFP_NOWARN);
1677 if (!txq->buf_v_addr) {
1678 kfree(txq->bufs);
1679 return -ENOMEM;
1680 }
1681
1682 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1683 * have virtual address and physical address
1684 */
1685 for (i = 0; i < txq->count; i++) {
1686 buf = &txq->bufs[i];
1687 buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1688 buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1689 }
1690
1691 return 0;
1692 }
1693
static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
				       struct rxq *rxq)
1696 {
1697 /* Free data buffers first */
1698 dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1699 rxq->buf_v_addr, rxq->buf_p_addr);
1700 kfree(rxq->bufs);
1701 }
1702
static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
				       struct rxq *rxq)
1705 {
1706 int i;
1707 struct data_buf *buf;
1708
1709 /* Allocate the same number of buffers as the descriptor */
1710 rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL);
1711 if (!rxq->bufs)
1712 return -ENOMEM;
1713
1714 /* Allocate full chunk of data buffer for DMA first and do indexing and
1715 * initialization next, so it can be freed easily
1716 */
1717 rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1718 rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1719 &rxq->buf_p_addr,
1720 GFP_KERNEL | __GFP_NOWARN);
1721 if (!rxq->buf_v_addr) {
1722 kfree(rxq->bufs);
1723 return -ENOMEM;
1724 }
1725
1726 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1727 * have virtual address and physical address
1728 */
1729 for (i = 0; i < rxq->count; i++) {
1730 buf = &rxq->bufs[i];
1731 buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1732 buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1733 }
1734
1735 return 0;
1736 }
1737
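/* Carve the index arrays out of the shared DMA region: the TR head/tail and
 * CR head/tail index arrays are laid out back to back, each holding one u16
 * entry per queue.
 */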
static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
				  dma_addr_t p_addr, void *v_addr,
				  struct ia *ia)
{
	/* TR Head Index Array */
	ia->tr_hia_p_addr = p_addr;
	ia->tr_hia = v_addr;

	/* TR Tail Index Array */
	ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
	ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;

	/* CR Head Index Array */
	ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
	ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);

	/* CR Tail Index Array */
	ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
	ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
}

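/* Free all queue resources allocated by btintel_pcie_alloc(): the per-queue
 * data buffers and the DMA pool holding the descriptors, index arrays and
 * context information.
 */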
static void btintel_pcie_free(struct btintel_pcie_data *data)
{
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
	btintel_pcie_free_txq_bufs(data, &data->txq);

	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
}

/* Allocate tx and rx queues, any related data structures and buffers. */
static int btintel_pcie_alloc(struct btintel_pcie_data *data)
{
	int err = 0;
	size_t total;
	dma_addr_t p_addr;
	void *v_addr;

	/* Allocate the chunk of DMA memory for descriptors, index array, and
	 * context information, instead of allocating individually.
	 * The DMA memory for the data buffers is allocated while setting up
	 * each queue.
	 *
	 * Total size is the sum of the following:
	 *  + size of TFD * number of descriptors in queue
	 *  + size of URBD0 * number of descriptors in queue
	 *  + size of FRBD * number of descriptors in queue
	 *  + size of URBD1 * number of descriptors in queue
	 *  + size of index * number of queues (2) * types of index array (4)
	 *  + size of context information
	 */
	total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
	total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Add the sum of the size of index array and size of ci struct */
	total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);

	/* Allocate DMA Pool */
	data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
					 total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
	if (!data->dma_pool) {
		err = -ENOMEM;
		goto exit_error;
	}

	v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
				 &p_addr);
	if (!v_addr) {
		dma_pool_destroy(data->dma_pool);
		err = -ENOMEM;
		goto exit_error;
	}

	data->dma_p_addr = p_addr;
	data->dma_v_addr = v_addr;

	/* Setup descriptor count */
	data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
	data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Setup tfds */
	data->txq.tfds_p_addr = p_addr;
	data->txq.tfds = v_addr;

	p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup urbd0 */
	data->txq.urbd0s_p_addr = p_addr;
	data->txq.urbd0s = v_addr;

	p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup FRBD */
	data->rxq.frbds_p_addr = p_addr;
	data->rxq.frbds = v_addr;

	p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup urbd1 */
	data->rxq.urbd1s_p_addr = p_addr;
	data->rxq.urbd1s = v_addr;

	p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup data buffers for txq */
	err = btintel_pcie_setup_txq_bufs(data, &data->txq);
	if (err)
		goto exit_error_pool;

	/* Setup data buffers for rxq */
	err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
	if (err)
		goto exit_error_txq;

	/* Setup Index Array */
	btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);

	/* Setup data buffers for dbgc */
	err = btintel_pcie_setup_dbgc(data);
	if (err)
		goto exit_error_rxq;

	/* Setup Context Information */
	p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
	v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;

	data->ci = v_addr;
	data->ci_p_addr = p_addr;

	/* Initialize the CI */
	btintel_pcie_init_ci(data, data->ci);

	return 0;

exit_error_rxq:
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
exit_error_txq:
	btintel_pcie_free_txq_bufs(data, &data->txq);
exit_error_pool:
	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
exit_error:
	return err;
}

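/* hdev->open callback: the transport is brought up during probe, so there is
 * nothing to do here beyond the debug trace.
 */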
static int btintel_pcie_open(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

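/* hdev->close callback: no transport teardown is needed here; the resources
 * are released when the PCI device is removed.
 */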
static int btintel_pcie_close(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

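/* Build a synthetic HCI Command Complete event (ncmd 1, status 0x00) for the
 * given opcode and feed it to the HCI core. Used for commands that the
 * firmware does not answer with a Command Complete event of its own.
 */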
static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	*(u8 *)skb_put(skb, 1) = 0x00;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}

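/* hdev->send callback: prepend the 4-byte iBT PCIe packet type, hand the skb
 * to the synchronous TX path and, for HCI_OP_RESET, wait for the firmware's
 * alive interrupt before returning.
 */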
static int btintel_pcie_send_frame(struct hci_dev *hdev,
				   struct sk_buff *skb)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	struct hci_command_hdr *cmd;
	__u16 opcode = ~0;
	int ret;
	u32 type;
	u32 old_ctxt;

	/* Due to a firmware limitation, the packet type header has to be
	 * 4 bytes instead of the 1 byte used on UART. On UART, the firmware
	 * can read the first byte to get the packet type and redirect the
	 * rest of the packet to the right handler.
	 *
	 * But for PCIe, the THF (Transfer Flow Handler) fetches 4 bytes of
	 * data from DMA memory at a time, so by the time it has read the
	 * first 4 bytes it has already consumed part of the packet. Thus the
	 * packet type indicator for iBT PCIe is 4 bytes.
	 *
	 * Luckily, when the HCI core creates the skb it reserves 8 bytes of
	 * headroom for profile and driver use, so the iBT PCIe packet type
	 * can be pushed in front of the data before it is sent to the device.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		type = BTINTEL_PCIE_HCI_CMD_PKT;
		cmd = (void *)skb->data;
		opcode = le16_to_cpu(cmd->opcode);
		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
			/* When the 0xfc01 command is issued to boot into
			 * the operational firmware, it will actually not
			 * send a command complete event. To keep the flow
			 * control working inject that event here.
			 */
			if (opcode == 0xfc01)
				btintel_pcie_inject_cmd_complete(hdev, opcode);
		}
		/* Firmware raises alive interrupt on HCI_OP_RESET */
		if (opcode == HCI_OP_RESET)
			data->gp0_received = false;

		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		type = BTINTEL_PCIE_HCI_ACL_PKT;
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		type = BTINTEL_PCIE_HCI_SCO_PKT;
		hdev->stat.sco_tx++;
		break;
	case HCI_ISODATA_PKT:
		type = BTINTEL_PCIE_HCI_ISO_PKT;
		break;
	default:
		bt_dev_err(hdev, "Unknown HCI packet type");
		return -EILSEQ;
	}
	memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
	       BTINTEL_PCIE_HCI_TYPE_LEN);

	ret = btintel_pcie_send_sync(data, skb);
	if (ret) {
		hdev->stat.err_tx++;
		bt_dev_err(hdev, "Failed to send frame (%d)", ret);
		goto exit_error;
	}

	if (type == BTINTEL_PCIE_HCI_CMD_PKT &&
	    (opcode == HCI_OP_RESET || opcode == 0xfc01)) {
		old_ctxt = data->alive_intr_ctxt;
		data->alive_intr_ctxt =
			(opcode == 0xfc01 ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
				BTINTEL_PCIE_HCI_RESET);
		bt_dev_dbg(data->hdev, "sent cmd: 0x%4.4x alive context changed: %s -> %s",
			   opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
			   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
		if (opcode == HCI_OP_RESET) {
			ret = wait_event_timeout(data->gp0_wait_q,
						 data->gp0_received,
						 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
			if (!ret) {
				hdev->stat.err_tx++;
				bt_dev_err(hdev, "No alive interrupt received for %s",
					   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
				ret = -ETIME;
				goto exit_error;
			}
		}
	}
	hdev->stat.byte_tx += skb->len;
	kfree_skb(skb);

exit_error:
	return ret;
}

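/* Unregister and free the HCI device created by btintel_pcie_setup_hdev() */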
static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
{
	struct hci_dev *hdev;

	hdev = data->hdev;
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
	data->hdev = NULL;
}

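/* Mask the MSI-X FH and HW interrupt causes so the device stops raising
 * interrupts.
 */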
static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

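/* Unmask the MSI-X FH and HW interrupt causes configured at init time */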
static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

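/* Wait for any in-flight handlers of the allocated MSI-X vectors to finish */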
static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
{
	for (int i = 0; i < data->alloc_vecs; i++)
		synchronize_irq(data->msix_entries[i].vector);
}

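/* Read the Intel version (TLV) information, apply the Intel quirks, run the
 * bootloader/firmware setup for supported hardware variants and register the
 * devcoredump callbacks.
 */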
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	const u8 param[1] = { 0xFF };
	struct intel_version_tlv ver_tlv;
	struct sk_buff *skb;
	int err;

	BT_DBG("%s", hdev->name);

	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* Check the status */
	if (skb->data[0]) {
		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
			   skb->data[0]);
		err = -EIO;
		goto exit_error;
	}

	/* Apply the common HCI quirks for Intel devices */
	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
	set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);

	/* Set up the quality report callback for Intel devices */
	hdev->set_quality_report = btintel_set_quality_report;

	memset(&ver_tlv, 0, sizeof(ver_tlv));
	/* For TLV type device, parse the tlv data */
	err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
	if (err) {
		bt_dev_err(hdev, "Failed to parse TLV version information");
		goto exit_error;
	}

	switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
	case 0x37:
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
			   INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	/* Check for the iBT hardware variants supported by this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come
	 * along.
	 */
	switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
	case 0x1e: /* BzrI */
	case 0x1f: /* ScP */
		/* Display version information of TLV type */
		btintel_version_info_tlv(hdev, &ver_tlv);

		/* Apply the device specific HCI quirks for TLV based devices
		 *
		 * All TLV based devices support WBS
		 */
		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

		/* Setup MSFT Extension support */
		btintel_set_msft_opcode(hdev,
					INTEL_HW_VARIANT(ver_tlv.cnvi_bt));

		err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
		if (err)
			goto exit_error;
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
			   INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
	data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
	data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
	data->dmp_hdr.fw_build_type = ver_tlv.build_type;
	data->dmp_hdr.fw_build_num = ver_tlv.build_num;
	data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;

	if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
		data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;

	err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
				 btintel_pcie_dump_notify);
	if (err) {
		bt_dev_err(hdev, "Failed to register coredump (%d)", err);
		goto exit_error;
	}

	btintel_print_fseq_info(hdev);
exit_error:
	kfree_skb(skb);

	return err;
}

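/* hdev->setup callback: run the firmware setup and, on failure, reset the
 * device, re-arm MSI-X and the RX path, and retry the download a single time
 * (fw_dl_retry limits the loop to one retry).
 */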
static int btintel_pcie_setup(struct hci_dev *hdev)
{
	int err, fw_dl_retry = 0;
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
		bt_dev_err(hdev, "Firmware download retry count: %d",
			   fw_dl_retry);
		btintel_pcie_dump_debug_registers(hdev);
		btintel_pcie_disable_interrupts(data);
		btintel_pcie_synchronize_irqs(data);
		err = btintel_pcie_reset_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to do shr reset: %d", err);
			break;
		}
		usleep_range(10000, 12000);
		btintel_pcie_reset_ia(data);
		btintel_pcie_enable_interrupts(data);
		btintel_pcie_config_msix(data);
		err = btintel_pcie_enable_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to enable hardware: %d", err);
			break;
		}
		btintel_pcie_start_rx(data);
	}
	return err;
}

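/* Allocate and register the HCI device and hook up the Intel PCIe transport
 * callbacks.
 */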
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
	int err;
	struct hci_dev *hdev;

	hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_PCI;
	hci_set_drvdata(hdev, data);

	data->hdev = hdev;
	SET_HCIDEV_DEV(hdev, &data->pdev->dev);

	hdev->manufacturer = 2;
	hdev->open = btintel_pcie_open;
	hdev->close = btintel_pcie_close;
	hdev->send = btintel_pcie_send_frame;
	hdev->setup = btintel_pcie_setup;
	hdev->shutdown = btintel_shutdown_combined;
	hdev->hw_error = btintel_hw_error;
	hdev->set_diag = btintel_set_diag;
	hdev->set_bdaddr = btintel_set_bdaddr;

	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("Failed to register to hdev (%d)", err);
		goto exit_error;
	}

	data->dmp_hdr.driver_name = KBUILD_MODNAME;
	return 0;

exit_error:
	hci_free_dev(hdev);
	return err;
}

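/* PCI probe: initialize the driver data and PCIe/MSI-X resources, allocate
 * the DMA queues, enable the Bluetooth function, start the RX path and
 * register the HCI device.
 */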
static int btintel_pcie_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err;
	struct btintel_pcie_data *data;

	if (!pdev)
		return -ENODEV;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

	spin_lock_init(&data->irq_lock);
	spin_lock_init(&data->hci_rx_lock);

	init_waitqueue_head(&data->gp0_wait_q);
	data->gp0_received = false;

	init_waitqueue_head(&data->tx_wait_q);
	data->tx_wait_done = false;

	data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
	if (!data->workqueue)
		return -ENOMEM;

	skb_queue_head_init(&data->rx_skb_q);
	INIT_WORK(&data->rx_work, btintel_pcie_rx_work);

	data->boot_stage_cache = 0x00;
	data->img_resp_cache = 0x00;

	err = btintel_pcie_config_pcie(pdev, data);
	if (err)
		goto exit_error;

	pci_set_drvdata(pdev, data);

	err = btintel_pcie_alloc(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_enable_bt(data);
	if (err)
		goto exit_error;

	/* CNV information (CNVi and CNVr) is in CSR */
	data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);

	data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);

	err = btintel_pcie_start_rx(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_setup_hdev(data);
	if (err)
		goto exit_error;

	bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
		   data->cnvr);
	return 0;

exit_error:
	/* reset device before exit */
	btintel_pcie_reset_bt(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);

	return err;
}

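/* PCI remove: quiesce interrupts and the RX worker, reset the device, release
 * the MSI-X vectors, the HCI device, the workqueue and all DMA resources.
 */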
static void btintel_pcie_remove(struct pci_dev *pdev)
{
	struct btintel_pcie_data *data;

	data = pci_get_drvdata(pdev);

	btintel_pcie_disable_interrupts(data);

	btintel_pcie_synchronize_irqs(data);

	flush_work(&data->rx_work);

	btintel_pcie_reset_bt(data);
	for (int i = 0; i < data->alloc_vecs; i++) {
		struct msix_entry *msix_entry;

		msix_entry = &data->msix_entries[i];
		free_irq(msix_entry->vector, msix_entry);
	}

	pci_free_irq_vectors(pdev);

	btintel_pcie_release_hdev(data);

	destroy_workqueue(data->workqueue);

	btintel_pcie_free(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_DEV_COREDUMP
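/* Driver coredump callback: mark a user-triggered dump (skipping duplicates
 * via the in-progress flag), record the trigger reason and schedule the RX
 * work to generate the devcoredump.
 */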
static void btintel_pcie_coredump(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data = pci_get_drvdata(pdev);

	if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
		return;

	data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
	queue_work(data->workqueue, &data->rx_work);
}
#endif

static struct pci_driver btintel_pcie_driver = {
	.name = KBUILD_MODNAME,
	.id_table = btintel_pcie_table,
	.probe = btintel_pcie_probe,
	.remove = btintel_pcie_remove,
#ifdef CONFIG_DEV_COREDUMP
	.driver.coredump = btintel_pcie_coredump
#endif
};
module_pci_driver(btintel_pcie_driver);

MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");