1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Bluetooth support for Intel PCIe devices
5 *
6 * Copyright (C) 2024 Intel Corporation
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/firmware.h>
12 #include <linux/pci.h>
13 #include <linux/wait.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16
17 #include <linux/unaligned.h>
18
19 #include <net/bluetooth/bluetooth.h>
20 #include <net/bluetooth/hci_core.h>
21
22 #include "btintel.h"
23 #include "btintel_pcie.h"
24
25 #define VERSION "0.1"
26
27 #define BTINTEL_PCI_DEVICE(dev, subdev) \
28 .vendor = PCI_VENDOR_ID_INTEL, \
29 .device = (dev), \
30 .subvendor = PCI_ANY_ID, \
31 .subdevice = (subdev), \
32 .driver_data = 0
33
34 #define POLL_INTERVAL_US 10
35
36 /* Intel Bluetooth PCIe device id table */
37 static const struct pci_device_id btintel_pcie_table[] = {
38 { BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
39 { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
40 { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
41 { 0 }
42 };
43 MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
44
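/* Per-controller recovery bookkeeping kept on btintel_pcie_recovery_list:
 * @count tracks how many resets have been attempted, @last_error records when
 * the most recent error happened and @name identifies the device.
 */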
45 struct btintel_pcie_dev_recovery {
46 struct list_head list;
47 u8 count;
48 time64_t last_error;
49 char name[];
50 };
51
52 /* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
53 #define BTINTEL_PCIE_HCI_TYPE_LEN 4
54 #define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
55 #define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
56 #define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
57 #define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
58 #define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
59
60 #define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
61
62 #define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
63 #define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
64
65 #define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
66 #define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
67
70 #define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
71 #define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
72
73 #define BTINTEL_PCIE_RESET_WINDOW_SECS 5
74 #define BTINTEL_PCIE_FLR_MAX_RETRY 1
75
76 /* Alive interrupt context */
77 enum {
78 BTINTEL_PCIE_ROM,
79 BTINTEL_PCIE_FW_DL,
80 BTINTEL_PCIE_HCI_RESET,
81 BTINTEL_PCIE_INTEL_HCI_RESET1,
82 BTINTEL_PCIE_INTEL_HCI_RESET2,
83 BTINTEL_PCIE_D0,
84 BTINTEL_PCIE_D3
85 };
86
87 /* Structure for dbgc fragment buffer
88 * @buf_addr_lsb: LSB of the buffer's physical address
89 * @buf_addr_msb: MSB of the buffer's physical address
90 * @buf_size: Total size of the buffer
91 */
92 struct btintel_pcie_dbgc_ctxt_buf {
93 u32 buf_addr_lsb;
94 u32 buf_addr_msb;
95 u32 buf_size;
96 };
97
98 /* Structure for dbgc fragment
99 * @magic_num: 0XA5A5A5A5
100 * @ver: For Driver-FW compatibility
101 * @total_size: Total size of the payload debug info
102 * @num_buf: Num of allocated debug bufs
103 * @bufs: All buffer's addresses and sizes
104 */
105 struct btintel_pcie_dbgc_ctxt {
106 u32 magic_num;
107 u32 ver;
108 u32 total_size;
109 u32 num_buf;
110 struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
111 };
112
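/* Context used to defer handling of a PCI device removal to a work item */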
113 struct btintel_pcie_removal {
114 struct pci_dev *pdev;
115 struct work_struct work;
116 };
117
118 static LIST_HEAD(btintel_pcie_recovery_list);
119 static DEFINE_SPINLOCK(btintel_pcie_recovery_lock);
120
121 static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
122 {
123 switch (alive_intr_ctxt) {
124 case BTINTEL_PCIE_ROM:
125 return "rom";
126 case BTINTEL_PCIE_FW_DL:
127 return "fw_dl";
128 case BTINTEL_PCIE_D0:
129 return "d0";
130 case BTINTEL_PCIE_D3:
131 return "d3";
132 case BTINTEL_PCIE_HCI_RESET:
133 return "hci_reset";
134 case BTINTEL_PCIE_INTEL_HCI_RESET1:
135 return "intel_reset1";
136 case BTINTEL_PCIE_INTEL_HCI_RESET2:
137 return "intel_reset2";
138 default:
139 return "unknown";
140 }
141 }
142
143 /* This function initializes the memory for DBGC buffers and formats the
144 * DBGC fragment which consists of header info and the DBGC buffers' LSB,
145 * MSB and size as the payload
146 */
147 static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
148 {
149 struct btintel_pcie_dbgc_ctxt db_frag;
150 struct data_buf *buf;
151 int i;
152
153 data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
154 data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
155 sizeof(*buf), GFP_KERNEL);
156 if (!data->dbgc.bufs)
157 return -ENOMEM;
158
159 data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
160 data->dbgc.count *
161 BTINTEL_PCIE_DBGC_BUFFER_SIZE,
162 &data->dbgc.buf_p_addr,
163 GFP_KERNEL | __GFP_NOWARN);
164 if (!data->dbgc.buf_v_addr)
165 return -ENOMEM;
166
167 data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
168 sizeof(struct btintel_pcie_dbgc_ctxt),
169 &data->dbgc.frag_p_addr,
170 GFP_KERNEL | __GFP_NOWARN);
171 if (!data->dbgc.frag_v_addr)
172 return -ENOMEM;
173
174 data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
175
176 db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
177 db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
178 db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
179 db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
180
181 for (i = 0; i < data->dbgc.count; i++) {
182 buf = &data->dbgc.bufs[i];
183 buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
184 buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
185 db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
186 db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
187 db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
188 }
189
190 memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
191 return 0;
192 }
193
194 static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
195 u16 queue_num)
196 {
197 bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u",
198 queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
199 ia->tr_hia[queue_num], ia->tr_tia[queue_num],
200 ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
201 }
202
203 static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
204 u16 index)
205 {
206 bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
207 index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
208 }
209
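/* Recover the driver data from an MSI-X entry: @entry->entry holds the vector
 * index, so stepping back by that many entries gives the start of the
 * msix_entries[] array embedded in struct btintel_pcie_data.
 */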
210 static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
211 {
212 u8 queue = entry->entry;
213 struct msix_entry *entries = entry - queue;
214
215 return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
216 }
217
218 /* Set the doorbell for TXQ to notify the device that @index (actually index-1)
219 * of the TFD is updated and ready to transmit.
220 */
221 static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
222 {
223 u32 val;
224
225 val = index;
226 val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
227
228 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
229 }
230
231 /* Copy the data to next(@tfd_index) data buffer and update the TFD(transfer
232 * descriptor) with the data length and the DMA address of the data buffer.
233 */
234 static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
235 struct sk_buff *skb)
236 {
237 struct data_buf *buf;
238 struct tfd *tfd;
239
240 tfd = &txq->tfds[tfd_index];
241 memset(tfd, 0, sizeof(*tfd));
242
243 buf = &txq->bufs[tfd_index];
244
245 tfd->size = skb->len;
246 tfd->addr = buf->data_p_addr;
247
248 /* Copy the outgoing data to DMA buffer */
249 memcpy(buf->data, skb->data, tfd->size);
250 }
251
252 static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
253 {
254 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
255 u16 cr_hia, cr_tia;
256 u32 reg, mbox_reg;
257 struct sk_buff *skb;
258 char buf[80];
259
260 skb = alloc_skb(1024, GFP_ATOMIC);
261 if (!skb)
262 return;
263
264 snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
265 bt_dev_dbg(hdev, "%s", buf);
266 skb_put_data(skb, buf, strlen(buf));
267
268 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
269 snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
270 bt_dev_dbg(hdev, "%s", buf);
271 skb_put_data(skb, buf, strlen(buf));
272 data->boot_stage_cache = reg;
273
274 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
275 snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
276 skb_put_data(skb, buf, strlen(buf));
277 bt_dev_dbg(hdev, "%s", buf);
278
279 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
280 snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
281 skb_put_data(skb, buf, strlen(buf));
282 bt_dev_dbg(hdev, "%s", buf);
283
284 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
285 snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
286 skb_put_data(skb, buf, strlen(buf));
287 bt_dev_dbg(hdev, "%s", buf);
288
289 /* Read the mailbox status and registers */
290 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
291 snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
292 skb_put_data(skb, buf, strlen(buf));
293 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
294 mbox_reg = btintel_pcie_rd_reg32(data,
295 BTINTEL_PCIE_CSR_MBOX_1_REG);
296 snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
297 skb_put_data(skb, buf, strlen(buf));
298 bt_dev_dbg(hdev, "%s", buf);
299 }
300
301 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
302 mbox_reg = btintel_pcie_rd_reg32(data,
303 BTINTEL_PCIE_CSR_MBOX_2_REG);
304 snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
305 skb_put_data(skb, buf, strlen(buf));
306 bt_dev_dbg(hdev, "%s", buf);
307 }
308
309 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
310 mbox_reg = btintel_pcie_rd_reg32(data,
311 BTINTEL_PCIE_CSR_MBOX_3_REG);
312 snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
313 skb_put_data(skb, buf, strlen(buf));
314 bt_dev_dbg(hdev, "%s", buf);
315 }
316
317 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
318 mbox_reg = btintel_pcie_rd_reg32(data,
319 BTINTEL_PCIE_CSR_MBOX_4_REG);
320 snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
321 skb_put_data(skb, buf, strlen(buf));
322 bt_dev_dbg(hdev, "%s", buf);
323 }
324
325 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
326 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
327 snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
328 skb_put_data(skb, buf, strlen(buf));
329 bt_dev_dbg(hdev, "%s", buf);
330
331 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
332 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
333 snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
334 skb_put_data(skb, buf, strlen(buf));
335 bt_dev_dbg(hdev, "%s", buf);
336 snprintf(buf, sizeof(buf), "--------------------------------");
337 bt_dev_dbg(hdev, "%s", buf);
338
339 hci_recv_diag(hdev, skb);
340 }
341
342 static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
343 struct sk_buff *skb, u32 pkt_type, u16 opcode)
344 {
345 int ret;
346 u16 tfd_index;
347 u32 old_ctxt;
348 bool wait_on_alive = false;
349 struct hci_dev *hdev = data->hdev;
350
351 struct txq *txq = &data->txq;
352
353 tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
354
355 if (tfd_index > txq->count)
356 return -ERANGE;
357
358 /* Firmware raises alive interrupt on HCI_OP_RESET or
359 * BTINTEL_HCI_OP_RESET
360 */
361 wait_on_alive = (pkt_type == BTINTEL_PCIE_HCI_CMD_PKT &&
362 (opcode == BTINTEL_HCI_OP_RESET || opcode == HCI_OP_RESET));
363
364 if (wait_on_alive) {
365 data->gp0_received = false;
366 old_ctxt = data->alive_intr_ctxt;
367 data->alive_intr_ctxt =
368 (opcode == BTINTEL_HCI_OP_RESET ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
369 BTINTEL_PCIE_HCI_RESET);
370 bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s",
371 opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
372 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
373 }
374
375 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &pkt_type,
376 BTINTEL_PCIE_HCI_TYPE_LEN);
377
378 /* Prepare for TX. It updates the TFD with the length of data and
379 * address of the DMA buffer, and copy the data to the DMA buffer
380 */
381 btintel_pcie_prepare_tx(txq, tfd_index, skb);
382
383 tfd_index = (tfd_index + 1) % txq->count;
384 data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;
385
386 /* Arm wait event condition */
387 data->tx_wait_done = false;
388
389 /* Set the doorbell to notify the device */
390 btintel_pcie_set_tx_db(data, tfd_index);
391
392 /* Wait for the complete interrupt - URBD0 */
393 ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
394 msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
395 if (!ret) {
396 bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
397 BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
398 btintel_pcie_dump_debug_registers(data->hdev);
399 return -ETIME;
400 }
401
402 if (wait_on_alive) {
403 ret = wait_event_timeout(data->gp0_wait_q,
404 data->gp0_received,
405 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
406 if (!ret) {
407 hdev->stat.err_tx++;
408 bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
409 BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
410 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
411 return -ETIME;
412 }
413 }
414 return 0;
415 }
416
417 /* Set the doorbell for RXQ to notify the device that @index (actually index-1)
418 * is available to receive the data
419 */
420 static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
421 {
422 u32 val;
423
424 val = index;
425 val |= (BTINTEL_PCIE_RX_DB_VEC << 16);
426
427 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
428 }
429
430 /* Update the FRBD (free buffer descriptor) with the @frbd_index and the
431 * DMA address of the free buffer.
432 */
433 static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
434 {
435 struct data_buf *buf;
436 struct frbd *frbd;
437
438 /* Get the buffer of the FRBD for DMA */
439 buf = &rxq->bufs[frbd_index];
440
441 frbd = &rxq->frbds[frbd_index];
442 memset(frbd, 0, sizeof(*frbd));
443
444 /* Update FRBD */
445 frbd->tag = frbd_index;
446 frbd->addr = buf->data_p_addr;
447 }
448
449 static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
450 {
451 u16 frbd_index;
452 struct rxq *rxq = &data->rxq;
453
454 frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];
455
456 if (frbd_index > rxq->count)
457 return -ERANGE;
458
459 /* Prepare for RX submit. It updates the FRBD with the address of DMA
460 * buffer
461 */
462 btintel_pcie_prepare_rx(rxq, frbd_index);
463
464 frbd_index = (frbd_index + 1) % rxq->count;
465 data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
466 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
467
468 /* Set the doorbell to notify the device */
469 btintel_pcie_set_rx_db(data, frbd_index);
470
471 return 0;
472 }
473
474 static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
475 {
476 int i, ret;
477 struct rxq *rxq = &data->rxq;
478
479 /* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around the
480 * hardware issue that leads to a race condition in the firmware.
481 */
482
483 for (i = 0; i < rxq->count - 3; i++) {
484 ret = btintel_pcie_submit_rx(data);
485 if (ret)
486 return ret;
487 }
488
489 return 0;
490 }
491
492 static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
493 {
494 memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
495 memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
496 memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
497 memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
498 }
499
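/* Reset the BT function: disconnect the bus master, wait for the disconnect
 * status to be reported, issue a software reset and then check that the boot
 * stage register reads back as 0.
 */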
500 static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
501 {
502 u32 reg;
503 int retry = 3;
504
505 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
506
507 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
508 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
509 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
510 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
511
512 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
513
514 do {
515 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
516 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
517 break;
518 usleep_range(10000, 12000);
519
520 } while (--retry > 0);
521 usleep_range(10000, 12000);
522
523 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
524
525 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
526 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
527 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
528 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
529 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
530 usleep_range(10000, 12000);
531
532 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
533 bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
534
535 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
536
537 /* If the shared hardware reset succeeds, then the boot stage register shall
538 * be set to 0
539 */
540 return reg == 0 ? 0 : -ENODEV;
541 }
542
543 static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
544 {
545 u32 reg;
546
547 /* Set MAC_INIT bit to start primary bootloader */
548 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
549 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
550 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
551 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
552 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
553 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
554 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
555 }
556
557 static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
558 {
559 struct sk_buff *skb;
560 int err;
561
562 skb = alloc_skb(size, GFP_ATOMIC);
563 if (!skb)
564 return -ENOMEM;
565
566 skb_put_data(skb, data, size);
567 err = hci_devcd_append(hdev, skb);
568 if (err) {
569 bt_dev_err(hdev, "Failed to append data in the coredump");
570 return err;
571 }
572
573 return 0;
574 }
575
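/* Request MAC access (needed e.g. for reading device memory): set the
 * stop-MAC-access-disable and crystal clock request bits, request MAC access
 * if it is not already granted, and poll for the access status bit.
 */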
576 static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
577 {
578 u32 reg;
579 int retry = 15;
580
581 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
582
583 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
584 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
585 if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
586 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
587
588 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
589
590 do {
591 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
592 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
593 return 0;
594 /* Need a delay here for the Target Access hardware to settle down */
595 usleep_range(1000, 1200);
596
597 } while (--retry > 0);
598
599 return -ETIME;
600 }
601
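/* Release the MAC access, stop-MAC-access-disable and crystal clock requests
 * taken by btintel_pcie_get_mac_access().
 */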
602 static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
603 {
604 u32 reg;
605
606 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
607
608 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
609 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
610
611 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
612 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
613
614 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
615 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
616
617 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
618 }
619
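/* Append a single Intel TLV (type, length, value) to the devcoredump skb */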
620 static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
621 void *data, int size)
622 {
623 struct intel_tlv *tlv;
624
625 tlv = skb_put(skb, sizeof(*tlv) + size);
626 tlv->type = type;
627 tlv->len = size;
628 memcpy(tlv->val, data, tlv->len);
629 }
630
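/* Build the devcoredump from the DBGC trace buffers: a TLV header carrying
 * CNVi/CNVR identifiers, write pointer, wrap counter, trigger reason,
 * firmware build info and timestamps, followed by the raw contents of every
 * debug buffer.
 */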
631 static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
632 {
633 u32 offset, prev_size, wr_ptr_status, dump_size, i;
634 struct btintel_pcie_dbgc *dbgc = &data->dbgc;
635 u8 buf_idx, dump_time_len, fw_build;
636 struct hci_dev *hdev = data->hdev;
637 struct intel_tlv *tlv;
638 struct timespec64 now;
639 struct sk_buff *skb;
640 struct tm tm_now;
641 char buf[256];
642 u16 hdr_len;
643 int ret;
644
645 wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
646 offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
647
648 buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
649 if (buf_idx > dbgc->count) {
650 bt_dev_warn(hdev, "Buffer index is invalid");
651 return -EINVAL;
652 }
653
654 prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
655 if (prev_size + offset >= prev_size)
656 data->dmp_hdr.write_ptr = prev_size + offset;
657 else
658 return -EINVAL;
659
660 ktime_get_real_ts64(&now);
661 time64_to_tm(now.tv_sec, 0, &tm_now);
662 dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
663 tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
664 tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
665
666 fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
667 "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
668 2000 + (data->dmp_hdr.fw_timestamp >> 8),
669 data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
670 data->dmp_hdr.fw_build_num);
671
672 hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
673 sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
674 sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
675 sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
676 sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
677 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
678 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
679 sizeof(*tlv) + dump_time_len +
680 sizeof(*tlv) + fw_build;
681
682 dump_size = hdr_len + sizeof(hdr_len);
683
684 skb = alloc_skb(dump_size, GFP_KERNEL);
685 if (!skb)
686 return -ENOMEM;
687
688 /* Add debug buffers data length to dump size */
689 dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
690
691 ret = hci_devcd_init(hdev, dump_size);
692 if (ret) {
693 bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
694 kfree_skb(skb);
695 return ret;
696 }
697
698 skb_put_data(skb, &hdr_len, sizeof(hdr_len));
699
700 btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
701 sizeof(data->dmp_hdr.cnvi_bt));
702
703 btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
704 sizeof(data->dmp_hdr.write_ptr));
705
706 data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
707 BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
708
709 btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
710 sizeof(data->dmp_hdr.wrap_ctr));
711
712 btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
713 sizeof(data->dmp_hdr.trigger_reason));
714
715 btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
716 sizeof(data->dmp_hdr.fw_git_sha1));
717
718 btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
719 sizeof(data->dmp_hdr.cnvr_top));
720
721 btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
722 sizeof(data->dmp_hdr.cnvi_top));
723
724 btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
725
726 btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
727
728 ret = hci_devcd_append(hdev, skb);
729 if (ret)
730 goto exit_err;
731
732 for (i = 0; i < dbgc->count; i++) {
733 ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
734 BTINTEL_PCIE_DBGC_BUFFER_SIZE);
735 if (ret)
736 break;
737 }
738
739 exit_err:
740 hci_devcd_complete(hdev);
741 return ret;
742 }
743
744 static void btintel_pcie_dump_traces(struct hci_dev *hdev)
745 {
746 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
747 int ret = 0;
748
749 ret = btintel_pcie_get_mac_access(data);
750 if (ret) {
751 bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
752 return;
753 }
754
755 ret = btintel_pcie_read_dram_buffers(data);
756
757 btintel_pcie_release_mac_access(data);
758
759 if (ret)
760 bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
761 }
762
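/* Fill the devcoredump header skb with a human readable summary (controller,
 * firmware build number, driver name and vendor) and store the header length
 * at the start of the skb.
 */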
763 static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
764 {
765 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
766 u16 len = skb->len;
767 u16 *hdrlen_ptr;
768 char buf[80];
769
770 hdrlen_ptr = skb_put_zero(skb, sizeof(len));
771
772 snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
773 INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
774 skb_put_data(skb, buf, strlen(buf));
775
776 snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
777 data->dmp_hdr.fw_build_num);
778 skb_put_data(skb, buf, strlen(buf));
779
780 snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
781 skb_put_data(skb, buf, strlen(buf));
782
783 snprintf(buf, sizeof(buf), "Vendor: Intel\n");
784 skb_put_data(skb, buf, strlen(buf));
785
786 *hdrlen_ptr = skb->len - len;
787 }
788
789 static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
790 {
791 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
792
793 switch (state) {
794 case HCI_DEVCOREDUMP_IDLE:
795 data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
796 break;
797 case HCI_DEVCOREDUMP_ACTIVE:
798 data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
799 break;
800 case HCI_DEVCOREDUMP_TIMEOUT:
801 case HCI_DEVCOREDUMP_ABORT:
802 case HCI_DEVCOREDUMP_DONE:
803 data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
804 break;
805 }
806 }
807
808 /* This function enables the BT function by setting the BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT
809 * bit in the BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and waits for an MSI-X interrupt
810 * with BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
811 * Then the host reads the firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
812 * from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
813 */
814 static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
815 {
816 int err;
817 u32 reg;
818
819 data->gp0_received = false;
820
821 /* Update the DMA address of CI struct to CSR */
822 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
823 data->ci_p_addr & 0xffffffff);
824 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
825 (u64)data->ci_p_addr >> 32);
826
827 /* Reset the cached value of the boot stage. It is updated by the MSI-X
828 * gp0 interrupt handler.
829 */
830 data->boot_stage_cache = 0x0;
831
832 /* Set MAC_INIT bit to start primary bootloader */
833 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
834 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
835 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
836 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
837 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
838 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
839
840 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
841
842 /* MAC is ready. Enable BT FUNC */
843 btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
844 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
845
846 btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
847
848 /* wait for interrupt from the device after booting up to primary
849 * bootloader.
850 */
851 data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
852 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
853 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
854 if (!err)
855 return -ETIME;
856
857 /* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */
858 if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
859 return -ENODEV;
860
861 return 0;
862 }
863
864 static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
865 {
866 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
867 }
868
869 static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
870 {
871 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
872 !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
873 }
874
875 static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
876 {
877 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
878 }
879
880 static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
881 {
882 return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
883 }
884
885 static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
886 u32 dxstate)
887 {
888 bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
889 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
890 }
891
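/* Read @len bytes of device memory starting at @dev_addr into @buf, one
 * 32-bit word at a time, while holding MAC access.
 */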
892 static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
893 void *buf, u32 dev_addr, int len)
894 {
895 int err;
896 u32 *val = buf;
897
898 /* Get device mac access */
899 err = btintel_pcie_get_mac_access(data);
900 if (err) {
901 bt_dev_err(data->hdev, "Failed to get mac access %d", err);
902 return err;
903 }
904
905 for (; len > 0; len -= 4, dev_addr += 4, val++)
906 *val = btintel_pcie_rd_dev_mem(data, dev_addr);
907
908 btintel_pcie_release_mac_access(data);
909
910 return 0;
911 }
912
913 static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
914 {
915 return (data->boot_stage_cache &
916 BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
917 (data->boot_stage_cache &
918 BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
919 }
920
921 static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
922 {
923 return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
924 (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
925 }
926
927 static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
928 {
929 bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
930 btintel_pcie_dump_debug_registers(data->hdev);
931 }
932
933 /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
934 * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
935 */
936 static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
937 {
938 bool submit_rx, signal_waitq;
939 u32 reg, old_ctxt;
940
941 /* This interrupt is raised for three different causes and it is not easy to
942 * know which one triggered it. So, the handler compares each register value
943 * with its cached value and updates the cache before waking up the wait queue.
944 */
945 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
946 if (reg != data->boot_stage_cache)
947 data->boot_stage_cache = reg;
948
949 bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
950 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
951 data->boot_stage_cache, reg);
952 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
953 if (reg != data->img_resp_cache)
954 data->img_resp_cache = reg;
955
956 if (btintel_pcie_in_error(data)) {
957 bt_dev_err(data->hdev, "Controller in error state");
958 btintel_pcie_dump_debug_registers(data->hdev);
959 return;
960 }
961
962 if (btintel_pcie_in_lockdown(data)) {
963 bt_dev_err(data->hdev, "Controller in lockdown state");
964 btintel_pcie_dump_debug_registers(data->hdev);
965 return;
966 }
967
968 data->gp0_received = true;
969
970 old_ctxt = data->alive_intr_ctxt;
971 submit_rx = false;
972 signal_waitq = false;
973
974 switch (data->alive_intr_ctxt) {
975 case BTINTEL_PCIE_ROM:
976 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
977 signal_waitq = true;
978 break;
979 case BTINTEL_PCIE_FW_DL:
980 /* Error case is already handled. Ideally control shall not
981 * reach here
982 */
983 break;
984 case BTINTEL_PCIE_INTEL_HCI_RESET1:
985 if (btintel_pcie_in_op(data)) {
986 submit_rx = true;
987 signal_waitq = true;
988 break;
989 }
990
991 if (btintel_pcie_in_iml(data)) {
992 submit_rx = true;
993 signal_waitq = true;
994 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
995 break;
996 }
997 break;
998 case BTINTEL_PCIE_INTEL_HCI_RESET2:
999 if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
1000 btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
1001 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
1002 }
1003 break;
1004 case BTINTEL_PCIE_D0:
1005 if (btintel_pcie_in_d3(data)) {
1006 data->alive_intr_ctxt = BTINTEL_PCIE_D3;
1007 signal_waitq = true;
1008 break;
1009 }
1010 break;
1011 case BTINTEL_PCIE_D3:
1012 if (btintel_pcie_in_d0(data)) {
1013 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
1014 submit_rx = true;
1015 signal_waitq = true;
1016 break;
1017 }
1018 break;
1019 case BTINTEL_PCIE_HCI_RESET:
1020 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
1021 submit_rx = true;
1022 signal_waitq = true;
1023 break;
1024 default:
1025 bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
1026 data->alive_intr_ctxt);
1027 break;
1028 }
1029
1030 if (submit_rx) {
1031 btintel_pcie_reset_ia(data);
1032 btintel_pcie_start_rx(data);
1033 }
1034
1035 if (signal_waitq) {
1036 bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
1037 wake_up(&data->gp0_wait_q);
1038 }
1039
1040 if (old_ctxt != data->alive_intr_ctxt)
1041 bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
1042 btintel_pcie_alivectxt_state2str(old_ctxt),
1043 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
1044 }
1045
1046 /* This function handles the MSI-X interrupt for rx queue 0 which is for TX
1047 */
1048 static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
1049 {
1050 u16 cr_tia, cr_hia;
1051 struct txq *txq;
1052 struct urbd0 *urbd0;
1053
1054 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
1055 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1056
1057 if (cr_tia == cr_hia)
1058 return;
1059
1060 txq = &data->txq;
1061
1062 while (cr_tia != cr_hia) {
1063 data->tx_wait_done = true;
1064 wake_up(&data->tx_wait_q);
1065
1066 urbd0 = &txq->urbd0s[cr_tia];
1067
1068 if (urbd0->tfd_index > txq->count)
1069 return;
1070
1071 cr_tia = (cr_tia + 1) % txq->count;
1072 data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
1073 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
1074 }
1075 }
1076
1077 static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
1078 {
1079 struct hci_event_hdr *hdr = (void *)skb->data;
1080 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
1081
1082 if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
1083 hdr->plen > 0) {
1084 const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
1085 unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
1086
1087 if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
1088 switch (skb->data[2]) {
1089 case 0x02:
1090 /* When switching to the operational firmware
1091 * the device sends a vendor specific event
1092 * indicating that the bootup completed.
1093 */
1094 btintel_bootup(hdev, ptr, len);
1095
1096 /* If bootup event is from operational image,
1097 * driver needs to write sleep control register to
1098 * move into D0 state
1099 */
1100 if (btintel_pcie_in_op(data)) {
1101 btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
1102 data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
1103 kfree_skb(skb);
1104 return 0;
1105 }
1106
1107 if (btintel_pcie_in_iml(data)) {
1108 /* In case of IML, there is no concept
1109 * of D0 transition. Just mimic as if
1110 * IML moved to D0 by clearing INTEL_WAIT_FOR_D0
1111 * bit and waking up the task waiting on
1112 * INTEL_WAIT_FOR_D0. This is required
1113 * as intel_boot() is common function for
1114 * both IML and OP image loading.
1115 */
1116 if (btintel_test_and_clear_flag(data->hdev,
1117 INTEL_WAIT_FOR_D0))
1118 btintel_wake_up_flag(data->hdev,
1119 INTEL_WAIT_FOR_D0);
1120 }
1121 kfree_skb(skb);
1122 return 0;
1123 case 0x06:
1124 /* When the firmware loading completes the
1125 * device sends out a vendor specific event
1126 * indicating the result of the firmware
1127 * loading.
1128 */
1129 btintel_secure_send_result(hdev, ptr, len);
1130 kfree_skb(skb);
1131 return 0;
1132 }
1133 }
1134
1135 /* This is a debug event that comes from the IML and OP image when it
1136 * starts execution. There is no need to pass this event to the stack.
1137 */
1138 if (skb->data[2] == 0x97) {
1139 hci_recv_diag(hdev, skb);
1140 return 0;
1141 }
1142 }
1143
1144 return hci_recv_frame(hdev, skb);
1145 }
1146 /* Process the received rx data
1147 * It checks the frame header to identify the data type, creates an skb
1148 * and calls the HCI API
1149 */
1150 static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
1151 struct sk_buff *skb)
1152 {
1153 int ret;
1154 u8 pkt_type;
1155 u16 plen;
1156 u32 pcie_pkt_type;
1157 void *pdata;
1158 struct hci_dev *hdev = data->hdev;
1159
1160 spin_lock(&data->hci_rx_lock);
1161
1162 /* The first 4 bytes indicates the Intel PCIe specific packet type */
1163 pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
1164 if (!pdata) {
1165 bt_dev_err(hdev, "Corrupted packet received");
1166 ret = -EILSEQ;
1167 goto exit_error;
1168 }
1169
1170 pcie_pkt_type = get_unaligned_le32(pdata);
1171
1172 switch (pcie_pkt_type) {
1173 case BTINTEL_PCIE_HCI_ACL_PKT:
1174 if (skb->len >= HCI_ACL_HDR_SIZE) {
1175 plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
1176 pkt_type = HCI_ACLDATA_PKT;
1177 } else {
1178 bt_dev_err(hdev, "ACL packet is too short");
1179 ret = -EILSEQ;
1180 goto exit_error;
1181 }
1182 break;
1183
1184 case BTINTEL_PCIE_HCI_SCO_PKT:
1185 if (skb->len >= HCI_SCO_HDR_SIZE) {
1186 plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
1187 pkt_type = HCI_SCODATA_PKT;
1188 } else {
1189 bt_dev_err(hdev, "SCO packet is too short");
1190 ret = -EILSEQ;
1191 goto exit_error;
1192 }
1193 break;
1194
1195 case BTINTEL_PCIE_HCI_EVT_PKT:
1196 if (skb->len >= HCI_EVENT_HDR_SIZE) {
1197 plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
1198 pkt_type = HCI_EVENT_PKT;
1199 } else {
1200 bt_dev_err(hdev, "Event packet is too short");
1201 ret = -EILSEQ;
1202 goto exit_error;
1203 }
1204 break;
1205
1206 case BTINTEL_PCIE_HCI_ISO_PKT:
1207 if (skb->len >= HCI_ISO_HDR_SIZE) {
1208 plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
1209 pkt_type = HCI_ISODATA_PKT;
1210 } else {
1211 bt_dev_err(hdev, "ISO packet is too short");
1212 ret = -EILSEQ;
1213 goto exit_error;
1214 }
1215 break;
1216
1217 default:
1218 bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
1219 pcie_pkt_type);
1220 ret = -EINVAL;
1221 goto exit_error;
1222 }
1223
1224 if (skb->len < plen) {
1225 bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
1226 pkt_type);
1227 ret = -EILSEQ;
1228 goto exit_error;
1229 }
1230
1231 bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
1232
1233 hci_skb_pkt_type(skb) = pkt_type;
1234 hdev->stat.byte_rx += plen;
1235 skb_trim(skb, plen);
1236
1237 if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
1238 ret = btintel_pcie_recv_event(hdev, skb);
1239 else
1240 ret = hci_recv_frame(hdev, skb);
1241 skb = NULL; /* skb is freed in the callee */
1242
1243 exit_error:
1244 if (skb)
1245 kfree_skb(skb);
1246
1247 if (ret)
1248 hdev->stat.err_rx++;
1249
1250 spin_unlock(&data->hci_rx_lock);
1251
1252 return ret;
1253 }
1254
1255 static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
1256 {
1257 int len, err, offset, pending;
1258 struct sk_buff *skb;
1259 u8 *buf, prefix[64];
1260 u32 addr, val;
1261 u16 pkt_len;
1262
1263 struct tlv {
1264 u8 type;
1265 __le16 len;
1266 u8 val[];
1267 } __packed;
1268
1269 struct tlv *tlv;
1270
1271 switch (data->dmp_hdr.cnvi_top & 0xfff) {
1272 case BTINTEL_CNVI_BLAZARI:
1273 case BTINTEL_CNVI_BLAZARIW:
1274 /* only from step B0 onwards */
1275 if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
1276 return;
1277 len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
1278 addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
1279 break;
1280 case BTINTEL_CNVI_SCP:
1281 len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
1282 addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
1283 break;
1284 default:
1285 bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
1286 return;
1287 }
1288
1289 buf = kzalloc(len, GFP_KERNEL);
1290 if (!buf)
1291 goto exit_on_error;
1292
1293 btintel_pcie_mac_init(data);
1294
1295 err = btintel_pcie_read_device_mem(data, buf, addr, len);
1296 if (err)
1297 goto exit_on_error;
1298
1299 val = get_unaligned_le32(buf);
1300 if (val != BTINTEL_PCIE_MAGIC_NUM) {
1301 bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
1302 val);
1303 goto exit_on_error;
1304 }
1305
1306 snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
1307
1308 offset = 4;
1309 do {
1310 pending = len - offset;
1311 if (pending < sizeof(*tlv))
1312 break;
1313 tlv = (struct tlv *)(buf + offset);
1314
1315 /* If type == 0, then there are no more TLVs to be parsed */
1316 if (!tlv->type) {
1317 bt_dev_dbg(data->hdev, "Invalid TLV type 0");
1318 break;
1319 }
1320 pkt_len = le16_to_cpu(tlv->len);
1321 offset += sizeof(*tlv);
1322 pending = len - offset;
1323 if (pkt_len > pending)
1324 break;
1325
1326 offset += pkt_len;
1327
1328 /* Only TLVs of type == 1 are HCI events, no need to process other
1329 * TLVs
1330 */
1331 if (tlv->type != 1)
1332 continue;
1333
1334 bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
1335 if (pkt_len > HCI_MAX_EVENT_SIZE)
1336 break;
1337 skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
1338 if (!skb)
1339 goto exit_on_error;
1340 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1341 skb_put_data(skb, tlv->val, pkt_len);
1342
1343 /* copy Intel specific pcie packet type */
1344 val = BTINTEL_PCIE_HCI_EVT_PKT;
1345 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
1346 BTINTEL_PCIE_HCI_TYPE_LEN);
1347
1348 print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
1349 tlv->val, pkt_len, false);
1350
1351 btintel_pcie_recv_frame(data, skb);
1352 } while (offset < len);
1353
1354 exit_on_error:
1355 kfree(buf);
1356 }
1357
1358 static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
1359 {
1360 bt_dev_err(data->hdev, "Received hw exception interrupt");
1361
1362 if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
1363 return;
1364
1365 if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
1366 return;
1367
1368 /* Trigger device core dump when there is HW exception */
1369 if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
1370 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
1371
1372 queue_work(data->workqueue, &data->rx_work);
1373 }
1374
1375 static void btintel_pcie_rx_work(struct work_struct *work)
1376 {
1377 struct btintel_pcie_data *data = container_of(work,
1378 struct btintel_pcie_data, rx_work);
1379 struct sk_buff *skb;
1380
1381 if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
1382 /* Unlike USB products, the controller does not send a hardware
1383 * exception event on exception. Instead, the controller writes the
1384 * hardware exception event to device memory along with optional debug
1385 * events, raises an MSI-X interrupt and halts. The driver shall read the
1386 * exception event from device memory and pass it to the stack for
1387 * further processing.
1388 */
1389 btintel_pcie_read_hwexp(data);
1390 clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
1391 }
1392
1393 if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
1394 btintel_pcie_dump_traces(data->hdev);
1395 clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
1396 }
1397
1398 /* Process the sk_buf in queue and send to the HCI layer */
1399 while ((skb = skb_dequeue(&data->rx_skb_q))) {
1400 btintel_pcie_recv_frame(data, skb);
1401 }
1402 }
1403
1404 /* Create an sk_buff with the data, add it to the RX queue and start the RX work */
1405 static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
1406 void *buf)
1407 {
1408 int ret, len;
1409 struct rfh_hdr *rfh_hdr;
1410 struct sk_buff *skb;
1411
1412 rfh_hdr = buf;
1413
1414 len = rfh_hdr->packet_len;
1415 if (len <= 0) {
1416 ret = -EINVAL;
1417 goto resubmit;
1418 }
1419
1420 /* Remove RFH header */
1421 buf += sizeof(*rfh_hdr);
1422
1423 skb = alloc_skb(len, GFP_ATOMIC);
1424 if (!skb)
1425 goto resubmit;
1426
1427 skb_put_data(skb, buf, len);
1428 skb_queue_tail(&data->rx_skb_q, skb);
1429 queue_work(data->workqueue, &data->rx_work);
1430
1431 resubmit:
1432 ret = btintel_pcie_submit_rx(data);
1433
1434 return ret;
1435 }
1436
1437 /* Handles the MSI-X interrupt for rx queue 1 which is for RX */
1438 static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
1439 {
1440 u16 cr_hia, cr_tia;
1441 struct rxq *rxq;
1442 struct urbd1 *urbd1;
1443 struct data_buf *buf;
1444 int ret;
1445 struct hci_dev *hdev = data->hdev;
1446
1447 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
1448 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1449
1450 bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
1451
1452 /* Check CR_TIA and CR_HIA for change */
1453 if (cr_tia == cr_hia)
1454 return;
1455
1456 rxq = &data->rxq;
1457
1458 /* The firmware may send multiple CDs in a single MSI-X interrupt, so all
1459 * received CDs need to be processed in this handler.
1460 */
1461 while (cr_tia != cr_hia) {
1462 urbd1 = &rxq->urbd1s[cr_tia];
1463 ipc_print_urbd1(data->hdev, urbd1, cr_tia);
1464
1465 buf = &rxq->bufs[urbd1->frbd_tag];
1466 if (!buf) {
1467 bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
1468 urbd1->frbd_tag);
1469 return;
1470 }
1471
1472 ret = btintel_pcie_submit_rx_work(data, urbd1->status,
1473 buf->data);
1474 if (ret) {
1475 bt_dev_err(hdev, "RXQ: failed to submit rx request");
1476 return;
1477 }
1478
1479 cr_tia = (cr_tia + 1) % rxq->count;
1480 data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
1481 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
1482 }
1483 }
1484
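/* Hard IRQ handler: the actual interrupt handling is done in the threaded
 * handler, so simply wake up the IRQ thread.
 */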
1485 static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
1486 {
1487 return IRQ_WAKE_THREAD;
1488 }
1489
1490 static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
1491 {
1492 return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1493 }
1494
1495 static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
1496 {
1497 return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1498 }
1499
1500 static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
1501 {
1502 struct msix_entry *entry = dev_id;
1503 struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
1504 u32 intr_fh, intr_hw;
1505
1506 spin_lock(&data->irq_lock);
1507 intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
1508 intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);
1509
1510 /* Clear the cause registers to avoid handling the same cause again */
1511 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
1512 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
1513 spin_unlock(&data->irq_lock);
1514
1515 if (unlikely(!(intr_fh | intr_hw))) {
1516 /* Ignore interrupt, inta == 0 */
1517 return IRQ_NONE;
1518 }
1519
1520 /* This interrupt is raised when there is a hardware exception */
1521 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
1522 btintel_pcie_msix_hw_exp_handler(data);
1523
1524 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
1525 btintel_pcie_msix_gp1_handler(data);
1526
1527 /* This interrupt is triggered by the firmware after updating
1528 * boot_stage register and image_response register
1529 */
1530 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
1531 btintel_pcie_msix_gp0_handler(data);
1532
1533 /* For TX */
1534 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
1535 btintel_pcie_msix_tx_handle(data);
1536 if (!btintel_pcie_is_rxq_empty(data))
1537 btintel_pcie_msix_rx_handle(data);
1538 }
1539
1540 /* For RX */
1541 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
1542 btintel_pcie_msix_rx_handle(data);
1543 if (!btintel_pcie_is_txackq_empty(data))
1544 btintel_pcie_msix_tx_handle(data);
1545 }
1546
1547 /*
1548 * Before sending the interrupt the HW disables it to prevent a nested
1549 * interrupt. This is done by writing 1 to the corresponding bit in
1550 * the mask register. After handling the interrupt, it should be
1551 * re-enabled by clearing this bit. This register is defined as write 1
1552 * clear (W1C) register, meaning that it's cleared by writing 1
1553 * to the bit.
1554 */
1555 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
1556 BIT(entry->entry));
1557
1558 return IRQ_HANDLED;
1559 }
1560
1561 /* This function requests the irq for MSI-X and registers the handlers per irq.
1562 * Currently, it requests only 1 irq for all interrupt causes.
1563 */
1564 static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
1565 {
1566 int err;
1567 int num_irqs, i;
1568
1569 for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
1570 data->msix_entries[i].entry = i;
1571
1572 num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
1573 BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
1574 if (num_irqs < 0)
1575 return num_irqs;
1576
1577 data->alloc_vecs = num_irqs;
1578 data->msix_enabled = 1;
1579 data->def_irq = 0;
1580
1581 /* setup irq handler */
1582 for (i = 0; i < data->alloc_vecs; i++) {
1583 struct msix_entry *msix_entry;
1584
1585 msix_entry = &data->msix_entries[i];
1586 msix_entry->vector = pci_irq_vector(data->pdev, i);
1587
1588 err = devm_request_threaded_irq(&data->pdev->dev,
1589 msix_entry->vector,
1590 btintel_pcie_msix_isr,
1591 btintel_pcie_irq_msix_handler,
1592 IRQF_SHARED,
1593 KBUILD_MODNAME,
1594 msix_entry);
1595 if (err) {
1596 pci_free_irq_vectors(data->pdev);
1597 data->alloc_vecs = 0;
1598 return err;
1599 }
1600 }
1601 return 0;
1602 }
1603
1604 struct btintel_pcie_causes_list {
1605 u32 cause;
1606 u32 mask_reg;
1607 u8 cause_num;
1608 };
1609
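/* Interrupt causes routed to the default vector, along with the mask register
 * and IVAR index used for each: FH causes 0/1 for TX/RX completions and HW
 * causes GP0/HWEXP.
 */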
1610 static struct btintel_pcie_causes_list causes_list[] = {
1611 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
1612 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
1613 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
1614 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
1615 };
1616
1617 /* This function configures the interrupt masks for both HW_INT_CAUSES and
1618 * FH_INT_CAUSES which are meaningful to us.
1619 *
1620 * After resetting the BT function via PCIe FLR or FUNC_CTRL reset, the driver
1621 * needs to call this function again to reconfigure, since the masks
1622 * are reset to 0xFFFFFFFF after reset.
1623 */
1624 static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
1625 {
1626 int i;
1627 int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;
1628
1629 /* Set Non Auto Clear Cause */
1630 for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
1631 btintel_pcie_wr_reg8(data,
1632 BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
1633 val);
1634 btintel_pcie_clr_reg_bits(data,
1635 causes_list[i].mask_reg,
1636 causes_list[i].cause);
1637 }
1638
1639 /* Save the initial interrupt mask */
1640 data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
1641 data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
1642 }
1643
1644 static int btintel_pcie_config_pcie(struct pci_dev *pdev,
1645 struct btintel_pcie_data *data)
1646 {
1647 int err;
1648
1649 err = pcim_enable_device(pdev);
1650 if (err)
1651 return err;
1652
1653 pci_set_master(pdev);
1654
1655 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1656 if (err) {
1657 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1658 if (err)
1659 return err;
1660 }
1661
1662 data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
1663 if (IS_ERR(data->base_addr))
1664 return PTR_ERR(data->base_addr);
1665
1666 err = btintel_pcie_setup_irq(data);
1667 if (err)
1668 return err;
1669
1670 /* Configure MSI-X with causes list */
1671 btintel_pcie_config_msix(data);
1672
1673 return 0;
1674 }
1675
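/* Populate the context information (CI) structure shared with the firmware:
 * index array addresses, TX/RX descriptor queue addresses and sizes, doorbell
 * vectors and the DBGC fragment location.
 */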
1676 static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
1677 struct ctx_info *ci)
1678 {
1679 ci->version = 0x1;
1680 ci->size = sizeof(*ci);
1681 ci->config = 0x0000;
1682 ci->addr_cr_hia = data->ia.cr_hia_p_addr;
1683 ci->addr_tr_tia = data->ia.tr_tia_p_addr;
1684 ci->addr_cr_tia = data->ia.cr_tia_p_addr;
1685 ci->addr_tr_hia = data->ia.tr_hia_p_addr;
1686 ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
1687 ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
1688 ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
1689 ci->addr_tfdq = data->txq.tfds_p_addr;
1690 ci->num_tfdq = data->txq.count;
1691 ci->num_urbdq0 = data->txq.count;
1692 ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
1693 ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
1694 ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
1695 ci->addr_frbdq = data->rxq.frbds_p_addr;
1696 ci->num_frbdq = data->rxq.count;
1697 ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1698 ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
1699 ci->num_urbdq1 = data->rxq.count;
1700 ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1701
1702 ci->dbg_output_mode = 0x01;
1703 ci->dbgc_addr = data->dbgc.frag_p_addr;
1704 ci->dbgc_size = data->dbgc.frag_size;
1705 ci->dbg_preset = 0x00;
1706 }
1707
1708 static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
1709 struct txq *txq)
1710 {
1711 /* Free data buffers first */
1712 dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1713 txq->buf_v_addr, txq->buf_p_addr);
1714 kfree(txq->bufs);
1715 }
1716
1717 static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
1718 struct txq *txq)
1719 {
1720 int i;
1721 struct data_buf *buf;
1722
1723 /* Allocate the same number of buffers as the descriptor */
1724 txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL);
1725 if (!txq->bufs)
1726 return -ENOMEM;
1727
1728 /* Allocate full chunk of data buffer for DMA first and do indexing and
1729 * initialization next, so it can be freed easily
1730 */
1731 txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1732 txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1733 &txq->buf_p_addr,
1734 GFP_KERNEL | __GFP_NOWARN);
1735 if (!txq->buf_v_addr) {
1736 kfree(txq->bufs);
1737 return -ENOMEM;
1738 }
1739
1740 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1741 * have virtual address and physical address
1742 */
1743 for (i = 0; i < txq->count; i++) {
1744 buf = &txq->bufs[i];
1745 buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1746 buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1747 }
1748
1749 return 0;
1750 }
1751
btintel_pcie_free_rxq_bufs(struct btintel_pcie_data * data,struct rxq * rxq)1752 static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
1753 struct rxq *rxq)
1754 {
1755 /* Free data buffers first */
1756 dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1757 rxq->buf_v_addr, rxq->buf_p_addr);
1758 kfree(rxq->bufs);
1759 }
1760
btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data * data,struct rxq * rxq)1761 static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
1762 struct rxq *rxq)
1763 {
1764 int i;
1765 struct data_buf *buf;
1766
1767 /* Allocate the same number of buffers as the descriptor */
1768 rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL);
1769 if (!rxq->bufs)
1770 return -ENOMEM;
1771
1772 /* Allocate full chunk of data buffer for DMA first and do indexing and
1773 * initialization next, so it can be freed easily
1774 */
1775 rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1776 rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1777 &rxq->buf_p_addr,
1778 GFP_KERNEL | __GFP_NOWARN);
1779 if (!rxq->buf_v_addr) {
1780 kfree(rxq->bufs);
1781 return -ENOMEM;
1782 }
1783
1784 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1785 * have virtual address and physical address
1786 */
1787 for (i = 0; i < rxq->count; i++) {
1788 buf = &rxq->bufs[i];
1789 buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1790 buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1791 }
1792
1793 return 0;
1794 }
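
/* Carve the index arrays out of the shared DMA block. The four arrays
 * (TR head, TR tail, CR head, CR tail) are laid out back to back, each
 * holding one u16 entry per queue (BTINTEL_PCIE_NUM_QUEUES).
 */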
static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
				  dma_addr_t p_addr, void *v_addr,
				  struct ia *ia)
{
	/* TR Head Index Array */
	ia->tr_hia_p_addr = p_addr;
	ia->tr_hia = v_addr;

	/* TR Tail Index Array */
	ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
	ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;

	/* CR Head Index Array */
	ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
	ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);

	/* CR Tail Index Array */
	ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
	ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
}

static void btintel_pcie_free(struct btintel_pcie_data *data)
{
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
	btintel_pcie_free_txq_bufs(data, &data->txq);

	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
}

/* Allocate tx and rx queues, any related data structures and buffers.
 */
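/* The single dma_pool allocation is carved up sequentially as follows:
 *
 *   TFDs | URBD0s | FRBDs | URBD1s | index arrays (4 x u16 per queue) | ctx_info
 *
 * p_addr/v_addr are advanced past each region as it is assigned below.
 */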
static int btintel_pcie_alloc(struct btintel_pcie_data *data)
{
	int err = 0;
	size_t total;
	dma_addr_t p_addr;
	void *v_addr;

	/* Allocate the chunk of DMA memory for descriptors, index array, and
	 * context information, instead of allocating individually.
	 * The DMA memory for data buffer is allocated while setting up
	 * each queue.
	 *
	 * Total size is sum of the following
	 *  + size of TFD * Number of descriptors in queue
	 *  + size of URBD0 * Number of descriptors in queue
	 *  + size of FRBD * Number of descriptors in queue
	 *  + size of URBD1 * Number of descriptors in queue
	 *  + size of index * Number of queues(2) * type of index array(4)
	 *  + size of context information
	 */
	total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
	total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Add the sum of size of index array and size of ci struct */
	total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);

	/* Allocate DMA Pool */
	data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
					 total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
	if (!data->dma_pool) {
		err = -ENOMEM;
		goto exit_error;
	}

	v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
				 &p_addr);
	if (!v_addr) {
		dma_pool_destroy(data->dma_pool);
		err = -ENOMEM;
		goto exit_error;
	}

	data->dma_p_addr = p_addr;
	data->dma_v_addr = v_addr;

	/* Setup descriptor count */
	data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
	data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Setup tfds */
	data->txq.tfds_p_addr = p_addr;
	data->txq.tfds = v_addr;

	p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup urbd0 */
	data->txq.urbd0s_p_addr = p_addr;
	data->txq.urbd0s = v_addr;

	p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup FRBD */
	data->rxq.frbds_p_addr = p_addr;
	data->rxq.frbds = v_addr;

	p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup urbd1 */
	data->rxq.urbd1s_p_addr = p_addr;
	data->rxq.urbd1s = v_addr;

	p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup data buffers for txq */
	err = btintel_pcie_setup_txq_bufs(data, &data->txq);
	if (err)
		goto exit_error_pool;

	/* Setup data buffers for rxq */
	err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
	if (err)
		goto exit_error_txq;

	/* Setup Index Array */
	btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);

	/* Setup data buffers for dbgc */
	err = btintel_pcie_setup_dbgc(data);
	if (err)
		goto exit_error_rxq;

	/* Setup Context Information */
	p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
	v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;

	data->ci = v_addr;
	data->ci_p_addr = p_addr;

	/* Initialize the CI */
	btintel_pcie_init_ci(data, data->ci);

	return 0;

exit_error_rxq:
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
exit_error_txq:
	btintel_pcie_free_txq_bufs(data, &data->txq);
exit_error_pool:
	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
exit_error:
	return err;
}

static int btintel_pcie_open(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

static int btintel_pcie_close(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}
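
/* Build a synthetic HCI Command Complete event for @opcode and feed it to
 * the HCI core, used when the firmware will not send one by itself.
 */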
static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	*(u8 *)skb_put(skb, 1) = 0x00;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
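
/* Transmit one HCI packet from the HCI core. The BT SIG packet type is
 * mapped to the 4-byte iBT PCIe type (see the comment below) and the skb
 * is then handed to btintel_pcie_send_sync() for transmission.
 */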
static int btintel_pcie_send_frame(struct hci_dev *hdev,
				   struct sk_buff *skb)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	struct hci_command_hdr *cmd;
	__u16 opcode = ~0;
	int ret;
	u32 type;

	if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
		return -ENODEV;

	/* Due to the fw limitation, the type header of the packet should be
	 * 4 bytes unlike 1 byte for UART. In UART, the firmware can read
	 * the first byte to get the packet type and redirect the rest of data
	 * packet to the right handler.
	 *
	 * But for PCIe, THF(Transfer Flow Handler) fetches the 4 bytes of data
	 * from DMA memory and by the time it reads the first 4 bytes, it has
	 * already consumed some part of packet. Thus the packet type indicator
	 * for iBT PCIe is 4 bytes.
	 *
	 * Luckily, when HCI core creates the skb, it allocates 8 bytes of
	 * head room for profile and driver use, and before sending the data
	 * to the device, append the iBT PCIe packet type in the front.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		type = BTINTEL_PCIE_HCI_CMD_PKT;
		cmd = (void *)skb->data;
		opcode = le16_to_cpu(cmd->opcode);
		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
			struct hci_command_hdr *cmd = (void *)skb->data;
			__u16 opcode = le16_to_cpu(cmd->opcode);

			/* When the BTINTEL_HCI_OP_RESET command is issued to
			 * boot into the operational firmware, it will actually
			 * not send a command complete event. To keep the flow
			 * control working inject that event here.
			 */
			if (opcode == BTINTEL_HCI_OP_RESET)
				btintel_pcie_inject_cmd_complete(hdev, opcode);
		}

		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		type = BTINTEL_PCIE_HCI_ACL_PKT;
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		type = BTINTEL_PCIE_HCI_SCO_PKT;
		hdev->stat.sco_tx++;
		break;
	case HCI_ISODATA_PKT:
		type = BTINTEL_PCIE_HCI_ISO_PKT;
		break;
	default:
		bt_dev_err(hdev, "Unknown HCI packet type");
		return -EILSEQ;
	}

	ret = btintel_pcie_send_sync(data, skb, type, opcode);
	if (ret) {
		hdev->stat.err_tx++;
		bt_dev_err(hdev, "Failed to send frame (%d)", ret);
		goto exit_error;
	}

	hdev->stat.byte_tx += skb->len;
	kfree_skb(skb);

exit_error:
	return ret;
}

static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
{
	struct hci_dev *hdev;

	hdev = data->hdev;
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
	data->hdev = NULL;
}
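
/* Mask (disable) and unmask (enable) all MSI-X causes using the initial
 * interrupt masks saved in btintel_pcie_config_msix().
 */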
static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
{
	for (int i = 0; i < data->alloc_vecs; i++)
		synchronize_irq(data->msix_entries[i].vector);
}
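
/* One firmware setup pass: read the Intel version (vendor command 0xfc05),
 * apply the Intel quirks, validate the hardware platform and variant,
 * download the operational firmware via the bootloader TLV path and
 * register the devcoredump callbacks.
 */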
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	const u8 param[1] = { 0xFF };
	struct intel_version_tlv ver_tlv;
	struct sk_buff *skb;
	int err;

	BT_DBG("%s", hdev->name);

	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* Check the status */
	if (skb->data[0]) {
		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
			   skb->data[0]);
		err = -EIO;
		goto exit_error;
	}

	/* Apply the common HCI quirks for Intel device */
	hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
	hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
	hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);

	/* Set up the quality report callback for Intel devices */
	hdev->set_quality_report = btintel_set_quality_report;

	memset(&ver_tlv, 0, sizeof(ver_tlv));
	/* For TLV type device, parse the tlv data */
	err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
	if (err) {
		bt_dev_err(hdev, "Failed to parse TLV version information");
		goto exit_error;
	}

	switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
	case 0x37:
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
			   INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come
	 * along.
	 */
	switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
	case 0x1e:	/* BzrI */
	case 0x1f:	/* ScP */
		/* Display version information of TLV type */
		btintel_version_info_tlv(hdev, &ver_tlv);

		/* Apply the device specific HCI quirks for TLV based devices
		 *
		 * All TLV based devices support WBS
		 */
		hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);

		/* Setup MSFT Extension support */
		btintel_set_msft_opcode(hdev,
					INTEL_HW_VARIANT(ver_tlv.cnvi_bt));

		err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
		if (err)
			goto exit_error;
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
			   INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
		break;
	}

	data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
	data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
	data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
	data->dmp_hdr.fw_build_type = ver_tlv.build_type;
	data->dmp_hdr.fw_build_num = ver_tlv.build_num;
	data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;

	if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
		data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;

	err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
				 btintel_pcie_dump_notify);
	if (err) {
		bt_dev_err(hdev, "Failed to register coredump (%d)", err);
		goto exit_error;
	}

	btintel_print_fseq_info(hdev);
exit_error:
	kfree_skb(skb);

	return err;
}
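
/* ->setup() callback: run the firmware setup and, if it fails, reset the
 * device (shared reset, IA reset, re-enable) and retry a single time.
 */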
static int btintel_pcie_setup(struct hci_dev *hdev)
{
	int err, fw_dl_retry = 0;
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
		bt_dev_err(hdev, "Firmware download retry count: %d",
			   fw_dl_retry);
		btintel_pcie_dump_debug_registers(hdev);
		btintel_pcie_disable_interrupts(data);
		btintel_pcie_synchronize_irqs(data);
		err = btintel_pcie_reset_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to do shr reset: %d", err);
			break;
		}
		usleep_range(10000, 12000);
		btintel_pcie_reset_ia(data);
		btintel_pcie_enable_interrupts(data);
		btintel_pcie_config_msix(data);
		err = btintel_pcie_enable_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to enable hardware: %d", err);
			break;
		}
		btintel_pcie_start_rx(data);
	}

	if (!err)
		set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags);
	return err;
}
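
/* Look up the per-device (BDF keyed) recovery record, allocating and
 * registering a new one on first use.
 */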
static struct btintel_pcie_dev_recovery *
btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
{
	struct btintel_pcie_dev_recovery *tmp, *data = NULL;
	const char *name = pci_name(pdev);
	struct hci_dev *hdev = to_hci_dev(dev);

	spin_lock(&btintel_pcie_recovery_lock);
	list_for_each_entry(tmp, &btintel_pcie_recovery_list, list) {
		if (strcmp(tmp->name, name))
			continue;
		data = tmp;
		break;
	}
	spin_unlock(&btintel_pcie_recovery_lock);

	if (data) {
		bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name);
		return data;
	}

	data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
	if (!data)
		return NULL;

	strscpy_pad(data->name, name, strlen(name) + 1);
	spin_lock(&btintel_pcie_recovery_lock);
	list_add_tail(&data->list, &btintel_pcie_recovery_list);
	spin_unlock(&btintel_pcie_recovery_lock);

	return data;
}

static void btintel_pcie_free_restart_list(void)
{
	struct btintel_pcie_dev_recovery *tmp;

	while ((tmp = list_first_entry_or_null(&btintel_pcie_recovery_list,
					       typeof(*tmp), list))) {
		list_del(&tmp->list);
		kfree(tmp);
	}
}
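
/* Track recovery attempts: start a new window on the first error, count
 * further errors inside BTINTEL_PCIE_RESET_WINDOW_SECS and clear the state
 * once the window has elapsed.
 */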
static void btintel_pcie_inc_recovery_count(struct pci_dev *pdev,
					    struct device *dev)
{
	struct btintel_pcie_dev_recovery *data;
	time64_t retry_window;

	data = btintel_pcie_get_recovery(pdev, dev);
	if (!data)
		return;

	retry_window = ktime_get_boottime_seconds() - data->last_error;
	if (data->count == 0) {
		data->last_error = ktime_get_boottime_seconds();
		data->count++;
	} else if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
		   data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) {
		data->count++;
	} else if (retry_window > BTINTEL_PCIE_RESET_WINDOW_SECS) {
		data->last_error = 0;
		data->count = 0;
	}
}

static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data);
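
/* Recovery worker: quiesce the device, drop the hci_dev, reset the PCI
 * function and bring the interface back up with a fresh hci_dev.
 */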
static void btintel_pcie_removal_work(struct work_struct *wk)
{
	struct btintel_pcie_removal *removal =
		container_of(wk, struct btintel_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	struct btintel_pcie_data *data;
	int err;

	pci_lock_rescan_remove();

	if (!pdev->bus)
		goto error;

	data = pci_get_drvdata(pdev);

	btintel_pcie_disable_interrupts(data);
	btintel_pcie_synchronize_irqs(data);

	flush_work(&data->rx_work);
	flush_work(&data->hdev->dump.dump_rx);

	bt_dev_dbg(data->hdev, "Release bluetooth interface");
	btintel_pcie_release_hdev(data);

	err = pci_reset_function(pdev);
	if (err) {
		BT_ERR("Failed resetting the pcie device (%d)", err);
		goto error;
	}

	btintel_pcie_enable_interrupts(data);
	btintel_pcie_config_msix(data);

	err = btintel_pcie_enable_bt(data);
	if (err) {
		BT_ERR("Failed to enable bluetooth hardware after reset (%d)",
		       err);
		goto error;
	}

	btintel_pcie_reset_ia(data);
	btintel_pcie_start_rx(data);
	data->flags = 0;

	err = btintel_pcie_setup_hdev(data);
	if (err) {
		BT_ERR("Failed registering hdev (%d)", err);
		goto error;
	}
error:
	pci_dev_put(pdev);
	pci_unlock_rescan_remove();
	kfree(removal);
}
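
/* ->reset() callback: schedule the removal/recovery worker unless setup has
 * not completed yet or a recovery is already in progress.
 */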
static void btintel_pcie_reset(struct hci_dev *hdev)
{
	struct btintel_pcie_removal *removal;
	struct btintel_pcie_data *data;

	data = hci_get_drvdata(hdev);

	if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags))
		return;

	if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags))
		return;

	removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
	if (!removal)
		return;

	removal->pdev = data->pdev;
	INIT_WORK(&removal->work, btintel_pcie_removal_work);
	pci_dev_get(removal->pdev);
	schedule_work(&removal->work);
}
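
/* ->hw_error() callback: trigger a recovery reset unless the retry budget
 * (BTINTEL_PCIE_FLR_MAX_RETRY attempts within BTINTEL_PCIE_RESET_WINDOW_SECS)
 * has already been exhausted.
 */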
static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
{
	struct btintel_pcie_dev_recovery *data;
	struct btintel_pcie_data *dev_data = hci_get_drvdata(hdev);
	struct pci_dev *pdev = dev_data->pdev;
	time64_t retry_window;

	if (code == 0x13) {
		bt_dev_err(hdev, "Encountered top exception");
		return;
	}

	data = btintel_pcie_get_recovery(pdev, &hdev->dev);
	if (!data)
		return;

	retry_window = ktime_get_boottime_seconds() - data->last_error;

	if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
	    data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) {
		bt_dev_err(hdev, "Exhausted maximum: %d recovery attempts: %d",
			   BTINTEL_PCIE_FLR_MAX_RETRY, data->count);
		bt_dev_dbg(hdev, "Boot time: %lld seconds",
			   ktime_get_boottime_seconds());
		bt_dev_dbg(hdev, "last error at: %lld seconds",
			   data->last_error);
		return;
	}
	btintel_pcie_inc_recovery_count(pdev, &hdev->dev);
	btintel_pcie_reset(hdev);
}
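
/* Allocate an hci_dev, wire up the driver callbacks and register it with
 * the HCI core.
 */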
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
	int err;
	struct hci_dev *hdev;

	hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_PCI;
	hci_set_drvdata(hdev, data);

	data->hdev = hdev;
	SET_HCIDEV_DEV(hdev, &data->pdev->dev);

	hdev->manufacturer = 2;
	hdev->open = btintel_pcie_open;
	hdev->close = btintel_pcie_close;
	hdev->send = btintel_pcie_send_frame;
	hdev->setup = btintel_pcie_setup;
	hdev->shutdown = btintel_shutdown_combined;
	hdev->hw_error = btintel_pcie_hw_error;
	hdev->set_diag = btintel_set_diag;
	hdev->set_bdaddr = btintel_set_bdaddr;
	hdev->reset = btintel_pcie_reset;

	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("Failed to register to hdev (%d)", err);
		goto exit_error;
	}

	data->dmp_hdr.driver_name = KBUILD_MODNAME;
	return 0;

exit_error:
	hci_free_dev(hdev);
	return err;
}
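
/* Probe: allocate per-device state, configure PCI/MSI-X, allocate the DMA
 * queues, enable the Bluetooth function, start RX and register the hci_dev.
 */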
static int btintel_pcie_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err;
	struct btintel_pcie_data *data;

	if (!pdev)
		return -ENODEV;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

	spin_lock_init(&data->irq_lock);
	spin_lock_init(&data->hci_rx_lock);

	init_waitqueue_head(&data->gp0_wait_q);
	data->gp0_received = false;

	init_waitqueue_head(&data->tx_wait_q);
	data->tx_wait_done = false;

	data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
	if (!data->workqueue)
		return -ENOMEM;

	skb_queue_head_init(&data->rx_skb_q);
	INIT_WORK(&data->rx_work, btintel_pcie_rx_work);

	data->boot_stage_cache = 0x00;
	data->img_resp_cache = 0x00;

	err = btintel_pcie_config_pcie(pdev, data);
	if (err)
		goto exit_error;

	pci_set_drvdata(pdev, data);

	err = btintel_pcie_alloc(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_enable_bt(data);
	if (err)
		goto exit_error;

	/* CNV information (CNVi and CNVr) is in CSR */
	data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);

	data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);

	err = btintel_pcie_start_rx(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_setup_hdev(data);
	if (err)
		goto exit_error;

	bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
		   data->cnvr);
	return 0;

exit_error:
	/* reset device before exit */
	btintel_pcie_reset_bt(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);

	return err;
}
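
/* Remove: quiesce interrupts, reset the device, release the IRQ vectors and
 * the hci_dev, then free the workqueue and DMA resources.
 */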
static void btintel_pcie_remove(struct pci_dev *pdev)
{
	struct btintel_pcie_data *data;

	data = pci_get_drvdata(pdev);

	btintel_pcie_disable_interrupts(data);

	btintel_pcie_synchronize_irqs(data);

	flush_work(&data->rx_work);

	btintel_pcie_reset_bt(data);
	for (int i = 0; i < data->alloc_vecs; i++) {
		struct msix_entry *msix_entry;

		msix_entry = &data->msix_entries[i];
		free_irq(msix_entry->vector, msix_entry);
	}

	pci_free_irq_vectors(pdev);

	btintel_pcie_release_hdev(data);

	destroy_workqueue(data->workqueue);

	btintel_pcie_free(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_DEV_COREDUMP
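/* sysfs-triggered coredump: mark the dump as user triggered and queue the
 * RX work to collect and submit it.
 */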
static void btintel_pcie_coredump(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data = pci_get_drvdata(pdev);

	if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
		return;

	data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
	queue_work(data->workqueue, &data->rx_work);
}
#endif

static struct pci_driver btintel_pcie_driver = {
	.name = KBUILD_MODNAME,
	.id_table = btintel_pcie_table,
	.probe = btintel_pcie_probe,
	.remove = btintel_pcie_remove,
#ifdef CONFIG_DEV_COREDUMP
	.driver.coredump = btintel_pcie_coredump
#endif
};

static int __init btintel_pcie_init(void)
{
	return pci_register_driver(&btintel_pcie_driver);
}

static void __exit btintel_pcie_exit(void)
{
	pci_unregister_driver(&btintel_pcie_driver);
	btintel_pcie_free_restart_list();
}

module_init(btintel_pcie_init);
module_exit(btintel_pcie_exit);

MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");