1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Bluetooth support for Intel PCIe devices
5 *
6 * Copyright (C) 2024 Intel Corporation
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/firmware.h>
12 #include <linux/pci.h>
13 #include <linux/wait.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16
17 #include <linux/unaligned.h>
18 #include <linux/devcoredump.h>
19
20 #include <net/bluetooth/bluetooth.h>
21 #include <net/bluetooth/hci_core.h>
22
23 #include "btintel.h"
24 #include "btintel_pcie.h"
25
26 #define VERSION "0.1"
27
28 #define BTINTEL_PCI_DEVICE(dev, subdev) \
29 .vendor = PCI_VENDOR_ID_INTEL, \
30 .device = (dev), \
31 .subvendor = PCI_ANY_ID, \
32 .subdevice = (subdev), \
33 .driver_data = 0
34
35 #define POLL_INTERVAL_US 10
36
37 /* Intel Bluetooth PCIe device id table */
38 static const struct pci_device_id btintel_pcie_table[] = {
39 /* BlazarI, Wildcat Lake */
40 { BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
41 /* BlazarI, Lunar Lake */
42 { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
43 /* Scorpious, Panther Lake-H484 */
44 { BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
45 /* Scorpious, Panther Lake-H404 */
46 { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
47 { 0 }
48 };
49 MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
50
51 struct btintel_pcie_dev_recovery {
52 struct list_head list;
53 u8 count;
54 time64_t last_error;
55 char name[];
56 };
57
58 /* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
59 #define BTINTEL_PCIE_HCI_TYPE_LEN 4
60 #define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
61 #define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
62 #define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
63 #define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
64 #define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
65
66 #define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
67
68 #define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
69 #define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
70
71 #define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
72 #define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
73
76 #define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
77 #define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
78
79 #define BTINTEL_PCIE_RESET_WINDOW_SECS 5
80 #define BTINTEL_PCIE_FLR_MAX_RETRY 1
81
82 /* Alive interrupt context */
83 enum {
84 BTINTEL_PCIE_ROM,
85 BTINTEL_PCIE_FW_DL,
86 BTINTEL_PCIE_HCI_RESET,
87 BTINTEL_PCIE_INTEL_HCI_RESET1,
88 BTINTEL_PCIE_INTEL_HCI_RESET2,
89 BTINTEL_PCIE_D0,
90 BTINTEL_PCIE_D3
91 };
92
93 /* Structure for dbgc fragment buffer
94 * @buf_addr_lsb: LSB of the buffer's physical address
95 * @buf_addr_msb: MSB of the buffer's physical address
96 * @buf_size: Total size of the buffer
97 */
98 struct btintel_pcie_dbgc_ctxt_buf {
99 u32 buf_addr_lsb;
100 u32 buf_addr_msb;
101 u32 buf_size;
102 };
103
104 /* Structure for dbgc fragment
105 * @magic_num: 0xA5A5A5A5
106 * @ver: For Driver-FW compatibility
107 * @total_size: Total size of the payload debug info
108 * @num_buf: Num of allocated debug bufs
109 * @bufs: All buffer's addresses and sizes
110 */
111 struct btintel_pcie_dbgc_ctxt {
112 u32 magic_num;
113 u32 ver;
114 u32 total_size;
115 u32 num_buf;
116 struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
117 };
118
119 struct btintel_pcie_removal {
120 struct pci_dev *pdev;
121 struct work_struct work;
122 };
123
124 static LIST_HEAD(btintel_pcie_recovery_list);
125 static DEFINE_SPINLOCK(btintel_pcie_recovery_lock);
126
127 static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
128 {
129 switch (alive_intr_ctxt) {
130 case BTINTEL_PCIE_ROM:
131 return "rom";
132 case BTINTEL_PCIE_FW_DL:
133 return "fw_dl";
134 case BTINTEL_PCIE_D0:
135 return "d0";
136 case BTINTEL_PCIE_D3:
137 return "d3";
138 case BTINTEL_PCIE_HCI_RESET:
139 return "hci_reset";
140 case BTINTEL_PCIE_INTEL_HCI_RESET1:
141 return "intel_reset1";
142 case BTINTEL_PCIE_INTEL_HCI_RESET2:
143 return "intel_reset2";
144 default:
145 return "unknown";
146 }
147 }
148
149 /* This function initializes the memory for DBGC buffers and formats the
150 * DBGC fragment, which consists of header info and each DBGC buffer's LSB,
151 * MSB and size as the payload
152 */
153 static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
154 {
155 struct btintel_pcie_dbgc_ctxt db_frag;
156 struct data_buf *buf;
157 int i;
158
159 data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
160 data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
161 sizeof(*buf), GFP_KERNEL);
162 if (!data->dbgc.bufs)
163 return -ENOMEM;
164
165 data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
166 data->dbgc.count *
167 BTINTEL_PCIE_DBGC_BUFFER_SIZE,
168 &data->dbgc.buf_p_addr,
169 GFP_KERNEL | __GFP_NOWARN);
170 if (!data->dbgc.buf_v_addr)
171 return -ENOMEM;
172
173 data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
174 sizeof(struct btintel_pcie_dbgc_ctxt),
175 &data->dbgc.frag_p_addr,
176 GFP_KERNEL | __GFP_NOWARN);
177 if (!data->dbgc.frag_v_addr)
178 return -ENOMEM;
179
180 data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
181
182 db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
183 db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
184 db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
185 db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
186
187 for (i = 0; i < data->dbgc.count; i++) {
188 buf = &data->dbgc.bufs[i];
189 buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
190 buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
191 db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
192 db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
193 db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
194 }
195
196 memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
197 return 0;
198 }
199
200 static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
201 u16 queue_num)
202 {
203 bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u",
204 queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
205 ia->tr_hia[queue_num], ia->tr_tia[queue_num],
206 ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
207 }
208
209 static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
210 u16 index)
211 {
212 bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
213 index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
214 }
215
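/* Recover the driver private data from an MSI-X entry. The msix_entries
 * array is contiguous, so stepping back by the entry's own index yields
 * &msix_entries[0], from which container_of() gives the enclosing
 * btintel_pcie_data.
 */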
216 static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
217 {
218 u8 queue = entry->entry;
219 struct msix_entry *entries = entry - queue;
220
221 return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
222 }
223
224 /* Set the doorbell for TXQ to notify the device that @index (actually index-1)
225 * of the TFD is updated and ready to transmit.
226 */
227 static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
228 {
229 u32 val;
230
231 val = index;
232 val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
233
234 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
235 }
236
237 /* Copy the data to next(@tfd_index) data buffer and update the TFD(transfer
238 * descriptor) with the data length and the DMA address of the data buffer.
239 */
240 static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
241 struct sk_buff *skb)
242 {
243 struct data_buf *buf;
244 struct tfd *tfd;
245
246 tfd = &txq->tfds[tfd_index];
247 memset(tfd, 0, sizeof(*tfd));
248
249 buf = &txq->bufs[tfd_index];
250
251 tfd->size = skb->len;
252 tfd->addr = buf->data_p_addr;
253
254 /* Copy the outgoing data to DMA buffer */
255 memcpy(buf->data, skb->data, tfd->size);
256 }
257
258 static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
259 {
260 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
261 u16 cr_hia, cr_tia;
262 u32 reg, mbox_reg;
263 struct sk_buff *skb;
264 u8 buf[80];
265
266 skb = alloc_skb(1024, GFP_ATOMIC);
267 if (!skb)
268 return;
269
270 snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
271 bt_dev_dbg(hdev, "%s", buf);
272 skb_put_data(skb, buf, strlen(buf));
273
274 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
275 snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
276 bt_dev_dbg(hdev, "%s", buf);
277 skb_put_data(skb, buf, strlen(buf));
278 data->boot_stage_cache = reg;
279
280 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
281 snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
282 skb_put_data(skb, buf, strlen(buf));
283 bt_dev_dbg(hdev, "%s", buf);
284
285 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
286 snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
287 skb_put_data(skb, buf, strlen(buf));
288 bt_dev_dbg(hdev, "%s", buf);
289
290 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
291 snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
292 skb_put_data(skb, buf, strlen(buf));
293 bt_dev_dbg(hdev, "%s", buf);
294
295 /* Read the mailbox status and registers */
296 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
297 snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
298 skb_put_data(skb, buf, strlen(buf));
299 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
300 mbox_reg = btintel_pcie_rd_reg32(data,
301 BTINTEL_PCIE_CSR_MBOX_1_REG);
302 snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
303 skb_put_data(skb, buf, strlen(buf));
304 bt_dev_dbg(hdev, "%s", buf);
305 }
306
307 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
308 mbox_reg = btintel_pcie_rd_reg32(data,
309 BTINTEL_PCIE_CSR_MBOX_2_REG);
310 snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
311 skb_put_data(skb, buf, strlen(buf));
312 bt_dev_dbg(hdev, "%s", buf);
313 }
314
315 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
316 mbox_reg = btintel_pcie_rd_reg32(data,
317 BTINTEL_PCIE_CSR_MBOX_3_REG);
318 snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
319 skb_put_data(skb, buf, strlen(buf));
320 bt_dev_dbg(hdev, "%s", buf);
321 }
322
323 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
324 mbox_reg = btintel_pcie_rd_reg32(data,
325 BTINTEL_PCIE_CSR_MBOX_4_REG);
326 snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
327 skb_put_data(skb, buf, strlen(buf));
328 bt_dev_dbg(hdev, "%s", buf);
329 }
330
331 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
332 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
333 snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
334 skb_put_data(skb, buf, strlen(buf));
335 bt_dev_dbg(hdev, "%s", buf);
336
337 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
338 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
339 snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
340 skb_put_data(skb, buf, strlen(buf));
341 bt_dev_dbg(hdev, "%s", buf);
342 snprintf(buf, sizeof(buf), "--------------------------------");
343 bt_dev_dbg(hdev, "%s", buf);
344
345 hci_recv_diag(hdev, skb);
346 }
347
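/* Synchronously send a packet to the device: prepend the 4-byte PCIe
 * packet type, copy the skb into the next free TFD's DMA buffer, ring the
 * TX doorbell and wait for the URBD0 completion interrupt. For HCI_OP_RESET
 * and BTINTEL_HCI_OP_RESET the firmware additionally raises an alive (gp0)
 * interrupt, which is waited for as well.
 */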
348 static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
349 struct sk_buff *skb, u32 pkt_type, u16 opcode)
350 {
351 int ret;
352 u16 tfd_index;
353 u32 old_ctxt;
354 bool wait_on_alive = false;
355 struct hci_dev *hdev = data->hdev;
356
357 struct txq *txq = &data->txq;
358
359 tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
360
361 if (tfd_index > txq->count)
362 return -ERANGE;
363
364 /* Firmware raises alive interrupt on HCI_OP_RESET or
365 * BTINTEL_HCI_OP_RESET
366 */
367 wait_on_alive = (pkt_type == BTINTEL_PCIE_HCI_CMD_PKT &&
368 (opcode == BTINTEL_HCI_OP_RESET || opcode == HCI_OP_RESET));
369
370 if (wait_on_alive) {
371 data->gp0_received = false;
372 old_ctxt = data->alive_intr_ctxt;
373 data->alive_intr_ctxt =
374 (opcode == BTINTEL_HCI_OP_RESET ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
375 BTINTEL_PCIE_HCI_RESET);
376 bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s",
377 opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
378 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
379 }
380
381 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &pkt_type,
382 BTINTEL_PCIE_HCI_TYPE_LEN);
383
384 /* Prepare for TX. It updates the TFD with the length of data and
385 * address of the DMA buffer, and copies the data to the DMA buffer
386 */
387 btintel_pcie_prepare_tx(txq, tfd_index, skb);
388
389 tfd_index = (tfd_index + 1) % txq->count;
390 data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;
391
392 /* Arm wait event condition */
393 data->tx_wait_done = false;
394
395 /* Set the doorbell to notify the device */
396 btintel_pcie_set_tx_db(data, tfd_index);
397
398 /* Wait for the completion interrupt - URBD0 */
399 ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
400 msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
401 if (!ret) {
402 bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
403 BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
404 btintel_pcie_dump_debug_registers(data->hdev);
405 return -ETIME;
406 }
407
408 if (wait_on_alive) {
409 ret = wait_event_timeout(data->gp0_wait_q,
410 data->gp0_received,
411 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
412 if (!ret) {
413 hdev->stat.err_tx++;
414 bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
415 BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
416 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
417 return -ETIME;
418 }
419 }
420 return 0;
421 }
422
423 /* Set the doorbell for RXQ to notify the device that @index (actually index-1)
424 * is available to receive the data
425 */
426 static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
427 {
428 u32 val;
429
430 val = index;
431 val |= (BTINTEL_PCIE_RX_DB_VEC << 16);
432
433 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
434 }
435
436 /* Update the FRBD (free buffer descriptor) with the @frbd_index and the
437 * DMA address of the free buffer.
438 */
439 static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
440 {
441 struct data_buf *buf;
442 struct frbd *frbd;
443
444 /* Get the buffer of the FRBD for DMA */
445 buf = &rxq->bufs[frbd_index];
446
447 frbd = &rxq->frbds[frbd_index];
448 memset(frbd, 0, sizeof(*frbd));
449
450 /* Update FRBD */
451 frbd->tag = frbd_index;
452 frbd->addr = buf->data_p_addr;
453 }
454
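/* Refill one FRBD at the current RX head index, advance the head and ring
 * the RX doorbell so the device can DMA received data into the buffer.
 */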
455 static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
456 {
457 u16 frbd_index;
458 struct rxq *rxq = &data->rxq;
459
460 frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];
461
462 if (frbd_index > rxq->count)
463 return -ERANGE;
464
465 /* Prepare for RX submit. It updates the FRBD with the address of DMA
466 * buffer
467 */
468 btintel_pcie_prepare_rx(rxq, frbd_index);
469
470 frbd_index = (frbd_index + 1) % rxq->count;
471 data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
472 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
473
474 /* Set the doorbell to notify the device */
475 btintel_pcie_set_rx_db(data, frbd_index);
476
477 return 0;
478 }
479
480 static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
481 {
482 int i, ret;
483 struct rxq *rxq = &data->rxq;
484
485 /* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around a
486 * hardware issue that leads to a race condition in the firmware.
487 */
488
489 for (i = 0; i < rxq->count - 3; i++) {
490 ret = btintel_pcie_submit_rx(data);
491 if (ret)
492 return ret;
493 }
494
495 return 0;
496 }
497
498 static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
499 {
500 memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
501 memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
502 memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
503 memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
504 }
505
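/* Reset the BT function: disable the function and request a bus master
 * disconnect, poll for the disconnect to be acknowledged, then issue a
 * software reset. A successful reset leaves the boot stage register at 0.
 */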
506 static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
507 {
508 u32 reg;
509 int retry = 3;
510
511 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
512
513 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
514 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
515 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
516 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
517
518 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
519
520 do {
521 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
522 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
523 break;
524 usleep_range(10000, 12000);
525
526 } while (--retry > 0);
527 usleep_range(10000, 12000);
528
529 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
530
531 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
532 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
533 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
534 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
535 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
536 usleep_range(10000, 12000);
537
538 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
539 bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
540
541 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
542
543 /* If the shared hardware reset succeeded, the boot stage register will
544 * read 0
545 */
546 return reg == 0 ? 0 : -ENODEV;
547 }
548
549 static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
550 {
551 u32 reg;
552
553 /* Set MAC_INIT bit to start primary bootloader */
554 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
555 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
556 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
557 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
558 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
559 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
560 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
561 }
562
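/* Request MAC access (and the XTAL clock) from the device and poll,
 * roughly 15 ms at most, for the hardware to acknowledge the request via
 * the MAC_ACCESS_STS bit.
 */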
563 static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
564 {
565 u32 reg;
566 int retry = 15;
567
568 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
569
570 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
571 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
572 if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
573 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
574
575 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
576
577 do {
578 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
579 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
580 return 0;
581 /* Need delay here for the Target Access hardware to settle down */
582 usleep_range(1000, 1200);
583
584 } while (--retry > 0);
585
586 return -ETIME;
587 }
588
589 static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
590 {
591 u32 reg;
592
593 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
594
595 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
596 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
597
598 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
599 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
600
601 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
602 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
603
604 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
605 }
606
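/* Append one TLV at @dest and return the address just past it, so that
 * calls can be chained when building the TLV section of the core dump.
 */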
607 static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
608 void *data, size_t size)
609 {
610 struct intel_tlv *tlv;
611
612 tlv = dest;
613 tlv->type = type;
614 tlv->len = size;
615 memcpy(tlv->val, data, tlv->len);
616 return dest + sizeof(*tlv) + size;
617 }
618
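/* Collect the DBGC trace buffers into a devcoredump. A sketch of the blob
 * layout, as assembled below:
 *
 *   u32 magic (BTINTEL_PCIE_MAGIC_NUM)
 *   u32 data_len
 *   TLVs: vendor, driver, dump time, fw build, cnvi_bt, write_ptr,
 *         wrap_ctr, trigger_reason, fw sha1, cnvr_top, cnvi_top
 *   raw DBGC buffers (dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE bytes)
 */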
619 static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
620 {
621 u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
622 struct btintel_pcie_dbgc *dbgc = &data->dbgc;
623 struct hci_dev *hdev = data->hdev;
624 u8 *pdata, *p, buf_idx;
625 struct intel_tlv *tlv;
626 struct timespec64 now;
627 struct tm tm_now;
628 char fw_build[128];
629 char ts[128];
630 char vendor[64];
631 char driver[64];
632
633 if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
634 return -EOPNOTSUPP;
635
636
637 wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
638 offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
639
640 buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
641 if (buf_idx > dbgc->count) {
642 bt_dev_warn(hdev, "Buffer index is invalid");
643 return -EINVAL;
644 }
645
646 prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
647 if (prev_size + offset >= prev_size)
648 data->dmp_hdr.write_ptr = prev_size + offset;
649 else
650 return -EINVAL;
651
652 snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
653 snprintf(driver, sizeof(driver), "Driver: %s\n",
654 data->dmp_hdr.driver_name);
655
656 ktime_get_real_ts64(&now);
657 time64_to_tm(now.tv_sec, 0, &tm_now);
658 snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
659 tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
660 tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
661
662 snprintf(fw_build, sizeof(fw_build),
663 "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
664 2000 + (data->dmp_hdr.fw_timestamp >> 8),
665 data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
666 data->dmp_hdr.fw_build_num);
667
668 data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
669 sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
670 sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
671 sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
672 sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
673 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
674 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
675 sizeof(*tlv) + strlen(ts) +
676 sizeof(*tlv) + strlen(fw_build) +
677 sizeof(*tlv) + strlen(vendor) +
678 sizeof(*tlv) + strlen(driver);
679
680 /*
681 * sizeof(u32) - signature
682 * sizeof(data_len) - to store tlv data size
683 * data_len - TLV data
684 */
685 dump_size = sizeof(u32) + sizeof(data_len) + data_len;
686
687
688 /* Add debug buffers data length to dump size */
689 dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
690
691 pdata = vmalloc(dump_size);
692 if (!pdata)
693 return -ENOMEM;
694 p = pdata;
695
696 *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
697 p += sizeof(u32);
698
699 *(u32 *)p = data_len;
700 p += sizeof(u32);
701
702
703 p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
704 p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
705 p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
706 p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
707 strlen(fw_build));
708 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
709 sizeof(data->dmp_hdr.cnvi_bt));
710 p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
711 sizeof(data->dmp_hdr.write_ptr));
712 /* Read the live wrap-around counter before copying it into the dump */
713 data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
714 BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
715 p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
716 sizeof(data->dmp_hdr.wrap_ctr));
717
718 p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
719 sizeof(data->dmp_hdr.trigger_reason));
720 p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
721 sizeof(data->dmp_hdr.fw_git_sha1));
722 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
723 sizeof(data->dmp_hdr.cnvr_top));
724 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
725 sizeof(data->dmp_hdr.cnvi_top));
726
727 memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
728 dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
729 return 0;
730 }
731
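/* Acquire MAC access around the DRAM read so the device memory window
 * stays valid while the trace buffers are dumped.
 */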
732 static void btintel_pcie_dump_traces(struct hci_dev *hdev)
733 {
734 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
735 int ret = 0;
736
737 ret = btintel_pcie_get_mac_access(data);
738 if (ret) {
739 bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
740 return;
741 }
742
743 ret = btintel_pcie_read_dram_buffers(data);
744
745 btintel_pcie_release_mac_access(data);
746
747 if (ret)
748 bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
749 }
750
751 /* This function enables the BT function by setting the BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT
752 * bit in the BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and waits for an MSI-X interrupt
753 * with BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
754 * Then the host reads the firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
755 * from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
756 */
757 static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
758 {
759 int err;
760 u32 reg;
761
762 data->gp0_received = false;
763
764 /* Update the DMA address of CI struct to CSR */
765 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
766 data->ci_p_addr & 0xffffffff);
767 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
768 (u64)data->ci_p_addr >> 32);
769
770 /* Reset the cached value of the boot stage. It is updated by the MSI-X
771 * gp0 interrupt handler.
772 */
773 data->boot_stage_cache = 0x0;
774
775 /* Set MAC_INIT bit to start primary bootloader */
776 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
777 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
778 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
779 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
780 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
781 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
782
783 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
784
785 /* MAC is ready. Enable BT FUNC */
786 btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
787 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
788
789 btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
790
791 /* Wait for an interrupt from the device after it boots up to the
792 * primary bootloader.
793 */
794 data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
795 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
796 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
797 if (!err)
798 return -ETIME;
799
800 /* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */
801 if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
802 return -ENODEV;
803
804 return 0;
805 }
806
807 static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
808 {
809 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
810 }
811
812 static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
813 {
814 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
815 !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
816 }
817
818 static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
819 {
820 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
821 }
822
823 static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
824 {
825 return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
826 }
827
828 static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
829 u32 dxstate)
830 {
831 bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
832 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
833 }
834
835 static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
836 void *buf, u32 dev_addr, int len)
837 {
838 int err;
839 u32 *val = buf;
840
841 /* Get device mac access */
842 err = btintel_pcie_get_mac_access(data);
843 if (err) {
844 bt_dev_err(data->hdev, "Failed to get mac access %d", err);
845 return err;
846 }
847
848 for (; len > 0; len -= 4, dev_addr += 4, val++)
849 *val = btintel_pcie_rd_dev_mem(data, dev_addr);
850
851 btintel_pcie_release_mac_access(data);
852
853 return 0;
854 }
855
856 static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
857 {
858 return (data->boot_stage_cache &
859 BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
860 (data->boot_stage_cache &
861 BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
862 }
863
864 static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
865 {
866 return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
867 (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
868 }
869
870 static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
871 {
872 bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
873 btintel_pcie_dump_debug_registers(data->hdev);
874 }
875
876 /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
877 * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
878 */
879 static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
880 {
881 bool submit_rx, signal_waitq;
882 u32 reg, old_ctxt;
883
884 /* This interrupt is for three different causes and it is not easy to
885 * know what causes the interrupt. So, the handler compares each register
886 * value with the cached value and updates it before waking up the queue.
887 */
888 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
889 bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
890 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
891 data->boot_stage_cache, reg);
892 if (reg != data->boot_stage_cache)
893 data->boot_stage_cache = reg;
894
895 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
896 if (reg != data->img_resp_cache)
897 data->img_resp_cache = reg;
898
899 if (btintel_pcie_in_error(data)) {
900 bt_dev_err(data->hdev, "Controller in error state");
901 btintel_pcie_dump_debug_registers(data->hdev);
902 return;
903 }
904
905 if (btintel_pcie_in_lockdown(data)) {
906 bt_dev_err(data->hdev, "Controller in lockdown state");
907 btintel_pcie_dump_debug_registers(data->hdev);
908 return;
909 }
910
911 data->gp0_received = true;
912
913 old_ctxt = data->alive_intr_ctxt;
914 submit_rx = false;
915 signal_waitq = false;
916
917 switch (data->alive_intr_ctxt) {
918 case BTINTEL_PCIE_ROM:
919 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
920 signal_waitq = true;
921 break;
922 case BTINTEL_PCIE_FW_DL:
923 /* The error case is already handled above. Ideally control should
924 * not reach here
925 */
926 break;
927 case BTINTEL_PCIE_INTEL_HCI_RESET1:
928 if (btintel_pcie_in_op(data)) {
929 submit_rx = true;
930 signal_waitq = true;
931 break;
932 }
933
934 if (btintel_pcie_in_iml(data)) {
935 submit_rx = true;
936 signal_waitq = true;
937 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
938 break;
939 }
940 break;
941 case BTINTEL_PCIE_INTEL_HCI_RESET2:
942 if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
943 btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
944 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
945 }
946 break;
947 case BTINTEL_PCIE_D0:
948 if (btintel_pcie_in_d3(data)) {
949 data->alive_intr_ctxt = BTINTEL_PCIE_D3;
950 signal_waitq = true;
951 break;
952 }
953 break;
954 case BTINTEL_PCIE_D3:
955 if (btintel_pcie_in_d0(data)) {
956 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
957 submit_rx = true;
958 signal_waitq = true;
959 break;
960 }
961 break;
962 case BTINTEL_PCIE_HCI_RESET:
963 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
964 submit_rx = true;
965 signal_waitq = true;
966 break;
967 default:
968 bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
969 data->alive_intr_ctxt);
970 break;
971 }
972
973 if (submit_rx) {
974 btintel_pcie_reset_ia(data);
975 btintel_pcie_start_rx(data);
976 }
977
978 if (signal_waitq) {
979 bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
980 wake_up(&data->gp0_wait_q);
981 }
982
983 if (old_ctxt != data->alive_intr_ctxt)
984 bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
985 btintel_pcie_alivectxt_state2str(old_ctxt),
986 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
987 }
988
989 /* This function handles the MSI-X interrupt for rx queue 0, which is for TX
990 */
991 static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
992 {
993 u16 cr_tia, cr_hia;
994 struct txq *txq;
995 struct urbd0 *urbd0;
996
997 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
998 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
999
1000 if (cr_tia == cr_hia)
1001 return;
1002
1003 txq = &data->txq;
1004
1005 while (cr_tia != cr_hia) {
1006 data->tx_wait_done = true;
1007 wake_up(&data->tx_wait_q);
1008
1009 urbd0 = &txq->urbd0s[cr_tia];
1010
1011 if (urbd0->tfd_index > txq->count)
1012 return;
1013
1014 cr_tia = (cr_tia + 1) % txq->count;
1015 data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
1016 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
1017 }
1018 }
1019
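/* Intercept HCI events before handing them to the stack. While the
 * bootloader is active, vendor events 0x02 (bootup) and 0x06 (secure send
 * result) are consumed here; vendor event 0x97 is a firmware debug event
 * and is routed to the diag channel instead of the stack.
 */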
1020 static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
1021 {
1022 struct hci_event_hdr *hdr = (void *)skb->data;
1023 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
1024
1025 if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
1026 hdr->plen > 0) {
1027 const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
1028 unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
1029
1030 if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
1031 switch (skb->data[2]) {
1032 case 0x02:
1033 /* When switching to the operational firmware
1034 * the device sends a vendor specific event
1035 * indicating that the bootup completed.
1036 */
1037 btintel_bootup(hdev, ptr, len);
1038
1039 /* If bootup event is from operational image,
1040 * driver needs to write sleep control register to
1041 * move into D0 state
1042 */
1043 if (btintel_pcie_in_op(data)) {
1044 btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
1045 data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
1046 kfree_skb(skb);
1047 return 0;
1048 }
1049
1050 if (btintel_pcie_in_iml(data)) {
1051 /* In case of IML, there is no concept
1052 * of D0 transition. Just mimic as if
1053 * IML moved to D0 by clearing INTEL_WAIT_FOR_D0
1054 * bit and waking up the task waiting on
1055 * INTEL_WAIT_FOR_D0. This is required
1056 * as intel_boot() is a common function for
1057 * both IML and OP image loading.
1058 */
1059 if (btintel_test_and_clear_flag(data->hdev,
1060 INTEL_WAIT_FOR_D0))
1061 btintel_wake_up_flag(data->hdev,
1062 INTEL_WAIT_FOR_D0);
1063 }
1064 kfree_skb(skb);
1065 return 0;
1066 case 0x06:
1067 /* When the firmware loading completes the
1068 * device sends out a vendor specific event
1069 * indicating the result of the firmware
1070 * loading.
1071 */
1072 btintel_secure_send_result(hdev, ptr, len);
1073 kfree_skb(skb);
1074 return 0;
1075 }
1076 }
1077
1078 /* This is a debug event that comes from IML and OP image when it
1079 * starts execution. There is no need to pass this event to the stack.
1080 */
1081 if (skb->data[2] == 0x97) {
1082 hci_recv_diag(hdev, skb);
1083 return 0;
1084 }
1085 }
1086
1087 return hci_recv_frame(hdev, skb);
1088 }

1089 /* Process the received rx data
1090 * It checks the frame header to identify the data type, creates the skb
1091 * and calls the HCI API
1092 */
1093 static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
1094 struct sk_buff *skb)
1095 {
1096 int ret;
1097 u8 pkt_type;
1098 u16 plen;
1099 u32 pcie_pkt_type;
1100 void *pdata;
1101 struct hci_dev *hdev = data->hdev;
1102
1103 spin_lock(&data->hci_rx_lock);
1104
1105 /* The first 4 bytes indicate the Intel PCIe specific packet type */
1106 pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
1107 if (!pdata) {
1108 bt_dev_err(hdev, "Corrupted packet received");
1109 ret = -EILSEQ;
1110 goto exit_error;
1111 }
1112
1113 pcie_pkt_type = get_unaligned_le32(pdata);
1114
1115 switch (pcie_pkt_type) {
1116 case BTINTEL_PCIE_HCI_ACL_PKT:
1117 if (skb->len >= HCI_ACL_HDR_SIZE) {
1118 plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
1119 pkt_type = HCI_ACLDATA_PKT;
1120 } else {
1121 bt_dev_err(hdev, "ACL packet is too short");
1122 ret = -EILSEQ;
1123 goto exit_error;
1124 }
1125 break;
1126
1127 case BTINTEL_PCIE_HCI_SCO_PKT:
1128 if (skb->len >= HCI_SCO_HDR_SIZE) {
1129 plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
1130 pkt_type = HCI_SCODATA_PKT;
1131 } else {
1132 bt_dev_err(hdev, "SCO packet is too short");
1133 ret = -EILSEQ;
1134 goto exit_error;
1135 }
1136 break;
1137
1138 case BTINTEL_PCIE_HCI_EVT_PKT:
1139 if (skb->len >= HCI_EVENT_HDR_SIZE) {
1140 plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
1141 pkt_type = HCI_EVENT_PKT;
1142 } else {
1143 bt_dev_err(hdev, "Event packet is too short");
1144 ret = -EILSEQ;
1145 goto exit_error;
1146 }
1147 break;
1148
1149 case BTINTEL_PCIE_HCI_ISO_PKT:
1150 if (skb->len >= HCI_ISO_HDR_SIZE) {
1151 plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
1152 pkt_type = HCI_ISODATA_PKT;
1153 } else {
1154 bt_dev_err(hdev, "ISO packet is too short");
1155 ret = -EILSEQ;
1156 goto exit_error;
1157 }
1158 break;
1159
1160 default:
1161 bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
1162 pcie_pkt_type);
1163 ret = -EINVAL;
1164 goto exit_error;
1165 }
1166
1167 if (skb->len < plen) {
1168 bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
1169 pkt_type);
1170 ret = -EILSEQ;
1171 goto exit_error;
1172 }
1173
1174 bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
1175
1176 hci_skb_pkt_type(skb) = pkt_type;
1177 hdev->stat.byte_rx += plen;
1178 skb_trim(skb, plen);
1179
1180 if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
1181 ret = btintel_pcie_recv_event(hdev, skb);
1182 else
1183 ret = hci_recv_frame(hdev, skb);
1184 skb = NULL; /* skb is freed in the callee */
1185
1186 exit_error:
1187 if (skb)
1188 kfree_skb(skb);
1189
1190 if (ret)
1191 hdev->stat.err_rx++;
1192
1193 spin_unlock(&data->hci_rx_lock);
1194
1195 return ret;
1196 }
1197
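/* On a hardware exception the firmware leaves an exception dump in device
 * memory rather than sending an event. Read the dump, validate its magic
 * number, then walk the TLV records and inject type 1 (HCI event) records
 * into the normal receive path as if they had arrived over the link.
 */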
1198 static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
1199 {
1200 int len, err, offset, pending;
1201 struct sk_buff *skb;
1202 u8 *buf, prefix[64];
1203 u32 addr, val;
1204 u16 pkt_len;
1205
1206 struct tlv {
1207 u8 type;
1208 __le16 len;
1209 u8 val[];
1210 } __packed;
1211
1212 struct tlv *tlv;
1213
1214 switch (data->dmp_hdr.cnvi_top & 0xfff) {
1215 case BTINTEL_CNVI_BLAZARI:
1216 case BTINTEL_CNVI_BLAZARIW:
1217 /* only from step B0 onwards */
1218 if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
1219 return;
1220 len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
1221 addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
1222 break;
1223 case BTINTEL_CNVI_SCP:
1224 len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
1225 addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
1226 break;
1227 default:
1228 bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
1229 return;
1230 }
1231
1232 buf = kzalloc(len, GFP_KERNEL);
1233 if (!buf)
1234 goto exit_on_error;
1235
1236 btintel_pcie_mac_init(data);
1237
1238 err = btintel_pcie_read_device_mem(data, buf, addr, len);
1239 if (err)
1240 goto exit_on_error;
1241
1242 val = get_unaligned_le32(buf);
1243 if (val != BTINTEL_PCIE_MAGIC_NUM) {
1244 bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
1245 val);
1246 goto exit_on_error;
1247 }
1248
1249 snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
1250
1251 offset = 4;
1252 do {
1253 pending = len - offset;
1254 if (pending < sizeof(*tlv))
1255 break;
1256 tlv = (struct tlv *)(buf + offset);
1257
1258 /* If type == 0, then there are no more TLVs to be parsed */
1259 if (!tlv->type) {
1260 bt_dev_dbg(data->hdev, "Invalid TLV type 0");
1261 break;
1262 }
1263 pkt_len = le16_to_cpu(tlv->len);
1264 offset += sizeof(*tlv);
1265 pending = len - offset;
1266 if (pkt_len > pending)
1267 break;
1268
1269 offset += pkt_len;
1270
1271 /* Only TLVs of type == 1 are HCI events, no need to process other
1272 * TLVs
1273 */
1274 if (tlv->type != 1)
1275 continue;
1276
1277 bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
1278 if (pkt_len > HCI_MAX_EVENT_SIZE)
1279 break;
1280 skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
1281 if (!skb)
1282 goto exit_on_error;
1283 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1284 skb_put_data(skb, tlv->val, pkt_len);
1285
1286 /* copy Intel specific pcie packet type */
1287 val = BTINTEL_PCIE_HCI_EVT_PKT;
1288 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
1289 BTINTEL_PCIE_HCI_TYPE_LEN);
1290
1291 print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
1292 tlv->val, pkt_len, false);
1293
1294 btintel_pcie_recv_frame(data, skb);
1295 } while (offset < len);
1296
1297 exit_on_error:
1298 kfree(buf);
1299 }
1300
1301 static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
1302 {
1303 bt_dev_err(data->hdev, "Received hw exception interrupt");
1304
1305 if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
1306 return;
1307
1308 if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
1309 return;
1310
1311 /* Trigger device core dump when there is HW exception */
1312 if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
1313 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
1314
1315 queue_work(data->workqueue, &data->rx_work);
1316 }
1317
1318 static void btintel_pcie_rx_work(struct work_struct *work)
1319 {
1320 struct btintel_pcie_data *data = container_of(work,
1321 struct btintel_pcie_data, rx_work);
1322 struct sk_buff *skb;
1323
1324 if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
1325 btintel_pcie_dump_traces(data->hdev);
1326 clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
1327 }
1328
1329 if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
1330 /* Unlike USB products, the controller does not send a hardware
1331 * exception event on exception. Instead, the controller writes the
1332 * hardware exception event to device memory, along with optional
1333 * debug events, raises an MSI-X interrupt and halts. The driver
1334 * shall read the exception event from device memory and pass it to
1335 * the stack for further processing.
1336 */
1337 btintel_pcie_read_hwexp(data);
1338 clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
1339 }
1340
1341 /* Process the sk_buf in queue and send to the HCI layer */
1342 while ((skb = skb_dequeue(&data->rx_skb_q))) {
1343 btintel_pcie_recv_frame(data, skb);
1344 }
1345 }
1346
1347 /* create sk_buff with data and save it to queue and start RX work */
1348 static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
1349 void *buf)
1350 {
1351 int ret, len;
1352 struct rfh_hdr *rfh_hdr;
1353 struct sk_buff *skb;
1354
1355 rfh_hdr = buf;
1356
1357 len = rfh_hdr->packet_len;
1358 if (len <= 0) {
1359 ret = -EINVAL;
1360 goto resubmit;
1361 }
1362
1363 /* Remove RFH header */
1364 buf += sizeof(*rfh_hdr);
1365
1366 skb = alloc_skb(len, GFP_ATOMIC);
1367 if (!skb)
1368 goto resubmit;
1369
1370 skb_put_data(skb, buf, len);
1371 skb_queue_tail(&data->rx_skb_q, skb);
1372 queue_work(data->workqueue, &data->rx_work);
1373
1374 resubmit:
1375 ret = btintel_pcie_submit_rx(data);
1376
1377 return ret;
1378 }
1379
1380 /* Handles the MSI-X interrupt for rx queue 1 which is for RX */
1381 static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
1382 {
1383 u16 cr_hia, cr_tia;
1384 struct rxq *rxq;
1385 struct urbd1 *urbd1;
1386 struct data_buf *buf;
1387 int ret;
1388 struct hci_dev *hdev = data->hdev;
1389
1390 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
1391 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1392
1393 bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
1394
1395 /* Check CR_TIA and CR_HIA for change */
1396 if (cr_tia == cr_hia)
1397 return;
1398
1399 rxq = &data->rxq;
1400
1401 /* The firmware can send multiple CDs in a single MSI-X interrupt, so the
1402 * driver needs to process all received CDs in this interrupt handler.
1403 */
1404 while (cr_tia != cr_hia) {
1405 urbd1 = &rxq->urbd1s[cr_tia];
1406 ipc_print_urbd1(data->hdev, urbd1, cr_tia);
1407
1408 buf = &rxq->bufs[urbd1->frbd_tag];
1409 if (!buf) {
1410 bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
1411 urbd1->frbd_tag);
1412 return;
1413 }
1414
1415 ret = btintel_pcie_submit_rx_work(data, urbd1->status,
1416 buf->data);
1417 if (ret) {
1418 bt_dev_err(hdev, "RXQ: failed to submit rx request");
1419 return;
1420 }
1421
1422 cr_tia = (cr_tia + 1) % rxq->count;
1423 data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
1424 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
1425 }
1426 }
1427
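/* Hard IRQ handler: all real work is done in the threaded handler, so
 * just wake it up.
 */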
1428 static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
1429 {
1430 return IRQ_WAKE_THREAD;
1431 }
1432
1433 static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
1434 {
1435 return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1436 }
1437
1438 static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
1439 {
1440 return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1441 }
1442
1443 static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
1444 {
1445 struct msix_entry *entry = dev_id;
1446 struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
1447 u32 intr_fh, intr_hw;
1448
1449 spin_lock(&data->irq_lock);
1450 intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
1451 intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);
1452
1453 /* Clear the cause registers to avoid handling the same cause again */
1454 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
1455 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
1456 spin_unlock(&data->irq_lock);
1457
1458 if (unlikely(!(intr_fh | intr_hw))) {
1459 /* Ignore interrupt, inta == 0 */
1460 return IRQ_NONE;
1461 }
1462
1463 /* This interrupt is raised when there is a hardware exception */
1464 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
1465 btintel_pcie_msix_hw_exp_handler(data);
1466
1467 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
1468 btintel_pcie_msix_gp1_handler(data);
1469
1470
1471 /* For TX */
1472 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
1473 btintel_pcie_msix_tx_handle(data);
1474 if (!btintel_pcie_is_rxq_empty(data))
1475 btintel_pcie_msix_rx_handle(data);
1476 }
1477
1478 /* For RX */
1479 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
1480 btintel_pcie_msix_rx_handle(data);
1481 if (!btintel_pcie_is_txackq_empty(data))
1482 btintel_pcie_msix_tx_handle(data);
1483 }
1484
1485 /* This interrupt is triggered by the firmware after updating
1486 * boot_stage register and image_response register
1487 */
1488 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
1489 btintel_pcie_msix_gp0_handler(data);
1490
1491 /*
1492 * Before sending the interrupt the HW disables it to prevent a nested
1493 * interrupt. This is done by writing 1 to the corresponding bit in
1494 * the mask register. After handling the interrupt, it should be
1495 * re-enabled by clearing this bit. This register is defined as write 1
1496 * clear (W1C) register, meaning that it's cleared by writing 1
1497 * to the bit.
1498 */
1499 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
1500 BIT(entry->entry));
1501
1502 return IRQ_HANDLED;
1503 }
1504
1505 /* This function requests the irq for MSI-X and registers the handlers per irq.
1506 * Currently, it requests only 1 irq for all interrupt causes.
1507 */
1508 static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
1509 {
1510 int err;
1511 int num_irqs, i;
1512
1513 for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
1514 data->msix_entries[i].entry = i;
1515
1516 num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
1517 BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
1518 if (num_irqs < 0)
1519 return num_irqs;
1520
1521 data->alloc_vecs = num_irqs;
1522 data->msix_enabled = 1;
1523 data->def_irq = 0;
1524
1525 /* setup irq handler */
1526 for (i = 0; i < data->alloc_vecs; i++) {
1527 struct msix_entry *msix_entry;
1528
1529 msix_entry = &data->msix_entries[i];
1530 msix_entry->vector = pci_irq_vector(data->pdev, i);
1531
1532 err = devm_request_threaded_irq(&data->pdev->dev,
1533 msix_entry->vector,
1534 btintel_pcie_msix_isr,
1535 btintel_pcie_irq_msix_handler,
1536 IRQF_SHARED,
1537 KBUILD_MODNAME,
1538 msix_entry);
1539 if (err) {
1540 pci_free_irq_vectors(data->pdev);
1541 data->alloc_vecs = 0;
1542 return err;
1543 }
1544 }
1545 return 0;
1546 }
1547
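/* Maps an interrupt cause bit to the mask register that gates it and to
 * its entry in the MSI-X IVAR table.
 */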
1548 struct btintel_pcie_causes_list {
1549 u32 cause;
1550 u32 mask_reg;
1551 u8 cause_num;
1552 };
1553
1554 static struct btintel_pcie_causes_list causes_list[] = {
1555 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
1556 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
1557 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
1558 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
1559 };
1560
1561 /* This function configures the interrupt masks for both HW_INT_CAUSES and
1562 * FH_INT_CAUSES which are meaningful to us.
1563 *
1564 * After resetting the BT function via PCIe FLR or FUNC_CTRL reset, the
1565 * driver needs to call this function again to reconfigure them, since
1566 * the masks are reset to 0xFFFFFFFF after reset.
1567 */
1568 static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
1569 {
1570 int i;
1571 int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;
1572
1573 /* Set Non Auto Clear Cause */
1574 for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
1575 btintel_pcie_wr_reg8(data,
1576 BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
1577 val);
1578 btintel_pcie_clr_reg_bits(data,
1579 causes_list[i].mask_reg,
1580 causes_list[i].cause);
1581 }
1582
1583 /* Save the initial interrupt mask */
1584 data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
1585 data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
1586 }
1587
1588 static int btintel_pcie_config_pcie(struct pci_dev *pdev,
1589 struct btintel_pcie_data *data)
1590 {
1591 int err;
1592
1593 err = pcim_enable_device(pdev);
1594 if (err)
1595 return err;
1596
1597 pci_set_master(pdev);
1598
1599 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1600 if (err) {
1601 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1602 if (err)
1603 return err;
1604 }
1605
1606 data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
1607 if (IS_ERR(data->base_addr))
1608 return PTR_ERR(data->base_addr);
1609
1610 err = btintel_pcie_setup_irq(data);
1611 if (err)
1612 return err;
1613
1614 /* Configure MSI-X with causes list */
1615 btintel_pcie_config_msix(data);
1616
1617 return 0;
1618 }
1619
1620 static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
1621 struct ctx_info *ci)
1622 {
1623 ci->version = 0x1;
1624 ci->size = sizeof(*ci);
1625 ci->config = 0x0000;
1626 ci->addr_cr_hia = data->ia.cr_hia_p_addr;
1627 ci->addr_tr_tia = data->ia.tr_tia_p_addr;
1628 ci->addr_cr_tia = data->ia.cr_tia_p_addr;
1629 ci->addr_tr_hia = data->ia.tr_hia_p_addr;
1630 ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
1631 ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
1632 ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
1633 ci->addr_tfdq = data->txq.tfds_p_addr;
1634 ci->num_tfdq = data->txq.count;
1635 ci->num_urbdq0 = data->txq.count;
1636 ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
1637 ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
1638 ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
1639 ci->addr_frbdq = data->rxq.frbds_p_addr;
1640 ci->num_frbdq = data->rxq.count;
1641 ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1642 ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
1643 ci->num_urbdq1 = data->rxq.count;
1644 ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1645
1646 ci->dbg_output_mode = 0x01;
1647 ci->dbgc_addr = data->dbgc.frag_p_addr;
1648 ci->dbgc_size = data->dbgc.frag_size;
1649 ci->dbg_preset = 0x00;
1650 }
1651
1652 static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
1653 struct txq *txq)
1654 {
1655 /* Free data buffers first */
1656 dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1657 txq->buf_v_addr, txq->buf_p_addr);
1658 kfree(txq->bufs);
1659 }
1660
1661 static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
1662 struct txq *txq)
1663 {
1664 int i;
1665 struct data_buf *buf;
1666
1667 /* Allocate the same number of buffers as the descriptor */
1668 txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL);
1669 if (!txq->bufs)
1670 return -ENOMEM;
1671
1672 /* Allocate full chunk of data buffer for DMA first and do indexing and
1673 * initialization next, so it can be freed easily
1674 */
1675 txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1676 txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1677 &txq->buf_p_addr,
1678 GFP_KERNEL | __GFP_NOWARN);
1679 if (!txq->buf_v_addr) {
1680 kfree(txq->bufs);
1681 return -ENOMEM;
1682 }
1683
1684 /* Assign slices of the allocated DMA buffer to bufs. Each data_buf
1685 * holds both the virtual and the physical address of its slice
1686 */
1687 for (i = 0; i < txq->count; i++) {
1688 buf = &txq->bufs[i];
1689 buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1690 buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1691 }
1692
1693 return 0;
1694 }
1695
1696 static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
1697 struct rxq *rxq)
1698 {
1699 /* Free data buffers first */
1700 dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1701 rxq->buf_v_addr, rxq->buf_p_addr);
1702 kfree(rxq->bufs);
1703 }
1704
1705 static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
1706 struct rxq *rxq)
1707 {
1708 int i;
1709 struct data_buf *buf;
1710
1711 /* Allocate the same number of buffers as the descriptor */
1712 rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL);
1713 if (!rxq->bufs)
1714 return -ENOMEM;
1715
1716 /* Allocate full chunk of data buffer for DMA first and do indexing and
1717 * initialization next, so it can be freed easily
1718 */
1719 rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1720 rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1721 &rxq->buf_p_addr,
1722 GFP_KERNEL | __GFP_NOWARN);
1723 if (!rxq->buf_v_addr) {
1724 kfree(rxq->bufs);
1725 return -ENOMEM;
1726 }
1727
1728 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1729 * have virtual address and physical address
1730 */
1731 for (i = 0; i < rxq->count; i++) {
1732 buf = &rxq->bufs[i];
1733 buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1734 buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1735 }
1736
1737 return 0;
1738 }
1739
btintel_pcie_setup_ia(struct btintel_pcie_data * data,dma_addr_t p_addr,void * v_addr,struct ia * ia)1740 static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
1741 dma_addr_t p_addr, void *v_addr,
1742 struct ia *ia)
1743 {
1744 /* TR Head Index Array */
1745 ia->tr_hia_p_addr = p_addr;
1746 ia->tr_hia = v_addr;
1747
1748 /* TR Tail Index Array */
1749 ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
1750 ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
1751
1752 /* CR Head index Array */
1753 ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
1754 ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
1755
1756 /* CR Tail Index Array */
1757 ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
1758 ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
1759 }
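
/* Resulting index-array layout inside the shared DMA region (four
 * consecutive arrays of u16, one slot per queue):
 *
 *   p_addr + 0 * sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES : TR head indices
 *   p_addr + 1 * sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES : TR tail indices
 *   p_addr + 2 * sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES : CR head indices
 *   p_addr + 3 * sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES : CR tail indices
 */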

static void btintel_pcie_free(struct btintel_pcie_data *data)
{
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
	btintel_pcie_free_txq_bufs(data, &data->txq);

	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
}

/* Allocate tx and rx queues, any related data structures and buffers.
 */
static int btintel_pcie_alloc(struct btintel_pcie_data *data)
{
	int err = 0;
	size_t total;
	dma_addr_t p_addr;
	void *v_addr;

	/* Allocate the chunk of DMA memory for descriptors, index arrays, and
	 * context information, instead of allocating individually.
	 * The DMA memory for the data buffers is allocated while setting up
	 * each queue.
	 *
	 * Total size is the sum of the following
	 *  + size of TFD * number of descriptors in queue
	 *  + size of URBD0 * number of descriptors in queue
	 *  + size of FRBD * number of descriptors in queue
	 *  + size of URBD1 * number of descriptors in queue
	 *  + size of index * number of queues(2) * type of index array(4)
	 *  + size of context information
	 */
	total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
	total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Add the sum of the index array size and the ci struct size */
	total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
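
	/* The single dma_pool allocation below is then carved up in this
	 * order (see the pointer arithmetic that follows):
	 *
	 *   TFDs | URBD0s | FRBDs | URBD1s | index arrays | ctx_info
	 */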

	/* Allocate DMA Pool */
	data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
					 total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
	if (!data->dma_pool) {
		err = -ENOMEM;
		goto exit_error;
	}

	v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
				 &p_addr);
	if (!v_addr) {
		dma_pool_destroy(data->dma_pool);
		err = -ENOMEM;
		goto exit_error;
	}

	data->dma_p_addr = p_addr;
	data->dma_v_addr = v_addr;

	/* Setup descriptor count */
	data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
	data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Setup tfds */
	data->txq.tfds_p_addr = p_addr;
	data->txq.tfds = v_addr;

	p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup urbd0 */
	data->txq.urbd0s_p_addr = p_addr;
	data->txq.urbd0s = v_addr;

	p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup FRBD */
	data->rxq.frbds_p_addr = p_addr;
	data->rxq.frbds = v_addr;

	p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup urbd1 */
	data->rxq.urbd1s_p_addr = p_addr;
	data->rxq.urbd1s = v_addr;

	p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup data buffers for txq */
	err = btintel_pcie_setup_txq_bufs(data, &data->txq);
	if (err)
		goto exit_error_pool;

	/* Setup data buffers for rxq */
	err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
	if (err)
		goto exit_error_txq;

	/* Setup Index Array */
	btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);

	/* Setup data buffers for dbgc */
	err = btintel_pcie_setup_dbgc(data);
	if (err)
		goto exit_error_rxq;

	/* Setup Context Information */
	p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
	v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;

	data->ci = v_addr;
	data->ci_p_addr = p_addr;

	/* Initialize the CI */
	btintel_pcie_init_ci(data, data->ci);

	return 0;

exit_error_rxq:
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
exit_error_txq:
	btintel_pcie_free_txq_bufs(data, &data->txq);
exit_error_pool:
	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
exit_error:
	return err;
}

static int btintel_pcie_open(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

static int btintel_pcie_close(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	*(u8 *)skb_put(skb, 1) = 0x00;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
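
/* The injected event above reaches the HCI layer as a standard Command
 * Complete: event code 0x0e (HCI_EV_CMD_COMPLETE), plen 0x04, ncmd 0x01,
 * the little-endian opcode, and a trailing status byte of 0x00 (success).
 */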

static int btintel_pcie_send_frame(struct hci_dev *hdev,
				   struct sk_buff *skb)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	struct hci_command_hdr *cmd;
	__u16 opcode = ~0;
	int ret;
	u32 type;

	if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
		return -ENODEV;

	/* Due to a fw limitation, the type header of the packet should be
	 * 4 bytes, unlike the 1 byte used for UART. For UART, the firmware can
	 * read the first byte to get the packet type and redirect the rest of
	 * the data packet to the right handler.
	 *
	 * But for PCIe, the THF(Transfer Flow Handler) fetches the 4 bytes of
	 * data from DMA memory and by the time it reads the first 4 bytes, it
	 * has already consumed some part of the packet. Thus the packet type
	 * indicator for iBT PCIe is 4 bytes.
	 *
	 * Luckily, when the HCI core creates the skb, it allocates 8 bytes of
	 * headroom for profile and driver use, so the iBT PCIe packet type can
	 * be prepended to the data before it is sent to the device.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		type = BTINTEL_PCIE_HCI_CMD_PKT;
		cmd = (void *)skb->data;
		opcode = le16_to_cpu(cmd->opcode);
		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
			/* When the BTINTEL_HCI_OP_RESET command is issued to
			 * boot into the operational firmware, it will actually
			 * not send a command complete event. To keep the flow
			 * control working, inject that event here.
			 */
			if (opcode == BTINTEL_HCI_OP_RESET)
				btintel_pcie_inject_cmd_complete(hdev, opcode);
		}

		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		type = BTINTEL_PCIE_HCI_ACL_PKT;
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		type = BTINTEL_PCIE_HCI_SCO_PKT;
		hdev->stat.sco_tx++;
		break;
	case HCI_ISODATA_PKT:
		type = BTINTEL_PCIE_HCI_ISO_PKT;
		break;
	default:
		bt_dev_err(hdev, "Unknown HCI packet type");
		return -EILSEQ;
	}

	ret = btintel_pcie_send_sync(data, skb, type, opcode);
	if (ret) {
		hdev->stat.err_tx++;
		bt_dev_err(hdev, "Failed to send frame (%d)", ret);
		goto exit_error;
	}

	hdev->stat.byte_tx += skb->len;
	kfree_skb(skb);

exit_error:
	return ret;
}
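
/* Illustrative sketch only, not driver code: one way the 4-byte
 * little-endian packet type indicator described above could be prepended
 * using the skb headroom. The helper name is hypothetical; the actual
 * prepend presumably happens inside btintel_pcie_send_sync(), which
 * receives the type as a parameter.
 */
#if 0
static int example_prepend_pcie_type(struct sk_buff *skb, u32 type)
{
	/* The HCI core reserves 8 bytes of headroom, enough for the
	 * 4-byte iBT PCIe type indicator.
	 */
	if (skb_headroom(skb) < BTINTEL_PCIE_HCI_TYPE_LEN)
		return -ENOMEM;

	put_unaligned_le32(type, skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN));
	return 0;
}
#endif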

static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
{
	struct hci_dev *hdev;

	hdev = data->hdev;
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
	data->hdev = NULL;
}

static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
{
	for (int i = 0; i < data->alloc_vecs; i++)
		synchronize_irq(data->msix_entries[i].vector);
}

static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	const u8 param[1] = { 0xFF };
	struct intel_version_tlv ver_tlv;
	struct sk_buff *skb;
	int err;

	BT_DBG("%s", hdev->name);

	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* Check the status */
	if (skb->data[0]) {
		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
			   skb->data[0]);
		err = -EIO;
		goto exit_error;
	}

	/* Apply the common HCI quirks for Intel devices */
	hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
	hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
	hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);

	/* Set up the quality report callback for Intel devices */
	hdev->set_quality_report = btintel_set_quality_report;

	memset(&ver_tlv, 0, sizeof(ver_tlv));
	/* For TLV type devices, parse the TLV data */
	err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
	if (err) {
		bt_dev_err(hdev, "Failed to parse TLV version information");
		goto exit_error;
	}

	switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
	case 0x37:
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
			   INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come
	 * along.
	 */
	switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
	case 0x1e: /* BzrI */
	case 0x1f: /* ScP */
	case 0x22: /* BzrIW */
		/* Display version information of TLV type */
		btintel_version_info_tlv(hdev, &ver_tlv);

		/* Apply the device specific HCI quirks for TLV based devices
		 *
		 * All TLV based devices support WBS
		 */
		hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);

		/* Setup MSFT Extension support */
		btintel_set_msft_opcode(hdev,
					INTEL_HW_VARIANT(ver_tlv.cnvi_bt));

		err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
		if (err)
			goto exit_error;
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
			   INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
	data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
	data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
	data->dmp_hdr.fw_build_type = ver_tlv.build_type;
	data->dmp_hdr.fw_build_num = ver_tlv.build_num;
	data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;

	if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
		data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;

	btintel_print_fseq_info(hdev);
exit_error:
	kfree_skb(skb);

	return err;
}

static int btintel_pcie_setup(struct hci_dev *hdev)
{
	int err, fw_dl_retry = 0;
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
		bt_dev_err(hdev, "Firmware download retry count: %d",
			   fw_dl_retry);
		btintel_pcie_dump_debug_registers(hdev);
		btintel_pcie_disable_interrupts(data);
		btintel_pcie_synchronize_irqs(data);
		err = btintel_pcie_reset_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to do shr reset: %d", err);
			break;
		}
		usleep_range(10000, 12000);
		btintel_pcie_reset_ia(data);
		btintel_pcie_enable_interrupts(data);
		btintel_pcie_config_msix(data);
		err = btintel_pcie_enable_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to enable hardware: %d", err);
			break;
		}
		btintel_pcie_start_rx(data);
	}

	if (!err)
		set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags);
	return err;
}

static struct btintel_pcie_dev_recovery *
btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
{
	struct btintel_pcie_dev_recovery *tmp, *data = NULL;
	const char *name = pci_name(pdev);
	const size_t name_len = strlen(name) + 1;
	struct hci_dev *hdev = to_hci_dev(dev);

	spin_lock(&btintel_pcie_recovery_lock);
	list_for_each_entry(tmp, &btintel_pcie_recovery_list, list) {
		if (strcmp(tmp->name, name))
			continue;
		data = tmp;
		break;
	}
	spin_unlock(&btintel_pcie_recovery_lock);

	if (data) {
		bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name);
		return data;
	}

	data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
	if (!data)
		return NULL;

	strscpy(data->name, name, name_len);
	spin_lock(&btintel_pcie_recovery_lock);
	list_add_tail(&data->list, &btintel_pcie_recovery_list);
	spin_unlock(&btintel_pcie_recovery_lock);

	return data;
}

static void btintel_pcie_free_restart_list(void)
{
	struct btintel_pcie_dev_recovery *tmp;

	while ((tmp = list_first_entry_or_null(&btintel_pcie_recovery_list,
					       typeof(*tmp), list))) {
		list_del(&tmp->list);
		kfree(tmp);
	}
}

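/* Track recovery attempts per device (identified by its BDF name). The
 * first error opens an error window; further errors within
 * BTINTEL_PCIE_RESET_WINDOW_SECS bump the count up to
 * BTINTEL_PCIE_FLR_MAX_RETRY + 1, while an error after the window has
 * elapsed clears the state. Worked example with a 5s window and one FLR
 * retry: an error at t=100s sets count=1, another at t=103s bumps it to 2,
 * and one at t=110s resets the state back to count=0.
 */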
static void btintel_pcie_inc_recovery_count(struct pci_dev *pdev,
					    struct device *dev)
{
	struct btintel_pcie_dev_recovery *data;
	time64_t retry_window;

	data = btintel_pcie_get_recovery(pdev, dev);
	if (!data)
		return;

	retry_window = ktime_get_boottime_seconds() - data->last_error;
	if (data->count == 0) {
		data->last_error = ktime_get_boottime_seconds();
		data->count++;
	} else if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
		   data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) {
		data->count++;
	} else if (retry_window > BTINTEL_PCIE_RESET_WINDOW_SECS) {
		data->last_error = 0;
		data->count = 0;
	}
}

static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data);

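/* Recovery worker: tear down the HCI interface, reset the PCIe function
 * via pci_reset_function(), then bring the device back up and register a
 * fresh hdev. Runs with the rescan/remove lock held so the device cannot
 * disappear underneath it.
 */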
static void btintel_pcie_removal_work(struct work_struct *wk)
{
	struct btintel_pcie_removal *removal =
		container_of(wk, struct btintel_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	struct btintel_pcie_data *data;
	int err;

	pci_lock_rescan_remove();

	if (!pdev->bus)
		goto error;

	data = pci_get_drvdata(pdev);

	btintel_pcie_disable_interrupts(data);
	btintel_pcie_synchronize_irqs(data);

	flush_work(&data->rx_work);

	bt_dev_dbg(data->hdev, "Release bluetooth interface");
	btintel_pcie_release_hdev(data);

	err = pci_reset_function(pdev);
	if (err) {
		BT_ERR("Failed resetting the pcie device (%d)", err);
		goto error;
	}

	btintel_pcie_enable_interrupts(data);
	btintel_pcie_config_msix(data);

	err = btintel_pcie_enable_bt(data);
	if (err) {
		BT_ERR("Failed to enable bluetooth hardware after reset (%d)",
		       err);
		goto error;
	}

	btintel_pcie_reset_ia(data);
	btintel_pcie_start_rx(data);
	data->flags = 0;

	err = btintel_pcie_setup_hdev(data);
	if (err) {
		BT_ERR("Failed registering hdev (%d)", err);
		goto error;
	}
error:
	pci_dev_put(pdev);
	pci_unlock_rescan_remove();
	kfree(removal);
}

static void btintel_pcie_reset(struct hci_dev *hdev)
{
	struct btintel_pcie_removal *removal;
	struct btintel_pcie_data *data;

	data = hci_get_drvdata(hdev);

	if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags))
		return;

	if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags))
		return;

	removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
	if (!removal)
		return;

	removal->pdev = data->pdev;
	INIT_WORK(&removal->work, btintel_pcie_removal_work);
	pci_dev_get(removal->pdev);
	schedule_work(&removal->work);
}

static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
{
	struct btintel_pcie_dev_recovery *data;
	struct btintel_pcie_data *dev_data = hci_get_drvdata(hdev);
	struct pci_dev *pdev = dev_data->pdev;
	time64_t retry_window;

	if (code == 0x13) {
		bt_dev_err(hdev, "Encountered top exception");
		return;
	}

	data = btintel_pcie_get_recovery(pdev, &hdev->dev);
	if (!data)
		return;

	retry_window = ktime_get_boottime_seconds() - data->last_error;

	if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
	    data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) {
		bt_dev_err(hdev, "Exhausted maximum recovery attempts (%d), attempt count: %d",
			   BTINTEL_PCIE_FLR_MAX_RETRY, data->count);
		bt_dev_dbg(hdev, "Boot time: %lld seconds",
			   ktime_get_boottime_seconds());
		bt_dev_dbg(hdev, "Last error at: %lld seconds",
			   data->last_error);
		return;
	}
	btintel_pcie_inc_recovery_count(pdev, &hdev->dev);
	btintel_pcie_reset(hdev);
}

static bool btintel_pcie_wakeup(struct hci_dev *hdev)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	return device_may_wakeup(&data->pdev->dev);
}

static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
	int err;
	struct hci_dev *hdev;

	hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_PCI;
	hci_set_drvdata(hdev, data);

	data->hdev = hdev;
	SET_HCIDEV_DEV(hdev, &data->pdev->dev);

	hdev->manufacturer = 2;
	hdev->open = btintel_pcie_open;
	hdev->close = btintel_pcie_close;
	hdev->send = btintel_pcie_send_frame;
	hdev->setup = btintel_pcie_setup;
	hdev->shutdown = btintel_shutdown_combined;
	hdev->hw_error = btintel_pcie_hw_error;
	hdev->set_diag = btintel_set_diag;
	hdev->set_bdaddr = btintel_set_bdaddr;
	hdev->reset = btintel_pcie_reset;
	hdev->wakeup = btintel_pcie_wakeup;

	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("Failed to register to hdev (%d)", err);
		goto exit_error;
	}

	data->dmp_hdr.driver_name = KBUILD_MODNAME;
	return 0;

exit_error:
	hci_free_dev(hdev);
	return err;
}

static int btintel_pcie_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err;
	struct btintel_pcie_data *data;

	if (!pdev)
		return -ENODEV;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

	spin_lock_init(&data->irq_lock);
	spin_lock_init(&data->hci_rx_lock);

	init_waitqueue_head(&data->gp0_wait_q);
	data->gp0_received = false;

	init_waitqueue_head(&data->tx_wait_q);
	data->tx_wait_done = false;

	data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
	if (!data->workqueue)
		return -ENOMEM;

	skb_queue_head_init(&data->rx_skb_q);
	INIT_WORK(&data->rx_work, btintel_pcie_rx_work);

	data->boot_stage_cache = 0x00;
	data->img_resp_cache = 0x00;

	err = btintel_pcie_config_pcie(pdev, data);
	if (err)
		goto exit_error;

	pci_set_drvdata(pdev, data);

	err = btintel_pcie_alloc(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_enable_bt(data);
	if (err)
		goto exit_error;

	/* CNV information (CNVi and CNVr) is in CSR */
	data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);

	data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);

	err = btintel_pcie_start_rx(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_setup_hdev(data);
	if (err)
		goto exit_error;

	bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
		   data->cnvr);
	return 0;

exit_error:
	/* reset device before exit */
	btintel_pcie_reset_bt(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);

	return err;
}

static void btintel_pcie_remove(struct pci_dev *pdev)
{
	struct btintel_pcie_data *data;

	data = pci_get_drvdata(pdev);

	btintel_pcie_disable_interrupts(data);

	btintel_pcie_synchronize_irqs(data);

	flush_work(&data->rx_work);

	btintel_pcie_reset_bt(data);
	for (int i = 0; i < data->alloc_vecs; i++) {
		struct msix_entry *msix_entry;

		msix_entry = &data->msix_entries[i];
		free_irq(msix_entry->vector, msix_entry);
	}

	pci_free_irq_vectors(pdev);

	btintel_pcie_release_hdev(data);

	destroy_workqueue(data->workqueue);

	btintel_pcie_free(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_DEV_COREDUMP
static void btintel_pcie_coredump(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data = pci_get_drvdata(pdev);

	if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
		return;

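	/* Record a user-initiated trigger; the dump itself is then generated
	 * from the rx_work path queued below.
	 */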
	data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
	queue_work(data->workqueue, &data->rx_work);
}
#endif

static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data;
	ktime_t start;
	u32 dxstate;
	int err;

	data = pci_get_drvdata(pdev);

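	/* System suspend maps to D3hot; hibernation events (freeze and
	 * poweroff) map to D3cold.
	 */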
	dxstate = (mesg.event == PM_EVENT_SUSPEND ?
		   BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);

	data->gp0_received = false;

	start = ktime_get();

	/* Refer: 6.4.11.7 -> Platform power management */
	btintel_pcie_wr_sleep_cntrl(data, dxstate);
	err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
				 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
	if (err == 0) {
		bt_dev_err(data->hdev,
			   "Timeout (%u ms) on alive interrupt for D3 entry",
			   BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
		return -EBUSY;
	}

	bt_dev_dbg(data->hdev,
		   "device entered into d3 state from d0 in %lld us",
		   ktime_to_us(ktime_get() - start));

	return 0;
}

static int btintel_pcie_suspend(struct device *dev)
{
	return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
}

static int btintel_pcie_hibernate(struct device *dev)
{
	return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
}

static int btintel_pcie_freeze(struct device *dev)
{
	return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
}

static int btintel_pcie_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data;
	ktime_t start;
	int err;

	data = pci_get_drvdata(pdev);
	data->gp0_received = false;

	start = ktime_get();

	/* Refer: 6.4.11.7 -> Platform power management */
	btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
	err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
				 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
	if (err == 0) {
		bt_dev_err(data->hdev,
			   "Timeout (%u ms) on alive interrupt for D0 entry",
			   BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
		return -EBUSY;
	}

	bt_dev_dbg(data->hdev,
		   "device entered into d0 state from d3 in %lld us",
		   ktime_to_us(ktime_get() - start));
	return 0;
}

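/* Thaw and restore reuse the resume callback, since in each case the
 * controller only needs to be brought back to D0.
 */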
static const struct dev_pm_ops btintel_pcie_pm_ops = {
	.suspend = btintel_pcie_suspend,
	.resume = btintel_pcie_resume,
	.freeze = btintel_pcie_freeze,
	.thaw = btintel_pcie_resume,
	.poweroff = btintel_pcie_hibernate,
	.restore = btintel_pcie_resume,
};

static struct pci_driver btintel_pcie_driver = {
	.name = KBUILD_MODNAME,
	.id_table = btintel_pcie_table,
	.probe = btintel_pcie_probe,
	.remove = btintel_pcie_remove,
	.driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
#ifdef CONFIG_DEV_COREDUMP
	.driver.coredump = btintel_pcie_coredump
#endif
};

static int __init btintel_pcie_init(void)
{
	return pci_register_driver(&btintel_pcie_driver);
}

static void __exit btintel_pcie_exit(void)
{
	pci_unregister_driver(&btintel_pcie_driver);
	btintel_pcie_free_restart_list();
}

module_init(btintel_pcie_init);
module_exit(btintel_pcie_exit);

MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");