1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Bluetooth support for Intel PCIe devices
5 *
6 * Copyright (C) 2024 Intel Corporation
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/firmware.h>
12 #include <linux/pci.h>
13 #include <linux/wait.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16
17 #include <linux/unaligned.h>
18 #include <linux/devcoredump.h>
19
20 #include <net/bluetooth/bluetooth.h>
21 #include <net/bluetooth/hci_core.h>
22 #include <net/bluetooth/hci_drv.h>
23
24 #include "btintel.h"
25 #include "btintel_pcie.h"
26
27 #define VERSION "0.1"
28
29 #define BTINTEL_PCI_DEVICE(dev, subdev) \
30 .vendor = PCI_VENDOR_ID_INTEL, \
31 .device = (dev), \
32 .subvendor = PCI_ANY_ID, \
33 .subdevice = (subdev), \
34 .driver_data = 0
35
36 #define POLL_INTERVAL_US 10
37
38 /* Intel Bluetooth PCIe device id table */
39 static const struct pci_device_id btintel_pcie_table[] = {
40 /* BlazarI, Wildcat Lake */
41 { BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
42 /* BlazarI, Lunar Lake */
43 { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
44 /* Scorpious, Panther Lake-H484 */
45 { BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
46 /* Scorpious, Panther Lake-H404 */
47 { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
48 { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
51
52 struct btintel_pcie_dev_recovery {
53 struct list_head list;
54 u8 count;
55 time64_t last_error;
56 char name[];
57 };
58
59 /* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
60 #define BTINTEL_PCIE_HCI_TYPE_LEN 4
61 #define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
62 #define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
63 #define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
64 #define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
65 #define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
66
67 #define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
68
69 #define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
70 #define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
71
72 #define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
73 #define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
74
77 #define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
78 #define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
79
80 #define BTINTEL_PCIE_RESET_WINDOW_SECS 5
81 #define BTINTEL_PCIE_FLR_MAX_RETRY 1
82
83 /* Alive interrupt context */
84 enum {
85 BTINTEL_PCIE_ROM,
86 BTINTEL_PCIE_FW_DL,
87 BTINTEL_PCIE_HCI_RESET,
88 BTINTEL_PCIE_INTEL_HCI_RESET1,
89 BTINTEL_PCIE_INTEL_HCI_RESET2,
90 BTINTEL_PCIE_D0,
91 BTINTEL_PCIE_D3
92 };
93
94 /* Structure for dbgc fragment buffer
95 * @buf_addr_lsb: LSB of the buffer's physical address
96 * @buf_addr_msb: MSB of the buffer's physical address
97 * @buf_size: Total size of the buffer
98 */
99 struct btintel_pcie_dbgc_ctxt_buf {
100 u32 buf_addr_lsb;
101 u32 buf_addr_msb;
102 u32 buf_size;
103 };
104
105 /* Structure for dbgc fragment
106 * @magic_num: 0XA5A5A5A5
107 * @ver: For Driver-FW compatibility
108 * @total_size: Total size of the payload debug info
109 * @num_buf: Num of allocated debug bufs
110 * @bufs: All buffer's addresses and sizes
111 */
112 struct btintel_pcie_dbgc_ctxt {
113 u32 magic_num;
114 u32 ver;
115 u32 total_size;
116 u32 num_buf;
117 struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
118 };
119
120 struct btintel_pcie_removal {
121 struct pci_dev *pdev;
122 struct work_struct work;
123 };
124
125 static LIST_HEAD(btintel_pcie_recovery_list);
126 static DEFINE_SPINLOCK(btintel_pcie_recovery_lock);
127
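/* Map an alive interrupt context value to a human readable string for logging */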
static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
129 {
130 switch (alive_intr_ctxt) {
131 case BTINTEL_PCIE_ROM:
132 return "rom";
133 case BTINTEL_PCIE_FW_DL:
134 return "fw_dl";
135 case BTINTEL_PCIE_D0:
136 return "d0";
137 case BTINTEL_PCIE_D3:
138 return "d3";
139 case BTINTEL_PCIE_HCI_RESET:
140 return "hci_reset";
141 case BTINTEL_PCIE_INTEL_HCI_RESET1:
142 return "intel_reset1";
143 case BTINTEL_PCIE_INTEL_HCI_RESET2:
144 return "intel_reset2";
145 default:
146 return "unknown";
147 }
148 }
149
/* This function initializes the memory for DBGC buffers and formats the
* DBGC fragment, which consists of header info and each DBGC buffer's
* LSB, MSB and size as the payload
*/
static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
155 {
156 struct btintel_pcie_dbgc_ctxt db_frag;
157 struct data_buf *buf;
158 int i;
159
160 data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
161 data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
162 sizeof(*buf), GFP_KERNEL);
163 if (!data->dbgc.bufs)
164 return -ENOMEM;
165
166 data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
167 data->dbgc.count *
168 BTINTEL_PCIE_DBGC_BUFFER_SIZE,
169 &data->dbgc.buf_p_addr,
170 GFP_KERNEL | __GFP_NOWARN);
171 if (!data->dbgc.buf_v_addr)
172 return -ENOMEM;
173
174 data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
175 sizeof(struct btintel_pcie_dbgc_ctxt),
176 &data->dbgc.frag_p_addr,
177 GFP_KERNEL | __GFP_NOWARN);
178 if (!data->dbgc.frag_v_addr)
179 return -ENOMEM;
180
181 data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
182
183 db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
184 db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
185 db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
186 db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
187
188 for (i = 0; i < data->dbgc.count; i++) {
189 buf = &data->dbgc.bufs[i];
190 buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
191 buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
192 db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
193 db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
194 db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
195 }
196
197 memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
198 return 0;
199 }
200
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
u16 queue_num)
203 {
204 bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u",
205 queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
206 ia->tr_hia[queue_num], ia->tr_tia[queue_num],
207 ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
208 }
209
static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
u16 index)
212 {
213 bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
214 index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
215 }
216
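/* Recover the driver data from an MSI-X entry. This relies on the entry
* pointing into the msix_entries[] array embedded in struct btintel_pcie_data.
*/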
static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
218 {
219 u8 queue = entry->entry;
220 struct msix_entry *entries = entry - queue;
221
222 return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
223 }
224
225 /* Set the doorbell for TXQ to notify the device that @index (actually index-1)
226 * of the TFD is updated and ready to transmit.
227 */
static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
229 {
230 u32 val;
231
232 val = index;
233 val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
234
235 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
236 }
237
/* Copy the data to the next (@tfd_index) data buffer and update the TFD
* (transfer descriptor) with the data length and the DMA address of the
* data buffer.
*/
static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
struct sk_buff *skb)
243 {
244 struct data_buf *buf;
245 struct tfd *tfd;
246
247 tfd = &txq->tfds[tfd_index];
248 memset(tfd, 0, sizeof(*tfd));
249
250 buf = &txq->bufs[tfd_index];
251
252 tfd->size = skb->len;
253 tfd->addr = buf->data_p_addr;
254
255 /* Copy the outgoing data to DMA buffer */
256 memcpy(buf->data, skb->data, tfd->size);
257 }
258
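/* Log the boot stage, IPC and mailbox CSRs along with the RX/TX completion
* ring indexes, and forward the same text to the monitor via hci_recv_diag()
*/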
static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
260 {
261 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
262 u16 cr_hia, cr_tia;
263 u32 reg, mbox_reg;
264 struct sk_buff *skb;
265 u8 buf[80];
266
267 skb = alloc_skb(1024, GFP_ATOMIC);
268 if (!skb)
269 return;
270
271 snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
272 bt_dev_dbg(hdev, "%s", buf);
273 skb_put_data(skb, buf, strlen(buf));
274
275 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
276 snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
277 bt_dev_dbg(hdev, "%s", buf);
278 skb_put_data(skb, buf, strlen(buf));
279 data->boot_stage_cache = reg;
280
281 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
282 snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
283 skb_put_data(skb, buf, strlen(buf));
284 bt_dev_dbg(hdev, "%s", buf);
285
286 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
287 snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
288 skb_put_data(skb, buf, strlen(buf));
289 bt_dev_dbg(hdev, "%s", buf);
290
291 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
292 snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
293 skb_put_data(skb, buf, strlen(buf));
294 bt_dev_dbg(hdev, "%s", buf);
295
/* Read the mailbox status and registers */
297 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
298 snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
299 skb_put_data(skb, buf, strlen(buf));
300 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
301 mbox_reg = btintel_pcie_rd_reg32(data,
302 BTINTEL_PCIE_CSR_MBOX_1_REG);
303 snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
304 skb_put_data(skb, buf, strlen(buf));
305 bt_dev_dbg(hdev, "%s", buf);
306 }
307
308 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
309 mbox_reg = btintel_pcie_rd_reg32(data,
310 BTINTEL_PCIE_CSR_MBOX_2_REG);
311 snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
312 skb_put_data(skb, buf, strlen(buf));
313 bt_dev_dbg(hdev, "%s", buf);
314 }
315
316 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
317 mbox_reg = btintel_pcie_rd_reg32(data,
318 BTINTEL_PCIE_CSR_MBOX_3_REG);
319 snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
320 skb_put_data(skb, buf, strlen(buf));
321 bt_dev_dbg(hdev, "%s", buf);
322 }
323
324 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
325 mbox_reg = btintel_pcie_rd_reg32(data,
326 BTINTEL_PCIE_CSR_MBOX_4_REG);
327 snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
328 skb_put_data(skb, buf, strlen(buf));
329 bt_dev_dbg(hdev, "%s", buf);
330 }
331
332 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
333 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
334 snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
335 skb_put_data(skb, buf, strlen(buf));
336 bt_dev_dbg(hdev, "%s", buf);
337
338 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
339 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
340 snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
341 skb_put_data(skb, buf, strlen(buf));
342 bt_dev_dbg(hdev, "%s", buf);
343 snprintf(buf, sizeof(buf), "--------------------------------");
344 bt_dev_dbg(hdev, "%s", buf);
345
346 hci_recv_diag(hdev, skb);
347 }
348
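/* Copy the packet into the next free TFD, ring the TX doorbell and wait
* synchronously for the TX completion. For HCI_OP_RESET and
* BTINTEL_HCI_OP_RESET the function also waits for the alive (gp0) interrupt
*/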
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
struct sk_buff *skb, u32 pkt_type, u16 opcode)
351 {
352 int ret;
353 u16 tfd_index;
354 u32 old_ctxt;
355 bool wait_on_alive = false;
356 struct hci_dev *hdev = data->hdev;
357
358 struct txq *txq = &data->txq;
359
360 tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
361
362 if (tfd_index > txq->count)
363 return -ERANGE;
364
365 /* Firmware raises alive interrupt on HCI_OP_RESET or
366 * BTINTEL_HCI_OP_RESET
367 */
368 wait_on_alive = (pkt_type == BTINTEL_PCIE_HCI_CMD_PKT &&
369 (opcode == BTINTEL_HCI_OP_RESET || opcode == HCI_OP_RESET));
370
371 if (wait_on_alive) {
372 data->gp0_received = false;
373 old_ctxt = data->alive_intr_ctxt;
374 data->alive_intr_ctxt =
375 (opcode == BTINTEL_HCI_OP_RESET ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
376 BTINTEL_PCIE_HCI_RESET);
377 bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s",
378 opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
379 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
380 }
381
382 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &pkt_type,
383 BTINTEL_PCIE_HCI_TYPE_LEN);
384
/* Prepare for TX. It updates the TFD with the length of the data and the
* address of the DMA buffer, and copies the data to the DMA buffer
*/
388 btintel_pcie_prepare_tx(txq, tfd_index, skb);
389
390 tfd_index = (tfd_index + 1) % txq->count;
391 data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;
392
393 /* Arm wait event condition */
394 data->tx_wait_done = false;
395
396 /* Set the doorbell to notify the device */
397 btintel_pcie_set_tx_db(data, tfd_index);
398
399 /* Wait for the complete interrupt - URBD0 */
400 ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
401 msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
402 if (!ret) {
403 bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
404 BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
405 btintel_pcie_dump_debug_registers(data->hdev);
406 return -ETIME;
407 }
408
409 if (wait_on_alive) {
410 ret = wait_event_timeout(data->gp0_wait_q,
411 data->gp0_received,
412 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
413 if (!ret) {
414 hdev->stat.err_tx++;
415 bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
416 BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
417 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
418 return -ETIME;
419 }
420 }
421 return 0;
422 }
423
424 /* Set the doorbell for RXQ to notify the device that @index (actually index-1)
425 * is available to receive the data
426 */
static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
428 {
429 u32 val;
430
431 val = index;
432 val |= (BTINTEL_PCIE_RX_DB_VEC << 16);
433
434 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
435 }
436
437 /* Update the FRBD (free buffer descriptor) with the @frbd_index and the
438 * DMA address of the free buffer.
439 */
static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
441 {
442 struct data_buf *buf;
443 struct frbd *frbd;
444
445 /* Get the buffer of the FRBD for DMA */
446 buf = &rxq->bufs[frbd_index];
447
448 frbd = &rxq->frbds[frbd_index];
449 memset(frbd, 0, sizeof(*frbd));
450
451 /* Update FRBD */
452 frbd->tag = frbd_index;
453 frbd->addr = buf->data_p_addr;
454 }
455
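/* Post one free buffer descriptor to the device and ring the RX doorbell */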
static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
457 {
458 u16 frbd_index;
459 struct rxq *rxq = &data->rxq;
460
461 frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];
462
463 if (frbd_index > rxq->count)
464 return -ERANGE;
465
466 /* Prepare for RX submit. It updates the FRBD with the address of DMA
467 * buffer
468 */
469 btintel_pcie_prepare_rx(rxq, frbd_index);
470
471 frbd_index = (frbd_index + 1) % rxq->count;
472 data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
473 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
474
475 /* Set the doorbell to notify the device */
476 btintel_pcie_set_rx_db(data, frbd_index);
477
478 return 0;
479 }
480
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
482 {
483 int i, ret;
484 struct rxq *rxq = &data->rxq;
485
/* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around a
* hardware issue that leads to a race condition in the firmware.
*/
489
490 for (i = 0; i < rxq->count - 3; i++) {
491 ret = btintel_pcie_submit_rx(data);
492 if (ret)
493 return ret;
494 }
495
496 return 0;
497 }
498
static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
500 {
501 memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
502 memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
503 memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
504 memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
505 }
506
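/* Disconnect the bus master, wait for it to quiesce and then issue a
* software reset of the BT function via the function control CSR
*/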
static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
508 {
509 u32 reg;
510 int retry = 3;
511
512 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
513
514 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
515 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
516 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
517 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
518
519 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
520
521 do {
522 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
523 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
524 break;
525 usleep_range(10000, 12000);
526
527 } while (--retry > 0);
528 usleep_range(10000, 12000);
529
530 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
531
532 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
533 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
534 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
535 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
536 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
537 usleep_range(10000, 12000);
538
539 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
540 bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
541
542 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
543
/* If the shared hardware reset succeeded, the boot stage register will
* be set to 0
*/
547 return reg == 0 ? 0 : -ENODEV;
548 }
549
static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
551 {
552 u32 reg;
553
554 /* Set MAC_INIT bit to start primary bootloader */
555 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
556 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
557 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
558 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
559 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
560 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
561 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
562 }
563
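/* Request MAC access (and the XTAL clock) from the device and poll until the
* hardware reports that access has been granted
*/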
static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
565 {
566 u32 reg;
567 int retry = 15;
568
569 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
570
571 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
572 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
573 if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
574 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
575
576 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
577
578 do {
579 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
580 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
581 return 0;
/* A delay is needed here for the Target Access hardware to settle down */
583 usleep_range(1000, 1200);
584
585 } while (--retry > 0);
586
587 return -ETIME;
588 }
589
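/* Drop the MAC access and XTAL clock requests taken in
* btintel_pcie_get_mac_access()
*/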
static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
591 {
592 u32 reg;
593
594 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
595
596 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
597 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
598
599 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
600 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
601
602 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
603 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
604
605 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
606 }
607
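/* Append a TLV (type, length, value) entry at @dest and return the address
* right after it, so that calls can be chained
*/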
static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
void *data, size_t size)
610 {
611 struct intel_tlv *tlv;
612
613 tlv = dest;
614 tlv->type = type;
615 tlv->len = size;
616 memcpy(tlv->val, data, tlv->len);
617 return dest + sizeof(*tlv) + size;
618 }
619
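/* Build a devcoredump from the DBGC DRAM buffers: a TLV encoded header
* (vendor, driver, dump time, firmware info, write pointer and so on)
* followed by the raw contents of all debug buffers
*/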
static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
621 {
622 u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
623 struct btintel_pcie_dbgc *dbgc = &data->dbgc;
624 struct hci_dev *hdev = data->hdev;
625 u8 *pdata, *p, buf_idx;
626 struct intel_tlv *tlv;
627 struct timespec64 now;
628 struct tm tm_now;
629 char fw_build[128];
630 char ts[128];
631 char vendor[64];
632 char driver[64];
633
634 if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
635 return -EOPNOTSUPP;
636
637
638 wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
639 offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
640
641 buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
642 if (buf_idx > dbgc->count) {
643 bt_dev_warn(hdev, "Buffer index is invalid");
644 return -EINVAL;
645 }
646
647 prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
648 if (prev_size + offset >= prev_size)
649 data->dmp_hdr.write_ptr = prev_size + offset;
650 else
651 return -EINVAL;
652
653 snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
654 snprintf(driver, sizeof(driver), "Driver: %s\n",
655 data->dmp_hdr.driver_name);
656
657 ktime_get_real_ts64(&now);
658 time64_to_tm(now.tv_sec, 0, &tm_now);
659 snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
660 tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
661 tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
662
663 snprintf(fw_build, sizeof(fw_build),
664 "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
665 2000 + (data->dmp_hdr.fw_timestamp >> 8),
666 data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
667 data->dmp_hdr.fw_build_num);
668
669 data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
670 sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
671 sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
672 sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
673 sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
674 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
675 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
676 sizeof(*tlv) + strlen(ts) +
677 sizeof(*tlv) + strlen(fw_build) +
678 sizeof(*tlv) + strlen(vendor) +
679 sizeof(*tlv) + strlen(driver);
680
681 /*
682 * sizeof(u32) - signature
683 * sizeof(data_len) - to store tlv data size
684 * data_len - TLV data
685 */
686 dump_size = sizeof(u32) + sizeof(data_len) + data_len;
687
688
689 /* Add debug buffers data length to dump size */
690 dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
691
692 pdata = vmalloc(dump_size);
693 if (!pdata)
694 return -ENOMEM;
695 p = pdata;
696
697 *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
698 p += sizeof(u32);
699
700 *(u32 *)p = data_len;
701 p += sizeof(u32);
702
703
704 p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
705 p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
706 p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
707 p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
708 strlen(fw_build));
709 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
710 sizeof(data->dmp_hdr.cnvi_bt));
711 p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
712 sizeof(data->dmp_hdr.write_ptr));
713 p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
714 sizeof(data->dmp_hdr.wrap_ctr));
715
716 data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
717 BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
718
719 p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
720 sizeof(data->dmp_hdr.trigger_reason));
721 p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
722 sizeof(data->dmp_hdr.fw_git_sha1));
723 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
724 sizeof(data->dmp_hdr.cnvr_top));
725 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
726 sizeof(data->dmp_hdr.cnvi_top));
727
728 memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
729 dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
730 return 0;
731 }
732
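/* Acquire MAC access, dump the DBGC DRAM buffers as a devcoredump and
* release MAC access again
*/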
static void btintel_pcie_dump_traces(struct hci_dev *hdev)
734 {
735 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
736 int ret = 0;
737
738 ret = btintel_pcie_get_mac_access(data);
739 if (ret) {
740 bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
741 return;
742 }
743
744 ret = btintel_pcie_read_dram_buffers(data);
745
746 btintel_pcie_release_mac_access(data);
747
748 if (ret)
749 bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
750 }
751
/* This function enables the BT function by setting the BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT
* bit in the BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and waits for an MSI-X interrupt with
* BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
* Then the host reads the firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
* from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
*/
static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
759 {
760 int err;
761 u32 reg;
762
763 data->gp0_received = false;
764
765 /* Update the DMA address of CI struct to CSR */
766 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
767 data->ci_p_addr & 0xffffffff);
768 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
769 (u64)data->ci_p_addr >> 32);
770
/* Reset the cached value of the boot stage. It is updated by the MSI-X
* gp0 interrupt handler.
*/
774 data->boot_stage_cache = 0x0;
775
776 /* Set MAC_INIT bit to start primary bootloader */
777 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
778 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
779 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
780 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
781 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
782 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
783
784 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
785
786 /* MAC is ready. Enable BT FUNC */
787 btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
788 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
789
790 btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
791
792 /* wait for interrupt from the device after booting up to primary
793 * bootloader.
794 */
795 data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
796 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
797 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
798 if (!err)
799 return -ETIME;
800
801 /* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */
802 if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
803 return -ENODEV;
804
805 return 0;
806 }
807
static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
809 {
810 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
811 }
812
static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
814 {
815 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
816 !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
817 }
818
static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
820 {
821 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
822 }
823
static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
825 {
826 return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
827 }
828
static inline bool btintel_pcie_in_device_halt(struct btintel_pcie_data *data)
830 {
831 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED;
832 }
833
static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
u32 dxstate)
836 {
837 bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
838 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
839 }
840
static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
void *buf, u32 dev_addr, int len)
843 {
844 int err;
845 u32 *val = buf;
846
847 /* Get device mac access */
848 err = btintel_pcie_get_mac_access(data);
849 if (err) {
850 bt_dev_err(data->hdev, "Failed to get mac access %d", err);
851 return err;
852 }
853
854 for (; len > 0; len -= 4, dev_addr += 4, val++)
855 *val = btintel_pcie_rd_dev_mem(data, dev_addr);
856
857 btintel_pcie_release_mac_access(data);
858
859 return 0;
860 }
861
static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
863 {
864 return (data->boot_stage_cache &
865 BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
866 (data->boot_stage_cache &
867 BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
868 }
869
static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
871 {
872 return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
873 (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
874 }
875
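/* Log the gp1 mailbox interrupt and dump the debug registers for diagnosis */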
static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
877 {
878 bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
879 btintel_pcie_dump_debug_registers(data->hdev);
880 }
881
882 /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
883 * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
884 */
static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
886 {
887 bool submit_rx, signal_waitq;
888 u32 reg, old_ctxt;
889
/* This interrupt is shared by three different causes and it is not easy to
* know which one raised it. So, each register value is compared with its
* cached value and updated before the wait queue is woken up.
*/
894 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
895 if (reg != data->boot_stage_cache)
896 data->boot_stage_cache = reg;
897
898 bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
899 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
900 data->boot_stage_cache, reg);
901 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
902 if (reg != data->img_resp_cache)
903 data->img_resp_cache = reg;
904
905 if (btintel_pcie_in_error(data)) {
906 bt_dev_err(data->hdev, "Controller in error state");
907 btintel_pcie_dump_debug_registers(data->hdev);
908 return;
909 }
910
911 if (btintel_pcie_in_lockdown(data)) {
912 bt_dev_err(data->hdev, "Controller in lockdown state");
913 btintel_pcie_dump_debug_registers(data->hdev);
914 return;
915 }
916
917 data->gp0_received = true;
918
919 old_ctxt = data->alive_intr_ctxt;
920 submit_rx = false;
921 signal_waitq = false;
922
923 switch (data->alive_intr_ctxt) {
924 case BTINTEL_PCIE_ROM:
925 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
926 signal_waitq = true;
927 break;
928 case BTINTEL_PCIE_FW_DL:
/* The error case is already handled above. Ideally, control should not
* reach here
*/
932 break;
933 case BTINTEL_PCIE_INTEL_HCI_RESET1:
934 if (btintel_pcie_in_op(data)) {
935 submit_rx = true;
936 signal_waitq = true;
937 break;
938 }
939
940 if (btintel_pcie_in_iml(data)) {
941 submit_rx = true;
942 signal_waitq = true;
943 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
944 break;
945 }
946 break;
947 case BTINTEL_PCIE_INTEL_HCI_RESET2:
948 if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
949 btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
950 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
951 }
952 break;
953 case BTINTEL_PCIE_D0:
954 if (btintel_pcie_in_d3(data)) {
955 data->alive_intr_ctxt = BTINTEL_PCIE_D3;
956 signal_waitq = true;
957 break;
958 }
959 break;
960 case BTINTEL_PCIE_D3:
961 if (btintel_pcie_in_d0(data)) {
962 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
963 submit_rx = true;
964 signal_waitq = true;
965 break;
966 }
967 break;
968 case BTINTEL_PCIE_HCI_RESET:
969 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
970 submit_rx = true;
971 signal_waitq = true;
972 break;
973 default:
974 bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
975 data->alive_intr_ctxt);
976 break;
977 }
978
979 if (submit_rx) {
980 btintel_pcie_reset_ia(data);
981 btintel_pcie_start_rx(data);
982 }
983
984 if (signal_waitq) {
985 bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
986 wake_up(&data->gp0_wait_q);
987 }
988
989 if (old_ctxt != data->alive_intr_ctxt)
990 bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
991 btintel_pcie_alivectxt_state2str(old_ctxt),
992 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
993 }
994
/* This function handles the MSI-X interrupt for rx queue 0, which carries TX
* completions
*/
static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
998 {
999 u16 cr_tia, cr_hia;
1000 struct txq *txq;
1001 struct urbd0 *urbd0;
1002
1003 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
1004 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1005
1006 if (cr_tia == cr_hia)
1007 return;
1008
1009 txq = &data->txq;
1010
1011 while (cr_tia != cr_hia) {
1012 data->tx_wait_done = true;
1013 wake_up(&data->tx_wait_q);
1014
1015 urbd0 = &txq->urbd0s[cr_tia];
1016
1017 if (urbd0->tfd_index > txq->count)
1018 return;
1019
1020 cr_tia = (cr_tia + 1) % txq->count;
1021 data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
1022 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
1023 }
1024 }
1025
static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
1027 {
1028 struct hci_event_hdr *hdr = (void *)skb->data;
1029 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
1030
1031 if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
1032 hdr->plen > 0) {
1033 const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
1034 unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
1035
1036 if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
1037 switch (skb->data[2]) {
1038 case 0x02:
1039 /* When switching to the operational firmware
1040 * the device sends a vendor specific event
1041 * indicating that the bootup completed.
1042 */
1043 btintel_bootup(hdev, ptr, len);
1044
1045 /* If bootup event is from operational image,
1046 * driver needs to write sleep control register to
1047 * move into D0 state
1048 */
1049 if (btintel_pcie_in_op(data)) {
1050 btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
1051 data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
1052 kfree_skb(skb);
1053 return 0;
1054 }
1055
1056 if (btintel_pcie_in_iml(data)) {
1057 /* In case of IML, there is no concept
1058 * of D0 transition. Just mimic as if
1059 * IML moved to D0 by clearing INTEL_WAIT_FOR_D0
1060 * bit and waking up the task waiting on
1061 * INTEL_WAIT_FOR_D0. This is required
1062 * as intel_boot() is common function for
1063 * both IML and OP image loading.
1064 */
1065 if (btintel_test_and_clear_flag(data->hdev,
1066 INTEL_WAIT_FOR_D0))
1067 btintel_wake_up_flag(data->hdev,
1068 INTEL_WAIT_FOR_D0);
1069 }
1070 kfree_skb(skb);
1071 return 0;
1072 case 0x06:
1073 /* When the firmware loading completes the
1074 * device sends out a vendor specific event
1075 * indicating the result of the firmware
1076 * loading.
1077 */
1078 btintel_secure_send_result(hdev, ptr, len);
1079 kfree_skb(skb);
1080 return 0;
1081 }
1082 }
1083
/* This is a debug event that comes from the IML and OP image when it
* starts execution. There is no need to pass this event to the stack.
*/
1087 if (skb->data[2] == 0x97) {
1088 hci_recv_diag(hdev, skb);
1089 return 0;
1090 }
1091 }
1092
1093 return hci_recv_frame(hdev, skb);
1094 }
/* Process the received rx data.
* It checks the frame header to identify the data type, creates the skb
* and calls the HCI API.
*/
static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
struct sk_buff *skb)
1101 {
1102 int ret;
1103 u8 pkt_type;
1104 u16 plen;
1105 u32 pcie_pkt_type;
1106 void *pdata;
1107 struct hci_dev *hdev = data->hdev;
1108
1109 spin_lock(&data->hci_rx_lock);
1110
1111 /* The first 4 bytes indicates the Intel PCIe specific packet type */
1112 pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
1113 if (!pdata) {
1114 bt_dev_err(hdev, "Corrupted packet received");
1115 ret = -EILSEQ;
1116 goto exit_error;
1117 }
1118
1119 pcie_pkt_type = get_unaligned_le32(pdata);
1120
1121 switch (pcie_pkt_type) {
1122 case BTINTEL_PCIE_HCI_ACL_PKT:
1123 if (skb->len >= HCI_ACL_HDR_SIZE) {
1124 plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
1125 pkt_type = HCI_ACLDATA_PKT;
1126 } else {
1127 bt_dev_err(hdev, "ACL packet is too short");
1128 ret = -EILSEQ;
1129 goto exit_error;
1130 }
1131 break;
1132
1133 case BTINTEL_PCIE_HCI_SCO_PKT:
1134 if (skb->len >= HCI_SCO_HDR_SIZE) {
1135 plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
1136 pkt_type = HCI_SCODATA_PKT;
1137 } else {
1138 bt_dev_err(hdev, "SCO packet is too short");
1139 ret = -EILSEQ;
1140 goto exit_error;
1141 }
1142 break;
1143
1144 case BTINTEL_PCIE_HCI_EVT_PKT:
1145 if (skb->len >= HCI_EVENT_HDR_SIZE) {
1146 plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
1147 pkt_type = HCI_EVENT_PKT;
1148 } else {
1149 bt_dev_err(hdev, "Event packet is too short");
1150 ret = -EILSEQ;
1151 goto exit_error;
1152 }
1153 break;
1154
1155 case BTINTEL_PCIE_HCI_ISO_PKT:
1156 if (skb->len >= HCI_ISO_HDR_SIZE) {
1157 plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
1158 pkt_type = HCI_ISODATA_PKT;
1159 } else {
1160 bt_dev_err(hdev, "ISO packet is too short");
1161 ret = -EILSEQ;
1162 goto exit_error;
1163 }
1164 break;
1165
1166 default:
1167 bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
1168 pcie_pkt_type);
1169 ret = -EINVAL;
1170 goto exit_error;
1171 }
1172
1173 if (skb->len < plen) {
1174 bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
1175 pkt_type);
1176 ret = -EILSEQ;
1177 goto exit_error;
1178 }
1179
1180 bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
1181
1182 hci_skb_pkt_type(skb) = pkt_type;
1183 hdev->stat.byte_rx += plen;
1184 skb_trim(skb, plen);
1185
1186 if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
1187 ret = btintel_pcie_recv_event(hdev, skb);
1188 else
1189 ret = hci_recv_frame(hdev, skb);
1190 skb = NULL; /* skb is freed in the callee */
1191
1192 exit_error:
1193 if (skb)
1194 kfree_skb(skb);
1195
1196 if (ret)
1197 hdev->stat.err_rx++;
1198
1199 spin_unlock(&data->hci_rx_lock);
1200
1201 return ret;
1202 }
1203
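/* Read the hardware exception dump from device memory, validate its
* signature and feed any HCI event TLVs found in it into the normal
* receive path
*/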
static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
1205 {
1206 int len, err, offset, pending;
1207 struct sk_buff *skb;
1208 u8 *buf, prefix[64];
1209 u32 addr, val;
1210 u16 pkt_len;
1211
1212 struct tlv {
1213 u8 type;
1214 __le16 len;
1215 u8 val[];
1216 } __packed;
1217
1218 struct tlv *tlv;
1219
1220 switch (data->dmp_hdr.cnvi_top & 0xfff) {
1221 case BTINTEL_CNVI_BLAZARI:
1222 case BTINTEL_CNVI_BLAZARIW:
1223 /* only from step B0 onwards */
1224 if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
1225 return;
1226 len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
1227 addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
1228 break;
1229 case BTINTEL_CNVI_SCP:
1230 len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
1231 addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
1232 break;
1233 default:
1234 bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
1235 return;
1236 }
1237
1238 buf = kzalloc(len, GFP_KERNEL);
1239 if (!buf)
1240 goto exit_on_error;
1241
1242 btintel_pcie_mac_init(data);
1243
1244 err = btintel_pcie_read_device_mem(data, buf, addr, len);
1245 if (err)
1246 goto exit_on_error;
1247
1248 val = get_unaligned_le32(buf);
1249 if (val != BTINTEL_PCIE_MAGIC_NUM) {
1250 bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
1251 val);
1252 goto exit_on_error;
1253 }
1254
1255 snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
1256
1257 offset = 4;
1258 do {
1259 pending = len - offset;
1260 if (pending < sizeof(*tlv))
1261 break;
1262 tlv = (struct tlv *)(buf + offset);
1263
1264 /* If type == 0, then there are no more TLVs to be parsed */
1265 if (!tlv->type) {
1266 bt_dev_dbg(data->hdev, "Invalid TLV type 0");
1267 break;
1268 }
1269 pkt_len = le16_to_cpu(tlv->len);
1270 offset += sizeof(*tlv);
1271 pending = len - offset;
1272 if (pkt_len > pending)
1273 break;
1274
1275 offset += pkt_len;
1276
1277 /* Only TLVs of type == 1 are HCI events, no need to process other
1278 * TLVs
1279 */
1280 if (tlv->type != 1)
1281 continue;
1282
1283 bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
1284 if (pkt_len > HCI_MAX_EVENT_SIZE)
1285 break;
1286 skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
1287 if (!skb)
1288 goto exit_on_error;
1289 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1290 skb_put_data(skb, tlv->val, pkt_len);
1291
1292 /* copy Intel specific pcie packet type */
1293 val = BTINTEL_PCIE_HCI_EVT_PKT;
1294 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
1295 BTINTEL_PCIE_HCI_TYPE_LEN);
1296
1297 print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
1298 tlv->val, pkt_len, false);
1299
1300 btintel_pcie_recv_frame(data, skb);
1301 } while (offset < len);
1302
1303 exit_on_error:
1304 kfree(buf);
1305 }
1306
static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
1308 {
1309 bt_dev_err(data->hdev, "Received hw exception interrupt");
1310
1311 if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
1312 return;
1313
1314 if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
1315 return;
1316
1317 /* Trigger device core dump when there is HW exception */
1318 if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
1319 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
1320
1321 queue_work(data->workqueue, &data->rx_work);
1322 }
1323
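/* Work item for deferred RX processing: handle a pending coredump and
* hardware exception first, then pass any queued skbs to the HCI layer
*/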
static void btintel_pcie_rx_work(struct work_struct *work)
1325 {
1326 struct btintel_pcie_data *data = container_of(work,
1327 struct btintel_pcie_data, rx_work);
1328 struct sk_buff *skb;
1329
1330 if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
1331 btintel_pcie_dump_traces(data->hdev);
1332 clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
1333 }
1334
1335 if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike USB products, the controller does not send a hardware
* exception event when an exception occurs. Instead, the controller
* writes the hardware exception event to device memory along with
* optional debug events, raises an MSI-X interrupt and halts. The
* driver must read the exception event from device memory and pass it
* to the stack for further processing.
*/
1343 btintel_pcie_read_hwexp(data);
1344 clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
1345 }
1346
1347 /* Process the sk_buf in queue and send to the HCI layer */
1348 while ((skb = skb_dequeue(&data->rx_skb_q))) {
1349 btintel_pcie_recv_frame(data, skb);
1350 }
1351 }
1352
/* Create an sk_buff with the data, add it to the queue and start the RX work */
static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
void *buf)
1356 {
1357 int ret, len;
1358 struct rfh_hdr *rfh_hdr;
1359 struct sk_buff *skb;
1360
1361 rfh_hdr = buf;
1362
1363 len = rfh_hdr->packet_len;
1364 if (len <= 0) {
1365 ret = -EINVAL;
1366 goto resubmit;
1367 }
1368
1369 /* Remove RFH header */
1370 buf += sizeof(*rfh_hdr);
1371
1372 skb = alloc_skb(len, GFP_ATOMIC);
1373 if (!skb)
1374 goto resubmit;
1375
1376 skb_put_data(skb, buf, len);
1377 skb_queue_tail(&data->rx_skb_q, skb);
1378 queue_work(data->workqueue, &data->rx_work);
1379
1380 resubmit:
1381 ret = btintel_pcie_submit_rx(data);
1382
1383 return ret;
1384 }
1385
1386 /* Handles the MSI-X interrupt for rx queue 1 which is for RX */
static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
1388 {
1389 u16 cr_hia, cr_tia;
1390 struct rxq *rxq;
1391 struct urbd1 *urbd1;
1392 struct data_buf *buf;
1393 int ret;
1394 struct hci_dev *hdev = data->hdev;
1395
1396 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
1397 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1398
1399 bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
1400
1401 /* Check CR_TIA and CR_HIA for change */
1402 if (cr_tia == cr_hia)
1403 return;
1404
1405 rxq = &data->rxq;
1406
/* The firmware may send multiple CDs in a single MSI-X interrupt, so all
* received CDs need to be processed in this handler.
*/
1410 while (cr_tia != cr_hia) {
1411 urbd1 = &rxq->urbd1s[cr_tia];
1412 ipc_print_urbd1(data->hdev, urbd1, cr_tia);
1413
1414 buf = &rxq->bufs[urbd1->frbd_tag];
1415 if (!buf) {
1416 bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
1417 urbd1->frbd_tag);
1418 return;
1419 }
1420
1421 ret = btintel_pcie_submit_rx_work(data, urbd1->status,
1422 buf->data);
1423 if (ret) {
1424 bt_dev_err(hdev, "RXQ: failed to submit rx request");
1425 return;
1426 }
1427
1428 cr_tia = (cr_tia + 1) % rxq->count;
1429 data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
1430 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
1431 }
1432 }
1433
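/* Hard IRQ handler: the actual work is done in the threaded handler */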
static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
1435 {
1436 return IRQ_WAKE_THREAD;
1437 }
1438
static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
1440 {
1441 return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1442 }
1443
static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
1445 {
1446 return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1447 }
1448
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
1450 {
1451 struct msix_entry *entry = dev_id;
1452 struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
1453 u32 intr_fh, intr_hw;
1454
1455 spin_lock(&data->irq_lock);
1456 intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
1457 intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);
1458
/* Clear the cause registers to avoid handling the same cause again */
1460 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
1461 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
1462 spin_unlock(&data->irq_lock);
1463
1464 if (unlikely(!(intr_fh | intr_hw))) {
1465 /* Ignore interrupt, inta == 0 */
1466 return IRQ_NONE;
1467 }
1468
/* This interrupt is raised when there is a hardware exception */
1470 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
1471 btintel_pcie_msix_hw_exp_handler(data);
1472
1473 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
1474 btintel_pcie_msix_gp1_handler(data);
1475
1476
1477 /* For TX */
1478 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
1479 btintel_pcie_msix_tx_handle(data);
1480 if (!btintel_pcie_is_rxq_empty(data))
1481 btintel_pcie_msix_rx_handle(data);
1482 }
1483
1484 /* For RX */
1485 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
1486 btintel_pcie_msix_rx_handle(data);
1487 if (!btintel_pcie_is_txackq_empty(data))
1488 btintel_pcie_msix_tx_handle(data);
1489 }
1490
1491 /* This interrupt is triggered by the firmware after updating
1492 * boot_stage register and image_response register
1493 */
1494 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
1495 btintel_pcie_msix_gp0_handler(data);
1496
1497 /*
1498 * Before sending the interrupt the HW disables it to prevent a nested
1499 * interrupt. This is done by writing 1 to the corresponding bit in
1500 * the mask register. After handling the interrupt, it should be
1501 * re-enabled by clearing this bit. This register is defined as write 1
1502 * clear (W1C) register, meaning that it's cleared by writing 1
1503 * to the bit.
1504 */
1505 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
1506 BIT(entry->entry));
1507
1508 return IRQ_HANDLED;
1509 }
1510
1511 /* This function requests the irq for MSI-X and registers the handlers per irq.
1512 * Currently, it requests only 1 irq for all interrupt causes.
1513 */
static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
1515 {
1516 int err;
1517 int num_irqs, i;
1518
1519 for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
1520 data->msix_entries[i].entry = i;
1521
1522 num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
1523 BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
1524 if (num_irqs < 0)
1525 return num_irqs;
1526
1527 data->alloc_vecs = num_irqs;
1528 data->msix_enabled = 1;
1529 data->def_irq = 0;
1530
1531 /* setup irq handler */
1532 for (i = 0; i < data->alloc_vecs; i++) {
1533 struct msix_entry *msix_entry;
1534
1535 msix_entry = &data->msix_entries[i];
1536 msix_entry->vector = pci_irq_vector(data->pdev, i);
1537
1538 err = devm_request_threaded_irq(&data->pdev->dev,
1539 msix_entry->vector,
1540 btintel_pcie_msix_isr,
1541 btintel_pcie_irq_msix_handler,
1542 IRQF_SHARED,
1543 KBUILD_MODNAME,
1544 msix_entry);
1545 if (err) {
1546 pci_free_irq_vectors(data->pdev);
1547 data->alloc_vecs = 0;
1548 return err;
1549 }
1550 }
1551 return 0;
1552 }
1553
1554 struct btintel_pcie_causes_list {
1555 u32 cause;
1556 u32 mask_reg;
1557 u8 cause_num;
1558 };
1559
1560 static struct btintel_pcie_causes_list causes_list[] = {
1561 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
1562 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
1563 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
1564 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
1565 };
1566
1567 /* This function configures the interrupt masks for both HW_INT_CAUSES and
1568 * FH_INT_CAUSES which are meaningful to us.
1569 *
* After resetting the BT function via PCIe FLR or a FUNC_CTRL reset, the
* driver needs to call this function again, since the masks are reset to
* 0xFFFFFFFF.
1573 */
static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
1575 {
1576 int i;
1577 int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;
1578
1579 /* Set Non Auto Clear Cause */
1580 for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
1581 btintel_pcie_wr_reg8(data,
1582 BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
1583 val);
1584 btintel_pcie_clr_reg_bits(data,
1585 causes_list[i].mask_reg,
1586 causes_list[i].cause);
1587 }
1588
1589 /* Save the initial interrupt mask */
1590 data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
1591 data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
1592 }
1593
static int btintel_pcie_config_pcie(struct pci_dev *pdev,
struct btintel_pcie_data *data)
1596 {
1597 int err;
1598
1599 err = pcim_enable_device(pdev);
1600 if (err)
1601 return err;
1602
1603 pci_set_master(pdev);
1604
1605 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1606 if (err) {
1607 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1608 if (err)
1609 return err;
1610 }
1611
1612 data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
1613 if (IS_ERR(data->base_addr))
1614 return PTR_ERR(data->base_addr);
1615
1616 err = btintel_pcie_setup_irq(data);
1617 if (err)
1618 return err;
1619
1620 /* Configure MSI-X with causes list */
1621 btintel_pcie_config_msix(data);
1622
1623 return 0;
1624 }
1625
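/* Fill the context information (CI) structure shared with the firmware:
* DMA addresses and sizes of the index arrays, the TX/RX descriptor queues
* and the DBGC fragment
*/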
static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
struct ctx_info *ci)
1628 {
1629 ci->version = 0x1;
1630 ci->size = sizeof(*ci);
1631 ci->config = 0x0000;
1632 ci->addr_cr_hia = data->ia.cr_hia_p_addr;
1633 ci->addr_tr_tia = data->ia.tr_tia_p_addr;
1634 ci->addr_cr_tia = data->ia.cr_tia_p_addr;
1635 ci->addr_tr_hia = data->ia.tr_hia_p_addr;
1636 ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
1637 ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
1638 ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
1639 ci->addr_tfdq = data->txq.tfds_p_addr;
1640 ci->num_tfdq = data->txq.count;
1641 ci->num_urbdq0 = data->txq.count;
1642 ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
1643 ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
1644 ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
1645 ci->addr_frbdq = data->rxq.frbds_p_addr;
1646 ci->num_frbdq = data->rxq.count;
1647 ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1648 ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
1649 ci->num_urbdq1 = data->rxq.count;
1650 ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1651
1652 ci->dbg_output_mode = 0x01;
1653 ci->dbgc_addr = data->dbgc.frag_p_addr;
1654 ci->dbgc_size = data->dbgc.frag_size;
1655 ci->dbg_preset = 0x00;
1656 }
1657
static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
struct txq *txq)
1660 {
1661 /* Free data buffers first */
1662 dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1663 txq->buf_v_addr, txq->buf_p_addr);
1664 kfree(txq->bufs);
1665 }
1666
static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
struct txq *txq)
1669 {
1670 int i;
1671 struct data_buf *buf;
1672
1673 /* Allocate the same number of buffers as the descriptor */
1674 txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL);
1675 if (!txq->bufs)
1676 return -ENOMEM;
1677
1678 /* Allocate full chunk of data buffer for DMA first and do indexing and
1679 * initialization next, so it can be freed easily
1680 */
1681 txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1682 txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1683 &txq->buf_p_addr,
1684 GFP_KERNEL | __GFP_NOWARN);
1685 if (!txq->buf_v_addr) {
1686 kfree(txq->bufs);
1687 return -ENOMEM;
1688 }
1689
1690 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1691 * have virtual address and physical address
1692 */
1693 for (i = 0; i < txq->count; i++) {
1694 buf = &txq->bufs[i];
1695 buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1696 buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1697 }
1698
1699 return 0;
1700 }
1701
static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
struct rxq *rxq)
1704 {
1705 /* Free data buffers first */
1706 dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1707 rxq->buf_v_addr, rxq->buf_p_addr);
1708 kfree(rxq->bufs);
1709 }
1710
static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
struct rxq *rxq)
1713 {
1714 int i;
1715 struct data_buf *buf;
1716
1717 /* Allocate the same number of buffers as the descriptor */
1718 rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL);
1719 if (!rxq->bufs)
1720 return -ENOMEM;
1721
1722 /* Allocate full chunk of data buffer for DMA first and do indexing and
1723 * initialization next, so it can be freed easily
1724 */
1725 rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1726 rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1727 &rxq->buf_p_addr,
1728 GFP_KERNEL | __GFP_NOWARN);
1729 if (!rxq->buf_v_addr) {
1730 kfree(rxq->bufs);
1731 return -ENOMEM;
1732 }
1733
1734 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1735 * have virtual address and physical address
1736 */
1737 for (i = 0; i < rxq->count; i++) {
1738 buf = &rxq->bufs[i];
1739 buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1740 buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1741 }
1742
1743 return 0;
1744 }
1745
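/* btintel_pcie_setup_ia() - carve the index arrays out of one DMA region.
 * Four arrays of BTINTEL_PCIE_NUM_QUEUES u16 entries are laid out back to
 * back from p_addr/v_addr, in the order given by the offsets below:
 *
 *   | TR head | TR tail | CR head | CR tail |
 *
 * TR and CR appear to denote the transfer and completion rings
 * respectively.
 */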
static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
				  dma_addr_t p_addr, void *v_addr,
				  struct ia *ia)
{
	/* TR Head Index Array */
	ia->tr_hia_p_addr = p_addr;
	ia->tr_hia = v_addr;

	/* TR Tail Index Array */
	ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
	ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;

	/* CR Head Index Array */
	ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
	ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);

	/* CR Tail Index Array */
	ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
	ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
}

static void btintel_pcie_free(struct btintel_pcie_data *data)
{
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
	btintel_pcie_free_txq_bufs(data, &data->txq);

	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
}

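/* The single dma_pool allocation made in btintel_pcie_alloc() below is
 * carved up in the following order (a sketch; sizes depend on the
 * descriptor structures and BTINTEL_PCIE_*_DESCS_COUNT):
 *
 *   | TFDs | URBD0s | FRBDs | URBD1s | 4 index arrays | ctx_info |
 */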
/* Allocate tx and rx queues, any related data structures and buffers.
 */
static int btintel_pcie_alloc(struct btintel_pcie_data *data)
{
	int err = 0;
	size_t total;
	dma_addr_t p_addr;
	void *v_addr;

	/* Allocate the chunk of DMA memory for descriptors, index array, and
	 * context information, instead of allocating individually.
	 * The DMA memory for data buffers is allocated while setting up each
	 * queue.
	 *
	 * Total size is sum of the following
	 *  + size of TFD * Number of descriptors in queue
	 *  + size of URBD0 * Number of descriptors in queue
	 *  + size of FRBD * Number of descriptors in queue
	 *  + size of URBD1 * Number of descriptors in queue
	 *  + size of index * Number of queues(2) * type of index array(4)
	 *  + size of context information
	 */
	total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
	total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Add the sum of size of index array and size of ci struct */
	total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);

	/* Allocate DMA Pool */
	data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
					 total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
	if (!data->dma_pool) {
		err = -ENOMEM;
		goto exit_error;
	}

	v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
				 &p_addr);
	if (!v_addr) {
		dma_pool_destroy(data->dma_pool);
		err = -ENOMEM;
		goto exit_error;
	}

	data->dma_p_addr = p_addr;
	data->dma_v_addr = v_addr;

	/* Setup descriptor count */
	data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
	data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;

	/* Setup tfds */
	data->txq.tfds_p_addr = p_addr;
	data->txq.tfds = v_addr;

	p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup urbd0 */
	data->txq.urbd0s_p_addr = p_addr;
	data->txq.urbd0s = v_addr;

	p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);

	/* Setup FRBD */
	data->rxq.frbds_p_addr = p_addr;
	data->rxq.frbds = v_addr;

	p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup urbd1 */
	data->rxq.urbd1s_p_addr = p_addr;
	data->rxq.urbd1s = v_addr;

	p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
	v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);

	/* Setup data buffers for txq */
	err = btintel_pcie_setup_txq_bufs(data, &data->txq);
	if (err)
		goto exit_error_pool;

	/* Setup data buffers for rxq */
	err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
	if (err)
		goto exit_error_txq;

	/* Setup Index Array */
	btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);

	/* Setup data buffers for dbgc */
	err = btintel_pcie_setup_dbgc(data);
	if (err)
		goto exit_error_rxq;

	/* Setup Context Information */
	p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
	v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;

	data->ci = v_addr;
	data->ci_p_addr = p_addr;

	/* Initialize the CI */
	btintel_pcie_init_ci(data, data->ci);

	return 0;

exit_error_rxq:
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
exit_error_txq:
	btintel_pcie_free_txq_bufs(data, &data->txq);
exit_error_pool:
	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
exit_error:
	return err;
}

static int btintel_pcie_open(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

static int btintel_pcie_close(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

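/* Synthesize an HCI Command Complete event for @opcode and feed it to the
 * HCI core. Used by btintel_pcie_send_frame() for commands the firmware
 * acknowledges without a real Command Complete (the Intel reset into
 * operational firmware), so the HCI core's command flow control does not
 * stall.
 */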
static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	*(u8 *)skb_put(skb, 1) = 0x00;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}

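/* On this transport every HCI packet carries a 4-byte packet type
 * (BTINTEL_PCIE_HCI_*_PKT) in place of the single H:4 type byte; see the
 * comment inside btintel_pcie_send_frame() below. For an HCI command the
 * frame that btintel_pcie_send_sync() builds for the device is therefore
 * expected to look roughly like:
 *
 *   | type (4 bytes) | opcode (2 bytes, LE) | plen (1 byte) | params ... |
 */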
static int btintel_pcie_send_frame(struct hci_dev *hdev,
				   struct sk_buff *skb)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	struct hci_command_hdr *cmd;
	__u16 opcode = ~0;
	int ret;
	u32 type;

	if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
		return -ENODEV;

	/* Due to the fw limitation, the type header of the packet should be
	 * 4 bytes unlike 1 byte for UART. In UART, the firmware can read
	 * the first byte to get the packet type and redirect the rest of data
	 * packet to the right handler.
	 *
	 * But for PCIe, THF(Transfer Flow Handler) fetches the 4 bytes of data
	 * from DMA memory and by the time it reads the first 4 bytes, it has
	 * already consumed some part of packet. Thus the packet type indicator
	 * for iBT PCIe is 4 bytes.
	 *
	 * Luckily, when HCI core creates the skb, it allocates 8 bytes of
	 * head room for profile and driver use, and before sending the data
	 * to the device, append the iBT PCIe packet type in the front.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		type = BTINTEL_PCIE_HCI_CMD_PKT;
		cmd = (void *)skb->data;
		opcode = le16_to_cpu(cmd->opcode);
		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
			/* When the BTINTEL_HCI_OP_RESET command is issued to
			 * boot into the operational firmware, it will actually
			 * not send a command complete event. To keep the flow
			 * control working inject that event here.
			 */
			if (opcode == BTINTEL_HCI_OP_RESET)
				btintel_pcie_inject_cmd_complete(hdev, opcode);
		}

		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		type = BTINTEL_PCIE_HCI_ACL_PKT;
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		type = BTINTEL_PCIE_HCI_SCO_PKT;
		hdev->stat.sco_tx++;
		break;
	case HCI_ISODATA_PKT:
		type = BTINTEL_PCIE_HCI_ISO_PKT;
		break;
	default:
		bt_dev_err(hdev, "Unknown HCI packet type");
		return -EILSEQ;
	}

	ret = btintel_pcie_send_sync(data, skb, type, opcode);
	if (ret) {
		hdev->stat.err_tx++;
		bt_dev_err(hdev, "Failed to send frame (%d)", ret);
		goto exit_error;
	}

	hdev->stat.byte_tx += skb->len;
	kfree_skb(skb);

exit_error:
	return ret;
}

static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
{
	struct hci_dev *hdev;

	hdev = data->hdev;
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
	data->hdev = NULL;
}

static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
{
	spin_lock(&data->irq_lock);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
	spin_unlock(&data->irq_lock);
}

static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
{
	for (int i = 0; i < data->alloc_vecs; i++)
		synchronize_irq(data->msix_entries[i].vector);
}

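/* btintel_pcie_setup_internal() - single firmware bring-up attempt: read
 * the Intel version information (vendor command 0xfc05), apply the common
 * and TLV-specific quirks, validate the hardware platform/variant and
 * download the operational firmware through the bootloader TLV path. The
 * parsed version fields are also cached in data->dmp_hdr for coredumps.
 */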
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	const u8 param[1] = { 0xFF };
	struct intel_version_tlv ver_tlv;
	struct sk_buff *skb;
	int err;

	BT_DBG("%s", hdev->name);

	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* Check the status */
	if (skb->data[0]) {
		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
			   skb->data[0]);
		err = -EIO;
		goto exit_error;
	}

	/* Apply the common HCI quirks for Intel device */
	hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
	hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
	hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);

	/* Set up the quality report callback for Intel devices */
	hdev->set_quality_report = btintel_set_quality_report;

	memset(&ver_tlv, 0, sizeof(ver_tlv));
	/* For TLV type device, parse the tlv data */
	err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
	if (err) {
		bt_dev_err(hdev, "Failed to parse TLV version information");
		goto exit_error;
	}

	switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
	case 0x37:
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
			   INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come
	 * along.
	 */
	switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
	case 0x1e:	/* BzrI */
	case 0x1f:	/* ScP */
	case 0x22:	/* BzrIW */
		/* Display version information of TLV type */
		btintel_version_info_tlv(hdev, &ver_tlv);

		/* Apply the device specific HCI quirks for TLV based devices
		 *
		 * All TLV based devices support WBS
		 */
		hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);

		/* Setup MSFT Extension support */
		btintel_set_msft_opcode(hdev,
					INTEL_HW_VARIANT(ver_tlv.cnvi_bt));

		err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
		if (err)
			goto exit_error;
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
			   INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
	data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
	data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
	data->dmp_hdr.fw_build_type = ver_tlv.build_type;
	data->dmp_hdr.fw_build_num = ver_tlv.build_num;
	data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;

	if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
		data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;

	btintel_print_fseq_info(hdev);
exit_error:
	kfree_skb(skb);

	return err;
}

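/* hdev->setup callback. Wraps btintel_pcie_setup_internal() in a single
 * retry: if the first firmware download attempt fails, the device is
 * soft-reset (shared reset, index arrays cleared, interrupts and MSI-X
 * reconfigured) and the download is attempted once more before giving up.
 */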
static int btintel_pcie_setup(struct hci_dev *hdev)
{
	int err, fw_dl_retry = 0;
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
		bt_dev_err(hdev, "Firmware download retry count: %d",
			   fw_dl_retry);
		btintel_pcie_dump_debug_registers(hdev);
		btintel_pcie_disable_interrupts(data);
		btintel_pcie_synchronize_irqs(data);
		err = btintel_pcie_reset_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to do shr reset: %d", err);
			break;
		}
		usleep_range(10000, 12000);
		btintel_pcie_reset_ia(data);
		btintel_pcie_enable_interrupts(data);
		btintel_pcie_config_msix(data);
		err = btintel_pcie_enable_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to enable hardware: %d", err);
			break;
		}
		btintel_pcie_start_rx(data);
	}

	if (!err)
		set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags);
	return err;
}

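/* Look up the recovery bookkeeping entry for this PCI device (keyed by its
 * BDF name) or, if none exists yet, allocate one atomically and add it to
 * btintel_pcie_recovery_list. Entries are only freed at module exit, so
 * the retry accounting survives hci_dev re-registration after recovery.
 */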
static struct btintel_pcie_dev_recovery *
btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
{
	struct btintel_pcie_dev_recovery *tmp, *data = NULL;
	const char *name = pci_name(pdev);
	const size_t name_len = strlen(name) + 1;
	struct hci_dev *hdev = to_hci_dev(dev);

	spin_lock(&btintel_pcie_recovery_lock);
	list_for_each_entry(tmp, &btintel_pcie_recovery_list, list) {
		if (strcmp(tmp->name, name))
			continue;
		data = tmp;
		break;
	}
	spin_unlock(&btintel_pcie_recovery_lock);

	if (data) {
		bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name);
		return data;
	}

	data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
	if (!data)
		return NULL;

	strscpy(data->name, name, name_len);
	spin_lock(&btintel_pcie_recovery_lock);
	list_add_tail(&data->list, &btintel_pcie_recovery_list);
	spin_unlock(&btintel_pcie_recovery_lock);

	return data;
}

static void btintel_pcie_free_restart_list(void)
{
	struct btintel_pcie_dev_recovery *tmp;

	while ((tmp = list_first_entry_or_null(&btintel_pcie_recovery_list,
					       typeof(*tmp), list))) {
		list_del(&tmp->list);
		kfree(tmp);
	}
}

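/* Track how often recovery has been attempted for this device. The first
 * error opens a BTINTEL_PCIE_RESET_WINDOW_SECS window; further errors
 * inside that window bump the count up to the FLR retry limit, while an
 * error arriving after the window has elapsed resets the bookkeeping.
 */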
static void btintel_pcie_inc_recovery_count(struct pci_dev *pdev,
					    struct device *dev)
{
	struct btintel_pcie_dev_recovery *data;
	time64_t retry_window;

	data = btintel_pcie_get_recovery(pdev, dev);
	if (!data)
		return;

	retry_window = ktime_get_boottime_seconds() - data->last_error;
	if (data->count == 0) {
		data->last_error = ktime_get_boottime_seconds();
		data->count++;
	} else if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
		   data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) {
		data->count++;
	} else if (retry_window > BTINTEL_PCIE_RESET_WINDOW_SECS) {
		data->last_error = 0;
		data->count = 0;
	}
}

static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data);

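/* Recovery worker scheduled from btintel_pcie_reset(). Under the PCI
 * rescan/remove lock it tears down the current hci_dev, performs a
 * function level reset of the device, re-enables interrupts/MSI-X and the
 * Bluetooth function, restarts RX and finally registers a fresh hci_dev so
 * the stack sees a clean controller again.
 */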
static void btintel_pcie_removal_work(struct work_struct *wk)
{
	struct btintel_pcie_removal *removal =
		container_of(wk, struct btintel_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	struct btintel_pcie_data *data;
	int err;

	pci_lock_rescan_remove();

	if (!pdev->bus)
		goto error;

	data = pci_get_drvdata(pdev);

	btintel_pcie_disable_interrupts(data);
	btintel_pcie_synchronize_irqs(data);

	flush_work(&data->rx_work);

	bt_dev_dbg(data->hdev, "Release bluetooth interface");
	btintel_pcie_release_hdev(data);

	err = pci_reset_function(pdev);
	if (err) {
		BT_ERR("Failed resetting the pcie device (%d)", err);
		goto error;
	}

	btintel_pcie_enable_interrupts(data);
	btintel_pcie_config_msix(data);

	err = btintel_pcie_enable_bt(data);
	if (err) {
		BT_ERR("Failed to enable bluetooth hardware after reset (%d)",
		       err);
		goto error;
	}

	btintel_pcie_reset_ia(data);
	btintel_pcie_start_rx(data);
	data->flags = 0;

	err = btintel_pcie_setup_hdev(data);
	if (err) {
		BT_ERR("Failed registering hdev (%d)", err);
		goto error;
	}
error:
	pci_dev_put(pdev);
	pci_unlock_rescan_remove();
	kfree(removal);
}

static void btintel_pcie_reset(struct hci_dev *hdev)
{
	struct btintel_pcie_removal *removal;
	struct btintel_pcie_data *data;

	data = hci_get_drvdata(hdev);

	if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags))
		return;

	if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags))
		return;

	removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
	if (!removal)
		return;

	removal->pdev = data->pdev;
	INIT_WORK(&removal->work, btintel_pcie_removal_work);
	pci_dev_get(removal->pdev);
	schedule_work(&removal->work);
}

static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
{
	struct btintel_pcie_dev_recovery *data;
	struct btintel_pcie_data *dev_data = hci_get_drvdata(hdev);
	struct pci_dev *pdev = dev_data->pdev;
	time64_t retry_window;

	if (code == 0x13) {
		bt_dev_err(hdev, "Encountered top exception");
		return;
	}

	data = btintel_pcie_get_recovery(pdev, &hdev->dev);
	if (!data)
		return;

	retry_window = ktime_get_boottime_seconds() - data->last_error;

	if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
	    data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) {
		bt_dev_err(hdev, "Exhausted maximum of %d recovery attempts (count: %d)",
			   BTINTEL_PCIE_FLR_MAX_RETRY, data->count);
		bt_dev_dbg(hdev, "Boot time: %lld seconds",
			   ktime_get_boottime_seconds());
		bt_dev_dbg(hdev, "last error at: %lld seconds",
			   data->last_error);
		return;
	}
	btintel_pcie_inc_recovery_count(pdev, &hdev->dev);
	btintel_pcie_reset(hdev);
}

static bool btintel_pcie_wakeup(struct hci_dev *hdev)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	return device_may_wakeup(&data->pdev->dev);
}

static const struct {
	u16 opcode;
	const char *desc;
} btintel_pcie_hci_drv_supported_commands[] = {
	/* Common commands */
	{ HCI_DRV_OP_READ_INFO, "Read Info" },
};

static int btintel_pcie_hci_drv_read_info(struct hci_dev *hdev, void *data,
					  u16 data_len)
{
	struct hci_drv_rp_read_info *rp;
	size_t rp_size;
	int err, i;
	u16 opcode, num_supported_commands =
		ARRAY_SIZE(btintel_pcie_hci_drv_supported_commands);

	rp_size = sizeof(*rp) + num_supported_commands * 2;

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	strscpy_pad(rp->driver_name, KBUILD_MODNAME);

	rp->num_supported_commands = cpu_to_le16(num_supported_commands);
	for (i = 0; i < num_supported_commands; i++) {
		opcode = btintel_pcie_hci_drv_supported_commands[i].opcode;
		bt_dev_dbg(hdev,
			   "Supported HCI Drv command (0x%02x|0x%04x): %s",
			   hci_opcode_ogf(opcode),
			   hci_opcode_ocf(opcode),
			   btintel_pcie_hci_drv_supported_commands[i].desc);
		rp->supported_commands[i] = cpu_to_le16(opcode);
	}

	err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
				   HCI_DRV_STATUS_SUCCESS,
				   rp, rp_size);

	kfree(rp);
	return err;
}

static const struct hci_drv_handler btintel_pcie_hci_drv_common_handlers[] = {
	{ btintel_pcie_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
};

static const struct hci_drv_handler btintel_pcie_hci_drv_specific_handlers[] = {};

static struct hci_drv btintel_pcie_hci_drv = {
	.common_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_common_handlers),
	.common_handlers = btintel_pcie_hci_drv_common_handlers,
	.specific_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_specific_handlers),
	.specific_handlers = btintel_pcie_hci_drv_specific_handlers,
};

static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
	int err;
	struct hci_dev *hdev;

	hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_PCI;
	hci_set_drvdata(hdev, data);

	data->hdev = hdev;
	SET_HCIDEV_DEV(hdev, &data->pdev->dev);

	hdev->manufacturer = 2;	/* Intel Corp. (Bluetooth SIG company ID 2) */
	hdev->open = btintel_pcie_open;
	hdev->close = btintel_pcie_close;
	hdev->send = btintel_pcie_send_frame;
	hdev->setup = btintel_pcie_setup;
	hdev->shutdown = btintel_shutdown_combined;
	hdev->hw_error = btintel_pcie_hw_error;
	hdev->set_diag = btintel_set_diag;
	hdev->set_bdaddr = btintel_set_bdaddr;
	hdev->reset = btintel_pcie_reset;
	hdev->wakeup = btintel_pcie_wakeup;
	hdev->hci_drv = &btintel_pcie_hci_drv;

	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("Failed to register to hdev (%d)", err);
		goto exit_error;
	}

	data->dmp_hdr.driver_name = KBUILD_MODNAME;
	return 0;

exit_error:
	hci_free_dev(hdev);
	return err;
}

static int btintel_pcie_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err;
	struct btintel_pcie_data *data;

	if (!pdev)
		return -ENODEV;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

	spin_lock_init(&data->irq_lock);
	spin_lock_init(&data->hci_rx_lock);

	init_waitqueue_head(&data->gp0_wait_q);
	data->gp0_received = false;

	init_waitqueue_head(&data->tx_wait_q);
	data->tx_wait_done = false;

	data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
	if (!data->workqueue)
		return -ENOMEM;

	skb_queue_head_init(&data->rx_skb_q);
	INIT_WORK(&data->rx_work, btintel_pcie_rx_work);

	data->boot_stage_cache = 0x00;
	data->img_resp_cache = 0x00;

	err = btintel_pcie_config_pcie(pdev, data);
	if (err)
		goto exit_error;

	pci_set_drvdata(pdev, data);

	err = btintel_pcie_alloc(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_enable_bt(data);
	if (err)
		goto exit_error;

	/* CNV information (CNVi and CNVr) is in CSR */
	data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);

	data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);

	err = btintel_pcie_start_rx(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_setup_hdev(data);
	if (err)
		goto exit_error;

	bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
		   data->cnvr);
	return 0;

exit_error:
	/* reset device before exit */
	btintel_pcie_reset_bt(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);

	return err;
}

static void btintel_pcie_remove(struct pci_dev *pdev)
{
	struct btintel_pcie_data *data;

	data = pci_get_drvdata(pdev);

	btintel_pcie_disable_interrupts(data);

	btintel_pcie_synchronize_irqs(data);

	flush_work(&data->rx_work);

	btintel_pcie_reset_bt(data);
	for (int i = 0; i < data->alloc_vecs; i++) {
		struct msix_entry *msix_entry;

		msix_entry = &data->msix_entries[i];
		free_irq(msix_entry->vector, msix_entry);
	}

	pci_free_irq_vectors(pdev);

	btintel_pcie_release_hdev(data);

	destroy_workqueue(data->workqueue);

	btintel_pcie_free(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_DEV_COREDUMP
static void btintel_pcie_coredump(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data = pci_get_drvdata(pdev);

	if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
		return;

	data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
	queue_work(data->workqueue, &data->rx_work);
}
#endif

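/* Request a D-state transition (D0/D3) via the sleep control register and
 * wait for the alive (GP0) interrupt. Since a hardware bug may cause that
 * interrupt to be missed, a timeout is tolerated by checking whether the
 * controller already reached the requested state, and the request is
 * retried up to BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES times.
 */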
static int btintel_pcie_set_dxstate(struct btintel_pcie_data *data, u32 dxstate)
{
	int retry = 0, status;
	u32 dx_intr_timeout_ms = 200;

	do {
		data->gp0_received = false;

		btintel_pcie_wr_sleep_cntrl(data, dxstate);

		status = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
					    msecs_to_jiffies(dx_intr_timeout_ms));

		if (status)
			return 0;

		bt_dev_warn(data->hdev,
			    "Timeout (%u ms) on alive interrupt for D%d entry, retry count %d",
			    dx_intr_timeout_ms, dxstate, retry);

		/* clear gp0 cause */
		btintel_pcie_clr_reg_bits(data,
					  BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES,
					  BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0);

		/* A hardware bug may cause the alive interrupt to be missed.
		 * Check if the controller reached the expected state and retry
		 * the operation only if it hasn't.
		 */
		if (dxstate == BTINTEL_PCIE_STATE_D0) {
			if (btintel_pcie_in_d0(data))
				return 0;
		} else {
			if (btintel_pcie_in_d3(data))
				return 0;
		}

	} while (++retry < BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES);

	return -EBUSY;
}

static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data;
	ktime_t start;
	u32 dxstate;
	int err;

	data = pci_get_drvdata(pdev);

	dxstate = (mesg.event == PM_EVENT_SUSPEND ?
		   BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);

	data->pm_sx_event = mesg.event;

	start = ktime_get();

	/* Refer: 6.4.11.7 -> Platform power management */
	err = btintel_pcie_set_dxstate(data, dxstate);
	if (err)
		return err;

	bt_dev_dbg(data->hdev,
		   "device entered into d3 state from d0 in %lld us",
		   ktime_to_us(ktime_get() - start));
	return err;
}

static int btintel_pcie_suspend(struct device *dev)
{
	return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
}

static int btintel_pcie_hibernate(struct device *dev)
{
	return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
}

static int btintel_pcie_freeze(struct device *dev)
{
	return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
}

static int btintel_pcie_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct btintel_pcie_data *data;
	ktime_t start;
	int err;

	data = pci_get_drvdata(pdev);
	data->gp0_received = false;

	start = ktime_get();

	/* When the system enters S4 (hibernate) mode, the bluetooth device
	 * loses power, which results in the erasure of its loaded firmware.
	 * Consequently, a function level reset (FLR) is required on system
	 * resume to bring the controller back into an operational state by
	 * initiating a new firmware download.
	 */
	if (data->pm_sx_event == PM_EVENT_FREEZE ||
	    data->pm_sx_event == PM_EVENT_HIBERNATE) {
		set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
		btintel_pcie_reset(data->hdev);
		return 0;
	}

	/* Refer: 6.4.11.7 -> Platform power management */
	err = btintel_pcie_set_dxstate(data, BTINTEL_PCIE_STATE_D0);
	if (err == 0) {
		bt_dev_dbg(data->hdev,
			   "device entered into d0 state from d3 in %lld us",
			   ktime_to_us(ktime_get() - start));
		return err;
	}

	/* Trigger a function level reset if the controller is in an error
	 * state during resume() to bring it back to operational mode.
	 */
	data->boot_stage_cache = btintel_pcie_rd_reg32(data,
						       BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
	if (btintel_pcie_in_error(data) ||
	    btintel_pcie_in_device_halt(data)) {
		bt_dev_err(data->hdev, "Controller in error state for D0 entry");
		if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS,
				      &data->flags)) {
			data->dmp_hdr.trigger_reason =
				BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
			queue_work(data->workqueue, &data->rx_work);
		}
		set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
		btintel_pcie_reset(data->hdev);
	}
	return err;
}

static const struct dev_pm_ops btintel_pcie_pm_ops = {
	.suspend = btintel_pcie_suspend,
	.resume = btintel_pcie_resume,
	.freeze = btintel_pcie_freeze,
	.thaw = btintel_pcie_resume,
	.poweroff = btintel_pcie_hibernate,
	.restore = btintel_pcie_resume,
};

static struct pci_driver btintel_pcie_driver = {
	.name = KBUILD_MODNAME,
	.id_table = btintel_pcie_table,
	.probe = btintel_pcie_probe,
	.remove = btintel_pcie_remove,
	.driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
#ifdef CONFIG_DEV_COREDUMP
	.driver.coredump = btintel_pcie_coredump
#endif
};

static int __init btintel_pcie_init(void)
{
	return pci_register_driver(&btintel_pcie_driver);
}

static void __exit btintel_pcie_exit(void)
{
	pci_unregister_driver(&btintel_pcie_driver);
	btintel_pcie_free_restart_list();
}

module_init(btintel_pcie_init);
module_exit(btintel_pcie_exit);

MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");