1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Bluetooth support for Intel PCIe devices
5 *
6 * Copyright (C) 2024 Intel Corporation
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/firmware.h>
12 #include <linux/pci.h>
13 #include <linux/wait.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16
17 #include <linux/unaligned.h>
18 #include <linux/devcoredump.h>
19
20 #include <net/bluetooth/bluetooth.h>
21 #include <net/bluetooth/hci_core.h>
22 #include <net/bluetooth/hci_drv.h>
23
24 #include "btintel.h"
25 #include "btintel_pcie.h"
26
27 #define VERSION "0.1"
28
29 #define BTINTEL_PCI_DEVICE(dev, subdev) \
30 .vendor = PCI_VENDOR_ID_INTEL, \
31 .device = (dev), \
32 .subvendor = PCI_ANY_ID, \
33 .subdevice = (subdev), \
34 .driver_data = 0
35
36 #define POLL_INTERVAL_US 10
37
38 /* Intel Bluetooth PCIe device id table */
39 static const struct pci_device_id btintel_pcie_table[] = {
40 /* BlazarI, Wildcat Lake */
41 { BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
42 /* BlazarI, Lunar Lake */
43 { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
44 /* Scorpious, Panther Lake-H484 */
45 { BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
46 /* Scorpious, Panther Lake-H404 */
47 { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
48 { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
51
52 struct btintel_pcie_dev_recovery {
53 struct list_head list;
54 u8 count;
55 time64_t last_error;
56 char name[];
57 };
58
59 /* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
60 #define BTINTEL_PCIE_HCI_TYPE_LEN 4
61 #define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
62 #define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
63 #define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
64 #define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
65 #define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
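/* Framing illustration (little-endian host assumed): an HCI command lands in
 * the TX DMA buffer roughly as
 *
 *   [ 01 00 00 00 | opcode (2 bytes) | plen (1 byte) | params ... ]
 *
 * The 4-byte type word is prepended in btintel_pcie_send_sync() via skb_push()
 * and stripped again on the RX path in btintel_pcie_recv_frame().
 */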
66
67 #define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
68
69 #define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
70 #define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
71
72 #define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
73 #define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
74
76
77 #define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
78 #define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
79
80 #define BTINTEL_PCIE_RESET_WINDOW_SECS 5
81 #define BTINTEL_PCIE_FLR_MAX_RETRY 1
82
83 /* Alive interrupt context */
84 enum {
85 BTINTEL_PCIE_ROM,
86 BTINTEL_PCIE_FW_DL,
87 BTINTEL_PCIE_HCI_RESET,
88 BTINTEL_PCIE_INTEL_HCI_RESET1,
89 BTINTEL_PCIE_INTEL_HCI_RESET2,
90 BTINTEL_PCIE_D0,
91 BTINTEL_PCIE_D3
92 };
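/* Rough, non-exhaustive sketch of the alive context transitions as driven by
 * btintel_pcie_msix_gp0_handler() below:
 *
 *   ROM              -> FW_DL   (ROM boot finished, firmware download starts)
 *   HCI_RESET        -> D0      (HCI_OP_RESET completed on operational image)
 *   INTEL_HCI_RESET1 -> FW_DL   (vendor reset landed back in IML)
 *   INTEL_HCI_RESET2 -> D0      (vendor reset completed, D0 entered)
 *   D0              <-> D3      (runtime power state transitions)
 */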
93
94 /* Structure for dbgc fragment buffer
95 * @buf_addr_lsb: LSB of the buffer's physical address
96 * @buf_addr_msb: MSB of the buffer's physical address
97 * @buf_size: Total size of the buffer
98 */
99 struct btintel_pcie_dbgc_ctxt_buf {
100 u32 buf_addr_lsb;
101 u32 buf_addr_msb;
102 u32 buf_size;
103 };
104
105 /* Structure for dbgc fragment
106 * @magic_num: 0XA5A5A5A5
107 * @ver: For Driver-FW compatibility
108 * @total_size: Total size of the payload debug info
109 * @num_buf: Num of allocated debug bufs
110 * @bufs: All buffer's addresses and sizes
111 */
112 struct btintel_pcie_dbgc_ctxt {
113 u32 magic_num;
114 u32 ver;
115 u32 total_size;
116 u32 num_buf;
117 struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
118 };
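/* For illustration, the fragment written to the coherent DMA area by
 * btintel_pcie_setup_dbgc() looks roughly like this in memory (u32 fields):
 *
 *   | magic_num | ver | total_size | num_buf |
 *   | buf[0]: addr_lsb, addr_msb, size | buf[1]: ... |
 *
 * where each buf[] entry describes one BTINTEL_PCIE_DBGC_BUFFER_SIZE debug
 * buffer carved out of a single coherent allocation.
 */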
119
120 struct btintel_pcie_removal {
121 struct pci_dev *pdev;
122 struct work_struct work;
123 };
124
125 static LIST_HEAD(btintel_pcie_recovery_list);
126 static DEFINE_SPINLOCK(btintel_pcie_recovery_lock);
127
128 static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
129 {
130 switch (alive_intr_ctxt) {
131 case BTINTEL_PCIE_ROM:
132 return "rom";
133 case BTINTEL_PCIE_FW_DL:
134 return "fw_dl";
135 case BTINTEL_PCIE_D0:
136 return "d0";
137 case BTINTEL_PCIE_D3:
138 return "d3";
139 case BTINTEL_PCIE_HCI_RESET:
140 return "hci_reset";
141 case BTINTEL_PCIE_INTEL_HCI_RESET1:
142 return "intel_reset1";
143 case BTINTEL_PCIE_INTEL_HCI_RESET2:
144 return "intel_reset2";
145 default:
146 return "unknown";
147 }
148 }
149
150 /* This function initializes the memory for DBGC buffers and formats the
151 * DBGC fragment, which consists of header info and each DBGC buffer's LSB,
152 * MSB and size as the payload
153 */
154 static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
155 {
156 struct btintel_pcie_dbgc_ctxt db_frag;
157 struct data_buf *buf;
158 int i;
159
160 data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
161 data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
162 sizeof(*buf), GFP_KERNEL);
163 if (!data->dbgc.bufs)
164 return -ENOMEM;
165
166 data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
167 data->dbgc.count *
168 BTINTEL_PCIE_DBGC_BUFFER_SIZE,
169 &data->dbgc.buf_p_addr,
170 GFP_KERNEL | __GFP_NOWARN);
171 if (!data->dbgc.buf_v_addr)
172 return -ENOMEM;
173
174 data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
175 sizeof(struct btintel_pcie_dbgc_ctxt),
176 &data->dbgc.frag_p_addr,
177 GFP_KERNEL | __GFP_NOWARN);
178 if (!data->dbgc.frag_v_addr)
179 return -ENOMEM;
180
181 data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
182
183 db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
184 db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
185 db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
186 db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
187
188 for (i = 0; i < data->dbgc.count; i++) {
189 buf = &data->dbgc.bufs[i];
190 buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
191 buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
192 db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
193 db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
194 db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
195 }
196
197 memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
198 return 0;
199 }
200
201 static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
202 u16 queue_num)
203 {
204 bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u",
205 queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
206 ia->tr_hia[queue_num], ia->tr_tia[queue_num],
207 ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
208 }
209
210 static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
211 u16 index)
212 {
213 bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
214 index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
215 }
216
217 static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
218 {
219 u8 queue = entry->entry;
220 struct msix_entry *entries = entry - queue;
221
222 return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
223 }
224
225 /* Set the doorbell for TXQ to notify the device that @index (actually index-1)
226 * of the TFD is updated and ready to transmit.
227 */
228 static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
229 {
230 u32 val;
231
232 val = index;
233 val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
234
235 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
236 }
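/* Doorbell value illustration: the descriptor index occupies the low 16 bits
 * and the doorbell vector bits 31:16, i.e. roughly
 *
 *   val = (BTINTEL_PCIE_TX_DB_VEC << 16) | index;
 *   btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
 *
 * btintel_pcie_set_rx_db() below writes the same register with
 * BTINTEL_PCIE_RX_DB_VEC for the RX queue.
 */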
237
238 /* Copy the data to the next (@tfd_index) data buffer and update the TFD (transfer
239 * descriptor) with the data length and the DMA address of the data buffer.
240 */
241 static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
242 struct sk_buff *skb)
243 {
244 struct data_buf *buf;
245 struct tfd *tfd;
246
247 tfd = &txq->tfds[tfd_index];
248 memset(tfd, 0, sizeof(*tfd));
249
250 buf = &txq->bufs[tfd_index];
251
252 tfd->size = skb->len;
253 tfd->addr = buf->data_p_addr;
254
255 /* Copy the outgoing data to DMA buffer */
256 memcpy(buf->data, skb->data, tfd->size);
257 }
258
259 static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
260 {
261 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
262 u16 cr_hia, cr_tia;
263 u32 reg, mbox_reg;
264 struct sk_buff *skb;
265 u8 buf[80];
266
267 skb = alloc_skb(1024, GFP_ATOMIC);
268 if (!skb)
269 return;
270
271 snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
272 bt_dev_dbg(hdev, "%s", buf);
273 skb_put_data(skb, buf, strlen(buf));
274
275 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
276 snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
277 bt_dev_dbg(hdev, "%s", buf);
278 skb_put_data(skb, buf, strlen(buf));
279 data->boot_stage_cache = reg;
280
281 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
282 snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
283 skb_put_data(skb, buf, strlen(buf));
284 bt_dev_dbg(hdev, "%s", buf);
285
286 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
287 snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
288 skb_put_data(skb, buf, strlen(buf));
289 bt_dev_dbg(hdev, "%s", buf);
290
291 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
292 snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
293 skb_put_data(skb, buf, strlen(buf));
294 bt_dev_dbg(hdev, "%s", buf);
295
296 /* Read the mailbox status and registers */
297 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
298 snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
299 skb_put_data(skb, buf, strlen(buf));
300 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
301 mbox_reg = btintel_pcie_rd_reg32(data,
302 BTINTEL_PCIE_CSR_MBOX_1_REG);
303 snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
304 skb_put_data(skb, buf, strlen(buf));
305 bt_dev_dbg(hdev, "%s", buf);
306 }
307
308 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
309 mbox_reg = btintel_pcie_rd_reg32(data,
310 BTINTEL_PCIE_CSR_MBOX_2_REG);
311 snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
312 skb_put_data(skb, buf, strlen(buf));
313 bt_dev_dbg(hdev, "%s", buf);
314 }
315
316 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
317 mbox_reg = btintel_pcie_rd_reg32(data,
318 BTINTEL_PCIE_CSR_MBOX_3_REG);
319 snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
320 skb_put_data(skb, buf, strlen(buf));
321 bt_dev_dbg(hdev, "%s", buf);
322 }
323
324 if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
325 mbox_reg = btintel_pcie_rd_reg32(data,
326 BTINTEL_PCIE_CSR_MBOX_4_REG);
327 snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
328 skb_put_data(skb, buf, strlen(buf));
329 bt_dev_dbg(hdev, "%s", buf);
330 }
331
332 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
333 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
334 snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
335 skb_put_data(skb, buf, strlen(buf));
336 bt_dev_dbg(hdev, "%s", buf);
337
338 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
339 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
340 snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
341 skb_put_data(skb, buf, strlen(buf));
342 bt_dev_dbg(hdev, "%s", buf);
343 snprintf(buf, sizeof(buf), "--------------------------------");
344 bt_dev_dbg(hdev, "%s", buf);
345
346 hci_recv_diag(hdev, skb);
347 }
348
349 static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
350 struct sk_buff *skb, u32 pkt_type, u16 opcode)
351 {
352 int ret;
353 u16 tfd_index;
354 u32 old_ctxt;
355 bool wait_on_alive = false;
356 struct hci_dev *hdev = data->hdev;
357
358 struct txq *txq = &data->txq;
359
360 tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
361
362 if (tfd_index > txq->count)
363 return -ERANGE;
364
365 /* Firmware raises alive interrupt on HCI_OP_RESET or
366 * BTINTEL_HCI_OP_RESET
367 */
368 wait_on_alive = (pkt_type == BTINTEL_PCIE_HCI_CMD_PKT &&
369 (opcode == BTINTEL_HCI_OP_RESET || opcode == HCI_OP_RESET));
370
371 if (wait_on_alive) {
372 data->gp0_received = false;
373 old_ctxt = data->alive_intr_ctxt;
374 data->alive_intr_ctxt =
375 (opcode == BTINTEL_HCI_OP_RESET ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
376 BTINTEL_PCIE_HCI_RESET);
377 bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s",
378 opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
379 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
380 }
381
382 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &pkt_type,
383 BTINTEL_PCIE_HCI_TYPE_LEN);
384
385 /* Prepare for TX. It updates the TFD with the length of data and
386 * address of the DMA buffer, and copy the data to the DMA buffer
387 */
388 btintel_pcie_prepare_tx(txq, tfd_index, skb);
389
390 tfd_index = (tfd_index + 1) % txq->count;
391 data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;
392
393 /* Arm wait event condition */
394 data->tx_wait_done = false;
395
396 /* Set the doorbell to notify the device */
397 btintel_pcie_set_tx_db(data, tfd_index);
398
399 /* Wait for the complete interrupt - URBD0 */
400 ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
401 msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
402 if (!ret) {
403 bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
404 BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
405 btintel_pcie_dump_debug_registers(data->hdev);
406 return -ETIME;
407 }
408
409 if (wait_on_alive) {
410 ret = wait_event_timeout(data->gp0_wait_q,
411 data->gp0_received,
412 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
413 if (!ret) {
414 hdev->stat.err_tx++;
415 bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
416 BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
417 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
418 return -ETIME;
419 }
420 }
421 return 0;
422 }
423
424 /* Set the doorbell for RXQ to notify the device that @index (actually index-1)
425 * is available to receive the data
426 */
427 static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
428 {
429 u32 val;
430
431 val = index;
432 val |= (BTINTEL_PCIE_RX_DB_VEC << 16);
433
434 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
435 }
436
437 /* Update the FRBD (free buffer descriptor) with the @frbd_index and the
438 * DMA address of the free buffer.
439 */
440 static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
441 {
442 struct data_buf *buf;
443 struct frbd *frbd;
444
445 /* Get the buffer of the FRBD for DMA */
446 buf = &rxq->bufs[frbd_index];
447
448 frbd = &rxq->frbds[frbd_index];
449 memset(frbd, 0, sizeof(*frbd));
450
451 /* Update FRBD */
452 frbd->tag = frbd_index;
453 frbd->addr = buf->data_p_addr;
454 }
455
456 static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
457 {
458 u16 frbd_index;
459 struct rxq *rxq = &data->rxq;
460
461 frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];
462
463 if (frbd_index > rxq->count)
464 return -ERANGE;
465
466 /* Prepare for RX submit. It updates the FRBD with the address of DMA
467 * buffer
468 */
469 btintel_pcie_prepare_rx(rxq, frbd_index);
470
471 frbd_index = (frbd_index + 1) % rxq->count;
472 data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
473 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
474
475 /* Set the doorbell to notify the device */
476 btintel_pcie_set_rx_db(data, frbd_index);
477
478 return 0;
479 }
480
481 static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
482 {
483 int i, ret;
484 struct rxq *rxq = &data->rxq;
485
486 /* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around a hardware
487 * issue that leads to a race condition in the firmware.
488 */
489
490 for (i = 0; i < rxq->count - 3; i++) {
491 ret = btintel_pcie_submit_rx(data);
492 if (ret)
493 return ret;
494 }
495
496 return 0;
497 }
498
499 static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
500 {
501 memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
502 memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
503 memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
504 memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
505 }
506
507 static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
508 {
509 u32 reg;
510 int retry = 3;
511
512 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
513
514 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
515 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
516 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
517 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
518
519 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
520
521 do {
522 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
523 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
524 break;
525 usleep_range(10000, 12000);
526
527 } while (--retry > 0);
528 usleep_range(10000, 12000);
529
530 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
531
532 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
533 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
534 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
535 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
536 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
537 usleep_range(10000, 12000);
538
539 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
540 bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
541
542 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
543
544 /* If the shared hardware reset succeeded, the boot stage register will be
545 * cleared to 0
546 */
547 return reg == 0 ? 0 : -ENODEV;
548 }
549
550 static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
551 {
552 u32 reg;
553
554 /* Set MAC_INIT bit to start primary bootloader */
555 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
556 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
557 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
558 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
559 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
560 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
561 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
562 }
563
564 static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
565 {
566 u32 reg;
567 int retry = 15;
568
569 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
570
571 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
572 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
573 if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
574 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
575
576 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
577
578 do {
579 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
580 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
581 return 0;
582 /* Need a delay here for the target access hardware to settle down */
583 usleep_range(1000, 1200);
584
585 } while (--retry > 0);
586
587 return -ETIME;
588 }
589
590 static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
591 {
592 u32 reg;
593
594 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
595
596 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
597 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
598
599 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
600 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
601
602 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
603 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
604
605 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
606 }
607
608 static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
609 void *data, size_t size)
610 {
611 struct intel_tlv *tlv;
612
613 tlv = dest;
614 tlv->type = type;
615 tlv->len = size;
616 memcpy(tlv->val, data, tlv->len);
617 return dest + sizeof(*tlv) + size;
618 }
619
620 static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
621 {
622 u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
623 struct btintel_pcie_dbgc *dbgc = &data->dbgc;
624 struct hci_dev *hdev = data->hdev;
625 u8 *pdata, *p, buf_idx;
626 struct intel_tlv *tlv;
627 struct timespec64 now;
628 struct tm tm_now;
629 char fw_build[128];
630 char ts[128];
631 char vendor[64];
632 char driver[64];
633
634 if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
635 return -EOPNOTSUPP;
636
637
638 wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
639 offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
640
641 buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
642 if (buf_idx > dbgc->count) {
643 bt_dev_warn(hdev, "Buffer index is invalid");
644 return -EINVAL;
645 }
646
647 prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
648 if (prev_size + offset >= prev_size)
649 data->dmp_hdr.write_ptr = prev_size + offset;
650 else
651 return -EINVAL;
652
653 snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
654 snprintf(driver, sizeof(driver), "Driver: %s\n",
655 data->dmp_hdr.driver_name);
656
657 ktime_get_real_ts64(&now);
658 time64_to_tm(now.tv_sec, 0, &tm_now);
659 snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
660 tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
661 tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
662
663 snprintf(fw_build, sizeof(fw_build),
664 "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
665 2000 + (data->dmp_hdr.fw_timestamp >> 8),
666 data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
667 data->dmp_hdr.fw_build_num);
668
669 data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
670 sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
671 sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
672 sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
673 sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
674 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
675 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
676 sizeof(*tlv) + strlen(ts) +
677 sizeof(*tlv) + strlen(fw_build) +
678 sizeof(*tlv) + strlen(vendor) +
679 sizeof(*tlv) + strlen(driver);
680
681 /*
682 * sizeof(u32) - signature
683 * sizeof(data_len) - to store tlv data size
684 * data_len - TLV data
685 */
686 dump_size = sizeof(u32) + sizeof(data_len) + data_len;
687
688
689 /* Add debug buffers data length to dump size */
690 dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
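/* Resulting devcoredump layout, roughly:
 *
 *   [ magic (u32) | data_len (u32) | TLV header data (data_len bytes) |
 *     debug buffers (dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE) ]
 */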
691
692 pdata = vmalloc(dump_size);
693 if (!pdata)
694 return -ENOMEM;
695 p = pdata;
696
697 *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
698 p += sizeof(u32);
699
700 *(u32 *)p = data_len;
701 p += sizeof(u32);
702
703
704 p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
705 p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
706 p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
707 p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
708 strlen(fw_build));
709 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
710 sizeof(data->dmp_hdr.cnvi_bt));
711 p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
712 sizeof(data->dmp_hdr.write_ptr));
713 p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
714 sizeof(data->dmp_hdr.wrap_ctr));
715
716 data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
717 BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
718
719 p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
720 sizeof(data->dmp_hdr.trigger_reason));
721 p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
722 sizeof(data->dmp_hdr.fw_git_sha1));
723 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
724 sizeof(data->dmp_hdr.cnvr_top));
725 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
726 sizeof(data->dmp_hdr.cnvi_top));
727
728 memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
729 dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
730 return 0;
731 }
732
733 static void btintel_pcie_dump_traces(struct hci_dev *hdev)
734 {
735 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
736 int ret = 0;
737
738 ret = btintel_pcie_get_mac_access(data);
739 if (ret) {
740 bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
741 return;
742 }
743
744 ret = btintel_pcie_read_dram_buffers(data);
745
746 btintel_pcie_release_mac_access(data);
747
748 if (ret)
749 bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
750 }
751
752 /* This function enables the BT function by setting the BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
753 * the BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and waits for an MSI-X interrupt with
754 * BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
755 * Then the host reads the firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
756 * from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
757 */
758 static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
759 {
760 int err;
761 u32 reg;
762
763 data->gp0_received = false;
764
765 /* Update the DMA address of CI struct to CSR */
766 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
767 data->ci_p_addr & 0xffffffff);
768 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
769 (u64)data->ci_p_addr >> 32);
770
771 /* Reset the cached value of the boot stage. It is updated by the MSI-X
772 * gp0 interrupt handler.
773 */
774 data->boot_stage_cache = 0x0;
775
776 /* Set MAC_INIT bit to start primary bootloader */
777 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
778 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
779 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
780 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
781 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
782 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
783
784 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
785
786 /* MAC is ready. Enable BT FUNC */
787 btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
788 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
789
790 btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
791
792 /* Wait for an interrupt from the device after it boots up to the primary
793 * bootloader.
794 */
795 data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
796 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
797 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
798 if (!err)
799 return -ETIME;
800
801 /* Check that the cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM (BIT(0)) */
802 if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
803 return -ENODEV;
804
805 return 0;
806 }
807
808 static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
809 {
810 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
811 }
812
813 static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
814 {
815 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
816 !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
817 }
818
819 static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
820 {
821 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
822 }
823
824 static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
825 {
826 return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
827 }
828
829 static inline bool btintel_pcie_in_device_halt(struct btintel_pcie_data *data)
830 {
831 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED;
832 }
833
834 static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
835 u32 dxstate)
836 {
837 bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
838 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
839 }
840
841 static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
842 void *buf, u32 dev_addr, int len)
843 {
844 int err;
845 u32 *val = buf;
846
847 /* Get device mac access */
848 err = btintel_pcie_get_mac_access(data);
849 if (err) {
850 bt_dev_err(data->hdev, "Failed to get mac access %d", err);
851 return err;
852 }
853
854 for (; len > 0; len -= 4, dev_addr += 4, val++)
855 *val = btintel_pcie_rd_dev_mem(data, dev_addr);
856
857 btintel_pcie_release_mac_access(data);
858
859 return 0;
860 }
861
862 static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
863 {
864 return (data->boot_stage_cache &
865 BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
866 (data->boot_stage_cache &
867 BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
868 }
869
870 static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
871 {
872 return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
873 (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
874 }
875
876 static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
877 {
878 bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
879 btintel_pcie_dump_debug_registers(data->hdev);
880 }
881
882 /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
883 * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
884 */
885 static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
886 {
887 bool submit_rx, signal_waitq;
888 u32 reg, old_ctxt;
889
890 /* This interrupt can be raised for three different causes and it is not easy
891 * to know which one triggered it. So, compare each register value with its
892 * cached value and update the cache before waking up the wait queue.
893 */
894 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
895 if (reg != data->boot_stage_cache)
896 data->boot_stage_cache = reg;
897
898 bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
899 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
900 data->boot_stage_cache, reg);
901 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
902 if (reg != data->img_resp_cache)
903 data->img_resp_cache = reg;
904
905 if (btintel_pcie_in_error(data)) {
906 bt_dev_err(data->hdev, "Controller in error state");
907 btintel_pcie_dump_debug_registers(data->hdev);
908 return;
909 }
910
911 if (btintel_pcie_in_lockdown(data)) {
912 bt_dev_err(data->hdev, "Controller in lockdown state");
913 btintel_pcie_dump_debug_registers(data->hdev);
914 return;
915 }
916
917 data->gp0_received = true;
918
919 old_ctxt = data->alive_intr_ctxt;
920 submit_rx = false;
921 signal_waitq = false;
922
923 switch (data->alive_intr_ctxt) {
924 case BTINTEL_PCIE_ROM:
925 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
926 signal_waitq = true;
927 break;
928 case BTINTEL_PCIE_FW_DL:
929 /* The error case is already handled. Ideally control should not
930 * reach here
931 */
932 break;
933 case BTINTEL_PCIE_INTEL_HCI_RESET1:
934 if (btintel_pcie_in_op(data)) {
935 submit_rx = true;
936 signal_waitq = true;
937 break;
938 }
939
940 if (btintel_pcie_in_iml(data)) {
941 submit_rx = true;
942 signal_waitq = true;
943 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
944 break;
945 }
946 break;
947 case BTINTEL_PCIE_INTEL_HCI_RESET2:
948 if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
949 btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
950 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
951 }
952 break;
953 case BTINTEL_PCIE_D0:
954 if (btintel_pcie_in_d3(data)) {
955 data->alive_intr_ctxt = BTINTEL_PCIE_D3;
956 signal_waitq = true;
957 break;
958 }
959 break;
960 case BTINTEL_PCIE_D3:
961 if (btintel_pcie_in_d0(data)) {
962 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
963 submit_rx = true;
964 signal_waitq = true;
965 break;
966 }
967 break;
968 case BTINTEL_PCIE_HCI_RESET:
969 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
970 submit_rx = true;
971 signal_waitq = true;
972 break;
973 default:
974 bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
975 data->alive_intr_ctxt);
976 break;
977 }
978
979 if (submit_rx) {
980 btintel_pcie_reset_ia(data);
981 btintel_pcie_start_rx(data);
982 }
983
984 if (signal_waitq) {
985 bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
986 wake_up(&data->gp0_wait_q);
987 }
988
989 if (old_ctxt != data->alive_intr_ctxt)
990 bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
991 btintel_pcie_alivectxt_state2str(old_ctxt),
992 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
993 }
994
995 /* This function handles the MSI-X interrupt for rx queue 0 which is for TX
996 */
997 static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
998 {
999 u16 cr_tia, cr_hia;
1000 struct txq *txq;
1001 struct urbd0 *urbd0;
1002
1003 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
1004 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1005
1006 if (cr_tia == cr_hia)
1007 return;
1008
1009 txq = &data->txq;
1010
1011 while (cr_tia != cr_hia) {
1012 data->tx_wait_done = true;
1013 wake_up(&data->tx_wait_q);
1014
1015 urbd0 = &txq->urbd0s[cr_tia];
1016
1017 if (urbd0->tfd_index > txq->count)
1018 return;
1019
1020 cr_tia = (cr_tia + 1) % txq->count;
1021 data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
1022 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
1023 }
1024 }
1025
1026 static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
1027 {
1028 struct hci_event_hdr *hdr = (void *)skb->data;
1029 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
1030
1031 if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
1032 hdr->plen > 0) {
1033 const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
1034 unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
1035
1036 if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
1037 switch (skb->data[2]) {
1038 case 0x02:
1039 /* When switching to the operational firmware
1040 * the device sends a vendor specific event
1041 * indicating that the bootup completed.
1042 */
1043 btintel_bootup(hdev, ptr, len);
1044
1045 /* If bootup event is from operational image,
1046 * driver needs to write sleep control register to
1047 * move into D0 state
1048 */
1049 if (btintel_pcie_in_op(data)) {
1050 btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
1051 data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
1052 kfree_skb(skb);
1053 return 0;
1054 }
1055
1056 if (btintel_pcie_in_iml(data)) {
1057 /* In case of IML, there is no concept
1058 * of D0 transition. Just mimic as if
1059 * IML moved to D0 by clearing INTEL_WAIT_FOR_D0
1060 * bit and waking up the task waiting on
1061 * INTEL_WAIT_FOR_D0. This is required
1062 * as intel_boot() is common function for
1063 * both IML and OP image loading.
1064 */
1065 if (btintel_test_and_clear_flag(data->hdev,
1066 INTEL_WAIT_FOR_D0))
1067 btintel_wake_up_flag(data->hdev,
1068 INTEL_WAIT_FOR_D0);
1069 }
1070 kfree_skb(skb);
1071 return 0;
1072 case 0x06:
1073 /* When the firmware loading completes the
1074 * device sends out a vendor specific event
1075 * indicating the result of the firmware
1076 * loading.
1077 */
1078 btintel_secure_send_result(hdev, ptr, len);
1079 kfree_skb(skb);
1080 return 0;
1081 }
1082 }
1083
1084 /* This is a debug event that comes from the IML and OP images when they
1085 * start execution. There is no need to pass this event to the stack.
1086 */
1087 if (skb->data[2] == 0x97) {
1088 hci_recv_diag(hdev, skb);
1089 return 0;
1090 }
1091 }
1092
1093 return hci_recv_frame(hdev, skb);
1094 }
1095 /* Process the received rx data
1096 * It checks the frame header to identify the data type, creates an skb,
1097 * and calls the HCI API.
1098 */
1099 static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
1100 struct sk_buff *skb)
1101 {
1102 int ret;
1103 u8 pkt_type;
1104 u16 plen;
1105 u32 pcie_pkt_type;
1106 void *pdata;
1107 struct hci_dev *hdev = data->hdev;
1108
1109 spin_lock(&data->hci_rx_lock);
1110
1111 /* The first 4 bytes indicate the Intel PCIe specific packet type */
1112 pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
1113 if (!pdata) {
1114 bt_dev_err(hdev, "Corrupted packet received");
1115 ret = -EILSEQ;
1116 goto exit_error;
1117 }
1118
1119 pcie_pkt_type = get_unaligned_le32(pdata);
1120
1121 switch (pcie_pkt_type) {
1122 case BTINTEL_PCIE_HCI_ACL_PKT:
1123 if (skb->len >= HCI_ACL_HDR_SIZE) {
1124 plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
1125 pkt_type = HCI_ACLDATA_PKT;
1126 } else {
1127 bt_dev_err(hdev, "ACL packet is too short");
1128 ret = -EILSEQ;
1129 goto exit_error;
1130 }
1131 break;
1132
1133 case BTINTEL_PCIE_HCI_SCO_PKT:
1134 if (skb->len >= HCI_SCO_HDR_SIZE) {
1135 plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
1136 pkt_type = HCI_SCODATA_PKT;
1137 } else {
1138 bt_dev_err(hdev, "SCO packet is too short");
1139 ret = -EILSEQ;
1140 goto exit_error;
1141 }
1142 break;
1143
1144 case BTINTEL_PCIE_HCI_EVT_PKT:
1145 if (skb->len >= HCI_EVENT_HDR_SIZE) {
1146 plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
1147 pkt_type = HCI_EVENT_PKT;
1148 } else {
1149 bt_dev_err(hdev, "Event packet is too short");
1150 ret = -EILSEQ;
1151 goto exit_error;
1152 }
1153 break;
1154
1155 case BTINTEL_PCIE_HCI_ISO_PKT:
1156 if (skb->len >= HCI_ISO_HDR_SIZE) {
1157 plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
1158 pkt_type = HCI_ISODATA_PKT;
1159 } else {
1160 bt_dev_err(hdev, "ISO packet is too short");
1161 ret = -EILSEQ;
1162 goto exit_error;
1163 }
1164 break;
1165
1166 default:
1167 bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
1168 pcie_pkt_type);
1169 ret = -EINVAL;
1170 goto exit_error;
1171 }
1172
1173 if (skb->len < plen) {
1174 bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
1175 pkt_type);
1176 ret = -EILSEQ;
1177 goto exit_error;
1178 }
1179
1180 bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
1181
1182 hci_skb_pkt_type(skb) = pkt_type;
1183 hdev->stat.byte_rx += plen;
1184 skb_trim(skb, plen);
1185
1186 if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
1187 ret = btintel_pcie_recv_event(hdev, skb);
1188 else
1189 ret = hci_recv_frame(hdev, skb);
1190 skb = NULL; /* skb is freed in the callee */
1191
1192 exit_error:
1193 kfree_skb(skb);
1194
1195 if (ret)
1196 hdev->stat.err_rx++;
1197
1198 spin_unlock(&data->hci_rx_lock);
1199
1200 return ret;
1201 }
1202
1203 static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
1204 {
1205 int len, err, offset, pending;
1206 struct sk_buff *skb;
1207 u8 *buf, prefix[64];
1208 u32 addr, val;
1209 u16 pkt_len;
1210
1211 struct tlv {
1212 u8 type;
1213 __le16 len;
1214 u8 val[];
1215 } __packed;
1216
1217 struct tlv *tlv;
1218
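/* The exception dump in device memory is expected to look roughly like
 *
 *   [ magic (u32, BTINTEL_PCIE_MAGIC_NUM) | TLV | TLV | ... ]
 *
 * where each TLV is { type (u8), len (le16), val[len] } as defined above,
 * and a TLV of type 1 carries a raw HCI event that is re-injected through
 * btintel_pcie_recv_frame().
 */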
1219 switch (data->dmp_hdr.cnvi_top & 0xfff) {
1220 case BTINTEL_CNVI_BLAZARI:
1221 case BTINTEL_CNVI_BLAZARIW:
1222 /* only from step B0 onwards */
1223 if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
1224 return;
1225 len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
1226 addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
1227 break;
1228 case BTINTEL_CNVI_SCP:
1229 len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
1230 addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
1231 break;
1232 default:
1233 bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
1234 return;
1235 }
1236
1237 buf = kzalloc(len, GFP_KERNEL);
1238 if (!buf)
1239 goto exit_on_error;
1240
1241 btintel_pcie_mac_init(data);
1242
1243 err = btintel_pcie_read_device_mem(data, buf, addr, len);
1244 if (err)
1245 goto exit_on_error;
1246
1247 val = get_unaligned_le32(buf);
1248 if (val != BTINTEL_PCIE_MAGIC_NUM) {
1249 bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
1250 val);
1251 goto exit_on_error;
1252 }
1253
1254 snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
1255
1256 offset = 4;
1257 do {
1258 pending = len - offset;
1259 if (pending < sizeof(*tlv))
1260 break;
1261 tlv = (struct tlv *)(buf + offset);
1262
1263 /* If type == 0, then there are no more TLVs to be parsed */
1264 if (!tlv->type) {
1265 bt_dev_dbg(data->hdev, "Invalid TLV type 0");
1266 break;
1267 }
1268 pkt_len = le16_to_cpu(tlv->len);
1269 offset += sizeof(*tlv);
1270 pending = len - offset;
1271 if (pkt_len > pending)
1272 break;
1273
1274 offset += pkt_len;
1275
1276 /* Only TLVs of type == 1 are HCI events, no need to process other
1277 * TLVs
1278 */
1279 if (tlv->type != 1)
1280 continue;
1281
1282 bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
1283 if (pkt_len > HCI_MAX_EVENT_SIZE)
1284 break;
1285 skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
1286 if (!skb)
1287 goto exit_on_error;
1288 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1289 skb_put_data(skb, tlv->val, pkt_len);
1290
1291 /* copy Intel specific pcie packet type */
1292 val = BTINTEL_PCIE_HCI_EVT_PKT;
1293 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
1294 BTINTEL_PCIE_HCI_TYPE_LEN);
1295
1296 print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
1297 tlv->val, pkt_len, false);
1298
1299 btintel_pcie_recv_frame(data, skb);
1300 } while (offset < len);
1301
1302 exit_on_error:
1303 kfree(buf);
1304 }
1305
1306 static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
1307 {
1308 bt_dev_err(data->hdev, "Received hw exception interrupt");
1309
1310 if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
1311 return;
1312
1313 if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
1314 return;
1315
1316 /* Trigger device core dump when there is HW exception */
1317 if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
1318 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
1319
1320 queue_work(data->workqueue, &data->rx_work);
1321 }
1322
1323 static void btintel_pcie_rx_work(struct work_struct *work)
1324 {
1325 struct btintel_pcie_data *data = container_of(work,
1326 struct btintel_pcie_data, rx_work);
1327 struct sk_buff *skb;
1328
1329 if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
1330 btintel_pcie_dump_traces(data->hdev);
1331 clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
1332 }
1333
1334 if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
1335 /* Unlike USB products, the controller will not send a hardware
1336 * exception event on an exception. Instead, the controller writes the
1337 * hardware exception event to device memory along with optional debug
1338 * events, raises an MSI-X interrupt and halts. The driver shall read the
1339 * exception event from device memory and pass it to the stack for
1340 * further processing.
1341 */
1342 btintel_pcie_read_hwexp(data);
1343 clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
1344 }
1345
1346 /* Process the sk_buffs in the queue and send them to the HCI layer */
1347 while ((skb = skb_dequeue(&data->rx_skb_q))) {
1348 btintel_pcie_recv_frame(data, skb);
1349 }
1350 }
1351
1352 /* Create an sk_buff with the data, add it to the rx queue and start RX work */
1353 static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
1354 void *buf)
1355 {
1356 int ret, len;
1357 struct rfh_hdr *rfh_hdr;
1358 struct sk_buff *skb;
1359
1360 rfh_hdr = buf;
1361
1362 len = rfh_hdr->packet_len;
1363 if (len <= 0) {
1364 ret = -EINVAL;
1365 goto resubmit;
1366 }
1367
1368 /* Remove RFH header */
1369 buf += sizeof(*rfh_hdr);
1370
1371 skb = alloc_skb(len, GFP_ATOMIC);
1372 if (!skb)
1373 goto resubmit;
1374
1375 skb_put_data(skb, buf, len);
1376 skb_queue_tail(&data->rx_skb_q, skb);
1377 queue_work(data->workqueue, &data->rx_work);
1378
1379 resubmit:
1380 ret = btintel_pcie_submit_rx(data);
1381
1382 return ret;
1383 }
1384
1385 /* Handles the MSI-X interrupt for rx queue 1 which is for RX */
1386 static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
1387 {
1388 u16 cr_hia, cr_tia;
1389 struct rxq *rxq;
1390 struct urbd1 *urbd1;
1391 struct data_buf *buf;
1392 int ret;
1393 struct hci_dev *hdev = data->hdev;
1394
1395 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
1396 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1397
1398 bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
1399
1400 /* Check CR_TIA and CR_HIA for change */
1401 if (cr_tia == cr_hia)
1402 return;
1403
1404 rxq = &data->rxq;
1405
1406 /* The firmware may send multiple CDs in a single MSI-X interrupt, so all
1407 * received CDs need to be processed in this handler.
1408 */
1409 while (cr_tia != cr_hia) {
1410 urbd1 = &rxq->urbd1s[cr_tia];
1411 ipc_print_urbd1(data->hdev, urbd1, cr_tia);
1412
1413 buf = &rxq->bufs[urbd1->frbd_tag];
1414 if (!buf) {
1415 bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
1416 urbd1->frbd_tag);
1417 return;
1418 }
1419
1420 ret = btintel_pcie_submit_rx_work(data, urbd1->status,
1421 buf->data);
1422 if (ret) {
1423 bt_dev_err(hdev, "RXQ: failed to submit rx request");
1424 return;
1425 }
1426
1427 cr_tia = (cr_tia + 1) % rxq->count;
1428 data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
1429 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
1430 }
1431 }
1432
1433 static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
1434 {
1435 return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1436 }
1437
1438 static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
1439 {
1440 return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1441 }
1442
1443 static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
1444 {
1445 struct msix_entry *entry = dev_id;
1446 struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
1447 u32 intr_fh, intr_hw;
1448
1449 spin_lock(&data->irq_lock);
1450 intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
1451 intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);
1452
1453 /* Clear the cause registers to avoid handling the same cause again */
1454 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
1455 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
1456 spin_unlock(&data->irq_lock);
1457
1458 if (unlikely(!(intr_fh | intr_hw))) {
1459 /* Ignore interrupt, inta == 0 */
1460 return IRQ_NONE;
1461 }
1462
1463 /* This interrupt is raised when there is a hardware exception */
1464 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
1465 btintel_pcie_msix_hw_exp_handler(data);
1466
1467 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
1468 btintel_pcie_msix_gp1_handler(data);
1469
1470
1471 /* For TX */
1472 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
1473 btintel_pcie_msix_tx_handle(data);
1474 if (!btintel_pcie_is_rxq_empty(data))
1475 btintel_pcie_msix_rx_handle(data);
1476 }
1477
1478 /* For RX */
1479 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
1480 btintel_pcie_msix_rx_handle(data);
1481 if (!btintel_pcie_is_txackq_empty(data))
1482 btintel_pcie_msix_tx_handle(data);
1483 }
1484
1485 /* This interrupt is triggered by the firmware after updating the
1486 * boot_stage and image_response registers
1487 */
1488 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
1489 btintel_pcie_msix_gp0_handler(data);
1490
1491 /*
1492 * Before sending the interrupt the HW disables it to prevent a nested
1493 * interrupt. This is done by writing 1 to the corresponding bit in
1494 * the mask register. After handling the interrupt, it should be
1495 * re-enabled by clearing this bit. This register is defined as write 1
1496 * clear (W1C) register, meaning that it's cleared by writing 1
1497 * to the bit.
1498 */
1499 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
1500 BIT(entry->entry));
1501
1502 return IRQ_HANDLED;
1503 }
1504
1505 /* This function requests the irq for MSI-X and registers the handlers per irq.
1506 * Currently, it requests only 1 irq for all interrupt causes.
1507 */
1508 static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
1509 {
1510 int err;
1511 int num_irqs, i;
1512
1513 for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
1514 data->msix_entries[i].entry = i;
1515
1516 num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
1517 BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
1518 if (num_irqs < 0)
1519 return num_irqs;
1520
1521 data->alloc_vecs = num_irqs;
1522 data->msix_enabled = 1;
1523 data->def_irq = 0;
1524
1525 /* setup irq handler */
1526 for (i = 0; i < data->alloc_vecs; i++) {
1527 struct msix_entry *msix_entry;
1528
1529 msix_entry = &data->msix_entries[i];
1530 msix_entry->vector = pci_irq_vector(data->pdev, i);
1531
1532 err = devm_request_threaded_irq(&data->pdev->dev,
1533 msix_entry->vector,
1534 NULL,
1535 btintel_pcie_irq_msix_handler,
1536 IRQF_ONESHOT | IRQF_SHARED,
1537 KBUILD_MODNAME,
1538 msix_entry);
1539 if (err) {
1540 pci_free_irq_vectors(data->pdev);
1541 data->alloc_vecs = 0;
1542 return err;
1543 }
1544 }
1545 return 0;
1546 }
1547
1548 struct btintel_pcie_causes_list {
1549 u32 cause;
1550 u32 mask_reg;
1551 u8 cause_num;
1552 };
1553
1554 static struct btintel_pcie_causes_list causes_list[] = {
1555 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
1556 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
1557 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
1558 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
1559 };
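/* For illustration: btintel_pcie_config_msix() below routes each cause above
 * to the default MSI-X vector by writing (def_irq |
 * BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE) into the per-cause IVAR register
 * BTINTEL_PCIE_CSR_MSIX_IVAR(cause_num) and clearing the corresponding bit in
 * mask_reg.
 */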
1560
1561 /* This function configures the interrupt masks for both HW_INT_CAUSES and
1562 * FH_INT_CAUSES which are meaningful to us.
1563 *
1564 * After resetting the BT function via PCIe FLR or FUNC_CTRL reset, the driver
1565 * needs to call this function again to reconfigure, since the masks
1566 * are reset to 0xFFFFFFFF after reset.
1567 */
1568 static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
1569 {
1570 int i;
1571 int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;
1572
1573 /* Set Non Auto Clear Cause */
1574 for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
1575 btintel_pcie_wr_reg8(data,
1576 BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
1577 val);
1578 btintel_pcie_clr_reg_bits(data,
1579 causes_list[i].mask_reg,
1580 causes_list[i].cause);
1581 }
1582
1583 /* Save the initial interrupt mask */
1584 data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
1585 data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
1586 }
1587
1588 static int btintel_pcie_config_pcie(struct pci_dev *pdev,
1589 struct btintel_pcie_data *data)
1590 {
1591 int err;
1592
1593 err = pcim_enable_device(pdev);
1594 if (err)
1595 return err;
1596
1597 pci_set_master(pdev);
1598
1599 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1600 if (err) {
1601 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1602 if (err)
1603 return err;
1604 }
1605
1606 data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
1607 if (IS_ERR(data->base_addr))
1608 return PTR_ERR(data->base_addr);
1609
1610 err = btintel_pcie_setup_irq(data);
1611 if (err)
1612 return err;
1613
1614 /* Configure MSI-X with causes list */
1615 btintel_pcie_config_msix(data);
1616
1617 return 0;
1618 }
1619
1620 static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
1621 struct ctx_info *ci)
1622 {
1623 ci->version = 0x1;
1624 ci->size = sizeof(*ci);
1625 ci->config = 0x0000;
1626 ci->addr_cr_hia = data->ia.cr_hia_p_addr;
1627 ci->addr_tr_tia = data->ia.tr_tia_p_addr;
1628 ci->addr_cr_tia = data->ia.cr_tia_p_addr;
1629 ci->addr_tr_hia = data->ia.tr_hia_p_addr;
1630 ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
1631 ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
1632 ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
1633 ci->addr_tfdq = data->txq.tfds_p_addr;
1634 ci->num_tfdq = data->txq.count;
1635 ci->num_urbdq0 = data->txq.count;
1636 ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
1637 ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
1638 ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
1639 ci->addr_frbdq = data->rxq.frbds_p_addr;
1640 ci->num_frbdq = data->rxq.count;
1641 ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1642 ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
1643 ci->num_urbdq1 = data->rxq.count;
1644 ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1645
1646 ci->dbg_output_mode = 0x01;
1647 ci->dbgc_addr = data->dbgc.frag_p_addr;
1648 ci->dbgc_size = data->dbgc.frag_size;
1649 ci->dbg_preset = 0x00;
1650 }
1651
1652 static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
1653 struct txq *txq)
1654 {
1655 /* Free data buffers first */
1656 dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1657 txq->buf_v_addr, txq->buf_p_addr);
1658 kfree(txq->bufs);
1659 }
1660
1661 static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
1662 struct txq *txq)
1663 {
1664 int i;
1665 struct data_buf *buf;
1666
1667 /* Allocate the same number of buffers as the number of descriptors */
1668 txq->bufs = kmalloc_objs(*buf, txq->count, GFP_KERNEL);
1669 if (!txq->bufs)
1670 return -ENOMEM;
1671
1672 /* Allocate full chunk of data buffer for DMA first and do indexing and
1673 * initialization next, so it can be freed easily
1674 */
1675 txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1676 txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1677 &txq->buf_p_addr,
1678 GFP_KERNEL | __GFP_NOWARN);
1679 if (!txq->buf_v_addr) {
1680 kfree(txq->bufs);
1681 return -ENOMEM;
1682 }
1683
1684 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1685 * have virtual address and physical address
1686 */
1687 for (i = 0; i < txq->count; i++) {
1688 buf = &txq->bufs[i];
1689 buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1690 buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1691 }
1692
1693 return 0;
1694 }
1695
1696 static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
1697 struct rxq *rxq)
1698 {
1699 /* Free data buffers first */
1700 dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1701 rxq->buf_v_addr, rxq->buf_p_addr);
1702 kfree(rxq->bufs);
1703 }
1704
1705 static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
1706 struct rxq *rxq)
1707 {
1708 int i;
1709 struct data_buf *buf;
1710
1711 /* Allocate the same number of buffers as the number of descriptors */
1712 rxq->bufs = kmalloc_objs(*buf, rxq->count, GFP_KERNEL);
1713 if (!rxq->bufs)
1714 return -ENOMEM;
1715
1716 /* Allocate full chunk of data buffer for DMA first and do indexing and
1717 * initialization next, so it can be freed easily
1718 */
1719 rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1720 rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1721 &rxq->buf_p_addr,
1722 GFP_KERNEL | __GFP_NOWARN);
1723 if (!rxq->buf_v_addr) {
1724 kfree(rxq->bufs);
1725 return -ENOMEM;
1726 }
1727
1728 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1729 * have virtual address and physical address
1730 */
1731 for (i = 0; i < rxq->count; i++) {
1732 buf = &rxq->bufs[i];
1733 buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1734 buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1735 }
1736
1737 return 0;
1738 }
1739
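/* Lay out the four index arrays (TR head, TR tail, CR head, CR tail) back to
 * back in the shared DMA region, each holding one u16 slot per queue.
 */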
1740 static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
1741 dma_addr_t p_addr, void *v_addr,
1742 struct ia *ia)
1743 {
1744 /* TR Head Index Array */
1745 ia->tr_hia_p_addr = p_addr;
1746 ia->tr_hia = v_addr;
1747
1748 /* TR Tail Index Array */
1749 ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
1750 ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
1751
1752 /* CR Head index Array */
1753 ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
1754 ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
1755
1756 /* CR Tail Index Array */
1757 ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
1758 ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
1759 }
1760
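/* Release everything allocated by btintel_pcie_alloc(): the queue data
 * buffers and the DMA pool holding descriptors, index arrays and context
 * information.
 */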
1761 static void btintel_pcie_free(struct btintel_pcie_data *data)
1762 {
1763 btintel_pcie_free_rxq_bufs(data, &data->rxq);
1764 btintel_pcie_free_txq_bufs(data, &data->txq);
1765
1766 dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
1767 dma_pool_destroy(data->dma_pool);
1768 }
1769
1770 /* Allocate tx and rx queues, any related data structures and buffers.
1771 */
1772 static int btintel_pcie_alloc(struct btintel_pcie_data *data)
1773 {
1774 int err = 0;
1775 size_t total;
1776 dma_addr_t p_addr;
1777 void *v_addr;
1778
1779 /* Allocate the chunk of DMA memory for descriptors, index array, and
1780 * context information, instead of allocating individually.
1781 * The DMA memory for the data buffers is allocated while setting up
1782 * each queue.
1783 *
1784 * Total size is sum of the following
1785 * + size of TFD * Number of descriptors in queue
1786 * + size of URBD0 * Number of descriptors in queue
1787 * + size of FRBD * Number of descriptors in queue
1788 * + size of URBD1 * Number of descriptors in queue
1789 * + size of index * Number of queues(2) * type of index array(4)
1790 * + size of context information
1791 */
1792 total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
1793 total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;
1794
1795 /* Add the sum of size of index array and size of ci struct */
1796 total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
1797
1798 /* Allocate DMA Pool */
1799 data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
1800 total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
1801 if (!data->dma_pool) {
1802 err = -ENOMEM;
1803 goto exit_error;
1804 }
1805
1806 v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
1807 &p_addr);
1808 if (!v_addr) {
1809 dma_pool_destroy(data->dma_pool);
1810 err = -ENOMEM;
1811 goto exit_error;
1812 }
1813
1814 data->dma_p_addr = p_addr;
1815 data->dma_v_addr = v_addr;
1816
1817 /* Setup descriptor count */
1818 data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
1819 data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;
1820
1821 /* Setup tfds */
1822 data->txq.tfds_p_addr = p_addr;
1823 data->txq.tfds = v_addr;
1824
1825 p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
1826 v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
1827
1828 /* Setup urbd0 */
1829 data->txq.urbd0s_p_addr = p_addr;
1830 data->txq.urbd0s = v_addr;
1831
1832 p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
1833 v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
1834
1835 /* Setup FRBD */
1836 data->rxq.frbds_p_addr = p_addr;
1837 data->rxq.frbds = v_addr;
1838
1839 p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
1840 v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
1841
1842 /* Setup urbd1 */
1843 data->rxq.urbd1s_p_addr = p_addr;
1844 data->rxq.urbd1s = v_addr;
1845
1846 p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
1847 v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
1848
1849 /* Setup data buffers for txq */
1850 err = btintel_pcie_setup_txq_bufs(data, &data->txq);
1851 if (err)
1852 goto exit_error_pool;
1853
1854 /* Setup data buffers for rxq */
1855 err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
1856 if (err)
1857 goto exit_error_txq;
1858
1859 /* Setup Index Array */
1860 btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);
1861
1862 /* Setup data buffers for dbgc */
1863 err = btintel_pcie_setup_dbgc(data);
1864 if (err)
1865 goto exit_error_txq;
1866
1867 /* Setup Context Information */
1868 p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
1869 v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
1870
1871 data->ci = v_addr;
1872 data->ci_p_addr = p_addr;
1873
1874 /* Initialize the CI */
1875 btintel_pcie_init_ci(data, data->ci);
1876
1877 return 0;
1878
1879 exit_error_txq:
1880 btintel_pcie_free_txq_bufs(data, &data->txq);
1881 exit_error_pool:
1882 dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
1883 dma_pool_destroy(data->dma_pool);
1884 exit_error:
1885 return err;
1886 }
1887
1888 static int btintel_pcie_open(struct hci_dev *hdev)
1889 {
1890 bt_dev_dbg(hdev, "");
1891
1892 return 0;
1893 }
1894
1895 static int btintel_pcie_close(struct hci_dev *hdev)
1896 {
1897 bt_dev_dbg(hdev, "");
1898
1899 return 0;
1900 }
1901
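/* Build and inject a synthetic HCI Command Complete event with a success
 * status so the HCI core's command flow control keeps working for commands
 * the firmware never answers (see btintel_pcie_send_frame()).
 */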
1902 static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
1903 {
1904 struct sk_buff *skb;
1905 struct hci_event_hdr *hdr;
1906 struct hci_ev_cmd_complete *evt;
1907
1908 skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
1909 if (!skb)
1910 return -ENOMEM;
1911
1912 hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
1913 hdr->evt = HCI_EV_CMD_COMPLETE;
1914 hdr->plen = sizeof(*evt) + 1;
1915
1916 evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
1917 evt->ncmd = 0x01;
1918 evt->opcode = cpu_to_le16(opcode);
1919
1920 *(u8 *)skb_put(skb, 1) = 0x00;
1921
1922 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1923
1924 return hci_recv_frame(hdev, skb);
1925 }
1926
1927 static int btintel_pcie_send_frame(struct hci_dev *hdev,
1928 struct sk_buff *skb)
1929 {
1930 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
1931 struct hci_command_hdr *cmd;
1932 __u16 opcode = ~0;
1933 int ret;
1934 u32 type;
1935
1936 if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
1937 return -ENODEV;
1938
1939 /* Due to a firmware limitation, the type header of the packet must be
1940 * 4 bytes unlike the 1 byte used for UART. For UART, the firmware can
1941 * read the first byte to get the packet type and redirect the rest of
1942 * the data packet to the right handler.
1943 *
1944 * But for PCIe, the THF (Transfer Flow Handler) fetches 4 bytes of data
1945 * from DMA memory and, by the time it reads the first 4 bytes, it has
1946 * already consumed part of the packet. Thus the packet type indicator
1947 * for iBT PCIe is 4 bytes.
1948 *
1949 * Luckily, when the HCI core creates the skb, it allocates 8 bytes of
1950 * headroom for profile and driver use, so the driver can prepend the
1951 * iBT PCIe packet type before sending the data to the device.
1952 */
1953 switch (hci_skb_pkt_type(skb)) {
1954 case HCI_COMMAND_PKT:
1955 type = BTINTEL_PCIE_HCI_CMD_PKT;
1956 cmd = (void *)skb->data;
1957 opcode = le16_to_cpu(cmd->opcode);
1958 if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
1962 /* When the BTINTEL_HCI_OP_RESET command is issued to
1963 * boot into the operational firmware, it will actually
1964 * not send a command complete event. To keep the flow
1965 * control working inject that event here.
1966 */
1967 if (opcode == BTINTEL_HCI_OP_RESET)
1968 btintel_pcie_inject_cmd_complete(hdev, opcode);
1969 }
1970
1971 hdev->stat.cmd_tx++;
1972 break;
1973 case HCI_ACLDATA_PKT:
1974 type = BTINTEL_PCIE_HCI_ACL_PKT;
1975 hdev->stat.acl_tx++;
1976 break;
1977 case HCI_SCODATA_PKT:
1978 type = BTINTEL_PCIE_HCI_SCO_PKT;
1979 hdev->stat.sco_tx++;
1980 break;
1981 case HCI_ISODATA_PKT:
1982 type = BTINTEL_PCIE_HCI_ISO_PKT;
1983 break;
1984 default:
1985 bt_dev_err(hdev, "Unknown HCI packet type");
1986 return -EILSEQ;
1987 }
1988
1989 ret = btintel_pcie_send_sync(data, skb, type, opcode);
1990 if (ret) {
1991 hdev->stat.err_tx++;
1992 bt_dev_err(hdev, "Failed to send frame (%d)", ret);
1993 goto exit_error;
1994 }
1995
1996 hdev->stat.byte_tx += skb->len;
1997 kfree_skb(skb);
1998
1999 exit_error:
2000 return ret;
2001 }
2002
2003 static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
2004 {
2005 struct hci_dev *hdev;
2006
2007 hdev = data->hdev;
2008 hci_unregister_dev(hdev);
2009 hci_free_dev(hdev);
2010 data->hdev = NULL;
2011 }
2012
2013 static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
2014 {
2015 spin_lock(&data->irq_lock);
2016 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
2017 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
2018 spin_unlock(&data->irq_lock);
2019 }
2020
2021 static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
2022 {
2023 spin_lock(&data->irq_lock);
2024 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
2025 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
2026 spin_unlock(&data->irq_lock);
2027 }
2028
2029 static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
2030 {
2031 for (int i = 0; i < data->alloc_vecs; i++)
2032 synchronize_irq(data->msix_entries[i].vector);
2033 }
2034
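/* Read the Intel version information, validate the hardware platform and
 * variant, apply the Intel-specific quirks and download the operational
 * firmware through the TLV bootloader flow.
 */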
2035 static int btintel_pcie_setup_internal(struct hci_dev *hdev)
2036 {
2037 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
2038 const u8 param[1] = { 0xFF };
2039 struct intel_version_tlv ver_tlv;
2040 struct sk_buff *skb;
2041 int err;
2042
2043 BT_DBG("%s", hdev->name);
2044
2045 skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
2046 if (IS_ERR(skb)) {
2047 bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
2048 PTR_ERR(skb));
2049 return PTR_ERR(skb);
2050 }
2051
2052 /* Check the status */
2053 if (skb->data[0]) {
2054 bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
2055 skb->data[0]);
2056 err = -EIO;
2057 goto exit_error;
2058 }
2059
2060 /* Apply the common HCI quirks for Intel device */
2061 hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
2062 hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
2063 hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
2064
2065 /* Set up the quality report callback for Intel devices */
2066 hdev->set_quality_report = btintel_set_quality_report;
2067
2068 memset(&ver_tlv, 0, sizeof(ver_tlv));
2069 /* For TLV type device, parse the tlv data */
2070 err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
2071 if (err) {
2072 bt_dev_err(hdev, "Failed to parse TLV version information");
2073 goto exit_error;
2074 }
2075
2076 switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
2077 case 0x37:
2078 break;
2079 default:
2080 bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
2081 INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
2082 err = -EINVAL;
2083 goto exit_error;
2084 }
2085
2086 /* Check for supported iBT hardware variants of this firmware
2087 * loading method.
2088 *
2089 * This check has been put in place to ensure correct forward
2090 * compatibility options when newer hardware variants come
2091 * along.
2092 */
2093 switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
2094 case 0x1e: /* BzrI */
2095 case 0x1f: /* ScP */
2096 case 0x22: /* BzrIW */
2097 /* Display version information of TLV type */
2098 btintel_version_info_tlv(hdev, &ver_tlv);
2099
2100 /* Apply the device specific HCI quirks for TLV based devices
2101 *
2102 * All TLV based devices support WBS
2103 */
2104 hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
2105
2106 /* Setup MSFT Extension support */
2107 btintel_set_msft_opcode(hdev,
2108 INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
2109
2110 err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
2111 if (err)
2112 goto exit_error;
2113 break;
2114 default:
2115 bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
2116 INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
2117 err = -EINVAL;
2118 goto exit_error;
2120 }
2121
2122 data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
2123 data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
2124 data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
2125 data->dmp_hdr.fw_build_type = ver_tlv.build_type;
2126 data->dmp_hdr.fw_build_num = ver_tlv.build_num;
2127 data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;
2128
2129 if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
2130 data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
2131
2132 btintel_print_fseq_info(hdev);
2133 exit_error:
2134 kfree_skb(skb);
2135
2136 return err;
2137 }
2138
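/* Setup callback for the HCI core. If the firmware download fails, reset the
 * device (shared reset, re-init of index arrays and MSI-X) and retry once.
 */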
2139 static int btintel_pcie_setup(struct hci_dev *hdev)
2140 {
2141 int err, fw_dl_retry = 0;
2142 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
2143
2144 while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
2145 bt_dev_err(hdev, "Firmware download retry count: %d",
2146 fw_dl_retry);
2147 btintel_pcie_dump_debug_registers(hdev);
2148 btintel_pcie_disable_interrupts(data);
2149 btintel_pcie_synchronize_irqs(data);
2150 err = btintel_pcie_reset_bt(data);
2151 if (err) {
2152 bt_dev_err(hdev, "Failed to do shr reset: %d", err);
2153 break;
2154 }
2155 usleep_range(10000, 12000);
2156 btintel_pcie_reset_ia(data);
2157 btintel_pcie_enable_interrupts(data);
2158 btintel_pcie_config_msix(data);
2159 err = btintel_pcie_enable_bt(data);
2160 if (err) {
2161 bt_dev_err(hdev, "Failed to enable hardware: %d", err);
2162 break;
2163 }
2164 btintel_pcie_start_rx(data);
2165 }
2166
2167 if (!err)
2168 set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags);
2169 return err;
2170 }
2171
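/* Look up (or allocate) the per-device recovery bookkeeping entry, keyed by
 * the PCI name (BDF), used to rate-limit function level resets.
 */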
2172 static struct btintel_pcie_dev_recovery *
2173 btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
2174 {
2175 struct btintel_pcie_dev_recovery *tmp, *data = NULL;
2176 const char *name = pci_name(pdev);
2177 const size_t name_len = strlen(name) + 1;
2178 struct hci_dev *hdev = to_hci_dev(dev);
2179
2180 spin_lock(&btintel_pcie_recovery_lock);
2181 list_for_each_entry(tmp, &btintel_pcie_recovery_list, list) {
2182 if (strcmp(tmp->name, name))
2183 continue;
2184 data = tmp;
2185 break;
2186 }
2187 spin_unlock(&btintel_pcie_recovery_lock);
2188
2189 if (data) {
2190 bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name);
2191 return data;
2192 }
2193
2194 data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
2195 if (!data)
2196 return NULL;
2197
2198 strscpy(data->name, name, name_len);
2199 spin_lock(&btintel_pcie_recovery_lock);
2200 list_add_tail(&data->list, &btintel_pcie_recovery_list);
2201 spin_unlock(&btintel_pcie_recovery_lock);
2202
2203 return data;
2204 }
2205
2206 static void btintel_pcie_free_restart_list(void)
2207 {
2208 struct btintel_pcie_dev_recovery *tmp;
2209
2210 while ((tmp = list_first_entry_or_null(&btintel_pcie_recovery_list,
2211 typeof(*tmp), list))) {
2212 list_del(&tmp->list);
2213 kfree(tmp);
2214 }
2215 }
2216
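/* Track recovery attempts within BTINTEL_PCIE_RESET_WINDOW_SECS; the counter
 * and timestamp are cleared once the error falls outside that window.
 */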
2217 static void btintel_pcie_inc_recovery_count(struct pci_dev *pdev,
2218 struct device *dev)
2219 {
2220 struct btintel_pcie_dev_recovery *data;
2221 time64_t retry_window;
2222
2223 data = btintel_pcie_get_recovery(pdev, dev);
2224 if (!data)
2225 return;
2226
2227 retry_window = ktime_get_boottime_seconds() - data->last_error;
2228 if (data->count == 0) {
2229 data->last_error = ktime_get_boottime_seconds();
2230 data->count++;
2231 } else if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
2232 data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) {
2233 data->count++;
2234 } else if (retry_window > BTINTEL_PCIE_RESET_WINDOW_SECS) {
2235 data->last_error = 0;
2236 data->count = 0;
2237 }
2238 }
2239
2240 static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data);
2241
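/* Worker that recovers a wedged controller: tear down the hci_dev, issue a
 * function level reset via pci_reset_function(), re-enable the device and
 * register a fresh hci_dev.
 */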
2242 static void btintel_pcie_removal_work(struct work_struct *wk)
2243 {
2244 struct btintel_pcie_removal *removal =
2245 container_of(wk, struct btintel_pcie_removal, work);
2246 struct pci_dev *pdev = removal->pdev;
2247 struct btintel_pcie_data *data;
2248 int err;
2249
2250 pci_lock_rescan_remove();
2251
2252 if (!pdev->bus)
2253 goto error;
2254
2255 data = pci_get_drvdata(pdev);
2256
2257 btintel_pcie_disable_interrupts(data);
2258 btintel_pcie_synchronize_irqs(data);
2259
2260 flush_work(&data->rx_work);
2261
2262 bt_dev_dbg(data->hdev, "Release bluetooth interface");
2263 btintel_pcie_release_hdev(data);
2264
2265 err = pci_reset_function(pdev);
2266 if (err) {
2267 BT_ERR("Failed resetting the pcie device (%d)", err);
2268 goto error;
2269 }
2270
2271 btintel_pcie_enable_interrupts(data);
2272 btintel_pcie_config_msix(data);
2273
2274 err = btintel_pcie_enable_bt(data);
2275 if (err) {
2276 BT_ERR("Failed to enable bluetooth hardware after reset (%d)",
2277 err);
2278 goto error;
2279 }
2280
2281 btintel_pcie_reset_ia(data);
2282 btintel_pcie_start_rx(data);
2283 data->flags = 0;
2284
2285 err = btintel_pcie_setup_hdev(data);
2286 if (err) {
2287 BT_ERR("Failed registering hdev (%d)", err);
2288 goto error;
2289 }
2290 error:
2291 pci_dev_put(pdev);
2292 pci_unlock_rescan_remove();
2293 kfree(removal);
2294 }
2295
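/* Schedule the removal/recovery worker. A no-op if setup has not completed
 * or a recovery is already in progress.
 */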
2296 static void btintel_pcie_reset(struct hci_dev *hdev)
2297 {
2298 struct btintel_pcie_removal *removal;
2299 struct btintel_pcie_data *data;
2300
2301 data = hci_get_drvdata(hdev);
2302
2303 if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags))
2304 return;
2305
2306 if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags))
2307 return;
2308
2309 removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
2310 if (!removal)
2311 return;
2312
2313 removal->pdev = data->pdev;
2314 INIT_WORK(&removal->work, btintel_pcie_removal_work);
2315 pci_dev_get(removal->pdev);
2316 schedule_work(&removal->work);
2317 }
2318
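/* Firmware exception handler: give up once the retry budget within the reset
 * window is exhausted, otherwise bump the recovery count and trigger a reset.
 */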
2319 static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
2320 {
2321 struct btintel_pcie_dev_recovery *data;
2322 struct btintel_pcie_data *dev_data = hci_get_drvdata(hdev);
2323 struct pci_dev *pdev = dev_data->pdev;
2324 time64_t retry_window;
2325
2326 if (code == 0x13) {
2327 bt_dev_err(hdev, "Encountered top exception");
2328 return;
2329 }
2330
2331 data = btintel_pcie_get_recovery(pdev, &hdev->dev);
2332 if (!data)
2333 return;
2334
2335 retry_window = ktime_get_boottime_seconds() - data->last_error;
2336
2337 if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
2338 data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) {
2339 bt_dev_err(hdev, "Exhausted maximum: %d recovery attempts: %d",
2340 BTINTEL_PCIE_FLR_MAX_RETRY, data->count);
2341 bt_dev_dbg(hdev, "Boot time: %lld seconds",
2342 ktime_get_boottime_seconds());
2343 bt_dev_dbg(hdev, "last error at: %lld seconds",
2344 data->last_error);
2345 return;
2346 }
2347 btintel_pcie_inc_recovery_count(pdev, &hdev->dev);
2348 btintel_pcie_reset(hdev);
2349 }
2350
2351 static bool btintel_pcie_wakeup(struct hci_dev *hdev)
2352 {
2353 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
2354
2355 return device_may_wakeup(&data->pdev->dev);
2356 }
2357
2358 static const struct {
2359 u16 opcode;
2360 const char *desc;
2361 } btintel_pcie_hci_drv_supported_commands[] = {
2362 /* Common commands */
2363 { HCI_DRV_OP_READ_INFO, "Read Info" },
2364 };
2365
2366 static int btintel_pcie_hci_drv_read_info(struct hci_dev *hdev, void *data,
2367 u16 data_len)
2368 {
2369 struct hci_drv_rp_read_info *rp;
2370 size_t rp_size;
2371 int err, i;
2372 u16 opcode, num_supported_commands =
2373 ARRAY_SIZE(btintel_pcie_hci_drv_supported_commands);
2374
2375 rp_size = sizeof(*rp) + num_supported_commands * 2;
2376
2377 rp = kmalloc(rp_size, GFP_KERNEL);
2378 if (!rp)
2379 return -ENOMEM;
2380
2381 strscpy_pad(rp->driver_name, KBUILD_MODNAME);
2382
2383 rp->num_supported_commands = cpu_to_le16(num_supported_commands);
2384 for (i = 0; i < num_supported_commands; i++) {
2385 opcode = btintel_pcie_hci_drv_supported_commands[i].opcode;
2386 bt_dev_dbg(hdev,
2387 "Supported HCI Drv command (0x%02x|0x%04x): %s",
2388 hci_opcode_ogf(opcode),
2389 hci_opcode_ocf(opcode),
2390 btintel_pcie_hci_drv_supported_commands[i].desc);
2391 rp->supported_commands[i] = cpu_to_le16(opcode);
2392 }
2393
2394 err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
2395 HCI_DRV_STATUS_SUCCESS,
2396 rp, rp_size);
2397
2398 kfree(rp);
2399 return err;
2400 }
2401
2402 static const struct hci_drv_handler btintel_pcie_hci_drv_common_handlers[] = {
2403 { btintel_pcie_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
2404 };
2405
2406 static const struct hci_drv_handler btintel_pcie_hci_drv_specific_handlers[] = {};
2407
2408 static struct hci_drv btintel_pcie_hci_drv = {
2409 .common_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_common_handlers),
2410 .common_handlers = btintel_pcie_hci_drv_common_handlers,
2411 .specific_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_specific_handlers),
2412 .specific_handlers = btintel_pcie_hci_drv_specific_handlers,
2413 };
2414
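/* Allocate and register the hci_dev, wiring up the btintel/btintel_pcie
 * callbacks used by the HCI core.
 */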
2415 static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
2416 {
2417 int err;
2418 struct hci_dev *hdev;
2419
2420 hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
2421 if (!hdev)
2422 return -ENOMEM;
2423
2424 hdev->bus = HCI_PCI;
2425 hci_set_drvdata(hdev, data);
2426
2427 data->hdev = hdev;
2428 SET_HCIDEV_DEV(hdev, &data->pdev->dev);
2429
2430 hdev->manufacturer = 2;
2431 hdev->open = btintel_pcie_open;
2432 hdev->close = btintel_pcie_close;
2433 hdev->send = btintel_pcie_send_frame;
2434 hdev->setup = btintel_pcie_setup;
2435 hdev->shutdown = btintel_shutdown_combined;
2436 hdev->hw_error = btintel_pcie_hw_error;
2437 hdev->set_diag = btintel_set_diag;
2438 hdev->set_bdaddr = btintel_set_bdaddr;
2439 hdev->reset = btintel_pcie_reset;
2440 hdev->wakeup = btintel_pcie_wakeup;
2441 hdev->hci_drv = &btintel_pcie_hci_drv;
2442
2443 err = hci_register_dev(hdev);
2444 if (err < 0) {
2445 BT_ERR("Failed to register to hdev (%d)", err);
2446 goto exit_error;
2447 }
2448
2449 data->dmp_hdr.driver_name = KBUILD_MODNAME;
2450 return 0;
2451
2452 exit_error:
2453 hci_free_dev(hdev);
2454 return err;
2455 }
2456
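/* PCI probe: allocate driver data, configure the PCIe device and MSI-X,
 * allocate the DMA resources, enable the Bluetooth function, start RX and
 * register the hci_dev.
 */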
2457 static int btintel_pcie_probe(struct pci_dev *pdev,
2458 const struct pci_device_id *ent)
2459 {
2460 int err;
2461 struct btintel_pcie_data *data;
2462
2463 if (!pdev)
2464 return -ENODEV;
2465
2466 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
2467 if (!data)
2468 return -ENOMEM;
2469
2470 data->pdev = pdev;
2471
2472 spin_lock_init(&data->irq_lock);
2473 spin_lock_init(&data->hci_rx_lock);
2474
2475 init_waitqueue_head(&data->gp0_wait_q);
2476 data->gp0_received = false;
2477
2478 init_waitqueue_head(&data->tx_wait_q);
2479 data->tx_wait_done = false;
2480
2481 data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
2482 if (!data->workqueue)
2483 return -ENOMEM;
2484
2485 skb_queue_head_init(&data->rx_skb_q);
2486 INIT_WORK(&data->rx_work, btintel_pcie_rx_work);
2487
2488 data->boot_stage_cache = 0x00;
2489 data->img_resp_cache = 0x00;
2490
2491 err = btintel_pcie_config_pcie(pdev, data);
2492 if (err)
2493 goto exit_error;
2494
2495 pci_set_drvdata(pdev, data);
2496
2497 err = btintel_pcie_alloc(data);
2498 if (err)
2499 goto exit_error;
2500
2501 err = btintel_pcie_enable_bt(data);
2502 if (err)
2503 goto exit_error;
2504
2505 /* CNV information (CNVi and CNVr) is in CSR */
2506 data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);
2507
2508 data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);
2509
2510 err = btintel_pcie_start_rx(data);
2511 if (err)
2512 goto exit_error;
2513
2514 err = btintel_pcie_setup_hdev(data);
2515 if (err)
2516 goto exit_error;
2517
2518 bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
2519 data->cnvr);
2520 return 0;
2521
2522 exit_error:
2523 /* reset device before exit */
2524 btintel_pcie_reset_bt(data);
2525
2526 pci_clear_master(pdev);
2527
2528 pci_set_drvdata(pdev, NULL);
2529
2530 return err;
2531 }
2532
2533 static void btintel_pcie_remove(struct pci_dev *pdev)
2534 {
2535 struct btintel_pcie_data *data;
2536
2537 data = pci_get_drvdata(pdev);
2538
2539 btintel_pcie_disable_interrupts(data);
2540
2541 btintel_pcie_synchronize_irqs(data);
2542
2543 flush_work(&data->rx_work);
2544
2545 btintel_pcie_reset_bt(data);
2546 for (int i = 0; i < data->alloc_vecs; i++) {
2547 struct msix_entry *msix_entry;
2548
2549 msix_entry = &data->msix_entries[i];
2550 free_irq(msix_entry->vector, msix_entry);
2551 }
2552
2553 pci_free_irq_vectors(pdev);
2554
2555 btintel_pcie_release_hdev(data);
2556
2557 destroy_workqueue(data->workqueue);
2558
2559 btintel_pcie_free(data);
2560
2561 pci_clear_master(pdev);
2562
2563 pci_set_drvdata(pdev, NULL);
2564 }
2565
2566 #ifdef CONFIG_DEV_COREDUMP
2567 static void btintel_pcie_coredump(struct device *dev)
2568 {
2569 struct pci_dev *pdev = to_pci_dev(dev);
2570 struct btintel_pcie_data *data = pci_get_drvdata(pdev);
2571
2572 if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
2573 return;
2574
2575 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
2576 queue_work(data->workqueue, &data->rx_work);
2577 }
2578 #endif
2579
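/* Request a D-state transition and wait for the alive (GP0) interrupt,
 * retrying a few times because the interrupt can be missed by hardware; the
 * cached device state is checked as a fallback before each retry.
 */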
2580 static int btintel_pcie_set_dxstate(struct btintel_pcie_data *data, u32 dxstate)
2581 {
2582 int retry = 0, status;
2583 u32 dx_intr_timeout_ms = 200;
2584
2585 do {
2586 data->gp0_received = false;
2587
2588 btintel_pcie_wr_sleep_cntrl(data, dxstate);
2589
2590 status = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
2591 msecs_to_jiffies(dx_intr_timeout_ms));
2592
2593 if (status)
2594 return 0;
2595
2596 bt_dev_warn(data->hdev,
2597 "Timeout (%u ms) on alive interrupt for D%d entry, retry count %d",
2598 dx_intr_timeout_ms, dxstate, retry);
2599
2600 /* clear gp0 cause */
2601 btintel_pcie_clr_reg_bits(data,
2602 BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES,
2603 BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0);
2604
2605 /* A hardware bug may cause the alive interrupt to be missed.
2606 * Check if the controller reached the expected state and retry
2607 * the operation only if it hasn't.
2608 */
2609 if (dxstate == BTINTEL_PCIE_STATE_D0) {
2610 if (btintel_pcie_in_d0(data))
2611 return 0;
2612 } else {
2613 if (btintel_pcie_in_d3(data))
2614 return 0;
2615 }
2616
2617 } while (++retry < BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES);
2618
2619 return -EBUSY;
2620 }
2621
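/* Move the controller to D3 hot (suspend) or D3 cold (hibernate/poweroff)
 * before the platform enters the target sleep state.
 */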
2622 static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
2623 {
2624 struct pci_dev *pdev = to_pci_dev(dev);
2625 struct btintel_pcie_data *data;
2626 ktime_t start;
2627 u32 dxstate;
2628 int err;
2629
2630 data = pci_get_drvdata(pdev);
2631
2632 dxstate = (mesg.event == PM_EVENT_SUSPEND ?
2633 BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);
2634
2635 data->pm_sx_event = mesg.event;
2636
2637 start = ktime_get();
2638
2639 /* Refer: 6.4.11.7 -> Platform power management */
2640 err = btintel_pcie_set_dxstate(data, dxstate);
2641
2642 if (err)
2643 return err;
2644
2645 bt_dev_dbg(data->hdev,
2646 "device entered into d3 state from d0 in %lld us",
2647 ktime_to_us(ktime_get() - start));
2648 return err;
2649 }
2650
2651 static int btintel_pcie_suspend(struct device *dev)
2652 {
2653 return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
2654 }
2655
2656 static int btintel_pcie_hibernate(struct device *dev)
2657 {
2658 return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
2659 }
2660
2661 static int btintel_pcie_freeze(struct device *dev)
2662 {
2663 return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
2664 }
2665
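/* Bring the controller back to D0. After freeze/hibernate the firmware is
 * lost, so a function level reset and firmware re-download is scheduled
 * instead; the same recovery path is used if the controller resumes in an
 * error state.
 */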
2666 static int btintel_pcie_resume(struct device *dev)
2667 {
2668 struct pci_dev *pdev = to_pci_dev(dev);
2669 struct btintel_pcie_data *data;
2670 ktime_t start;
2671 int err;
2672
2673 data = pci_get_drvdata(pdev);
2674 data->gp0_received = false;
2675
2676 start = ktime_get();
2677
2678 /* When the system enters S4 (hibernate) mode, the Bluetooth device loses
2679 * power, which results in the erasure of its loaded firmware.
2680 * Consequently, function level reset (flr) is required on system
2681 * resume to bring the controller back into an operational state by
2682 * initiating a new firmware download.
2683 */
2684
2685 if (data->pm_sx_event == PM_EVENT_FREEZE ||
2686 data->pm_sx_event == PM_EVENT_HIBERNATE) {
2687 set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
2688 btintel_pcie_reset(data->hdev);
2689 return 0;
2690 }
2691
2692 /* Refer: 6.4.11.7 -> Platform power management */
2693 err = btintel_pcie_set_dxstate(data, BTINTEL_PCIE_STATE_D0);
2694
2695 if (err == 0) {
2696 bt_dev_dbg(data->hdev,
2697 "device entered into d0 state from d3 in %lld us",
2698 ktime_to_us(ktime_get() - start));
2699 return err;
2700 }
2701
2702 /* Trigger a function level reset if the controller is in an error
2703 * state during resume() to bring the controller back to
2704 * operational mode
2705 */
2706
2707 data->boot_stage_cache = btintel_pcie_rd_reg32(data,
2708 BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
2709 if (btintel_pcie_in_error(data) ||
2710 btintel_pcie_in_device_halt(data)) {
2711 bt_dev_err(data->hdev, "Controller in error state for D0 entry");
2712 if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS,
2713 &data->flags)) {
2714 data->dmp_hdr.trigger_reason =
2715 BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
2716 queue_work(data->workqueue, &data->rx_work);
2717 }
2718 set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
2719 btintel_pcie_reset(data->hdev);
2720 }
2721 return err;
2722 }
2723
2724 static const struct dev_pm_ops btintel_pcie_pm_ops = {
2725 .suspend = btintel_pcie_suspend,
2726 .resume = btintel_pcie_resume,
2727 .freeze = btintel_pcie_freeze,
2728 .thaw = btintel_pcie_resume,
2729 .poweroff = btintel_pcie_hibernate,
2730 .restore = btintel_pcie_resume,
2731 };
2732
2733 static struct pci_driver btintel_pcie_driver = {
2734 .name = KBUILD_MODNAME,
2735 .id_table = btintel_pcie_table,
2736 .probe = btintel_pcie_probe,
2737 .remove = btintel_pcie_remove,
2738 .driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
2739 #ifdef CONFIG_DEV_COREDUMP
2740 .driver.coredump = btintel_pcie_coredump
2741 #endif
2742 };
2743
2744 static int __init btintel_pcie_init(void)
2745 {
2746 return pci_register_driver(&btintel_pcie_driver);
2747 }
2748
2749 static void __exit btintel_pcie_exit(void)
2750 {
2751 pci_unregister_driver(&btintel_pcie_driver);
2752 btintel_pcie_free_restart_list();
2753 }
2754
2755 module_init(btintel_pcie_init);
2756 module_exit(btintel_pcie_exit);
2757
2758 MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
2759 MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
2760 MODULE_VERSION(VERSION);
2761 MODULE_LICENSE("GPL");
2762