Lines Matching +full:num +full:- +full:rxq

1 // SPDX-License-Identifier: GPL-2.0-or-later
43 /* Scorpious, Panther Lake-H484 */
45 /* Scorpious, Panther Lake-H404 */
106 * @ver: For Driver-FW compatibility
108 * @num_buf: Num of allocated debug bufs
159 data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT; in btintel_pcie_setup_dbgc()
160 data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count, in btintel_pcie_setup_dbgc()
162 if (!data->dbgc.bufs) in btintel_pcie_setup_dbgc()
163 return -ENOMEM; in btintel_pcie_setup_dbgc()
165 data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev, in btintel_pcie_setup_dbgc()
166 data->dbgc.count * in btintel_pcie_setup_dbgc()
168 &data->dbgc.buf_p_addr, in btintel_pcie_setup_dbgc()
170 if (!data->dbgc.buf_v_addr) in btintel_pcie_setup_dbgc()
171 return -ENOMEM; in btintel_pcie_setup_dbgc()
173 data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev, in btintel_pcie_setup_dbgc()
175 &data->dbgc.frag_p_addr, in btintel_pcie_setup_dbgc()
177 if (!data->dbgc.frag_v_addr) in btintel_pcie_setup_dbgc()
178 return -ENOMEM; in btintel_pcie_setup_dbgc()
180 data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt); in btintel_pcie_setup_dbgc()
187 for (i = 0; i < data->dbgc.count; i++) { in btintel_pcie_setup_dbgc()
188 buf = &data->dbgc.bufs[i]; in btintel_pcie_setup_dbgc()
189 buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE; in btintel_pcie_setup_dbgc()
190 buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE; in btintel_pcie_setup_dbgc()
191 db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr); in btintel_pcie_setup_dbgc()
192 db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr); in btintel_pcie_setup_dbgc()
196 memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag)); in btintel_pcie_setup_dbgc()
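Note: btintel_pcie_setup_dbgc() above carves a single coherent DMA allocation into BTINTEL_PCIE_DBGC_BUFFER_COUNT fixed-size buffers and records each buffer's split 64-bit bus address in a fragment table. A minimal user-space sketch of that carving arithmetic; the count, size and base address here are stand-ins, not the driver's real values.

    #include <stdint.h>
    #include <stdio.h>

    #define DBGC_COUNT    2        /* stand-in for BTINTEL_PCIE_DBGC_BUFFER_COUNT */
    #define DBGC_BUF_SIZE 0x2000   /* stand-in for BTINTEL_PCIE_DBGC_BUFFER_SIZE  */

    int main(void)
    {
        /* pretend bus address returned by dmam_alloc_coherent() */
        uint64_t base = 0x100000000ULL;

        for (int i = 0; i < DBGC_COUNT; i++) {
            uint64_t p = base + (uint64_t)i * DBGC_BUF_SIZE;
            /* lower_32_bits()/upper_32_bits() split, as in the fragment table */
            printf("buf[%d]: lsb=0x%08x msb=0x%08x\n",
                   i, (uint32_t)p, (uint32_t)(p >> 32));
        }
        return 0;
    }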
203 bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u", in ipc_print_ia_ring()
204 queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ", in ipc_print_ia_ring()
205 ia->tr_hia[queue_num], ia->tr_tia[queue_num], in ipc_print_ia_ring()
206 ia->cr_hia[queue_num], ia->cr_tia[queue_num]); in ipc_print_ia_ring()
212 bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x", in ipc_print_urbd1()
213 index, urbd1->frbd_tag, urbd1->status, urbd1->fixed); in ipc_print_urbd1()
218 u8 queue = entry->entry; in btintel_pcie_get_data()
219 struct msix_entry *entries = entry - queue; in btintel_pcie_get_data()
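Note: btintel_pcie_get_data() above exploits the fact that each msix_entry records its own index: subtracting entry->entry from the entry pointer yields the base of the msix_entries[] array, from which the enclosing private struct can be recovered. A self-contained sketch of the same trick, with stand-in types and a local container_of():

    #include <stddef.h>
    #include <stdio.h>

    struct msix_entry { unsigned short vector, entry; };
    struct priv { int cookie; struct msix_entry msix_entries[4]; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct priv d = { .cookie = 42 };
        for (unsigned short i = 0; i < 4; i++)
            d.msix_entries[i].entry = i;

        struct msix_entry *entry = &d.msix_entries[3];   /* what the ISR gets */
        struct msix_entry *base  = entry - entry->entry; /* array base        */
        struct priv *back = container_of(base, struct priv, msix_entries);

        printf("cookie=%d\n", back->cookie);             /* prints 42 */
        return 0;
    }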
224 /* Set the doorbell for TXQ to notify the device that @index (actually index-1)
246 tfd = &txq->tfds[tfd_index]; in btintel_pcie_prepare_tx()
249 buf = &txq->bufs[tfd_index]; in btintel_pcie_prepare_tx()
251 tfd->size = skb->len; in btintel_pcie_prepare_tx()
252 tfd->addr = buf->data_p_addr; in btintel_pcie_prepare_tx()
255 memcpy(buf->data, skb->data, tfd->size); in btintel_pcie_prepare_tx()
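Note: btintel_pcie_prepare_tx() above uses a pre-mapped bounce buffer per TFD: the packet is copied into coherent memory whose bus address is already known, so the submit path needs no per-packet DMA mapping before the doorbell is rung. A stand-in sketch of the descriptor fill:

    #include <stdint.h>
    #include <string.h>

    struct tfd      { uint64_t addr; uint32_t size; }; /* device-visible descriptor */
    struct data_buf { uint64_t data_p_addr; uint8_t *data; };

    static void prepare_tx(struct tfd *tfd, struct data_buf *buf,
                           const void *pkt, uint32_t len)
    {
        tfd->size = len;              /* device reads this many bytes...     */
        tfd->addr = buf->data_p_addr; /* ...from this pre-mapped bus address */
        memcpy(buf->data, pkt, len);  /* bounce copy instead of dma mapping  */
    }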
270		snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ----"); in btintel_pcie_dump_debug_registers()
278 data->boot_stage_cache = reg; in btintel_pcie_dump_debug_registers()
331 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_dump_debug_registers()
332 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_dump_debug_registers()
333 snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia); in btintel_pcie_dump_debug_registers()
337 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_dump_debug_registers()
338 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_dump_debug_registers()
342 snprintf(buf, sizeof(buf), "--------------------------------"); in btintel_pcie_dump_debug_registers()
355 struct hci_dev *hdev = data->hdev; in btintel_pcie_send_sync()
357 struct txq *txq = &data->txq; in btintel_pcie_send_sync()
359 tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_send_sync()
361 if (tfd_index > txq->count) in btintel_pcie_send_sync()
362 return -ERANGE; in btintel_pcie_send_sync()
371 data->gp0_received = false; in btintel_pcie_send_sync()
372 old_ctxt = data->alive_intr_ctxt; in btintel_pcie_send_sync()
373 data->alive_intr_ctxt = in btintel_pcie_send_sync()
376 bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s", in btintel_pcie_send_sync()
378 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt)); in btintel_pcie_send_sync()
389 tfd_index = (tfd_index + 1) % txq->count; in btintel_pcie_send_sync()
390 data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index; in btintel_pcie_send_sync()
393 data->tx_wait_done = false; in btintel_pcie_send_sync()
398	/* Wait for the completion interrupt - URBD0 */ in btintel_pcie_send_sync()
399 ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done, in btintel_pcie_send_sync()
402 bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion", in btintel_pcie_send_sync()
404 btintel_pcie_dump_debug_registers(data->hdev); in btintel_pcie_send_sync()
405 return -ETIME; in btintel_pcie_send_sync()
409 ret = wait_event_timeout(data->gp0_wait_q, in btintel_pcie_send_sync()
410 data->gp0_received, in btintel_pcie_send_sync()
413 hdev->stat.err_tx++; in btintel_pcie_send_sync()
416 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt)); in btintel_pcie_send_sync()
417 return -ETIME; in btintel_pcie_send_sync()
423 /* Set the doorbell for RXQ to notify the device that @index (actually index-1)
439 static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index) in btintel_pcie_prepare_rx() argument
445 buf = &rxq->bufs[frbd_index]; in btintel_pcie_prepare_rx()
447 frbd = &rxq->frbds[frbd_index]; in btintel_pcie_prepare_rx()
451 frbd->tag = frbd_index; in btintel_pcie_prepare_rx()
452 frbd->addr = buf->data_p_addr; in btintel_pcie_prepare_rx()
458 struct rxq *rxq = &data->rxq; in btintel_pcie_submit_rx() local
460 frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_submit_rx()
462 if (frbd_index > rxq->count) in btintel_pcie_submit_rx()
463 return -ERANGE; in btintel_pcie_submit_rx()
468 btintel_pcie_prepare_rx(rxq, frbd_index); in btintel_pcie_submit_rx()
470 frbd_index = (frbd_index + 1) % rxq->count; in btintel_pcie_submit_rx()
471 data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index; in btintel_pcie_submit_rx()
472 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM); in btintel_pcie_submit_rx()
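Note: the RX producer path above tags each free buffer (FRBD) with its own ring index; the firmware echoes that tag back in the completion (urbd1->frbd_tag), so a completion can be matched to its buffer even if it arrives out of posting order. A stand-in sketch:

    #include <stdint.h>

    struct frbd     { uint32_t tag; uint64_t addr; };
    struct data_buf { uint64_t data_p_addr; uint8_t *data; };

    /* Post one free RX buffer; returns the new producer index (tr_hia). */
    static uint16_t submit_rx(struct frbd *frbds, const struct data_buf *bufs,
                              uint16_t tr_hia, uint16_t count)
    {
        struct frbd *f = &frbds[tr_hia];

        f->tag  = tr_hia;                   /* echoed back in urbd1->frbd_tag */
        f->addr = bufs[tr_hia].data_p_addr; /* bus address of the buffer      */
        return (tr_hia + 1) % count;        /* wrap at ring size              */
    }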
483 struct rxq *rxq = &data->rxq; in btintel_pcie_start_rx() local
485 /* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to overcome the in btintel_pcie_start_rx()
489 for (i = 0; i < rxq->count - 3; i++) { in btintel_pcie_start_rx()
500 memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); in btintel_pcie_reset_ia()
501 memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); in btintel_pcie_reset_ia()
502 memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); in btintel_pcie_reset_ia()
503 memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES); in btintel_pcie_reset_ia()
526 } while (--retry > 0); in btintel_pcie_reset_bt()
539 bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg); in btintel_pcie_reset_bt()
546 return reg == 0 ? 0 : -ENODEV; in btintel_pcie_reset_bt()
584 } while (--retry > 0); in btintel_pcie_get_mac_access()
586 return -ETIME; in btintel_pcie_get_mac_access()
613 tlv->type = type; in btintel_pcie_copy_tlv()
614 tlv->len = size; in btintel_pcie_copy_tlv()
615 memcpy(tlv->val, data, tlv->len); in btintel_pcie_copy_tlv()
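Note: btintel_pcie_copy_tlv() above appends one type-length-value record and is chained by assigning its return value back to the write cursor. A self-contained sketch; the one-byte type/len widths match btintel's struct intel_tlv, but treat the exact layout as an assumption here:

    #include <stdint.h>
    #include <string.h>

    struct tlv { uint8_t type; uint8_t len; uint8_t val[]; };

    /* Append one TLV record; returns a cursor just past it. */
    static uint8_t *copy_tlv(uint8_t *dest, uint8_t type,
                             const void *data, uint8_t size)
    {
        struct tlv *t = (struct tlv *)dest;

        t->type = type;
        t->len  = size;
        memcpy(t->val, data, size);
        return dest + sizeof(*t) + size;  /* p = copy_tlv(p, ...) chains */
    }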
622 struct btintel_pcie_dbgc *dbgc = &data->dbgc; in btintel_pcie_read_dram_buffers()
623 struct hci_dev *hdev = data->hdev; in btintel_pcie_read_dram_buffers()
634 return -EOPNOTSUPP; in btintel_pcie_read_dram_buffers()
641 if (buf_idx > dbgc->count) { in btintel_pcie_read_dram_buffers()
643 return -EINVAL; in btintel_pcie_read_dram_buffers()
648 data->dmp_hdr.write_ptr = prev_size + offset; in btintel_pcie_read_dram_buffers()
650 return -EINVAL; in btintel_pcie_read_dram_buffers()
654 data->dmp_hdr.driver_name); in btintel_pcie_read_dram_buffers()
658 snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d", in btintel_pcie_read_dram_buffers()
664 2000 + (data->dmp_hdr.fw_timestamp >> 8), in btintel_pcie_read_dram_buffers()
665 data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type, in btintel_pcie_read_dram_buffers()
666 data->dmp_hdr.fw_build_num); in btintel_pcie_read_dram_buffers()
668 data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) + in btintel_pcie_read_dram_buffers()
669 sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) + in btintel_pcie_read_dram_buffers()
670 sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) + in btintel_pcie_read_dram_buffers()
671 sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) + in btintel_pcie_read_dram_buffers()
672 sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) + in btintel_pcie_read_dram_buffers()
673 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) + in btintel_pcie_read_dram_buffers()
674 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) + in btintel_pcie_read_dram_buffers()
681 * sizeof(u32) - signature in btintel_pcie_read_dram_buffers()
682 * sizeof(data_len) - to store tlv data size in btintel_pcie_read_dram_buffers()
683 * data_len - TLV data in btintel_pcie_read_dram_buffers()
689 dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count; in btintel_pcie_read_dram_buffers()
693 return -ENOMEM; in btintel_pcie_read_dram_buffers()
708 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt, in btintel_pcie_read_dram_buffers()
709 sizeof(data->dmp_hdr.cnvi_bt)); in btintel_pcie_read_dram_buffers()
710 p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr, in btintel_pcie_read_dram_buffers()
711 sizeof(data->dmp_hdr.write_ptr)); in btintel_pcie_read_dram_buffers()
712 p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr, in btintel_pcie_read_dram_buffers()
713 sizeof(data->dmp_hdr.wrap_ctr)); in btintel_pcie_read_dram_buffers()
715 data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data, in btintel_pcie_read_dram_buffers()
718 p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason, in btintel_pcie_read_dram_buffers()
719 sizeof(data->dmp_hdr.trigger_reason)); in btintel_pcie_read_dram_buffers()
720 p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1, in btintel_pcie_read_dram_buffers()
721 sizeof(data->dmp_hdr.fw_git_sha1)); in btintel_pcie_read_dram_buffers()
722 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top, in btintel_pcie_read_dram_buffers()
723 sizeof(data->dmp_hdr.cnvr_top)); in btintel_pcie_read_dram_buffers()
724 p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top, in btintel_pcie_read_dram_buffers()
725 sizeof(data->dmp_hdr.cnvi_top)); in btintel_pcie_read_dram_buffers()
727 memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE); in btintel_pcie_read_dram_buffers()
728 dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL); in btintel_pcie_read_dram_buffers()
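Note: the sizing comment above (signature, plus a field holding the TLV data size, plus the TLV records, plus one debug buffer per DBGC fragment) reduces to simple arithmetic. A sketch, with the field widths taken as assumptions:

    #include <stddef.h>
    #include <stdint.h>

    /* Total coredump size per the layout comment in the listing. */
    static size_t dump_size(size_t tlv_data_len, size_t nbufs, size_t buf_size)
    {
        return sizeof(uint32_t)   /* signature                   */
             + sizeof(uint32_t)   /* field storing tlv_data_len  */
             + tlv_data_len       /* the TLV records             */
             + nbufs * buf_size;  /* DBGC buffer payload         */
    }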
752 * BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
762 data->gp0_received = false; in btintel_pcie_enable_bt()
766 data->ci_p_addr & 0xffffffff); in btintel_pcie_enable_bt()
768 (u64)data->ci_p_addr >> 32); in btintel_pcie_enable_bt()
770	/* Reset the cached value of the boot stage. It is updated by the MSI-X in btintel_pcie_enable_bt()
773 data->boot_stage_cache = 0x0; in btintel_pcie_enable_bt()
794 data->alive_intr_ctxt = BTINTEL_PCIE_ROM; in btintel_pcie_enable_bt()
795 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received, in btintel_pcie_enable_bt()
798 return -ETIME; in btintel_pcie_enable_bt()
801 if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM) in btintel_pcie_enable_bt()
802 return -ENODEV; in btintel_pcie_enable_bt()
809 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW; in btintel_pcie_in_op()
814 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML && in btintel_pcie_in_iml()
815 !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW); in btintel_pcie_in_iml()
820 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY; in btintel_pcie_in_d3()
825 return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY); in btintel_pcie_in_d0()
831 bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate); in btintel_pcie_wr_sleep_cntrl()
844 bt_dev_err(data->hdev, "Failed to get mac access %d", err); in btintel_pcie_read_device_mem()
848 for (; len > 0; len -= 4, dev_addr += 4, val++) in btintel_pcie_read_device_mem()
858 return (data->boot_stage_cache & in btintel_pcie_in_lockdown()
860 (data->boot_stage_cache & in btintel_pcie_in_lockdown()
866 return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) || in btintel_pcie_in_error()
867 (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER); in btintel_pcie_in_error()
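Note: the helpers above are pure predicates over the cached boot-stage CSR; note how IML is only reported while operational firmware is not also up. A stand-in sketch with made-up bit positions:

    #include <stdbool.h>
    #include <stdint.h>

    #define STAGE_ROM  (1u << 0)  /* bit positions are stand-ins */
    #define STAGE_IML  (1u << 1)
    #define STAGE_OPFW (1u << 2)

    static bool in_op(uint32_t stage)  { return stage & STAGE_OPFW; }

    static bool in_iml(uint32_t stage)
    {
        /* IML counts only while operational FW isn't running yet */
        return (stage & STAGE_IML) && !(stage & STAGE_OPFW);
    }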
872 bt_dev_err(data->hdev, "Received gp1 mailbox interrupt"); in btintel_pcie_msix_gp1_handler()
873 btintel_pcie_dump_debug_registers(data->hdev); in btintel_pcie_msix_gp1_handler()
876 /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
889 if (reg != data->boot_stage_cache) in btintel_pcie_msix_gp0_handler()
890 data->boot_stage_cache = reg; in btintel_pcie_msix_gp0_handler()
892 bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x", in btintel_pcie_msix_gp0_handler()
893 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt), in btintel_pcie_msix_gp0_handler()
894 data->boot_stage_cache, reg); in btintel_pcie_msix_gp0_handler()
896 if (reg != data->img_resp_cache) in btintel_pcie_msix_gp0_handler()
897 data->img_resp_cache = reg; in btintel_pcie_msix_gp0_handler()
900 bt_dev_err(data->hdev, "Controller in error state"); in btintel_pcie_msix_gp0_handler()
901 btintel_pcie_dump_debug_registers(data->hdev); in btintel_pcie_msix_gp0_handler()
906 bt_dev_err(data->hdev, "Controller in lockdown state"); in btintel_pcie_msix_gp0_handler()
907 btintel_pcie_dump_debug_registers(data->hdev); in btintel_pcie_msix_gp0_handler()
911 data->gp0_received = true; in btintel_pcie_msix_gp0_handler()
913 old_ctxt = data->alive_intr_ctxt; in btintel_pcie_msix_gp0_handler()
917 switch (data->alive_intr_ctxt) { in btintel_pcie_msix_gp0_handler()
919 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL; in btintel_pcie_msix_gp0_handler()
937 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL; in btintel_pcie_msix_gp0_handler()
942 if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) { in btintel_pcie_msix_gp0_handler()
943 btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0); in btintel_pcie_msix_gp0_handler()
944 data->alive_intr_ctxt = BTINTEL_PCIE_D0; in btintel_pcie_msix_gp0_handler()
949 data->alive_intr_ctxt = BTINTEL_PCIE_D3; in btintel_pcie_msix_gp0_handler()
956 data->alive_intr_ctxt = BTINTEL_PCIE_D0; in btintel_pcie_msix_gp0_handler()
963 data->alive_intr_ctxt = BTINTEL_PCIE_D0; in btintel_pcie_msix_gp0_handler()
968 bt_dev_err(data->hdev, "Unknown state: 0x%2.2x", in btintel_pcie_msix_gp0_handler()
969 data->alive_intr_ctxt); in btintel_pcie_msix_gp0_handler()
979 bt_dev_dbg(data->hdev, "wake up gp0 wait_q"); in btintel_pcie_msix_gp0_handler()
980 wake_up(&data->gp0_wait_q); in btintel_pcie_msix_gp0_handler()
983 if (old_ctxt != data->alive_intr_ctxt) in btintel_pcie_msix_gp0_handler()
984 bt_dev_dbg(data->hdev, "alive context changed: %s -> %s", in btintel_pcie_msix_gp0_handler()
986 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt)); in btintel_pcie_msix_gp0_handler()
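Note: the gp0 handler above is a small state machine over data->alive_intr_ctxt; each interrupt both refreshes the cached boot stage and may advance the context. One possible reading of the transitions visible in this listing, as a stand-in sketch (the state names mirror the BTINTEL_PCIE_* values, but the full table has more arcs than shown here):

    /* Stand-in alive-context states. */
    enum alive_ctx { CTX_ROM, CTX_FW_DL, CTX_D0, CTX_D3, CTX_HCI_RESET };

    static enum alive_ctx gp0_advance(enum alive_ctx cur)
    {
        switch (cur) {
        case CTX_ROM:       return CTX_FW_DL; /* ROM alive: start FW download */
        case CTX_HCI_RESET: return CTX_FW_DL; /* reset re-enters download     */
        case CTX_D3:        return CTX_D0;    /* device woke back up          */
        default:            return cur;       /* unknown: keep state and log  */
        }
    }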
989 /* This function handles the MSI-X interrupt for rx queue 0 which is for TX
997 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_msix_tx_handle()
998 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_msix_tx_handle()
1003 txq = &data->txq; in btintel_pcie_msix_tx_handle()
1006 data->tx_wait_done = true; in btintel_pcie_msix_tx_handle()
1007 wake_up(&data->tx_wait_q); in btintel_pcie_msix_tx_handle()
1009 urbd0 = &txq->urbd0s[cr_tia]; in btintel_pcie_msix_tx_handle()
1011 if (urbd0->tfd_index > txq->count) in btintel_pcie_msix_tx_handle()
1014 cr_tia = (cr_tia + 1) % txq->count; in btintel_pcie_msix_tx_handle()
1015 data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia; in btintel_pcie_msix_tx_handle()
1016 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM); in btintel_pcie_msix_tx_handle()
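Note: the TX-side handler above retires URBD0 completions by walking the consumer tail (cr_tia) toward the head the firmware advanced (cr_hia), validating the echoed TFD index at each step. A stand-in sketch of the drain loop:

    #include <stdint.h>

    struct urbd0 { uint16_t tfd_index; };

    /* Consume completions until the tail catches the firmware's head. */
    static uint16_t drain_tx(const struct urbd0 *urbd0s, uint16_t cr_tia,
                             uint16_t cr_hia, uint16_t count)
    {
        while (cr_tia != cr_hia) {
            if (urbd0s[cr_tia].tfd_index > count)
                break;                      /* implausible index: stop  */
            cr_tia = (cr_tia + 1) % count;  /* one TFD retired          */
        }
        return cr_tia;                      /* stored back in ia.cr_tia */
    }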
1022 struct hci_event_hdr *hdr = (void *)skb->data; in btintel_pcie_recv_event()
1025 if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff && in btintel_pcie_recv_event()
1026 hdr->plen > 0) { in btintel_pcie_recv_event()
1027 const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1; in btintel_pcie_recv_event()
1028 unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1; in btintel_pcie_recv_event()
1031 switch (skb->data[2]) { in btintel_pcie_recv_event()
1045 data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2; in btintel_pcie_recv_event()
1059 if (btintel_test_and_clear_flag(data->hdev, in btintel_pcie_recv_event()
1061 btintel_wake_up_flag(data->hdev, in btintel_pcie_recv_event()
1081 if (skb->data[2] == 0x97) { in btintel_pcie_recv_event()
1101 struct hci_dev *hdev = data->hdev; in btintel_pcie_recv_frame()
1103 spin_lock(&data->hci_rx_lock); in btintel_pcie_recv_frame()
1109 ret = -EILSEQ; in btintel_pcie_recv_frame()
1117 if (skb->len >= HCI_ACL_HDR_SIZE) { in btintel_pcie_recv_frame()
1118 plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen); in btintel_pcie_recv_frame()
1122 ret = -EILSEQ; in btintel_pcie_recv_frame()
1128 if (skb->len >= HCI_SCO_HDR_SIZE) { in btintel_pcie_recv_frame()
1129 plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen; in btintel_pcie_recv_frame()
1133 ret = -EILSEQ; in btintel_pcie_recv_frame()
1139 if (skb->len >= HCI_EVENT_HDR_SIZE) { in btintel_pcie_recv_frame()
1140 plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen; in btintel_pcie_recv_frame()
1144 ret = -EILSEQ; in btintel_pcie_recv_frame()
1150 if (skb->len >= HCI_ISO_HDR_SIZE) { in btintel_pcie_recv_frame()
1151 plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen); in btintel_pcie_recv_frame()
1155 ret = -EILSEQ; in btintel_pcie_recv_frame()
1163 ret = -EINVAL; in btintel_pcie_recv_frame()
1167 if (skb->len < plen) { in btintel_pcie_recv_frame()
1170 ret = -EILSEQ; in btintel_pcie_recv_frame()
1177 hdev->stat.byte_rx += plen; in btintel_pcie_recv_frame()
1191 hdev->stat.err_rx++; in btintel_pcie_recv_frame()
1193 spin_unlock(&data->hci_rx_lock); in btintel_pcie_recv_frame()
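Note: btintel_pcie_recv_frame() above computes the expected packet length from the per-type HCI header and rejects short frames with -EILSEQ. A compact user-space sketch of the same checks; the header sizes follow the HCI spec, and the type codes stand in for the HCI_*_PKT constants:

    #include <stdint.h>

    /* Returns the expected total length, or -1 for a malformed header. */
    static int hci_expected_len(uint8_t type, const uint8_t *h, uint32_t len)
    {
        switch (type) {
        case 2: /* ACL: 2B handle + 2B dlen (little endian) */
            return len >= 4 ? 4 + (h[2] | (h[3] << 8)) : -1;
        case 3: /* SCO: 2B handle + 1B dlen */
            return len >= 3 ? 3 + h[2] : -1;
        case 4: /* EVT: 1B event + 1B plen */
            return len >= 2 ? 2 + h[1] : -1;
        case 5: /* ISO: 2B handle + 2B dlen (little endian) */
            return len >= 4 ? 4 + (h[2] | (h[3] << 8)) : -1;
        default:
            return -1; /* -EINVAL in the driver */
        }
    }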
1214 switch (data->dmp_hdr.cnvi_top & 0xfff) { in btintel_pcie_read_hwexp()
1218 if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01) in btintel_pcie_read_hwexp()
1228 bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top); in btintel_pcie_read_hwexp()
1244 bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x", in btintel_pcie_read_hwexp()
1249 snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev)); in btintel_pcie_read_hwexp()
1253 pending = len - offset; in btintel_pcie_read_hwexp()
1259 if (!tlv->type) { in btintel_pcie_read_hwexp()
1260 bt_dev_dbg(data->hdev, "Invalid TLV type 0"); in btintel_pcie_read_hwexp()
1263 pkt_len = le16_to_cpu(tlv->len); in btintel_pcie_read_hwexp()
1265 pending = len - offset; in btintel_pcie_read_hwexp()
1274 if (tlv->type != 1) in btintel_pcie_read_hwexp()
1277 bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len); in btintel_pcie_read_hwexp()
1284 skb_put_data(skb, tlv->val, pkt_len); in btintel_pcie_read_hwexp()
1292 tlv->val, pkt_len, false); in btintel_pcie_read_hwexp()
1303 bt_dev_err(data->hdev, "Received hw exception interrupt"); in btintel_pcie_msix_hw_exp_handler()
1305 if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags)) in btintel_pcie_msix_hw_exp_handler()
1308 if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) in btintel_pcie_msix_hw_exp_handler()
1312 if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) in btintel_pcie_msix_hw_exp_handler()
1313 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT; in btintel_pcie_msix_hw_exp_handler()
1315 queue_work(data->workqueue, &data->rx_work); in btintel_pcie_msix_hw_exp_handler()
1324 if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) { in btintel_pcie_rx_work()
1325 btintel_pcie_dump_traces(data->hdev); in btintel_pcie_rx_work()
1326 clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags); in btintel_pcie_rx_work()
1329 if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) { in btintel_pcie_rx_work()
1338 clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags); in btintel_pcie_rx_work()
1342 while ((skb = skb_dequeue(&data->rx_skb_q))) { in btintel_pcie_rx_work()
1357 len = rfh_hdr->packet_len; in btintel_pcie_submit_rx_work()
1359 ret = -EINVAL; in btintel_pcie_submit_rx_work()
1371 skb_queue_tail(&data->rx_skb_q, skb); in btintel_pcie_submit_rx_work()
1372 queue_work(data->workqueue, &data->rx_work); in btintel_pcie_submit_rx_work()
1380 /* Handles the MSI-X interrupt for rx queue 1 which is for RX */
1384 struct rxq *rxq; in btintel_pcie_msix_rx_handle() local
1388 struct hci_dev *hdev = data->hdev; in btintel_pcie_msix_rx_handle()
1390 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_msix_rx_handle()
1391 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_msix_rx_handle()
1393 bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia); in btintel_pcie_msix_rx_handle()
1399 rxq = &data->rxq; in btintel_pcie_msix_rx_handle()
1401	/* The firmware sends multiple CDs in a single MSI-X and they need to in btintel_pcie_msix_rx_handle()
1405 urbd1 = &rxq->urbd1s[cr_tia]; in btintel_pcie_msix_rx_handle()
1406 ipc_print_urbd1(data->hdev, urbd1, cr_tia); in btintel_pcie_msix_rx_handle()
1408 buf = &rxq->bufs[urbd1->frbd_tag]; in btintel_pcie_msix_rx_handle()
1410 bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d", in btintel_pcie_msix_rx_handle()
1411 urbd1->frbd_tag); in btintel_pcie_msix_rx_handle()
1415 ret = btintel_pcie_submit_rx_work(data, urbd1->status, in btintel_pcie_msix_rx_handle()
1416 buf->data); in btintel_pcie_msix_rx_handle()
1418 bt_dev_err(hdev, "RXQ: failed to submit rx request"); in btintel_pcie_msix_rx_handle()
1422 cr_tia = (cr_tia + 1) % rxq->count; in btintel_pcie_msix_rx_handle()
1423 data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia; in btintel_pcie_msix_rx_handle()
1424 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM); in btintel_pcie_msix_rx_handle()
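Note: on the RX side above, each URBD1 names the buffer it filled via frbd_tag rather than implying it by ring position, and the payload is queued to the rx work before the tail index advances. A stand-in sketch of the tag-to-buffer lookup:

    #include <stddef.h>
    #include <stdint.h>

    struct urbd1    { uint16_t frbd_tag; uint16_t status; };
    struct data_buf { uint64_t data_p_addr; uint8_t *data; };

    static const uint8_t *rx_payload(const struct urbd1 *u,
                                     const struct data_buf *bufs,
                                     uint16_t count)
    {
        if (u->frbd_tag >= count)
            return NULL;               /* bad tag: drop the completion */
        return bufs[u->frbd_tag].data; /* buffer the firmware filled   */
    }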
1435 return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; in btintel_pcie_is_rxq_empty()
1440 return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; in btintel_pcie_is_txackq_empty()
1449 spin_lock(&data->irq_lock); in btintel_pcie_irq_msix_handler()
1456 spin_unlock(&data->irq_lock); in btintel_pcie_irq_msix_handler()
1495 * re-enabled by clearing this bit. This register is defined as write 1 in btintel_pcie_irq_msix_handler()
1500 BIT(entry->entry)); in btintel_pcie_irq_msix_handler()
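Note: the BIT(entry->entry) write above relies on the write-1-to-clear (W1C) semantics named in the comment: storing a 1 in the cause's bit acks and re-arms just that vector, while 0 bits leave the others untouched. A stand-in sketch (the real CSR offset is not part of this listing):

    #include <stdint.h>

    /* Ack one W1C cause bit: writing 1 clears, writing 0 leaves alone. */
    static inline void ack_cause(volatile uint32_t *autoclear_reg,
                                 unsigned int vec)
    {
        *autoclear_reg = UINT32_C(1) << vec;
    }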
1505 /* This function requests the irq for MSI-X and registers the handlers per irq.
1514 data->msix_entries[i].entry = i; in btintel_pcie_setup_irq()
1516 num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN, in btintel_pcie_setup_irq()
1521 data->alloc_vecs = num_irqs; in btintel_pcie_setup_irq()
1522 data->msix_enabled = 1; in btintel_pcie_setup_irq()
1523 data->def_irq = 0; in btintel_pcie_setup_irq()
1526 for (i = 0; i < data->alloc_vecs; i++) { in btintel_pcie_setup_irq()
1529 msix_entry = &data->msix_entries[i]; in btintel_pcie_setup_irq()
1530 msix_entry->vector = pci_irq_vector(data->pdev, i); in btintel_pcie_setup_irq()
1532 err = devm_request_threaded_irq(&data->pdev->dev, in btintel_pcie_setup_irq()
1533 msix_entry->vector, in btintel_pcie_setup_irq()
1540 pci_free_irq_vectors(data->pdev); in btintel_pcie_setup_irq()
1541 data->alloc_vecs = 0; in btintel_pcie_setup_irq()
1571 int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE; in btintel_pcie_config_msix()
1584 data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK); in btintel_pcie_config_msix()
1585 data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK); in btintel_pcie_config_msix()
1599 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in btintel_pcie_config_pcie()
1601 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in btintel_pcie_config_pcie()
1606 data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME); in btintel_pcie_config_pcie()
1607 if (IS_ERR(data->base_addr)) in btintel_pcie_config_pcie()
1608 return PTR_ERR(data->base_addr); in btintel_pcie_config_pcie()
1614 /* Configure MSI-X with causes list */ in btintel_pcie_config_pcie()
1623 ci->version = 0x1; in btintel_pcie_init_ci()
1624 ci->size = sizeof(*ci); in btintel_pcie_init_ci()
1625 ci->config = 0x0000; in btintel_pcie_init_ci()
1626 ci->addr_cr_hia = data->ia.cr_hia_p_addr; in btintel_pcie_init_ci()
1627 ci->addr_tr_tia = data->ia.tr_tia_p_addr; in btintel_pcie_init_ci()
1628 ci->addr_cr_tia = data->ia.cr_tia_p_addr; in btintel_pcie_init_ci()
1629 ci->addr_tr_hia = data->ia.tr_hia_p_addr; in btintel_pcie_init_ci()
1630 ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES; in btintel_pcie_init_ci()
1631 ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES; in btintel_pcie_init_ci()
1632 ci->addr_urbdq0 = data->txq.urbd0s_p_addr; in btintel_pcie_init_ci()
1633 ci->addr_tfdq = data->txq.tfds_p_addr; in btintel_pcie_init_ci()
1634 ci->num_tfdq = data->txq.count; in btintel_pcie_init_ci()
1635 ci->num_urbdq0 = data->txq.count; in btintel_pcie_init_ci()
1636 ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM; in btintel_pcie_init_ci()
1637 ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM; in btintel_pcie_init_ci()
1638 ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K; in btintel_pcie_init_ci()
1639 ci->addr_frbdq = data->rxq.frbds_p_addr; in btintel_pcie_init_ci()
1640 ci->num_frbdq = data->rxq.count; in btintel_pcie_init_ci()
1641 ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM; in btintel_pcie_init_ci()
1642 ci->addr_urbdq1 = data->rxq.urbd1s_p_addr; in btintel_pcie_init_ci()
1643 ci->num_urbdq1 = data->rxq.count; in btintel_pcie_init_ci()
1644 ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM; in btintel_pcie_init_ci()
1646 ci->dbg_output_mode = 0x01; in btintel_pcie_init_ci()
1647 ci->dbgc_addr = data->dbgc.frag_p_addr; in btintel_pcie_init_ci()
1648 ci->dbgc_size = data->dbgc.frag_size; in btintel_pcie_init_ci()
1649 ci->dbg_preset = 0x00; in btintel_pcie_init_ci()
1656 dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE, in btintel_pcie_free_txq_bufs()
1657 txq->buf_v_addr, txq->buf_p_addr); in btintel_pcie_free_txq_bufs()
1658 kfree(txq->bufs); in btintel_pcie_free_txq_bufs()
1668 txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL); in btintel_pcie_setup_txq_bufs()
1669 if (!txq->bufs) in btintel_pcie_setup_txq_bufs()
1670 return -ENOMEM; in btintel_pcie_setup_txq_bufs()
1675 txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev, in btintel_pcie_setup_txq_bufs()
1676 txq->count * BTINTEL_PCIE_BUFFER_SIZE, in btintel_pcie_setup_txq_bufs()
1677 &txq->buf_p_addr, in btintel_pcie_setup_txq_bufs()
1679 if (!txq->buf_v_addr) { in btintel_pcie_setup_txq_bufs()
1680 kfree(txq->bufs); in btintel_pcie_setup_txq_bufs()
1681 return -ENOMEM; in btintel_pcie_setup_txq_bufs()
1687 for (i = 0; i < txq->count; i++) { in btintel_pcie_setup_txq_bufs()
1688 buf = &txq->bufs[i]; in btintel_pcie_setup_txq_bufs()
1689 buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); in btintel_pcie_setup_txq_bufs()
1690 buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); in btintel_pcie_setup_txq_bufs()
1697 struct rxq *rxq) in btintel_pcie_free_rxq_bufs() argument
1700 dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE, in btintel_pcie_free_rxq_bufs()
1701 rxq->buf_v_addr, rxq->buf_p_addr); in btintel_pcie_free_rxq_bufs()
1702 kfree(rxq->bufs); in btintel_pcie_free_rxq_bufs()
1706 struct rxq *rxq) in btintel_pcie_setup_rxq_bufs() argument
1712 rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL); in btintel_pcie_setup_rxq_bufs()
1713 if (!rxq->bufs) in btintel_pcie_setup_rxq_bufs()
1714 return -ENOMEM; in btintel_pcie_setup_rxq_bufs()
1719 rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev, in btintel_pcie_setup_rxq_bufs()
1720 rxq->count * BTINTEL_PCIE_BUFFER_SIZE, in btintel_pcie_setup_rxq_bufs()
1721 &rxq->buf_p_addr, in btintel_pcie_setup_rxq_bufs()
1723 if (!rxq->buf_v_addr) { in btintel_pcie_setup_rxq_bufs()
1724 kfree(rxq->bufs); in btintel_pcie_setup_rxq_bufs()
1725 return -ENOMEM; in btintel_pcie_setup_rxq_bufs()
1731 for (i = 0; i < rxq->count; i++) { in btintel_pcie_setup_rxq_bufs()
1732 buf = &rxq->bufs[i]; in btintel_pcie_setup_rxq_bufs()
1733 buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); in btintel_pcie_setup_rxq_bufs()
1734 buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE); in btintel_pcie_setup_rxq_bufs()
1745 ia->tr_hia_p_addr = p_addr; in btintel_pcie_setup_ia()
1746 ia->tr_hia = v_addr; in btintel_pcie_setup_ia()
1749 ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES; in btintel_pcie_setup_ia()
1750 ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES; in btintel_pcie_setup_ia()
1753 ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2); in btintel_pcie_setup_ia()
1754 ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2); in btintel_pcie_setup_ia()
1757 ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3); in btintel_pcie_setup_ia()
1758 ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3); in btintel_pcie_setup_ia()
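Note: btintel_pcie_setup_ia() above slices one coherent allocation into four u16[BTINTEL_PCIE_NUM_QUEUES] index arrays (transfer-ring head/tail, completion-ring head/tail) shared with the device. A stand-in sketch of the same carving:

    #include <stdint.h>

    #define NUM_QUEUES 2  /* stand-in for BTINTEL_PCIE_NUM_QUEUES */

    struct ia { uint16_t *tr_hia, *tr_tia, *cr_hia, *cr_tia; };

    /* v_addr points at one allocation of 4 * NUM_QUEUES u16 entries. */
    static void setup_ia(struct ia *ia, uint16_t *v_addr)
    {
        ia->tr_hia = v_addr;                  /* transfer ring heads   */
        ia->tr_tia = v_addr + NUM_QUEUES;     /* transfer ring tails   */
        ia->cr_hia = v_addr + NUM_QUEUES * 2; /* completion ring heads */
        ia->cr_tia = v_addr + NUM_QUEUES * 3; /* completion ring tails */
    }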
1763 btintel_pcie_free_rxq_bufs(data, &data->rxq); in btintel_pcie_free()
1764 btintel_pcie_free_txq_bufs(data, &data->txq); in btintel_pcie_free()
1766 dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr); in btintel_pcie_free()
1767 dma_pool_destroy(data->dma_pool); in btintel_pcie_free()
1799 data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev, in btintel_pcie_alloc()
1801 if (!data->dma_pool) { in btintel_pcie_alloc()
1802 err = -ENOMEM; in btintel_pcie_alloc()
1806 v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN, in btintel_pcie_alloc()
1809 dma_pool_destroy(data->dma_pool); in btintel_pcie_alloc()
1810 err = -ENOMEM; in btintel_pcie_alloc()
1814 data->dma_p_addr = p_addr; in btintel_pcie_alloc()
1815 data->dma_v_addr = v_addr; in btintel_pcie_alloc()
1818 data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT; in btintel_pcie_alloc()
1819 data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT; in btintel_pcie_alloc()
1822 data->txq.tfds_p_addr = p_addr; in btintel_pcie_alloc()
1823 data->txq.tfds = v_addr; in btintel_pcie_alloc()
1829 data->txq.urbd0s_p_addr = p_addr; in btintel_pcie_alloc()
1830 data->txq.urbd0s = v_addr; in btintel_pcie_alloc()
1836 data->rxq.frbds_p_addr = p_addr; in btintel_pcie_alloc()
1837 data->rxq.frbds = v_addr; in btintel_pcie_alloc()
1843 data->rxq.urbd1s_p_addr = p_addr; in btintel_pcie_alloc()
1844 data->rxq.urbd1s = v_addr; in btintel_pcie_alloc()
1850 err = btintel_pcie_setup_txq_bufs(data, &data->txq); in btintel_pcie_alloc()
1854 /* Setup data buffers for rxq */ in btintel_pcie_alloc()
1855 err = btintel_pcie_setup_rxq_bufs(data, &data->rxq); in btintel_pcie_alloc()
1860 btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia); in btintel_pcie_alloc()
1871 data->ci = v_addr; in btintel_pcie_alloc()
1872 data->ci_p_addr = p_addr; in btintel_pcie_alloc()
1875 btintel_pcie_init_ci(data, data->ci); in btintel_pcie_alloc()
1880 btintel_pcie_free_txq_bufs(data, &data->txq); in btintel_pcie_alloc()
1882 dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr); in btintel_pcie_alloc()
1883 dma_pool_destroy(data->dma_pool); in btintel_pcie_alloc()
1910 return -ENOMEM; in btintel_pcie_inject_cmd_complete()
1913 hdr->evt = HCI_EV_CMD_COMPLETE; in btintel_pcie_inject_cmd_complete()
1914 hdr->plen = sizeof(*evt) + 1; in btintel_pcie_inject_cmd_complete()
1917 evt->ncmd = 0x01; in btintel_pcie_inject_cmd_complete()
1918 evt->opcode = cpu_to_le16(opcode); in btintel_pcie_inject_cmd_complete()
1936 if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags)) in btintel_pcie_send_frame()
1937 return -ENODEV; in btintel_pcie_send_frame()
1956 cmd = (void *)skb->data; in btintel_pcie_send_frame()
1957 opcode = le16_to_cpu(cmd->opcode); in btintel_pcie_send_frame()
1959 struct hci_command_hdr *cmd = (void *)skb->data; in btintel_pcie_send_frame()
1960 __u16 opcode = le16_to_cpu(cmd->opcode); in btintel_pcie_send_frame()
1971 hdev->stat.cmd_tx++; in btintel_pcie_send_frame()
1975 hdev->stat.acl_tx++; in btintel_pcie_send_frame()
1979 hdev->stat.sco_tx++; in btintel_pcie_send_frame()
1986 return -EILSEQ; in btintel_pcie_send_frame()
1991 hdev->stat.err_tx++; in btintel_pcie_send_frame()
1996 hdev->stat.byte_tx += skb->len; in btintel_pcie_send_frame()
2007 hdev = data->hdev; in btintel_pcie_release_hdev()
2010 data->hdev = NULL; in btintel_pcie_release_hdev()
2015 spin_lock(&data->irq_lock); in btintel_pcie_disable_interrupts()
2016 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask); in btintel_pcie_disable_interrupts()
2017 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask); in btintel_pcie_disable_interrupts()
2018 spin_unlock(&data->irq_lock); in btintel_pcie_disable_interrupts()
2023 spin_lock(&data->irq_lock); in btintel_pcie_enable_interrupts()
2024 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask); in btintel_pcie_enable_interrupts()
2025 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask); in btintel_pcie_enable_interrupts()
2026 spin_unlock(&data->irq_lock); in btintel_pcie_enable_interrupts()
2031 for (int i = 0; i < data->alloc_vecs; i++) in btintel_pcie_synchronize_irqs()
2032 synchronize_irq(data->msix_entries[i].vector); in btintel_pcie_synchronize_irqs()
2043 BT_DBG("%s", hdev->name); in btintel_pcie_setup_internal()
2053 if (skb->data[0]) { in btintel_pcie_setup_internal()
2055 skb->data[0]); in btintel_pcie_setup_internal()
2056 err = -EIO; in btintel_pcie_setup_internal()
2066 hdev->set_quality_report = btintel_set_quality_report; in btintel_pcie_setup_internal()
2082 err = -EINVAL; in btintel_pcie_setup_internal()
2117 err = -EINVAL; in btintel_pcie_setup_internal()
2122 data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top; in btintel_pcie_setup_internal()
2123 data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top; in btintel_pcie_setup_internal()
2124 data->dmp_hdr.fw_timestamp = ver_tlv.timestamp; in btintel_pcie_setup_internal()
2125 data->dmp_hdr.fw_build_type = ver_tlv.build_type; in btintel_pcie_setup_internal()
2126 data->dmp_hdr.fw_build_num = ver_tlv.build_num; in btintel_pcie_setup_internal()
2127 data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt; in btintel_pcie_setup_internal()
2130 data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1; in btintel_pcie_setup_internal()
2168 set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags); in btintel_pcie_setup()
2182 if (strcmp(tmp->name, name)) in btintel_pcie_get_recovery()
2190 bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name); in btintel_pcie_get_recovery()
2198 strscpy(data->name, name, name_len); in btintel_pcie_get_recovery()
2200 list_add_tail(&data->list, &btintel_pcie_recovery_list); in btintel_pcie_get_recovery()
2212 list_del(&tmp->list); in btintel_pcie_free_restart_list()
2227 retry_window = ktime_get_boottime_seconds() - data->last_error; in btintel_pcie_inc_recovery_count()
2228 if (data->count == 0) { in btintel_pcie_inc_recovery_count()
2229 data->last_error = ktime_get_boottime_seconds(); in btintel_pcie_inc_recovery_count()
2230 data->count++; in btintel_pcie_inc_recovery_count()
2232 data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) { in btintel_pcie_inc_recovery_count()
2233 data->count++; in btintel_pcie_inc_recovery_count()
2235 data->last_error = 0; in btintel_pcie_inc_recovery_count()
2236 data->count = 0; in btintel_pcie_inc_recovery_count()
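Note: btintel_pcie_inc_recovery_count() above implements a sliding retry window: the first error opens the window, later errors inside it bump the count up to the FLR cap, and anything outside resets the bookkeeping. A stand-in sketch (the window length and cap here are made up):

    #include <time.h>

    #define RETRY_WINDOW_SECS 300 /* stand-in window                     */
    #define FLR_MAX_RETRY     2   /* stand-in for the FLR retry cap      */

    struct recovery { time_t last_error; int count; };

    static void note_error(struct recovery *r, time_t now)
    {
        if (r->count == 0) {
            r->last_error = now;                 /* open a new window    */
            r->count = 1;
        } else if (now - r->last_error <= RETRY_WINDOW_SECS &&
                   r->count <= FLR_MAX_RETRY) {
            r->count++;                          /* error inside window  */
        } else {
            r->last_error = 0;                   /* window over: reset   */
            r->count = 0;
        }
    }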
2246 struct pci_dev *pdev = removal->pdev; in btintel_pcie_removal_work()
2252 if (!pdev->bus) in btintel_pcie_removal_work()
2260 flush_work(&data->rx_work); in btintel_pcie_removal_work()
2262 bt_dev_dbg(data->hdev, "Release bluetooth interface"); in btintel_pcie_removal_work()
2283 data->flags = 0; in btintel_pcie_removal_work()
2303 if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags)) in btintel_pcie_reset()
2306 if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags)) in btintel_pcie_reset()
2313 removal->pdev = data->pdev; in btintel_pcie_reset()
2314 INIT_WORK(&removal->work, btintel_pcie_removal_work); in btintel_pcie_reset()
2315 pci_dev_get(removal->pdev); in btintel_pcie_reset()
2316 schedule_work(&removal->work); in btintel_pcie_reset()
2323 struct pci_dev *pdev = dev_data->pdev; in btintel_pcie_hw_error()
2331 data = btintel_pcie_get_recovery(pdev, &hdev->dev); in btintel_pcie_hw_error()
2335 retry_window = ktime_get_boottime_seconds() - data->last_error; in btintel_pcie_hw_error()
2338 data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) { in btintel_pcie_hw_error()
2340 BTINTEL_PCIE_FLR_MAX_RETRY, data->count); in btintel_pcie_hw_error()
2344 data->last_error); in btintel_pcie_hw_error()
2347 btintel_pcie_inc_recovery_count(pdev, &hdev->dev); in btintel_pcie_hw_error()
2355 return device_may_wakeup(&data->pdev->dev); in btintel_pcie_wakeup()
2365 return -ENOMEM; in btintel_pcie_setup_hdev()
2367 hdev->bus = HCI_PCI; in btintel_pcie_setup_hdev()
2370 data->hdev = hdev; in btintel_pcie_setup_hdev()
2371 SET_HCIDEV_DEV(hdev, &data->pdev->dev); in btintel_pcie_setup_hdev()
2373 hdev->manufacturer = 2; in btintel_pcie_setup_hdev()
2374 hdev->open = btintel_pcie_open; in btintel_pcie_setup_hdev()
2375 hdev->close = btintel_pcie_close; in btintel_pcie_setup_hdev()
2376 hdev->send = btintel_pcie_send_frame; in btintel_pcie_setup_hdev()
2377 hdev->setup = btintel_pcie_setup; in btintel_pcie_setup_hdev()
2378 hdev->shutdown = btintel_shutdown_combined; in btintel_pcie_setup_hdev()
2379 hdev->hw_error = btintel_pcie_hw_error; in btintel_pcie_setup_hdev()
2380 hdev->set_diag = btintel_set_diag; in btintel_pcie_setup_hdev()
2381 hdev->set_bdaddr = btintel_set_bdaddr; in btintel_pcie_setup_hdev()
2382 hdev->reset = btintel_pcie_reset; in btintel_pcie_setup_hdev()
2383 hdev->wakeup = btintel_pcie_wakeup; in btintel_pcie_setup_hdev()
2391 data->dmp_hdr.driver_name = KBUILD_MODNAME; in btintel_pcie_setup_hdev()
2406 return -ENODEV; in btintel_pcie_probe()
2408 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); in btintel_pcie_probe()
2410 return -ENOMEM; in btintel_pcie_probe()
2412 data->pdev = pdev; in btintel_pcie_probe()
2414 spin_lock_init(&data->irq_lock); in btintel_pcie_probe()
2415 spin_lock_init(&data->hci_rx_lock); in btintel_pcie_probe()
2417 init_waitqueue_head(&data->gp0_wait_q); in btintel_pcie_probe()
2418 data->gp0_received = false; in btintel_pcie_probe()
2420 init_waitqueue_head(&data->tx_wait_q); in btintel_pcie_probe()
2421 data->tx_wait_done = false; in btintel_pcie_probe()
2423 data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI); in btintel_pcie_probe()
2424 if (!data->workqueue) in btintel_pcie_probe()
2425 return -ENOMEM; in btintel_pcie_probe()
2427 skb_queue_head_init(&data->rx_skb_q); in btintel_pcie_probe()
2428 INIT_WORK(&data->rx_work, btintel_pcie_rx_work); in btintel_pcie_probe()
2430 data->boot_stage_cache = 0x00; in btintel_pcie_probe()
2431 data->img_resp_cache = 0x00; in btintel_pcie_probe()
2448 data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG); in btintel_pcie_probe()
2450 data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG); in btintel_pcie_probe()
2460 bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi, in btintel_pcie_probe()
2461 data->cnvr); in btintel_pcie_probe()
2485 flush_work(&data->rx_work); in btintel_pcie_remove()
2488 for (int i = 0; i < data->alloc_vecs; i++) { in btintel_pcie_remove()
2491 msix_entry = &data->msix_entries[i]; in btintel_pcie_remove()
2492 free_irq(msix_entry->vector, msix_entry); in btintel_pcie_remove()
2499 destroy_workqueue(data->workqueue); in btintel_pcie_remove()
2514 if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) in btintel_pcie_coredump()
2517 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER; in btintel_pcie_coredump()
2518 queue_work(data->workqueue, &data->rx_work); in btintel_pcie_coredump()
2535 data->gp0_received = false; in btintel_pcie_suspend_late()
2539 /* Refer: 6.4.11.7 -> Platform power management */ in btintel_pcie_suspend_late()
2541 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received, in btintel_pcie_suspend_late()
2544 bt_dev_err(data->hdev, in btintel_pcie_suspend_late()
2547 return -EBUSY; in btintel_pcie_suspend_late()
2550 bt_dev_dbg(data->hdev, in btintel_pcie_suspend_late()
2552 ktime_to_us(ktime_get() - start)); in btintel_pcie_suspend_late()
2580 data->gp0_received = false; in btintel_pcie_resume()
2584 /* Refer: 6.4.11.7 -> Platform power management */ in btintel_pcie_resume()
2586 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received, in btintel_pcie_resume()
2589 bt_dev_err(data->hdev, in btintel_pcie_resume()
2592 return -EBUSY; in btintel_pcie_resume()
2595 bt_dev_dbg(data->hdev, in btintel_pcie_resume()
2597 ktime_to_us(ktime_get() - start)); in btintel_pcie_resume()
2635 MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");