Lines matching "host2wbm-desc-feed"
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
37 * 4K - 32 = 0xFE0
82 "mhi-er0",
83 "mhi-er1",
100 "host2wbm-desc-feed",
101 "host2reo-re-injection",
102 "host2reo-command",
103 "host2rxdma-monitor-ring3",
104 "host2rxdma-monitor-ring2",
105 "host2rxdma-monitor-ring1",
106 "reo2ost-exception",
107 "wbm2host-rx-release",
108 "reo2host-status",
109 "reo2host-destination-ring4",
110 "reo2host-destination-ring3",
111 "reo2host-destination-ring2",
112 "reo2host-destination-ring1",
113 "rxdma2host-monitor-destination-mac3",
114 "rxdma2host-monitor-destination-mac2",
115 "rxdma2host-monitor-destination-mac1",
116 "ppdu-end-interrupts-mac3",
117 "ppdu-end-interrupts-mac2",
118 "ppdu-end-interrupts-mac1",
119 "rxdma2host-monitor-status-ring-mac3",
120 "rxdma2host-monitor-status-ring-mac2",
121 "rxdma2host-monitor-status-ring-mac1",
122 "host2rxdma-host-buf-ring-mac3",
123 "host2rxdma-host-buf-ring-mac2",
124 "host2rxdma-host-buf-ring-mac1",
125 "rxdma2host-destination-ring-mac3",
126 "rxdma2host-destination-ring-mac2",
127 "rxdma2host-destination-ring-mac1",
128 "host2tcl-input-ring4",
129 "host2tcl-input-ring3",
130 "host2tcl-input-ring2",
131 "host2tcl-input-ring1",
132 "wbm2host-tx-completions-ring4",
133 "wbm2host-tx-completions-ring3",
134 "wbm2host-tx-completions-ring2",
135 "wbm2host-tx-completions-ring1",
136 "tcl2host-status-ring",
143 return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
150 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
165 struct ath12k_base *ab = ab_pci->ab;
170 lockdep_assert_held(&ab_pci->window_lock);
173 static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
176 if (window != ab_pci->register_window) {
178 ab->mem + WINDOW_REG_ADDRESS);
179 ioread32(ab->mem + WINDOW_REG_ADDRESS);
180 ab_pci->register_window = window;
192 spin_lock_bh(&ab_pci->window_lock);
193 ab_pci->register_window = window;
194 spin_unlock_bh(&ab_pci->window_lock);
196 iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
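Both selectors above cache the last window value and skip the register write (plus the ioread32() flush) when the target offset falls in the window that is already mapped. A minimal userspace model of that caching, with illustrative shift/mask values standing in for the real constants in pci.h:

#include <stdio.h>

#define WINDOW_ENABLE_BIT 0x40000000u
#define WINDOW_SHIFT      19			/* assumed window size: 512 KiB */
#define WINDOW_VALUE_MASK 0x3fu			/* assumed number of windows */
#define WINDOW_RANGE_MASK ((1u << WINDOW_SHIFT) - 1)

static unsigned register_window;		/* last value written to HW */

static void select_window(unsigned offset)
{
	unsigned window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != register_window) {
		/* stands in for iowrite32() plus the ioread32() flush */
		printf("window reg <- 0x%08x\n", WINDOW_ENABLE_BIT | window);
		register_window = window;
	}
}

int main(void)
{
	select_window(0x00080000);		/* switches the window */
	select_window(0x00080010);		/* cached: no write */
	printf("in-window offset 0x%05x\n", 0x00080010 & WINDOW_RANGE_MASK);
	return 0;
}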
349 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
351 for (j = 0; j < irq_grp->num_irq; j++)
352 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
354 netif_napi_del(&irq_grp->napi);
355 free_netdev(irq_grp->napi_ndev);
363 for (i = 0; i < ab->hw_params->ce_count; i++) {
367 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
381 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
385 enable_irq(ab->irq_num[irq_idx]);
396 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
400 disable_irq_nosync(ab->irq_num[irq_idx]);
407 clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
409 for (i = 0; i < ab->hw_params->ce_count; i++) {
421 for (i = 0; i < ab->hw_params->ce_count; i++) {
426 synchronize_irq(ab->irq_num[irq_idx]);
433 int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
435 ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
437 enable_irq(ce_pipe->ab->irq_num[irq_idx]);
443 struct ath12k_base *ab = ce_pipe->ab;
444 int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
446 if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
450 ce_pipe->timestamp = jiffies;
452 disable_irq_nosync(ab->irq_num[irq_idx]);
454 queue_work(system_bh_wq, &ce_pipe->intr_wq);
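The handler above does the classic top-half/bottom-half split: it masks its own line with disable_irq_nosync(), stamps the pipe, and queues the heavy servicing onto system_bh_wq; the work item re-enables the line once ath12k_ce_per_engine_service() has drained the ring. A small userspace model of that handshake, with stubs in place of the kernel IRQ API:

#include <stdbool.h>
#include <stdio.h>

static bool line_enabled = true;

static void disable_irq_nosync(void) { line_enabled = false; }
static void enable_irq(void)         { line_enabled = true;  }

/* deferred work: drain the copy engine, then re-arm the line */
static void ce_work(int pipe_num)
{
	printf("servicing CE pipe %d\n", pipe_num);
	enable_irq();
}

/* hard-IRQ context: mask cheaply, defer the heavy lifting */
static void ce_irq_handler(int pipe_num)
{
	disable_irq_nosync();
	ce_work(pipe_num);	/* stands in for queue_work(system_bh_wq, ...) */
}

int main(void)
{
	ce_irq_handler(0);
	return line_enabled ? 0 : 1;
}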
461 struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
467 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
470 for (i = 0; i < irq_grp->num_irq; i++)
471 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
478 if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
482 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
486 if (irq_grp->napi_enabled) {
487 napi_synchronize(&irq_grp->napi);
488 napi_disable(&irq_grp->napi);
489 irq_grp->napi_enabled = false;
496 struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
502 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
505 for (i = 0; i < irq_grp->num_irq; i++)
506 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
514 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
516 for (j = 0; j < irq_grp->num_irq; j++) {
517 irq_idx = irq_grp->irqs[j];
518 synchronize_irq(ab->irq_num[irq_idx]);
528 struct ath12k_base *ab = irq_grp->ab;
535 for (i = 0; i < irq_grp->num_irq; i++)
536 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
548 struct ath12k_base *ab = irq_grp->ab;
551 if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
554 ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
557 irq_grp->timestamp = jiffies;
559 for (i = 0; i < irq_grp->num_irq; i++)
560 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
562 napi_schedule(&irq_grp->napi);
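The DP path uses the same deferral shape, but group-wide: the handler masks every line in the group and hands the work to NAPI; the poll routine (not part of this match) re-enables the lines when it finishes, mirroring the enable loop shown earlier. A userspace model of the group state machine:

#include <stdbool.h>
#include <stdio.h>

#define MAX_GRP_IRQS 4

struct ext_irq_grp {
	int  num_irq;
	bool line_enabled[MAX_GRP_IRQS];
	bool napi_scheduled;
};

/* hard-IRQ context: mask the whole group, schedule the poller */
static void ext_interrupt_handler(struct ext_irq_grp *grp)
{
	for (int i = 0; i < grp->num_irq; i++)
		grp->line_enabled[i] = false;	/* disable_irq_nosync() */
	grp->napi_scheduled = true;		/* napi_schedule() */
}

/* NAPI poll completion: unmask the group again */
static void ext_grp_poll_done(struct ext_irq_grp *grp)
{
	grp->napi_scheduled = false;
	for (int i = 0; i < grp->num_irq; i++)
		grp->line_enabled[i] = true;	/* enable_irq() */
}

int main(void)
{
	struct ext_irq_grp grp = { .num_irq = 2,
				   .line_enabled = { true, true } };

	ext_interrupt_handler(&grp);
	ext_grp_poll_done(&grp);
	printf("group re-armed: %s\n", grp.line_enabled[0] ? "yes" : "no");
	return 0;
}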
583 irq_grp = &ab->ext_irq_grp[i];
586 irq_grp->ab = ab;
587 irq_grp->grp_id = i;
588 irq_grp->napi_ndev = alloc_netdev_dummy(0);
589 if (!irq_grp->napi_ndev) {
590 ret = -ENOMEM;
594 netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
597 if (ab->hw_params->ring_mask->tx[i] ||
598 ab->hw_params->ring_mask->rx[i] ||
599 ab->hw_params->ring_mask->rx_err[i] ||
600 ab->hw_params->ring_mask->rx_wbm_rel[i] ||
601 ab->hw_params->ring_mask->reo_status[i] ||
602 ab->hw_params->ring_mask->host2rxdma[i] ||
603 ab->hw_params->ring_mask->rx_mon_dest[i] ||
604 ab->hw_params->ring_mask->rx_mon_status[i]) {
608 irq_grp->num_irq = num_irq;
609 irq_grp->irqs[0] = base_idx + i;
611 for (j = 0; j < irq_grp->num_irq; j++) {
612 int irq_idx = irq_grp->irqs[j];
614 int irq = ath12k_pci_get_msi_irq(ab->dev, vector);
616 ab->irq_num[irq_idx] = irq;
623 ab_pci->irq_flags,
637	/* the i-th ->napi_ndev was properly allocated; free it too */
641 irq_grp = &ab->ext_irq_grp[n];
642 free_netdev(irq_grp->napi_ndev);
650 if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
653 return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
674 for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
679 irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
680 ce_pipe = &ab->ce.ce_pipe[i];
684 INIT_WORK(&ce_pipe->intr_wq, ath12k_pci_ce_workqueue);
687 ab_pci->irq_flags, irq_name[irq_idx],
695 ab->irq_num[irq_idx] = irq;
710 struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
713 struct pci_bus *bus = ab_pci->pdev->bus;
715 cfg->tgt_ce = ab->hw_params->target_ce_config;
716 cfg->tgt_ce_len = ab->hw_params->target_ce_count;
718 cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
719 cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
720 ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
723 ab_pci->qmi_instance =
725 u32_encode_bits(bus->number, BUS_NUMBER_MASK);
726 ab->qmi.service_ins_id += ab_pci->qmi_instance;
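Each device folds its PCI domain and bus number into the QMI service instance id so that multiple cards expose distinct ids. A userspace model of the bit packing; the mask positions are assumptions, and u32_encode_bits() is reimplemented here in FIELD_PREP style:

#include <stdio.h>

#define DOMAIN_NUMBER_MASK 0x0000ff00u	/* assumed field placement */
#define BUS_NUMBER_MASK    0x000000ffu	/* assumed field placement */

/* minimal u32_encode_bits() for a contiguous mask */
static unsigned u32_encode_bits(unsigned val, unsigned mask)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	unsigned domain = 0, bus_number = 3;
	unsigned qmi_instance = u32_encode_bits(domain, DOMAIN_NUMBER_MASK) |
				u32_encode_bits(bus_number, BUS_NUMBER_MASK);

	printf("qmi_instance = 0x%08x\n", qmi_instance);
	return 0;
}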
734 set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
736 for (i = 0; i < ab->hw_params->ce_count; i++) {
745 struct pci_dev *dev = ab_pci->pdev;
748 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
755 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
770 struct ath12k_base *ab = ab_pci->ab;
771 const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
776 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
777 msi_config->total_vectors,
778 msi_config->total_vectors,
781 if (num_vectors == msi_config->total_vectors) {
782 set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
783 ab_pci->irq_flags = IRQF_SHARED;
785 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
790 ret = -EINVAL;
793 clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
794 ab_pci->msi_config = &msi_config_one_msi;
795 ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
803 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
806 ret = -EINVAL;
810 ab_pci->msi_ep_base_data = msi_desc->msg.data;
811 if (msi_desc->pci.msi_attrib.is_64)
812 set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
814 ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
819 pci_free_irq_vectors(ab_pci->pdev);
827 pci_free_irq_vectors(ab_pci->pdev);
834 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
836 ath12k_err(ab_pci->ab, "msi_desc is NULL!\n");
837 pci_free_irq_vectors(ab_pci->pdev);
838 return -EINVAL;
841 ab_pci->msi_ep_base_data = msi_desc->msg.data;
843 ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
844 ab_pci->msi_ep_base_data);
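The allocation path above first requests the full vector set (min == max == total_vectors); if the host cannot grant it, the driver retries with exactly one vector, switches to msi_config_one_msi, and marks the line IRQF_SHARED | IRQF_NOBALANCING so the single vector can be shared with a stable affinity. A userspace sketch of that fallback decision, with a stubbed allocator and an illustrative vector count:

#include <stdio.h>

#define IRQF_SHARED      0x00000080u
#define IRQF_NOBALANCING 0x00002000u

/* stub: pretend the platform can only grant one vector */
static int pci_alloc_irq_vectors_stub(int min_vecs, int max_vecs)
{
	(void)max_vecs;
	return min_vecs == 1 ? 1 : -28;	/* -ENOSPC for the full request */
}

int main(void)
{
	int total_vectors = 16;		/* illustrative msi_config value */
	unsigned long irq_flags;
	int num_vectors;

	num_vectors = pci_alloc_irq_vectors_stub(total_vectors, total_vectors);
	if (num_vectors == total_vectors) {
		irq_flags = IRQF_SHARED;	/* multi-MSI mode */
	} else {
		num_vectors = pci_alloc_irq_vectors_stub(1, 1);
		if (num_vectors < 0)
			return 1;
		irq_flags = IRQF_SHARED | IRQF_NOBALANCING;	/* one-MSI mode */
	}

	printf("vectors=%d flags=0x%lx\n", num_vectors, irq_flags);
	return 0;
}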
851 struct ath12k_base *ab = ab_pci->ab;
856 if (device_id != ab_pci->dev_id) {
858 device_id, ab_pci->dev_id);
859 ret = -EIO;
881 ab_pci->dma_mask = DMA_BIT_MASK(ATH12K_PCI_DMA_MASK);
882 dma_set_mask(&pdev->dev, ab_pci->dma_mask);
883 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
887 ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
888 ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
889 if (!ab->mem) {
891 ret = -EIO;
895 ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem);
908 struct ath12k_base *ab = ab_pci->ab;
909 struct pci_dev *pci_dev = ab_pci->pdev;
911 pci_iounmap(pci_dev, ab->mem);
912 ab->mem = NULL;
920 struct ath12k_base *ab = ab_pci->ab;
922 pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
923 &ab_pci->link_ctl);
926 ab_pci->link_ctl,
927 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
928 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
931 pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
934 set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
943 * with identical qrtr-node-id. Because of this identical ID qrtr-lookup
951 ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);
954 reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
959 if (ab_pci->ab->hw_params->supports_aspm &&
960 test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
961 pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
963 ab_pci->link_ctl &
971 for (i = 0; i < ab->hw_params->ce_count; i++) {
972 struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
977 cancel_work_sync(&ce_pipe->intr_wq);
995 for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
996 entry = &ab->hw_params->svc_to_ce_map[i];
998 if (__le32_to_cpu(entry->service_id) != service_id)
1001 switch (__le32_to_cpu(entry->pipedir)) {
1006 *dl_pipe = __le32_to_cpu(entry->pipenum);
1011 *ul_pipe = __le32_to_cpu(entry->pipenum);
1017 *dl_pipe = __le32_to_cpu(entry->pipenum);
1018 *ul_pipe = __le32_to_cpu(entry->pipenum);
1026 return -ENOENT;
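The lookup above scans the hw_params service-to-CE table and splits each match by direction: PIPEDIR_IN fills the download (target-to-host) pipe, PIPEDIR_OUT the upload pipe, PIPEDIR_INOUT both, and an unmatched service yields -ENOENT. A userspace model with a hypothetical two-entry table:

#include <stddef.h>
#include <stdio.h>

enum pipedir { PIPEDIR_NONE, PIPEDIR_IN, PIPEDIR_OUT, PIPEDIR_INOUT };

struct svc_pipe { unsigned service_id, pipedir, pipenum; };

/* hypothetical table; the real one comes from hw_params->svc_to_ce_map */
static const struct svc_pipe map[] = {
	{ 3, PIPEDIR_OUT, 4 },	/* service 3, host -> target on pipe 4 */
	{ 3, PIPEDIR_IN,  2 },	/* service 3, target -> host on pipe 2 */
};

static int map_service_to_pipe(unsigned svc, int *ul_pipe, int *dl_pipe)
{
	int found = 0;

	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i].service_id != svc)
			continue;
		switch (map[i].pipedir) {
		case PIPEDIR_IN:
			*dl_pipe = map[i].pipenum;
			break;
		case PIPEDIR_OUT:
			*ul_pipe = map[i].pipenum;
			break;
		case PIPEDIR_INOUT:
			*dl_pipe = *ul_pipe = map[i].pipenum;
			break;
		default:
			continue;
		}
		found = 1;
	}
	return found ? 0 : -2;	/* -ENOENT */
}

int main(void)
{
	int ul = -1, dl = -1;

	if (!map_service_to_pipe(3, &ul, &dl))
		printf("ul pipe %d, dl pipe %d\n", ul, dl);
	return 0;
}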
1043 const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
1046 for (idx = 0; idx < msi_config->total_users; idx++) {
1047 if (strcmp(user_name, msi_config->users[idx].name) == 0) {
1048 *num_vectors = msi_config->users[idx].num_vectors;
1049 *base_vector = msi_config->users[idx].base_vector;
1050 *user_base_data = *base_vector + ab_pci->msi_ep_base_data;
1063 return -EINVAL;
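The per-user MSI lookup above walks msi_config->users by name and returns both the vector count and the vector base; user_base_data additionally shifts the base by the endpoint's MSI base data so the target can demultiplex. A minimal sketch with an illustrative user table:

#include <stdio.h>
#include <string.h>

struct msi_user { const char *name; int num_vectors; unsigned base_vector; };

/* illustrative layout; the real table is the per-chip msi_config */
static const struct msi_user users[] = {
	{ "MHI", 3, 0 },
	{ "CE",  5, 3 },
	{ "DP",  8, 8 },
};

static int get_user_msi_assignment(const char *name, unsigned msi_ep_base_data,
				   int *num_vectors, unsigned *user_base_data,
				   unsigned *base_vector)
{
	for (size_t i = 0; i < sizeof(users) / sizeof(users[0]); i++) {
		if (strcmp(name, users[i].name) == 0) {
			*num_vectors = users[i].num_vectors;
			*base_vector = users[i].base_vector;
			*user_base_data = *base_vector + msi_ep_base_data;
			return 0;
		}
	}
	return -22;	/* -EINVAL: unknown user */
}

int main(void)
{
	int nvec;
	unsigned base, user_base;

	if (!get_user_msi_assignment("DP", 32, &nvec, &user_base, &base))
		printf("DP: %d vectors, base %u, user base data %u\n",
		       nvec, base, user_base);
	return 0;
}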
1070 struct pci_dev *pci_dev = to_pci_dev(ab->dev);
1072 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
1075 if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
1076 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
1088 for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
1115 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
1117 if (!irq_grp->napi_enabled) {
1118 napi_enable(&irq_grp->napi);
1119 irq_grp->napi_enabled = true;
1125 set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
1130 if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
1159 if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
1170 set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
1172 if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
1189 /* for offset beyond BAR + 4K - 32, may
1192 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
1193 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
1194 ret = ab_pci->pci_ops->wakeup(ab);
1197 val = ioread32(ab->mem + offset);
1199 if (ab->static_window_map)
1205 spin_lock_bh(&ab_pci->window_lock);
1209 offset = offset - PCI_MHIREGLEN_REG;
1210 val = ioread32(ab->mem +
1213 val = ioread32(ab->mem + window_start +
1216 spin_unlock_bh(&ab_pci->window_lock);
1218 val = ioread32(ab->mem + window_start +
1223 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
1224 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
1226 ab_pci->pci_ops->release(ab);
1236 /* for offset beyond BAR + 4K - 32, may
1239 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
1240 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
1241 ret = ab_pci->pci_ops->wakeup(ab);
1244 iowrite32(value, ab->mem + offset);
1246 if (ab->static_window_map)
1252 spin_lock_bh(&ab_pci->window_lock);
1256 offset = offset - PCI_MHIREGLEN_REG;
1257 iowrite32(value, ab->mem +
1260 iowrite32(value, ab->mem + window_start +
1263 spin_unlock_bh(&ab_pci->window_lock);
1265 iowrite32(value, ab->mem + window_start +
1270 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
1271 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
1273 ab_pci->pci_ops->release(ab);
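Reads and writes above share one access policy: offsets below ACCESS_ALWAYS_OFF (4K - 32 = 0xFE0, per the comment near the top) can always be touched directly, anything higher may first need an MHI wakeup via pci_ops->wakeup(), and offsets past the first window go through the window selector under window_lock, with pci_ops->release() dropping the wakeup afterwards. A userspace model of just the classification step, with an assumed window boundary:

#include <stdio.h>

#define ACCESS_ALWAYS_OFF 0xFE0u	/* 4K - 32, per the comment above */
#define WINDOW_START      0x80000u	/* assumed first-window boundary */

int main(void)
{
	unsigned offsets[] = { 0x100, 0x2000, 0x90000 };
	int init_done = 1;

	for (int i = 0; i < 3; i++) {
		unsigned off = offsets[i];
		int needs_wakeup = init_done && off >= ACCESS_ALWAYS_OFF;
		int windowed = off >= WINDOW_START;

		/* wakeup/release bracket the access; windowing picks the path */
		printf("0x%05x: %s access%s\n", off,
		       windowed ? "windowed" : "direct",
		       needs_wakeup ? " (wakeup/release)" : "");
	}
	return 0;
}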
1280 struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
1288 rddm_img = mhi_ctrl->rddm_image;
1294 fw_img = mhi_ctrl->fbc_image;
1296	for (i = 0; i < fw_img->entries; i++) {
1297 if (!fw_img->mhi_buf[i].buf)
1300 paging_tlv_sz += fw_img->mhi_buf[i].len;
1304 for (i = 0; i < rddm_img->entries; i++) {
1305 if (!rddm_img->mhi_buf[i].buf)
1308 rddm_tlv_sz += rddm_img->mhi_buf[i].len;
1312 for (i = 0; i < ab->qmi.mem_seg_count; i++) {
1313 mem_type = ath12k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
1321 ab->qmi.target_mem[i].type);
1325 if (!ab->qmi.target_mem[i].paddr)
1328 dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
1347 struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
1366 rddm_img = mhi_ctrl->rddm_image;
1367 fw_img = mhi_ctrl->fbc_image;
1374 ab->dump_data = buf;
1375 ab->ath12k_coredump_len = len;
1376 file_data = ab->dump_data;
1377 strscpy(file_data->df_magic, "ATH12K-FW-DUMP", sizeof(file_data->df_magic));
1378 file_data->len = cpu_to_le32(len);
1379 file_data->version = cpu_to_le32(ATH12K_FW_CRASH_DUMP_V2);
1380 file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
1381 file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
1382 file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
1383 guid_gen(&file_data->guid);
1385 file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
1386 file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
1389 dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
1390 dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
1396	for (i = 0; i < fw_img->entries; i++) {
1397 if (!fw_img->mhi_buf[i].buf)
1400 memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
1401 fw_img->mhi_buf[i].len);
1402 buf += fw_img->mhi_buf[i].len;
1406 dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
1407 dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
1413 for (i = 0; i < rddm_img->entries; i++) {
1414 if (!rddm_img->mhi_buf[i].buf)
1417 memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
1418 rddm_img->mhi_buf[i].len);
1419 buf += rddm_img->mhi_buf[i].len;
1428 dump_tlv->type = cpu_to_le32(mem_idx);
1429 dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
1432 for (i = 0; i < ab->qmi.mem_seg_count; i++) {
1434 (ab->qmi.target_mem[i].type);
1439 if (!ab->qmi.target_mem[i].paddr) {
1442 ab->qmi.target_mem[i].type);
1446 memcpy_fromio(buf, ab->qmi.target_mem[i].v.ioaddr,
1447 ab->qmi.target_mem[i].size);
1448 buf += ab->qmi.target_mem[i].size;
1452 queue_work(ab->workqueue, &ab->dump_work);
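The dump builder runs in two passes: the sizing loop totals paging, RDDM, and QMI segment lengths per type, then the buffer is laid out as a file header followed by one {type, tlv_len} header per non-empty segment class with the payload copied in behind it (little-endian in the driver). A compact userspace model of that TLV layout, with hypothetical types and sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { DUMP_PAGING = 0, DUMP_RDDM = 1, DUMP_TYPE_MAX = 2 };

struct dump_tlv { uint32_t type; uint32_t tlv_len; };

int main(void)
{
	/* pass 1: per-type totals (stand-ins for the real segment sizes) */
	uint32_t seg_sz[DUMP_TYPE_MAX] = { 64, 32 };
	size_t len = 0;

	for (int t = 0; t < DUMP_TYPE_MAX; t++)
		if (seg_sz[t])
			len += sizeof(struct dump_tlv) + seg_sz[t];

	uint8_t *dump = calloc(1, len), *buf = dump;
	if (!dump)
		return 1;

	/* pass 2: TLV header, then payload, per non-empty type */
	for (int t = 0; t < DUMP_TYPE_MAX; t++) {
		struct dump_tlv tlv = { .type = t, .tlv_len = seg_sz[t] };

		if (!seg_sz[t])
			continue;
		memcpy(buf, &tlv, sizeof(tlv));
		buf += sizeof(tlv);
		memset(buf, 0xab, seg_sz[t]);	/* memcpy_fromio() in the driver */
		buf += seg_sz[t];
	}

	printf("dump length %zu bytes\n", len);
	free(dump);
	return 0;
}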
1461 ab_pci->register_window = 0;
1462 clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
1463 ath12k_pci_sw_reset(ab_pci->ab, true);
1481 if (ab->static_window_map)
1491 if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
1497 ath12k_pci_force_wake(ab_pci->ab);
1500 clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
1501 ath12k_pci_sw_reset(ab_pci->ab, false);
1558 ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
1560 dev_err(&pdev->dev, "failed to allocate ath12k base\n");
1561 return -ENOMEM;
1564 ab->dev = &pdev->dev;
1567 ab_pci->dev_id = pci_dev->device;
1568 ab_pci->ab = ab;
1569 ab_pci->pdev = pdev;
1570 ab->hif.ops = &ath12k_pci_hif_ops;
1571 ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
1573 spin_lock_init(&ab_pci->window_lock);
1582 pdev->vendor, pdev->device,
1583 pdev->subsystem_vendor, pdev->subsystem_device);
1585 ab->id.vendor = pdev->vendor;
1586 ab->id.device = pdev->device;
1587 ab->id.subsystem_vendor = pdev->subsystem_vendor;
1588 ab->id.subsystem_device = pdev->subsystem_device;
1590 switch (pci_dev->device) {
1592 ab_pci->msi_config = &ath12k_msi_config[0];
1593 ab->static_window_map = true;
1594 ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
1595 ab->hal_rx_ops = &hal_rx_qcn9274_ops;
1600 ab->hw_rev = ATH12K_HW_QCN9274_HW20;
1603 ab->hw_rev = ATH12K_HW_QCN9274_HW10;
1606 dev_err(&pdev->dev,
1609 ret = -EOPNOTSUPP;
1614 ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
1615 ab_pci->msi_config = &ath12k_msi_config[0];
1616 ab->static_window_map = false;
1617 ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
1618 ab->hal_rx_ops = &hal_rx_wcn7850_ops;
1623 ab->hw_rev = ATH12K_HW_WCN7850_HW20;
1626 dev_err(&pdev->dev,
1629 ret = -EOPNOTSUPP;
1635 dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
1636 pci_dev->device);
1637 ret = -EOPNOTSUPP;
1735 if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1740 set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
1742 cancel_work_sync(&ab->reset_work);
1743 cancel_work_sync(&ab->dump_work);
1744 ath12k_core_hw_group_cleanup(ab->ag);
1768 mutex_lock(&ag->mutex);
1770 for (i = 0; i < ag->num_devices; i++) {
1771 ab = ag->ab[i];
1778 mutex_unlock(&ag->mutex);
1787 ath12k_pci_hw_group_power_down(ab->ag);