Lines Matching +full:reserve +full:- +full:mem +full:- +full:v1

1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
56 /* PCI-E QCA988X V2 (Ubiquiti branded) */
59 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
60 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
61 { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
62 { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
63 { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
64 { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
65 { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
66 { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
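
The entries above come from the driver's pci_device_id table. A minimal sketch of how such a table is declared and exported so the PCI core can match devices and autoload the module; the device ID below is a placeholder, not a real entry from this table:

	#include <linux/module.h>
	#include <linux/pci.h>

	#define EXAMPLE_DEVICE_ID 0x003c	/* placeholder device ID */

	/* Terminated by an all-zero entry; the PCI core walks this on probe and
	 * MODULE_DEVICE_TABLE() exports it for module autoloading.
	 */
	static const struct pci_device_id example_pci_id_table[] = {
		{ PCI_VDEVICE(ATHEROS, EXAMPLE_DEVICE_ID) },
		{ 0 }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_id_table);
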
122 /* CE0: host->target HTC control and raw streams */
131 /* CE1: target->host HTT + HTC control */
140 /* CE2: target->host WMI */
149 /* CE3: host->target WMI */
158 /* CE4: host->target HTT */
167 /* CE5: target->host HTT (HIF->HTT) */
192 /* CE8: target->host pktlog */
228 /* CE0: host->target HTC control and raw streams */
238 /* CE1: target->host HTT + HTC control */
248 /* CE2: target->host WMI */
258 /* CE3: host->target WMI */
268 /* CE4: host->target HTT */
280 /* CE5: target->host HTT (HIF->HTT) */
310 /* CE8: target->host pktlog */
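
The CE0–CE8 comments above annotate the copy-engine attribute tables: each pipe is fixed to one direction and one protocol stream. A hedged sketch of what one such attribute entry looks like; the field set is an assumption mirroring the driver's struct ce_attr, which also carries completion callbacks:

	/* Hypothetical mirror of a copy-engine attribute entry. */
	struct example_ce_attr {
		unsigned int flags;
		unsigned int src_nentries;	/* host->target ring depth, 0 = rx-only pipe */
		unsigned int src_sz_max;	/* largest source buffer */
		unsigned int dest_nentries;	/* target->host ring depth, 0 = tx-only pipe */
	};

	/* e.g. a target->host WMI pipe: no source ring, a deep destination ring */
	static const struct example_ce_attr example_ce2 = {
		.flags = 0,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	};
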
343 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
348 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
353 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
358 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
363 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
368 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
373 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
378 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
383 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
388 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
393 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
398 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
403 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
408 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
413 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
418 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
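
The repeated PIPEDIR_OUT/PIPEDIR_IN words come from the target service-to-CE map, where each entry is three little-endian words: service id, direction, pipe number. A sketch with hypothetical names and an assumed direction encoding; the table is stored little-endian because it is downloaded to the target verbatim:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	enum { EXAMPLE_PIPEDIR_IN = 1, EXAMPLE_PIPEDIR_OUT = 2 };	/* assumed encoding */

	struct example_service_to_pipe {
		__le32 service_id;
		__le32 pipedir;		/* OUT = UL = host->target, IN = DL = target->host */
		__le32 pipenum;
	};

	/* one UL/DL pair for a hypothetical service id */
	static const struct example_service_to_pipe example_map[] = {
		{ __cpu_to_le32(0x100), __cpu_to_le32(EXAMPLE_PIPEDIR_OUT), __cpu_to_le32(3) },
		{ __cpu_to_le32(0x100), __cpu_to_le32(EXAMPLE_PIPEDIR_IN),  __cpu_to_le32(2) },
	};
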
434 u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + in ath10k_pci_is_awake()
444 lockdep_assert_held(&ar_pci->ps_lock); in __ath10k_pci_wake()
447 ar_pci->ps_wake_refcount, ar_pci->ps_awake); in __ath10k_pci_wake()
450 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + in __ath10k_pci_wake()
458 lockdep_assert_held(&ar_pci->ps_lock); in __ath10k_pci_sleep()
461 ar_pci->ps_wake_refcount, ar_pci->ps_awake); in __ath10k_pci_sleep()
464 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + in __ath10k_pci_sleep()
466 ar_pci->ps_awake = false; in __ath10k_pci_sleep()
489 return -ETIMEDOUT; in ath10k_pci_wake_wait()
498 if (ar_pci->pci_ps) in ath10k_pci_force_wake()
501 spin_lock_irqsave(&ar_pci->ps_lock, flags); in ath10k_pci_force_wake()
503 if (!ar_pci->ps_awake) { in ath10k_pci_force_wake()
505 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + in ath10k_pci_force_wake()
510 ar_pci->ps_awake = true; in ath10k_pci_force_wake()
513 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); in ath10k_pci_force_wake()
523 spin_lock_irqsave(&ar_pci->ps_lock, flags); in ath10k_pci_force_sleep()
526 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + in ath10k_pci_force_sleep()
528 ar_pci->ps_awake = false; in ath10k_pci_force_sleep()
530 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); in ath10k_pci_force_sleep()
539 if (ar_pci->pci_ps == 0) in ath10k_pci_wake()
542 spin_lock_irqsave(&ar_pci->ps_lock, flags); in ath10k_pci_wake()
545 ar_pci->ps_wake_refcount, ar_pci->ps_awake); in ath10k_pci_wake()
550 if (!ar_pci->ps_awake) { in ath10k_pci_wake()
555 ar_pci->ps_awake = true; in ath10k_pci_wake()
559 ar_pci->ps_wake_refcount++; in ath10k_pci_wake()
560 WARN_ON(ar_pci->ps_wake_refcount == 0); in ath10k_pci_wake()
563 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); in ath10k_pci_wake()
573 if (ar_pci->pci_ps == 0) in ath10k_pci_sleep()
576 spin_lock_irqsave(&ar_pci->ps_lock, flags); in ath10k_pci_sleep()
579 ar_pci->ps_wake_refcount, ar_pci->ps_awake); in ath10k_pci_sleep()
581 if (WARN_ON(ar_pci->ps_wake_refcount == 0)) in ath10k_pci_sleep()
584 ar_pci->ps_wake_refcount--; in ath10k_pci_sleep()
586 mod_timer(&ar_pci->ps_timer, jiffies + in ath10k_pci_sleep()
590 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); in ath10k_pci_sleep()
596 struct ath10k *ar = ar_pci->ar; in ath10k_pci_ps_timer()
599 spin_lock_irqsave(&ar_pci->ps_lock, flags); in ath10k_pci_ps_timer()
602 ar_pci->ps_wake_refcount, ar_pci->ps_awake); in ath10k_pci_ps_timer()
604 if (ar_pci->ps_wake_refcount > 0) in ath10k_pci_ps_timer()
610 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); in ath10k_pci_ps_timer()
618 if (ar_pci->pci_ps == 0) { in ath10k_pci_sleep_sync()
623 timer_delete_sync(&ar_pci->ps_timer); in ath10k_pci_sleep_sync()
625 spin_lock_irqsave(&ar_pci->ps_lock, flags); in ath10k_pci_sleep_sync()
626 WARN_ON(ar_pci->ps_wake_refcount > 0); in ath10k_pci_sleep_sync()
628 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); in ath10k_pci_sleep_sync()
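
The wake/sleep fragments above implement a refcounted power-save gate: every wake bumps a counter under ps_lock, every sleep drops it, and the last sleeper arms a grace timer instead of sleeping immediately so back-to-back register accesses stay cheap. A minimal sketch of the pattern with hypothetical names; the timer is assumed to have been initialized with timer_setup() elsewhere:

	#include <linux/spinlock.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>
	#include <linux/bug.h>

	struct example_ps {
		spinlock_t lock;
		unsigned int wake_refcount;
		bool awake;
		struct timer_list grace_timer;
	};

	static void example_wake(struct example_ps *ps)
	{
		unsigned long flags;

		spin_lock_irqsave(&ps->lock, flags);
		if (!ps->awake) {
			/* poke the device awake here (register write + poll) */
			ps->awake = true;
		}
		ps->wake_refcount++;
		WARN_ON(ps->wake_refcount == 0);	/* overflow check */
		spin_unlock_irqrestore(&ps->lock, flags);
	}

	static void example_sleep(struct example_ps *ps)
	{
		unsigned long flags;

		spin_lock_irqsave(&ps->lock, flags);
		if (WARN_ON(ps->wake_refcount == 0))
			goto out;
		if (--ps->wake_refcount == 0)
			/* defer the real sleep; the timer callback puts the device down */
			mod_timer(&ps->grace_timer, jiffies + msecs_to_jiffies(100));
	out:
		spin_unlock_irqrestore(&ps->lock, flags);
	}
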
636 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) { in ath10k_bus_pci_write32()
637 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", in ath10k_bus_pci_write32()
638 offset, offset + sizeof(value), ar_pci->mem_len); in ath10k_bus_pci_write32()
649 iowrite32(value, ar_pci->mem + offset); in ath10k_bus_pci_write32()
659 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) { in ath10k_bus_pci_read32()
660 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", in ath10k_bus_pci_read32()
661 offset, offset + sizeof(val), ar_pci->mem_len); in ath10k_bus_pci_read32()
672 val = ioread32(ar_pci->mem + offset); in ath10k_bus_pci_read32()
682 ce->bus_ops->write32(ar, offset, value); in ath10k_pci_write32()
689 return ce->bus_ops->read32(ar, offset); in ath10k_pci_read32()
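
The read32/write32 helpers above refuse any access past the mapped BAR length before touching ioremapped memory. A minimal sketch of that bounds check, assuming a structure holding the mapping:

	#include <linux/kernel.h>
	#include <linux/io.h>

	struct example_mmio {
		void __iomem *mem;
		size_t mem_len;
	};

	static void example_write32(struct example_mmio *m, u32 offset, u32 value)
	{
		/* reject out-of-bounds offsets instead of faulting on the BAR */
		if (unlikely(offset + sizeof(value) > m->mem_len)) {
			pr_warn("refusing mmio write at 0x%08x (max 0x%08zx)\n",
				offset, m->mem_len);
			return;
		}
		iowrite32(value, m->mem + offset);
	}

	static u32 example_read32(struct example_mmio *m, u32 offset)
	{
		u32 val;

		if (unlikely(offset + sizeof(val) > m->mem_len)) {
			pr_warn("refusing mmio read at 0x%08x (max 0x%08zx)\n",
				offset, m->mem_len);
			return 0xffffffff;	/* mimic a failed PCI read */
		}
		return ioread32(m->mem + offset);
	}
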
760 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI) in ath10k_pci_get_irq_method()
768 struct ath10k *ar = pipe->hif_ce_state; in __ath10k_pci_rx_post_buf()
770 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; in __ath10k_pci_rx_post_buf()
775 skb = dev_alloc_skb(pipe->buf_sz); in __ath10k_pci_rx_post_buf()
777 return -ENOMEM; in __ath10k_pci_rx_post_buf()
779 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); in __ath10k_pci_rx_post_buf()
781 paddr = dma_map_single(ar->dev, skb->data, in __ath10k_pci_rx_post_buf()
782 skb->len + skb_tailroom(skb), in __ath10k_pci_rx_post_buf()
784 if (unlikely(dma_mapping_error(ar->dev, paddr))) { in __ath10k_pci_rx_post_buf()
787 return -EIO; in __ath10k_pci_rx_post_buf()
790 ATH10K_SKB_RXCB(skb)->paddr = paddr; in __ath10k_pci_rx_post_buf()
792 spin_lock_bh(&ce->ce_lock); in __ath10k_pci_rx_post_buf()
793 ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr); in __ath10k_pci_rx_post_buf()
794 spin_unlock_bh(&ce->ce_lock); in __ath10k_pci_rx_post_buf()
796 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), in __ath10k_pci_rx_post_buf()
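
__ath10k_pci_rx_post_buf allocates an skb, maps its whole usable area for device writes, and unmaps on a failed enqueue. A condensed sketch of that allocate/map/enqueue/unwind pattern; example_enqueue() is a stub standing in for the CE ring post:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>
	#include <linux/errno.h>
	#include <linux/bug.h>

	/* Stub standing in for the CE enqueue (ce_rx_post_buf() in the driver). */
	static int example_enqueue(struct sk_buff *skb, dma_addr_t paddr)
	{
		return 0;	/* pretend the descriptor was queued */
	}

	static int example_rx_post_buf(struct device *dev, size_t buf_sz)
	{
		struct sk_buff *skb;
		dma_addr_t paddr;
		int ret;

		skb = dev_alloc_skb(buf_sz);
		if (!skb)
			return -ENOMEM;

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		/* map the whole usable area so the device may fill any of it */
		paddr = dma_map_single(dev, skb->data,
				       skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, paddr))) {
			dev_kfree_skb_any(skb);
			return -EIO;
		}

		ret = example_enqueue(skb, paddr);
		if (ret) {
			/* undo the mapping when the ring rejects the buffer */
			dma_unmap_single(dev, paddr, skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
		return ret;
	}
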
807 struct ath10k *ar = pipe->hif_ce_state; in ath10k_pci_rx_post_pipe()
810 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; in ath10k_pci_rx_post_pipe()
813 if (pipe->buf_sz == 0) in ath10k_pci_rx_post_pipe()
816 if (!ce_pipe->dest_ring) in ath10k_pci_rx_post_pipe()
819 spin_lock_bh(&ce->ce_lock); in ath10k_pci_rx_post_pipe()
821 spin_unlock_bh(&ce->ce_lock); in ath10k_pci_rx_post_pipe()
826 if (ret == -ENOSPC) in ath10k_pci_rx_post_pipe()
829 mod_timer(&ar_pci->rx_post_retry, jiffies + in ath10k_pci_rx_post_pipe()
833 num--; in ath10k_pci_rx_post_pipe()
843 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); in ath10k_pci_rx_post()
850 struct ath10k *ar = ar_pci->ar; in ath10k_pci_rx_replenish_retry()
893 if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr)) in ath10k_pci_targ_cpu_to_ce_addr()
894 return -EOPNOTSUPP; in ath10k_pci_targ_cpu_to_ce_addr()
896 return ar_pci->targ_cpu_to_ce_addr(ar, addr); in ath10k_pci_targ_cpu_to_ce_addr()
918 mutex_lock(&ar_pci->ce_diag_mutex); in ath10k_pci_diag_read_mem()
919 ce_diag = ar_pci->ce_diag; in ath10k_pci_diag_read_mem()
924 * 1) 4-byte alignment in ath10k_pci_diag_read_mem()
925 * 2) Buffer in DMA-able space in ath10k_pci_diag_read_mem()
929 data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, in ath10k_pci_diag_read_mem()
932 ret = -ENOMEM; in ath10k_pci_diag_read_mem()
966 ret = -EBUSY; in ath10k_pci_diag_read_mem()
978 ret = -EBUSY; in ath10k_pci_diag_read_mem()
984 ret = -EIO; in ath10k_pci_diag_read_mem()
989 ret = -EIO; in ath10k_pci_diag_read_mem()
993 remaining_bytes -= nbytes; in ath10k_pci_diag_read_mem()
1003 dma_free_coherent(ar->dev, alloc_nbytes, data_buf, in ath10k_pci_diag_read_mem()
1006 mutex_unlock(&ar_pci->ce_diag_mutex); in ath10k_pci_diag_read_mem()
1062 mutex_lock(&ar_pci->ce_diag_mutex); in ath10k_pci_diag_write_mem()
1063 ce_diag = ar_pci->ce_diag; in ath10k_pci_diag_write_mem()
1068 * 1) 4-byte alignment in ath10k_pci_diag_write_mem()
1069 * 2) Buffer in DMA-able space in ath10k_pci_diag_write_mem()
1073 data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, in ath10k_pci_diag_write_mem()
1076 ret = -ENOMEM; in ath10k_pci_diag_write_mem()
1106 * Request CE to send caller-supplied data that in ath10k_pci_diag_write_mem()
1119 ret = -EBUSY; in ath10k_pci_diag_write_mem()
1131 ret = -EBUSY; in ath10k_pci_diag_write_mem()
1137 ret = -EIO; in ath10k_pci_diag_write_mem()
1142 ret = -EIO; in ath10k_pci_diag_write_mem()
1146 remaining_bytes -= nbytes; in ath10k_pci_diag_write_mem()
1153 dma_free_coherent(ar->dev, alloc_nbytes, data_buf, in ath10k_pci_diag_write_mem()
1161 mutex_unlock(&ar_pci->ce_diag_mutex); in ath10k_pci_diag_write_mem()
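
Both diag paths above bounce data through a dma_alloc_coherent() buffer, which satisfies the two constraints listed in the comments (4-byte alignment, DMA-able space), then chunk the transfer and free the buffer on every exit path. A minimal sketch of that bounce-buffer discipline; example_xfer_chunk() is a stub standing in for the copy-engine transfer:

	#include <linux/kernel.h>
	#include <linux/dma-mapping.h>

	static int example_xfer_chunk(void *bounce, u32 addr, size_t nbytes)
	{
		return 0;	/* pretend the copy engine moved the chunk */
	}

	static int example_diag_access(struct device *dev, u32 addr, size_t total)
	{
		size_t chunk = 2048, remaining = total, nbytes;
		dma_addr_t base;
		void *buf;
		int ret = 0;

		/* coherent memory is aligned and device-visible by construction */
		buf = dma_alloc_coherent(dev, chunk, &base, GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		while (remaining) {
			nbytes = min(remaining, chunk);
			ret = example_xfer_chunk(buf, addr, nbytes);
			if (ret)
				break;		/* single exit path frees the buffer */
			remaining -= nbytes;
			addr += nbytes;
		}

		dma_free_coherent(dev, chunk, buf, base);
		return ret;
	}
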
1176 struct ath10k *ar = ce_state->ar; in ath10k_pci_htc_tx_cb()
1197 struct ath10k *ar = ce_state->ar; in ath10k_pci_process_rx_cb()
1199 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; in ath10k_pci_process_rx_cb()
1209 max_nbytes = skb->len + skb_tailroom(skb); in ath10k_pci_process_rx_cb()
1210 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, in ath10k_pci_process_rx_cb()
1226 ce_state->id, skb->len); in ath10k_pci_process_rx_cb()
1228 skb->data, skb->len); in ath10k_pci_process_rx_cb()
1240 struct ath10k *ar = ce_state->ar; in ath10k_pci_process_htt_rx_cb()
1242 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; in ath10k_pci_process_htt_rx_cb()
1243 struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl; in ath10k_pci_process_htt_rx_cb()
1258 max_nbytes = skb->len + skb_tailroom(skb); in ath10k_pci_process_htt_rx_cb()
1266 dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, in ath10k_pci_process_htt_rx_cb()
1275 ce_state->id, skb->len); in ath10k_pci_process_htt_rx_cb()
1277 skb->data, skb->len); in ath10k_pci_process_htt_rx_cb()
1279 orig_len = skb->len; in ath10k_pci_process_htt_rx_cb()
1281 skb_push(skb, orig_len - skb->len); in ath10k_pci_process_htt_rx_cb()
1286 dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, in ath10k_pci_process_htt_rx_cb()
1287 skb->len + skb_tailroom(skb), in ath10k_pci_process_htt_rx_cb()
1302 * HTT Rx (target->host) is processed. in ath10k_pci_htt_htc_rx_cb()
1304 ath10k_ce_per_engine_service(ce_state->ar, 4); in ath10k_pci_htt_htc_rx_cb()
1321 struct ath10k *ar = ce_state->ar; in ath10k_pci_htt_tx_cb()
1329 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, in ath10k_pci_htt_tx_cb()
1330 skb->len, DMA_TO_DEVICE); in ath10k_pci_htt_tx_cb()
1345 * HTT Rx (target->host) is processed. in ath10k_pci_htt_rx_cb()
1347 ath10k_ce_per_engine_service(ce_state->ar, 4); in ath10k_pci_htt_rx_cb()
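
The HTT rx callback above avoids a full unmap: it syncs the buffer for CPU access, lets the handler consume it, resets the skb, then syncs it back to the device so the same mapping can be reposted. A sketch of that sync pair under the assumption that the handler leaves the skb reusable:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	static void example_process_in_place(struct device *dev, struct sk_buff *skb,
					     dma_addr_t paddr, unsigned int nbytes,
					     void (*handler)(struct sk_buff *))
	{
		/* full mapped length, captured while skb->len is still zero */
		unsigned int len = skb->len + skb_tailroom(skb);

		/* hand ownership of the buffer to the CPU for inspection */
		dma_sync_single_for_cpu(dev, paddr, len, DMA_FROM_DEVICE);

		skb_put(skb, nbytes);
		handler(skb);
		skb_trim(skb, 0);	/* reset so the buffer can be reposted */

		/* return ownership to the device before re-queueing the mapping */
		dma_sync_single_for_device(dev, paddr, len, DMA_FROM_DEVICE);
	}
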
1357 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; in ath10k_pci_hif_tx_sg()
1358 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; in ath10k_pci_hif_tx_sg()
1359 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; in ath10k_pci_hif_tx_sg()
1365 spin_lock_bh(&ce->ce_lock); in ath10k_pci_hif_tx_sg()
1367 nentries_mask = src_ring->nentries_mask; in ath10k_pci_hif_tx_sg()
1368 sw_index = src_ring->sw_index; in ath10k_pci_hif_tx_sg()
1369 write_index = src_ring->write_index; in ath10k_pci_hif_tx_sg()
1372 write_index, sw_index - 1) < n_items)) { in ath10k_pci_hif_tx_sg()
1373 err = -ENOBUFS; in ath10k_pci_hif_tx_sg()
1377 for (i = 0; i < n_items - 1; i++) { in ath10k_pci_hif_tx_sg()
1394 /* `i` is equal to `n_items - 1` after the for() loop */ in ath10k_pci_hif_tx_sg()
1411 spin_unlock_bh(&ce->ce_lock); in ath10k_pci_hif_tx_sg()
1415 for (; i > 0; i--) in ath10k_pci_hif_tx_sg()
1418 spin_unlock_bh(&ce->ce_lock); in ath10k_pci_hif_tx_sg()
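
The -ENOBUFS check in ath10k_pci_hif_tx_sg computes free ring space from the software and write indices with mask arithmetic before reserving n_items descriptors. A sketch of that modular delta, assuming a power-of-two ring where one slot is kept unused so full and empty remain distinguishable:

	#include <linux/errno.h>

	/* Distance from fromidx to toidx on a power-of-two ring, wrapped by the mask. */
	static unsigned int example_ring_delta(unsigned int nentries_mask,
					       unsigned int fromidx,
					       unsigned int toidx)
	{
		return (toidx - fromidx) & nentries_mask;
	}

	/* usage: refuse the burst when it does not fit */
	static int example_reserve(unsigned int nentries_mask,
				   unsigned int sw_index, unsigned int write_index,
				   unsigned int n_items)
	{
		if (example_ring_delta(nentries_mask, write_index,
				       (sw_index - 1) & nentries_mask) < n_items)
			return -ENOBUFS;
		return 0;
	}
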
1434 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); in ath10k_pci_hif_get_free_queue_number()
1443 lockdep_assert_held(&ar->dump_mutex); in ath10k_pci_dump_registers()
1468 crash_data->registers[i] = reg_dump_values[i]; in ath10k_pci_dump_registers()
1482 cur_section = &mem_region->section_table.sections[0]; in ath10k_pci_dump_memory_section()
1484 if (mem_region->start > cur_section->start) { in ath10k_pci_dump_memory_section()
1486 mem_region->start, cur_section->start); in ath10k_pci_dump_memory_section()
1490 skip_size = cur_section->start - mem_region->start; in ath10k_pci_dump_memory_section()
1503 section_size = cur_section->end - cur_section->start; in ath10k_pci_dump_memory_section()
1507 cur_section->start, in ath10k_pci_dump_memory_section()
1508 cur_section->end); in ath10k_pci_dump_memory_section()
1512 if ((i + 1) == mem_region->section_table.size) { in ath10k_pci_dump_memory_section()
1519 if (cur_section->end > next_section->start) { in ath10k_pci_dump_memory_section()
1521 next_section->start, in ath10k_pci_dump_memory_section()
1522 cur_section->end); in ath10k_pci_dump_memory_section()
1526 skip_size = next_section->start - cur_section->end; in ath10k_pci_dump_memory_section()
1534 buf_len -= skip_size + section_size; in ath10k_pci_dump_memory_section()
1537 ret = ath10k_pci_diag_read_mem(ar, cur_section->start, in ath10k_pci_dump_memory_section()
1541 cur_section->start, ret); in ath10k_pci_dump_memory_section()
1578 return -EIO; in ath10k_pci_set_ram_config()
1592 base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG); in ath10k_pci_dump_memory_sram()
1593 base_addr += region->start; in ath10k_pci_dump_memory_sram()
1595 for (i = 0; i < region->len; i += 4) { in ath10k_pci_dump_memory_sram()
1596 iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG); in ath10k_pci_dump_memory_sram()
1597 *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG); in ath10k_pci_dump_memory_sram()
1600 return region->len; in ath10k_pci_dump_memory_sram()
1612 mutex_lock(&ar->conf_mutex); in ath10k_pci_dump_memory_reg()
1613 if (ar->state != ATH10K_STATE_ON) { in ath10k_pci_dump_memory_reg()
1615 ret = -EIO; in ath10k_pci_dump_memory_reg()
1619 for (i = 0; i < region->len; i += 4) in ath10k_pci_dump_memory_reg()
1620 *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i); in ath10k_pci_dump_memory_reg()
1622 ret = region->len; in ath10k_pci_dump_memory_reg()
1624 mutex_unlock(&ar->conf_mutex); in ath10k_pci_dump_memory_reg()
1635 if (current_region->section_table.size > 0) in ath10k_pci_dump_memory_generic()
1640 current_region->len); in ath10k_pci_dump_memory_generic()
1646 current_region->start, in ath10k_pci_dump_memory_generic()
1648 current_region->len); in ath10k_pci_dump_memory_generic()
1651 current_region->name, ret); in ath10k_pci_dump_memory_generic()
1655 return current_region->len; in ath10k_pci_dump_memory_generic()
1669 lockdep_assert_held(&ar->dump_mutex); in ath10k_pci_dump_memory()
1678 current_region = &mem_layout->region_table.regions[0]; in ath10k_pci_dump_memory()
1680 buf = crash_data->ramdump_buf; in ath10k_pci_dump_memory()
1681 buf_len = crash_data->ramdump_buf_len; in ath10k_pci_dump_memory()
1685 for (i = 0; i < mem_layout->region_table.size; i++) { in ath10k_pci_dump_memory()
1688 if (current_region->len > buf_len) { in ath10k_pci_dump_memory()
1690 current_region->name, in ath10k_pci_dump_memory()
1691 current_region->len, in ath10k_pci_dump_memory()
1699 if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 || in ath10k_pci_dump_memory()
1700 current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) { in ath10k_pci_dump_memory()
1701 shift = current_region->start >> 20; in ath10k_pci_dump_memory()
1706 current_region->name, ret); in ath10k_pci_dump_memory()
1711 /* Reserve space for the header. */ in ath10k_pci_dump_memory()
1714 buf_len -= sizeof(*hdr); in ath10k_pci_dump_memory()
1716 switch (current_region->type) { in ath10k_pci_dump_memory()
1736 hdr->region_type = cpu_to_le32(current_region->type); in ath10k_pci_dump_memory()
1737 hdr->start = cpu_to_le32(current_region->start); in ath10k_pci_dump_memory()
1738 hdr->length = cpu_to_le32(count); in ath10k_pci_dump_memory()
1745 buf_len -= count; in ath10k_pci_dump_memory()
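
Each dumped region above is preceded by a header recording type, start, and byte count in little-endian, and the buffer/length bookkeeping then advances past header plus payload. A sketch with a hypothetical header layout; the driver's real layout lives in its coredump definitions and may differ:

	#include <linux/types.h>
	#include <linux/string.h>
	#include <asm/byteorder.h>

	struct example_dump_hdr {
		__le32 region_type;
		__le32 start;
		__le32 length;
	} __packed;

	static size_t example_emit_region(u8 *buf, size_t buf_len,
					  u32 type, u32 start,
					  const u8 *data, u32 count)
	{
		struct example_dump_hdr *hdr = (struct example_dump_hdr *)buf;

		if (buf_len < sizeof(*hdr) + count)
			return 0;	/* caller skips a region that does not fit */

		hdr->region_type = cpu_to_le32(type);
		hdr->start = cpu_to_le32(start);
		hdr->length = cpu_to_le32(count);
		memcpy(buf + sizeof(*hdr), data, count);

		return sizeof(*hdr) + count;	/* bytes consumed from the buffer */
	}
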
1756 struct ath10k *ar = ar_pci->ar; in ath10k_pci_fw_dump_work()
1759 mutex_lock(&ar->dump_mutex); in ath10k_pci_fw_dump_work()
1761 spin_lock_bh(&ar->data_lock); in ath10k_pci_fw_dump_work()
1762 ar->stats.fw_crash_counter++; in ath10k_pci_fw_dump_work()
1763 spin_unlock_bh(&ar->data_lock); in ath10k_pci_fw_dump_work()
1768 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); in ath10k_pci_fw_dump_work()
1778 mutex_unlock(&ar->dump_mutex); in ath10k_pci_fw_dump_work()
1787 queue_work(ar->workqueue, &ar_pci->dump_work); in ath10k_pci_fw_crashed_dump()
1812 if (resources > (ar_pci->attr[pipe].src_nentries >> 1)) in ath10k_pci_hif_send_complete_check()
1822 timer_delete_sync(&ar_pci->rx_post_retry); in ath10k_pci_rx_retry_sync()
1836 entry = &ar_pci->serv_to_pipe[i]; in ath10k_pci_hif_map_service_to_pipe()
1838 if (__le32_to_cpu(entry->service_id) != service_id) in ath10k_pci_hif_map_service_to_pipe()
1841 switch (__le32_to_cpu(entry->pipedir)) { in ath10k_pci_hif_map_service_to_pipe()
1846 *dl_pipe = __le32_to_cpu(entry->pipenum); in ath10k_pci_hif_map_service_to_pipe()
1851 *ul_pipe = __le32_to_cpu(entry->pipenum); in ath10k_pci_hif_map_service_to_pipe()
1857 *dl_pipe = __le32_to_cpu(entry->pipenum); in ath10k_pci_hif_map_service_to_pipe()
1858 *ul_pipe = __le32_to_cpu(entry->pipenum); in ath10k_pci_hif_map_service_to_pipe()
1866 return -ENOENT; in ath10k_pci_hif_map_service_to_pipe()
1885 switch (ar->hw_rev) { in ath10k_pci_irq_msi_fw_mask()
1913 switch (ar->hw_rev) { in ath10k_pci_irq_msi_fw_unmask()
1948 synchronize_irq(ar_pci->pdev->irq); in ath10k_pci_irq_sync()
1969 pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL, in ath10k_pci_hif_start()
1971 ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC); in ath10k_pci_hif_start()
1984 ar = pci_pipe->hif_ce_state; in ath10k_pci_rx_pipe_cleanup()
1985 ce_pipe = pci_pipe->ce_hdl; in ath10k_pci_rx_pipe_cleanup()
1986 ce_ring = ce_pipe->dest_ring; in ath10k_pci_rx_pipe_cleanup()
1991 if (!pci_pipe->buf_sz) in ath10k_pci_rx_pipe_cleanup()
1994 for (i = 0; i < ce_ring->nentries; i++) { in ath10k_pci_rx_pipe_cleanup()
1995 skb = ce_ring->per_transfer_context[i]; in ath10k_pci_rx_pipe_cleanup()
1999 ce_ring->per_transfer_context[i] = NULL; in ath10k_pci_rx_pipe_cleanup()
2001 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, in ath10k_pci_rx_pipe_cleanup()
2002 skb->len + skb_tailroom(skb), in ath10k_pci_rx_pipe_cleanup()
2016 ar = pci_pipe->hif_ce_state; in ath10k_pci_tx_pipe_cleanup()
2017 ce_pipe = pci_pipe->ce_hdl; in ath10k_pci_tx_pipe_cleanup()
2018 ce_ring = ce_pipe->src_ring; in ath10k_pci_tx_pipe_cleanup()
2023 if (!pci_pipe->buf_sz) in ath10k_pci_tx_pipe_cleanup()
2026 for (i = 0; i < ce_ring->nentries; i++) { in ath10k_pci_tx_pipe_cleanup()
2027 skb = ce_ring->per_transfer_context[i]; in ath10k_pci_tx_pipe_cleanup()
2031 ce_ring->per_transfer_context[i] = NULL; in ath10k_pci_tx_pipe_cleanup()
2053 pipe_info = &ar_pci->pipe_info[pipe_num]; in ath10k_pci_buffer_cleanup()
2085 cancel_work_sync(&ar_pci->dump_work); in ath10k_pci_hif_stop()
2092 * For ranged MSI the CE-related interrupts can be masked. However in ath10k_pci_hif_stop()
2102 spin_lock_irqsave(&ar_pci->ps_lock, flags); in ath10k_pci_hif_stop()
2103 WARN_ON(ar_pci->ps_wake_refcount > 0); in ath10k_pci_hif_stop()
2104 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); in ath10k_pci_hif_stop()
2112 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; in ath10k_pci_hif_exchange_bmi_msg()
2113 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; in ath10k_pci_hif_exchange_bmi_msg()
2114 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; in ath10k_pci_hif_exchange_bmi_msg()
2115 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; in ath10k_pci_hif_exchange_bmi_msg()
2125 return -EINVAL; in ath10k_pci_hif_exchange_bmi_msg()
2128 return -EINVAL; in ath10k_pci_hif_exchange_bmi_msg()
2132 return -ENOMEM; in ath10k_pci_hif_exchange_bmi_msg()
2134 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); in ath10k_pci_hif_exchange_bmi_msg()
2135 ret = dma_mapping_error(ar->dev, req_paddr); in ath10k_pci_hif_exchange_bmi_msg()
2137 ret = -EIO; in ath10k_pci_hif_exchange_bmi_msg()
2144 ret = -ENOMEM; in ath10k_pci_hif_exchange_bmi_msg()
2148 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, in ath10k_pci_hif_exchange_bmi_msg()
2150 ret = dma_mapping_error(ar->dev, resp_paddr); in ath10k_pci_hif_exchange_bmi_msg()
2152 ret = -EIO; in ath10k_pci_hif_exchange_bmi_msg()
2162 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); in ath10k_pci_hif_exchange_bmi_msg()
2175 /* non-zero means we did not time out */ in ath10k_pci_hif_exchange_bmi_msg()
2184 dma_unmap_single(ar->dev, resp_paddr, in ath10k_pci_hif_exchange_bmi_msg()
2188 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); in ath10k_pci_hif_exchange_bmi_msg()
2208 xfer->tx_done = true; in ath10k_pci_bmi_send_done()
2213 struct ath10k *ar = ce_state->ar; in ath10k_pci_bmi_recv_data()
2224 if (!xfer->wait_for_resp) { in ath10k_pci_bmi_recv_data()
2229 xfer->resp_len = nbytes; in ath10k_pci_bmi_recv_data()
2230 xfer->rx_done = true; in ath10k_pci_bmi_recv_data()
2247 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) { in ath10k_pci_bmi_wait()
2255 ret = -ETIMEDOUT; in ath10k_pci_bmi_wait()
2258 dur = jiffies - started; in ath10k_pci_bmi_wait()
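
ath10k_pci_bmi_wait spins until both completion flags line up or a jiffies deadline passes, then reports the wait duration. A minimal sketch of that poll-with-deadline idiom with hypothetical names; the busy-wait is an assumption acceptable only for short BMI-style exchanges:

	#include <linux/jiffies.h>
	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/printk.h>

	static int example_poll_done(bool (*done)(void *), void *arg,
				     unsigned int timeout_ms)
	{
		unsigned long started = jiffies;
		unsigned long deadline = started + msecs_to_jiffies(timeout_ms);
		unsigned long dur;

		while (time_before(jiffies, deadline)) {
			if (done(arg)) {
				dur = jiffies - started;
				if (dur > HZ)	/* note unusually long waits */
					pr_warn("completion took %lu jiffies\n", dur);
				return 0;
			}
			udelay(10);
		}
		return -ETIMEDOUT;
	}
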
2286 switch (ar_pci->pdev->device) { in ath10k_pci_get_num_banks()
2296 switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) { in ath10k_pci_get_num_banks()
2322 return ce->bus_ops->get_num_banks(ar); in ath10k_bus_get_num_banks()
2339 /* Download to Target the CE Config and the service-to-CE map */ in ath10k_pci_init_config()
2343 /* Supply Target-side CE configuration */ in ath10k_pci_init_config()
2352 ret = -EIO; in ath10k_pci_init_config()
2367 ret = -EIO; in ath10k_pci_init_config()
2373 ar_pci->pipe_config, in ath10k_pci_init_config()
2392 ret = -EIO; in ath10k_pci_init_config()
2398 ar_pci->serv_to_pipe, in ath10k_pci_init_config()
2478 attr = &ar_pci->attr[5]; in ath10k_pci_override_ce_config()
2479 attr->src_sz_max = 0; in ath10k_pci_override_ce_config()
2480 attr->dest_nentries = 0; in ath10k_pci_override_ce_config()
2483 config = &ar_pci->pipe_config[5]; in ath10k_pci_override_ce_config()
2484 config->pipedir = __cpu_to_le32(PIPEDIR_OUT); in ath10k_pci_override_ce_config()
2485 config->nbytes_max = __cpu_to_le32(2048); in ath10k_pci_override_ce_config()
2488 ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1); in ath10k_pci_override_ce_config()
2499 pipe = &ar_pci->pipe_info[i]; in ath10k_pci_alloc_pipes()
2500 pipe->ce_hdl = &ce->ce_states[i]; in ath10k_pci_alloc_pipes()
2501 pipe->pipe_num = i; in ath10k_pci_alloc_pipes()
2502 pipe->hif_ce_state = ar; in ath10k_pci_alloc_pipes()
2504 ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]); in ath10k_pci_alloc_pipes()
2513 ar_pci->ce_diag = pipe->ce_hdl; in ath10k_pci_alloc_pipes()
2517 pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max); in ath10k_pci_alloc_pipes()
2537 ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]); in ath10k_pci_init_pipes()
2630 spin_lock_bh(&ar->data_lock); in ath10k_pci_warm_reset()
2631 ar->stats.fw_warm_reset_counter++; in ath10k_pci_warm_reset()
2632 spin_unlock_bh(&ar->data_lock); in ath10k_pci_warm_reset()
2638 * then it is possible for the device to confuse the pci-e controller to in ath10k_pci_warm_reset()
2672 if (!ar_pci->pci_soft_reset) in ath10k_pci_safe_chip_reset()
2673 return -EOPNOTSUPP; in ath10k_pci_safe_chip_reset()
2675 return ar_pci->pci_soft_reset(ar); in ath10k_pci_safe_chip_reset()
2731 return -EPERM; in ath10k_pci_qca988x_chip_reset()
2812 if (WARN_ON(!ar_pci->pci_hard_reset)) in ath10k_pci_chip_reset()
2813 return -EOPNOTSUPP; in ath10k_pci_chip_reset()
2815 return ar_pci->pci_hard_reset(ar); in ath10k_pci_chip_reset()
2826 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, in ath10k_pci_hif_power_up()
2827 &ar_pci->link_ctl); in ath10k_pci_hif_power_up()
2828 pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL, in ath10k_pci_hif_power_up()
2834 * The target may be in an undefined state with an AUX-powered Target in ath10k_pci_hif_power_up()
2897 /* The grace timer can still be counting down and ar->ps_awake be true. in ath10k_pci_suspend()
2916 struct pci_dev *pdev = ar_pci->pdev; in ath10k_pci_resume()
2927 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries in ath10k_pci_resume()
2977 /* In the Swift ASIC the EEPROM clock will be (110 MHz / 512) ≈ 214.8 kHz */ in ath10k_pci_enable_eeprom()
3014 wait_limit--; in ath10k_pci_read_eeprom()
3021 return -ETIMEDOUT; in ath10k_pci_read_eeprom()
3029 return -EIO; in ath10k_pci_read_eeprom()
3047 return -EOPNOTSUPP; in ath10k_pci_hif_fetch_cal_eeprom()
3049 calsize = ar->hw_params.cal_data_len; in ath10k_pci_hif_fetch_cal_eeprom()
3052 return -ENOMEM; in ath10k_pci_hif_fetch_cal_eeprom()
3073 return -EINVAL; in ath10k_pci_hif_fetch_cal_eeprom()
3097 * Top-level interrupt handler for all PCI interrupts from a Target.
3098 * When a block of MSI interrupts is allocated, this top-level handler
3099 * is not used; instead, we directly call the correct sub-handler.
3116 if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) && in ath10k_pci_interrupt_handler()
3122 napi_schedule(&ar->napi); in ath10k_pci_interrupt_handler()
3170 ret = request_irq(ar_pci->pdev->irq, in ath10k_pci_request_irq_msi()
3175 ar_pci->pdev->irq, ret); in ath10k_pci_request_irq_msi()
3187 ret = request_irq(ar_pci->pdev->irq, in ath10k_pci_request_irq_intx()
3192 ar_pci->pdev->irq, ret); in ath10k_pci_request_irq_intx()
3203 switch (ar_pci->oper_irq_mode) { in ath10k_pci_request_irq()
3209 return -EINVAL; in ath10k_pci_request_irq()
3217 free_irq(ar_pci->pdev->irq, ar); in ath10k_pci_free_irq()
3222 netif_napi_add(ar->napi_dev, &ar->napi, ath10k_pci_napi_poll); in ath10k_pci_init_napi()
3238 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI; in ath10k_pci_init_irq()
3239 ret = pci_enable_msi(ar_pci->pdev); in ath10k_pci_init_irq()
3255 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX; in ath10k_pci_init_irq()
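
init_irq tries MSI first and falls back to legacy INTx when pci_enable_msi() fails, recording the chosen mode so request/free/deinit can mirror it. A condensed sketch of that fallback:

	#include <linux/pci.h>

	enum example_irq_mode { EXAMPLE_IRQ_INTX, EXAMPLE_IRQ_MSI };

	static enum example_irq_mode example_init_irq(struct pci_dev *pdev)
	{
		/* prefer MSI; INTx remains the universally available fallback */
		if (!pci_enable_msi(pdev))
			return EXAMPLE_IRQ_MSI;
		return EXAMPLE_IRQ_INTX;
	}

	static void example_deinit_irq(struct pci_dev *pdev, enum example_irq_mode mode)
	{
		if (mode == EXAMPLE_IRQ_MSI)
			pci_disable_msi(pdev);	/* INTx needs no teardown */
	}
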
3273 switch (ar_pci->oper_irq_mode) { in ath10k_pci_deinit_irq()
3278 pci_disable_msi(ar_pci->pdev); in ath10k_pci_deinit_irq()
3312 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) in ath10k_pci_wait_for_target_init()
3324 return -EIO; in ath10k_pci_wait_for_target_init()
3329 return -ECOMM; in ath10k_pci_wait_for_target_init()
3335 return -ETIMEDOUT; in ath10k_pci_wait_for_target_init()
3348 spin_lock_bh(&ar->data_lock); in ath10k_pci_cold_reset()
3350 ar->stats.fw_cold_reset_counter++; in ath10k_pci_cold_reset()
3352 spin_unlock_bh(&ar->data_lock); in ath10k_pci_cold_reset()
3380 struct pci_dev *pdev = ar_pci->pdev; in ath10k_pci_claim()
3399 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in ath10k_pci_claim()
3401 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); in ath10k_pci_claim()
3408 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); in ath10k_pci_claim()
3409 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); in ath10k_pci_claim()
3410 if (!ar_pci->mem) { in ath10k_pci_claim()
3412 ret = -EIO; in ath10k_pci_claim()
3416 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); in ath10k_pci_claim()
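
ath10k_pci_claim follows the canonical claim order: enable the device, request the BAR region, set bus mastering and the 32-bit DMA mask, then iomap. A sketch with error unwinding, assuming BAR 0 as in this driver:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static void __iomem *example_claim(struct pci_dev *pdev, size_t *mem_len)
	{
		void __iomem *mem;
		int ret;

		ret = pci_enable_device(pdev);
		if (ret)
			return NULL;

		ret = pci_request_region(pdev, 0, "example");
		if (ret)
			goto err_disable;

		pci_set_master(pdev);

		/* the hardware handled here is limited to 32-bit DMA addresses */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			goto err_region;

		*mem_len = pci_resource_len(pdev, 0);
		mem = pci_iomap(pdev, 0, 0);
		if (!mem)
			goto err_region;

		return mem;

	err_region:
		pci_release_region(pdev, 0);
	err_disable:
		pci_disable_device(pdev);
		return NULL;
	}
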
3431 struct pci_dev *pdev = ar_pci->pdev; in ath10k_pci_release()
3433 pci_iounmap(pdev, ar_pci->mem); in ath10k_pci_release()
3447 if (supp_chip->dev_id == dev_id && in ath10k_pci_chip_is_supported()
3448 supp_chip->rev_id == rev_id) in ath10k_pci_chip_is_supported()
3461 spin_lock_init(&ce->ce_lock); in ath10k_pci_setup_resource()
3462 spin_lock_init(&ar_pci->ps_lock); in ath10k_pci_setup_resource()
3463 mutex_init(&ar_pci->ce_diag_mutex); in ath10k_pci_setup_resource()
3465 INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work); in ath10k_pci_setup_resource()
3467 timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); in ath10k_pci_setup_resource()
3469 ar_pci->attr = kmemdup(pci_host_ce_config_wlan, in ath10k_pci_setup_resource()
3472 if (!ar_pci->attr) in ath10k_pci_setup_resource()
3473 return -ENOMEM; in ath10k_pci_setup_resource()
3475 ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, in ath10k_pci_setup_resource()
3478 if (!ar_pci->pipe_config) { in ath10k_pci_setup_resource()
3479 ret = -ENOMEM; in ath10k_pci_setup_resource()
3483 ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, in ath10k_pci_setup_resource()
3486 if (!ar_pci->serv_to_pipe) { in ath10k_pci_setup_resource()
3487 ret = -ENOMEM; in ath10k_pci_setup_resource()
3504 kfree(ar_pci->serv_to_pipe); in ath10k_pci_setup_resource()
3506 kfree(ar_pci->pipe_config); in ath10k_pci_setup_resource()
3508 kfree(ar_pci->attr); in ath10k_pci_setup_resource()
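
setup_resource duplicates the static CE, pipe, and service tables with kmemdup() so per-device overrides (see ath10k_pci_override_ce_config above) never touch the shared templates, freeing in reverse order on failure. A sketch of that duplicate-and-unwind pattern:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct example_cfg {
		void *attr, *pipe_config, *serv_to_pipe;
	};

	static int example_dup_tables(struct example_cfg *cfg,
				      const void *attr_tmpl, size_t attr_sz,
				      const void *pipe_tmpl, size_t pipe_sz,
				      const void *serv_tmpl, size_t serv_sz)
	{
		/* private copies keep per-device tweaks off the shared templates */
		cfg->attr = kmemdup(attr_tmpl, attr_sz, GFP_KERNEL);
		if (!cfg->attr)
			return -ENOMEM;

		cfg->pipe_config = kmemdup(pipe_tmpl, pipe_sz, GFP_KERNEL);
		if (!cfg->pipe_config)
			goto err_free_attr;

		cfg->serv_to_pipe = kmemdup(serv_tmpl, serv_sz, GFP_KERNEL);
		if (!cfg->serv_to_pipe)
			goto err_free_pipe;

		return 0;

	err_free_pipe:
		kfree(cfg->pipe_config);
	err_free_attr:
		kfree(cfg->attr);
		return -ENOMEM;
	}
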
3517 netif_napi_del(&ar->napi); in ath10k_pci_release_resource()
3520 kfree(ar_pci->attr); in ath10k_pci_release_resource()
3521 kfree(ar_pci->pipe_config); in ath10k_pci_release_resource()
3522 kfree(ar_pci->serv_to_pipe); in ath10k_pci_release_resource()
3544 switch (pci_dev->device) { in ath10k_pci_probe()
3599 return -EOPNOTSUPP; in ath10k_pci_probe()
3602 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, in ath10k_pci_probe()
3605 dev_err(&pdev->dev, "failed to allocate core\n"); in ath10k_pci_probe()
3606 return -ENOMEM; in ath10k_pci_probe()
3610 pdev->vendor, pdev->device, in ath10k_pci_probe()
3611 pdev->subsystem_vendor, pdev->subsystem_device); in ath10k_pci_probe()
3614 ar_pci->pdev = pdev; in ath10k_pci_probe()
3615 ar_pci->dev = &pdev->dev; in ath10k_pci_probe()
3616 ar_pci->ar = ar; in ath10k_pci_probe()
3617 ar->dev_id = pci_dev->device; in ath10k_pci_probe()
3618 ar_pci->pci_ps = pci_ps; in ath10k_pci_probe()
3619 ar_pci->ce.bus_ops = &ath10k_pci_bus_ops; in ath10k_pci_probe()
3620 ar_pci->pci_soft_reset = pci_soft_reset; in ath10k_pci_probe()
3621 ar_pci->pci_hard_reset = pci_hard_reset; in ath10k_pci_probe()
3622 ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr; in ath10k_pci_probe()
3623 ar->ce_priv = &ar_pci->ce; in ath10k_pci_probe()
3625 ar->id.vendor = pdev->vendor; in ath10k_pci_probe()
3626 ar->id.device = pdev->device; in ath10k_pci_probe()
3627 ar->id.subsystem_vendor = pdev->subsystem_vendor; in ath10k_pci_probe()
3628 ar->id.subsystem_device = pdev->subsystem_device; in ath10k_pci_probe()
3630 timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0); in ath10k_pci_probe()
3660 ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode, in ath10k_pci_probe()
3671 /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that in ath10k_pci_probe()
3679 if (!ath10k_pci_chip_is_supported(pdev->device, in ath10k_pci_probe()
3681 ret = -ENODEV; in ath10k_pci_probe()
3695 ret = -ENODEV; in ath10k_pci_probe()
3699 if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) { in ath10k_pci_probe()
3700 ret = -ENODEV; in ath10k_pci_probe()
3714 pdev->device, bus_params.chip_id); in ath10k_pci_probe()