Lines Matching +full:interconnect +full:-snoc (matching lines from the ath10k Copy Engine driver, drivers/net/wireless/ath/ath10k/ce.c; each entry below reads "<line number> <source text> in <enclosing function>()")

1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
17 * communication between Host and Target over a PCIe interconnect.
26 * an address, length, and meta-data.
28 * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
41 * There are several "contexts" managed by this layer -- more, it
42 * may seem -- than should be needed. These are provided mainly for
44 * implementation. There are per-CopyEngine recv, send, and watermark
48 * also a per-transfer context supplied by the caller when a buffer
50 * These per-transfer contexts are echoed back to the caller when
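
For orientation before the register accessors: a sketch of the descriptor layouts these rings carry, reconstructed from memory of ath10k's ce.h. The extract below only shows them indirectly, through CE_SRC_RING_TO_DESC() and CE_SRC_RING_TO_DESC_64(), so field details may differ by kernel version.

/*
 * Sketch, reconstructed from memory of ce.h; details may differ by
 * kernel version. The per-transfer contexts described above are not
 * part of the descriptor: they live in the software-side
 * per_transfer_context[] array of each ring.
 */
struct ce_desc {
	__le32 addr;	/* bus address of the buffer */
	__le16 nbytes;	/* transfer length; zeroed until rx completion */
	__le16 flags;	/* gather / byte-swap / interrupt control bits */
};

struct ce_desc_64 {
	__le64 addr;	/* masked with CE_DESC_ADDR_MASK when posted */
	__le16 nbytes;
	__le16 flags;
	__le32 toeplitz_hash_result;
};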
57 u32 ce_id = ce_state->id; in shadow_sr_wr_ind_addr()
87 return ((offset << addr_map->lsb) & addr_map->mask); in ath10k_set_ring_byte()
94 return ce->bus_ops->read32(ar, offset); in ath10k_ce_read32()
101 ce->bus_ops->write32(ar, offset, value); in ath10k_ce_write32()
109 ar->hw_ce_regs->dst_wr_index_addr, n); in ath10k_ce_dest_ring_write_index_set()
116 ar->hw_ce_regs->dst_wr_index_addr); in ath10k_ce_dest_ring_write_index_get()
124 ar->hw_ce_regs->sr_wr_index_addr, n); in ath10k_ce_src_ring_write_index_set()
131 ar->hw_ce_regs->sr_wr_index_addr); in ath10k_ce_src_ring_write_index_get()
139 return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK; in ath10k_ce_src_ring_read_index_from_ddr()
147 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in ath10k_ce_src_ring_read_index_get()
150 if (ar->hw_params.rri_on_ddr && in ath10k_ce_src_ring_read_index_get()
151 (ce_state->attr_flags & CE_ATTR_DIS_INTR)) in ath10k_ce_src_ring_read_index_get()
155 ar->hw_ce_regs->current_srri_addr); in ath10k_ce_src_ring_read_index_get()
173 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in ath10k_ce_src_ring_base_addr_set()
178 ar->hw_ce_regs->sr_base_addr_lo, addr_lo); in ath10k_ce_src_ring_base_addr_set()
180 if (ce_state->ops->ce_set_src_ring_base_addr_hi) { in ath10k_ce_src_ring_base_addr_set()
181 ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr, in ath10k_ce_src_ring_base_addr_set()
193 ar->hw_ce_regs->sr_base_addr_hi, addr_hi); in ath10k_ce_set_src_ring_base_addr_hi()
201 ar->hw_ce_regs->sr_size_addr, n); in ath10k_ce_src_ring_size_set()
208 const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; in ath10k_ce_src_ring_dmax_set()
211 ctrl_regs->addr); in ath10k_ce_src_ring_dmax_set()
213 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, in ath10k_ce_src_ring_dmax_set()
214 (ctrl1_addr & ~(ctrl_regs->dmax->mask)) | in ath10k_ce_src_ring_dmax_set()
215 ath10k_set_ring_byte(n, ctrl_regs->dmax)); in ath10k_ce_src_ring_dmax_set()
222 const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; in ath10k_ce_src_ring_byte_swap_set()
225 ctrl_regs->addr); in ath10k_ce_src_ring_byte_swap_set()
227 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, in ath10k_ce_src_ring_byte_swap_set()
228 (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) | in ath10k_ce_src_ring_byte_swap_set()
229 ath10k_set_ring_byte(n, ctrl_regs->src_ring)); in ath10k_ce_src_ring_byte_swap_set()
236 const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; in ath10k_ce_dest_ring_byte_swap_set()
239 ctrl_regs->addr); in ath10k_ce_dest_ring_byte_swap_set()
241 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr, in ath10k_ce_dest_ring_byte_swap_set()
242 (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) | in ath10k_ce_dest_ring_byte_swap_set()
243 ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); in ath10k_ce_dest_ring_byte_swap_set()
251 return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) & in ath10k_ce_dest_ring_read_index_from_ddr()
260 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in ath10k_ce_dest_ring_read_index_get()
263 if (ar->hw_params.rri_on_ddr && in ath10k_ce_dest_ring_read_index_get()
264 (ce_state->attr_flags & CE_ATTR_DIS_INTR)) in ath10k_ce_dest_ring_read_index_get()
268 ar->hw_ce_regs->current_drri_addr); in ath10k_ce_dest_ring_read_index_get()
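
ath10k_ce_src_ring_read_index_from_ddr() (line 139) and ath10k_ce_dest_ring_read_index_from_ddr() (line 251) show the rri_on_ddr scheme: the target publishes both read indices for each engine into one 32-bit word in host DDR, so the host can poll them without a register read across the interconnect. A sketch of the packing, assuming CE_DDR_RRI_MASK covers the low half of the word and CE_DDR_DRRI_SHIFT is the width of that half, as I recall them from ce.h:

/* One packed word per engine in ce->vaddr_rri[]; mask and shift
 * values assumed from ce.h.
 */
static inline void ce_unpack_rri(u32 rri, u32 *src_rd_idx, u32 *dst_rd_idx)
{
	*src_rd_idx = rri & CE_DDR_RRI_MASK;	/* low half: source ring */
	*dst_rd_idx = (rri >> CE_DDR_DRRI_SHIFT) &
		      CE_DDR_RRI_MASK;		/* high half: dest ring */
}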
278 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in ath10k_ce_dest_ring_base_addr_set()
283 ar->hw_ce_regs->dr_base_addr_lo, addr_lo); in ath10k_ce_dest_ring_base_addr_set()
285 if (ce_state->ops->ce_set_dest_ring_base_addr_hi) { in ath10k_ce_dest_ring_base_addr_set()
286 ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr, in ath10k_ce_dest_ring_base_addr_set()
299 ar->hw_ce_regs->dr_base_addr_hi); in ath10k_ce_set_dest_ring_base_addr_hi()
303 ar->hw_ce_regs->dr_base_addr_hi, reg_value); in ath10k_ce_set_dest_ring_base_addr_hi()
311 ar->hw_ce_regs->dr_size_addr, n); in ath10k_ce_dest_ring_size_set()
318 const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; in ath10k_ce_src_ring_highmark_set()
319 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); in ath10k_ce_src_ring_highmark_set()
321 ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, in ath10k_ce_src_ring_highmark_set()
322 (addr & ~(srcr_wm->wm_high->mask)) | in ath10k_ce_src_ring_highmark_set()
323 (ath10k_set_ring_byte(n, srcr_wm->wm_high))); in ath10k_ce_src_ring_highmark_set()
330 const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; in ath10k_ce_src_ring_lowmark_set()
331 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); in ath10k_ce_src_ring_lowmark_set()
333 ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, in ath10k_ce_src_ring_lowmark_set()
334 (addr & ~(srcr_wm->wm_low->mask)) | in ath10k_ce_src_ring_lowmark_set()
335 (ath10k_set_ring_byte(n, srcr_wm->wm_low))); in ath10k_ce_src_ring_lowmark_set()
342 const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; in ath10k_ce_dest_ring_highmark_set()
343 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); in ath10k_ce_dest_ring_highmark_set()
345 ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, in ath10k_ce_dest_ring_highmark_set()
346 (addr & ~(dstr_wm->wm_high->mask)) | in ath10k_ce_dest_ring_highmark_set()
347 (ath10k_set_ring_byte(n, dstr_wm->wm_high))); in ath10k_ce_dest_ring_highmark_set()
354 const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; in ath10k_ce_dest_ring_lowmark_set()
355 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); in ath10k_ce_dest_ring_lowmark_set()
357 ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, in ath10k_ce_dest_ring_lowmark_set()
358 (addr & ~(dstr_wm->wm_low->mask)) | in ath10k_ce_dest_ring_lowmark_set()
359 (ath10k_set_ring_byte(n, dstr_wm->wm_low))); in ath10k_ce_dest_ring_lowmark_set()
365 const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; in ath10k_ce_copy_complete_inter_enable()
368 ar->hw_ce_regs->host_ie_addr); in ath10k_ce_copy_complete_inter_enable()
370 ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, in ath10k_ce_copy_complete_inter_enable()
371 host_ie_addr | host_ie->copy_complete->mask); in ath10k_ce_copy_complete_inter_enable()
377 const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; in ath10k_ce_copy_complete_intr_disable()
380 ar->hw_ce_regs->host_ie_addr); in ath10k_ce_copy_complete_intr_disable()
382 ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, in ath10k_ce_copy_complete_intr_disable()
383 host_ie_addr & ~(host_ie->copy_complete->mask)); in ath10k_ce_copy_complete_intr_disable()
389 const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; in ath10k_ce_watermark_intr_disable()
392 ar->hw_ce_regs->host_ie_addr); in ath10k_ce_watermark_intr_disable()
394 ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr, in ath10k_ce_watermark_intr_disable()
395 host_ie_addr & ~(wm_regs->wm_mask)); in ath10k_ce_watermark_intr_disable()
401 const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; in ath10k_ce_error_intr_disable()
404 ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr); in ath10k_ce_error_intr_disable()
407 ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr, in ath10k_ce_error_intr_disable()
408 misc_ie_addr & ~(misc_regs->err_mask)); in ath10k_ce_error_intr_disable()
415 const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; in ath10k_ce_engine_int_status_clear()
417 ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask); in ath10k_ce_engine_int_status_clear()
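
Every setter from ath10k_ce_src_ring_dmax_set() (line 208) through the watermark and interrupt-enable helpers follows one read-modify-write shape: read the control register, clear the field's mask, then OR in the new value shifted into place by ath10k_set_ring_byte() (line 87). Condensed into one illustrative helper (not in the driver; the field-descriptor struct name is assumed from the addr_map parameter):

/* Illustrative only: the common shape of the helpers above. */
static void ce_rmw_field(struct ath10k *ar, u32 reg,
			 struct ath10k_hw_ce_regs_addr_map *field, u32 val)
{
	u32 old = ath10k_ce_read32(ar, reg);

	ath10k_ce_write32(ar, reg,
			  (old & ~field->mask) |
			  ath10k_set_ring_byte(val, field));
}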
431 struct ath10k *ar = ce_state->ar; in _ath10k_ce_send_nolock()
432 struct ath10k_ce_ring *src_ring = ce_state->src_ring; in _ath10k_ce_send_nolock()
434 unsigned int nentries_mask = src_ring->nentries_mask; in _ath10k_ce_send_nolock()
435 unsigned int sw_index = src_ring->sw_index; in _ath10k_ce_send_nolock()
436 unsigned int write_index = src_ring->write_index; in _ath10k_ce_send_nolock()
437 u32 ctrl_addr = ce_state->ctrl_addr; in _ath10k_ce_send_nolock()
441 if (nbytes > ce_state->src_sz_max) in _ath10k_ce_send_nolock()
443 __func__, nbytes, ce_state->src_sz_max); in _ath10k_ce_send_nolock()
446 write_index, sw_index - 1) <= 0)) { in _ath10k_ce_send_nolock()
447 ret = -ENOSR; in _ath10k_ce_send_nolock()
451 desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space, in _ath10k_ce_send_nolock()
467 src_ring->per_transfer_context[write_index] = per_transfer_context; in _ath10k_ce_send_nolock()
476 src_ring->write_index = write_index; in _ath10k_ce_send_nolock()
488 struct ath10k *ar = ce_state->ar; in _ath10k_ce_send_nolock_64()
489 struct ath10k_ce_ring *src_ring = ce_state->src_ring; in _ath10k_ce_send_nolock_64()
491 unsigned int nentries_mask = src_ring->nentries_mask; in _ath10k_ce_send_nolock_64()
493 unsigned int write_index = src_ring->write_index; in _ath10k_ce_send_nolock_64()
494 u32 ctrl_addr = ce_state->ctrl_addr; in _ath10k_ce_send_nolock_64()
499 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) in _ath10k_ce_send_nolock_64()
500 return -ESHUTDOWN; in _ath10k_ce_send_nolock_64()
502 if (nbytes > ce_state->src_sz_max) in _ath10k_ce_send_nolock_64()
504 __func__, nbytes, ce_state->src_sz_max); in _ath10k_ce_send_nolock_64()
506 if (ar->hw_params.rri_on_ddr) in _ath10k_ce_send_nolock_64()
507 sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id); in _ath10k_ce_send_nolock_64()
509 sw_index = src_ring->sw_index; in _ath10k_ce_send_nolock_64()
512 write_index, sw_index - 1) <= 0)) { in _ath10k_ce_send_nolock_64()
513 ret = -ENOSR; in _ath10k_ce_send_nolock_64()
517 desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space, in _ath10k_ce_send_nolock_64()
543 src_ring->per_transfer_context[write_index] = per_transfer_context; in _ath10k_ce_send_nolock_64()
549 if (ar->hw_params.shadow_reg_support) in _ath10k_ce_send_nolock_64()
557 src_ring->write_index = write_index; in _ath10k_ce_send_nolock_64()
569 return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context, in ath10k_ce_send_nolock()
576 struct ath10k *ar = pipe->ar; in __ath10k_ce_send_revert()
578 struct ath10k_ce_ring *src_ring = pipe->src_ring; in __ath10k_ce_send_revert()
579 u32 ctrl_addr = pipe->ctrl_addr; in __ath10k_ce_send_revert()
581 lockdep_assert_held(&ce->ce_lock); in __ath10k_ce_send_revert()
585 * scatter-gather transfer (before index register is updated) in __ath10k_ce_send_revert()
588 if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index)) in __ath10k_ce_send_revert()
591 if (WARN_ON_ONCE(src_ring->write_index == in __ath10k_ce_send_revert()
595 src_ring->write_index--; in __ath10k_ce_send_revert()
596 src_ring->write_index &= src_ring->nentries_mask; in __ath10k_ce_send_revert()
598 src_ring->per_transfer_context[src_ring->write_index] = NULL; in __ath10k_ce_send_revert()
609 struct ath10k *ar = ce_state->ar; in ath10k_ce_send()
613 spin_lock_bh(&ce->ce_lock); in ath10k_ce_send()
616 spin_unlock_bh(&ce->ce_lock); in ath10k_ce_send()
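
ath10k_ce_send() is the locked wrapper around the nolock paths above: it takes ce->ce_lock, dispatches through ce_state->ops->ce_send_nolock, and drops the lock. A usage sketch, with the prototype inferred from the fields the nolock bodies touch (not driver code; the buffer argument is dma_addr_t in recent kernels, u32 in older ones):

/* Usage sketch, not driver code: queuing one buffer for transmit. */
static int example_queue_tx(struct ath10k_ce_pipe *pipe, void *cookie,
			    dma_addr_t paddr, unsigned int nbytes,
			    unsigned int transfer_id)
{
	/* cookie is echoed back by the send-completion path */
	int ret = ath10k_ce_send(pipe, cookie, paddr, nbytes, transfer_id, 0);

	/* -ENOSR means the ring is full (one slot is always kept free);
	 * callers typically back off and retry after reaping completions.
	 */
	return ret;
}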
624 struct ath10k *ar = pipe->ar; in ath10k_ce_num_free_src_entries()
628 spin_lock_bh(&ce->ce_lock); in ath10k_ce_num_free_src_entries()
629 delta = CE_RING_DELTA(pipe->src_ring->nentries_mask, in ath10k_ce_num_free_src_entries()
630 pipe->src_ring->write_index, in ath10k_ce_num_free_src_entries()
631 pipe->src_ring->sw_index - 1); in ath10k_ce_num_free_src_entries()
632 spin_unlock_bh(&ce->ce_lock); in ath10k_ce_num_free_src_entries()
640 struct ath10k *ar = pipe->ar; in __ath10k_ce_rx_num_free_bufs()
642 struct ath10k_ce_ring *dest_ring = pipe->dest_ring; in __ath10k_ce_rx_num_free_bufs()
643 unsigned int nentries_mask = dest_ring->nentries_mask; in __ath10k_ce_rx_num_free_bufs()
644 unsigned int write_index = dest_ring->write_index; in __ath10k_ce_rx_num_free_bufs()
645 unsigned int sw_index = dest_ring->sw_index; in __ath10k_ce_rx_num_free_bufs()
647 lockdep_assert_held(&ce->ce_lock); in __ath10k_ce_rx_num_free_bufs()
649 return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); in __ath10k_ce_rx_num_free_bufs()
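
CE_RING_DELTA() gives the modular distance between two indices on a power-of-two ring, and every caller passes sw_index - 1 as the limit so one slot always stays empty; without that gap, write_index == sw_index would be ambiguous between a full ring and an empty one. A worked example, with the macro body as I recall it from ce.h:

/*
 * As I recall the macro from ce.h:
 *
 *   #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
 *           (((int)(toidx) - (int)(fromidx)) & (nentries_mask))
 *
 * Worked example on an 8-entry ring (mask 7), write_index == 6,
 * sw_index == 2:
 *
 *   free = CE_RING_DELTA(7, 6, 2 - 1) = (1 - 6) & 7 = 3
 *
 * Three more entries can be posted; the fourth remaining slot is the
 * reserved gap that keeps "full" distinguishable from "empty".
 */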
656 struct ath10k *ar = pipe->ar; in __ath10k_ce_rx_post_buf()
658 struct ath10k_ce_ring *dest_ring = pipe->dest_ring; in __ath10k_ce_rx_post_buf()
659 unsigned int nentries_mask = dest_ring->nentries_mask; in __ath10k_ce_rx_post_buf()
660 unsigned int write_index = dest_ring->write_index; in __ath10k_ce_rx_post_buf()
661 unsigned int sw_index = dest_ring->sw_index; in __ath10k_ce_rx_post_buf()
662 struct ce_desc *base = dest_ring->base_addr_owner_space; in __ath10k_ce_rx_post_buf()
664 u32 ctrl_addr = pipe->ctrl_addr; in __ath10k_ce_rx_post_buf()
666 lockdep_assert_held(&ce->ce_lock); in __ath10k_ce_rx_post_buf()
668 if ((pipe->id != 5) && in __ath10k_ce_rx_post_buf()
669 CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) in __ath10k_ce_rx_post_buf()
670 return -ENOSPC; in __ath10k_ce_rx_post_buf()
672 desc->addr = __cpu_to_le32(paddr); in __ath10k_ce_rx_post_buf()
673 desc->nbytes = 0; in __ath10k_ce_rx_post_buf()
675 dest_ring->per_transfer_context[write_index] = ctx; in __ath10k_ce_rx_post_buf()
678 dest_ring->write_index = write_index; in __ath10k_ce_rx_post_buf()
687 struct ath10k *ar = pipe->ar; in __ath10k_ce_rx_post_buf_64()
689 struct ath10k_ce_ring *dest_ring = pipe->dest_ring; in __ath10k_ce_rx_post_buf_64()
690 unsigned int nentries_mask = dest_ring->nentries_mask; in __ath10k_ce_rx_post_buf_64()
691 unsigned int write_index = dest_ring->write_index; in __ath10k_ce_rx_post_buf_64()
692 unsigned int sw_index = dest_ring->sw_index; in __ath10k_ce_rx_post_buf_64()
693 struct ce_desc_64 *base = dest_ring->base_addr_owner_space; in __ath10k_ce_rx_post_buf_64()
696 u32 ctrl_addr = pipe->ctrl_addr; in __ath10k_ce_rx_post_buf_64()
698 lockdep_assert_held(&ce->ce_lock); in __ath10k_ce_rx_post_buf_64()
700 if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) in __ath10k_ce_rx_post_buf_64()
701 return -ENOSPC; in __ath10k_ce_rx_post_buf_64()
703 desc->addr = __cpu_to_le64(paddr); in __ath10k_ce_rx_post_buf_64()
704 desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK); in __ath10k_ce_rx_post_buf_64()
706 desc->nbytes = 0; in __ath10k_ce_rx_post_buf_64()
708 dest_ring->per_transfer_context[write_index] = ctx; in __ath10k_ce_rx_post_buf_64()
711 dest_ring->write_index = write_index; in __ath10k_ce_rx_post_buf_64()
718 struct ath10k *ar = pipe->ar; in ath10k_ce_rx_update_write_idx()
719 struct ath10k_ce_ring *dest_ring = pipe->dest_ring; in ath10k_ce_rx_update_write_idx()
720 unsigned int nentries_mask = dest_ring->nentries_mask; in ath10k_ce_rx_update_write_idx()
721 unsigned int write_index = dest_ring->write_index; in ath10k_ce_rx_update_write_idx()
722 u32 ctrl_addr = pipe->ctrl_addr; in ath10k_ce_rx_update_write_idx()
728 if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index) in ath10k_ce_rx_update_write_idx()
729 nentries -= 1; in ath10k_ce_rx_update_write_idx()
733 dest_ring->write_index = write_index; in ath10k_ce_rx_update_write_idx()
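
The nentries -= 1 at line 729 is the same full/empty guard in batch form: if advancing the write index by the whole batch would land it exactly on sw_index, one buffer is held back. An annotated restatement of lines 728-733 (a sketch, not a verbatim copy, with CE_RING_IDX_ADD as I recall it from ce.h):

if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
	nentries -= 1;	/* hold one buffer back: keep the empty slot */

write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
dest_ring->write_index = write_index;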
740 struct ath10k *ar = pipe->ar; in ath10k_ce_rx_post_buf()
744 spin_lock_bh(&ce->ce_lock); in ath10k_ce_rx_post_buf()
745 ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr); in ath10k_ce_rx_post_buf()
746 spin_unlock_bh(&ce->ce_lock); in ath10k_ce_rx_post_buf()
761 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; in _ath10k_ce_completed_recv_next_nolock()
762 unsigned int nentries_mask = dest_ring->nentries_mask; in _ath10k_ce_completed_recv_next_nolock()
763 unsigned int sw_index = dest_ring->sw_index; in _ath10k_ce_completed_recv_next_nolock()
765 struct ce_desc *base = dest_ring->base_addr_owner_space; in _ath10k_ce_completed_recv_next_nolock()
781 return -EIO; in _ath10k_ce_completed_recv_next_nolock()
784 desc->nbytes = 0; in _ath10k_ce_completed_recv_next_nolock()
791 dest_ring->per_transfer_context[sw_index]; in _ath10k_ce_completed_recv_next_nolock()
796 if (ce_state->id != 5) in _ath10k_ce_completed_recv_next_nolock()
797 dest_ring->per_transfer_context[sw_index] = NULL; in _ath10k_ce_completed_recv_next_nolock()
801 dest_ring->sw_index = sw_index; in _ath10k_ce_completed_recv_next_nolock()
811 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; in _ath10k_ce_completed_recv_next_nolock_64()
812 unsigned int nentries_mask = dest_ring->nentries_mask; in _ath10k_ce_completed_recv_next_nolock_64()
813 unsigned int sw_index = dest_ring->sw_index; in _ath10k_ce_completed_recv_next_nolock_64()
814 struct ce_desc_64 *base = dest_ring->base_addr_owner_space; in _ath10k_ce_completed_recv_next_nolock_64()
830 return -EIO; in _ath10k_ce_completed_recv_next_nolock_64()
833 desc->nbytes = 0; in _ath10k_ce_completed_recv_next_nolock_64()
840 dest_ring->per_transfer_context[sw_index]; in _ath10k_ce_completed_recv_next_nolock_64()
845 if (ce_state->id != 5) in _ath10k_ce_completed_recv_next_nolock_64()
846 dest_ring->per_transfer_context[sw_index] = NULL; in _ath10k_ce_completed_recv_next_nolock_64()
850 dest_ring->sw_index = sw_index; in _ath10k_ce_completed_recv_next_nolock_64()
859 return ce_state->ops->ce_completed_recv_next_nolock(ce_state, in ath10k_ce_completed_recv_next_nolock()
869 struct ath10k *ar = ce_state->ar; in ath10k_ce_completed_recv_next()
873 spin_lock_bh(&ce->ce_lock); in ath10k_ce_completed_recv_next()
874 ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state, in ath10k_ce_completed_recv_next()
878 spin_unlock_bh(&ce->ce_lock); in ath10k_ce_completed_recv_next()
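
ath10k_ce_completed_recv_next() is the locked entry point for reaping filled rx descriptors; it hands back the per-transfer context posted earlier and the byte count from the descriptor. A hedged usage sketch (the two-out-parameter prototype matches recent kernels; older ones also returned transfer_id and flags, and process_rx() here is hypothetical):

/* Sketch, not driver code: draining completions from a recv callback. */
static void example_drain_rx(struct ath10k_ce_pipe *pipe)
{
	void *cookie;
	unsigned int nbytes;

	while (ath10k_ce_completed_recv_next(pipe, &cookie, &nbytes) == 0)
		process_rx(cookie, nbytes);	/* hypothetical consumer */
}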
896 dest_ring = ce_state->dest_ring; in _ath10k_ce_revoke_recv_next()
899 return -EIO; in _ath10k_ce_revoke_recv_next()
901 ar = ce_state->ar; in _ath10k_ce_revoke_recv_next()
904 spin_lock_bh(&ce->ce_lock); in _ath10k_ce_revoke_recv_next()
906 nentries_mask = dest_ring->nentries_mask; in _ath10k_ce_revoke_recv_next()
907 sw_index = dest_ring->sw_index; in _ath10k_ce_revoke_recv_next()
908 write_index = dest_ring->write_index; in _ath10k_ce_revoke_recv_next()
910 struct ce_desc *base = dest_ring->base_addr_owner_space; in _ath10k_ce_revoke_recv_next()
914 *bufferp = __le32_to_cpu(desc->addr); in _ath10k_ce_revoke_recv_next()
918 dest_ring->per_transfer_context[sw_index]; in _ath10k_ce_revoke_recv_next()
921 dest_ring->per_transfer_context[sw_index] = NULL; in _ath10k_ce_revoke_recv_next()
922 desc->nbytes = 0; in _ath10k_ce_revoke_recv_next()
926 dest_ring->sw_index = sw_index; in _ath10k_ce_revoke_recv_next()
929 ret = -EIO; in _ath10k_ce_revoke_recv_next()
932 spin_unlock_bh(&ce->ce_lock); in _ath10k_ce_revoke_recv_next()
949 dest_ring = ce_state->dest_ring; in _ath10k_ce_revoke_recv_next_64()
952 return -EIO; in _ath10k_ce_revoke_recv_next_64()
954 ar = ce_state->ar; in _ath10k_ce_revoke_recv_next_64()
957 spin_lock_bh(&ce->ce_lock); in _ath10k_ce_revoke_recv_next_64()
959 nentries_mask = dest_ring->nentries_mask; in _ath10k_ce_revoke_recv_next_64()
960 sw_index = dest_ring->sw_index; in _ath10k_ce_revoke_recv_next_64()
961 write_index = dest_ring->write_index; in _ath10k_ce_revoke_recv_next_64()
963 struct ce_desc_64 *base = dest_ring->base_addr_owner_space; in _ath10k_ce_revoke_recv_next_64()
968 *bufferp = __le64_to_cpu(desc->addr); in _ath10k_ce_revoke_recv_next_64()
972 dest_ring->per_transfer_context[sw_index]; in _ath10k_ce_revoke_recv_next_64()
975 dest_ring->per_transfer_context[sw_index] = NULL; in _ath10k_ce_revoke_recv_next_64()
976 desc->nbytes = 0; in _ath10k_ce_revoke_recv_next_64()
980 dest_ring->sw_index = sw_index; in _ath10k_ce_revoke_recv_next_64()
983 ret = -EIO; in _ath10k_ce_revoke_recv_next_64()
986 spin_unlock_bh(&ce->ce_lock); in _ath10k_ce_revoke_recv_next_64()
995 return ce_state->ops->ce_revoke_recv_next(ce_state, in ath10k_ce_revoke_recv_next()
1008 struct ath10k_ce_ring *src_ring = ce_state->src_ring; in _ath10k_ce_completed_send_next_nolock()
1009 u32 ctrl_addr = ce_state->ctrl_addr; in _ath10k_ce_completed_send_next_nolock()
1010 struct ath10k *ar = ce_state->ar; in _ath10k_ce_completed_send_next_nolock()
1011 unsigned int nentries_mask = src_ring->nentries_mask; in _ath10k_ce_completed_send_next_nolock()
1012 unsigned int sw_index = src_ring->sw_index; in _ath10k_ce_completed_send_next_nolock()
1016 if (src_ring->hw_index == sw_index) { in _ath10k_ce_completed_send_next_nolock()
1027 return -ENODEV; in _ath10k_ce_completed_send_next_nolock()
1030 src_ring->hw_index = read_index; in _ath10k_ce_completed_send_next_nolock()
1033 if (ar->hw_params.rri_on_ddr) in _ath10k_ce_completed_send_next_nolock()
1036 read_index = src_ring->hw_index; in _ath10k_ce_completed_send_next_nolock()
1039 return -EIO; in _ath10k_ce_completed_send_next_nolock()
1043 src_ring->per_transfer_context[sw_index]; in _ath10k_ce_completed_send_next_nolock()
1046 src_ring->per_transfer_context[sw_index] = NULL; in _ath10k_ce_completed_send_next_nolock()
1047 desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space, in _ath10k_ce_completed_send_next_nolock()
1049 desc->nbytes = 0; in _ath10k_ce_completed_send_next_nolock()
1053 src_ring->sw_index = sw_index; in _ath10k_ce_completed_send_next_nolock()
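
The send-completion path avoids touching hardware when it can: src_ring->hw_index caches the last read index fetched from the target, and the register (or the DDR copy, under rri_on_ddr) is consulted only when that cache says nothing has completed. The 0xffffffff check behind the -ENODEV at line 1027 catches a target that has dropped off the bus. A sketch of that refresh step, implied by lines 1016-1036:

/* Only hit the (slow, possibly dead) interconnect when the cached
 * index says the ring has nothing completed.
 */
if (src_ring->hw_index == sw_index) {
	read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	if (read_index == 0xffffffff)
		return -ENODEV;	/* target no longer responding */

	src_ring->hw_index = read_index & nentries_mask;
}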
1061 struct ath10k_ce_ring *src_ring = ce_state->src_ring; in _ath10k_ce_completed_send_next_nolock_64()
1062 u32 ctrl_addr = ce_state->ctrl_addr; in _ath10k_ce_completed_send_next_nolock_64()
1063 struct ath10k *ar = ce_state->ar; in _ath10k_ce_completed_send_next_nolock_64()
1064 unsigned int nentries_mask = src_ring->nentries_mask; in _ath10k_ce_completed_send_next_nolock_64()
1065 unsigned int sw_index = src_ring->sw_index; in _ath10k_ce_completed_send_next_nolock_64()
1069 if (src_ring->hw_index == sw_index) { in _ath10k_ce_completed_send_next_nolock_64()
1080 return -ENODEV; in _ath10k_ce_completed_send_next_nolock_64()
1083 src_ring->hw_index = read_index; in _ath10k_ce_completed_send_next_nolock_64()
1086 if (ar->hw_params.rri_on_ddr) in _ath10k_ce_completed_send_next_nolock_64()
1089 read_index = src_ring->hw_index; in _ath10k_ce_completed_send_next_nolock_64()
1092 return -EIO; in _ath10k_ce_completed_send_next_nolock_64()
1096 src_ring->per_transfer_context[sw_index]; in _ath10k_ce_completed_send_next_nolock_64()
1099 src_ring->per_transfer_context[sw_index] = NULL; in _ath10k_ce_completed_send_next_nolock_64()
1100 desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space, in _ath10k_ce_completed_send_next_nolock_64()
1102 desc->nbytes = 0; in _ath10k_ce_completed_send_next_nolock_64()
1106 src_ring->sw_index = sw_index; in _ath10k_ce_completed_send_next_nolock_64()
1114 return ce_state->ops->ce_completed_send_next_nolock(ce_state, in ath10k_ce_completed_send_next_nolock()
1126 struct ce_desc *base = src_ring->base_addr_owner_space; in ath10k_ce_extract_desc_data()
1130 *bufferp = __le32_to_cpu(desc->addr); in ath10k_ce_extract_desc_data()
1131 *nbytesp = __le16_to_cpu(desc->nbytes); in ath10k_ce_extract_desc_data()
1132 *transfer_idp = MS(__le16_to_cpu(desc->flags), in ath10k_ce_extract_desc_data()
1143 struct ce_desc_64 *base = src_ring->base_addr_owner_space; in ath10k_ce_extract_desc_data_64()
1148 *bufferp = __le64_to_cpu(desc->addr); in ath10k_ce_extract_desc_data_64()
1149 *nbytesp = __le16_to_cpu(desc->nbytes); in ath10k_ce_extract_desc_data_64()
1150 *transfer_idp = MS(__le16_to_cpu(desc->flags), in ath10k_ce_extract_desc_data_64()
1169 src_ring = ce_state->src_ring; in ath10k_ce_cancel_send_next()
1172 return -EIO; in ath10k_ce_cancel_send_next()
1174 ar = ce_state->ar; in ath10k_ce_cancel_send_next()
1177 spin_lock_bh(&ce->ce_lock); in ath10k_ce_cancel_send_next()
1179 nentries_mask = src_ring->nentries_mask; in ath10k_ce_cancel_send_next()
1180 sw_index = src_ring->sw_index; in ath10k_ce_cancel_send_next()
1181 write_index = src_ring->write_index; in ath10k_ce_cancel_send_next()
1184 ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index, in ath10k_ce_cancel_send_next()
1190 src_ring->per_transfer_context[sw_index]; in ath10k_ce_cancel_send_next()
1193 src_ring->per_transfer_context[sw_index] = NULL; in ath10k_ce_cancel_send_next()
1197 src_ring->sw_index = sw_index; in ath10k_ce_cancel_send_next()
1200 ret = -EIO; in ath10k_ce_cancel_send_next()
1203 spin_unlock_bh(&ce->ce_lock); in ath10k_ce_cancel_send_next()
1212 struct ath10k *ar = ce_state->ar; in ath10k_ce_completed_send_next()
1216 spin_lock_bh(&ce->ce_lock); in ath10k_ce_completed_send_next()
1219 spin_unlock_bh(&ce->ce_lock); in ath10k_ce_completed_send_next()
1226 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1234 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in ath10k_ce_per_engine_service()
1235 const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; in ath10k_ce_per_engine_service()
1236 u32 ctrl_addr = ce_state->ctrl_addr; in ath10k_ce_per_engine_service()
1249 wm_regs->cc_mask | wm_regs->wm_mask); in ath10k_ce_per_engine_service()
1251 if (ce_state->recv_cb) in ath10k_ce_per_engine_service()
1252 ce_state->recv_cb(ce_state); in ath10k_ce_per_engine_service()
1254 if (ce_state->send_cb) in ath10k_ce_per_engine_service()
1255 ce_state->send_cb(ce_state); in ath10k_ce_per_engine_service()
1260 * Handler for per-engine interrupts on ALL active CEs.
1293 u32 ctrl_addr = ce_state->ctrl_addr; in ath10k_ce_per_engine_handler_adjust()
1294 struct ath10k *ar = ce_state->ar; in ath10k_ce_per_engine_handler_adjust()
1295 bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR; in ath10k_ce_per_engine_handler_adjust()
1298 (ce_state->send_cb || ce_state->recv_cb)) in ath10k_ce_per_engine_handler_adjust()
1312 ce_state = &ce->ce_states[ce_id]; in ath10k_ce_disable_interrupt()
1313 if (ce_state->attr_flags & CE_ATTR_POLL) in ath10k_ce_disable_interrupt()
1338 ce_state = &ce->ce_states[ce_id]; in ath10k_ce_enable_interrupt()
1339 if (ce_state->attr_flags & CE_ATTR_POLL) in ath10k_ce_enable_interrupt()
1363 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in ath10k_ce_init_src_ring()
1364 struct ath10k_ce_ring *src_ring = ce_state->src_ring; in ath10k_ce_init_src_ring()
1367 nentries = roundup_pow_of_two(attr->src_nentries); in ath10k_ce_init_src_ring()
1369 if (ar->hw_params.target_64bit) in ath10k_ce_init_src_ring()
1370 memset(src_ring->base_addr_owner_space, 0, in ath10k_ce_init_src_ring()
1373 memset(src_ring->base_addr_owner_space, 0, in ath10k_ce_init_src_ring()
1376 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); in ath10k_ce_init_src_ring()
1377 src_ring->sw_index &= src_ring->nentries_mask; in ath10k_ce_init_src_ring()
1378 src_ring->hw_index = src_ring->sw_index; in ath10k_ce_init_src_ring()
1380 src_ring->write_index = in ath10k_ce_init_src_ring()
1382 src_ring->write_index &= src_ring->nentries_mask; in ath10k_ce_init_src_ring()
1385 src_ring->base_addr_ce_space); in ath10k_ce_init_src_ring()
1387 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); in ath10k_ce_init_src_ring()
1394 ce_id, nentries, src_ring->base_addr_owner_space); in ath10k_ce_init_src_ring()
1404 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in ath10k_ce_init_dest_ring()
1405 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; in ath10k_ce_init_dest_ring()
1408 nentries = roundup_pow_of_two(attr->dest_nentries); in ath10k_ce_init_dest_ring()
1410 if (ar->hw_params.target_64bit) in ath10k_ce_init_dest_ring()
1411 memset(dest_ring->base_addr_owner_space, 0, in ath10k_ce_init_dest_ring()
1414 memset(dest_ring->base_addr_owner_space, 0, in ath10k_ce_init_dest_ring()
1417 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); in ath10k_ce_init_dest_ring()
1418 dest_ring->sw_index &= dest_ring->nentries_mask; in ath10k_ce_init_dest_ring()
1419 dest_ring->write_index = in ath10k_ce_init_dest_ring()
1421 dest_ring->write_index &= dest_ring->nentries_mask; in ath10k_ce_init_dest_ring()
1424 dest_ring->base_addr_ce_space); in ath10k_ce_init_dest_ring()
1432 ce_id, nentries, dest_ring->base_addr_owner_space); in ath10k_ce_init_dest_ring()
1441 src_ring->shadow_base_unaligned = kcalloc(nentries, in ath10k_ce_alloc_shadow_base()
1444 if (!src_ring->shadow_base_unaligned) in ath10k_ce_alloc_shadow_base()
1445 return -ENOMEM; in ath10k_ce_alloc_shadow_base()
1447 src_ring->shadow_base = (struct ce_desc_64 *) in ath10k_ce_alloc_shadow_base()
1448 PTR_ALIGN(src_ring->shadow_base_unaligned, in ath10k_ce_alloc_shadow_base()
1458 u32 nentries = attr->src_nentries; in ath10k_ce_alloc_src_ring()
1467 return ERR_PTR(-ENOMEM); in ath10k_ce_alloc_src_ring()
1469 src_ring->nentries = nentries; in ath10k_ce_alloc_src_ring()
1470 src_ring->nentries_mask = nentries - 1; in ath10k_ce_alloc_src_ring()
1476 src_ring->base_addr_owner_space_unaligned = in ath10k_ce_alloc_src_ring()
1477 dma_alloc_coherent(ar->dev, in ath10k_ce_alloc_src_ring()
1481 if (!src_ring->base_addr_owner_space_unaligned) { in ath10k_ce_alloc_src_ring()
1483 return ERR_PTR(-ENOMEM); in ath10k_ce_alloc_src_ring()
1486 src_ring->base_addr_ce_space_unaligned = base_addr; in ath10k_ce_alloc_src_ring()
1488 src_ring->base_addr_owner_space = in ath10k_ce_alloc_src_ring()
1489 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned, in ath10k_ce_alloc_src_ring()
1491 src_ring->base_addr_ce_space = in ath10k_ce_alloc_src_ring()
1492 ALIGN(src_ring->base_addr_ce_space_unaligned, in ath10k_ce_alloc_src_ring()
1495 if (ar->hw_params.shadow_reg_support) { in ath10k_ce_alloc_src_ring()
1498 dma_free_coherent(ar->dev, in ath10k_ce_alloc_src_ring()
1501 src_ring->base_addr_owner_space_unaligned, in ath10k_ce_alloc_src_ring()
1516 u32 nentries = attr->src_nentries; in ath10k_ce_alloc_src_ring_64()
1525 return ERR_PTR(-ENOMEM); in ath10k_ce_alloc_src_ring_64()
1527 src_ring->nentries = nentries; in ath10k_ce_alloc_src_ring_64()
1528 src_ring->nentries_mask = nentries - 1; in ath10k_ce_alloc_src_ring_64()
1533 src_ring->base_addr_owner_space_unaligned = in ath10k_ce_alloc_src_ring_64()
1534 dma_alloc_coherent(ar->dev, in ath10k_ce_alloc_src_ring_64()
1538 if (!src_ring->base_addr_owner_space_unaligned) { in ath10k_ce_alloc_src_ring_64()
1540 return ERR_PTR(-ENOMEM); in ath10k_ce_alloc_src_ring_64()
1543 src_ring->base_addr_ce_space_unaligned = base_addr; in ath10k_ce_alloc_src_ring_64()
1545 src_ring->base_addr_owner_space = in ath10k_ce_alloc_src_ring_64()
1546 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned, in ath10k_ce_alloc_src_ring_64()
1548 src_ring->base_addr_ce_space = in ath10k_ce_alloc_src_ring_64()
1549 ALIGN(src_ring->base_addr_ce_space_unaligned, in ath10k_ce_alloc_src_ring_64()
1552 if (ar->hw_params.shadow_reg_support) { in ath10k_ce_alloc_src_ring_64()
1555 dma_free_coherent(ar->dev, in ath10k_ce_alloc_src_ring_64()
1558 src_ring->base_addr_owner_space_unaligned, in ath10k_ce_alloc_src_ring_64()
1576 nentries = roundup_pow_of_two(attr->dest_nentries); in ath10k_ce_alloc_dest_ring()
1581 return ERR_PTR(-ENOMEM); in ath10k_ce_alloc_dest_ring()
1583 dest_ring->nentries = nentries; in ath10k_ce_alloc_dest_ring()
1584 dest_ring->nentries_mask = nentries - 1; in ath10k_ce_alloc_dest_ring()
1590 dest_ring->base_addr_owner_space_unaligned = in ath10k_ce_alloc_dest_ring()
1591 dma_alloc_coherent(ar->dev, in ath10k_ce_alloc_dest_ring()
1595 if (!dest_ring->base_addr_owner_space_unaligned) { in ath10k_ce_alloc_dest_ring()
1597 return ERR_PTR(-ENOMEM); in ath10k_ce_alloc_dest_ring()
1600 dest_ring->base_addr_ce_space_unaligned = base_addr; in ath10k_ce_alloc_dest_ring()
1602 dest_ring->base_addr_owner_space = in ath10k_ce_alloc_dest_ring()
1603 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned, in ath10k_ce_alloc_dest_ring()
1605 dest_ring->base_addr_ce_space = in ath10k_ce_alloc_dest_ring()
1606 ALIGN(dest_ring->base_addr_ce_space_unaligned, in ath10k_ce_alloc_dest_ring()
1620 nentries = roundup_pow_of_two(attr->dest_nentries); in ath10k_ce_alloc_dest_ring_64()
1625 return ERR_PTR(-ENOMEM); in ath10k_ce_alloc_dest_ring_64()
1627 dest_ring->nentries = nentries; in ath10k_ce_alloc_dest_ring_64()
1628 dest_ring->nentries_mask = nentries - 1; in ath10k_ce_alloc_dest_ring_64()
1633 dest_ring->base_addr_owner_space_unaligned = in ath10k_ce_alloc_dest_ring_64()
1634 dma_alloc_coherent(ar->dev, in ath10k_ce_alloc_dest_ring_64()
1638 if (!dest_ring->base_addr_owner_space_unaligned) { in ath10k_ce_alloc_dest_ring_64()
1640 return ERR_PTR(-ENOMEM); in ath10k_ce_alloc_dest_ring_64()
1643 dest_ring->base_addr_ce_space_unaligned = base_addr; in ath10k_ce_alloc_dest_ring_64()
1648 dest_ring->base_addr_owner_space = in ath10k_ce_alloc_dest_ring_64()
1649 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned, in ath10k_ce_alloc_dest_ring_64()
1651 dest_ring->base_addr_ce_space = in ath10k_ce_alloc_dest_ring_64()
1652 ALIGN(dest_ring->base_addr_ce_space_unaligned, in ath10k_ce_alloc_dest_ring_64()
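
All four ring allocators follow the same dance visible in the fragments above: over-allocate by CE_DESC_RING_ALIGN, record both the CPU pointer and the bus address unaligned, then align each view separately so both still name the same first descriptor. In outline (error handling omitted; a sketch of the shared pattern, not a verbatim excerpt):

size_t sz = nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN;

ring->base_addr_owner_space_unaligned =
	dma_alloc_coherent(ar->dev, sz, &base_addr, GFP_KERNEL);
ring->base_addr_ce_space_unaligned = base_addr;

/* Align the CPU pointer and the device address to the same boundary. */
ring->base_addr_owner_space =
	PTR_ALIGN(ring->base_addr_owner_space_unaligned,
		  CE_DESC_RING_ALIGN);
ring->base_addr_ce_space =
	ALIGN(ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN);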
1659 * Initialize a Copy Engine based on caller-supplied attributes.
1670 if (attr->src_nentries) { in ath10k_ce_init_pipe()
1679 if (attr->dest_nentries) { in ath10k_ce_init_pipe()
1721 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in _ath10k_ce_free_pipe()
1723 if (ce_state->src_ring) { in _ath10k_ce_free_pipe()
1724 if (ar->hw_params.shadow_reg_support) in _ath10k_ce_free_pipe()
1725 kfree(ce_state->src_ring->shadow_base_unaligned); in _ath10k_ce_free_pipe()
1726 dma_free_coherent(ar->dev, in _ath10k_ce_free_pipe()
1727 (ce_state->src_ring->nentries * in _ath10k_ce_free_pipe()
1730 ce_state->src_ring->base_addr_owner_space, in _ath10k_ce_free_pipe()
1731 ce_state->src_ring->base_addr_ce_space); in _ath10k_ce_free_pipe()
1732 kfree(ce_state->src_ring); in _ath10k_ce_free_pipe()
1735 if (ce_state->dest_ring) { in _ath10k_ce_free_pipe()
1736 dma_free_coherent(ar->dev, in _ath10k_ce_free_pipe()
1737 (ce_state->dest_ring->nentries * in _ath10k_ce_free_pipe()
1740 ce_state->dest_ring->base_addr_owner_space, in _ath10k_ce_free_pipe()
1741 ce_state->dest_ring->base_addr_ce_space); in _ath10k_ce_free_pipe()
1742 kfree(ce_state->dest_ring); in _ath10k_ce_free_pipe()
1745 ce_state->src_ring = NULL; in _ath10k_ce_free_pipe()
1746 ce_state->dest_ring = NULL; in _ath10k_ce_free_pipe()
1752 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in _ath10k_ce_free_pipe_64()
1754 if (ce_state->src_ring) { in _ath10k_ce_free_pipe_64()
1755 if (ar->hw_params.shadow_reg_support) in _ath10k_ce_free_pipe_64()
1756 kfree(ce_state->src_ring->shadow_base_unaligned); in _ath10k_ce_free_pipe_64()
1757 dma_free_coherent(ar->dev, in _ath10k_ce_free_pipe_64()
1758 (ce_state->src_ring->nentries * in _ath10k_ce_free_pipe_64()
1761 ce_state->src_ring->base_addr_owner_space, in _ath10k_ce_free_pipe_64()
1762 ce_state->src_ring->base_addr_ce_space); in _ath10k_ce_free_pipe_64()
1763 kfree(ce_state->src_ring); in _ath10k_ce_free_pipe_64()
1766 if (ce_state->dest_ring) { in _ath10k_ce_free_pipe_64()
1767 dma_free_coherent(ar->dev, in _ath10k_ce_free_pipe_64()
1768 (ce_state->dest_ring->nentries * in _ath10k_ce_free_pipe_64()
1771 ce_state->dest_ring->base_addr_owner_space, in _ath10k_ce_free_pipe_64()
1772 ce_state->dest_ring->base_addr_ce_space); in _ath10k_ce_free_pipe_64()
1773 kfree(ce_state->dest_ring); in _ath10k_ce_free_pipe_64()
1776 ce_state->src_ring = NULL; in _ath10k_ce_free_pipe_64()
1777 ce_state->dest_ring = NULL; in _ath10k_ce_free_pipe_64()
1783 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in ath10k_ce_free_pipe()
1785 ce_state->ops->ce_free_pipe(ar, ce_id); in ath10k_ce_free_pipe()
1796 lockdep_assert_held(&ar->dump_mutex); in ath10k_ce_dump_registers()
1800 spin_lock_bh(&ce->ce_lock); in ath10k_ce_dump_registers()
1815 crash_data->ce_crash_data[id] = ce_data; in ath10k_ce_dump_registers()
1825 spin_unlock_bh(&ce->ce_lock); in ath10k_ce_dump_registers()
1861 switch (ar->hw_rev) { in ath10k_ce_set_ops()
1863 ce_state->ops = &ce_64_ops; in ath10k_ce_set_ops()
1866 ce_state->ops = &ce_ops; in ath10k_ce_set_ops()
1875 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; in ath10k_ce_alloc_pipe()
1885 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); in ath10k_ce_alloc_pipe()
1887 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); in ath10k_ce_alloc_pipe()
1889 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); in ath10k_ce_alloc_pipe()
1891 ce_state->ar = ar; in ath10k_ce_alloc_pipe()
1892 ce_state->id = ce_id; in ath10k_ce_alloc_pipe()
1893 ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id); in ath10k_ce_alloc_pipe()
1894 ce_state->attr_flags = attr->flags; in ath10k_ce_alloc_pipe()
1895 ce_state->src_sz_max = attr->src_sz_max; in ath10k_ce_alloc_pipe()
1897 if (attr->src_nentries) in ath10k_ce_alloc_pipe()
1898 ce_state->send_cb = attr->send_cb; in ath10k_ce_alloc_pipe()
1900 if (attr->dest_nentries) in ath10k_ce_alloc_pipe()
1901 ce_state->recv_cb = attr->recv_cb; in ath10k_ce_alloc_pipe()
1903 if (attr->src_nentries) { in ath10k_ce_alloc_pipe()
1904 ce_state->src_ring = in ath10k_ce_alloc_pipe()
1905 ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr); in ath10k_ce_alloc_pipe()
1906 if (IS_ERR(ce_state->src_ring)) { in ath10k_ce_alloc_pipe()
1907 ret = PTR_ERR(ce_state->src_ring); in ath10k_ce_alloc_pipe()
1910 ce_state->src_ring = NULL; in ath10k_ce_alloc_pipe()
1915 if (attr->dest_nentries) { in ath10k_ce_alloc_pipe()
1916 ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar, in ath10k_ce_alloc_pipe()
1919 if (IS_ERR(ce_state->dest_ring)) { in ath10k_ce_alloc_pipe()
1920 ret = PTR_ERR(ce_state->dest_ring); in ath10k_ce_alloc_pipe()
1923 ce_state->dest_ring = NULL; in ath10k_ce_alloc_pipe()
1940 ce->vaddr_rri = dma_alloc_coherent(ar->dev, in ath10k_ce_alloc_rri()
1942 &ce->paddr_rri, GFP_KERNEL); in ath10k_ce_alloc_rri()
1944 if (!ce->vaddr_rri) in ath10k_ce_alloc_rri()
1947 ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low, in ath10k_ce_alloc_rri()
1948 lower_32_bits(ce->paddr_rri)); in ath10k_ce_alloc_rri()
1949 ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high, in ath10k_ce_alloc_rri()
1950 (upper_32_bits(ce->paddr_rri) & in ath10k_ce_alloc_rri()
1954 ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr; in ath10k_ce_alloc_rri()
1957 value |= ar->hw_ce_regs->upd->mask; in ath10k_ce_alloc_rri()
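
ath10k_ce_alloc_rri() first programs the DMA address of the shared index array into the CE RRI low/high registers (lines 1947-1950), then walks every engine setting the ctrl1 update bit so the hardware starts publishing read indices to DDR. A condensed sketch of that per-engine enable loop, assembled from the fragments above:

/* Sketch: set each engine's ctrl1 "upd" bit so it publishes its
 * read index to the DDR array.
 */
for (i = 0; i < CE_COUNT; i++) {
	ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
	ce_base_addr = ath10k_ce_base_address(ar, i);
	value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
	value |= ar->hw_ce_regs->upd->mask;
	ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
}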
1967 dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)), in ath10k_ce_free_rri()
1968 ce->vaddr_rri, in ath10k_ce_free_rri()
1969 ce->paddr_rri); in ath10k_ce_free_rri()