Lines Matching refs:rxq
55 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
58 * or reused - added immediately to the iwl->rxq->rx_free list.
66 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
76 * detached from the iwl->rxq. The driver 'processed' index is updated.
77 * + If there are no allocated buffers in iwl->rxq->rx_free,
104 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
108 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
109 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
111 * rxq.queue -> rxq.rx_free -> rxq.queue
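
The three flows quoted above all describe the same bookkeeping: receive buffer descriptors circulate between rx_used, the background allocator's lists, rx_free and the hardware queue. A minimal userspace sketch of that circulation, using BSD sys/queue.h lists and made-up names (struct rbd, move_one) in place of the driver's list_head machinery:

    /* Userspace model of the RBD circulation: buffers drain from the
     * hardware queue into rx_used, are refilled (page re-attached) and
     * parked on rx_free, then restocked back onto the queue. */
    #include <stdio.h>
    #include <sys/queue.h>

    struct rbd {
        int id;
        TAILQ_ENTRY(rbd) entry;
    };
    TAILQ_HEAD(rbd_list, rbd);

    static void move_one(struct rbd_list *from, struct rbd_list *to)
    {
        struct rbd *r = TAILQ_FIRST(from);

        if (r) {
            TAILQ_REMOVE(from, r, entry);
            TAILQ_INSERT_TAIL(to, r, entry);
        }
    }

    int main(void)
    {
        struct rbd_list rx_used = TAILQ_HEAD_INITIALIZER(rx_used);
        struct rbd_list rx_free = TAILQ_HEAD_INITIALIZER(rx_free);
        static struct rbd pool[4];
        struct rbd *r;

        for (int i = 0; i < 4; i++) {
            pool[i].id = i;
            TAILQ_INSERT_TAIL(&rx_used, &pool[i], entry);
        }
        /* "replenish": pages attached, RBDs become usable again */
        while (!TAILQ_EMPTY(&rx_used))
            move_one(&rx_used, &rx_free);

        TAILQ_FOREACH(r, &rx_free, entry)
            printf("rbd %d ready for restock\n", r->id);
        return 0;
    }
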
119 static int iwl_rxq_space(const struct iwl_rxq *rxq)
122 WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
130 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
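
iwl_rxq_space() relies on queue_size being a power of two (hence the WARN_ON above), so the free-slot count can be computed with a mask instead of a modulo, and one slot is kept unused so that read == write always means "empty". A small standalone check of the same arithmetic, with a hypothetical queue_size of 256:

    #include <assert.h>
    #include <stdio.h>

    /* Same arithmetic as the quoted return statement: requires a
     * power-of-two size, keeps one slot free so read == write means empty. */
    static unsigned int ring_space(unsigned int read, unsigned int write,
                                   unsigned int size)
    {
        assert((size & (size - 1)) == 0);   /* power of two */
        return (read - write - 1) & (size - 1);
    }

    int main(void)
    {
        unsigned int size = 256;            /* hypothetical queue_size */

        printf("%u\n", ring_space(0, 0, size));   /* empty ring: 255 usable */
        printf("%u\n", ring_space(10, 9, size));  /* write just behind read: 0 */
        printf("%u\n", ring_space(0, 128, size)); /* half full: 127 */
        return 0;
    }
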
167 struct iwl_rxq *rxq)
171 lockdep_assert_held(&rxq->lock);
187 rxq->need_update = true;
192 rxq->write_actual = round_down(rxq->write, 8);
194 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
196 iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
197 HBUS_TARG_WRPTR_RX_Q(rxq->id));
199 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
200 rxq->write_actual);
209 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
211 if (!rxq->need_update)
213 spin_lock_bh(&rxq->lock);
214 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
215 rxq->need_update = false;
216 spin_unlock_bh(&rxq->lock);
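
iwl_pcie_rxq_inc_wr_ptr() rounds the software write index down to a multiple of 8 before telling the hardware, and when the device cannot be poked right away it only sets need_update so that iwl_pcie_rxq_check_wrptrs() retries later under the queue lock. A small model of that deferral; the register write is reduced to a printf, the lock is omitted, and the "device awake" condition is passed in as a flag, all simplifications rather than the driver's logic:

    #include <stdbool.h>
    #include <stdio.h>

    struct model_rxq {
        unsigned int write, write_actual;
        bool need_update;
    };

    static void inc_wr_ptr(struct model_rxq *q, bool device_awake)
    {
        if (!device_awake) {
            q->need_update = true;    /* retried from check_wrptrs later */
            return;
        }
        q->write_actual = q->write & ~0x7u;
        printf("wrote WPTR register: %u\n", q->write_actual);
    }

    static void check_wrptrs(struct model_rxq *q)
    {
        if (!q->need_update)
            return;
        inc_wr_ptr(q, true);
        q->need_update = false;
    }

    int main(void)
    {
        struct model_rxq q = { .write = 24 };

        inc_wr_ptr(&q, false);    /* device not accessible: deferred */
        check_wrptrs(&q);         /* later pass actually hits the register */
        return 0;
    }
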
221 struct iwl_rxq *rxq,
225 struct iwl_rx_transfer_desc *bd = rxq->bd;
229 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
230 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
232 __le64 *bd = rxq->bd;
234 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
241 (u32)rxb->vid, rxq->id, rxq->write);
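
iwl_pcie_restock_bd() writes the free buffer descriptor in one of two formats: a structured transfer descriptor (addr + rbid) on newer devices, or a single 64-bit word where the buffer id ("vid") is OR'ed into the low bits of the page-aligned DMA address. A sketch of the packed variant; the 12-bit vid mask is an assumption based on 4 KiB page alignment, not a quoted constant:

    #include <stdint.h>
    #include <stdio.h>

    /* Packed free-BD word: the page DMA address is page aligned, so its
     * low bits can carry the buffer id used later to match the used-BD
     * ring entry back to an RBD. */
    static uint64_t pack_bd(uint64_t page_dma, uint16_t vid)
    {
        return page_dma | (vid & 0x0FFF);
    }

    int main(void)
    {
        uint64_t dma = 0x12345000;    /* hypothetical, page aligned */
        uint16_t vid = 0x2a5;
        uint64_t bd = pack_bd(dma, vid);

        printf("bd=0x%llx addr=0x%llx vid=0x%llx\n",
               (unsigned long long)bd,
               (unsigned long long)(bd & ~0xFFFull),
               (unsigned long long)(bd & 0xFFF));
        return 0;
    }
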
249 struct iwl_rxq *rxq)
265 spin_lock_bh(&rxq->lock);
266 while (rxq->free_count) {
268 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
275 iwl_pcie_restock_bd(trans, rxq, rxb);
276 rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
277 rxq->free_count--;
279 spin_unlock_bh(&rxq->lock);
285 if (rxq->write_actual != (rxq->write & ~0x7)) {
286 spin_lock_bh(&rxq->lock);
287 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
288 spin_unlock_bh(&rxq->lock);
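
The rxmq restock loop above pops entries off rx_free one at a time, writes a descriptor at rxq->write, advances the index with a power-of-two mask and decrements free_count; the hardware write pointer is only bumped once the index crosses an 8-entry boundary. A userspace model of that loop (locking omitted, names made up):

    #include <stdint.h>
    #include <stdio.h>

    #define QSIZE 16    /* hypothetical power-of-two queue_size */

    struct model_rxq {
        uint64_t bd[QSIZE];
        unsigned int write, write_actual, free_count;
    };

    static void restock(struct model_rxq *q, const uint64_t *free_bds)
    {
        unsigned int i = 0;

        while (q->free_count) {
            q->bd[q->write] = free_bds[i++];    /* the restock_bd() step */
            q->write = (q->write + 1) & (QSIZE - 1);
            q->free_count--;
        }
        /* the device is only told about full groups of 8 descriptors */
        if (q->write_actual != (q->write & ~0x7u)) {
            q->write_actual = q->write & ~0x7u;
            printf("update hw write pointer to %u\n", q->write_actual);
        }
    }

    int main(void)
    {
        struct model_rxq q = { .free_count = 12 };
        uint64_t bds[12] = { 0 };

        restock(&q, bds);    /* write ends at 12, hw pointer moves to 8 */
        return 0;
    }
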
296 struct iwl_rxq *rxq)
311 spin_lock_bh(&rxq->lock);
312 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
313 __le32 *bd = (__le32 *)rxq->bd;
315 rxb = rxq->queue[rxq->write];
319 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
325 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
326 rxq->queue[rxq->write] = rxb;
327 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
328 rxq->free_count--;
330 spin_unlock_bh(&rxq->lock);
334 if (rxq->write_actual != (rxq->write & ~0x7)) {
335 spin_lock_bh(&rxq->lock);
336 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
337 spin_unlock_bh(&rxq->lock);
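
The legacy single-queue path differs mainly in the descriptor format: iwl_pcie_dma_addr2rbd_ptr() appears to store the DMA address shifted right by 8 bits in a 32-bit word, i.e. the receive buffer must be 256-byte aligned. Treat the exact bit layout below as an assumption for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Legacy RBD word: 256-byte-aligned DMA address, shifted right by 8. */
    static uint32_t dma_addr2rbd_ptr(uint64_t dma_addr)
    {
        return (uint32_t)(dma_addr >> 8);
    }

    int main(void)
    {
        uint64_t dma = 0x1abcd4000;    /* hypothetical, 256-byte aligned */

        printf("rbd ptr word = 0x%08x\n", dma_addr2rbd_ptr(dma));
        return 0;
    }
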
353 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
356 iwl_pcie_rxmq_restock(trans, rxq);
358 iwl_pcie_rxsq_restock(trans, rxq);
434 struct iwl_rxq *rxq)
443 spin_lock_bh(&rxq->lock);
444 if (list_empty(&rxq->rx_used)) {
445 spin_unlock_bh(&rxq->lock);
448 spin_unlock_bh(&rxq->lock);
454 spin_lock_bh(&rxq->lock);
456 if (list_empty(&rxq->rx_used)) {
457 spin_unlock_bh(&rxq->lock);
461 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
464 spin_unlock_bh(&rxq->lock);
476 spin_lock_bh(&rxq->lock);
477 list_add(&rxb->list, &rxq->rx_used);
478 spin_unlock_bh(&rxq->lock);
483 spin_lock_bh(&rxq->lock);
485 list_add_tail(&rxb->list, &rxq->rx_free);
486 rxq->free_count++;
488 spin_unlock_bh(&rxq->lock);
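
The rxq_alloc_rbs lines above show a classic pattern: check rx_used under the lock, drop the lock for the (possibly sleeping) page allocation, then re-take it and re-check before consuming an entry; on a mapping failure the RBD goes back onto rx_used. A userspace model of that sequence, with malloc() standing in for page allocation and all names illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct rbd { void *page; TAILQ_ENTRY(rbd) entry; };
    TAILQ_HEAD(rbd_list, rbd);

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct rbd_list rx_used = TAILQ_HEAD_INITIALIZER(rx_used);
    static struct rbd_list rx_free = TAILQ_HEAD_INITIALIZER(rx_free);
    static unsigned int free_count;

    static void alloc_rbs(void)
    {
        for (;;) {
            pthread_mutex_lock(&lock);
            if (TAILQ_EMPTY(&rx_used)) {
                pthread_mutex_unlock(&lock);
                return;
            }
            pthread_mutex_unlock(&lock);

            void *page = malloc(4096);    /* stands in for page allocation */
            if (!page)
                return;

            pthread_mutex_lock(&lock);
            if (TAILQ_EMPTY(&rx_used)) {  /* someone drained it meanwhile */
                pthread_mutex_unlock(&lock);
                free(page);
                return;
            }
            struct rbd *rxb = TAILQ_FIRST(&rx_used);
            TAILQ_REMOVE(&rx_used, rxb, entry);
            rxb->page = page;
            TAILQ_INSERT_TAIL(&rx_free, rxb, entry);
            free_count++;
            pthread_mutex_unlock(&lock);
        }
    }

    int main(void)
    {
        static struct rbd pool[3];

        for (int i = 0; i < 3; i++)
            TAILQ_INSERT_TAIL(&rx_used, &pool[i], entry);
        alloc_rbs();
        printf("free_count=%u\n", free_count);
        return 0;
    }
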
618 struct iwl_rxq *rxq)
624 lockdep_assert_held(&rxq->lock);
644 list_move(&rxb->list, &rxq->rx_free);
648 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
649 rxq->free_count += RX_CLAIM_REQ_ALLOC;
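
The allocator hands buffers back in whole batches: used_count drops and free_count rises by RX_CLAIM_REQ_ALLOC in one step, as the two quoted lines show. A sketch of just the counting side, ignoring the allocator's request/ready handshake; the batch size of 8 is assumed here for illustration:

    #include <stdio.h>

    #define RX_CLAIM_REQ_ALLOC 8    /* value assumed for illustration */

    struct counts { unsigned int used_count, free_count; };

    static void allocator_get(struct counts *c)
    {
        if (c->used_count < RX_CLAIM_REQ_ALLOC)
            return;    /* no full batch ready to claim */
        c->used_count -= RX_CLAIM_REQ_ALLOC;
        c->free_count += RX_CLAIM_REQ_ALLOC;
    }

    int main(void)
    {
        struct counts c = { .used_count = 13, .free_count = 2 };

        allocator_get(&c);
        printf("used=%u free=%u\n", c.used_count, c.free_count);  /* 5, 10 */
        return 0;
    }
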
683 struct iwl_rxq *rxq)
687 if (rxq->bd)
689 free_size * rxq->queue_size,
690 rxq->bd, rxq->bd_dma);
691 rxq->bd_dma = 0;
692 rxq->bd = NULL;
694 rxq->rb_stts_dma = 0;
695 rxq->rb_stts = NULL;
697 if (rxq->used_bd)
700 rxq->queue_size,
701 rxq->used_bd, rxq->used_bd_dma);
702 rxq->used_bd_dma = 0;
703 rxq->used_bd = NULL;
718 struct iwl_rxq *rxq)
726 spin_lock_init(&rxq->lock);
728 rxq->queue_size = trans->cfg->num_rbds;
730 rxq->queue_size = RX_QUEUE_SIZE;
738 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
739 &rxq->bd_dma, GFP_KERNEL);
740 if (!rxq->bd)
744 rxq->used_bd = dma_alloc_coherent(dev,
746 rxq->queue_size,
747 &rxq->used_bd_dma,
749 if (!rxq->used_bd)
753 rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
754 rxq->rb_stts_dma =
755 trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
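
Per the two lines above, the receive-buffer status area is one shared DMA allocation, and each queue gets a slice at id * rb_stts_size (CPU pointer and bus address offset in lockstep). A sketch of that offset arithmetic with illustrative sizes and queue count:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t base_rb_stts[4 * 16];           /* pretend coherent block */
        uint64_t base_rb_stts_dma = 0x7f000000; /* hypothetical bus address */
        unsigned int rb_stts_size = 16;

        for (unsigned int id = 0; id < 4; id++) {
            void *rb_stts = base_rb_stts + id * rb_stts_size;
            uint64_t rb_stts_dma = base_rb_stts_dma + id * rb_stts_size;

            printf("rxq %u: cpu %p, dma 0x%llx\n", id, rb_stts,
                   (unsigned long long)rb_stts_dma);
        }
        return 0;
    }
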
761 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
763 iwl_pcie_free_rxq_dma(trans, rxq);
776 if (WARN_ON(trans_pcie->rxq))
779 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
788 if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
811 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
813 rxq->id = i;
814 ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
833 kfree(trans_pcie->rxq);
834 trans_pcie->rxq = NULL;
839 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
875 (u32)(rxq->bd_dma >> 8));
879 rxq->rb_stts_dma >> 4);
943 trans_pcie->rxq[i].bd_dma);
947 trans_pcie->rxq[i].used_bd_dma);
951 trans_pcie->rxq[i].rb_stts_dma);
995 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
997 lockdep_assert_held(&rxq->lock);
999 INIT_LIST_HEAD(&rxq->rx_free);
1000 INIT_LIST_HEAD(&rxq->rx_used);
1001 rxq->free_count = 0;
1002 rxq->used_count = 0;
1014 struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1022 ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1025 rxq->id, ret, budget);
1033 napi_complete_done(&rxq->napi, ret);
1041 struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1049 ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1050 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
1054 int irq_line = rxq->id;
1058 rxq->id == 1)
1065 napi_complete_done(&rxq->napi, ret);
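
Both poll callbacks above follow the usual NAPI contract: handle at most `budget` frames, and only when fewer than `budget` were handled call napi_complete_done() so interrupts are re-armed; otherwise stay on the poll list. A userspace model of that budget loop (no real NAPI, numbers made up):

    #include <stdbool.h>
    #include <stdio.h>

    static int pending = 70;    /* pretend backlog of received frames */

    static int rx_handle(int budget)
    {
        int done = pending < budget ? pending : budget;

        pending -= done;
        return done;
    }

    static bool poll_once(int budget)
    {
        int ret = rx_handle(budget);

        printf("handled %d, budget %d\n", ret, budget);
        if (ret < budget)
            return true;    /* napi_complete_done() equivalent: re-arm irq */
        return false;       /* stay scheduled, poll again */
    }

    int main(void)
    {
        while (!poll_once(64))
            ;
        return 0;
    }
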
1076 if (unlikely(!trans_pcie->rxq))
1080 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1082 if (rxq && rxq->napi.poll)
1083 napi_synchronize(&rxq->napi);
1094 if (!trans_pcie->rxq) {
1099 def_rxq = trans_pcie->rxq;
1117 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1119 spin_lock_bh(&rxq->lock);
1125 rxq->read = 0;
1126 rxq->write = 0;
1127 rxq->write_actual = 0;
1128 memset(rxq->rb_stts, 0,
1133 iwl_pcie_rx_init_rxb_lists(rxq);
1135 spin_unlock_bh(&rxq->lock);
1137 if (!rxq->napi.poll) {
1143 netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
1145 napi_enable(&rxq->napi);
1185 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1187 iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1189 spin_lock_bh(&trans_pcie->rxq->lock);
1190 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1191 spin_unlock_bh(&trans_pcie->rxq->lock);
1216 * if rxq is NULL, it means that nothing has been allocated,
1219 if (!trans_pcie->rxq) {
1238 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1240 iwl_pcie_free_rxq_dma(trans, rxq);
1242 if (rxq->napi.poll) {
1243 napi_disable(&rxq->napi);
1244 netif_napi_del(&rxq->napi);
1249 kfree(trans_pcie->rxq);
1255 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1259 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1271 struct iwl_rxq *rxq, bool emergency)
1278 list_add_tail(&rxb->list, &rxq->rx_used);
1284 rxq->used_count++;
1291 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1294 iwl_pcie_rx_move_to_allocator(rxq, rba);
1302 struct iwl_rxq *rxq,
1335 rxq->id, offset);
1340 FH_RSCSR_RXQ_POS != rxq->id,
1342 rxq->id,
1348 rxq->id, offset,
1384 if (rxq->id == IWL_DEFAULT_RX_QUEUE)
1385 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1388 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1389 &rxcb, rxq->id);
1441 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1443 list_add_tail(&rxb->list, &rxq->rx_free);
1444 rxq->free_count++;
1447 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
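
The reuse path batches work for the background allocator: every time used_count passes a multiple of RX_CLAIM_REQ_ALLOC by RX_POST_REQ_ALLOC, the used RBDs are moved over and an allocation request is posted, giving the allocator a head start before the batch is claimed. A sketch of just that trigger arithmetic; the constants 8 and 2 are assumed here for illustration:

    #include <stdio.h>

    #define RX_CLAIM_REQ_ALLOC 8    /* batch the allocator hands back */
    #define RX_POST_REQ_ALLOC  2    /* head start within each batch */

    int main(void)
    {
        unsigned int used_count = 0;

        for (int i = 0; i < 20; i++) {
            used_count++;
            if ((used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC)
                printf("used_count=%u: post allocation request\n",
                       used_count);
        }
        return 0;
    }
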
1451 struct iwl_rxq *rxq, int i,
1462 rxb = rxq->queue[i];
1463 rxq->queue[i] = NULL;
1468 struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
1473 struct iwl_rx_completion_desc *cd = rxq->used_bd;
1478 __le32 *cd = rxq->used_bd;
1508 struct iwl_rxq *rxq;
1512 if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1515 rxq = &trans_pcie->rxq[queue];
1518 spin_lock(&rxq->lock);
1521 r = iwl_get_closed_rb_stts(trans, rxq);
1522 i = rxq->read;
1525 r &= (rxq->queue_size - 1);
1529 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1540 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1542 iwl_pcie_rx_move_to_allocator(rxq, rba);
1549 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1551 rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1555 if (unlikely(join || rxq->next_rb_is_fragment)) {
1556 rxq->next_rb_is_fragment = join;
1569 list_add_tail(&rxb->list, &rxq->rx_free);
1570 rxq->free_count++;
1572 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1575 i = (i + 1) & (rxq->queue_size - 1);
1584 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1585 iwl_pcie_rx_allocator_get(trans, rxq);
1587 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1589 iwl_pcie_rx_move_to_allocator(rxq, rba);
1594 if (rb_pending_alloc < rxq->queue_size / 3) {
1601 rxq->read = i;
1602 spin_unlock(&rxq->lock);
1603 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1604 iwl_pcie_rxq_restock(trans, rxq);
1611 rxq->read = i;
1612 spin_unlock(&rxq->lock);
1627 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1629 iwl_pcie_rxq_restock(trans, rxq);
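
The main handler walk visible above is a plain consumer loop: the hardware publishes a "closed" index r, software consumes slots from its own read index up to (but not including) r, wrapping with the queue mask, and finally stores the new read index back into rxq->read before restocking. A skeleton of that walk with hypothetical indices:

    #include <stdio.h>

    #define QSIZE 16    /* hypothetical power-of-two queue_size */

    int main(void)
    {
        unsigned int sw_read = 13;             /* software read index */
        unsigned int r = 5 & (QSIZE - 1);      /* hw closed index, masked */
        unsigned int i = sw_read;

        while (i != r) {
            printf("handle rb at slot %u\n", i);  /* the handle_rb step */
            i = (i + 1) & (QSIZE - 1);
        }
        sw_read = i;                           /* rxq->read = i */
        printf("new read index %u\n", sw_read);
        return 0;
    }
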
1651 struct iwl_rxq *rxq;
1658 if (!trans_pcie->rxq) {
1666 rxq = &trans_pcie->rxq[entry->entry];
1671 if (!napi_schedule(&rxq->napi))
1952 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2039 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2041 __napi_schedule(&trans_pcie->rxq[0].napi);
2257 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2259 __napi_schedule(&trans_pcie->rxq[0].napi);
2267 if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
2269 __napi_schedule(&trans_pcie->rxq[1].napi);
2353 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);