Lines Matching defs:rxb

222 struct iwl_rx_mem_buffer *rxb)
229 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
230 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
234 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
241 (u32)rxb->vid, rxq->id, rxq->write);
252 struct iwl_rx_mem_buffer *rxb;
268 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
270 list_del(&rxb->list);
271 rxb->invalid = false;
273 WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
275 iwl_pcie_restock_bd(trans, rxq, rxb);
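
The two forms of bd[rxq->write] above point at two receive-buffer-descriptor layouts: a structured descriptor with separate address and RB-ID fields (lines 229-230), and a packed form where the virtual RB ID is OR'ed into the low bits of the page-aligned DMA address (line 234). Below is a minimal, self-contained sketch of the packed encoding; the helper names and the 12-bit ID width are assumptions made for illustration, not taken from the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers modelling the packed descriptor form at line 234:
 * the buffer DMA address is aligned, so its low bits are free to carry a
 * small virtual RB ID.  The 12-bit width is an assumption. */
#define RBD_VID_MASK 0x0FFFULL

static uint64_t rbd_pack(uint64_t page_dma, uint16_t vid)
{
	assert((page_dma & RBD_VID_MASK) == 0);   /* address must be aligned */
	assert(vid != 0 && vid <= RBD_VID_MASK);  /* ID 0 is reserved        */
	return page_dma | vid;
}

static void rbd_unpack(uint64_t bd, uint64_t *page_dma, uint16_t *vid)
{
	*vid = (uint16_t)(bd & RBD_VID_MASK);
	*page_dma = bd & ~RBD_VID_MASK;
}

int main(void)
{
	uint64_t bd = rbd_pack(0x12345000ULL, 42);
	uint64_t dma;
	uint16_t vid;

	rbd_unpack(bd, &dma, &vid);
	printf("bd=%#llx dma=%#llx vid=%u\n",
	       (unsigned long long)bd, (unsigned long long)dma, (unsigned)vid);
	return 0;
}

The encoding only works because the low bits of page_dma are guaranteed clear, which appears to be what the WARN_ON against supported_dma_mask at line 273 asserts before the descriptor is written.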
298 struct iwl_rx_mem_buffer *rxb;
314 /* The overwritten rxb must be a used one */
315 rxb = rxq->queue[rxq->write];
316 BUG_ON(rxb && rxb->page);
319 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
321 list_del(&rxb->list);
322 rxb->invalid = false;
325 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
326 rxq->queue[rxq->write] = rxb;
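
Line 325 instead feeds the legacy (single-queue) descriptor through iwl_pcie_dma_addr2rbd_ptr(). A standalone sketch of that conversion follows, assuming the usual encoding in which the low 8 bits of a 256-byte-aligned address are dropped so a 36-bit DMA address fits a 32-bit descriptor word; the shift width is an assumption here, not shown in the matches.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the legacy RBD pointer encoding assumed for line 325: keep
 * bits [35:8] of the buffer DMA address in a 32-bit word, relying on the
 * buffer being at least 256-byte aligned. */
static uint32_t dma_addr2rbd_ptr(uint64_t dma_addr)
{
	return (uint32_t)(dma_addr >> 8);
}

int main(void)
{
	uint64_t dma = 0x8ABCDE000ULL;   /* example 36-bit, aligned address */

	printf("rbd ptr = %#x\n", (unsigned)dma_addr2rbd_ptr(dma));
	return 0;
}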
437 struct iwl_rx_mem_buffer *rxb;
461 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
463 list_del(&rxb->list);
466 BUG_ON(rxb->page);
467 rxb->page = page;
468 rxb->offset = offset;
470 rxb->page_dma =
471 dma_map_page(trans->dev, page, rxb->offset,
474 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
475 rxb->page = NULL;
477 list_add(&rxb->list, &rxq->rx_used);
485 list_add_tail(&rxb->list, &rxq->rx_free);
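
Lines 461-485 outline the refill pattern: take a descriptor from rx_used, attach a freshly allocated page, DMA-map it for device writes, and either roll back on a mapping failure or move the descriptor to rx_free. A kernel-style sketch of that loop follows; the my_* names are invented for illustration, and the driver's locking and page-sharing details are omitted.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/list.h>

/* Simplified stand-ins for the driver's structures (illustration only). */
struct my_rxb {
	struct list_head list;
	struct page *page;        /* NULL while the slot has no buffer */
	dma_addr_t page_dma;
	unsigned int offset;
};

struct my_rxq {
	struct list_head rx_used; /* descriptors waiting for a page        */
	struct list_head rx_free; /* descriptors ready to hand to hardware */
};

static void my_alloc_rbs(struct device *dev, struct my_rxq *rxq,
			 unsigned int order, size_t rb_size)
{
	while (!list_empty(&rxq->rx_used)) {
		struct my_rxb *rxb = list_first_entry(&rxq->rx_used,
						      struct my_rxb, list);
		struct page *page = alloc_pages(GFP_KERNEL, order);

		if (!page)
			return;                    /* retry later */

		list_del(&rxb->list);
		rxb->page = page;
		rxb->offset = 0;
		rxb->page_dma = dma_map_page(dev, page, rxb->offset,
					     rb_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, rxb->page_dma)) {
			/* roll back: free the page, keep the slot on rx_used */
			rxb->page = NULL;
			__free_pages(page, order);
			list_add(&rxb->list, &rxq->rx_used);
			return;
		}
		list_add_tail(&rxb->list, &rxq->rx_free);
	}
}

The background allocator at lines 542-575 follows the same allocate-map-or-bail shape, but works on local lists so completed buffers can be handed back in batches.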
542 struct iwl_rx_mem_buffer *rxb;
551 /* Get the first rxb from the rbd list */
552 rxb = list_first_entry(&local_empty,
554 BUG_ON(rxb->page);
557 page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
561 rxb->page = page;
564 rxb->page_dma = dma_map_page(trans->dev, page,
565 rxb->offset,
568 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
569 rxb->page = NULL;
575 list_move(&rxb->list, &local_allocated);
640 struct iwl_rx_mem_buffer *rxb =
644 list_move(&rxb->list, &rxq->rx_free);
1158 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1161 list_add(&rxb->list, &rba->rbd_empty);
1163 list_add(&rxb->list, &def_rxq->rx_used);
1164 trans_pcie->global_table[i] = rxb;
1165 rxb->vid = (u16)(i + 1);
1166 rxb->invalid = true;
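
Lines 1158-1166 show the init-time wiring: every descriptor in the pool gets a virtual RB ID of pool index + 1 (so ID 0 never maps to a real buffer), is recorded in a global table for direct lookup, and starts out flagged invalid. Collecting the fields referenced throughout these matches, the descriptor looks roughly like the sketch below; field order and exact widths are guesses.

#include <linux/list.h>
#include <linux/types.h>

struct page;

/*
 * Rough reconstruction of the receive-buffer descriptor, inferred only
 * from the fields these matches reference (page, page_dma, offset, vid,
 * invalid, list).
 */
struct my_rx_mem_buffer {
	dma_addr_t page_dma;      /* DMA address handed to the device      */
	struct page *page;        /* backing page, NULL when unallocated   */
	unsigned int offset;      /* offset of this buffer within the page */
	u16 vid;                  /* virtual RB ID: pool index + 1         */
	bool invalid;             /* true while not owned by the hardware  */
	struct list_head list;    /* rx_free / rx_used / rbd_empty linkage */
};

/* Init-time wiring as suggested by lines 1158-1166 (names invented). */
static void my_rx_pool_init(struct my_rx_mem_buffer *pool,
			    struct my_rx_mem_buffer **global_table,
			    struct list_head *rbd_empty, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct my_rx_mem_buffer *rxb = &pool[i];

		list_add(&rxb->list, rbd_empty);
		global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);   /* ID 0 never maps to a buffer */
		rxb->invalid = true;       /* not yet handed to hardware  */
	}
}

The listing also shows some descriptors going to def_rxq->rx_used rather than rbd_empty (line 1163); the sketch keeps a single list for brevity.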
1270 struct iwl_rx_mem_buffer *rxb,
1278 list_add_tail(&rxb->list, &rxq->rx_used);
1303 struct iwl_rx_mem_buffer *rxb,
1313 if (WARN_ON(!rxb))
1316 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1323 ._offset = rxb->offset + offset,
1325 ._page = rxb->page,
1411 IWL_WARN(trans, "Claim null rxb?\n");
1421 __free_pages(rxb->page, trans_pcie->rx_page_order);
1422 rxb->page = NULL;
1428 if (rxb->page != NULL) {
1429 rxb->page_dma =
1430 dma_map_page(trans->dev, rxb->page, rxb->offset,
1433 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1439 __free_pages(rxb->page, trans_pcie->rx_page_order);
1440 rxb->page = NULL;
1441 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1443 list_add_tail(&rxb->list, &rxq->rx_free);
1447 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
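
Lines 1428-1447 show the tail of per-buffer handling: the page was unmapped before the CPU looked at it (line 1316), and if the RX handler did not take ownership of it, it is mapped back toward the device and the descriptor returns to rx_free; if the remap fails, the page is freed and the descriptor goes through the reuse path instead. A sketch of that decision, reusing the my_rx_mem_buffer layout sketched earlier; my_reuse_rbd() merely stands in for iwl_pcie_rx_reuse_rbd().

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/list.h>

/* Placeholder for the driver's recycle path: only the empty descriptor
 * is put back, onto the "needs a page" list. */
static void my_reuse_rbd(struct my_rx_mem_buffer *rxb,
			 struct list_head *rx_used)
{
	list_add_tail(&rxb->list, rx_used);
}

static void my_requeue_rb(struct device *dev, struct my_rx_mem_buffer *rxb,
			  struct list_head *rx_free, struct list_head *rx_used,
			  size_t rb_size, unsigned int order)
{
	if (!rxb->page) {
		/* the RX handler kept the page; recycle only the descriptor */
		my_reuse_rbd(rxb, rx_used);
		return;
	}

	/* page is still ours: map it back toward the device and reuse it */
	rxb->page_dma = dma_map_page(dev, rxb->page, rxb->offset,
				     rb_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rxb->page_dma)) {
		__free_pages(rxb->page, order);
		rxb->page = NULL;
		my_reuse_rbd(rxb, rx_used);
		return;
	}

	list_add_tail(&rxb->list, rx_free);
}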
1455 struct iwl_rx_mem_buffer *rxb;
1462 rxb = rxq->queue[i];
1464 return rxb;
1486 rxb = trans_pcie->global_table[vid - 1];
1487 if (rxb->invalid)
1490 IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1492 rxb->invalid = true;
1494 return rxb;
1497 WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
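
Lines 1486-1497 are the reverse mapping on the completion path: the hardware reports a virtual RB ID, which is bounds-checked, looked up in the global table at vid - 1, and rejected if the descriptor is still flagged invalid (never handed to the hardware, or already claimed). A sketch of that check, again reusing the my_rx_mem_buffer layout from the earlier sketch; the table-size bound is an assumption.

#include <linux/bug.h>
#include <linux/types.h>

static struct my_rx_mem_buffer *
my_get_rxb(struct my_rx_mem_buffer **global_table, u16 table_size, u16 vid)
{
	struct my_rx_mem_buffer *rxb;

	/* the table is indexed by vid - 1, so ID 0 can never be valid */
	if (WARN(!vid || vid > table_size, "Invalid rxb from HW %u\n", (u32)vid))
		return NULL;

	rxb = global_table[vid - 1];
	if (WARN(rxb->invalid, "Invalid rxb from HW %u\n", (u32)vid))
		return NULL;

	rxb->invalid = true;   /* claimed: back under driver ownership */
	return rxb;
}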
1533 struct iwl_rx_mem_buffer *rxb;
1551 rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1552 if (!rxb)
1569 list_add_tail(&rxb->list, &rxq->rx_free);
1572 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);