Lines Matching defs:rdev
54 return (is_t5(dev->rdev.adap) && length >= 8*1024*1024*1024ULL);
58 _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
61 struct adapter *sc = rdev->adap;
79 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
85 _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
87 struct adapter *sc = rdev->adap;
114 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
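
Both write paths above end the same way (matches at 79 and 114): build a ULP_TX memory-write WR, post it, and block until the firmware's reply wakes the waiter. A minimal sketch of that shape, assuming the driver's declarations from iw_cxgbe.h and cxgbe's t4_wrq_tx(); the WR construction, including stashing &wr_wait where the reply handler can find it, is the caller's job and is elided here. post_and_wait_sketch is a name invented for illustration.

    /*
     * Post-and-wait sketch (not the driver's code).  Assumes the caller
     * built a ULP_TX memory-write WR and stored wr_wait's address in it
     * so the reply handler can wake us; both steps are elided here.
     */
    static int
    post_and_wait_sketch(struct c4iw_rdev *rdev, struct wrqe *wr,
        struct c4iw_wr_wait *wr_wait)
    {
            c4iw_init_wr_wait(wr_wait);
            t4_wrq_tx(rdev->adap, wr);      /* hand the WR to hardware */
            return (c4iw_wait_for_reply(rdev, wr_wait, 0, 0, NULL, __func__));
    }
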
119 _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
121 struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
142 ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
151 ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
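
The matches at 119-151 outline _c4iw_write_mem_dma(): write the bulk of the buffer in DMA-sized chunks, then push any leftover through the inline path. A simplified sketch of that loop; CHUNK_MAX is a stand-in for the driver's real per-WR DMA limit, the wrapped final argument of the call at 142 is assumed to be the data pointer, and the real function also DMAs aligned sub-chunks of the tail before falling back.

    /*
     * Chunking sketch for the matches at 119-151 (simplified).  CHUNK_MAX
     * stands in for the driver's per-WR DMA limit; adapter memory is
     * addressed in 32-byte units, hence addr += dmalen >> 5.
     */
    #define CHUNK_MAX	8192	/* illustrative, not the driver's value */

    static int
    write_mem_dma_sketch(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
    {
            u32 remain = len, dmalen;
            u8 *cur = data;
            int ret = 0;

            while (remain > CHUNK_MAX) {
                    dmalen = CHUNK_MAX;
                    ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, cur);
                    if (ret)
                            return (ret);
                    cur += dmalen;
                    addr += dmalen >> 5;
                    remain -= dmalen;
            }
            /* Short or unaligned tail: fall back to the inline WR path. */
            if (remain)
                    ret = _c4iw_write_mem_inline(rdev, addr, remain, cur);
            return (ret);
    }
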
162 write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
165 if (rdev->adap->params.ulptx_memwrite_dsgl && use_dsgl) {
167 if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
170 return _c4iw_write_mem_inline(rdev, addr, len,
175 return _c4iw_write_mem_inline(rdev, addr, len, data);
177 return _c4iw_write_mem_inline(rdev, addr, len, data);
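
Lines 162-177 show write_adapter_mem() in full: prefer the DSGL/DMA path when the firmware advertises ulptx_memwrite_dsgl and the use_dsgl knob allows it, and degrade to inline writes whenever the DMA attempt fails. Reassembled as a sketch; the inline_threshold comparison is inferred from the three inline-return sites (170, 175, 177) and should be treated as an assumption.

    /*
     * Reassembled from the matches at 162-177.  The inline_threshold
     * guard is an inference, not visible in the listing.
     */
    static int
    write_adapter_mem_sketch(struct c4iw_rdev *rdev, u32 addr, u32 len,
        void *data)
    {
            if (rdev->adap->params.ulptx_memwrite_dsgl && use_dsgl) {
                    if (len > inline_threshold) {
                            if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
                                    /* DMA failed; the inline path still works. */
                                    return (_c4iw_write_mem_inline(rdev, addr,
                                        len, data));
                            }
                            return (0);
                    }
                    return (_c4iw_write_mem_inline(rdev, addr, len, data));
            }
            return (_c4iw_write_mem_inline(rdev, addr, len, data));
    }
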
187 static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
198 if (c4iw_stopped(rdev))
205 stag_idx = t4_stag_alloc(rdev->adap, 1);
207 mutex_lock(&rdev->stats.lock);
208 rdev->stats.stag.fail++;
209 mutex_unlock(&rdev->stats.lock);
212 mutex_lock(&rdev->stats.lock);
213 rdev->stats.stag.cur += 32;
214 if (rdev->stats.stag.cur > rdev->stats.stag.max)
215 rdev->stats.stag.max = rdev->stats.stag.cur;
216 mutex_unlock(&rdev->stats.lock);
239 V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
246 err = write_adapter_mem(rdev, stag_idx +
247 (rdev->adap->vres.stag.start >> 5),
251 t4_stag_free(rdev->adap, stag_idx, 1);
252 mutex_lock(&rdev->stats.lock);
253 rdev->stats.stag.cur -= 32;
254 mutex_unlock(&rdev->stats.lock);
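
The write_tpt_entry() matches (187-254) trace one TPT update end to end: bail out if the device is stopped, allocate a stag index, account for it under rdev->stats.lock (each entry is 32 bytes, tracked with a cur/max watermark), write the entry into adapter memory at vres.stag.start, and unwind both the index and the counter if the write fails. An ordering sketch; the TPT entry construction is elided, and the error values and the T4_STAG_UNSET sentinel are assumptions.

    /*
     * Ordering sketch for the matches at 187-254.  The fw_ri_tpte body is
     * built by the real function and elided here; error values and the
     * failure sentinel are assumptions.
     */
    static int
    write_tpt_entry_sketch(struct c4iw_rdev *rdev, struct fw_ri_tpte *tpt,
        u32 *stag_idx)
    {
            int err;

            if (c4iw_stopped(rdev))
                    return (-EIO);

            *stag_idx = t4_stag_alloc(rdev->adap, 1);       /* one TPT slot */
            if (*stag_idx == T4_STAG_UNSET) {
                    mutex_lock(&rdev->stats.lock);
                    rdev->stats.stag.fail++;
                    mutex_unlock(&rdev->stats.lock);
                    return (-ENOMEM);
            }
            mutex_lock(&rdev->stats.lock);
            rdev->stats.stag.cur += 32;             /* bytes of TPT in use */
            if (rdev->stats.stag.cur > rdev->stats.stag.max)
                    rdev->stats.stag.max = rdev->stats.stag.cur;
            mutex_unlock(&rdev->stats.lock);

            /* vres.stag.start is a byte address; >> 5 gives 32-byte units. */
            err = write_adapter_mem(rdev, *stag_idx +
                (rdev->adap->vres.stag.start >> 5), sizeof(*tpt), tpt);
            if (err) {
                    /* Unwind: release the slot and the accounting. */
                    t4_stag_free(rdev->adap, *stag_idx, 1);
                    mutex_lock(&rdev->stats.lock);
                    rdev->stats.stag.cur -= 32;
                    mutex_unlock(&rdev->stats.lock);
            }
            return (err);
    }
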
259 static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
265 __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);
267 err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
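
The single match at 267 packs two unit conversions: write_adapter_mem() addresses adapter memory in 32-byte units (hence pbl_addr >> 5, mirroring vres.stag.start >> 5 at 247), and a PBL is an array of 8-byte big-endian page addresses, so pbl_size entries occupy pbl_size << 3 bytes. Annotated restatement of the matched line:

    /* pbl_addr: byte offset into PBL memory -> >> 5 gives 32-byte units.
     * pbl_size: count of __be64 entries     -> << 3 gives bytes.        */
    err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
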
271 static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
274 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
278 static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
281 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
285 static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
287 return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
291 static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
295 return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
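
The four wrappers above (271-295) differ only in the arguments they feed write_tpt_entry(): the second parameter is the reset flag, so passing 1 clears an existing entry while 0 plus a stag type creates one. Summarized as a comment over the call shapes visible in the matches:

    /*
     * write_tpt_entry(rdev, reset, &stag, ...) call shapes seen above:
     *
     *   reset = 0, type = FW_RI_STAG_MW    ->  allocate_window()
     *   reset = 0, type = FW_RI_STAG_NSMR  ->  allocate_stag()
     *   reset = 1                          ->  dereg_mem(), deallocate_window()
     */
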
317 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
327 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
334 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
371 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
382 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
448 err = write_pbl(&mhp->rhp->rdev,
461 err = write_pbl(&mhp->rhp->rdev, pages,
483 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
510 ret = allocate_window(&rhp->rdev, &stag, php->pdid);
522 deallocate_window(&rhp->rdev, mhp->attr.stag);
541 deallocate_window(&rhp->rdev, mhp->attr.stag);
563 if (__predict_false(c4iw_stopped(&rhp->rdev)))
567 max_num_sg > t4_max_fr_depth(&rhp->rdev, use_dsgl))
589 ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
607 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
610 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
655 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
658 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
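
The tail of the listing (317-658) is all callers, and they share one unwind discipline: whatever allocate_stag()/write_tpt_entry() registered is torn down with dereg_mem() or deallocate_window(), and a PBL chunk taken from c4iw_pblpool_alloc() is returned via c4iw_pblpool_free() with the same byte length. A skeleton of that pairing; the function and labels are illustrative, and allocate_stag()'s trailing arguments are an assumption since the listing truncates the call at 589.

    /*
     * Unwind-order sketch for the tail matches (e.g. 589-610).  Not the
     * driver's function; allocate_stag()'s trailing args are assumed.
     */
    static int
    alloc_mr_unwind_sketch(struct c4iw_dev *rhp, struct c4iw_pd *php,
        struct c4iw_mr *mhp, u32 pbl_size)
    {
            u32 stag;
            int ret;

            mhp->attr.pbl_size = pbl_size;
            mhp->attr.pbl_addr = c4iw_pblpool_alloc(&rhp->rdev, pbl_size << 3);
            if (!mhp->attr.pbl_addr)
                    return (-ENOMEM);

            ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
                mhp->attr.pbl_size, mhp->attr.pbl_addr);
            if (ret)
                    goto err_free_pbl;
            mhp->attr.stag = stag;
            return (0);

    err_free_pbl:
            /* Free with the same byte length the chunk was taken with. */
            c4iw_pblpool_free(&rhp->rdev, mhp->attr.pbl_addr, pbl_size << 3);
            return (ret);
    }
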