Lines matching +full:no +full:- +full:memory +full:- +full:wc (excerpts from net/rds/ib_frmr.c in the Linux kernel)

In rds_transition_frwr_state():

    if (cmpxchg(&ibmr->u.frmr.fr_state,
                old_state, new_state) == old_state) {
        /* enforce order of ibmr->u.frmr.fr_state update
         * before decrementing i_fastreg_inuse_count
         */
        smp_mb__before_atomic();
        atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
    }
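This helper is the FRMR state machine in miniature: the state only leaves old_state if no other context won the race, and the in-use counter is dropped strictly after the state change is published. A minimal userspace sketch of the same pattern using C11 atomics (the FRMR_IS_* names mirror the kernel's; the seq_cst read-modify-write plays the role of smp_mb__before_atomic()):

#include <stdatomic.h>
#include <stdbool.h>

enum fr_state { FRMR_IS_FREE, FRMR_IS_INUSE, FRMR_IS_STALE };

struct frmr_demo {
    _Atomic enum fr_state fr_state;
    atomic_int inuse_count;
};

/* Only the context whose CAS succeeds releases an in-use reference,
 * mirroring rds_transition_frwr_state(). */
static bool transition(struct frmr_demo *m, enum fr_state old_state,
                       enum fr_state new_state)
{
    enum fr_state expected = old_state;

    if (atomic_compare_exchange_strong(&m->fr_state, &expected,
                                       new_state)) {
        /* seq_cst RMW orders the state update before the decrement,
         * as smp_mb__before_atomic() does in the kernel version. */
        atomic_fetch_sub(&m->inuse_count, 1);
        return true;
    }
    return false;
}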
In rds_ib_alloc_frmr():

    if (npages <= RDS_MR_8K_MSG_SIZE)
        pool = rds_ibdev->mr_8k_pool;
    else
        pool = rds_ibdev->mr_1m_pool;
    ...
    if (!ibmr) {
        err = -ENOMEM;
        goto out_no_cigar;
    }

    frmr = &ibmr->u.frmr;
    frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
                           pool->max_pages);
    if (IS_ERR(frmr->mr)) {
        err = PTR_ERR(frmr->mr);
        goto out_no_cigar;
    }

    ibmr->pool = pool;
    if (pool->pool_type == RDS_IB_MR_8K_POOL)
        rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
    else
        rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

    if (atomic_read(&pool->item_count) > pool->max_items_soft)
        pool->max_items_soft = pool->max_items;

    frmr->fr_state = FRMR_IS_FREE;
    init_waitqueue_head(&frmr->fr_inv_done);
    init_waitqueue_head(&frmr->fr_reg_done);
    return ibmr;

out_no_cigar:
    kfree(ibmr);
    atomic_dec(&pool->item_count);
    return ERR_PTR(err);
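rds_ib_alloc_frmr() draws from one of two pools, sized for 8K and 1M transfers, and lets the soft item cap climb to the hard cap when the pool runs hot. A condensed, hypothetical sketch of that policy (the struct is an illustrative subset of rds_ib_mr_pool, and the two-page RDS_MR_8K_MSG_SIZE threshold is an assumption):

#include <stdatomic.h>

#define RDS_MR_8K_MSG_SIZE 2    /* pages; assumed two-page threshold */

/* Illustrative subset of rds_ib_mr_pool */
struct mr_pool {
    unsigned int max_items;         /* hard cap on pool entries */
    unsigned int max_items_soft;    /* raised toward max_items under load */
    atomic_uint item_count;
};

/* Small requests go to the 8K pool, everything else to the 1M pool,
 * and a busy pool gets its soft cap relaxed to the hard cap. */
static struct mr_pool *pick_pool(struct mr_pool *pool_8k,
                                 struct mr_pool *pool_1m,
                                 unsigned int npages)
{
    struct mr_pool *pool =
        npages <= RDS_MR_8K_MSG_SIZE ? pool_8k : pool_1m;

    if (atomic_load(&pool->item_count) > pool->max_items_soft)
        pool->max_items_soft = pool->max_items;
    return pool;
}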
In rds_ib_free_frmr():

    struct rds_ib_mr_pool *pool = ibmr->pool;

    if (drop)
        llist_add(&ibmr->llnode, &pool->drop_list);
    else
        llist_add(&ibmr->llnode, &pool->free_list);
    atomic_add(ibmr->sg_len, &pool->free_pinned);
    atomic_inc(&pool->dirty_count);

    /* If we've pinned too many pages, request a flush */
    if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
        atomic_read(&pool->dirty_count) >= pool->max_items / 5)
        queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
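Freed MRs are cached on the pool's lists rather than destroyed, so the pool needs a back-pressure rule: flush once too many pages stay pinned or a fifth of the pool has gone dirty, which is the test guarding queue_delayed_work() above. A sketch of that predicate (field names are illustrative stand-ins):

#include <stdatomic.h>
#include <stdbool.h>

struct pool_pressure {
    atomic_long free_pinned;    /* pages pinned by cached, freed MRs */
    atomic_uint dirty_count;    /* MRs awaiting invalidation */
    long max_free_pinned;
    unsigned int max_items;
};

/* Same thresholds as rds_ib_free_frmr(): pinned-page cap reached,
 * or 20% of the pool is dirty. */
static bool pool_needs_flush(struct pool_pressure *p)
{
    return atomic_load(&p->free_pinned) >= p->max_free_pinned ||
           atomic_load(&p->dirty_count) >= p->max_items / 5;
}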
In rds_ib_post_reg_frmr():

    struct rds_ib_frmr *frmr = &ibmr->u.frmr;

    while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
        atomic_inc(&ibmr->ic->i_fastreg_wrs);
        cpu_relax();
    }

    ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
                            &off, PAGE_SIZE);
    if (unlikely(ret != ibmr->sg_dma_len))
        return ret < 0 ? ret : -EINVAL;

    if (cmpxchg(&frmr->fr_state,
                FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
        return -EBUSY;

    atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
    ...
    ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
    frmr->fr_reg = true;

    memset(&reg_wr, 0, sizeof(reg_wr));
    ...
    reg_wr.mr = frmr->mr;
    reg_wr.key = frmr->mr->rkey;
    reg_wr.wr.send_flags = IB_SEND_SIGNALED;

    ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
    if (unlikely(ret)) {
        /* Failure here can be because of -ENOMEM as well */
        rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
        atomic_inc(&ibmr->ic->i_fastreg_wrs);
        goto out;
    }

    /* Wait for the registration to complete in order to prevent an
     * access error resulting from a race between the memory region
     * being accessed while its registration is still pending.
     */
    wait_event(frmr->fr_reg_done, !frmr->fr_reg);
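Both posting paths open with the same credit dance: atomically take one send-queue slot from i_fastreg_wrs, and if that drives the counter non-positive, give it back and retry. A userspace analogue with C11 atomics (atomic_fetch_sub returns the old value, so old minus one matches the kernel's atomic_dec_return):

#include <stdatomic.h>

/* Take one fast-registration WR credit, spinning while none are free,
 * as the atomic_dec_return()/atomic_inc() loop above does. */
static void fastreg_wr_get(atomic_int *credits)
{
    while (atomic_fetch_sub(credits, 1) - 1 <= 0)
        atomic_fetch_add(credits, 1);   /* undo and retry */
}

/* The completion handler returns the credit; see
 * rds_ib_mr_cqe_handler() below. */
static void fastreg_wr_put(atomic_int *credits)
{
    atomic_fetch_add(credits, 1);
}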
In rds_ib_map_frmr():

    struct ib_device *dev = rds_ibdev->dev;
    struct rds_ib_frmr *frmr = &ibmr->u.frmr;
    int ret = 0, i;
    u32 len;

    ibmr->sg = sg;
    ibmr->sg_len = sg_len;
    ibmr->sg_dma_len = 0;
    frmr->sg_byte_len = 0;
    WARN_ON(ibmr->sg_dma_len);
    ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
                                     DMA_BIDIRECTIONAL);
    if (unlikely(!ibmr->sg_dma_len)) {
        pr_warn("RDS/IB: %s failed!\n", __func__);
        return -EBUSY;
    }

    frmr->sg_byte_len = 0;
    frmr->dma_npages = 0;
    len = 0;

    ret = -EINVAL;
    for (i = 0; i < ibmr->sg_dma_len; ++i) {
        unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
        u64 dma_addr = sg_dma_address(&ibmr->sg[i]);

        frmr->sg_byte_len += dma_len;
        if (dma_addr & ~PAGE_MASK) {
            if (i > 0)
                goto out_unmap;
            ++frmr->dma_npages;
        }

        if ((dma_addr + dma_len) & ~PAGE_MASK) {
            if (i < ibmr->sg_dma_len - 1)
                goto out_unmap;
            ++frmr->dma_npages;
        }

        len += dma_len;
    }
    frmr->dma_npages += len >> PAGE_SHIFT;

    if (frmr->dma_npages > ibmr->pool->max_pages) {
        ret = -EMSGSIZE;
        goto out_unmap;
    }
    ...
    if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
        rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
    else
        rds_ib_stats_inc(s_ib_rdma_mr_1m_used);

    return ret;

out_unmap:
    ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
                    DMA_BIDIRECTIONAL);
    ibmr->sg_dma_len = 0;
    return ret;
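The mapping loop encodes the FRWR layout rule: only the first fragment may begin mid-page, only the last may end mid-page, and everything in between must be whole pages. A standalone sketch of the same validation over plain (address, length) fragments (the frag type and the fixed 4K page size are assumptions for the demo):

#include <stdint.h>
#include <stddef.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1u << DEMO_PAGE_SHIFT)

struct frag { uint64_t addr; uint32_t len; };

/* Count pages the way rds_ib_map_frmr() does; return -1 when an
 * interior fragment is not page-aligned at both ends, since such a
 * list cannot back one virtually contiguous region. */
static long frmr_page_count(const struct frag *f, size_t n)
{
    uint64_t total = 0;
    long npages = 0;
    size_t i;

    for (i = 0; i < n; i++) {
        if (f[i].addr % DEMO_PAGE_SIZE) {       /* unaligned start */
            if (i > 0)
                return -1;
            npages++;       /* partial leading page */
        }
        if ((f[i].addr + f[i].len) % DEMO_PAGE_SIZE) {  /* unaligned end */
            if (i < n - 1)
                return -1;
            npages++;       /* partial trailing page */
        }
        total += f[i].len;
    }
    return npages + (long)(total >> DEMO_PAGE_SHIFT);
}

The caller would then compare the result against the pool's page budget, as the -EMSGSIZE check against max_pages does above.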
In rds_ib_post_inv():

    struct ib_send_wr *s_wr;
    struct rds_ib_frmr *frmr = &ibmr->u.frmr;
    struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
    int ret = -EINVAL;

    if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
        goto out;

    if (frmr->fr_state != FRMR_IS_INUSE)
        goto out;

    while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
        atomic_inc(&ibmr->ic->i_fastreg_wrs);
        cpu_relax();
    }

    frmr->fr_inv = true;
    s_wr = &frmr->fr_wr;

    memset(s_wr, 0, sizeof(*s_wr));
    s_wr->wr_id = (unsigned long)(void *)ibmr;
    s_wr->opcode = IB_WR_LOCAL_INV;
    s_wr->ex.invalidate_rkey = frmr->mr->rkey;
    s_wr->send_flags = IB_SEND_SIGNALED;

    ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
    if (unlikely(ret)) {
        rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
        frmr->fr_inv = false;
        /* enforce order of frmr->fr_inv update
         * before incrementing i_fastreg_wrs
         */
        smp_mb__before_atomic();
        atomic_inc(&ibmr->ic->i_fastreg_wrs);
        goto out;
    }

    /* Wait for the FRMR_IS_FREE (or FRMR_IS_STALE) transition; this
     * keeps a pending "IB_WR_LOCAL_INV" from racing with teardown
     * and de-registration ("ib_dereg_mr") of the corresponding
     * memory region.
     */
    wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);
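The same invalidation can be expressed with the userspace verbs API, which mirrors the kernel's work-request layout. A hedged sketch assuming a libibverbs QP and an rkey to retire (qp and wr_id come from the caller; error handling trimmed):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

/* Post a signaled LOCAL_INV, the userspace twin of the IB_WR_LOCAL_INV
 * request built in rds_ib_post_inv(). */
static int post_local_inv(struct ibv_qp *qp, uint32_t rkey, uint64_t wr_id)
{
    struct ibv_send_wr swr, *bad_wr = NULL;

    memset(&swr, 0, sizeof(swr));
    swr.wr_id = wr_id;
    swr.opcode = IBV_WR_LOCAL_INV;
    swr.invalidate_rkey = rkey;
    swr.send_flags = IBV_SEND_SIGNALED;

    return ibv_post_send(qp, &swr, &bad_wr);
}

As in the kernel path, the caller must still wait for the invalidate completion before tearing the MR down.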
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
    struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
    struct rds_ib_frmr *frmr = &ibmr->u.frmr;

    if (wc->status != IB_WC_SUCCESS) {
        rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
        if (rds_conn_up(ic->conn))
            rds_ib_conn_error(ic->conn,
                              "frmr completion <%pI6c,%pI6c> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
                              &ic->conn->c_laddr,
                              &ic->conn->c_faddr,
                              wc->status,
                              ib_wc_status_msg(wc->status),
                              wc->vendor_err);
    }

    if (frmr->fr_inv) {
        rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
        frmr->fr_inv = false;
        wake_up(&frmr->fr_inv_done);
    }

    if (frmr->fr_reg) {
        frmr->fr_reg = false;
        wake_up(&frmr->fr_reg_done);
    }

    /* enforce order of frmr->{fr_reg,fr_inv} update
     * before incrementing i_fastreg_wrs
     */
    smp_mb__before_atomic();
    atomic_inc(&ic->i_fastreg_wrs);
}
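rds_ib_post_reg_frmr() and rds_ib_post_inv() sleep on fr_reg_done/fr_inv_done until this handler clears the matching flag and wakes them. In userspace the same rendezvous is a mutex-protected flag plus a condition variable; a minimal sketch (names are illustrative):

#include <pthread.h>
#include <stdbool.h>

struct fr_done {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    bool pending;   /* plays the role of frmr->fr_reg / frmr->fr_inv */
};

/* Poster side: the analogue of
 * wait_event(frmr->fr_reg_done, !frmr->fr_reg). */
static void fr_wait(struct fr_done *d)
{
    pthread_mutex_lock(&d->lock);
    while (d->pending)
        pthread_cond_wait(&d->cond, &d->lock);
    pthread_mutex_unlock(&d->lock);
}

/* Completion side: clear the flag and wake the waiter, as the
 * fr_inv/fr_reg branches above do. */
static void fr_complete(struct fr_done *d)
{
    pthread_mutex_lock(&d->lock);
    d->pending = false;
    pthread_cond_signal(&d->cond);
    pthread_mutex_unlock(&d->lock);
}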
In rds_ib_unreg_frmr():

    list_for_each_entry(ibmr, list, unmap_list) {
        if (ibmr->sg_dma_len) {
            ret2 = rds_ib_post_inv(ibmr);
            if (ret2 && !ret)
                ret = ret2;
        }
    }
    ...
    list_for_each_entry_safe(ibmr, next, list, unmap_list) {
        *unpinned += ibmr->sg_len;
        frmr = &ibmr->u.frmr;
        if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
            /* Don't de-allocate if the MR is not free yet */
            if (frmr->fr_state == FRMR_IS_INUSE)
                continue;

            if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
                rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
            else
                rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
            list_del(&ibmr->unmap_list);
            if (frmr->mr)
                ib_dereg_mr(frmr->mr);
            kfree(ibmr);
            freed++;
        }
    }
In rds_ib_reg_frmr():

    if (!ic)
        return ERR_PTR(-EOPNOTSUPP);

    do {
        ...
        frmr = &ibmr->u.frmr;
    } while (frmr->fr_state != FRMR_IS_FREE);

    ibmr->ic = ic;
    ibmr->device = rds_ibdev;
    ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
    if (ret == 0)
        *key = frmr->mr->rkey;
In rds_ib_free_frmr_list():

    struct rds_ib_mr_pool *pool = ibmr->pool;
    struct rds_ib_frmr *frmr = &ibmr->u.frmr;

    if (frmr->fr_state == FRMR_IS_STALE)
        llist_add(&ibmr->llnode, &pool->drop_list);
    else
        llist_add(&ibmr->llnode, &pool->free_list);
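llist_add() is a lock-free push onto a singly linked list, which is why MRs can be queued to free_list/drop_list from any context without taking a lock. A C11 sketch of the same operation (types are illustrative; like the kernel helper, it reports whether the list was previously empty):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct llnode { struct llnode *next; };
struct llist_head { _Atomic(struct llnode *) first; };

/* Lock-free push: retry the CAS until the head swings from the
 * snapshot in "first" to the new node. */
static bool llist_push(struct llnode *node, struct llist_head *head)
{
    struct llnode *first = atomic_load(&head->first);

    do {
        node->next = first;
    } while (!atomic_compare_exchange_weak(&head->first, &first, node));

    return first == NULL;   /* true if the list was empty */
}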