
Searched full:mr (Results 1 – 25 of 441) sorted by relevance


/linux/drivers/infiniband/sw/rdmavt/
mr.c
11 #include "mr.h"
15 * rvt_driver_mr_init - Init MR resources per driver
62 * rvt_mr_exit - clean up MR
70 rvt_pr_err(rdi, "DMA MR not null!\n"); in rvt_mr_exit()
75 static void rvt_deinit_mregion(struct rvt_mregion *mr) in rvt_deinit_mregion() argument
77 int i = mr->mapsz; in rvt_deinit_mregion()
79 mr->mapsz = 0; in rvt_deinit_mregion()
81 kfree(mr->map[--i]); in rvt_deinit_mregion()
82 percpu_ref_exit(&mr->refcount); in rvt_deinit_mregion()
87 struct rvt_mregion *mr = container_of(ref, struct rvt_mregion, in __rvt_mregion_complete() local
[all …]
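
rvt_deinit_mregion() above resets the chunk count before freeing the map entries in reverse. A minimal userspace sketch of that teardown order, with illustrative structure names standing in for struct rvt_mregion:

    #include <stdlib.h>

    /* Illustrative stand-in for struct rvt_mregion's chunked map. */
    struct mregion {
        void **map;   /* separately allocated chunks */
        int mapsz;    /* number of chunks */
    };

    /* Mirrors rvt_deinit_mregion(): clear the count first, then free
     * the chunks in reverse order. */
    static void deinit_mregion(struct mregion *mr)
    {
        int i = mr->mapsz;

        mr->mapsz = 0;
        while (i)
            free(mr->map[--i]);
    }
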
trace_mr.h
15 #include "mr.h"
21 TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
22 TP_ARGS(mr, m, n, v, len),
24 RDI_DEV_ENTRY(ib_to_rvt(mr->pd->device))
37 RDI_DEV_ASSIGN(ib_to_rvt(mr->pd->device));
40 __entry->iova = mr->iova;
41 __entry->user_base = mr->user_base;
42 __entry->lkey = mr->lkey;
46 __entry->length = mr->length;
47 __entry->offset = mr->offset;
[all …]
/linux/drivers/infiniband/sw/rxe/
rxe_mr.c
14 * if this is the first key for an MR or MW
27 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) in mr_check_range() argument
29 switch (mr->ibmr.type) { in mr_check_range()
35 if (iova < mr->ibmr.iova || in mr_check_range()
36 iova + length > mr->ibmr.iova + mr->ibmr.length) { in mr_check_range()
37 rxe_dbg_mr(mr, "iova/length out of range\n"); in mr_check_range()
43 rxe_dbg_mr(mr, "mr type not supported\n"); in mr_check_range()
48 static void rxe_mr_init(int access, struct rxe_mr *mr) in rxe_mr_init() argument
50 u32 key = mr->elem.index << 8 | rxe_get_next_key(-1); in rxe_mr_init()
57 mr->lkey = mr->ibmr.lkey = key; in rxe_mr_init()
[all …]
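
The key built at line 50 packs the object-pool index above an 8-bit variant from rxe_get_next_key(), so a slot can be re-issued under a fresh key without moving the index. A self-contained sketch of that layout (names are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Pool index in the upper bits, per-registration variant in the
     * low byte, as in rxe_mr_init(). */
    static uint32_t make_key(uint32_t index, uint8_t variant)
    {
        return index << 8 | variant;
    }

    int main(void)
    {
        uint32_t key = make_key(0x1234, 0xab);

        printf("key=0x%08x index=0x%06x variant=0x%02x\n",
               (unsigned)key, (unsigned)(key >> 8), (unsigned)(key & 0xff));
        return 0;
    }
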
rxe_mw.c
51 struct rxe_mw *mw, struct rxe_mr *mr, int access) in rxe_check_bind_mw() argument
83 if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) { in rxe_check_bind_mw()
85 "attempt to invalidate type 2 MW by binding with NULL or zero length MR\n"); in rxe_check_bind_mw()
90 /* remaining checks only apply to a nonzero MR */ in rxe_check_bind_mw()
91 if (!mr) in rxe_check_bind_mw()
94 if (unlikely(mr->access & IB_ZERO_BASED)) { in rxe_check_bind_mw()
95 rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n"); in rxe_check_bind_mw()
100 if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) { in rxe_check_bind_mw()
102 "attempt to bind an MW to an MR without bind access\n"); in rxe_check_bind_mw()
109 !(mr->access & IB_ACCESS_LOCAL_WRITE))) { in rxe_check_bind_mw()
[all …]
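
rxe_check_bind_mw() is a cascade of access-flag checks before a memory window may be bound to an MR. A hedged sketch of the three checks visible above, using illustrative flag values rather than the kernel's IB_ACCESS_* constants:

    #include <stdbool.h>

    #define ACC_LOCAL_WRITE (1u << 0)  /* illustrative, not IB_ACCESS_* */
    #define ACC_MW_BIND     (1u << 1)
    #define ACC_ZERO_BASED  (1u << 2)

    /* The MR must not be zero-based, must have been registered with
     * bind access, and must allow local write when the window grants
     * remote write. */
    static bool may_bind(unsigned mr_access, bool mw_remote_write)
    {
        if (mr_access & ACC_ZERO_BASED)
            return false;
        if (!(mr_access & ACC_MW_BIND))
            return false;
        if (mw_remote_write && !(mr_access & ACC_LOCAL_WRITE))
            return false;
        return true;
    }
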
/linux/drivers/vdpa/mlx5/core/
mr.c
35 static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt) in populate_mtts() argument
38 int nsg = mr->nsg; in populate_mtts()
44 for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) { in populate_mtts()
47 nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size)) in populate_mtts()
64 struct mlx5_vdpa_direct_mr *mr, in fill_create_direct_mr() argument
72 MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO)); in fill_create_direct_mr()
73 MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO)); in fill_create_direct_mr()
77 MLX5_SET64(mkc, mkc, start_addr, mr->offset); in fill_create_direct_mr()
78 MLX5_SET64(mkc, mkc, len, mr->end - mr->start); in fill_create_direct_mr()
79 MLX5_SET(mkc, mkc, log_page_size, mr->log_size); in fill_create_direct_mr()
[all …]
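
populate_mtts() walks the scatterlist and emits one big-endian MTT entry per device page of (1 << log_size) bytes. A single-segment sketch of that inner loop, assuming host-side htobe64() in place of the kernel's cpu_to_be64():

    #define _DEFAULT_SOURCE   /* for htobe64() on glibc */
    #include <endian.h>
    #include <stdint.h>
    #include <stddef.h>

    /* One entry per (1 << log_size)-byte page of the DMA segment. */
    static size_t fill_mtt(uint64_t dma_addr, uint64_t dma_len,
                           unsigned log_size, uint64_t *mtt, size_t max)
    {
        uint64_t page = 1ull << log_size;
        size_t n = 0;

        for (; dma_len >= page && n < max;
             dma_addr += page, dma_len -= page)
            mtt[n++] = htobe64(dma_addr);
        return n;
    }
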
/linux/net/sunrpc/xprtrdma/
frwr_ops.c
49 struct rpcrdma_mr *mr) in frwr_cid_init() argument
51 struct rpc_rdma_cid *cid = &mr->mr_cid; in frwr_cid_init()
54 cid->ci_completion_id = mr->mr_ibmr->res.id; in frwr_cid_init()
57 static void frwr_mr_unmap(struct rpcrdma_mr *mr) in frwr_mr_unmap() argument
59 if (mr->mr_device) { in frwr_mr_unmap()
60 trace_xprtrdma_mr_unmap(mr); in frwr_mr_unmap()
61 ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents, in frwr_mr_unmap()
62 mr->mr_dir); in frwr_mr_unmap()
63 mr->mr_device = NULL; in frwr_mr_unmap()
68 * frwr_mr_release - Destroy one MR
[all …]
/linux/drivers/infiniband/hw/mlx5/
mr.c
130 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) in destroy_mkey() argument
132 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
134 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); in destroy_mkey()
142 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status); in create_mkey_warn()
305 /* Synchronously create a MR in the cache */
724 struct mlx5_ib_mr *mr; in _mlx5_mr_cache_alloc() local
727 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in _mlx5_mr_cache_alloc()
728 if (!mr) in _mlx5_mr_cache_alloc()
738 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
743 kfree(mr); in _mlx5_mr_cache_alloc()
[all …]
odp.c
174 struct mlx5_ib_mr *mr, int flags) in populate_mtt() argument
176 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in populate_mtt()
190 struct mlx5_ib_mr *mr, int flags) in mlx5_odp_populate_xlt() argument
193 populate_klm(xlt, idx, nentries, mr, flags); in mlx5_odp_populate_xlt()
195 populate_mtt(xlt, idx, nentries, mr, flags); in mlx5_odp_populate_xlt()
200 * This must be called after the mr has been removed from implicit_children.
201 * NOTE: The MR does not necessarily have to be
207 struct mlx5_ib_mr *mr = in free_implicit_child_mr_work() local
209 struct mlx5_ib_mr *imr = mr->parent; in free_implicit_child_mr_work()
211 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in free_implicit_child_mr_work()
[all …]
umr.c
383 * mlx5r_umr_revoke_mr - Fence all DMA on the MR
384 * @mr: The MR to fence
386 * Upon return the NIC will not be doing any DMA to the pages under the MR,
390 int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr) in mlx5r_umr_revoke_mr() argument
392 struct mlx5_ib_dev *dev = mr_to_mdev(mr); in mlx5r_umr_revoke_mr()
406 mlx5_mkey_variant(mr->mmkey.key)); in mlx5r_umr_revoke_mr()
408 return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false); in mlx5r_umr_revoke_mr()
429 int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd, in mlx5r_umr_rereg_pd_access() argument
432 struct mlx5_ib_dev *dev = mr_to_mdev(mr); in mlx5r_umr_rereg_pd_access()
445 mlx5_mkey_variant(mr->mmkey.key)); in mlx5r_umr_rereg_pd_access()
[all …]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
61 struct pvrdma_user_mr *mr; in pvrdma_get_dma_mr() local
71 "unsupported dma mr access flags %#x\n", acc); in pvrdma_get_dma_mr()
75 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in pvrdma_get_dma_mr()
76 if (!mr) in pvrdma_get_dma_mr()
89 kfree(mr); in pvrdma_get_dma_mr()
93 mr->mmr.mr_handle = resp->mr_handle; in pvrdma_get_dma_mr()
94 mr->ibmr.lkey = resp->lkey; in pvrdma_get_dma_mr()
95 mr->ibmr.rkey = resp->rkey; in pvrdma_get_dma_mr()
97 return &mr->ibmr; in pvrdma_get_dma_mr()
116 struct pvrdma_user_mr *mr = NULL; in pvrdma_reg_user_mr() local
[all …]
/linux/drivers/infiniband/hw/mlx4/
mr.c
60 struct mlx4_ib_mr *mr; in mlx4_ib_get_dma_mr() local
63 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mlx4_ib_get_dma_mr()
64 if (!mr) in mlx4_ib_get_dma_mr()
68 ~0ull, convert_access(acc), 0, 0, &mr->mmr); in mlx4_ib_get_dma_mr()
72 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
76 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; in mlx4_ib_get_dma_mr()
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
79 return &mr->ibmr; in mlx4_ib_get_dma_mr()
82 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
85 kfree(mr); in mlx4_ib_get_dma_mr()
[all …]
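
mlx4_ib_get_dma_mr() follows the kernel's usual unwind idiom: each setup step gains a matching error label, and a failure jumps to the deepest label already reached so teardown runs in reverse order. A generic sketch with hypothetical alloc/enable steps:

    #include <stdlib.h>

    struct mr { void *hw; };                      /* hypothetical resource */

    static void *hw_alloc(void) { return malloc(1); }
    static int hw_enable(void *hw) { return hw ? 0 : -1; }
    static void hw_free(void *hw) { free(hw); }

    static struct mr *get_dma_mr(void)
    {
        struct mr *mr = calloc(1, sizeof(*mr));

        if (!mr)
            return NULL;
        mr->hw = hw_alloc();
        if (!mr->hw)
            goto err_free;
        if (hw_enable(mr->hw))
            goto err_hw;
        return mr;

    err_hw:
        hw_free(mr->hw);
    err_free:
        free(mr);
        return NULL;
    }
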
/linux/drivers/scsi/
mesh.c
305 volatile struct mesh_regs __iomem *mr = ms->mesh; in mesh_dump_regs() local
311 ms, mr, md); in mesh_dump_regs()
314 (mr->count_hi << 8) + mr->count_lo, mr->sequence, in mesh_dump_regs()
315 (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count, in mesh_dump_regs()
316 mr->exception, mr->error, mr->intr_mask, mr->interrupt, in mesh_dump_regs()
317 mr->sync_params); in mesh_dump_regs()
318 while(in_8(&mr->fifo_count)) in mesh_dump_regs()
319 printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo)); in mesh_dump_regs()
339 static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr) in mesh_flush_io() argument
341 (void)in_8(&mr->mesh_id); in mesh_flush_io()
[all …]
/linux/drivers/infiniband/hw/hns/
hns_roce_mr.c
52 static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) in alloc_mr_key() argument
59 /* Allocate a key for mr from mr_table */ in alloc_mr_key()
63 ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id); in alloc_mr_key()
67 mr->key = hw_index_to_key(id); /* MR key */ in alloc_mr_key()
82 static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) in free_mr_key() argument
84 unsigned long obj = key_to_hw_index(mr->key); in free_mr_key()
90 static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, in alloc_mr_pbl() argument
94 bool is_fast = mr->type == MR_TYPE_FRMR; in alloc_mr_pbl()
98 mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num; in alloc_mr_pbl()
101 buf_attr.region[0].size = mr->size; in alloc_mr_pbl()
[all …]
/linux/drivers/rtc/
rtc-at91sam9.c
133 u32 offset, alarm, mr; in at91_rtc_settime() local
140 mr = rtt_readl(rtc, MR); in at91_rtc_settime()
143 rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); in at91_rtc_settime()
164 mr &= ~AT91_RTT_ALMIEN; in at91_rtc_settime()
170 rtt_writel(rtc, MR, mr | AT91_RTT_RTTRST); in at91_rtc_settime()
192 if (rtt_readl(rtc, MR) & AT91_RTT_ALMIEN) in at91_rtc_readalarm()
205 u32 mr; in at91_rtc_setalarm() local
214 mr = rtt_readl(rtc, MR); in at91_rtc_setalarm()
215 rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN); in at91_rtc_setalarm()
226 rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN); in at91_rtc_setalarm()
[all …]
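
The RTC code never writes MR blindly: it reads the mode register, clears the alarm-interrupt enable while the alarm is reprogrammed, then writes the saved value back with the bit set. A sketch of that read-modify-write shape with plain variables standing in for the memory-mapped registers (the ALMIEN bit position is an assumption):

    #include <stdint.h>

    #define ALMIEN (1u << 16)   /* assumed AT91_RTT_ALMIEN position */

    static uint32_t rtt_mr;     /* stands in for the MMIO mode register */
    static uint32_t rtt_ar;     /* stands in for the alarm register */

    static void set_alarm(uint32_t alarm)
    {
        uint32_t mr = rtt_mr;

        rtt_mr = mr & ~ALMIEN;  /* quiesce the alarm irq first */
        rtt_ar = alarm;         /* program the new alarm value */
        rtt_mr = mr | ALMIEN;   /* re-enable from the saved value */
    }
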
/linux/arch/powerpc/platforms/pseries/
hvCall.S
64 mr r4,r3; \
65 mr r3,r0; \
77 mr r5,BUFREG; \
160 mr r4,r5
161 mr r5,r6
162 mr r6,r7
163 mr r7,r8
164 mr r8,r9
165 mr r9,r10
187 mr r4,r5
[all …]
/linux/drivers/infiniband/core/
uverbs_std_types_mr.c
95 struct ib_mr *mr; in UVERBS_HANDLER() local
127 mr = pd->device->ops.reg_dm_mr(pd, dm, &attr, attrs); in UVERBS_HANDLER()
128 if (IS_ERR(mr)) in UVERBS_HANDLER()
129 return PTR_ERR(mr); in UVERBS_HANDLER()
131 mr->device = pd->device; in UVERBS_HANDLER()
132 mr->pd = pd; in UVERBS_HANDLER()
133 mr->type = IB_MR_TYPE_DM; in UVERBS_HANDLER()
134 mr->dm = dm; in UVERBS_HANDLER()
135 mr->uobject = uobj; in UVERBS_HANDLER()
139 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); in UVERBS_HANDLER()
[all …]
mr_pool.c
10 struct ib_mr *mr; in ib_mr_pool_get() local
14 mr = list_first_entry_or_null(list, struct ib_mr, qp_entry); in ib_mr_pool_get()
15 if (mr) { in ib_mr_pool_get()
16 list_del(&mr->qp_entry); in ib_mr_pool_get()
21 return mr; in ib_mr_pool_get()
25 void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr) in ib_mr_pool_put() argument
30 list_add(&mr->qp_entry, list); in ib_mr_pool_put()
39 struct ib_mr *mr; in ib_mr_pool_init() local
45 mr = ib_alloc_mr_integrity(qp->pd, max_num_sg, in ib_mr_pool_init()
48 mr = ib_alloc_mr(qp->pd, type, max_num_sg); in ib_mr_pool_init()
[all …]
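
ib_mr_pool_get() and ib_mr_pool_put() implement a LIFO free list over pre-allocated MRs: get pops the first entry or returns NULL, put pushes the entry back on the front. An unlocked userspace sketch of the same shape (the kernel takes a spinlock around both operations):

    #include <stddef.h>

    struct item { struct item *next; };
    struct pool { struct item *head; };

    /* Pop the first free entry; NULL means the pool is exhausted. */
    static struct item *pool_get(struct pool *p)
    {
        struct item *it = p->head;

        if (it)
            p->head = it->next;
        return it;
    }

    /* Return the entry to the front of the free list. */
    static void pool_put(struct pool *p, struct item *it)
    {
        it->next = p->head;
        p->head = it;
    }
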
/linux/net/rds/
rdma.c
70 struct rds_mr *mr; in rds_mr_tree_walk() local
74 mr = rb_entry(parent, struct rds_mr, r_rb_node); in rds_mr_tree_walk()
76 if (key < mr->r_key) in rds_mr_tree_walk()
78 else if (key > mr->r_key) in rds_mr_tree_walk()
81 return mr; in rds_mr_tree_walk()
93 * Destroy the transport-specific part of a MR.
95 static void rds_destroy_mr(struct rds_mr *mr) in rds_destroy_mr() argument
97 struct rds_sock *rs = mr->r_sock; in rds_destroy_mr()
101 rdsdebug("RDS: destroy mr key is %x refcnt %u\n", in rds_destroy_mr()
102 mr->r_key, kref_read(&mr->r_kref)); in rds_destroy_mr()
[all …]
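
rds_mr_tree_walk() is a standard keyed descent: go left on smaller keys, right on larger, stop on a match. The same walk over a plain binary search tree, as a self-contained sketch of the rb_node traversal above:

    #include <stdint.h>
    #include <stddef.h>

    struct node {
        uint32_t key;
        struct node *left, *right;
    };

    /* Mirrors the r_key comparison in rds_mr_tree_walk(). */
    static struct node *tree_walk(struct node *n, uint32_t key)
    {
        while (n) {
            if (key < n->key)
                n = n->left;
            else if (key > n->key)
                n = n->right;
            else
                return n;
        }
        return NULL;
    }
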
/linux/drivers/infiniband/hw/mana/
mr.c
28 static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr, in mana_ib_gd_create_mr() argument
58 ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err, in mana_ib_gd_create_mr()
66 mr->ibmr.lkey = resp.lkey; in mana_ib_gd_create_mr()
67 mr->ibmr.rkey = resp.rkey; in mana_ib_gd_create_mr()
68 mr->mr_handle = resp.mr_handle; in mana_ib_gd_create_mr()
87 dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err, in mana_ib_gd_destroy_mr()
105 struct mana_ib_mr *mr; in mana_ib_reg_user_mr() local
119 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in mana_ib_reg_user_mr()
120 if (!mr) in mana_ib_reg_user_mr()
123 mr->umem = ib_umem_get(ibdev, start, length, access_flags); in mana_ib_reg_user_mr()
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
gddr5.c
75 ram->mr[0] &= ~0xf7f; in nvkm_gddr5_calc()
76 ram->mr[0] |= (WR & 0x0f) << 8; in nvkm_gddr5_calc()
77 ram->mr[0] |= (CL & 0x0f) << 3; in nvkm_gddr5_calc()
78 ram->mr[0] |= (WL & 0x07) << 0; in nvkm_gddr5_calc()
80 ram->mr[1] &= ~0x0bf; in nvkm_gddr5_calc()
81 ram->mr[1] |= (xd & 0x01) << 7; in nvkm_gddr5_calc()
82 ram->mr[1] |= (at[0] & 0x03) << 4; in nvkm_gddr5_calc()
83 ram->mr[1] |= (dt & 0x03) << 2; in nvkm_gddr5_calc()
84 ram->mr[1] |= (ds & 0x03) << 0; in nvkm_gddr5_calc()
89 ram->mr1_nuts = ram->mr[1]; in nvkm_gddr5_calc()
[all …]
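
nvkm_gddr5_calc() packs each timing parameter into its field of mode register 0: clear the combined mask 0xf7f, then OR in WR, CL and WL at their offsets. The same packing as a standalone helper, with field widths taken from the snippet:

    #include <stdint.h>

    /* WR at bits 11:8, CL at bits 6:3, WL at bits 2:0 -- together the
     * 0xf7f mask cleared above. */
    static uint32_t pack_mr0(uint32_t mr0, unsigned wr, unsigned cl,
                             unsigned wl)
    {
        mr0 &= ~0xf7fu;
        mr0 |= (wr & 0x0f) << 8;
        mr0 |= (cl & 0x0f) << 3;
        mr0 |= (wl & 0x07) << 0;
        return mr0;
    }
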
/linux/drivers/net/ethernet/mellanox/mlxsw/
spectrum_mr.c
254 struct mlxsw_sp_mr *mr = mlxsw_sp->mr; in mlxsw_sp_mr_route_write() local
264 mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size, in mlxsw_sp_mr_route_write()
274 err = mr->mr_ops->route_create(mlxsw_sp, mr->priv, in mlxsw_sp_mr_route_write()
280 err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv, in mlxsw_sp_mr_route_write()
292 struct mlxsw_sp_mr *mr = mlxsw_sp->mr; in mlxsw_sp_mr_route_erase() local
294 mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv); in mlxsw_sp_mr_route_erase()
485 struct mlxsw_sp_mr *mr = mlxsw_sp->mr; in mlxsw_sp_mr_route_ivif_resolve() local
495 err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv, in mlxsw_sp_mr_route_ivif_resolve()
500 err = mr->mr_ops->route_action_update(mlxsw_sp, in mlxsw_sp_mr_route_ivif_resolve()
519 struct mlxsw_sp_mr *mr = mlxsw_sp->mr; in mlxsw_sp_mr_route_ivif_unresolve() local
[all …]
/linux/drivers/infiniband/ulp/iser/
iser_memory.c
160 * The signature MR cannot be invalidated and reused without checking. in iser_unreg_mem_fastreg()
162 * SCSI-Response is received. And the signature MR is not checked if in iser_unreg_mem_fastreg()
164 * handling. That's why we must check the signature MR here before in iser_unreg_mem_fastreg()
236 static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr, in iser_inv_rkey() argument
241 inv_wr->ex.invalidate_rkey = mr->rkey; in iser_inv_rkey()
255 struct ib_mr *mr = rsc->sig_mr; in iser_reg_sig_mr() local
256 struct ib_sig_attrs *sig_attrs = mr->sig_attrs; in iser_reg_sig_mr()
268 iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr); in iser_reg_sig_mr()
270 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); in iser_reg_sig_mr()
272 ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL, in iser_reg_sig_mr()
[all …]
/linux/drivers/sh/intc/
handle.c
44 struct intc_mask_reg *mr = desc->hw.mask_regs; in _intc_mask_data() local
48 while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) { in _intc_mask_data()
49 mr = desc->hw.mask_regs + *reg_idx; in _intc_mask_data()
51 for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) { in _intc_mask_data()
52 if (mr->enum_ids[*fld_idx] != enum_id) in _intc_mask_data()
55 if (mr->set_reg && mr->clr_reg) { in _intc_mask_data()
58 reg_e = mr->clr_reg; in _intc_mask_data()
59 reg_d = mr->set_reg; in _intc_mask_data()
62 if (mr->set_reg) { in _intc_mask_data()
64 reg_e = mr->set_reg; in _intc_mask_data()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx4/
mr.c
306 mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); in mlx4_mr_hw_get_mpt()
419 int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc_reserved() argument
421 mr->iova = iova; in mlx4_mr_alloc_reserved()
422 mr->size = size; in mlx4_mr_alloc_reserved()
423 mr->pd = pd; in mlx4_mr_alloc_reserved()
424 mr->access = access; in mlx4_mr_alloc_reserved()
425 mr->enabled = MLX4_MPT_DISABLED; in mlx4_mr_alloc_reserved()
426 mr->key = hw_index_to_key(mridx); in mlx4_mr_alloc_reserved()
428 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc_reserved()
476 mlx4_warn(dev, "Failed to release mr index:%d\n", in mlx4_mpt_release()
[all …]
/linux/drivers/infiniband/hw/erdma/
erdma_verbs.c
131 static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr) in regmr_cmd() argument
133 struct erdma_pd *pd = to_epd(mr->ibmr.pd); in regmr_cmd()
139 if (mr->type == ERDMA_MR_TYPE_FRMR || in regmr_cmd()
140 mr->mem.page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES) { in regmr_cmd()
141 if (mr->mem.mtt->continuous) { in regmr_cmd()
142 req.phy_addr[0] = mr->mem.mtt->buf_dma; in regmr_cmd()
145 req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist); in regmr_cmd()
146 mtt_level = mr->mem.mtt->level; in regmr_cmd()
148 } else if (mr->type != ERDMA_MR_TYPE_DMA) { in regmr_cmd()
149 memcpy(req.phy_addr, mr->mem.mtt->buf, in regmr_cmd()
[all …]
