
Searched refs:rkey (Results 1 – 25 of 94) sorted by relevance


/linux/net/dns_resolver/
dns_query.c
79 struct key *rkey; in dns_query() local
128 rkey = request_key_net(&key_type_dns_resolver, desc, net, options); in dns_query()
131 if (IS_ERR(rkey)) { in dns_query()
132 ret = PTR_ERR(rkey); in dns_query()
136 down_read(&rkey->sem); in dns_query()
137 set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags); in dns_query()
138 rkey->perm |= KEY_USR_VIEW; in dns_query()
140 ret = key_validate(rkey); in dns_query()
145 ret = PTR_ERR(rkey->payload.data[dns_key_error]); in dns_query()
149 upayload = user_key_payload_locked(rkey); in dns_query()
[all …]
/linux/drivers/infiniband/sw/rxe/
rxe_mw.c
31 mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); in rxe_alloc_mw()
138 u32 key = wqe->wr.wr.mw.rkey & 0xff; in rxe_do_bind_mw()
140 mw->rkey = (mw->rkey & ~0xff) | key; in rxe_do_bind_mw()
180 if (unlikely(mw->rkey != mw_rkey)) { in rxe_bind_mw()
258 int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) in rxe_invalidate_mw() argument
264 mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8); in rxe_invalidate_mw()
270 if (rkey != mw->rkey) { in rxe_invalidate_mw()
290 struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey) in rxe_lookup_mw() argument
295 int index = rkey >> 8; in rxe_lookup_mw()
301 if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd || in rxe_lookup_mw()
rxe_hdr.h
528 __be32 rkey; member
550 return be32_to_cpu(reth->rkey); in __reth_rkey()
553 static inline void __reth_set_rkey(void *arg, u32 rkey) in __reth_set_rkey() argument
557 reth->rkey = cpu_to_be32(rkey); in __reth_set_rkey()
592 static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey) in reth_set_rkey() argument
595 rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey); in reth_set_rkey()
661 __be32 rkey; member
684 return be32_to_cpu(atmeth->rkey); in __atmeth_rkey()
687 static inline void __atmeth_set_rkey(void *arg, u32 rkey) in __atmeth_set_rkey() argument
691 atmeth->rkey = cpu_to_be32(rkey); in __atmeth_set_rkey()
[all …]
rxe_resp.c
413 qp->resp.rkey = 0; in qp_resp_from_reth()
415 qp->resp.rkey = reth_rkey(pkt); in qp_resp_from_reth()
422 qp->resp.rkey = atmeth_rkey(pkt); in qp_resp_from_atmeth()
436 u32 rkey; in check_rkey() local
481 rkey = qp->resp.rkey; in check_rkey()
485 if (rkey_is_mw(rkey)) { in check_rkey()
486 mw = rxe_lookup_mw(qp, access, rkey); in check_rkey()
488 rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey); in check_rkey()
507 mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); in check_rkey()
509 rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey); in check_rkey()
[all …]
rxe_verbs.h
149 u32 rkey; member
182 u32 rkey; member
294 static inline int rkey_is_mw(u32 rkey) in rkey_is_mw() argument
296 u32 index = rkey >> 8; in rkey_is_mw()
308 u32 rkey; member
341 u32 rkey; member
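
The rxe excerpts above all rely on one rkey layout: the object's pool index lives in bits 31:8 and a variable 8-bit key in the low byte. A minimal sketch of that convention follows; make_rkey() and rkey_index() are illustrative names, not kernel functions.

#include <linux/types.h>

/* Hedged sketch mirroring rxe_alloc_mw() and rxe_lookup_mw() above. */
static inline u32 make_rkey(u32 index, u8 var_key)
{
	return (index << 8) | var_key;	/* pool index in bits 31:8 */
}

static inline u32 rkey_index(u32 rkey)
{
	return rkey >> 8;		/* recover the pool index */
}

Rebinding a window swaps only the low byte and keeps the index, as the rxe_do_bind_mw() excerpt shows: mw->rkey = (mw->rkey & ~0xff) | key.
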
/linux/drivers/infiniband/ulp/iser/
iser_memory.c
135 reg->rkey = device->pd->unsafe_global_rkey; in iser_reg_dma()
137 reg->rkey = 0; in iser_reg_dma()
142 " length=0x%x\n", reg->sge.lkey, reg->rkey, in iser_reg_dma()
241 inv_wr->ex.invalidate_rkey = mr->rkey; in iser_inv_rkey()
270 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); in iser_reg_sig_mr()
287 wr->key = mr->rkey; in iser_reg_sig_mr()
294 sig_reg->rkey = mr->rkey; in iser_reg_sig_mr()
299 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr, in iser_reg_sig_mr()
319 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); in iser_fast_reg_mr()
334 wr->key = mr->rkey; in iser_fast_reg_mr()
[all …]
iser_initiator.c
70 hdr->read_stag = cpu_to_be32(mem_reg->rkey); in iser_prepare_read_cmd()
74 task->itt, mem_reg->rkey, in iser_prepare_read_cmd()
117 hdr->write_stag = cpu_to_be32(mem_reg->rkey); in iser_prepare_write_cmd()
122 task->itt, mem_reg->rkey, in iser_prepare_write_cmd()
576 static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) in iser_inv_desc() argument
578 if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) || in iser_inv_desc()
579 (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) { in iser_inv_desc()
580 iser_err("Bogus remote invalidation for rkey %#x\n", rkey); in iser_inv_desc()
597 u32 rkey = wc->ex.invalidate_rkey; in iser_check_remote_inv() local
600 iser_conn, rkey); in iser_check_remote_inv()
[all …]
/linux/fs/nfs/
nfs4idmap.c
283 struct key *rkey = ERR_PTR(-EAGAIN); in nfs_idmap_request_key() local
291 rkey = request_key(&key_type_id_resolver, desc, ""); in nfs_idmap_request_key()
292 if (IS_ERR(rkey)) { in nfs_idmap_request_key()
294 rkey = request_key_with_auxdata(&key_type_id_resolver_legacy, in nfs_idmap_request_key()
298 if (!IS_ERR(rkey)) in nfs_idmap_request_key()
299 set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags); in nfs_idmap_request_key()
302 return rkey; in nfs_idmap_request_key()
310 struct key *rkey; in nfs_idmap_get_key() local
315 rkey = nfs_idmap_request_key(name, namelen, type, idmap); in nfs_idmap_get_key()
318 if (IS_ERR(rkey)) { in nfs_idmap_get_key()
[all …]
/linux/drivers/infiniband/core/
rw.c
126 u64 remote_addr, u32 rkey, enum dma_data_direction dir) in rdma_rw_init_mr_wrs() argument
162 reg->wr.rkey = rkey; in rdma_rw_init_mr_wrs()
197 u64 remote_addr, u32 rkey, enum dma_data_direction dir) in rdma_rw_init_map_wrs() argument
223 rdma_wr->rkey = rkey; in rdma_rw_init_map_wrs()
252 struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey, in rdma_rw_init_single_wr() argument
271 rdma_wr->rkey = rkey; in rdma_rw_init_single_wr()
294 u64 remote_addr, u32 rkey, enum dma_data_direction dir) in rdma_rw_ctx_init() argument
328 sg_offset, remote_addr, rkey, dir); in rdma_rw_ctx_init()
331 remote_addr, rkey, dir); in rdma_rw_ctx_init()
334 remote_addr, rkey, dir); in rdma_rw_ctx_init()
[all …]
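
rw.c is the generic RDMA read/write API: a caller passes a local scatterlist plus the peer's (remote_addr, rkey) pair, and rdma_rw_ctx_init() picks the MR-based, multi-WR, or single-WR strategy shown above. A hedged usage sketch, assuming ctx, qp, port_num, sgl, and nents already exist in the caller:

#include <rdma/rw.h>

/* Sketch: prepare WRs for an RDMA READ of the peer's (remote_addr, rkey)
 * region into a local scatterlist. Returns the WR count or a negative errno. */
ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, nents,
		       0 /* sg_offset */, remote_addr, rkey,
		       DMA_FROM_DEVICE);
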
uverbs_std_types_mr.c
152 &mr->rkey, sizeof(mr->rkey)); in UVERBS_HANDLER()
169 &mr->rkey, sizeof(mr->rkey)); in UVERBS_HANDLER()
265 &mr->rkey, sizeof(mr->rkey)); in UVERBS_HANDLER()
/linux/include/uapi/rdma/
rdma_user_rxe.h
88 __u32 rkey; member
94 __u32 rkey; member
101 __u32 rkey; member
118 __u32 rkey; member
vmw_pvrdma-abi.h
251 __u32 rkey; member
258 __u32 rkey; member
264 __u32 rkey; member
277 __u32 rkey; member
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
95 mr->ibmr.rkey = resp->rkey; in pvrdma_get_dma_mr()
183 mr->ibmr.rkey = resp->rkey; in pvrdma_reg_user_mr()
255 mr->ibmr.rkey = resp->rkey; in pvrdma_alloc_mr()
/linux/drivers/infiniband/sw/siw/
siw_qp_tx.c
138 c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
183 c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey); in siw_qp_prepare_tx()
195 c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
209 c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey); in siw_qp_prepare_tx()
935 siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey); in siw_fastreg_mr()
938 pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); in siw_fastreg_mr()
942 if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) { in siw_fastreg_mr()
943 pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey); in siw_fastreg_mr()
947 mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); in siw_fastreg_mr()
949 pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); in siw_fastreg_mr()
[all …]
/linux/drivers/nvme/target/
pr.c
240 if (reg->rkey != nrkey) in nvmet_pr_register()
248 new->rkey = nrkey; in nvmet_pr_register()
301 if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) { in nvmet_pr_unregister()
316 reg->rkey = *(u64 *)attr; in nvmet_pr_update_reg_rkey()
338 new->rkey = holder->rkey; in nvmet_pr_update_reg_attr()
364 if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) in nvmet_pr_replace()
469 if (reg->rkey == prkey) { in nvmet_pr_unreg_all_host_by_prkey()
492 if (reg->rkey == prkey && in nvmet_pr_unreg_all_others_by_prkey()
573 if (prkey == holder->rkey) { in nvmet_pr_preempt()
679 reg->rkey == le64_to_cpu(d->crkey)) { in nvmet_execute_pr_acquire()
[all …]
/linux/drivers/infiniband/ulp/rtrs/
README
51 then pass it to the block layer. A new rkey is generated and registered for the
53 The new rkey is sent back to the client along with the IO result.
144 using the IMM field; the server invalidates the rkey associated with the memory chunks
149 inflight IO and for the error code. The new rkey is sent back using a
150 SEND_WITH_IMM WR; when the client receives the new rkey message, it validates
151 it and finishes the IO after updating the rkey for the rbuffer, then posts
186 The server invalidates the rkey associated with the memory chunks first; when it finishes,
192 outstanding inflight IO and the error code. The new rkey is sent back using a
193 SEND_WITH_IMM WR; when the client receives the new rkey message, it validates
194 it and finishes the IO after updating the rkey for the rbuffer, then posts
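
The invalidate-then-regenerate step this README describes also shows up in the iser_memory.c and rtrs-srv.c excerpts on this page. A hedged sketch of the pattern, assuming a fast-registration MR that the caller re-registers (IB_WR_REG_MR) before reuse, with error handling elided:

#include <rdma/ib_verbs.h>

/* Sketch only, not the RTRS code: retire the old rkey, derive the next. */
static void refresh_rkey(struct ib_qp *qp, struct ib_mr *mr)
{
	struct ib_send_wr inv_wr = { .opcode = IB_WR_LOCAL_INV };
	const struct ib_send_wr *bad_wr;

	inv_wr.ex.invalidate_rkey = mr->rkey;	/* old rkey becomes unusable */
	ib_post_send(qp, &inv_wr, &bad_wr);

	/* Same MR index, incremented 8-bit key (cf. iser_fast_reg_mr()). */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
}
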
rtrs-srv.c
211 u32 rkey = 0; in rdma_write_sg() local
242 wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key); in rdma_write_sg()
243 if (rkey == 0) in rdma_write_sg()
244 rkey = wr->rkey; in rdma_write_sg()
247 WARN_ON_ONCE(rkey != wr->rkey); in rdma_write_sg()
280 inv_wr.ex.invalidate_rkey = rkey; in rdma_write_sg()
293 rwr.key = srv_mr->mr->rkey; in rdma_write_sg()
299 msg->rkey = cpu_to_le32(srv_mr->mr->rkey); in rdma_write_sg()
414 rwr.key = srv_mr->mr->rkey; in send_io_resp_imm()
420 msg->rkey = cpu_to_le32(srv_mr->mr->rkey); in send_io_resp_imm()
[all …]
/linux/drivers/infiniband/sw/rdmavt/
mr.c
260 mr->ibmr.rkey = mr->mr.lkey; in __rvt_alloc_mr()
630 ibmr->rkey = key; in rvt_fast_reg_mr()
647 int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey) in rvt_invalidate_rkey() argument
653 if (rkey == 0) in rvt_invalidate_rkey()
658 rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]); in rvt_invalidate_rkey()
659 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) in rvt_invalidate_rkey()
827 u32 len, u64 vaddr, u32 rkey, int acc) in rvt_rkey_ok() argument
840 if (rkey == 0) { in rvt_rkey_ok()
861 mr = rcu_dereference(rkt->table[rkey >> rkt->shift]); in rvt_rkey_ok()
869 mr->lkey != rkey || qp->ibqp.pd != mr->pd)) in rvt_rkey_ok()
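
Both rvt excerpts above perform the same lookup: the lkey table is indexed by the top lkey_table_size bits of the key, so shift == 32 - lkey_table_size. A hedged sketch with rkey_maps_to_mr() as an illustrative helper, assuming the caller holds rcu_read_lock():

#include <rdma/rdmavt_mr.h>
#include <rdma/rdmavt_qp.h>

/* Sketch, not the rdmavt code: does an incoming rkey name a live MR
 * in this QP's protection domain? Mirrors rvt_rkey_ok() above. */
static bool rkey_maps_to_mr(struct rvt_lkey_table *rkt,
			    struct rvt_qp *qp, u32 rkey)
{
	struct rvt_mregion *mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);

	return mr && mr->lkey == rkey && qp->ibqp.pd == mr->pd;
}
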
/linux/arch/arm64/crypto/
sm4-ce-glue.c
29 asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
30 asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
32 asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src,
34 asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src,
36 asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src,
38 asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src,
40 asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
109 static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey) in sm4_ecb_do_crypt() argument
126 sm4_ce_crypt(rkey, dst, src, nblks); in sm4_ecb_do_crypt()
sm4-neon-glue.c
21 asmlinkage void sm4_neon_crypt(const u32 *rkey, u8 *dst, const u8 *src,
36 static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey) in sm4_ecb_do_crypt() argument
53 sm4_neon_crypt(rkey, dst, src, nblocks); in sm4_ecb_do_crypt()
/linux/drivers/infiniband/hw/qib/
qib_rc.c
350 ohdr->u.rc.reth.rkey = in qib_make_rc_req()
351 cpu_to_be32(wqe->rdma_wr.rkey); in qib_make_rc_req()
393 ohdr->u.rc.reth.rkey = in qib_make_rc_req()
394 cpu_to_be32(wqe->rdma_wr.rkey); in qib_make_rc_req()
435 ohdr->u.atomic_eth.rkey = cpu_to_be32( in qib_make_rc_req()
436 wqe->atomic_wr.rkey); in qib_make_rc_req()
555 ohdr->u.rc.reth.rkey = in qib_make_rc_req()
556 cpu_to_be32(wqe->rdma_wr.rkey); in qib_make_rc_req()
1608 u32 rkey = be32_to_cpu(reth->rkey); in qib_rc_rcv_error() local
1612 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, in qib_rc_rcv_error()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/
dr_send.c
31 u32 rkey; member
401 u32 rkey, in dr_rdma_handle_icm_write_segments() argument
411 wq_raddr->rkey = cpu_to_be32(rkey); in dr_rdma_handle_icm_write_segments()
438 u32 rkey, struct dr_data_seg *data_seg, in dr_rdma_segments() argument
455 rkey, data_seg, &size); in dr_rdma_segments()
487 dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, in dr_post_send()
489 dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, in dr_post_send()
492 dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, in dr_post_send()
707 send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk); in mlx5dr_send_postsend_ste()
767 send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk); in mlx5dr_send_postsend_htbl()
[all …]
/linux/drivers/nvme/host/
pr.c
258 le64_to_cpu(rse->regctl_eds[i].rkey); in nvme_pr_read_keys()
263 keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey); in nvme_pr_read_keys()
315 resv->key = le64_to_cpu(rse->regctl_eds[i].rkey); in nvme_pr_read_reservation()
323 resv->key = le64_to_cpu(rs->regctl_ds[i].rkey); in nvme_pr_read_reservation()
/linux/include/rdma/
rw.h
47 u64 remote_addr, u32 rkey, enum dma_data_direction dir);
55 struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
/linux/drivers/infiniband/hw/hns/
hns_roce_mr.c
219 mr->ibmr.rkey = mr->ibmr.lkey = mr->key; in hns_roce_get_dma_mr()
262 mr->ibmr.rkey = mr->ibmr.lkey = mr->key; in hns_roce_reg_user_mr()
411 mr->ibmr.rkey = mr->ibmr.lkey = mr->key; in hns_roce_alloc_mr()
492 key_to_hw_index(mw->rkey) & in hns_roce_mw_free()
498 key_to_hw_index(mw->rkey)); in hns_roce_mw_free()
502 (int)key_to_hw_index(mw->rkey)); in hns_roce_mw_free()
511 unsigned long mtpt_idx = key_to_hw_index(mw->rkey); in hns_roce_mw_enable()
570 mw->rkey = hw_index_to_key(id); in hns_roce_alloc_mw()
572 ibmw->rkey = mw->rkey; in hns_roce_alloc_mw()
