
Searched refs:rkey (Results 1 – 25 of 71) sorted by relevance


/linux/net/dns_resolver/
dns_query.c:79 struct key *rkey; in dns_query() local
127 rkey = request_key_net(&key_type_dns_resolver, desc, net, options); in dns_query()
129 if (IS_ERR(rkey)) { in dns_query()
130 ret = PTR_ERR(rkey); in dns_query()
134 down_read(&rkey->sem); in dns_query()
135 set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags); in dns_query()
136 rkey->perm |= KEY_USR_VIEW; in dns_query()
138 ret = key_validate(rkey); in dns_query()
143 ret = PTR_ERR(rkey->payload.data[dns_key_error]); in dns_query()
147 upayload = user_key_payload_locked(rkey); in dns_query()
[all …]
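The dns_query() hits above follow the kernel keyring upcall pattern: request a key, take the key semaphore, validate it, then read the payload. A minimal sketch of that pattern (example_read_key_payload() is a hypothetical caller, not kernel code):

#include <linux/err.h>
#include <linux/key.h>
#include <keys/user-type.h>

static int example_read_key_payload(struct key_type *type, const char *desc)
{
        const struct user_key_payload *upayload;
        struct key *rkey;
        int ret;

        rkey = request_key(type, desc, NULL);   /* may upcall to userspace */
        if (IS_ERR(rkey))
                return PTR_ERR(rkey);

        down_read(&rkey->sem);                  /* payload is read under the key sem */
        ret = key_validate(rkey);               /* reject expired or revoked keys */
        if (!ret) {
                upayload = user_key_payload_locked(rkey);
                ret = upayload->datalen;        /* data[] valid until up_read() */
        }
        up_read(&rkey->sem);
        key_put(rkey);
        return ret;
}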
/linux/drivers/infiniband/sw/rxe/
rxe_mw.c:31 mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); in rxe_alloc_mw()
138 u32 key = wqe->wr.wr.mw.rkey & 0xff; in rxe_do_bind_mw()
140 mw->rkey = (mw->rkey & ~0xff) | key; in rxe_do_bind_mw()
180 if (unlikely(mw->rkey != mw_rkey)) { in rxe_bind_mw()
258 int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) in rxe_invalidate_mw() argument
264 mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8); in rxe_invalidate_mw()
270 if (rkey != mw->rkey) { in rxe_invalidate_mw()
290 struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey) in rxe_lookup_mw() argument
295 int index = rkey >> 8; in rxe_lookup_mw()
301 if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd || in rxe_lookup_mw()
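In rxe an rkey packs an object-pool index into the upper 24 bits and an 8-bit consumer-owned key into the low byte, which is why the hits above shift by 8 and mask with 0xff. A small illustrative pair of helpers (the example_ names are not part of the driver):

#include <linux/types.h>

static inline u32 example_make_rkey(u32 index, u8 key)
{
        return (index << 8) | key;      /* as in rxe_alloc_mw() above */
}

static inline u32 example_rkey_index(u32 rkey)
{
        return rkey >> 8;               /* what rxe_pool_get_index() is given */
}

static inline u8 example_rkey_variant(u32 rkey)
{
        return rkey & 0xff;             /* replaced on bind, see rxe_do_bind_mw() */
}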
rxe_hdr.h:528 __be32 rkey; member
550 return be32_to_cpu(reth->rkey); in __reth_rkey()
553 static inline void __reth_set_rkey(void *arg, u32 rkey) in __reth_set_rkey() argument
557 reth->rkey = cpu_to_be32(rkey); in __reth_set_rkey()
592 static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey) in reth_set_rkey() argument
595 rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey); in reth_set_rkey()
661 __be32 rkey; member
684 return be32_to_cpu(atmeth->rkey); in __atmeth_rkey()
687 static inline void __atmeth_set_rkey(void *arg, u32 rkey) in __atmeth_set_rkey() argument
691 atmeth->rkey = cpu_to_be32(rkey); in __atmeth_set_rkey()
[all …]
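The rxe_hdr.h hits show that the rkey travels in the RETH and AtomicETH headers in network byte order, hence the cpu_to_be32()/be32_to_cpu() accessors. An illustrative RETH-style fragment (the example_ names are stand-ins for the driver's packed header structs):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_reth {
        __be64 va;      /* remote virtual address */
        __be32 rkey;    /* remote key, big-endian on the wire */
        __be32 len;     /* DMA length */
} __packed;

static inline u32 example_reth_rkey(const struct example_reth *reth)
{
        return be32_to_cpu(reth->rkey);
}

static inline void example_reth_set_rkey(struct example_reth *reth, u32 rkey)
{
        reth->rkey = cpu_to_be32(rkey);
}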
rxe_resp.c:413 qp->resp.rkey = 0; in qp_resp_from_reth()
415 qp->resp.rkey = reth_rkey(pkt); in qp_resp_from_reth()
422 qp->resp.rkey = atmeth_rkey(pkt); in qp_resp_from_atmeth()
436 u32 rkey; in check_rkey() local
481 rkey = qp->resp.rkey; in check_rkey()
485 if (rkey_is_mw(rkey)) { in check_rkey()
486 mw = rxe_lookup_mw(qp, access, rkey); in check_rkey()
488 rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey); in check_rkey()
507 mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE); in check_rkey()
509 rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey); in check_rkey()
[all …]
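check_rkey() has to decide whether an incoming rkey names a memory window or a memory region before it can resolve the target buffer. A condensed, illustrative version of that dispatch, with example_ stand-ins for the driver-private rxe types and lookups:

#include <linux/types.h>

struct example_mr;                              /* stand-in for struct rxe_mr */
struct example_mw { struct example_mr *mr; };   /* stand-in for struct rxe_mw */

bool example_index_is_mw(u32 index);
struct example_mw *example_lookup_mw(u32 rkey, int access);
struct example_mr *example_lookup_mr(u32 rkey, int access);

static struct example_mr *example_resolve_rkey(u32 rkey, int access)
{
        if (example_index_is_mw(rkey >> 8)) {
                struct example_mw *mw = example_lookup_mw(rkey, access);

                return mw ? mw->mr : NULL;      /* a bound MW resolves to its MR */
        }
        return example_lookup_mr(rkey, access); /* plain MR lookup in the PD */
}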
rxe_req.c:459 reth_set_rkey(pkt, ibwr->wr.flush.rkey); in init_req_packet()
461 reth_set_rkey(pkt, ibwr->wr.rdma.rkey); in init_req_packet()
484 atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey); in init_req_packet()
596 u32 rkey; in rxe_do_local_ops() local
601 rkey = wqe->wr.ex.invalidate_rkey; in rxe_do_local_ops()
602 if (rkey_is_mw(rkey)) in rxe_do_local_ops()
603 ret = rxe_invalidate_mw(qp, rkey); in rxe_do_local_ops()
605 ret = rxe_invalidate_mr(qp, rkey); in rxe_do_local_ops()
rxe_mr.c:58 mr->rkey = mr->ibmr.rkey = key; in rxe_mr_init()
626 (type == RXE_LOOKUP_REMOTE && mr->rkey != key) || in lookup_mr()
651 if (remote ? (key != mr->rkey) : (key != mr->lkey)) { in rxe_invalidate_mr()
653 key, (remote ? mr->rkey : mr->lkey)); in rxe_invalidate_mr()
713 mr->rkey = key; in rxe_reg_fast_mr()
/linux/drivers/infiniband/ulp/iser/
iser_memory.c:135 reg->rkey = device->pd->unsafe_global_rkey; in iser_reg_dma()
137 reg->rkey = 0; in iser_reg_dma()
142 " length=0x%x\n", reg->sge.lkey, reg->rkey, in iser_reg_dma()
241 inv_wr->ex.invalidate_rkey = mr->rkey; in iser_inv_rkey()
270 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); in iser_reg_sig_mr()
287 wr->key = mr->rkey; in iser_reg_sig_mr()
294 sig_reg->rkey = mr->rkey; in iser_reg_sig_mr()
299 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr, in iser_reg_sig_mr()
319 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); in iser_fast_reg_mr()
334 wr->key = mr->rkey; in iser_fast_reg_mr()
[all …]
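Before reusing a fast-registration MR, iser rotates the 8-bit key portion of its rkey so a stale rkey from the previous registration can no longer match. A minimal sketch of that step; both helpers are the standard ib_verbs.h API used in the hits above:

#include <rdma/ib_verbs.h>

static void example_refresh_mr_key(struct ib_mr *mr)
{
        /* ib_inc_rkey() bumps only the low 8 bits; the update refreshes lkey too */
        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
}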
iser_initiator.c:70 hdr->read_stag = cpu_to_be32(mem_reg->rkey); in iser_prepare_read_cmd()
74 task->itt, mem_reg->rkey, in iser_prepare_read_cmd()
117 hdr->write_stag = cpu_to_be32(mem_reg->rkey); in iser_prepare_write_cmd()
122 task->itt, mem_reg->rkey, in iser_prepare_write_cmd()
576 static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) in iser_inv_desc() argument
578 if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) || in iser_inv_desc()
579 (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) { in iser_inv_desc()
580 iser_err("Bogus remote invalidation for rkey %#x\n", rkey); in iser_inv_desc()
597 u32 rkey = wc->ex.invalidate_rkey; in iser_check_remote_inv() local
600 iser_conn, rkey); in iser_check_remote_inv()
[all …]
/linux/fs/nfs/
nfs4idmap.c:283 struct key *rkey = ERR_PTR(-EAGAIN); in nfs_idmap_request_key() local
291 rkey = request_key(&key_type_id_resolver, desc, ""); in nfs_idmap_request_key()
292 if (IS_ERR(rkey)) { in nfs_idmap_request_key()
294 rkey = request_key_with_auxdata(&key_type_id_resolver_legacy, in nfs_idmap_request_key()
298 if (!IS_ERR(rkey)) in nfs_idmap_request_key()
299 set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags); in nfs_idmap_request_key()
302 return rkey; in nfs_idmap_request_key()
309 struct key *rkey; in nfs_idmap_get_key() local
314 rkey = nfs_idmap_request_key(name, namelen, type, idmap); in nfs_idmap_get_key()
315 if (IS_ERR(rkey)) { in nfs_idmap_get_key()
[all …]
/linux/drivers/infiniband/core/
rw.c:126 u64 remote_addr, u32 rkey, enum dma_data_direction dir) in rdma_rw_init_mr_wrs() argument
162 reg->wr.rkey = rkey; in rdma_rw_init_mr_wrs()
197 u64 remote_addr, u32 rkey, enum dma_data_direction dir) in rdma_rw_init_map_wrs() argument
223 rdma_wr->rkey = rkey; in rdma_rw_init_map_wrs()
252 struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey, in rdma_rw_init_single_wr() argument
271 rdma_wr->rkey = rkey; in rdma_rw_init_single_wr()
294 u64 remote_addr, u32 rkey, enum dma_data_direction dir) in rdma_rw_ctx_init() argument
328 sg_offset, remote_addr, rkey, dir); in rdma_rw_ctx_init()
331 remote_addr, rkey, dir); in rdma_rw_ctx_init()
334 remote_addr, rkey, dir); in rdma_rw_ctx_init()
[all …]
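rw.c is the core RDMA read/write helper layer: callers hand it a scatterlist plus the peer's remote address and rkey, and it builds single-WR, multi-WR or MR-based mappings internally. A hedged sketch of how a consumer might drive it for an RDMA READ (error handling trimmed; check include/rdma/rw.h for the exact signatures on your kernel, e.g. the port_num type):

#include <linux/dma-direction.h>
#include <rdma/ib_verbs.h>
#include <rdma/rw.h>

static int example_rdma_read(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                             u32 port_num, struct scatterlist *sg, u32 sg_cnt,
                             u64 remote_addr, u32 rkey, struct ib_cqe *cqe)
{
        int ret;

        ret = rdma_rw_ctx_init(ctx, qp, port_num, sg, sg_cnt, 0,
                               remote_addr, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;

        /* posts the prepared WR chain; completion is reported through cqe */
        return rdma_rw_ctx_post(ctx, qp, port_num, cqe, NULL);
}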
/linux/include/uapi/rdma/
rdma_user_rxe.h:88 __u32 rkey; member
94 __u32 rkey; member
101 __u32 rkey; member
118 __u32 rkey; member
vmw_pvrdma-abi.h:251 __u32 rkey; member
258 __u32 rkey; member
264 __u32 rkey; member
277 __u32 rkey; member
/linux/drivers/infiniband/sw/siw/
siw_qp_tx.c:138 c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
183 c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey); in siw_qp_prepare_tx()
195 c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey); in siw_qp_prepare_tx()
209 c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey); in siw_qp_prepare_tx()
938 siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey); in siw_fastreg_mr()
941 pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); in siw_fastreg_mr()
945 if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) { in siw_fastreg_mr()
946 pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey); in siw_fastreg_mr()
950 mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); in siw_fastreg_mr()
952 pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); in siw_fastreg_mr()
[all …]
/linux/drivers/nvme/target/
pr.c:240 if (reg->rkey != nrkey) in nvmet_pr_register()
248 new->rkey = nrkey; in nvmet_pr_register()
301 if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) { in nvmet_pr_unregister()
316 reg->rkey = *(u64 *)attr; in nvmet_pr_update_reg_rkey()
338 new->rkey = holder->rkey; in nvmet_pr_update_reg_attr()
364 if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) in nvmet_pr_replace()
469 if (reg->rkey == prkey) { in nvmet_pr_unreg_all_host_by_prkey()
492 if (reg->rkey == prkey && in nvmet_pr_unreg_all_others_by_prkey()
573 if (prkey == holder->rkey) { in nvmet_pr_preempt()
679 reg->rkey == le64_to_cpu(d->crkey)) { in nvmet_execute_pr_acquire()
[all …]
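In the NVMe persistent-reservation code, rkey is a 64-bit registration key rather than an RDMA key: it is kept CPU-endian in-core and little-endian on the wire, which is why every comparison in the hits goes through le64_to_cpu(). A tiny illustrative check (the example_ names are not from the driver):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_pr_reg {
        u64 rkey;               /* in-core registration key, CPU byte order */
};

static bool example_pr_key_matches(const struct example_pr_reg *reg,
                                   __le64 wire_crkey, bool ignore_key)
{
        return ignore_key || reg->rkey == le64_to_cpu(wire_crkey);
}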
/linux/drivers/infiniband/ulp/rtrs/
README:51 then pass it to the block layer. A new rkey is generated and registered for the
53 The new rkey is sent back to the client along with the IO result.
144 using the IMM field, the server invalidates the rkey associated with the memory chunks
149 inflight IO and for the error code. The new rkey is sent back using
150 SEND_WITH_IMM WR; when the client receives the new rkey message, it validates
151 the message and finishes the IO after updating the rkey for the rbuffer, then posts
186 The server invalidates the rkey associated with the memory chunks first; when it finishes,
192 outstanding inflight IO and the error code. The new rkey is sent back using
193 SEND_WITH_IMM WR; when the client receives the new rkey message, it validates
194 the message and finishes the IO after updating the rkey for the rbuffer, then posts
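The invalidate-then-replace step the README describes is built from a LOCAL_INV work request chained in front of the response, as the rtrs-srv.c hits below show. A minimal sketch of wiring such a WR, assuming old_rkey is the key being retired and resp_wr the reply that will carry the new rkey:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

static void example_prepare_inv_wr(struct ib_send_wr *inv_wr, u32 old_rkey,
                                   struct ib_send_wr *resp_wr)
{
        memset(inv_wr, 0, sizeof(*inv_wr));
        inv_wr->opcode = IB_WR_LOCAL_INV;
        inv_wr->ex.invalidate_rkey = old_rkey;  /* rkey the client must stop using */
        inv_wr->next = resp_wr;                 /* then send the reply */
        inv_wr->send_flags = IB_SEND_SIGNALED;
}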
rtrs-srv.c:211 u32 rkey = 0; in rdma_write_sg() local
242 wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key); in rdma_write_sg()
243 if (rkey == 0) in rdma_write_sg()
244 rkey = wr->rkey; in rdma_write_sg()
247 WARN_ON_ONCE(rkey != wr->rkey); in rdma_write_sg()
280 inv_wr.ex.invalidate_rkey = rkey; in rdma_write_sg()
293 rwr.key = srv_mr->mr->rkey; in rdma_write_sg()
299 msg->rkey = cpu_to_le32(srv_mr->mr->rkey); in rdma_write_sg()
414 rwr.key = srv_mr->mr->rkey; in send_io_resp_imm()
420 msg->rkey = cpu_to_le32(srv_mr->mr->rkey); in send_io_resp_imm()
[all …]
/linux/arch/arm64/crypto/
sm4-ce-glue.c:28 asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
29 asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
31 asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src,
33 asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src,
35 asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src,
37 asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src,
39 asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
106 static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey) in sm4_ecb_do_crypt() argument
122 sm4_ce_crypt(rkey, dst, src, nblks); in sm4_ecb_do_crypt()
sm4-neon-glue.c:21 asmlinkage void sm4_neon_crypt(const u32 *rkey, u8 *dst, const u8 *src,
36 static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey) in sm4_ecb_do_crypt() argument
52 sm4_neon_crypt(rkey, dst, src, nblocks); in sm4_ecb_do_crypt()
sm4-ce.h:10 void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
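Note the different meaning in arch/arm64/crypto: here rkey is the expanded SM4 round-key schedule, not an RDMA remote key. A hedged sketch using the generic SM4 library these asm routines accelerate (sm4_expandkey()/sm4_crypt_block() from crypto/sm4.h; the example_ wrapper is illustrative):

#include <crypto/sm4.h>

static int example_sm4_encrypt_one(const u8 *key, u8 *dst, const u8 *src)
{
        struct sm4_ctx ctx;
        int ret;

        ret = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);   /* fills ctx.rkey_enc/rkey_dec */
        if (ret)
                return ret;

        sm4_crypt_block(ctx.rkey_enc, dst, src);        /* one 16-byte block */
        return 0;
}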
/linux/include/rdma/
rw.h:47 u64 remote_addr, u32 rkey, enum dma_data_direction dir);
55 struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
/linux/drivers/nvme/host/
pr.c:260 le64_to_cpu(rse->regctl_eds[i].rkey); in nvme_pr_read_keys()
265 keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey); in nvme_pr_read_keys()
317 resv->key = le64_to_cpu(rse->regctl_eds[i].rkey); in nvme_pr_read_reservation()
325 resv->key = le64_to_cpu(rs->regctl_ds[i].rkey); in nvme_pr_read_reservation()
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/
dr_send.c:31 u32 rkey; member
401 u32 rkey, in dr_rdma_handle_icm_write_segments() argument
411 wq_raddr->rkey = cpu_to_be32(rkey); in dr_rdma_handle_icm_write_segments()
438 u32 rkey, struct dr_data_seg *data_seg, in dr_rdma_segments() argument
455 rkey, data_seg, &size); in dr_rdma_segments()
487 dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, in dr_post_send()
489 dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, in dr_post_send()
492 dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, in dr_post_send()
707 send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk); in mlx5dr_send_postsend_ste()
767 send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk); in mlx5dr_send_postsend_htbl()
[all …]
/linux/drivers/infiniband/hw/hfi1/
rc.c:597 ohdr->u.rc.reth.rkey = in hfi1_make_rc_req()
598 cpu_to_be32(wqe->rdma_wr.rkey); in hfi1_make_rc_req()
745 ohdr->u.rc.reth.rkey = in hfi1_make_rc_req()
746 cpu_to_be32(wqe->rdma_wr.rkey); in hfi1_make_rc_req()
862 ohdr->u.atomic_eth.rkey = cpu_to_be32( in hfi1_make_rc_req()
863 wqe->atomic_wr.rkey); in hfi1_make_rc_req()
995 ohdr->u.rc.reth.rkey = in hfi1_make_rc_req()
996 cpu_to_be32(wqe->rdma_wr.rkey); in hfi1_make_rc_req()
2594 u32 rkey = be32_to_cpu(reth->rkey); in rc_rcv_error() local
2598 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, in rc_rcv_error()
[all …]
uc.c:146 ohdr->u.rc.reth.rkey = in hfi1_make_uc_req()
147 cpu_to_be32(wqe->rdma_wr.rkey); in hfi1_make_uc_req()
449 u32 rkey = be32_to_cpu(reth->rkey); in hfi1_uc_rcv() local
455 vaddr, rkey, IB_ACCESS_REMOTE_WRITE); in hfi1_uc_rcv()
/linux/drivers/md/persistent-data/
dm-btree.c:379 uint64_t rkey; in dm_btree_lookup() local
398 lower_bound, &rkey, in dm_btree_lookup()
402 if (rkey != keys[level]) { in dm_btree_lookup()
420 uint64_t key, uint64_t *rkey, void *value_le) in dm_btree_lookup_next_single() argument
449 r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le); in dm_btree_lookup_next_single()
452 r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le); in dm_btree_lookup_next_single()
462 *rkey = le64_to_cpu(n->keys[i]); in dm_btree_lookup_next_single()
471 uint64_t *keys, uint64_t *rkey, void *value_le) in dm_btree_lookup_next() argument
481 lower_bound, rkey, in dm_btree_lookup_next()
486 if (*rkey != keys[level]) { in dm_btree_lookup_next()
[all …]
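dm-btree uses rkey for yet another purpose: the "result key" actually found by a lookup, returned so the caller can tell an exact match from the next-higher entry. An illustrative call, assuming a single-level btree (signature per drivers/md/persistent-data/dm-btree.h):

#include "dm-btree.h"   /* drivers/md/persistent-data */

static int example_lookup_at_or_after(struct dm_btree_info *info,
                                      dm_block_t root, uint64_t key,
                                      uint64_t *found_key, __le64 *value_le)
{
        /* 0 on success with *found_key >= key; negative errno if no such entry */
        return dm_btree_lookup_next(info, root, &key, found_key, value_le);
}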
