Home
last modified time | relevance | path

Searched refs:rdma (Results 1 – 25 of 75) sorted by relevance

123

/linux/net/9p/
H A Dtrans_rdma.c111 struct p9_trans_rdma *rdma = clnt->trans; in p9_rdma_show_options() local
113 if (rdma->port != P9_RDMA_PORT) in p9_rdma_show_options()
114 seq_printf(m, ",port=%u", rdma->port); in p9_rdma_show_options()
115 if (rdma->sq_depth != P9_RDMA_SQ_DEPTH) in p9_rdma_show_options()
116 seq_printf(m, ",sq=%u", rdma->sq_depth); in p9_rdma_show_options()
117 if (rdma->rq_depth != P9_RDMA_RQ_DEPTH) in p9_rdma_show_options()
118 seq_printf(m, ",rq=%u", rdma->rq_depth); in p9_rdma_show_options()
119 if (rdma->timeout != P9_RDMA_TIMEOUT) in p9_rdma_show_options()
120 seq_printf(m, ",timeout=%lu", rdma->timeout); in p9_rdma_show_options()
121 if (rdma->privport) in p9_rdma_show_options()
[all …]
/linux/net/sunrpc/xprtrdma/
H A Dsvc_rdma_sendto.c105 #include <rdma/ib_verbs.h>
106 #include <rdma/rdma_cm.h>
117 svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma) in svc_rdma_send_ctxt_alloc() argument
119 int node = ibdev_to_node(rdma->sc_cm_id->device); in svc_rdma_send_ctxt_alloc()
126 ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges), in svc_rdma_send_ctxt_alloc()
130 pages = svc_serv_maxpages(rdma->sc_xprt.xpt_server); in svc_rdma_send_ctxt_alloc()
136 buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node); in svc_rdma_send_ctxt_alloc()
139 addr = ib_dma_map_single(rdma->sc_pd->device, buffer, in svc_rdma_send_ctxt_alloc()
140 rdma->sc_max_req_size, DMA_TO_DEVICE); in svc_rdma_send_ctxt_alloc()
141 if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) in svc_rdma_send_ctxt_alloc()
167 svc_rdma_send_ctxts_destroy(struct svcxprt_rdma * rdma) svc_rdma_send_ctxts_destroy() argument
190 svc_rdma_send_ctxt_get(struct svcxprt_rdma * rdma) svc_rdma_send_ctxt_get() argument
224 svc_rdma_send_ctxt_release(struct svcxprt_rdma * rdma,struct svc_rdma_send_ctxt * ctxt) svc_rdma_send_ctxt_release() argument
266 svc_rdma_send_ctxt_put(struct svcxprt_rdma * rdma,struct svc_rdma_send_ctxt * ctxt) svc_rdma_send_ctxt_put() argument
279 svc_rdma_wake_send_waiters(struct svcxprt_rdma * rdma,int avail) svc_rdma_wake_send_waiters() argument
297 struct svcxprt_rdma *rdma = cq->cq_context; svc_rdma_wc_send() local
338 svc_rdma_post_send(struct svcxprt_rdma * rdma,struct svc_rdma_send_ctxt * ctxt) svc_rdma_post_send() argument
566 struct svcxprt_rdma *rdma = args->md_rdma; svc_rdma_page_dma_map() local
703 svc_rdma_pull_up_needed(const struct svcxprt_rdma * rdma,const struct svc_rdma_send_ctxt * sctxt,const struct svc_rdma_pcl * write_pcl,const struct xdr_buf * xdr) svc_rdma_pull_up_needed() argument
784 svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma * rdma,struct svc_rdma_send_ctxt * sctxt,const struct svc_rdma_pcl * write_pcl,const struct xdr_buf * xdr) svc_rdma_pull_up_reply_msg() argument
818 svc_rdma_map_reply_msg(struct svcxprt_rdma * rdma,struct svc_rdma_send_ctxt * sctxt,const struct svc_rdma_pcl * write_pcl,const struct svc_rdma_pcl * reply_pcl,const struct xdr_buf * xdr) svc_rdma_map_reply_msg() argument
880 svc_rdma_send_reply_msg(struct svcxprt_rdma * rdma,struct svc_rdma_send_ctxt * sctxt,const struct svc_rdma_recv_ctxt * rctxt,struct svc_rqst * rqstp) svc_rdma_send_reply_msg() argument
923 svc_rdma_send_error_msg(struct svcxprt_rdma * rdma,struct svc_rdma_send_ctxt * sctxt,struct svc_rdma_recv_ctxt * rctxt,int status) svc_rdma_send_error_msg() argument
992 struct svcxprt_rdma *rdma = svc_rdma_sendto() local
[all...]
H A Dsvc_rdma_rw.c49 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
60 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int nr_bvec) in svc_rdma_get_rw_ctxt() argument
62 struct ib_device *dev = rdma->sc_cm_id->device; in svc_rdma_get_rw_ctxt()
67 spin_lock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
68 node = llist_del_first(&rdma->sc_rw_ctxts); in svc_rdma_get_rw_ctxt()
69 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
98 svc_rdma_put_rw_ctxt(rdma, ctxt); in svc_rdma_get_rw_ctxt()
102 trace_svcrdma_rwctx_empty(rdma, nr_bvec); in svc_rdma_get_rw_ctxt()
114 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_put_rw_ctxt() argument
117 __svc_rdma_put_rw_ctxt(ctxt, &rdma->sc_rw_ctxts); in svc_rdma_put_rw_ctxt()
[all …]
H A Dsvc_rdma_transport.c52 #include <rdma/ib_verbs.h>
53 #include <rdma/rdma_cm.h>
54 #include <rdma/rw.h>
94 .xcl_name = "rdma",
330 struct svcxprt_rdma *rdma = cma_id->context; in svc_rdma_cma_handler() local
331 struct svc_xprt *xprt = &rdma->sc_xprt; in svc_rdma_cma_handler()
335 clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags); in svc_rdma_cma_handler()
388 struct svcxprt_rdma *rdma = container_of(rn, struct svcxprt_rdma, in svc_rdma_xprt_done() local
390 struct rdma_cm_id *id = rdma->sc_cm_id; in svc_rdma_xprt_done()
393 svc_xprt_close(&rdma->sc_xprt); in svc_rdma_xprt_done()
588 struct svcxprt_rdma *rdma = svc_rdma_detach() local
600 struct svcxprt_rdma *rdma = svc_rdma_free() local
640 struct svcxprt_rdma *rdma = svc_rdma_has_wspace() local
[all...]
H A Dsvc_rdma_backchannel.c75 static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, in svc_rdma_bc_sendto() argument
83 ret = svc_rdma_map_reply_msg(rdma, sctxt, &empty_pcl, &empty_pcl, in svc_rdma_bc_sendto()
93 return svc_rdma_post_send(rdma, sctxt); in svc_rdma_bc_sendto()
136 rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) in rpcrdma_bc_send_request() argument
144 ctxt = svc_rdma_send_ctxt_get(rdma); in rpcrdma_bc_send_request()
160 rc = svc_rdma_bc_sendto(rdma, rqst, ctxt); in rpcrdma_bc_send_request()
166 svc_rdma_send_ctxt_put(rdma, ctxt); in rpcrdma_bc_send_request()
183 struct svcxprt_rdma *rdma = in xprt_rdma_bc_send_request() local
190 ret = rpcrdma_bc_send_request(rdma, rqst); in xprt_rdma_bc_send_request()
/linux/drivers/gpu/drm/meson/
H A Dmeson_rdma.c26 if (!priv->rdma.addr) { in meson_rdma_init()
28 priv->rdma.addr = in meson_rdma_init()
30 &priv->rdma.addr_dma, in meson_rdma_init()
32 if (!priv->rdma.addr) in meson_rdma_init()
36 priv->rdma.offset = 0; in meson_rdma_init()
50 if (!priv->rdma.addr && !priv->rdma.addr_dma) in meson_rdma_free()
56 priv->rdma.addr, priv->rdma.addr_dma); in meson_rdma_free()
58 priv->rdma.addr = NULL; in meson_rdma_free()
59 priv->rdma.addr_dma = (dma_addr_t)0; in meson_rdma_free()
88 priv->rdma.offset = 0; in meson_rdma_reset()
[all …]
/linux/include/linux/sunrpc/
H A Dsvc_rdma.h145 static inline void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma, in svc_rdma_recv_cid_init() argument
148 cid->ci_queue_id = rdma->sc_rq_cq->res.id; in svc_rdma_recv_cid_init()
149 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); in svc_rdma_recv_cid_init()
157 static inline void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma, in svc_rdma_send_cid_init() argument
160 cid->ci_queue_id = rdma->sc_sq_cq->res.id; in svc_rdma_send_cid_init()
161 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); in svc_rdma_send_cid_init()
258 extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
259 extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
261 svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma);
262 extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
[all …]
/linux/Documentation/admin-guide/cgroup-v1/
H A Drdma.rst28 Currently user space applications can easily take away all the rdma verb
31 rdma resources. This can lead to service unavailability.
34 of processes can be limited. Through this controller different rdma
43 by rdma cgroup, which can be extended later if required.
52 of the child processes which shares the address space, rdma resources are
56 rdma resources. Linking resources around css also ensures that cgroups can be
60 Whenever RDMA resource charging occurs, owner rdma cgroup is returned to
61 the caller. Same rdma cgroup should be passed while uncharging the resource.
81 IB stack honors limits enforced by the rdma controller. When application
86 Following resources can be accounted by rdma controller.
[all …]
/linux/Documentation/ABI/stable/
H A Dsysfs-driver-ib_srp4 Contact: linux-rdma@vger.kernel.org
77 Contact: linux-rdma@vger.kernel.org
83 Contact: linux-rdma@vger.kernel.org
89 Contact: linux-rdma@vger.kernel.org
97 Contact: linux-rdma@vger.kernel.org
104 Contact: linux-rdma@vger.kernel.org
111 Contact: linux-rdma@vger.kernel.org
117 Contact: linux-rdma@vger.kernel.org
124 Contact: linux-rdma@vger.kernel.org
131 Contact: linux-rdma@vger.kernel.org
[all …]
H A Dsysfs-transport-srp4 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
11 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
20 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
36 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
50 Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
/linux/drivers/infiniband/ulp/rtrs/
H A Drtrs-clt-stats.c35 this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt); in rtrs_clt_inc_failover_cnt()
90 r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma; in rtrs_clt_stats_rdma_to_str()
120 memset(&s->rdma, 0, sizeof(s->rdma)); in rtrs_clt_reset_rdma_stats()
168 this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt); in rtrs_clt_update_rdma_stats()
169 this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size); in rtrs_clt_update_rdma_stats()
H A DREADME29 server side for a given client for rdma transfer. A session
50 invalidate each rdma buffer before we hand it over to RNBD server and
95 6. Server and client exchange periodically heartbeat messages (empty rdma
123 on the server side and rdma writes there the user data, user header and the
129 2. When confirming a write request server sends an "empty" rdma message with
140 on the server side and rdma writes there the user data, user header and the
147 2. When confirming a write request server sends an "empty" rdma message with
163 on the server side and rdma writes there the user header and the
169 attaches an invalidation message if requested and finally an "empty" rdma
182 on the server side and rdma writes there the user header and the
[all …]
/linux/drivers/net/ethernet/chelsio/cxgb3/
H A Dcxgb3_offload.c281 struct rdma_info *rdma = data; in cxgb_rdma_ctl() local
284 rdma->udbell_physbase = pci_resource_start(pdev, 2); in cxgb_rdma_ctl()
285 rdma->udbell_len = pci_resource_len(pdev, 2); in cxgb_rdma_ctl()
286 rdma->tpt_base = in cxgb_rdma_ctl()
288 rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); in cxgb_rdma_ctl()
289 rdma->pbl_base = in cxgb_rdma_ctl()
291 rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); in cxgb_rdma_ctl()
292 rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); in cxgb_rdma_ctl()
293 rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); in cxgb_rdma_ctl()
294 rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL; in cxgb_rdma_ctl()
[all …]
/linux/fs/smb/client/
H A Dsmbdirect.c60 struct smb_extract_to_rdma *rdma);
230 rdma_disconnect(sc->rdma.cm_id); in smbd_disconnect_rdma_work()
431 sc->rdma.legacy_iwarp = true; in smbd_conn_upcall()
1055 sc->rdma.cm_id = smbd_create_id(sc, dstaddr, port); in smbd_ia_open()
1056 if (IS_ERR(sc->rdma.cm_id)) { in smbd_ia_open()
1057 rc = PTR_ERR(sc->rdma.cm_id); in smbd_ia_open()
1060 sc->ib.dev = sc->rdma.cm_id->device; in smbd_ia_open()
1080 rdma_destroy_id(sc->rdma.cm_id); in smbd_ia_open()
1081 sc->rdma.cm_id = NULL; in smbd_ia_open()
1923 rdma_destroy_qp(sc->rdma.cm_id); in smbd_destroy()
[all …]
/linux/drivers/nvme/host/
H A DMakefile8 obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
26 nvme-rdma-y += rdma.o
/linux/drivers/infiniband/
H A DKconfig25 need libibumad from rdma-core
26 <https://github.com/linux-rdma/rdma-core>.
38 rdma-core <https://github.com/linux-rdma/rdma-core>.
/linux/drivers/macintosh/
H A Drack-meter.c146 struct rackmeter_dma *rdma = rm->dma_buf_v; in rackmeter_do_pause() local
155 memset(rdma->buf1, 0, sizeof(rdma->buf1)); in rackmeter_do_pause()
156 memset(rdma->buf2, 0, sizeof(rdma->buf2)); in rackmeter_do_pause()
371 struct resource ri2s, rdma; in rackmeter_probe() local
431 of_address_to_resource(i2s, 1, &rdma)) { in rackmeter_probe()
441 pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start); in rackmeter_probe()
475 rm->dma_regs = ioremap(rdma.start, 0x100); in rackmeter_probe()
/linux/Documentation/filesystems/smb/
H A Dsmbdirect.rst45 `rdma link add` command to load the module and create an
52 sudo rdma link add siw0 type siw netdev eth0
83 - On the client, mount the share with `rdma` mount option to use SMB Direct
90 mount -t cifs //server/share /mnt/point -o vers=3.1.1,rdma
99 Or, verify `rdma` mount option for the share in `/proc/mounts`:
/linux/drivers/gpu/drm/mediatek/
H A Dmtk_disp_ovl_adaptor.c101 static const struct mtk_ddp_comp_funcs rdma = { variable
110 [OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA0, 0, &rdma },
111 [OVL_ADAPTOR_MDP_RDMA1] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA1, 1, &rdma },
112 [OVL_ADAPTOR_MDP_RDMA2] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA2, 2, &rdma },
113 [OVL_ADAPTOR_MDP_RDMA3] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA3, 3, &rdma },
114 [OVL_ADAPTOR_MDP_RDMA4] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA4, 4, &rdma },
115 [OVL_ADAPTOR_MDP_RDMA5] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA5, 5, &rdma },
116 [OVL_ADAPTOR_MDP_RDMA6] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA6, 6, &rdma },
117 [OVL_ADAPTOR_MDP_RDMA7] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA7, 7, &rdma },
/linux/arch/arm/boot/dts/mediatek/
H A Dmt7623n.dtsi148 rdma0: rdma@14008000 {
149 compatible = "mediatek,mt7623-disp-rdma",
150 "mediatek,mt2701-disp-rdma";
207 rdma1: rdma@14012000 {
208 compatible = "mediatek,mt7623-disp-rdma",
209 "mediatek,mt2701-disp-rdma";
/linux/include/trace/events/
H A Drpcrdma.h1489 const struct svcxprt_rdma *rdma,
1493 TP_ARGS(rdma, status),
1497 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1513 const struct svcxprt_rdma *rdma, \
1516 TP_ARGS(rdma, status))
1831 const struct svcxprt_rdma *rdma,
1838 TP_ARGS(rdma, offset, handle, nents, status),
1849 __entry->cq_id = rdma->sc_sq_cq->res.id;
1864 const struct svcxprt_rdma *rdma,
1868 TP_ARGS(rdma, num_sges),
[all …]
/linux/drivers/infiniband/hw/mlx5/
H A Dodp.c79 } rdma; member
1474 u32 rkey = pfault->rdma.r_key; in mlx5_ib_mr_rdma_pfault_handler()
1483 pfault->rdma.rdma_va += pfault->bytes_committed; in mlx5_ib_mr_rdma_pfault_handler()
1484 pfault->rdma.rdma_op_len -= min(pfault->bytes_committed, in mlx5_ib_mr_rdma_pfault_handler()
1485 pfault->rdma.rdma_op_len); in mlx5_ib_mr_rdma_pfault_handler()
1488 address = pfault->rdma.rdma_va; in mlx5_ib_mr_rdma_pfault_handler()
1489 length = pfault->rdma.rdma_op_len; in mlx5_ib_mr_rdma_pfault_handler()
1496 length = pfault->rdma.packet_size; in mlx5_ib_mr_rdma_pfault_handler()
1658 be32_to_cpu(pf_eqe->rdma.bytes_committed); in mlx5_ib_eq_pf_process()
1660 be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; in mlx5_ib_eq_pf_process()
[all …]
/linux/Documentation/translations/zh_CN/infiniband/
H A Duser_verbs.rst23 要使用verbs,需要libibverbs库,可从https://github.com/linux-rdma/rdma-core。
/linux/net/rds/
H A Dsend.c339 (rm->rdma.op_active && in rds_send_xmit()
368 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) { in rds_send_xmit()
369 rm->m_final_op = &rm->rdma; in rds_send_xmit()
374 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); in rds_send_xmit()
411 ops_present = (rm->atomic.op_active || rm->rdma.op_active); in rds_send_xmit()
414 if (rm->rdma.op_active && !rm->rdma.op_silent) in rds_send_xmit()
568 ro = &rm->rdma; in rds_rdma_send_complete()
639 ro = &rm->rdma; in __rds_send_complete()
705 struct rm_rdma_op *ro = &rm->rdma; in rds_send_remove_from_sock()
718 rm->rdma.op_notifier = NULL; in rds_send_remove_from_sock()
[all …]
/linux/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/
H A Dswitchdev.rst68 mlx5 supports ETH,rdma and vdpa (vnet) auxiliary devices devlink params (see :ref:`Documentation/ne…
81 devices (netdev, rdma, vdpa, etc.) of the subfunction accesses only assigned
107 | | | netdev | | rdma dev |
248 $ rdma link show mlx5_0/1
251 $ rdma dev show
264 mlx5_core.eth.4 mlx5_core.rdma.4
265 (sf eth aux dev) (sf rdma aux dev)
269 (sf netdev) (sf rdma device)

123