/linux/net/sunrpc/xprtrdma/
  svc_rdma_transport.c
    409  unsigned int ctxts, rq_depth, maxpayload;    in svc_rdma_accept()  local
    456  rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests +    in svc_rdma_accept()
    458  if (rq_depth > dev->attrs.max_qp_wr) {    in svc_rdma_accept()
    459  rq_depth = dev->attrs.max_qp_wr;    in svc_rdma_accept()
    461  newxprt->sc_max_requests = rq_depth - 2;    in svc_rdma_accept()
    473  newxprt->sc_sq_depth = rq_depth + ctxts;    in svc_rdma_accept()
    488  ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);    in svc_rdma_accept()
    498  qp_attr.cap.max_recv_wr = rq_depth;    in svc_rdma_accept()
    510  newxprt->sc_sq_depth, rq_depth);    in svc_rdma_accept()
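The svc_rdma_accept() hits above show the usual shape of receive-queue sizing: the requested depth is the sum of the forward and backchannel request credits, it is clamped to the device's max_qp_wr limit, and the clamped value then sizes the completion queue and qp_attr.cap.max_recv_wr (with sc_max_requests trimmed to rq_depth - 2 when clamping occurs). Below is a minimal userspace sketch of that clamping step only; the struct and parameter names are hypothetical, not the kernel's.

/*
 * Hedged sketch, not the kernel code: clamp a requested receive-queue depth
 * to the device's work-request limit before sizing the CQ and QP.
 */
#include <stdio.h>

struct dev_attrs { unsigned int max_qp_wr; };

static unsigned int clamp_rq_depth(unsigned int max_requests,
                                   unsigned int max_bc_requests,
                                   const struct dev_attrs *attrs)
{
    unsigned int rq_depth = max_requests + max_bc_requests;

    if (rq_depth > attrs->max_qp_wr)
        rq_depth = attrs->max_qp_wr;  /* device cannot post more recv WRs */
    return rq_depth;
}

int main(void)
{
    struct dev_attrs attrs = { .max_qp_wr = 128 };

    /* 150 + 50 = 200 requested, clamped to the device's 128-WR limit */
    printf("rq_depth = %u\n", clamp_rq_depth(150, 50, &attrs));
    return 0;
}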
|
/linux/block/
  blk-rq-qos.h
     51  struct rq_depth {    struct
     99  bool rq_depth_scale_up(struct rq_depth *rqd);
    100  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
    101  bool rq_depth_calc_max_depth(struct rq_depth *rqd);
|
  blk-rq-qos.c
    110  bool rq_depth_calc_max_depth(struct rq_depth *rqd)    in rq_depth_calc_max_depth()
    158  bool rq_depth_scale_up(struct rq_depth *rqd)    in rq_depth_scale_up()
    177  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)    in rq_depth_scale_down()
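blk-rq-qos.h declares struct rq_depth together with rq_depth_scale_up(), rq_depth_scale_down() and rq_depth_calc_max_depth(), which blk-rq-qos.c defines. The sketch below is a hedged userspace model of the idea only, with invented field names and scaling rules, not the kernel implementation: a QoS policy widens the allowed request depth while its latency target is met and shrinks it, possibly to a floor, when the target is missed or a hard throttle is requested.

/* Hedged model of depth scaling; all names and policies here are illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct depth_ctrl {
    unsigned int depth;      /* currently allowed queue depth */
    unsigned int min_depth;
    unsigned int max_depth;
};

static bool depth_scale_up(struct depth_ctrl *d)
{
    if (d->depth >= d->max_depth)
        return false;        /* already at the ceiling, nothing changed */
    d->depth *= 2;           /* grow geometrically while latency is good */
    if (d->depth > d->max_depth)
        d->depth = d->max_depth;
    return true;
}

static bool depth_scale_down(struct depth_ctrl *d, bool hard_throttle)
{
    unsigned int new_depth = hard_throttle ? d->min_depth : d->depth / 2;

    if (new_depth < d->min_depth)
        new_depth = d->min_depth;
    if (new_depth == d->depth)
        return false;
    d->depth = new_depth;
    return true;
}

int main(void)
{
    struct depth_ctrl d = { .depth = 8, .min_depth = 1, .max_depth = 64 };

    depth_scale_up(&d);          /* latency target met: 8 -> 16 */
    depth_scale_down(&d, true);  /* hard throttle: drop to the floor */
    printf("depth = %u\n", d.depth);
    return 0;
}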
|
/linux/net/9p/
  trans_rdma.c
     81  int rq_depth;    member
    117  if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)    in p9_rdma_show_options()
    118  seq_printf(m, ",rq=%u", rdma->rq_depth);    in p9_rdma_show_options()
    470  rdma->rq_depth = opts->rq_depth;    in alloc_rdma()
    475  sema_init(&rdma->rq_sem, rdma->rq_depth);    in alloc_rdma()
    585  opts.sq_depth + opts.rq_depth + 1,    in rdma_create_trans()
    600  qp_attr.cap.max_recv_wr = opts.rq_depth;    in rdma_create_trans()
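In the 9p RDMA transport, rq_depth arrives from the "rq=" mount option (defaulting to P9_RDMA_RQ_DEPTH), sizes qp_attr.cap.max_recv_wr, and seeds the rq_sem counting semaphore so that at most rq_depth receive buffers are outstanding. The sketch below models only that credit pattern, using POSIX semaphores rather than the kernel API; the default value is a placeholder.

/* Hedged userspace model of the rq_depth receive-credit pattern. */
#include <semaphore.h>
#include <stdio.h>

#define DEFAULT_RQ_DEPTH 32  /* stand-in for P9_RDMA_RQ_DEPTH */

int main(void)
{
    unsigned int rq_depth = DEFAULT_RQ_DEPTH;  /* or parsed from "rq=<n>" */
    sem_t rq_sem;

    sem_init(&rq_sem, 0, rq_depth);

    /* before posting a receive buffer: take one credit */
    sem_wait(&rq_sem);
    printf("posted one receive, %u credits configured\n", rq_depth);

    /* when the receive completes: return the credit */
    sem_post(&rq_sem);
    sem_destroy(&rq_sem);
    return 0;
}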
|
/linux/drivers/net/ethernet/huawei/hinic/
  hinic_hw_dev.c
    271  unsigned int rq_depth)    in set_hw_ioctxt()  argument
    285  hw_ioctxt.rq_depth = ilog2(rq_depth);    in set_hw_ioctxt()
    438  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)    in hinic_hwdev_ifup()  argument
    462  func_to_io->rq_depth = rq_depth;    in hinic_hwdev_ifup()
    489  err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);    in hinic_hwdev_ifup()
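set_hw_ioctxt() stores the ring depth into the hardware I/O context as ilog2(rq_depth), i.e. the firmware is given the exponent, which implies the depth is expected to be a power of two. A hedged stand-alone sketch of that encoding follows; my_ilog2() is a local helper for illustration, not the kernel's ilog2().

/* Hedged sketch of the log2 depth encoding visible in set_hw_ioctxt(). */
#include <stdio.h>

static unsigned int my_ilog2(unsigned int v)
{
    unsigned int r = 0;

    while (v >>= 1)
        r++;
    return r;  /* floor(log2(v)) for v >= 1 */
}

int main(void)
{
    unsigned int rq_depth = 4096;

    /* a depth of 4096 is encoded as the exponent 12 in the HW context */
    printf("rq_depth=%u -> encoded=%u\n", rq_depth, my_ilog2(rq_depth));
    return 0;
}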
|
  hinic_hw_io.h
     74  u16 rq_depth;    member
|
  hinic_dev.h
    100  u16 rq_depth;    member
|
  hinic_hw_dev.h
    286  u16 rq_depth;    member
    627  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
|
  hinic_ethtool.c
    557  ring->rx_pending = nic_dev->rq_depth;    in hinic_get_ringparam()
    600  new_rq_depth == nic_dev->rq_depth)    in hinic_set_ringparam()
    605  nic_dev->sq_depth, nic_dev->rq_depth,    in hinic_set_ringparam()
    609  nic_dev->rq_depth = new_rq_depth;    in hinic_set_ringparam()
|
  hinic_hw_mbox.c
   1289  ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \
   1290  (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \
   1300  if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&    in hw_ctxt_qps_param_valid()
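The hinic_hw_mbox.c macro fragment checks that the queue depths handed over through the mailbox lie between HINIC_QUEUE_MIN_DEPTH and HINIC_QUEUE_MAX_DEPTH, with the all-zero case handled separately. Below is a rough illustrative sketch of that kind of validation; the bounds and the zero-depth policy here are placeholders, not the driver's real constants or behavior.

/* Hedged sketch of range-validating queue depths from a HW context. */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_MIN_DEPTH 64    /* placeholder bound */
#define QUEUE_MAX_DEPTH 4096  /* placeholder bound */

struct hw_ctxt { unsigned int sq_depth, rq_depth; };

static bool qps_param_valid(const struct hw_ctxt *c)
{
    /* both depths zero is treated as "no data queues" in this model */
    if (!c->rq_depth && !c->sq_depth)
        return true;

    return c->rq_depth >= QUEUE_MIN_DEPTH && c->rq_depth <= QUEUE_MAX_DEPTH &&
           c->sq_depth >= QUEUE_MIN_DEPTH && c->sq_depth <= QUEUE_MAX_DEPTH;
}

int main(void)
{
    struct hw_ctxt c = { .sq_depth = 1024, .rq_depth = 8192 };

    printf("valid = %d\n", qps_param_valid(&c));  /* 0: rq_depth too large */
    return 0;
}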
|
  hinic_main.c
    425  nic_dev->rq_depth);    in hinic_open()
   1215  nic_dev->rq_depth = HINIC_RQ_DEPTH;    in nic_dev_init()
|
  hinic_hw_io.c
    293  func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);    in init_qp()
|
  hinic_port.h
    317  u32 rq_depth;    member
|
/linux/drivers/net/ethernet/fungible/funcore/
  fun_dev.c
    232  .rq_depth = areq->rq_depth,    in fun_enable_admin_queue()
    280  if (areq->rq_depth) {    in fun_enable_admin_queue()
    579  if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)    in fun_get_dev_limits()
|
  fun_dev.h
     94  u16 rq_depth;    member
|
/linux/drivers/infiniband/hw/irdma/
  user.h
    448  u32 *rq_depth, u8 *rq_shift);
    560  u32 rq_depth;    member
|
  verbs.c
    666  ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,    in irdma_setup_umode_qp()
    674  (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;    in irdma_setup_umode_qp()
    676  ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;    in irdma_setup_umode_qp()
    706  status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,    in irdma_setup_kmode_qp()
    717  kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);    in irdma_setup_kmode_qp()
    728  size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;    in irdma_setup_kmode_qp()
    746  ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;    in irdma_setup_kmode_qp()
    748  info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);    in irdma_setup_kmode_qp()
    750  ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;    in irdma_setup_kmode_qp()
    754  iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;    in irdma_setup_kmode_qp()
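The verbs.c hits show the relationship irdma keeps between the allocated depth and the user-visible queue size: rq_depth counts minimum-size WQE slots, rq_shift is the log2 of slots consumed per WQE, so rq_size = rq_depth >> rq_shift and max_recv_wr = (rq_depth - IRDMA_RQ_RSVD) >> rq_shift. The worked example below uses illustrative numbers; the reserved-slot count is a placeholder, not the driver's constant.

/* Hedged worked example of the rq_depth / rq_shift arithmetic above. */
#include <stdio.h>

#define RQ_RSVD 1  /* placeholder for IRDMA_RQ_RSVD */

int main(void)
{
    unsigned int rq_depth = 256;  /* minimum-size WQE slots allocated */
    unsigned char rq_shift = 1;   /* each WQE occupies 2 slots */

    unsigned int rq_size = rq_depth >> rq_shift;                  /* 128 */
    unsigned int max_recv_wr = (rq_depth - RQ_RSVD) >> rq_shift;  /* 127 */

    printf("rq_size=%u max_recv_wr=%u\n", rq_size, max_recv_wr);
    return 0;
}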
|
  uk.c
   1676  u32 *rq_depth, u8 *rq_shift)    in irdma_uk_calc_depth_shift_rq()  argument
   1689  *rq_shift, rq_depth);    in irdma_uk_calc_depth_shift_rq()
|
/linux/include/net/9p/
  client.h
    177  int rq_depth;    member
|
/linux/fs/9p/
  vfs_super.c
    344  ctx->rdma_opts.rq_depth = P9_RDMA_RQ_DEPTH;    in v9fs_init_fs_context()
|
  v9fs.c
    380  rdma_opts->rq_depth = result.uint_32;    in v9fs_parse_param()
|
/linux/drivers/infiniband/hw/hns/
  hns_roce_hw_v2.h
   1215  __le16 rq_depth;    member
|
/linux/drivers/infiniband/hw/efa/
  efa_verbs.c
    765  create_qp_params.rq_depth = init_attr->cap.max_recv_wr;    in efa_create_qp()
|