
Searched refs:nreq (Results 1 – 21 of 21) sorted by relevance

/linux/drivers/infiniband/hw/mlx5/
wr.c
29 int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) in mlx5r_wq_overflow() argument
35 if (likely(cur + nreq < wq->max_post)) in mlx5r_wq_overflow()
43 return cur + nreq >= wq->max_post; in mlx5r_wq_overflow()
726 int *size, void **cur_edge, int nreq, __be32 general_id, in mlx5r_begin_wqe() argument
729 if (unlikely(mlx5r_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) in mlx5r_begin_wqe()
751 void **cur_edge, int nreq) in begin_wqe() argument
753 return mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq, in begin_wqe()
760 u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode) in mlx5r_finish_wqe() argument
773 qp->sq.wqe_head[idx] = qp->sq.head + nreq; in mlx5r_finish_wqe()
815 void **cur_edge, unsigned int *idx, int nreq, in handle_psv() argument
[all …]
wr.h
94 int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
97 int *size, void **cur_edge, int nreq, __be32 general_id,
101 u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
102 void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
srq.c
419 int nreq; in mlx5_ib_post_srq_recv() local
430 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_srq_recv()
462 if (likely(nreq)) { in mlx5_ib_post_srq_recv()
463 srq->wqe_ctr += nreq; in mlx5_ib_post_srq_recv()
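The mlx5 hits above show the idiom that repeats across the RDMA drivers in these results: the post loop counts new work requests in nreq, refuses the batch if the ring cannot absorb it, and advances the producer counter once, by nreq, after the loop. A minimal sketch of the overflow test under assumed structure and field names (the real mlx5r_wq_overflow() also recounts under the completion-queue lock when this fast-path check fails):

    /*
     * Sketch of the test in mlx5r_wq_overflow(): with free-running
     * head/tail counters, head - tail is the number of WQEs still in
     * flight, and the batch fits only if that plus nreq stays below
     * the ring capacity. Types and names here are illustrative.
     */
    struct sketch_wq {
    	unsigned int head;	/* producer: WQEs posted */
    	unsigned int tail;	/* consumer: WQEs completed */
    	unsigned int max_post;	/* ring capacity */
    };

    static int sketch_wq_overflow(const struct sketch_wq *wq, int nreq)
    {
    	unsigned int cur = wq->head - wq->tail;	/* in flight, wrap-safe */

    	return cur + nreq >= wq->max_post;
    }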
/linux/drivers/infiniband/hw/mthca/
mthca_qp.c
1568 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, in mthca_wq_overflow() argument
1575 if (likely(cur + nreq < wq->max)) in mthca_wq_overflow()
1583 return cur + nreq >= wq->max; in mthca_wq_overflow()
1634 int nreq; in mthca_tavor_post_send() local
1655 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_tavor_post_send()
1656 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mthca_tavor_post_send()
1660 qp->sq.max, nreq); in mthca_tavor_post_send()
1789 cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | in mthca_tavor_post_send()
1793 if (!nreq) { in mthca_tavor_post_send()
1806 if (likely(nreq)) { in mthca_tavor_post_send()
[all …]
mthca_srq.c
493 int nreq; in mthca_tavor_post_srq_recv() local
502 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_srq_recv()
543 ++nreq; in mthca_tavor_post_srq_recv()
544 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { in mthca_tavor_post_srq_recv()
545 nreq = 0; in mthca_tavor_post_srq_recv()
561 if (likely(nreq)) { in mthca_tavor_post_srq_recv()
568 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, in mthca_tavor_post_srq_recv()
586 int nreq; in mthca_arbel_post_srq_recv() local
592 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_srq_recv()
627 if (likely(nreq)) { in mthca_arbel_post_srq_recv()
[all …]
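mthca_tavor_post_srq_recv() adds a twist: the count is packed into the doorbell word itself ((srq->srqn << 8) | nreq above), so the loop rings the doorbell and resets nreq every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB receives to keep the field from overflowing, then flushes whatever remains after the loop. An illustrative reduction of that shape, with stand-in helpers and a stand-in batch size:

    #define BATCH 256	/* stand-in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

    struct wr { struct wr *next; };

    static void post_one(struct wr *wr) { (void)wr; /* write the WQE */ }
    static void ring_doorbell(int count) { (void)count; /* MMIO write */ }

    static void sketch_post_srq_recv(struct wr *wr_list)
    {
    	struct wr *wr;
    	int nreq = 0;

    	for (wr = wr_list; wr; wr = wr->next) {
    		post_one(wr);
    		if (++nreq == BATCH) {	/* doorbell count field is narrow */
    			ring_doorbell(nreq);
    			nreq = 0;
    		}
    	}
    	if (nreq)			/* flush the remainder */
    		ring_doorbell(nreq);
    }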
/linux/drivers/infiniband/hw/mlx4/
srq.c
315 int nreq; in mlx4_ib_post_srq_recv() local
326 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_srq_recv()
358 if (likely(nreq)) { in mlx4_ib_post_srq_recv()
359 srq->wqe_ctr += nreq; in mlx4_ib_post_srq_recv()
/linux/fs/nfs/
pnfs_nfs.c
414 unsigned int nreq = 0; in pnfs_bucket_alloc_ds_commits() local
426 nreq++; in pnfs_bucket_alloc_ds_commits()
430 return nreq; in pnfs_bucket_alloc_ds_commits()
435 return nreq; in pnfs_bucket_alloc_ds_commits()
470 unsigned int nreq = 0; in pnfs_generic_commit_pagelist() local
481 nreq++; in pnfs_generic_commit_pagelist()
484 nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo); in pnfs_generic_commit_pagelist()
485 if (nreq == 0) in pnfs_generic_commit_pagelist()
/linux/drivers/usb/gadget/function/
uvc_video.c
504 unsigned int nreq; in uvc_video_prep_requests() local
521 nreq = DIV_ROUND_UP(video->interval, interval_duration); in uvc_video_prep_requests()
523 header_size = nreq * UVCG_REQUEST_HEADER_LEN; in uvc_video_prep_requests()
525 req_size = DIV_ROUND_UP(video->imagesize + header_size, nreq); in uvc_video_prep_requests()
542 video->uvc_num_requests = nreq + UVCG_REQ_MAX_ZERO_COUNT; in uvc_video_prep_requests()
543 video->reqs_per_frame = nreq; in uvc_video_prep_requests()
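The uvc_video_prep_requests() hit is pure sizing arithmetic: split one frame interval into nreq USB requests, reserve a payload header per request, and round the per-request size up so nreq requests cover the whole image. A worked example with invented numbers (the header length, intervals, and units are assumptions for illustration, not values taken from the driver):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
    #define HEADER_LEN		12	/* assumed per-request UVC payload header, bytes */

    int main(void)
    {
    	unsigned int interval = 333333;		/* frame interval, 100 ns units (~30 fps) */
    	unsigned int interval_duration = 1250;	/* one 125 us microframe, 100 ns units */
    	unsigned int imagesize = 614400;	/* 640x480 YUY2 frame, bytes */

    	unsigned int nreq = DIV_ROUND_UP(interval, interval_duration);		/* 267 */
    	unsigned int header_size = nreq * HEADER_LEN;				/* 3204 */
    	unsigned int req_size = DIV_ROUND_UP(imagesize + header_size, nreq);	/* 2314 */

    	printf("nreq=%u req_size=%u\n", nreq, req_size);
    	return 0;
    }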
/linux/drivers/crypto/inside-secure/
safexcel.c
824 int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; in safexcel_dequeue() local
864 nreq++; in safexcel_dequeue()
875 if (!nreq) in safexcel_dequeue()
880 priv->ring[ring].requests += nreq; in safexcel_dequeue()
1016 int ret, i, nreq, ndesc, tot_descs, handled = 0; in safexcel_handle_result_descriptor() local
1022 nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); in safexcel_handle_result_descriptor()
1023 nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET; in safexcel_handle_result_descriptor()
1024 nreq &= EIP197_xDR_PROC_xD_PKT_MASK; in safexcel_handle_result_descriptor()
1025 if (!nreq) in safexcel_handle_result_descriptor()
1028 for (i = 0; i < nreq; i++) { in safexcel_handle_result_descriptor()
[all …]
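In safexcel_handle_result_descriptor(), nreq is not a loop counter but a hardware-reported count: it is read from the ring's PROC_COUNT register and then isolated with a shift and mask. The shape of that extraction, with an invented register layout (only the shift-and-mask idiom mirrors the driver):

    #include <stdint.h>

    #define PROC_PKT_OFFSET	24	/* bit position of the packet-count field (assumed) */
    #define PROC_PKT_MASK	0x7f	/* width of the field (assumed) */

    /* Isolate the processed-packet count from the wider status register. */
    static unsigned int proc_pkt_count(uint32_t proc_count_reg)
    {
    	return (proc_count_reg >> PROC_PKT_OFFSET) & PROC_PKT_MASK;
    }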
/linux/drivers/crypto/intel/qat/qat_common/
qat_asym_algs.c
388 struct kpp_request *nreq = kpp_request_ctx(req); in qat_dh_generate_public_key() local
393 memcpy(nreq, req, sizeof(*req)); in qat_dh_generate_public_key()
394 kpp_request_set_tfm(nreq, ctx->ftfm); in qat_dh_generate_public_key()
395 return crypto_kpp_generate_public_key(nreq); in qat_dh_generate_public_key()
403 struct kpp_request *nreq = kpp_request_ctx(req); in qat_dh_compute_shared_secret() local
408 memcpy(nreq, req, sizeof(*req)); in qat_dh_compute_shared_secret()
409 kpp_request_set_tfm(nreq, ctx->ftfm); in qat_dh_compute_shared_secret()
410 return crypto_kpp_compute_shared_secret(nreq); in qat_dh_compute_shared_secret()
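The qat hits use nreq differently: it is a cloned request. In both qat_dh_generate_public_key() and qat_dh_compute_shared_secret(), when the hardware cannot handle the parameters, the driver copies the caller's kpp_request into space reserved in the request context, points the clone at a software fallback transform, and resubmits it. A sketch against <crypto/kpp.h>; the context layout and the fallback condition are assumptions, not QAT's exact code:

    #include <crypto/kpp.h>
    #include <linux/string.h>

    struct sketch_ctx {
    	struct crypto_kpp *ftfm;	/* software fallback transform */
    	bool need_fallback;		/* e.g. unsupported key size */
    };

    static int sketch_generate_public_key(struct kpp_request *req,
    				      struct sketch_ctx *ctx)
    {
    	if (ctx->need_fallback) {
    		/* request ctx was sized to hold the fallback's request */
    		struct kpp_request *nreq = kpp_request_ctx(req);

    		memcpy(nreq, req, sizeof(*req));
    		kpp_request_set_tfm(nreq, ctx->ftfm);
    		return crypto_kpp_generate_public_key(nreq);
    	}
    	/* ... hardware path ... */
    	return 0;
    }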
/linux/fs/nilfs2/
btree.c
1744 union nilfs_bmap_ptr_req *nreq, in nilfs_btree_prepare_convert_and_insert() argument
1771 if (nreq != NULL) { in nilfs_btree_prepare_convert_and_insert()
1772 nreq->bpr_ptr = dreq->bpr_ptr + 1; in nilfs_btree_prepare_convert_and_insert()
1773 ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat); in nilfs_btree_prepare_convert_and_insert()
1777 ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh); in nilfs_btree_prepare_convert_and_insert()
1790 nilfs_bmap_abort_alloc_ptr(btree, nreq, dat); in nilfs_btree_prepare_convert_and_insert()
1804 union nilfs_bmap_ptr_req *nreq, in nilfs_btree_commit_convert_and_insert() argument
1822 if (nreq != NULL) { in nilfs_btree_commit_convert_and_insert()
1824 nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); in nilfs_btree_commit_convert_and_insert()
1840 tmpptr = nreq->bpr_ptr; in nilfs_btree_commit_convert_and_insert()
[all …]
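The nilfs2 btree hits use nreq differently again: it is an optional second pointer-allocation request in a prepare/commit/abort protocol. The prepare step reserves the extra pointer right after dreq's and unwinds the reservations if a later step fails; the commit step finalizes whatever was prepared. A reduced sketch of the two-phase shape, with hypothetical helpers:

    struct alloc_req { unsigned long ptr; };

    static int prepare_alloc(struct alloc_req *r) { /* reserve r->ptr */ return 0; }
    static void abort_alloc(struct alloc_req *r)  { /* release the reservation */ }

    /* Reserve dreq, and nreq too when the caller needs a second block. */
    static int sketch_prepare(struct alloc_req *dreq, struct alloc_req *nreq)
    {
    	int ret = prepare_alloc(dreq);

    	if (ret)
    		return ret;
    	if (nreq) {
    		nreq->ptr = dreq->ptr + 1;	/* place it right after dreq */
    		ret = prepare_alloc(nreq);
    		if (ret) {
    			abort_alloc(dreq);	/* unwind the first reservation */
    			return ret;
    		}
    	}
    	return 0;
    }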
/linux/drivers/dma/
arm-dma350.c
196 int nreq; member
561 dmac->nreq = FIELD_GET(DMA_CFG_NUM_TRIGGER_IN, reg); in d350_probe()
563 dev_dbg(dev, "DMA-350 r%dp%d with %d channels, %d requests\n", r, p, dmac->nchan, dmac->nreq); in d350_probe()
/linux/drivers/crypto/rockchip/
rk3288_crypto.h
223 unsigned long nreq; member
rk3288_crypto_skcipher.c
322 rkc->nreq++; in rk_cipher_run()
/linux/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c
705 u32 nreq; in hns_roce_v2_post_send() local
713 nreq = 0; in hns_roce_v2_post_send()
719 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v2_post_send()
720 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in hns_roce_v2_post_send()
726 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); in hns_roce_v2_post_send()
739 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); in hns_roce_v2_post_send()
759 if (likely(nreq)) { in hns_roce_v2_post_send()
760 qp->sq.head += nreq; in hns_roce_v2_post_send()
763 if (nreq == 1 && !ret && in hns_roce_v2_post_send()
833 u32 wqe_idx, nreq, max_sge; in hns_roce_v2_post_recv() local
[all …]
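hns_roce_v2_post_send() pairs the same overflow check with power-of-two index math: because wqe_cnt is a power of two, the slot for the nreq-th request of a batch is (head + nreq) & (wqe_cnt - 1), a cheap modulo, and head itself moves only once, after the whole batch is written. Minimal sketch with assumed names:

    /* Slot for the nreq-th WQE of a batch; wqe_cnt must be a power of two. */
    static unsigned int sketch_wqe_idx(unsigned int head, unsigned int nreq,
    				   unsigned int wqe_cnt)
    {
    	return (head + nreq) & (wqe_cnt - 1);
    }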
hns_roce_qp.c
1552 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq, in hns_roce_wq_overflow() argument
1559 if (likely(cur + nreq < hr_wq->wqe_cnt)) in hns_roce_wq_overflow()
1567 return cur + nreq >= hr_wq->wqe_cnt; in hns_roce_wq_overflow()
hns_roce_device.h
1278 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
/linux/drivers/usb/isp1760/
isp1760-udc.c
841 struct isp1760_request *req, *nreq; in isp1760_ep_disable() local
868 list_for_each_entry_safe(req, nreq, &req_list, queue) { in isp1760_ep_disable()
/linux/drivers/infiniband/hw/hfi1/
tid_rdma.c
5199 struct tid_rdma_request *req, *nreq; in make_tid_rdma_ack() local
5273 nreq = ack_to_tid_req(&qp->s_ack_queue[next]); in make_tid_rdma_ack()
5274 if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg) in make_tid_rdma_ack()
/linux/drivers/nvme/host/
fc.c
89 struct nvme_request nreq; /* member
2423 op->nreq.flags |= NVME_REQ_CANCELLED; in nvme_fc_terminate_exchange()
/linux/fs/ceph/
mds_client.c
4488 struct ceph_mds_request *req, *nreq; in replay_unsafe_requests() local
4494 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) in replay_unsafe_requests()
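Finally, in the isp1760 and ceph hits, nreq is just the lookahead cursor of list_for_each_entry_safe(): the iterator prefetches the successor into nreq so the current entry can be unlinked or freed mid-walk. Sketch against <linux/list.h>:

    #include <linux/list.h>

    struct item {
    	struct list_head queue;
    };

    /* req may be deleted safely because nreq already holds its successor. */
    static void sketch_drain(struct list_head *head)
    {
    	struct item *req, *nreq;

    	list_for_each_entry_safe(req, nreq, head, queue)
    		list_del(&req->queue);
    }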