/linux/drivers/infiniband/hw/mlx5/

wr.c
    29  int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)  in mlx5r_wq_overflow() argument
    35  if (likely(cur + nreq < wq->max_post))  in mlx5r_wq_overflow()
    43  return cur + nreq >= wq->max_post;  in mlx5r_wq_overflow()
   726  int *size, void **cur_edge, int nreq, __be32 general_id,  in mlx5r_begin_wqe() argument
   729  if (unlikely(mlx5r_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))  in mlx5r_begin_wqe()
   751  void **cur_edge, int nreq)  in begin_wqe() argument
   753  return mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,  in begin_wqe()
   760  u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode)  in mlx5r_finish_wqe() argument
   773  qp->sq.wqe_head[idx] = qp->sq.head + nreq;  in mlx5r_finish_wqe()
   815  void **cur_edge, unsigned int *idx, int nreq,  in handle_psv() argument
        [all …]
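
Every wr.c hit is part of the same idiom: nreq counts the work requests already accepted in the current post call, and mlx5r_wq_overflow() checks whether accepting one more would exceed the send queue's capacity before a WQE is built for it. A minimal sketch of that check follows; the struct and field names are simplified stand-ins, not the real struct mlx5_ib_wq layout.

#include <stdbool.h>

/* Hypothetical send-queue bookkeeping, not the real struct mlx5_ib_wq. */
struct sq_state {
        unsigned int head;      /* WQEs handed to hardware so far */
        unsigned int tail;      /* WQEs known to be completed */
        unsigned int max_post;  /* queue capacity */
};

/*
 * Would accepting one more request, on top of the nreq already accepted
 * in this post call, overflow the queue?  The real driver first tests the
 * common case and only takes the CQ lock to refresh the tail (and retest)
 * when this cheap check fails.
 */
static bool wq_overflow(const struct sq_state *sq, unsigned int nreq)
{
        unsigned int cur = sq->head - sq->tail;  /* outstanding WQEs */

        return cur + nreq >= sq->max_post;
}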

wr.h
    94  int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
    97  int *size, void **cur_edge, int nreq, __be32 general_id,
   101  u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
   102  void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,

srq.c
   419  int nreq;  in mlx5_ib_post_srq_recv() local
   430  for (nreq = 0; wr; nreq++, wr = wr->next) {  in mlx5_ib_post_srq_recv()
   462  if (likely(nreq)) {  in mlx5_ib_post_srq_recv()
   463  srq->wqe_ctr += nreq;  in mlx5_ib_post_srq_recv()
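
The srq.c hits show the other common role of nreq: the receive-posting loop walks the caller's chained work requests, counts how many were accepted, and only advances the WQE counter (and rings the doorbell) if at least one was posted. A rough sketch of that loop shape, with simplified stand-in types rather than the mlx5 ones:

#include <errno.h>

/* Simplified stand-ins for the driver's work-request and SRQ types. */
struct recv_wr {
        struct recv_wr *next;   /* caller chains requests through this */
};

struct srq_state {
        unsigned int wqe_ctr;    /* software WQE counter seen by hardware */
        unsigned int free_wqes;  /* receive slots currently available */
};

static int post_srq_recv(struct srq_state *srq, struct recv_wr *wr,
                         struct recv_wr **bad_wr)
{
        unsigned int nreq;
        int err = 0;

        for (nreq = 0; wr; nreq++, wr = wr->next) {
                if (srq->free_wqes == nreq) {   /* no room for another WQE */
                        err = -ENOMEM;
                        *bad_wr = wr;           /* tell the caller where we stopped */
                        break;
                }
                /* ... build one receive WQE for wr here ... */
        }

        if (nreq) {                     /* only touch hardware state if we posted */
                srq->free_wqes -= nreq;
                srq->wqe_ctr += nreq;
                /* memory barrier + doorbell write would follow in a real driver */
        }

        return err;
}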
/linux/drivers/infiniband/hw/mthca/

mthca_qp.c
  1568  static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,  in mthca_wq_overflow() argument
  1575  if (likely(cur + nreq < wq->max))  in mthca_wq_overflow()
  1583  return cur + nreq >= wq->max;  in mthca_wq_overflow()
  1634  int nreq;  in mthca_tavor_post_send() local
  1655  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_tavor_post_send()
  1656  if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in mthca_tavor_post_send()
  1660  qp->sq.max, nreq);  in mthca_tavor_post_send()
  1789  cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |  in mthca_tavor_post_send()
  1793  if (!nreq) {  in mthca_tavor_post_send()
  1806  if (likely(nreq)) {  in mthca_tavor_post_send()
        [all …]

mthca_srq.c
   493  int nreq;  in mthca_tavor_post_srq_recv() local
   502  for (nreq = 0; wr; wr = wr->next) {  in mthca_tavor_post_srq_recv()
   543  ++nreq;  in mthca_tavor_post_srq_recv()
   544  if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {  in mthca_tavor_post_srq_recv()
   545  nreq = 0;  in mthca_tavor_post_srq_recv()
   561  if (likely(nreq)) {  in mthca_tavor_post_srq_recv()
   568  mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,  in mthca_tavor_post_srq_recv()
   586  int nreq;  in mthca_arbel_post_srq_recv() local
   592  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_arbel_post_srq_recv()
   627  if (likely(nreq)) {  in mthca_arbel_post_srq_recv()
        [all …]
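
In mthca_tavor_post_srq_recv() the hits show a batching twist: the Tavor doorbell can only describe a limited number of WQEs at once, so nreq is reset and the doorbell rung mid-loop whenever the batch fills (MTHCA_TAVOR_MAX_WQES_PER_RECV_DB), with one final doorbell for the remainder. A sketch of that shape; the constant value and helper below are assumptions, not the mthca definitions:

/* Assumed per-doorbell batch limit; mthca has its own constant for this. */
#define MAX_WQES_PER_DB 256

struct recv_wr {
        struct recv_wr *next;
};

/* Hypothetical doorbell helper standing in for mthca_write64(). */
static void ring_recv_doorbell(unsigned int first_index, unsigned int count)
{
        /* write (first_index, count) to the HCA's receive doorbell */
}

static void post_recv_batched(struct recv_wr *wr, unsigned int start_index)
{
        unsigned int ind = start_index;       /* next WQE slot to fill */
        unsigned int first_ind = start_index; /* first WQE of the current batch */
        unsigned int nreq = 0;

        for (; wr; wr = wr->next) {
                /* ... build the receive WQE at slot ind ... */
                ind++;
                nreq++;

                if (nreq == MAX_WQES_PER_DB) {
                        /* Batch full: notify hardware now and start a new batch. */
                        ring_recv_doorbell(first_ind, nreq);
                        first_ind = ind;
                        nreq = 0;
                }
        }

        if (nreq)       /* flush whatever is left in the last partial batch */
                ring_recv_doorbell(first_ind, nreq);
}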
/linux/crypto/

echainiv.c
    45  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);  in echainiv_encrypt()
    47  skcipher_request_set_sync_tfm(nreq, ctx->sknull);  in echainiv_encrypt()
    48  skcipher_request_set_callback(nreq, req->base.flags,  in echainiv_encrypt()
    50  skcipher_request_set_crypt(nreq, req->src, req->dst,  in echainiv_encrypt()
    54  err = crypto_skcipher_encrypt(nreq);  in echainiv_encrypt()

seqiv.c
    68  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);  in seqiv_aead_encrypt()
    70  skcipher_request_set_sync_tfm(nreq, ctx->sknull);  in seqiv_aead_encrypt()
    71  skcipher_request_set_callback(nreq, req->base.flags,  in seqiv_aead_encrypt()
    73  skcipher_request_set_crypt(nreq, req->src, req->dst,  in seqiv_aead_encrypt()
    77  err = crypto_skcipher_encrypt(nreq);  in seqiv_aead_encrypt()

gcm.c
   962  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);  in crypto_rfc4543_copy_src_to_dst()
   964  skcipher_request_set_sync_tfm(nreq, ctx->null);  in crypto_rfc4543_copy_src_to_dst()
   965  skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);  in crypto_rfc4543_copy_src_to_dst()
   966  skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);  in crypto_rfc4543_copy_src_to_dst()
   968  return crypto_skcipher_encrypt(nreq);  in crypto_rfc4543_copy_src_to_dst()
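
In all three crypto hits (echainiv, seqiv, rfc4543), nreq is a stack-allocated request for a null skcipher that is used purely to copy req->src into req->dst when the caller passed different scatterlists. A sketch of that helper, assuming the caller already allocated an "ecb(cipher_null)" sync skcipher (the ctx->sknull / ctx->null seen above):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/*
 * Copy len bytes from src to dst by "encrypting" them with the null
 * cipher.  sknull is assumed to be an "ecb(cipher_null)" sync skcipher
 * that the caller allocated with crypto_alloc_sync_skcipher().
 */
static int null_copy(struct crypto_sync_skcipher *sknull,
                     struct scatterlist *src, struct scatterlist *dst,
                     unsigned int len, u32 flags)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, sknull);

        skcipher_request_set_sync_tfm(nreq, sknull);
        skcipher_request_set_callback(nreq, flags, NULL, NULL);
        skcipher_request_set_crypt(nreq, src, dst, len, NULL);

        return crypto_skcipher_encrypt(nreq);
}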
/linux/drivers/infiniband/hw/mlx4/

srq.c
   315  int nreq;  in mlx4_ib_post_srq_recv() local
   326  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mlx4_ib_post_srq_recv()
   358  if (likely(nreq)) {  in mlx4_ib_post_srq_recv()
   359  srq->wqe_ctr += nreq;  in mlx4_ib_post_srq_recv()
/linux/fs/nfs/

pnfs_nfs.c
   412  unsigned int nreq = 0;  in pnfs_bucket_alloc_ds_commits() local
   424  nreq++;  in pnfs_bucket_alloc_ds_commits()
   428  return nreq;  in pnfs_bucket_alloc_ds_commits()
   433  return nreq;  in pnfs_bucket_alloc_ds_commits()
   468  unsigned int nreq = 0;  in pnfs_generic_commit_pagelist() local
   479  nreq++;  in pnfs_generic_commit_pagelist()
   482  nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);  in pnfs_generic_commit_pagelist()
   483  if (nreq == 0)  in pnfs_generic_commit_pagelist()
/linux/drivers/crypto/inside-secure/

safexcel.c
   824  int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;  in safexcel_dequeue() local
   864  nreq++;  in safexcel_dequeue()
   875  if (!nreq)  in safexcel_dequeue()
   880  priv->ring[ring].requests += nreq;  in safexcel_dequeue()
  1016  int ret, i, nreq, ndesc, tot_descs, handled = 0;  in safexcel_handle_result_descriptor() local
  1022  nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);  in safexcel_handle_result_descriptor()
  1023  nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;  in safexcel_handle_result_descriptor()
  1024  nreq &= EIP197_xDR_PROC_xD_PKT_MASK;  in safexcel_handle_result_descriptor()
  1025  if (!nreq)  in safexcel_handle_result_descriptor()
  1028  for (i = 0; i < nreq; i++) {  in safexcel_handle_result_descriptor()
        [all …]
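
In safexcel_handle_result_descriptor(), nreq is extracted from the ring's processed-count register: read the 32-bit value, shift the packet-count field down, mask it, and return early if nothing completed. A generic sketch of pulling such a field out of a memory-mapped register; the offsets and masks below are made up, not the EIP197 layout:

#include <stdint.h>

/* Made-up field layout: bits 16..22 of the register hold the count. */
#define PROC_PKT_OFFSET  16
#define PROC_PKT_MASK    0x7f

/* Stand-in for readl() on a memory-mapped register. */
static uint32_t mmio_read32(const volatile uint32_t *reg)
{
        return *reg;
}

static unsigned int processed_packets(const volatile uint32_t *proc_count_reg)
{
        uint32_t nreq = mmio_read32(proc_count_reg);

        nreq >>= PROC_PKT_OFFSET;  /* move the count field down to bit 0 */
        nreq &= PROC_PKT_MASK;     /* strip the neighbouring fields */

        return nreq;               /* 0 means nothing has completed yet */
}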
/linux/drivers/crypto/intel/qat/qat_common/

qat_algs.c
  1070  struct skcipher_request *nreq = skcipher_request_ctx(req);  in qat_alg_skcipher_xts_encrypt() local
  1076  memcpy(nreq, req, sizeof(*req));  in qat_alg_skcipher_xts_encrypt()
  1077  skcipher_request_set_tfm(nreq, ctx->ftfm);  in qat_alg_skcipher_xts_encrypt()
  1078  return crypto_skcipher_encrypt(nreq);  in qat_alg_skcipher_xts_encrypt()
  1138  struct skcipher_request *nreq = skcipher_request_ctx(req);  in qat_alg_skcipher_xts_decrypt() local
  1144  memcpy(nreq, req, sizeof(*req));  in qat_alg_skcipher_xts_decrypt()
  1145  skcipher_request_set_tfm(nreq, ctx->ftfm);  in qat_alg_skcipher_xts_decrypt()
  1146  return crypto_skcipher_decrypt(nreq);  in qat_alg_skcipher_xts_decrypt()

qat_asym_algs.c
   388  struct kpp_request *nreq = kpp_request_ctx(req);  in qat_dh_generate_public_key() local
   393  memcpy(nreq, req, sizeof(*req));  in qat_dh_generate_public_key()
   394  kpp_request_set_tfm(nreq, ctx->ftfm);  in qat_dh_generate_public_key()
   395  return crypto_kpp_generate_public_key(nreq);  in qat_dh_generate_public_key()
   403  struct kpp_request *nreq = kpp_request_ctx(req);  in qat_dh_compute_shared_secret() local
   408  memcpy(nreq, req, sizeof(*req));  in qat_dh_compute_shared_secret()
   409  kpp_request_set_tfm(nreq, ctx->ftfm);  in qat_dh_compute_shared_secret()
   410  return crypto_kpp_compute_shared_secret(nreq);  in qat_dh_compute_shared_secret()
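
In both QAT files, nreq is a shadow request stored in the original request's context area: the driver copies the caller's request into it, repoints it at a software fallback transform (ctx->ftfm), and forwards the call. A sketch of the skcipher variant of that pattern, assuming the driver sized the request context to hold a complete fallback request:

#include <crypto/skcipher.h>
#include <linux/string.h>

/*
 * Forward req to a software fallback tfm.  This assumes the driver set
 * crypto_skcipher_set_reqsize() so that skcipher_request_ctx(req) is big
 * enough to hold a complete request for the fallback.
 */
static int fallback_encrypt(struct skcipher_request *req,
                            struct crypto_skcipher *fallback_tfm)
{
        struct skcipher_request *nreq = skcipher_request_ctx(req);

        memcpy(nreq, req, sizeof(*req));              /* clone src/dst/len/iv/flags */
        skcipher_request_set_tfm(nreq, fallback_tfm); /* retarget at the fallback */

        return crypto_skcipher_encrypt(nreq);
}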
/linux/fs/nilfs2/

btree.c
  1744  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_prepare_convert_and_insert() argument
  1771  if (nreq != NULL) {  in nilfs_btree_prepare_convert_and_insert()
  1772  nreq->bpr_ptr = dreq->bpr_ptr + 1;  in nilfs_btree_prepare_convert_and_insert()
  1773  ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
  1777  ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);  in nilfs_btree_prepare_convert_and_insert()
  1790  nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
  1804  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_commit_convert_and_insert() argument
  1822  if (nreq != NULL) {  in nilfs_btree_commit_convert_and_insert()
  1824  nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);  in nilfs_btree_commit_convert_and_insert()
  1840  tmpptr = nreq  in nilfs_btree_commit_convert_and_insert()
  1745  nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head **bhp, struct nilfs_bmap_stats *stats)  nilfs_btree_prepare_convert_and_insert() argument
  1805  nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head *bh)  nilfs_btree_commit_convert_and_insert() argument
  1886  union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;  nilfs_btree_convert_and_insert() local
        [all...]
/linux/drivers/crypto/rockchip/

rk3288_crypto.h
   223  unsigned long nreq;  member

rk3288_crypto_ahash.c
   282  rkc->nreq++;  in rk_hash_run()

rk3288_crypto_skcipher.c
   322  rkc->nreq++;  in rk_cipher_run()
/linux/drivers/infiniband/hw/hns/

hns_roce_hw_v2.c
   699  u32 nreq;  in hns_roce_v2_post_send() local
   707  nreq = 0;  in hns_roce_v2_post_send()
   713  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in hns_roce_v2_post_send()
   714  if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in hns_roce_v2_post_send()
   720  wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);  in hns_roce_v2_post_send()
   733  ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);  in hns_roce_v2_post_send()
   748  if (likely(nreq)) {  in hns_roce_v2_post_send()
   749  qp->sq.head += nreq;  in hns_roce_v2_post_send()
   752  if (nreq == 1 && !ret &&  in hns_roce_v2_post_send()
   819  u32 wqe_idx, nreq, max_sge;  in hns_roce_v2_post_recv() local
        [all …]
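
The hns hits add the ring-indexing side of the posting loop: with a power-of-two queue, the slot for the nreq-th request of this call is (head + nreq) & (wqe_cnt - 1), and the bit just above the index bits, (head + nreq) >> ilog2(wqe_cnt), toggles each time the producer wraps, which is what the owner-bit expression uses (hns inverts it). A small sketch of both computations, assuming wqe_cnt is a power of two:

#include <stdint.h>

/*
 * Slot index for the nreq-th WQE of the current post call, assuming a
 * power-of-two ring of wqe_cnt entries.  head counts WQEs ever posted,
 * so it only gets masked when used as an index.
 */
static uint32_t wqe_slot(uint32_t head, uint32_t nreq, uint32_t wqe_cnt)
{
        return (head + nreq) & (wqe_cnt - 1);
}

/*
 * The bit just above the index bits toggles once per full lap over the
 * ring; hardware compares it against its own notion of the current lap
 * to tell fresh WQEs from stale ones.
 */
static uint32_t wqe_lap_bit(uint32_t head, uint32_t nreq, uint32_t wqe_cnt_log2)
{
        return ((head + nreq) >> wqe_cnt_log2) & 0x1;
}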
/linux/drivers/net/ethernet/mellanox/mlx4/

main.c
  2964  int nreq = min3(dev->caps.num_ports *  in mlx4_enable_msi_x() local
  2970  nreq = min_t(int, nreq, msi_x);  in mlx4_enable_msi_x()
  2972  entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);  in mlx4_enable_msi_x()
  2976  for (i = 0; i < nreq; ++i)  in mlx4_enable_msi_x()
  2979  nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,  in mlx4_enable_msi_x()
  2980  nreq);  in mlx4_enable_msi_x()
  2982  if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {  in mlx4_enable_msi_x()
  2987  dev->caps.num_comp_vectors = nreq - 1;  in mlx4_enable_msi_x()
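
Here nreq is the number of MSI-X vectors the driver would like; pci_enable_msix_range() may grant fewer (but at least the minimum, 2 in mlx4's case) and returns the count actually enabled, which then sizes the completion-vector pool. A sketch of that request-then-accept-less pattern, with the vector bookkeeping simplified:

#include <linux/pci.h>
#include <linux/slab.h>

#define MIN_VECS 2   /* one async EQ plus at least one completion EQ */

/*
 * Ask for "wanted" MSI-X vectors but accept anything >= MIN_VECS.
 * Returns the number of vectors actually enabled or a negative errno,
 * in which case the caller typically falls back to a legacy interrupt.
 */
static int enable_msix(struct pci_dev *pdev, int wanted)
{
        struct msix_entry *entries;
        int i, got;

        entries = kcalloc(wanted, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        for (i = 0; i < wanted; i++)
                entries[i].entry = i;   /* request consecutive table entries */

        /* May succeed with fewer than "wanted" vectors, never below MIN_VECS. */
        got = pci_enable_msix_range(pdev, entries, MIN_VECS, wanted);

        /* A real driver records entries[i].vector for request_irq() first. */
        kfree(entries);
        return got;
}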
/linux/drivers/nvme/host/

apple.c
   789  struct nvme_request *nreq = nvme_req(req);  in apple_nvme_init_request() local
   792  nreq->ctrl = &anv->ctrl;  in apple_nvme_init_request()
   793  nreq->cmd = &iod->cmd;  in apple_nvme_init_request()

fc.c
    89  struct nvme_request nreq; /*  member
  2418  op->nreq.flags |= NVME_REQ_CANCELLED;  in nvme_fc_terminate_exchange()
/linux/drivers/usb/isp1760/

isp1760-udc.c
   841  struct isp1760_request *req, *nreq;  in isp1760_ep_disable() local
   868  list_for_each_entry_safe(req, nreq, &req_list, queue) {  in isp1760_ep_disable()
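
In the isp1760 hit (and the ceph one at the end of this listing) nreq is just the spare cursor that list_for_each_entry_safe() needs: it caches the next entry so the current one can be unlinked or freed inside the loop. A minimal sketch with a hypothetical request type:

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical queued-request type, not the isp1760 one. */
struct demo_request {
        struct list_head queue;   /* linkage on the endpoint's request list */
};

/*
 * Tear down every queued request.  The _safe iterator keeps nreq pointing
 * at the following entry, so deleting and freeing req inside the body is
 * safe.
 */
static void flush_requests(struct list_head *req_list)
{
        struct demo_request *req, *nreq;

        list_for_each_entry_safe(req, nreq, req_list, queue) {
                list_del(&req->queue);
                /* a real driver would complete the request back to its owner here */
                kfree(req);
        }
}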
/linux/drivers/infiniband/sw/rdmavt/

qp.c
  2150  unsigned nreq = 0;  in rvt_post_send() local
  2177  nreq++;  in rvt_post_send()
  2181  if (nreq) {  in rvt_post_send()
  2186  if (nreq == 1 && call_send)  in rvt_post_send()
/linux/drivers/infiniband/hw/hfi1/

tid_rdma.c
  5198  struct tid_rdma_request *req, *nreq;  in make_tid_rdma_ack() local
  5272  nreq = ack_to_tid_req(&qp->s_ack_queue[next]);  in make_tid_rdma_ack()
  5273  if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)  in make_tid_rdma_ack()
/linux/fs/ceph/

mds_client.c
  4424  struct ceph_mds_request *req, *nreq;  in replay_unsafe_requests() local
  4430  list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)  in replay_unsafe_requests()