/linux/drivers/net/ethernet/chelsio/cxgb/

sge.c
   233: struct sge *sge;  [member]
   247: struct sge {  [struct]
   275: static void tx_sched_stop(struct sge *sge)  [in tx_sched_stop(), argument]
   277: struct sched *s = sge->tx_sched;  [in tx_sched_stop()]
   290: unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,  [in t1_sched_update_parms(), argument]
   293: struct sched *s = sge->tx_sched;  [in t1_sched_update_parms()]
   313: if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {  [in t1_sched_update_parms()]
   336: void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
   338: struct sched *s = sge->tx_sched;
   343: t1_sched_update_parms(sge, i, 0, 0);
   [all …]
|
sge.h
    62: struct sge;
    64: struct sge *t1_sge_create(struct adapter *, struct sge_params *);
    65: int t1_sge_configure(struct sge *, struct sge_params *);
    66: int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
    67: void t1_sge_destroy(struct sge *);
    74: void t1_sge_start(struct sge *);
    75: void t1_sge_stop(struct sge *);
    76: bool t1_sge_intr_error_handler(struct sge *sge);
    77: void t1_sge_intr_enable(struct sge *);
    78: void t1_sge_intr_disable(struct sge *);
    [all …]
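
The declarations above trace the whole lifetime of the T1 SGE object. Below is a minimal stand-alone sketch of the create/configure/start/stop/destroy ordering those prototypes imply; the stub types and bodies are invented for illustration and are not the cxgb driver's implementation.

#include <stdio.h>
#include <stdlib.h>

struct adapter { int id; };            /* stub; the real types live in the driver */
struct sge_params { int cmdq_size; };  /* stub */
struct sge { const struct adapter *adapter; int running; };

static struct sge *t1_sge_create(struct adapter *adap, struct sge_params *p)
{
    struct sge *sge = calloc(1, sizeof(*sge));  /* stub body */

    if (sge)
        sge->adapter = adap;
    (void)p;
    return sge;
}

static int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
    (void)sge; (void)p;  /* stub: the driver programs hardware here */
    return 0;
}

static void t1_sge_start(struct sge *sge)   { sge->running = 1; }
static void t1_sge_stop(struct sge *sge)    { sge->running = 0; }
static void t1_sge_destroy(struct sge *sge) { free(sge); }

int main(void)
{
    struct adapter adap = { .id = 0 };
    struct sge_params params = { .cmdq_size = 1024 };
    struct sge *sge = t1_sge_create(&adap, &params);

    if (!sge || t1_sge_configure(sge, &params) < 0) {
        t1_sge_destroy(sge);
        return 1;
    }
    t1_sge_start(sge);
    printf("SGE running: %d\n", sge->running);
    t1_sge_stop(sge);
    t1_sge_destroy(sge);
    return 0;
}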
|
/linux/include/rdma/

rdmavt_mr.h
    78: struct rvt_sge sge; /* progress state for the current SGE */  [member]
    96: rvt_put_mr(ss->sge.mr);  [in rvt_put_ss()]
    98: ss->sge = *ss->sg_list++;  [in rvt_put_ss()]
   102: static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)  [in rvt_get_sge_length(), argument]
   104: u32 len = sge->length;  [in rvt_get_sge_length()]
   108: if (len > sge->sge_length)  [in rvt_get_sge_length()]
   109: len = sge->sge_length;  [in rvt_get_sge_length()]
   117: struct rvt_sge *sge = &ss->sge;  [in rvt_update_sge(), local]
   119: sge->vaddr += length;  [in rvt_update_sge()]
   120: sge->length -= length;  [in rvt_update_sge()]
   [all …]
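
rvt_get_sge_length() and rvt_update_sge() form a clamp-and-advance pair over the current SGE. The stand-alone model below mirrors the excerpt; the clamp against the caller's requested length and the sge_length decrement are assumptions, since those lines are elided above.

#include <stdint.h>
#include <stdio.h>

/* Field names mirror struct rvt_sge; the types are local models. */
struct model_sge {
    uint64_t vaddr;       /* current address within the SGE */
    uint32_t length;      /* bytes left in the current MR segment */
    uint32_t sge_length;  /* bytes left in the SGE as a whole */
};

static uint32_t get_sge_length(const struct model_sge *sge, uint32_t length)
{
    uint32_t len = sge->length;

    if (len > length)           /* assumed: never exceed the request */
        len = length;
    if (len > sge->sge_length)  /* shown: nor what the SGE still holds */
        len = sge->sge_length;
    return len;
}

static void update_sge(struct model_sge *sge, uint32_t length)
{
    sge->vaddr += length;       /* shown: advance progress state */
    sge->length -= length;
    sge->sge_length -= length;  /* assumed: keep both counters in step */
}

int main(void)
{
    struct model_sge sge = { .vaddr = 0x1000, .length = 4096, .sge_length = 1500 };
    uint32_t n = get_sge_length(&sge, 2048);

    update_sge(&sge, n);
    printf("took %u bytes, vaddr now 0x%llx\n",
           n, (unsigned long long)sge.vaddr);
    return 0;
}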
|
/linux/drivers/scsi/esas2r/

esas2r_io.c
   222: if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {  [in esas2r_build_sg_list_sge()]
   237: sgelen = (u8)((u8 *)sgc->sge.a64.curr  [in esas2r_build_sg_list_sge()]
   238: - (u8 *)sgc->sge.a64.last);  [in esas2r_build_sg_list_sge()]
   244: memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);  [in esas2r_build_sg_list_sge()]
   247: sgc->sge.a64.curr =  [in esas2r_build_sg_list_sge()]
   252: sgc->sge.a64.limit =  [in esas2r_build_sg_list_sge()]
   257: sgc->sge.a64.last->length = cpu_to_le32(  [in esas2r_build_sg_list_sge()]
   259: sgc->sge.a64.last->address =  [in esas2r_build_sg_list_sge()]
   268: if (sgc->sge.a64.chain) {  [in esas2r_build_sg_list_sge()]
   269: sgc->sge.a64.chain->length |=  [in esas2r_build_sg_list_sge()]
   [all …]
|
/linux/drivers/infiniband/sw/rdmavt/

trace_mr.h
    82: TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge),
    83: TP_ARGS(sge, isge),
    85: RDI_DEV_ENTRY(ib_to_rvt(sge->mr->pd->device))
    87: __field(struct rvt_sge *, sge)
   100: RDI_DEV_ASSIGN(ib_to_rvt(sge->mr->pd->device));
   101: __entry->mr = sge->mr;
   102: __entry->sge = sge;
   104: __entry->vaddr = sge->vaddr;
   106: __entry->lkey = sge->mr->lkey;
   107: __entry->sge_length = sge->sge_length;
   [all …]
|
/linux/drivers/net/ethernet/huawei/hinic/

hinic_common.c
    55: void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len)  [in hinic_set_sge(), argument]
    57: sge->hi_addr = upper_32_bits(addr);  [in hinic_set_sge()]
    58: sge->lo_addr = lower_32_bits(addr);  [in hinic_set_sge()]
    59: sge->len = len;  [in hinic_set_sge()]
    68: dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)  [in hinic_sge_to_dma(), argument]
    70: return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr);  [in hinic_sge_to_dma()]
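
hinic_set_sge() and hinic_sge_to_dma() are exact inverses: a 64-bit DMA address is split into two 32-bit descriptor fields and later reassembled. A stand-alone round-trip of that split, with model types standing in for struct hinic_sge and dma_addr_t:

#include <stdint.h>
#include <stdio.h>

struct model_hinic_sge {
    uint32_t hi_addr;
    uint32_t lo_addr;
    uint32_t len;
};

static void set_sge(struct model_hinic_sge *sge, uint64_t addr, uint32_t len)
{
    sge->hi_addr = (uint32_t)(addr >> 32);  /* upper_32_bits(addr) */
    sge->lo_addr = (uint32_t)addr;          /* lower_32_bits(addr) */
    sge->len = len;
}

static uint64_t sge_to_dma(const struct model_hinic_sge *sge)
{
    return ((uint64_t)sge->hi_addr << 32) | sge->lo_addr;
}

int main(void)
{
    struct model_hinic_sge sge;
    uint64_t addr = 0x0000123480001000ULL;

    set_sge(&sge, addr, 2048);
    printf("round-trip ok: %d\n", sge_to_dma(&sge) == addr);
    return 0;
}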
|
hinic_rx.c
   198: struct hinic_sge sge;  [in rx_alloc_pkts(), local]
   215: hinic_set_sge(&sge, dma_addr, skb->len);  [in rx_alloc_pkts()]
   224: hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);  [in rx_alloc_pkts()]
   247: struct hinic_sge sge;  [in free_all_rx_skbs(), local]
   254: hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);  [in free_all_rx_skbs()]
   258: rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));  [in free_all_rx_skbs()]
   277: struct hinic_sge sge;  [in rx_recv_jumbo_pkt(), local]
   286: hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);  [in rx_recv_jumbo_pkt()]
   288: rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));  [in rx_recv_jumbo_pkt()]
   365: struct hinic_sge sge;  [in rxq_recv(), local]
   [all …]
|
/linux/drivers/infiniband/sw/siw/

siw_qp_tx.c
    37: static struct page *siw_get_page(struct siw_mem *mem, struct siw_sge *sge,  [in siw_get_page(), argument]
    41: return siw_get_upage(mem->umem, sge->laddr + offset);  [in siw_get_page()]
    43: return siw_get_pblpage(mem, sge->laddr + offset, pbl_idx);  [in siw_get_page()]
    52: struct siw_sge *sge = &wqe->sqe.sge[0];  [in siw_try_1seg(), local]
    53: u32 bytes = sge->length;  [in siw_try_1seg()]
    62: memcpy(paddr, &wqe->sqe.sge[1], bytes);  [in siw_try_1seg()]
    68: memcpy(paddr, ib_virt_dma_to_ptr(sge->laddr), bytes);  [in siw_try_1seg()]
    70: if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),  [in siw_try_1seg()]
    74: unsigned int off = sge->laddr & ~PAGE_MASK;  [in siw_try_1seg()]
    79: p = siw_get_page(mem, sge, 0, &pbl_idx);  [in siw_try_1seg()]
    [all …]
|
siw_qp_rx.c
   176: srx->ddp_stag = wqe->sqe.sge[0].lkey;  [in siw_rresp_check_ntoh()]
   177: srx->ddp_to = wqe->sqe.sge[0].laddr;  [in siw_rresp_check_ntoh()]
   367: wqe->rqe.sge[i].laddr = rqe->sge[i].laddr;  [in siw_rqe_get()]
   368: wqe->rqe.sge[i].lkey = rqe->sge[i].lkey;  [in siw_rqe_get()]
   369: wqe->rqe.sge[i].length = rqe->sge[i].length;  [in siw_rqe_get()]
   370: wqe->bytes += wqe->rqe.sge[i].length;  [in siw_rqe_get()]
   471: struct siw_sge *sge;  [in siw_proc_send(), local]
   474: sge = &wqe->rqe.sge[frx->sge_idx];  [in siw_proc_send()]
   476: if (!sge->length) {  [in siw_proc_send()]
   483: sge_bytes = min(data_bytes, sge->length - frx->sge_off);  [in siw_proc_send()]
   [all …]
|
siw_mem.c
   190: int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge, struct siw_mem *mem[],  [in siw_check_sge(), argument]
   197: if (len + off > sge->length) {  [in siw_check_sge()]
   202: new = siw_mem_id2obj(sdev, sge->lkey >> 8);  [in siw_check_sge()]
   204: siw_dbg_pd(pd, "STag unknown: 0x%08x\n", sge->lkey);  [in siw_check_sge()]
   211: if (unlikely((*mem)->stag != sge->lkey)) {  [in siw_check_sge()]
   212: siw_dbg_mem((*mem), "STag mismatch: 0x%08x\n", sge->lkey);  [in siw_check_sge()]
   216: rv = siw_check_mem(pd, *mem, sge->laddr + off, perms, len);  [in siw_check_sge()]
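
siw_check_sge() validates one SGE before use: the requested byte range must fit inside the SGE, the memory object is found by STag index (lkey >> 8), and the stored STag must equal the full lkey. The sketch below models just those three checks; the lookup table and error values are invented stand-ins for siw_mem_id2obj() and the driver's real error handling.

#include <stdint.h>
#include <stdio.h>

#define NUM_MEM 4

struct model_mem { uint32_t stag; };
struct model_sge { uint64_t laddr; uint32_t length; uint32_t lkey; };

/* toy id -> object table standing in for siw_mem_id2obj() */
static struct model_mem mem_table[NUM_MEM];

static struct model_mem *mem_id2obj(uint32_t id)
{
    return id < NUM_MEM ? &mem_table[id] : NULL;
}

static int check_sge(const struct model_sge *sge, uint32_t off, uint32_t len)
{
    struct model_mem *mem;

    if ((uint64_t)len + off > sge->length)  /* range exceeds the SGE */
        return -22;                         /* model stand-in for -EINVAL */
    mem = mem_id2obj(sge->lkey >> 8);       /* STag index lookup */
    if (!mem || mem->stag != sge->lkey)     /* unknown or mismatched STag */
        return -13;                         /* model stand-in for -EACCES */
    return 0;
}

int main(void)
{
    struct model_sge sge = { .laddr = 0, .length = 4096, .lkey = (2 << 8) | 1 };

    mem_table[2].stag = sge.lkey;
    printf("in range: %d, out of range: %d\n",
           check_sge(&sge, 0, 4096), check_sge(&sge, 1, 4096));
    return 0;
}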
|
/linux/drivers/infiniband/ulp/iser/

iser_memory.c
   128: reg->sge.lkey = device->pd->local_dma_lkey;  [in iser_reg_dma()]
   138: reg->sge.addr = sg_dma_address(&sg[0]);  [in iser_reg_dma()]
   139: reg->sge.length = sg_dma_len(&sg[0]);  [in iser_reg_dma()]
   142: " length=0x%x\n", reg->sge.lkey, reg->rkey,  [in iser_reg_dma()]
   143: reg->sge.addr, reg->sge.length);  [in iser_reg_dma()]
   293: sig_reg->sge.lkey = mr->lkey;  [in iser_reg_sig_mr()]
   295: sig_reg->sge.addr = mr->iova;  [in iser_reg_sig_mr()]
   296: sig_reg->sge.length = mr->length;  [in iser_reg_sig_mr()]
   299: sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,  [in iser_reg_sig_mr()]
   300: sig_reg->sge.length);  [in iser_reg_sig_mr()]
   [all …]
|
/linux/drivers/net/ethernet/chelsio/cxgb4/

cxgb4_uld.c
   115: struct sge *s = &adap->sge;  [in alloc_uld_rxqs()]
   176: struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];  [in setup_sge_queues_uld()]
   185: struct sge *s = &adap->sge;  [in setup_sge_queues_uld()]
   214: struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];  [in free_sge_queues_uld()]
   217: struct sge *s = &adap->sge;  [in free_sge_queues_uld()]
   240: struct sge *s = &adap->sge;  [in cfg_queues_uld()]
   308: adap->sge.uld_rxq_info[uld_type] = rxq_info;  [in cfg_queues_uld()]
   315: struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];  [in free_queues_uld()]
   317: adap->sge.uld_rxq_info[uld_type] = NULL;  [in free_queues_uld()]
   326: struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];  [in request_msix_queue_irqs_uld()]
   [all …]
|
sge.c
   151: struct sge *s = &adapter->sge;  [in fl_mtu_bufsize()]
   238: const struct sge *s = &adapter->sge;  [in fl_starving()]
   401: struct sge *s = &adapter->sge;  [in get_buf_size()]
   538: struct sge *s = &adap->sge;  [in refill_fl()]
   547: if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))  [in refill_fl()]
   625: set_bit(q->cntxt_id - adap->sge.egr_start,  [in refill_fl()]
   626: adap->sge.starving_fl);  [in refill_fl()]
   852: to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;  [in cxgb4_write_sgl()]
   866: unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;  [in cxgb4_write_sgl()]
   869: memcpy(sgl->sge, buf, part0);  [in cxgb4_write_sgl()]
   [all …]
|
/linux/drivers/infiniband/core/

rw.c
   119: reg->sge.addr = reg->mr->iova;  [in rdma_rw_init_one_mr()]
   120: reg->sge.length = reg->mr->length;  [in rdma_rw_init_one_mr()]
   159: reg->wr.wr.sg_list = &reg->sge;  [in rdma_rw_init_mr_wrs()]
   173: remote_addr += reg->sge.length;  [in rdma_rw_init_mr_wrs()]
   201: struct ib_sge *sge;  [in rdma_rw_init_map_wrs(), local]
   206: ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);  [in rdma_rw_init_map_wrs()]
   225: rdma_wr->wr.sg_list = sge;  [in rdma_rw_init_map_wrs()]
   228: sge->addr = sg_dma_address(sg) + offset;  [in rdma_rw_init_map_wrs()]
   229: sge->length = sg_dma_len(sg) - offset;  [in rdma_rw_init_map_wrs()]
   230: sge->lkey = qp->pd->local_dma_lkey;  [in rdma_rw_init_map_wrs()]
   [all …]
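
In rdma_rw_init_map_wrs(), each DMA-mapped scatterlist entry becomes one ib_sge, all sharing the PD's local DMA lkey. The stand-alone model below mirrors that fill loop with local types in place of the scatterlist and ib_sge; treating the caller's byte offset as consumed by the first entry only is an assumption, since the excerpt shows a single iteration.

#include <stdint.h>
#include <stdio.h>

struct model_sg  { uint64_t dma_addr; uint32_t dma_len; };
struct model_sge { uint64_t addr; uint32_t length; uint32_t lkey; };

static void map_sges(const struct model_sg *sg, int sg_cnt, uint32_t offset,
                     uint32_t lkey, struct model_sge *sge)
{
    int i;

    for (i = 0; i < sg_cnt; i++) {
        sge[i].addr = sg[i].dma_addr + offset;
        sge[i].length = sg[i].dma_len - offset;
        sge[i].lkey = lkey;
        offset = 0;  /* assumed: only the first entry is offset-adjusted */
    }
}

int main(void)
{
    struct model_sg sg[2] = { { 0x10000, 4096 }, { 0x20000, 4096 } };
    struct model_sge sge[2];

    map_sges(sg, 2, 512, 0x1234, sge);
    printf("sge[0]: addr=0x%llx len=%u lkey=0x%x\n",
           (unsigned long long)sge[0].addr, sge[0].length, sge[0].lkey);
    return 0;
}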
|
/linux/drivers/net/ethernet/chelsio/cxgb4vf/

sge.c
   264: const struct sge *s = &adapter->sge;  [in fl_starving()]
   325: for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {  [in unmap_sgl()]
   457: const struct sge *s = &adapter->sge;  [in get_buf_size()]
   607: struct sge *s = &adapter->sge;  [in refill_fl()]
   715: set_bit(fl->cntxt_id, adapter->sge.starving_fl);  [in refill_fl()]
   930: to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;  [in write_sgl()]
   944: unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;  [in write_sgl()]
   947: memcpy(sgl->sge, buf, part0);  [in write_sgl()]
  1195: txq = &adapter->sge.ethtxq[pi->first_qset + qidx];  [in t4vf_eth_xmit()]
  1568: struct sge *s = &adapter->sge;  [in do_gro()]
   [all …]
|
cxgb4vf_main.c
   382: struct sge *s = &adapter->sge;  [in request_msix_queue_irqs()]
   420: struct sge *s = &adapter->sge;  [in free_msix_queue_irqs()]
   453: struct sge *s = &adapter->sge;  [in enable_rx()]
   476: struct sge *s = &adapter->sge;  [in quiesce_rx()]
   534: struct sge *s = &adapter->sge;  [in fwevtq_handler()]
   590: struct sge *s = &adapter->sge;  [in setup_sge_queues()]
   705: struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];  [in setup_rss()]
  1019: static int closest_timer(const struct sge *s, int us)  [in closest_timer()]
  1035: static int closest_thres(const struct sge *s, int thres)  [in closest_thres()]
  1060: ? adapter->sge.timer_val[timer_idx]  [in qtimer_val()]
   [all …]
|
adapter.h
   271: struct sge {  [struct]
   350: #define for_each_ethrxq(sge, iter) \  [argument]
   351: for (iter = 0; iter < (sge)->ethqsets; iter++)
   384: struct sge sge;  [member]
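
for_each_ethrxq() is a thin iteration macro over the adapter's Ethernet RX queue sets. A stand-alone model, using the macro body exactly as shown above; only the ethqsets field of struct sge is modeled:

#include <stdio.h>

struct model_sge { int ethqsets; };

#define for_each_ethrxq(sge, iter) \
    for (iter = 0; iter < (sge)->ethqsets; iter++)

int main(void)
{
    struct model_sge s = { .ethqsets = 4 };
    int i;

    for_each_ethrxq(&s, i)
        printf("rx queue set %d\n", i);
    return 0;
}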
|
/linux/net/sunrpc/xprtrdma/

rpc_rdma.c
   537: struct ib_sge *sge;  [in rpcrdma_sendctx_unmap(), local]
   546: for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;  [in rpcrdma_sendctx_unmap()]
   547: ++sge, --sc->sc_unmap_count)  [in rpcrdma_sendctx_unmap()]
   548: ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length,  [in rpcrdma_sendctx_unmap()]
   561: struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];  [in rpcrdma_prepare_hdr_sge(), local]
   563: sge->addr = rdmab_addr(rb);  [in rpcrdma_prepare_hdr_sge()]
   564: sge->length = len;  [in rpcrdma_prepare_hdr_sge()]
   565: sge->lkey = rdmab_lkey(rb);  [in rpcrdma_prepare_hdr_sge()]
   567: ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,  [in rpcrdma_prepare_hdr_sge()]
   578: struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];  [in rpcrdma_prepare_head_iov(), local]
   [all …]
|
/linux/net/ipv4/

tcp_bpf.c
    36: struct scatterlist *sge;  [in bpf_tcp_ingress(), local]
    49: sge = sk_msg_elem(msg, i);  [in bpf_tcp_ingress()]
    50: size = (apply && apply_bytes < sge->length) ?  [in bpf_tcp_ingress()]
    51: apply_bytes : sge->length;  [in bpf_tcp_ingress()]
    62: if (sge->length)  [in bpf_tcp_ingress()]
    69: if (sge->length)  [in bpf_tcp_ingress()]
    95: struct scatterlist *sge;  [in tcp_bpf_push(), local]
   104: sge = sk_msg_elem(msg, msg->sg.start);  [in tcp_bpf_push()]
   105: size = (apply && apply_bytes < sge->length) ?  [in tcp_bpf_push()]
   106: apply_bytes : sge->length;  [in tcp_bpf_push()]
   [all …]
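
Both bpf_tcp_ingress() and tcp_bpf_push() apply the same clamp: when a byte budget (apply_bytes) is in force and smaller than the current scatterlist element, only the budget is consumed; otherwise the whole element is. Reduced to a stand-alone helper:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the ternary shown in both excerpts above. */
static uint32_t clamp_size(int apply, uint32_t apply_bytes, uint32_t sge_length)
{
    return (apply && apply_bytes < sge_length) ? apply_bytes : sge_length;
}

int main(void)
{
    printf("%u\n", clamp_size(1, 100, 4096));  /* budget in force: 100  */
    printf("%u\n", clamp_size(0, 100, 4096));  /* no budget:      4096 */
    return 0;
}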
|
/linux/drivers/infiniband/sw/rxe/

rxe_mr.c
   344: struct rxe_sge *sge = &dma->sge[dma->cur_sge];  [in copy_data(), local]
   359: if (sge->length && (offset < sge->length)) {  [in copy_data()]
   360: mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL);  [in copy_data()]
   370: if (offset >= sge->length) {  [in copy_data()]
   375: sge++;  [in copy_data()]
   384: if (sge->length) {  [in copy_data()]
   385: mr = lookup_mr(pd, access, sge->lkey,  [in copy_data()]
   396: if (bytes > sge->length - offset)  [in copy_data()]
   397: bytes = sge->length - offset;  [in copy_data()]
   400: iova = sge->addr + offset;  [in copy_data()]
   [all …]
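
copy_data() walks the gather list with a (cur_sge, offset) cursor: exhausted entries are skipped, and each copy is clamped to what the current SGE still holds. The model below captures that cursor logic only; MR lookup and the actual memory copy are elided, and the zero-length-entry handling is simplified.

#include <stdint.h>
#include <stdio.h>

struct model_sge { uint64_t addr; uint32_t length; };
struct model_dma {
    struct model_sge sge[4];
    int num_sge;
    int cur_sge;
    uint32_t sge_offset;
};

static int next_chunk(struct model_dma *dma, uint32_t want,
                      uint64_t *iova, uint32_t *bytes)
{
    /* skip SGEs the cursor has already consumed (or that are empty) */
    while (dma->cur_sge < dma->num_sge &&
           dma->sge_offset >= dma->sge[dma->cur_sge].length) {
        dma->cur_sge++;
        dma->sge_offset = 0;
    }
    if (dma->cur_sge >= dma->num_sge)
        return -1;  /* ran past the gather list */

    *bytes = dma->sge[dma->cur_sge].length - dma->sge_offset;
    if (*bytes > want)
        *bytes = want;
    *iova = dma->sge[dma->cur_sge].addr + dma->sge_offset;
    dma->sge_offset += *bytes;
    return 0;
}

int main(void)
{
    struct model_dma dma = {
        .sge = { { 0x1000, 100 }, { 0x2000, 50 } },
        .num_sge = 2,
    };
    uint64_t iova;
    uint32_t n;

    while (next_chunk(&dma, 64, &iova, &n) == 0)
        printf("copy %u bytes at 0x%llx\n", n, (unsigned long long)iova);
    return 0;
}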
|
/linux/fs/smb/client/

smbdirect.c
    48: struct ib_sge *sge;  [member]
   510: request->sge[i].addr,  [in send_done()]
   511: request->sge[i].length,  [in send_done()]
   683: response->sge.addr,  [in recv_done()]
   684: response->sge.length,  [in recv_done()]
   980: request->sge[0].addr = ib_dma_map_single(  [in smbd_post_send_negotiate_req()]
   983: if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {  [in smbd_post_send_negotiate_req()]
   988: request->sge[0].length = sizeof(*packet);  [in smbd_post_send_negotiate_req()]
   989: request->sge[0].lkey = sc->ib.pd->local_dma_lkey;  [in smbd_post_send_negotiate_req()]
   992: sc->ib.dev, request->sge[0].addr,  [in smbd_post_send_negotiate_req()]
   [all …]
|
/linux/drivers/scsi/qedf/

drv_scsi_fw_funcs.c
    35: ctx_data_desc->sge[sge_index].sge_addr.lo = val;  [in init_scsi_sgl_context()]
    37: ctx_data_desc->sge[sge_index].sge_addr.hi = val;  [in init_scsi_sgl_context()]
    39: ctx_data_desc->sge[sge_index].sge_len = val;  [in init_scsi_sgl_context()]
|
/linux/drivers/nvme/target/

rdma.c
    47: struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];  [member]
   255: struct ib_sge *sge;  [in nvmet_rdma_free_inline_pages(), local]
   262: sge = &c->sge[1];  [in nvmet_rdma_free_inline_pages()]
   264: for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {  [in nvmet_rdma_free_inline_pages()]
   265: if (sge->length)  [in nvmet_rdma_free_inline_pages()]
   266: ib_dma_unmap_page(ndev->device, sge->addr,  [in nvmet_rdma_free_inline_pages()]
   267: sge->length, DMA_FROM_DEVICE);  [in nvmet_rdma_free_inline_pages()]
   277: struct ib_sge *sge;  [in nvmet_rdma_alloc_inline_pages(), local]
   287: sge = &c->sge[1];  [in nvmet_rdma_alloc_inline_pages()]
   290: for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {  [in nvmet_rdma_alloc_inline_pages()]
   [all …]
|
/linux/net/9p/

trans_rdma.c
   275: struct ib_sge sge;  [in post_recv(), local]
   286: sge.addr = c->busa;  [in post_recv()]
   287: sge.length = client->msize;  [in post_recv()]
   288: sge.lkey = rdma->pd->local_dma_lkey;  [in post_recv()]
   292: wr.sg_list = &sge;  [in post_recv()]
   310: struct ib_sge sge;  [in rdma_request(), local]
   383: sge.addr = c->busa;  [in rdma_request()]
   384: sge.length = c->req->tc.size;  [in rdma_request()]
   385: sge.lkey = rdma->pd->local_dma_lkey;  [in rdma_request()]
   391: wr.sg_list = &sge;  [in rdma_request()]
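
post_recv() in trans_rdma.c posts a receive with exactly one SGE spanning the whole message buffer (c->busa, client->msize, the PD's local DMA lkey). A stand-alone model of that single-SGE fill; the structs and the post stub are simplified stand-ins for ib_sge, ib_recv_wr, and ib_post_recv(), and num_sge = 1 is inferred rather than shown in the excerpt.

#include <stdint.h>
#include <stdio.h>

struct model_sge { uint64_t addr; uint32_t length; uint32_t lkey; };
struct model_recv_wr {
    const struct model_sge *sg_list;
    int num_sge;
};

static int post_recv(const struct model_recv_wr *wr)
{
    printf("posting recv: %d sge, %u bytes at 0x%llx\n", wr->num_sge,
           wr->sg_list[0].length, (unsigned long long)wr->sg_list[0].addr);
    return 0;
}

int main(void)
{
    struct model_sge sge = {
        .addr = 0x10000,  /* c->busa: DMA address of the receive buffer */
        .length = 8192,   /* client->msize: whole-message capacity */
        .lkey = 0x42,     /* rdma->pd->local_dma_lkey */
    };
    struct model_recv_wr wr = { .sg_list = &sge, .num_sge = 1 };

    return post_recv(&wr);
}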
|
/linux/drivers/scsi/elx/libefc_sli/

sli4.c
  1422: struct sli4_sge *sge = sgl->virt;  [in sli_els_request64_wqe(), local]
  1437: bptr->u.data.low = sge[0].buffer_address_low;  [in sli_els_request64_wqe()]
  1438: bptr->u.data.high = sge[0].buffer_address_high;  [in sli_els_request64_wqe()]
  1567: struct sli4_sge *sge = NULL;  [in sli_fcp_icmnd64_wqe(), local]
  1578: sge = sgl->virt;  [in sli_fcp_icmnd64_wqe()]
  1586: (le32_to_cpu(sge[0].buffer_length) &  [in sli_fcp_icmnd64_wqe()]
  1589: bptr->u.data.low = sge[0].buffer_address_low;  [in sli_fcp_icmnd64_wqe()]
  1590: bptr->u.data.high = sge[0].buffer_address_high;  [in sli_fcp_icmnd64_wqe()]
  1602: len = le32_to_cpu(sge[0].buffer_length) +  [in sli_fcp_icmnd64_wqe()]
  1603: le32_to_cpu(sge[1].buffer_length);  [in sli_fcp_icmnd64_wqe()]
  [all …]
|