| /linux/drivers/virt/ |
| fsl_hypervisor.c |
|   153  struct fh_sg_list *sg_list = NULL;  in ioctl_memcpy() local
|   246  sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));  in ioctl_memcpy()
|   263  sg_list[0].source = page_to_phys(pages[0]) + lb_offset;  in ioctl_memcpy()
|   264  sg_list[0].target = param.remote_paddr;  in ioctl_memcpy()
|   266  sg_list[0].source = param.remote_paddr;  in ioctl_memcpy()
|   267  sg_list[0].target = page_to_phys(pages[0]) + lb_offset;  in ioctl_memcpy()
|   269  sg_list[0].size = min_t(uint64_t, param.count, PAGE_SIZE - lb_offset);  in ioctl_memcpy()
|   271  remote_paddr = param.remote_paddr + sg_list[0].size;  in ioctl_memcpy()
|   272  count = param.count - sg_list[0].size;  in ioctl_memcpy()
|   277  sg_list[i].source = page_to_phys(pages[i]);  in ioctl_memcpy()
|   [all …]
|
| /linux/drivers/ufs/core/ |
| ufs_bsg.c |
|    57  sg_copy_to_buffer(job->request_payload.sg_list,  in ufs_bsg_alloc_desc_buffer()
|    73  struct scatterlist *sg_list = NULL;  in ufs_bsg_exec_advanced_rpmb_req() local
|   111  sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);  in ufs_bsg_exec_advanced_rpmb_req()
|   114  sg_list = payload->sg_list;  in ufs_bsg_exec_advanced_rpmb_req()
|   120  &rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir);  in ufs_bsg_exec_advanced_rpmb_req()
|   123  dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);  in ufs_bsg_exec_advanced_rpmb_req()
|   166  sg_copy_from_buffer(job->request_payload.sg_list,  in ufs_bsg_request()
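A recurring pattern in these BSG entries (ufs_bsg.c, qla_bsg.c, ql4_bsg.c below) is bouncing the job's scatter/gather payload through a linear kernel buffer with sg_copy_to_buffer()/sg_copy_from_buffer(). The following is a minimal sketch of that pattern, not any driver's actual code; the helper name and error handling are illustrative.

    #include <linux/bsg-lib.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    /* Hypothetical helper: gather a bsg request payload into a flat buffer,
     * let device-specific code consume it, then scatter a reply back out. */
    static int example_bsg_bounce(struct bsg_job *job, void *buf, size_t len)
    {
            size_t copied;

            /* Gather the caller's S/G request into the linear buffer. */
            copied = sg_copy_to_buffer(job->request_payload.sg_list,
                                       job->request_payload.sg_cnt,
                                       buf, len);
            if (!copied)
                    return -EINVAL;

            /* ... hand 'buf' to firmware / the adapter here ... */

            /* Scatter the reply back out to the caller's S/G list. */
            sg_copy_from_buffer(job->reply_payload.sg_list,
                                job->reply_payload.sg_cnt,
                                buf, len);
            return 0;
    }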
|
| /linux/drivers/scsi/qla2xxx/ |
| qla_bsg.c |
|    59  bsg_job->request_payload.sg_list,  in qla2x00_bsg_sp_free()
|    64  bsg_job->reply_payload.sg_list,  in qla2x00_bsg_sp_free()
|    74  dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,  in qla2x00_bsg_sp_free()
|    77  dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,  in qla2x00_bsg_sp_free()
|   213  bsg_job->reply_payload.sg_list,  in qla24xx_proc_fcp_prio_cfg_cmd()
|   240  sg_copy_to_buffer(bsg_job->request_payload.sg_list,  in qla24xx_proc_fcp_prio_cfg_cmd()
|   376  dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,  in qla2x00_process_els()
|   379  dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,  in qla2x00_process_els()
|   385  rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,  in qla2x00_process_els()
|   388  dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,  in qla2x00_process_els()
|   [all …]
|
| /linux/fs/smb/smbdirect/ |
| rw.c |
|    31  struct scatterlist *sg_list,  in smbdirect_connection_rdma_get_sg_list() argument
|    51  if (!sg_list)  in smbdirect_connection_rdma_get_sg_list()
|    53  sg_set_page(sg_list, page, len, offset);  in smbdirect_connection_rdma_get_sg_list()
|    54  sg_list = sg_next(sg_list);  in smbdirect_connection_rdma_get_sg_list()
|   170  msg = kzalloc_flex(*msg, sg_list, SG_CHUNK_SIZE,  in smbdirect_connection_rdma_xmit()
|   186  msg->sgt.sgl = &msg->sg_list[0];  in smbdirect_connection_rdma_xmit()
|   189  msg->sg_list,  in smbdirect_connection_rdma_xmit()
|
| /linux/drivers/tee/amdtee/ |
| call.c |
|   315  cmd->sg_list.count = count;  in handle_map_shmem()
|   320  cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr);  in handle_map_shmem()
|   321  cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr);  in handle_map_shmem()
|   322  cmd->sg_list.buf[i].size = start[i].size;  in handle_map_shmem()
|   323  cmd->sg_list.size += cmd->sg_list.buf[i].size;  in handle_map_shmem()
|   326  cmd->sg_list.buf[i].hi_addr);  in handle_map_shmem()
|   328  cmd->sg_list.buf[i].low_addr);  in handle_map_shmem()
|   329  pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size);  in handle_map_shmem()
|   330  pr_debug("list size = 0x%x\n", cmd->sg_list.size);  in handle_map_shmem()
|
| /linux/drivers/infiniband/sw/rdmavt/ |
| rc.c |
|   164  ss->sge = wqe->sg_list[0];  in rvt_restart_sge()
|   165  ss->sg_list = wqe->sg_list + 1;  in rvt_restart_sge()
|
| /linux/include/rdma/ |
| rdmavt_mr.h |
|    77  struct rvt_sge *sg_list; /* next SGE to be used if any */  member
|    98  ss->sge = *ss->sg_list++;  in rvt_put_ss()
|   126  *sge = *ss->sg_list++;  in rvt_update_sge()
|
| /linux/drivers/dma/ |
| imx-dma.c |
|   160  struct scatterlist *sg_list;  member
|   789  kfree(imxdmac->sg_list);  in imxdma_free_chan_resources()
|   790  imxdmac->sg_list = NULL;  in imxdma_free_chan_resources()
|   866  kfree(imxdmac->sg_list);  in imxdma_prep_dma_cyclic()
|   868  imxdmac->sg_list = kzalloc_objs(struct scatterlist, periods + 1,  in imxdma_prep_dma_cyclic()
|   870  if (!imxdmac->sg_list)  in imxdma_prep_dma_cyclic()
|   873  sg_init_table(imxdmac->sg_list, periods);  in imxdma_prep_dma_cyclic()
|   876  sg_assign_page(&imxdmac->sg_list[i], NULL);  in imxdma_prep_dma_cyclic()
|   877  imxdmac->sg_list[i].offset = 0;  in imxdma_prep_dma_cyclic()
|   878  imxdmac->sg_list[i].dma_address = dma_addr;  in imxdma_prep_dma_cyclic()
|   [all …]
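The imx-dma matches show a driver building a scatterlist that describes equally sized periods of a buffer that is already DMA-mapped, so the entries' DMA fields are filled in directly rather than via sg_set_page()/dma_map_sg(). A minimal sketch of that pattern follows; the function name, 'periods', and 'period_len' are illustrative, not kernel API, and this is not the imx-dma driver's code.

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Sketch: describe 'periods' chunks of an already DMA-mapped buffer. */
    static struct scatterlist *build_cyclic_sgl(dma_addr_t dma_addr,
                                                unsigned int periods,
                                                unsigned int period_len)
    {
            struct scatterlist *sgl;
            unsigned int i;

            sgl = kcalloc(periods, sizeof(*sgl), GFP_KERNEL);
            if (!sgl)
                    return NULL;

            sg_init_table(sgl, periods);     /* zero entries, mark the last one */

            for (i = 0; i < periods; i++) {
                    /* Buffer is already mapped, so fill the DMA view directly. */
                    sg_assign_page(&sgl[i], NULL);
                    sgl[i].offset = 0;
                    sgl[i].length = period_len;
                    sgl[i].dma_address = dma_addr;
                    dma_addr += period_len;
            }
            return sgl;
    }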
|
| /linux/drivers/infiniband/hw/qedr/ |
| qedr_roce_cm.c |
|   110  qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =  in qedr_ll2_complete_rx_packet()
|   405  send_size += swr->sg_list[i].length;  in qedr_gsi_build_header()
|   530  packet->payload[i].baddr = swr->sg_list[i].addr;  in qedr_gsi_build_packet()
|   531  packet->payload[i].len = swr->sg_list[i].length;  in qedr_gsi_build_packet()
|   638  wr->sg_list[0].addr,  in qedr_gsi_post_recv()
|   639  wr->sg_list[0].length,  in qedr_gsi_post_recv()
|   651  qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];  in qedr_gsi_post_recv()
|   689  wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;  in qedr_gsi_poll_cq()
|
| /linux/drivers/scsi/aacraid/ |
| commctrl.c |
|   494  void *sg_list[HBA_MAX_SG_EMBEDDED];  in aac_send_raw_srb() local
|   519  memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */  in aac_send_raw_srb()
|   553  if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {  in aac_send_raw_srb()
|   681  sg_list[i] = p; // save so we can clean up later  in aac_send_raw_srb()
|   742  sg_list[i] = p; // save so we can clean up later  in aac_send_raw_srb()
|   797  sg_list[i] = p; // save so we can clean up later  in aac_send_raw_srb()
|   854  sg_list[i] = p; // save so we can clean up later  in aac_send_raw_srb()
|   895  sg_list[i] = p; // save so we can clean up later  in aac_send_raw_srb()
|   935  if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {  in aac_send_raw_srb()
|   990  kfree(sg_list[i]);  in aac_send_raw_srb()
|
| /linux/drivers/net/ethernet/marvell/octeon_ep/ |
| octep_ctrl_net.c |
|    51  msg->sg_list[0].msg = buf;  in init_send_req()
|    52  msg->sg_list[0].sz = msg->hdr.s.sz;  in init_send_req()
|   309  msg->sg_list[0].msg,  in process_mbox_resp()
|   325  req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg;  in process_mbox_notify()
|   367  msg.sg_list[0].sz = msg_sz;  in octep_ctrl_net_recv_fw_messages()
|   368  msg.sg_list[0].msg = &data;  in octep_ctrl_net_recv_fw_messages()
|
| /linux/drivers/net/ethernet/marvell/octeontx2/nic/ |
| otx2_txrx.h |
|    78  struct sg_list {  struct
|   101  struct sg_list *sg;  argument
|
| /linux/drivers/infiniband/hw/cxgb4/ |
| qp.c |
|   421  if ((plen + wr->sg_list[i].length) > max)  in build_immd()
|   423  srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;  in build_immd()
|   424  plen += wr->sg_list[i].length;  in build_immd()
|   425  rem = wr->sg_list[i].length;  in build_immd()
|   451  struct fw_ri_isgl *isglp, struct ib_sge *sg_list,  in build_isgl() argument
|   465  if ((plen + sg_list[i].length) < plen)  in build_isgl()
|   467  plen += sg_list[i].length;  in build_isgl()
|   468  *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |  in build_isgl()
|   469  sg_list[i].length);  in build_isgl()
|   472  *flitp = cpu_to_be64(sg_list[i].addr);  in build_isgl()
|   [all …]
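The build_isgl() matches above illustrate how an ib_sge array from a work request is packed into a hardware scatter/gather list: the running u32 length is checked for wrap-around, then each SGE is emitted as two big-endian 64-bit words. The sketch below mirrors that pattern under stated assumptions; 'pack_sges' and the bare __be64 output buffer are illustrative, not the cxgb4 driver's layout.

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <rdma/ib_verbs.h>

    /* Sketch: pack ib_sge entries as {(lkey << 32) | length, addr} pairs. */
    static int pack_sges(__be64 *out, const struct ib_sge *sg_list,
                         int num_sge, u32 *total_len)
    {
            u32 plen = 0;
            int i;

            for (i = 0; i < num_sge; i++) {
                    /* (plen + len) < plen detects wrap-around of the u32 sum. */
                    if (plen + sg_list[i].length < plen)
                            return -EMSGSIZE;
                    plen += sg_list[i].length;

                    *out++ = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
                                         sg_list[i].length);
                    *out++ = cpu_to_be64(sg_list[i].addr);
            }
            *total_len = plen;
            return 0;
    }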
|
| /linux/drivers/net/ethernet/ibm/ehea/ |
| ehea_qmr.h |
|   106  struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];  member
|   115  struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];  member
|   132  struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];  member
|
| /linux/arch/powerpc/include/asm/ |
| fsl_hcalls.h |
|   358  unsigned int target, phys_addr_t sg_list, unsigned int count)  in fh_partition_memcpy() argument
|   370  r5 = (uint32_t) sg_list;  in fh_partition_memcpy()
|   373  r6 = sg_list >> 32;  in fh_partition_memcpy()
|
| /linux/drivers/scsi/qla4xxx/ |
| ql4_bsg.c |
|    62  sg_copy_from_buffer(bsg_job->reply_payload.sg_list,  in qla4xxx_read_flash()
|   122  sg_copy_to_buffer(bsg_job->request_payload.sg_list,  in qla4xxx_update_flash()
|   186  sg_copy_from_buffer(bsg_job->reply_payload.sg_list,  in qla4xxx_get_acb_state()
|   257  sg_copy_from_buffer(bsg_job->reply_payload.sg_list,  in qla4xxx_read_nvram()
|   321  sg_copy_to_buffer(bsg_job->request_payload.sg_list,  in qla4xxx_update_nvram()
|   435  sg_copy_from_buffer(bsg_job->reply_payload.sg_list,  in qla4xxx_bsg_get_acb()
|
| /linux/include/uapi/rdma/ |
| rvt-abi.h |
|    47  struct rvt_wqe_sge sg_list[];  member
|
| /linux/drivers/xen/ |
| efi.c |
|   220  unsigned long count, unsigned long sg_list)  in xen_efi_update_capsule() argument
|   230  efi_data(op).u.update_capsule.sg_list = sg_list;  in xen_efi_update_capsule()
|
| /linux/drivers/infiniband/hw/hfi1/ |
| uc.c |
|   112  qp->s_sge.sge = wqe->sg_list[0];  in hfi1_make_uc_req()
|   113  qp->s_sge.sg_list = wqe->sg_list + 1;  in hfi1_make_uc_req()
|   447  qp->r_sge.sg_list = NULL;  in hfi1_uc_rcv()
|
| /linux/include/linux/ |
| bsg-lib.h |
|    26  struct scatterlist *sg_list;  member
|
| agp_backend.h |
|    85  struct scatterlist *sg_list;  member
|
| /linux/drivers/infiniband/core/ |
| mad.c |
|    906  mad_send_wr->sg_list[0].length = hdr_len;  in ib_create_send_mad()
|    907  mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;  in ib_create_send_mad()
|    912  mad_send_wr->sg_list[1].length = data_len;  in ib_create_send_mad()
|    914  mad_send_wr->sg_list[1].length = mad_size - hdr_len;  in ib_create_send_mad()
|    916  mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;  in ib_create_send_mad()
|    921  mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;  in ib_create_send_mad()
|   1036  sge = mad_send_wr->sg_list;  in ib_send_mad()
|   2532  mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);  in ib_mad_send_done()
|   2535  mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);  in ib_mad_send_done()
|   2931  struct ib_sge sg_list;  in ib_mad_post_receive_mads() local
|   [all …]
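The mad.c and smc_wr.c entries show the standard verbs relationship: an ib_sge describes a DMA-mapped buffer (address, length, lkey from the PD's local_dma_lkey) and the work request's sg_list points at the SGE array. A minimal sketch of posting one send this way follows; 'post_one_send' and its arguments are illustrative, and completion handling (wr_cqe) is omitted for brevity.

    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    /* Sketch: describe one kernel buffer with an ib_sge and post it. */
    static int post_one_send(struct ib_qp *qp, struct ib_pd *pd,
                             void *buf, u32 len)
    {
            struct ib_sge sge;
            struct ib_send_wr wr = {};

            sge.addr = ib_dma_map_single(qp->device, buf, len, DMA_TO_DEVICE);
            if (ib_dma_mapping_error(qp->device, sge.addr))
                    return -ENOMEM;
            sge.length = len;
            sge.lkey = pd->local_dma_lkey;

            wr.opcode = IB_WR_SEND;
            wr.send_flags = IB_SEND_SIGNALED;
            wr.sg_list = &sge;
            wr.num_sge = 1;

            /* The provider copies the WR during the post, so stack storage
             * for 'wr' and 'sge' is sufficient here. */
            return ib_post_send(qp, &wr, NULL);
    }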
|
| /linux/drivers/crypto/bcm/ |
| util.c |
|    93  int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)  in spu_sg_count() argument
|    99  if (!sg_list)  in spu_sg_count()
|   102  if (spu_sg_at_offset(sg_list, skip, &sg, &offset) < 0)  in spu_sg_count()
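The spu_sg_count() match walks a scatterlist to work out how many entries cover a byte range that starts part-way into the list. The sketch below is a generic illustration of that idea using only sg_next(); it is not the bcm driver's implementation, and the helper name is made up.

    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    /* Sketch: count S/G entries needed to cover 'nbytes' after skipping
     * 'skip' leading bytes of the list. */
    static int count_sg_entries(struct scatterlist *sg, unsigned int skip,
                                int nbytes)
    {
            int entries = 0;

            /* Step over entries wholly consumed by the skip offset. */
            while (sg && skip >= sg->length) {
                    skip -= sg->length;
                    sg = sg_next(sg);
            }

            /* Count entries until the requested byte count is covered. */
            while (sg && nbytes > 0) {
                    nbytes -= (int)(sg->length - skip);
                    skip = 0;
                    entries++;
                    sg = sg_next(sg);
            }

            return nbytes > 0 ? -EINVAL : entries;
    }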
|
| /linux/net/smc/ |
| smc_wr.c |
|   325  link->wr_tx_v2_ib->sg_list[0].length = len;  in smc_wr_tx_v2_send()
|   575  lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];  in smc_wr_init_sge()
|   584  lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =  in smc_wr_init_sge()
|   586  lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =  in smc_wr_init_sge()
|   596  lnk->wr_tx_v2_ib->sg_list = lnk->wr_tx_v2_sge;  in smc_wr_init_sge()
|   626  lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[x];  in smc_wr_init_sge()
|
| /linux/drivers/scsi/ |
| 3w-9xxx.c |
|   1348  u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);  in twa_interrupt()
|   1395  newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));  in twa_load_sgl()
|   1396  newcommand->sg_list[0].length = cpu_to_le32(length);  in twa_load_sgl()
|   1851  command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);  in DEF_SCSI_QCMD()
|   1852  command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);  in DEF_SCSI_QCMD()
|   1859  command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));  in DEF_SCSI_QCMD()
|   1860  command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));  in DEF_SCSI_QCMD()
|   1861  if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {  in DEF_SCSI_QCMD()
|   1872  command_packet->sg_list[i].address = sglistarg[i].address;  in DEF_SCSI_QCMD()
|   1873  command_packet->sg_list[i].length = sglistarg[i].length;  in DEF_SCSI_QCMD()
|   [all …]
|