Searched full:sdma (Results 1 – 25 of 241) sorted by relevance

/linux/drivers/net/ethernet/marvell/prestera/
prestera_rxtx.c
102 /* protect SDMA with concurrent access from multiple CPUs */
107 struct prestera_sdma sdma; member
110 static int prestera_sdma_buf_init(struct prestera_sdma *sdma, in prestera_sdma_buf_init() argument
116 desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma); in prestera_sdma_buf_init()
128 static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa) in prestera_sdma_map() argument
130 return sdma->map_addr + pa; in prestera_sdma_map()
133 static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma, in prestera_sdma_rx_desc_init() argument
142 desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf)); in prestera_sdma_rx_desc_init()
150 static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma, in prestera_sdma_rx_desc_set_next() argument
154 desc->next = cpu_to_le32(prestera_sdma_map(sdma, next)); in prestera_sdma_rx_desc_set_next()
[all …]
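
The prestera excerpt above pairs a coherent descriptor pool with a fixed address window: dma_pool_alloc() hands back both the CPU pointer and the bus address in one call, and prestera_sdma_map() biases that bus address into the device's view. A minimal sketch of the same pattern, with demo_* names standing in for the driver's structs:

#include <linux/dmapool.h>

/* Pared-down stand-ins for the prestera structs; only the fields the
 * excerpt touches are kept. */
struct demo_desc {
	__le32 word;
	__le32 buff;
	__le32 next;
};

struct demo_sdma {
	struct dma_pool *desc_pool;
	u32 map_addr;			/* device-side window offset */
};

/* As in prestera_sdma_buf_init(): one call yields both the CPU pointer
 * and the DMA (bus) address of the descriptor. */
static struct demo_desc *demo_buf_init(struct demo_sdma *sdma, dma_addr_t *dma)
{
	return dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, dma);
}

/* As in prestera_sdma_map(): the hardware sees bus addresses shifted by
 * a constant window base. */
static u32 demo_map(struct demo_sdma *sdma, dma_addr_t pa)
{
	return sdma->map_addr + pa;
}
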
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_sdma.c
30 /* SDMA CSA reside in the 3rd page of CSA */
34 * GPU SDMA IP block helpers function.
42 for (i = 0; i < adev->sdma.num_instances; i++) in amdgpu_sdma_get_instance_from_ring()
43 if (ring == &adev->sdma.instance[i].ring || in amdgpu_sdma_get_instance_from_ring()
44 ring == &adev->sdma.instance[i].page) in amdgpu_sdma_get_instance_from_ring()
45 return &adev->sdma.instance[i]; in amdgpu_sdma_get_instance_from_ring()
55 for (i = 0; i < adev->sdma.num_instances; i++) { in amdgpu_sdma_get_index_from_ring()
56 if (ring == &adev->sdma.instance[i].ring || in amdgpu_sdma_get_index_from_ring()
57 ring == &adev->sdma.instance[i].page) { in amdgpu_sdma_get_index_from_ring()
74 /* don't enable OS preemption on SDMA under SRIOV */ in amdgpu_sdma_get_csa_mc_addr()
[all …]
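
amdgpu keeps per-engine state in adev->sdma.instance[] and recovers it from a ring pointer by scanning for a matching gfx or page ring, as the truncated lines above show. A self-contained sketch of that lookup, with types trimmed to the fields the excerpt uses:

#include <stddef.h>

struct demo_ring { int unused; };

struct demo_sdma_instance {
	struct demo_ring ring;	/* gfx queue */
	struct demo_ring page;	/* page queue */
};

struct demo_adev {
	struct {
		struct demo_sdma_instance instance[8];
		int num_instances;
	} sdma;
};

/* Mirror of amdgpu_sdma_get_instance_from_ring(): a ring belongs to the
 * instance whose gfx or page ring it is. */
static struct demo_sdma_instance *
demo_get_instance_from_ring(struct demo_adev *adev, struct demo_ring *ring)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page)
			return &adev->sdma.instance[i];

	return NULL;
}
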
sdma_v4_4_2.c
34 #include "sdma/sdma_4_4_2_offset.h"
35 #include "sdma/sdma_4_4_2_sh_mask.h"
161 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_inst_init_golden_registers()
190 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_2_init_microcode()
346 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_4_2_ring_insert_nop() local
350 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_4_2_ring_insert_nop()
427 << (ring->me % adev->sdma.num_inst_per_aid); in sdma_v4_4_2_ring_emit_hdp_flush()
487 struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; in sdma_v4_4_2_inst_gfx_stop() local
493 sdma[i] = &adev->sdma.instance[i].ring; in sdma_v4_4_2_inst_gfx_stop()
502 if (sdma[i]->use_doorbell) { in sdma_v4_4_2_inst_gfx_stop()
[all …]
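
Several of the ring_insert_nop() hits (here and in the sdma_v5/v6/v7 files below) share one trick: when the instance supports burst NOPs, the first NOP packet encodes how many padding words follow, so the engine swallows the whole pad in one step. A hedged, self-contained sketch; DEMO_NOP and the count encoding are assumptions, not the real SDMA packet format:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; names and packet encoding are assumptions. */
#define DEMO_NOP		0x00000000u
#define DEMO_NOP_COUNT(n)	((uint32_t)(n) << 16)

struct demo_sdma_instance { int burst_nop; };
struct demo_ring { struct demo_sdma_instance *inst; };

static void demo_ring_write(struct demo_ring *ring, uint32_t dw)
{
	(void)ring;
	printf("emit 0x%08x\n", dw);
}

/* Pattern from the sdma_v*_ring_insert_nop() excerpts: with burst-NOP
 * support, the first NOP carries the count of padding words that
 * follow; otherwise each pad word is its own NOP. */
static void demo_ring_insert_nop(struct demo_ring *ring, uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++) {
		if (ring->inst && ring->inst->burst_nop && i == 0)
			demo_ring_write(ring, DEMO_NOP | DEMO_NOP_COUNT(count - 1));
		else
			demo_ring_write(ring, DEMO_NOP);
	}
}
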
sdma_v4_0.c
602 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_setup_ulv()
627 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_init_microcode()
633 for every SDMA instance */ in sdma_v4_0_init_microcode()
786 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_0_ring_insert_nop() local
790 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_0_ring_insert_nop()
919 * @enable: enable SDMA RB/IB
927 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_gfx_enable()
961 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_page_stop()
1010 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_ctx_switch_enable()
1022 * Enable SDMA utilization. It's only supported on in sdma_v4_0_ctx_switch_enable()
[all …]
sdma_v3_0.c
182 * sDMA - System DMA
190 * (ring buffer, IBs, etc.), but sDMA has its own
192 * used by the CP. sDMA supports copying data, writing
254 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v3_0_free_microcode()
255 amdgpu_ucode_release(&adev->sdma.instance[i].fw); in sdma_v3_0_free_microcode()
305 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v3_0_init_microcode()
307 err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, in sdma_v3_0_init_microcode()
311 err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, in sdma_v3_0_init_microcode()
316 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v3_0_init_microcode()
317 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version); in sdma_v3_0_init_microcode()
[all …]
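
sdma_v3_0_init_microcode() above requests one image per instance and pulls the ucode version out of a little-endian header. A kernel-side sketch of that flow using the generic request_firmware() API (amdgpu_ucode_request() is a driver-internal wrapper); demo_fw_header is an assumed layout, not the real sdma_firmware_header_v1_0:

#include <linux/errno.h>
#include <linux/firmware.h>

struct demo_fw_header {
	__le32 size_bytes;
	__le32 ucode_version;
};

static int demo_init_microcode(struct device *dev, const char *name,
			       u32 *fw_version)
{
	const struct firmware *fw;
	const struct demo_fw_header *hdr;
	int err;

	err = request_firmware(&fw, name, dev);
	if (err)
		return err;

	if (fw->size < sizeof(*hdr)) {
		release_firmware(fw);
		return -EINVAL;
	}

	/* Header fields are little-endian, hence the le32_to_cpu() in the
	 * excerpt's version read. */
	hdr = (const struct demo_fw_header *)fw->data;
	*fw_version = le32_to_cpu(hdr->ucode_version);

	release_firmware(fw);	/* the real driver keeps fw until free_microcode */
	return 0;
}
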
cik_sdma.c
77 for (i = 0; i < adev->sdma.num_instances; i++) in cik_sdma_free_microcode()
78 amdgpu_ucode_release(&adev->sdma.instance[i].fw); in cik_sdma_free_microcode()
82 * sDMA - System DMA
90 * (ring buffer, IBs, etc.), but sDMA has its own
92 * used by the CP. sDMA supports copying data, writing
133 for (i = 0; i < adev->sdma.num_instances; i++) { in cik_sdma_init_microcode()
135 err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, in cik_sdma_init_microcode()
139 err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, in cik_sdma_init_microcode()
149 for (i = 0; i < adev->sdma.num_instances; i++) in cik_sdma_init_microcode()
150 amdgpu_ucode_release(&adev->sdma.instance[i].fw); in cik_sdma_init_microcode()
[all …]
sdma_v2_4.c
78 * sDMA - System DMA
86 * (ring buffer, IBs, etc.), but sDMA has its own
88 * used by the CP. sDMA supports copying data, writing
114 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v2_4_free_microcode()
115 amdgpu_ucode_release(&adev->sdma.instance[i].fw); in sdma_v2_4_free_microcode()
145 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v2_4_init_microcode()
147 err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, in sdma_v2_4_init_microcode()
151 err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, in sdma_v2_4_init_microcode()
156 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v2_4_init_microcode()
157 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version); in sdma_v2_4_init_microcode()
[all …]
sdma_v7_0.c
272 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v7_0_ring_insert_nop() local
276 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v7_0_ring_insert_nop()
430 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_gfx_stop()
485 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_enable()
493 * sdma_v7_0_gfx_resume_instance - start/restart a certain sdma engine
513 ring = &adev->sdma.instance[i].ring; in sdma_v7_0_gfx_resume_instance()
590 adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances); in sdma_v7_0_gfx_resume_instance()
598 /* Set up sdma hang watchdog */ in sdma_v7_0_gfx_resume_instance()
664 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v7_0_gfx_resume()
691 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v12_0_free_ucode_buffer()
[all …]
sdma_v6_0.c
240 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v6_0_ring_insert_nop() local
244 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v6_0_ring_insert_nop()
398 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_gfx_stop()
434 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_ctxempty_int_enable()
464 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_enable()
472 * sdma_v6_0_gfx_resume_instance - start/restart a certain sdma engine
491 ring = &adev->sdma.instance[i].ring; in sdma_v6_0_gfx_resume_instance()
566 adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances); in sdma_v6_0_gfx_resume_instance()
574 /* Set up sdma hang watchdog */ in sdma_v6_0_gfx_resume_instance()
634 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v6_0_gfx_resume()
[all …]
sdma_v5_2.c
229 /* SDMA seems to miss doorbells sometimes when powergating kicks in. in sdma_v5_2_ring_set_wptr()
255 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v5_2_ring_insert_nop() local
259 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v5_2_ring_insert_nop()
418 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_gfx_stop()
477 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_ctx_switch_enable()
516 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_enable()
525 * sdma_v5_2_gfx_resume_instance - start/restart a certain sdma engine
546 ring = &adev->sdma.instance[i].ring; in sdma_v5_2_gfx_resume_instance()
698 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_2_gfx_resume()
721 * sdma_v5_2_load_microcode - load the sDMA ME ucode
[all …]
sdma_v5_0.c
292 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_init_microcode()
438 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v5_0_ring_insert_nop() local
442 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v5_0_ring_insert_nop()
599 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_gfx_stop()
658 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_ctx_switch_enable()
700 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_enable()
708 * sdma_v5_0_gfx_resume_instance - start/restart a certain sdma engine
728 ring = &adev->sdma.instance[i].ring; in sdma_v5_0_gfx_resume_instance()
882 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v5_0_gfx_resume()
905 * sdma_v5_0_load_microcode - load the sDMA ME ucode
[all …]
si_dma.c
49 u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; in si_dma_ring_get_wptr()
57 u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; in si_dma_ring_set_wptr()
118 for (i = 0; i < adev->sdma.num_instances; i++) { in si_dma_stop()
133 for (i = 0; i < adev->sdma.num_instances; i++) { in si_dma_start()
134 ring = &adev->sdma.instance[i].ring; in si_dma_start()
348 * si_dma_vm_set_pte_pde - update the page tables using sDMA
357 * Update the page tables using sDMA (CIK).
429 * si_dma_ring_emit_vm_flush - cik vm flush using sDMA
436 * using sDMA (VI).
464 adev->sdma.num_instances = 2; in si_dma_early_init()
[all …]
sdma_v4_4.c
24 #include "sdma/sdma_4_4_0_offset.h"
25 #include "sdma/sdma_4_4_0_sh_mask.h"
35 * to calculate register offset for all the sdma instances */
180 /* the SDMA_EDC_COUNTER register in each sdma instance in sdma_v4_4_get_ras_error_count()
187 dev_info(adev->dev, "Detected %s in SDMA%d, SED %d\n", in sdma_v4_4_get_ras_error_count()
222 * SDMA RAS supports single bit uncorrectable error detection. in sdma_v4_4_query_ras_error_count_by_instance()
228 * SDMA RAS does not support correctable errors. in sdma_v4_4_query_ras_error_count_by_instance()
241 /* write 0 to EDC_COUNTER reg to clear sdma edc counters */ in sdma_v4_4_reset_ras_error_count()
243 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_reset_ras_error_count()
256 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_4_query_ras_error_count()
[all …]
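
The sdma_v4_4 hits describe the RAS model: only single-bit uncorrectable errors are counted, each instance has its own SDMA_EDC_COUNTER, and writing 0 clears it. A stubbed sketch of that query/reset loop; the register accessors and counter semantics here are placeholders, not the driver's MMIO helpers:

#include <stdint.h>
#include <stdio.h>

/* Stubbed register access; a real driver reads/writes SDMA_EDC_COUNTER
 * through its MMIO layer. */
static uint32_t demo_rreg(int inst) { (void)inst; return 0; }
static void demo_wreg(int inst, uint32_t v) { (void)inst; (void)v; }

static unsigned int demo_query_and_reset_ras(int num_instances)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_instances; i++) {
		uint32_t cnt = demo_rreg(i);

		if (cnt)
			printf("Detected SED in SDMA%d, count %u\n", i, cnt);
		total += cnt;

		/* write 0 to the EDC counter to clear it, as the excerpt notes */
		demo_wreg(i, 0);
	}
	return total;
}
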
/linux/arch/arm/boot/dts/ti/omap/
omap2.dtsi
62 dmas = <&sdma 9 &sdma 10>;
100 sdma: dma-controller@0 { label
101 compatible = "ti,omap2420-sdma", "ti,omap-sdma";
136 dmas = <&sdma 35 &sdma 36 &sdma 37 &sdma 38
137 &sdma 39 &sdma 40 &sdma 41 &sdma 42>;
147 dmas = <&sdma 43 &sdma 44 &sdma 45 &sdma 46>;
163 dmas = <&sdma 13>;
172 dmas = <&sdma 49 &sdma 50>;
182 dmas = <&sdma 51 &sdma 52>;
192 dmas = <&sdma 53 &sdma 54>;
omap3.dtsi
181 dmas = <&sdma 9 &sdma 10>;
207 dmas = <&sdma 65 &sdma 66>;
289 sdma: dma-controller@0 { label
290 compatible = "ti,omap3430-sdma", "ti,omap-sdma";
373 dmas = <&sdma 49 &sdma 50>;
383 dmas = <&sdma 51 &sdma 52>;
393 dmas = <&sdma 53 &sdma 54>;
448 dmas = <&sdma 35>,
449 <&sdma 36>,
450 <&sdma 37>,
[all …]
omap2430.dtsi
186 dmas = <&sdma 31>,
187 <&sdma 32>;
202 dmas = <&sdma 33>,
203 <&sdma 34>;
218 dmas = <&sdma 17>,
219 <&sdma 18>;
234 dmas = <&sdma 19>,
235 <&sdma 20>;
250 dmas = <&sdma 21>,
251 <&sdma 22>;
[all …]
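
On the consumer side, the dmas = <&sdma N> phandles in the OMAP nodes above are what a peripheral driver resolves at probe time through the dmaengine API. A minimal sketch; the "rx" channel name is an assumption (it would come from the node's dma-names property, which these hits do not show):

#include <linux/dmaengine.h>
#include <linux/err.h>

static struct dma_chan *demo_claim_rx_channel(struct device *dev)
{
	struct dma_chan *chan;

	/* Resolved via the node's dmas/dma-names properties; the error
	 * pointer may be -EPROBE_DEFER, so check with IS_ERR(). */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return NULL;	/* defer or fall back as the driver sees fit */

	return chan;
}
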
/linux/drivers/infiniband/hw/hfi1/
vnic_sdma.c
7 * This file contains HFI1 support for VNIC SDMA functionality
10 #include "sdma.h"
21 * @txreq: sdma transmit request
22 * @sdma: vnic sdma pointer
30 struct hfi1_vnic_sdma *sdma; member
42 struct hfi1_vnic_sdma *vnic_sdma = tx->sdma; in vnic_sdma_complete()
130 struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx]; in hfi1_vnic_send_dma()
147 tx->sdma = vnic_sdma; in hfi1_vnic_send_dma()
157 /* When -ECOMM, sdma callback will be called with ABORT status */ in hfi1_vnic_send_dma()
179 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
[all …]
vnic.h
10 #include "sdma.h"
33 * struct hfi1_vnic_sdma - VNIC per Tx ring SDMA information
35 * @sde - sdma engine
38 * @stx - sdma tx request
39 * @state - vnic Tx ring SDMA state
79 * @sdma: VNIC SDMA structure per TXQ
95 struct hfi1_vnic_sdma sdma[HFI1_VNIC_MAX_TXQ]; member
sdma.c
18 #include "sdma.h"
30 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
34 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)");
38 MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
42 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptor before interrupt");
45 /* max wait time for a SDMA engine to indicate it has halted */
47 /* all SDMA engine errors that cause a halt */
261 * sdma engine 'sde' to drop to 0.
296 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
345 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
[all …]
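
Each MODULE_PARM_DESC() string above pairs with a module_param() declaration; the pattern looks like this (variable names from the excerpt, default values illustrative):

#include <linux/module.h>
#include <linux/moduleparam.h>

static uint sdma_descq_cnt = 2048;	/* default shown here is illustrative */
module_param(sdma_descq_cnt, uint, 0444);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

static uint sdma_idle_cnt = 250;
module_param(sdma_idle_cnt, uint, 0444);
MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)");
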
Kconfig
11 bool "HFI1 SDMA Order debug"
16 sdma completions for unit testing
18 bool "Config SDMA Verbosity"
23 SDMA debug
/linux/arch/powerpc/include/asm/
mpc52xx.h
76 /* SDMA */
78 u32 taskBar; /* SDMA + 0x00 */
79 u32 currentPointer; /* SDMA + 0x04 */
80 u32 endPointer; /* SDMA + 0x08 */
81 u32 variablePointer; /* SDMA + 0x0c */
83 u8 IntVect1; /* SDMA + 0x10 */
84 u8 IntVect2; /* SDMA + 0x11 */
85 u16 PtdCntrl; /* SDMA + 0x12 */
87 u32 IntPend; /* SDMA + 0x14 */
88 u32 IntMask; /* SDMA + 0x18 */
[all …]
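
Laid out as a struct, the register block these mpc52xx.h hits walk through looks like the following, as far as the excerpt reaches (fields beyond IntMask are truncated in the search results and omitted):

#include <linux/types.h>

struct demo_mpc52xx_sdma {
	u32 taskBar;		/* SDMA + 0x00 */
	u32 currentPointer;	/* SDMA + 0x04 */
	u32 endPointer;		/* SDMA + 0x08 */
	u32 variablePointer;	/* SDMA + 0x0c */
	u8  IntVect1;		/* SDMA + 0x10 */
	u8  IntVect2;		/* SDMA + 0x11 */
	u16 PtdCntrl;		/* SDMA + 0x12 */
	u32 IntPend;		/* SDMA + 0x14 */
	u32 IntMask;		/* SDMA + 0x18 */
};
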
/linux/arch/arm/boot/dts/nxp/imx/
imx31.dtsi
135 dmas = <&sdma 8 8 0>, <&sdma 9 8 0>;
182 dmas = <&sdma 20 3 0>;
193 dmas = <&sdma 21 3 0>;
213 dmas = <&sdma 6 8 0>, <&sdma 7 8 0>;
248 dmas = <&sdma 10 8 0>, <&sdma 11 8 0>;
300 sdma: dma-controller@53fd4000 { label
301 compatible = "fsl,imx31-sdma";
307 fsl,sdma-ram-script-name = "imx/sdma/sdma-imx31.bin";
348 dmas = <&sdma 30 17 0>;
/linux/drivers/gpu/drm/amd/include/ivsrcid/sdma3/
irqsrcs_sdma3_5_0.h
26 #define SDMA3_5_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 // 0xD9 SDMA atomic*_rtn ops complete
27 #define SDMA3_5_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 // 0xDA SDMA atomic CMPSWAP loop timeout
28 #define SDMA3_5_0__SRCID__SDMA_IB_PREEMPT 219 // 0xDB sdma mid-command buffer preempt interrupt
37 #define SDMA3_5_0__SRCID__SDMA_PREEMPT 240 // 0xF0 SDMA New Run List
41 #define SDMA3_5_0__SRCID__SDMA_FROZEN 245 // 0xF5 SDMA Frozen
/linux/drivers/gpu/drm/amd/include/ivsrcid/sdma1/
irqsrcs_sdma1_5_0.h
25 #define SDMA1_5_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 // 0xD9 SDMA atomic*_rtn ops complete
26 #define SDMA1_5_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 // 0xDA SDMA atomic CMPSWAP loop timeout
27 #define SDMA1_5_0__SRCID__SDMA_IB_PREEMPT 219 // 0xDB sdma mid-command buffer preempt interrupt
36 #define SDMA1_5_0__SRCID__SDMA_PREEMPT 240 // 0xF0 SDMA New Run List
40 #define SDMA1_5_0__SRCID__SDMA_FROZEN 245 // 0xF5 SDMA Frozen
/linux/drivers/gpu/drm/amd/include/ivsrcid/sdma2/
irqsrcs_sdma2_5_0.h
26 #define SDMA2_5_0__SRCID__SDMA_ATOMIC_RTN_DONE 217 // 0xD9 SDMA atomic*_rtn ops complete
27 #define SDMA2_5_0__SRCID__SDMA_ATOMIC_TIMEOUT 218 // 0xDA SDMA atomic CMPSWAP loop timeout
28 #define SDMA2_5_0__SRCID__SDMA_IB_PREEMPT 219 // 0xDB sdma mid-command buffer preempt interrupt
37 #define SDMA2_5_0__SRCID__SDMA_PREEMPT 240 // 0xF0 SDMA New Run List
41 #define SDMA2_5_0__SRCID__SDMA_FROZEN 245 // 0xF5 SDMA Frozen
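
Note that the sdma1/sdma2/sdma3 headers assign identical source IDs to each engine instance, so a handler can share one table keyed by source ID across instances. A small sketch, with IDs and meanings taken from the hits above:

struct demo_srcid { unsigned int id; const char *desc; };

static const struct demo_srcid demo_sdma_srcids[] = {
	{ 217, "SDMA atomic*_rtn ops complete" },		/* 0xD9 */
	{ 218, "SDMA atomic CMPSWAP loop timeout" },		/* 0xDA */
	{ 219, "sdma mid-command buffer preempt interrupt" },	/* 0xDB */
	{ 240, "SDMA New Run List" },				/* 0xF0 */
	{ 245, "SDMA Frozen" },					/* 0xF5 */
};
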
