| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_job.c |
	amdgpu_job_do_core_dump(…, struct amdgpu_job *job):
		dev_info(adev->dev, "Dumping IP State\n");
		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->funcs->dump_ip_state)
				adev->ip_blocks[i].version->funcs
					->dump_ip_state((void *)&adev->ip_blocks[i]);
		dev_info(adev->dev, "Dumping IP State Completed\n");
		amdgpu_coredump(adev, true, false, job);
	amdgpu_job_core_dump(…, struct amdgpu_job *job):
		mutex_lock(&hive->hive_lock);
	…
|
| amdgpu_ib.c |
	/* … command ring and the hw will fetch the commands from the IB …
	 * … put in IBs for execution by the requested ring.
	 *
	 * amdgpu_ib_get - request an IB (Indirect Buffer)
	 * …
	 * Returns 0 on success, error on failure.
	 */
	amdgpu_ib_get():
		r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
				     &ib->sa_bo, size);
		…
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
		…
		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
		ib->flag…
	amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
			   struct amdgpu_ib *ibs, struct amdgpu_job *job,
			   struct dma_fence **f)
	amdgpu_ib_ring_tests():
		struct amdgpu_ring *ring = adev->rings[i];
	…
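The amdgpu_ib.c excerpt sketches the indirect-buffer lifecycle: allocate an IB from a suballocator pool, fill it with packets through the CPU mapping in ib->ptr, then hand it to amdgpu_ib_schedule() so the ring fetches and executes it. The fragment below is a minimal sketch of that flow, not code from the file: amdgpu_ib_schedule() matches the signature shown above, but the amdgpu_ib_get()/amdgpu_ib_free() parameter lists vary between kernel versions and are assumed here, as is the use of the ring's nop packet as a placeholder payload.

	#include "amdgpu.h"	/* amdgpu_device, amdgpu_ring, amdgpu_ib */

	/* Hedged sketch: submit one tiny IB on a ring and wait for its fence. */
	static int example_submit_one_ib(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring)
	{
		struct amdgpu_ib ib;
		struct dma_fence *f = NULL;
		long timeout;
		int r;

		/* Allocate a 16-dword IB from the direct pool (kernel VM, vm == NULL);
		 * parameter order assumed from the driver's ib-test helpers. */
		r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r)
			return r;

		/* amdgpu_ib_get() mapped the suballocation at ib.ptr; emit one packet. */
		ib.ptr[0] = ring->funcs->nop;
		ib.length_dw = 1;

		/* Queue the IB on the ring; f signals once the GPU has executed it. */
		r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
		if (!r) {
			timeout = dma_fence_wait_timeout(f, false, msecs_to_jiffies(100));
			if (timeout <= 0)
				r = timeout ? (int)timeout : -ETIMEDOUT;
		}

		dma_fence_put(f);
		amdgpu_ib_free(adev, &ib, NULL);	/* signature assumed; recent kernels drop adev */
		return r;
	}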
| amdgpu_ring.h |
	#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
	#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
	/* Direct submission to the ring buffer during init and reset. */
	/* sync_seq is protected by ring emission lock */
	/* … are no longer in use by the associated ring on the GPU and … */
	struct amdgpu_ring *ring;
	void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
	void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
	int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
	int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, …
	…
|
| amdgpu_jpeg.c |
	amdgpu_jpeg_sw_init():
		INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
		mutex_init(&adev->jpeg.jpeg_pg_lock);
		atomic_set(&adev->jpeg.total_submission_cnt, 0);
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG))
			adev->jpeg.indirect_sram = true;
		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1U << i))
				…
			if (adev->jpeg.indirect_sram) {
	…
|
| amdgpu_dev_coredump.c |
	// SPDX-License-Identifier: MIT
	amdgpu_coredump(…, bool vram_lost, struct amdgpu_job *job)
	amdgpu_devcoredump_fw_info():
		struct atom_context *ctx = adev->mode_info.atom_context;
		drm_printf(p, "VCE feature version: %u, fw version: 0x%08x\n",
			   adev->vce.fb_version, adev->vce.fw_version);
		drm_printf(p, "UVD feature version: %u, fw version: 0x%08x\n", 0,
			   adev->uvd.fw_version);
		drm_printf(p, "GMC feature version: %u, fw version: 0x%08x\n", 0,
			   adev->gmc.fw_version);
		drm_printf(p, "ME feature version: %u, fw version: 0x%08x\n", …
	…
|
| amdgpu_vm.c |
	#include <linux/dma-fence-array.h>
	#include <linux/dma-buf.h>
	/* Each GPUVM is represented by a 1-2 or 1-5 level page table, depending …
	 * VMID 0 is special. It is the GPUVM used for the kernel driver. In
	 * addition to an aperture managed by a page table, VMID 0 also has …
	 * incurring the overhead of a page table. VMID 0 is used by the kernel …
	 */
	#define START(node) ((node)->start)
	#define LAST(node) ((node)->last)
	/* struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback */
	/* struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence … */
	…
|
| vcn_sw_ring.c |
	void vcn_dec_sw_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, …)
		amdgpu_ring_write(ring, VCN_DEC_SW_CMD_FENCE);
		amdgpu_ring_write(ring, addr);
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, seq);
		amdgpu_ring_write(ring, VCN_DEC_SW_CMD_TRAP);
	void vcn_dec_sw_ring_insert_end(struct amdgpu_ring *ring)
		amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	void vcn_dec_sw_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, …)
		uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
	…
|
| /linux/Documentation/devicetree/bindings/powerpc/fsl/ |
| raideng.txt |
	RAID Engine nodes are defined to describe on-chip RAID accelerators. Each RAID …
	- compatible: Should contain "fsl,raideng-v1.0" as the value …
	  major number whereas 0 represents minor number. The …
	- reg: offset and length of the register set for the device
	- ranges: standard ranges property specifying the translation …

		compatible = "fsl,raideng-v1.0";
		#address-cells = <1>;
		#size-cells = <1>;
		reg = <0x320000 0x10000>;
		ranges = <0 0x320000 0x10000>;
	…
|
| /linux/arch/powerpc/boot/dts/fsl/ |
| qoriq-sec6.0-0.dtsi |
	compatible = "fsl,sec-v6.0", "fsl,sec-v5.0",
		     "fsl,sec-v4.0";
	fsl,sec-era = <6>;
	#address-cells = <1>;
	#size-cells = <1>;
	compatible = "fsl,sec-v6.0-job-ring",
		     "fsl,sec-v5.2-job-ring",
		     "fsl,sec-v5.0-job-ring",
		     "fsl,sec-v4.4-job-ring",
		     "fsl,sec-v4.0-job-ring";
	…
|
| qoriq-sec5.2-0.dtsi |
	/* QorIQ Sec/Crypto 5.2 device tree stub [ controller @ offset 0x300000 ]
	 * Copyright 2011-2012 Freescale Semiconductor Inc.
	 */
	compatible = "fsl,sec-v5.2", "fsl,sec-v5.0", "fsl,sec-v4.0";
	fsl,sec-era = <5>;
	#address-cells = <1>;
	#size-cells = <1>;
	reg = <0x300000 0x10000>;
	ranges = <0 0x300000 0x10000>;
	interrupts = <92 2 0 0>;
	compatible = "fsl,sec-v5.2-job-ring", …
	…
|
| qoriq-sec5.3-0.dtsi |
	/* QorIQ Sec/Crypto 5.3 device tree stub [ controller @ offset 0x300000 ] */
	compatible = "fsl,sec-v5.3", "fsl,sec-v5.0", "fsl,sec-v4.0";
	fsl,sec-era = <4>;
	#address-cells = <1>;
	#size-cells = <1>;
	reg = <0x300000 0x10000>;
	ranges = <0 0x300000 0x10000>;
	interrupts = <92 2 0 0>;
	compatible = "fsl,sec-v5.3-job-ring",
		     "fsl,sec-v5.0-job-ring", …
	…
|
| pq3-sec4.4-0.dtsi |
	/* PQ3 Sec/Crypto 4.4 device tree stub [ controller @ offset 0x30000 ] */
	compatible = "fsl,sec-v4.4", "fsl,sec-v4.0";
	fsl,sec-era = <3>;
	#address-cells = <1>;
	#size-cells = <1>;
	ranges = <0x0 0x30000 0x10000>;
	reg = <0x30000 0x10000>;
	interrupts = <58 2 0 0>;
	compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
	reg = <0x1000 0x1000>;
	…
|
| qoriq-sec5.0-0.dtsi |
	/* QorIQ Sec/Crypto 5.0 device tree stub [ controller @ offset 0x300000 ] */
	compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
	fsl,sec-era = <5>;
	#address-cells = <1>;
	#size-cells = <1>;
	reg = <0x300000 0x10000>;
	ranges = <0 0x300000 0x10000>;
	interrupts = <92 2 0 0>;
	compatible = "fsl,sec-v5.0-job-ring",
		     "fsl,sec-v4.0-job-ring";
	…
|
| qoriq-sec4.2-0.dtsi |
	/* QorIQ Sec/Crypto 4.2 device tree stub [ controller @ offset 0x300000 ] */
	compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
	fsl,sec-era = <3>;
	#address-cells = <1>;
	#size-cells = <1>;
	reg = <0x300000 0x10000>;
	ranges = <0 0x300000 0x10000>;
	interrupts = <92 2 0 0>;
	compatible = "fsl,sec-v4.2-job-ring",
		     "fsl,sec-v4.0-job-ring";
	…
|
| qoriq-raid1.0-0.dtsi |
	/* QorIQ RAID 1.0 device tree stub [ controller @ offset 0x320000 ] */
	compatible = "fsl,raideng-v1.0";
	#address-cells = <1>;
	#size-cells = <1>;
	reg = <0x320000 0x10000>;
	ranges = <0 0x320000 0x10000>;
	compatible = "fsl,raideng-v1.0-job-queue";
	#address-cells = <1>;
	#size-cells = <1>;
	reg = <0x1000 0x1000>;
	…
|
| p1023si-post.dtsi |
	/* Copyright 2011 - 2014 Freescale Semiconductor Inc. */
	compatible = "fsl,bman-fbpr";
	alloc-ranges = <0 0 0x10 0>;
	compatible = "fsl,qman-fqd";
	alloc-ranges = <0 0 0x10 0>;
	compatible = "fsl,qman-pfdr";
	alloc-ranges = <0 0 0x10 0>;
	#address-cells = <2>;
	#size-cells = <1>;
	compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
	…
|
| qoriq-sec4.0-0.dtsi |
	/* QorIQ Sec/Crypto 4.0 device tree stub [ controller @ offset 0x300000 ] */
	compatible = "fsl,sec-v4.0";
	fsl,sec-era = <1>;
	#address-cells = <1>;
	#size-cells = <1>;
	reg = <0x300000 0x10000>;
	ranges = <0 0x300000 0x10000>;
	interrupts = <92 2 0 0>;
	compatible = "fsl,sec-v4.0-job-ring";
	reg = <0x1000 0x1000>;
	…
|
| /linux/drivers/dma/ |
| fsl_raid.h |
	/* Copyright (c) 2010-2012 Freescale Semiconductor, Inc. */
	#define FSL_RE_GFM_POLY		0x1d000000
	#define FSL_RE_CFG1_CBSI	0x08000000
	#define FSL_RE_CFG1_CBS0	0x00080000
	#define FSL_RE_PQ_OPCODE	0x1B
	#define FSL_RE_XOR_OPCODE	0x1A
	#define FSL_RE_MOVE_OPCODE	0x8
	#define FSL_RE_BLOCK_SIZE	0x3	/* 4096 bytes */
	#define FSL_RE_CACHEABLE_IO	0x0
	#define FSL_RE_BUFFER_OUTPUT	0x0
	…
|
| /linux/arch/arm64/boot/dts/freescale/ |
| imx8qxp-ss-security.dtsi |
	// SPDX-License-Identifier: GPL-2.0+
	compatible = "fsl,imx8qxp-caam", "fsl,sec-v4.0";
	compatible = "fsl,imx8qxp-job-ring", "fsl,sec-v4.0-job-ring";
	compatible = "fsl,imx8qxp-job-ring", "fsl,sec-v4.0-job-ring";
|
| imx8-ss-security.dtsi |
	// SPDX-License-Identifier: GPL-2.0+
	#include <dt-bindings/firmware/imx/rsrc.h>
	compatible = "simple-bus";
	#address-cells = <1>;
	#size-cells = <1>;
	ranges = <0x31400000 0x0 0x31400000 0x90000>;
	compatible = "fsl,imx8qm-caam", "fsl,sec-v4.0";
	reg = <0x31400000 0x90000>;
	#address-cells = <1>;
	#size-cells = <1>;
	…
|
| fsl-ls1012a.dtsi |
	// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
	/* Device Tree Include file for NXP Layerscape-1012A family SoC.
	 * Copyright 2019-2020 NXP
	 */
	#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
	#include <dt-bindings/interrupt-controller/arm-gic.h>
	#include <dt-bindings/thermal/thermal.h>
	interrupt-parent = <&gic>;
	#address-cells = <2>;
	#size-cells = <2>;
	rtic-a = &rtic_a;
	…
|
| /linux/drivers/gpu/drm/xe/ |
| xe_exec.c |
	// SPDX-License-Identifier: MIT
	/* …
	 * - Passing in a list of BOs which are read / written, creating implicit syncs
	 * - Binding at exec time
	 * - Flow controlling the ring at exec time
	 * …
	 * passed into an exec, using the dma-buf implicit sync uAPI, have binds as
	 * separate operations, and using the DRM scheduler to flow control the ring.
	 * …
	 * Rebinds / dma-resv usage applies to non-compute …
	 */
	xe_exec_ioctl():
		struct xe_sched_job *job;
	…
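The xe_exec.c comment lists what historically made exec complicated and what Xe moves out of the exec path: binds are separate operations, implicit sync comes through the dma-buf uAPI, and the DRM scheduler flow-controls the ring. From userspace, a submission then reduces to filling struct drm_xe_exec and issuing DRM_IOCTL_XE_EXEC. The sketch below is illustrative only: it assumes the field names from the upstream xe_drm.h uAPI header, an already-created exec queue, and a batch buffer already bound at batch_addr.

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>		/* drmIoctl() from libdrm */
	#include <drm/xe_drm.h>		/* struct drm_xe_exec, DRM_IOCTL_XE_EXEC */

	/* Hedged userspace sketch: submit one batch buffer on an existing exec queue. */
	static int submit_batch(int fd, uint32_t exec_queue_id, uint64_t batch_addr)
	{
		struct drm_xe_exec exec;

		memset(&exec, 0, sizeof(exec));
		exec.exec_queue_id = exec_queue_id;	/* queue whose ring is flow-controlled */
		exec.address = batch_addr;		/* GPU VA of the (already bound) batch */
		exec.num_batch_buffer = 1;		/* single batch, no parallel submission */
		exec.num_syncs = 0;			/* no explicit syncs in this sketch */

		return drmIoctl(fd, DRM_IOCTL_XE_EXEC, &exec);
	}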
| xe_execlist.c |
	// SPDX-License-Identifier: MIT
	GENMASK_ULL(SW_CTX_ID_WIDTH + SW_CTX_ID_SHIFT - 1, …
	GENMASK_ULL(XEHP_SW_CTX_ID_WIDTH + XEHP_SW_CTX_ID_SHIFT - 1, …
	__start_lrc():
		struct xe_gt *gt = hwe->gt;
		struct xe_mmio *mmio = &gt->mmio;
		xe_gt_assert(hwe->gt, FIELD_FIT(XEHP_SW_CTX_ID, ctx_id));
		xe_gt_assert(hwe->gt, FIELD_FIT(SW_CTX_ID, ctx_id));
		if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
			…
		xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->rin…
	execlist_run_job(), execlist_job_free():
		struct xe_sched_job *job = to_xe_sched_job(drm_job);
	…
| /linux/include/drm/ |
| gpu_scheduler.h |
	#include <linux/dma-fence.h>
	/* DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining …
	 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set …
	 * … to an array, and as such should start at 0.
	 *
	 * struct drm_sched_entity - A wrapper around a job queue (typically …
	 * … ring, and the scheduler will alternate between entities based on …
	 *
	 * Lock protecting the run-queue (@rq) to which this entity belongs, …
	 *
	 * The dependency fence of the job which is on the top of the job queue …
	 */
	…
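The gpu_scheduler.h fragments describe an entity as a wrapper around a job queue: each entity feeds a scheduler, and the scheduler alternates between entities when picking the next job to run. A hedged sketch of the driver-side calls is below; the function names exist in this header, but their exact parameter lists have changed across kernel versions (drm_sched_job_init() gained a credits argument, drm_sched_entity_push_job() lost its entity argument), so treat the signatures as assumptions.

	#include <drm/gpu_scheduler.h>

	/* Hedged sketch: create an entity on one scheduler and queue a single job. */
	static int example_queue_one_job(struct drm_gpu_scheduler *sched,
					 struct drm_sched_entity *entity,
					 struct drm_sched_job *job,	/* embedded in a driver job struct */
					 void *owner)
	{
		int r;

		/* One entity per userspace queue; the scheduler alternates between
		 * entities of the same priority when selecting the next job. */
		r = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
					  &sched, 1, NULL);
		if (r)
			return r;

		/* Bind the job to the entity and allocate its scheduled/finished fences. */
		r = drm_sched_job_init(job, entity, 1, owner);
		if (r)
			return r;

		drm_sched_job_arm(job);		/* commit: fences become visible to others */
		drm_sched_entity_push_job(job);	/* queue it; run_job() fires once deps signal */
		return 0;
	}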
| /linux/drivers/gpu/drm/nouveau/ |
| nouveau_exec.h |
	/* SPDX-License-Identifier: MIT */
	#define to_nouveau_exec_job(job) \
		container_of((job), struct nouveau_exec_job, base)
	int nouveau_exec_job_init(struct nouveau_exec_job **job, …
	nouveau_exec_push_max_from_ib_max():
		/* Limit the number of IBs per job to half the size of the ring in order
		 * to avoid the ring running dry between submissions and preserve one
		 * more slot for the job's HW fence.
		 */
		return ib_max > 1 ? ib_max / 2 - 1 : 0;
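The nouveau_exec.h comment carries the reasoning behind the push limit; restated with an illustrative number (the ib_max value is assumed, not taken from the file): an ib_max of 512 allows at most 512 / 2 - 1 = 255 pushes per job, keeping half the ring free between submissions and reserving one slot for the job's HW fence.

	#include <stdio.h>

	/* Standalone restatement of the helper quoted above. */
	static int push_max_from_ib_max(int ib_max)
	{
		return ib_max > 1 ? ib_max / 2 - 1 : 0;
	}

	int main(void)
	{
		printf("%d\n", push_max_from_ib_max(512));	/* prints 255 */
		return 0;
	}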
|