
Searched full:vpe (Results 1 – 25 of 68) sorted by relevance


/linux/drivers/gpu/drm/amd/amdgpu/
vpe_v6_1.c
31 #include "ivsrcid/vpe/irqsrcs_vpe_6_1.h"
32 #include "vpe/vpe_6_1_0_offset.h"
33 #include "vpe/vpe_6_1_0_sh_mask.h"
64 static uint32_t vpe_v6_1_get_reg_offset(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset) in vpe_v6_1_get_reg_offset() argument
68 base = vpe->ring.adev->reg_offset[VPE_HWIP][inst][0]; in vpe_v6_1_get_reg_offset()
73 static void vpe_v6_1_halt(struct amdgpu_vpe *vpe, bool halt) in vpe_v6_1_halt() argument
75 struct amdgpu_device *adev = vpe->ring.adev; in vpe_v6_1_halt()
78 for (i = 0; i < vpe->num_instances; i++) { in vpe_v6_1_halt()
79 f32_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL)); in vpe_v6_1_halt()
82 WREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL), f32_cntl); in vpe_v6_1_halt()
[all …]
amdgpu_vpe.c
34 /* VPE CSA resides in the 4th page of CSA */
114 * VPE has 4 DPM levels from level 0 (lowest) to 3 (highest),
115 * VPE FW will dynamically decide which level should be used according to current loading.
117 * Get VPE and SOC clocks from PM, and select the appropriate four clock values,
119 * The VPE FW can then request the appropriate frequency from the PMFW.
121 int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe) in amdgpu_vpe_configure_dpm() argument
123 struct amdgpu_device *adev = vpe->ring.adev; in amdgpu_vpe_configure_dpm()
135 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); in amdgpu_vpe_configure_dpm()
137 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); in amdgpu_vpe_configure_dpm()
148 /* Confirm enabled vpe clk num in amdgpu_vpe_configure_dpm()
[all …]
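
The amdgpu_vpe.c comments above describe the DPM scheme: the driver pre-selects four clock levels (level 0 lowest, level 3 highest) from the clocks reported by power management, and the VPE firmware then chooses among them at runtime according to load. A minimal standalone sketch of that "pick four levels from a sorted clock table" step; the table, sizes and helper name are made up for illustration, this is not the actual amdgpu code:

	/* Illustrative only: spread four DPM clock levels across a sorted table.
	 * The table and names are hypothetical, not the amdgpu API. */
	#include <stddef.h>
	#include <stdint.h>

	#define VPE_MAX_DPM_LEVEL 4

	/* Hypothetical: candidate VPE clocks reported by power management, ascending. */
	static uint32_t vpe_clk_table_khz[] = { 200000, 343000, 457000, 514000, 600000, 686000 };

	/* Pick four values spread across the table (level 0 = lowest, level 3 = highest). */
	static void vpe_pick_dpm_clocks(uint32_t *levels_khz)
	{
		size_t n = sizeof(vpe_clk_table_khz) / sizeof(vpe_clk_table_khz[0]);
		for (int lvl = 0; lvl < VPE_MAX_DPM_LEVEL; lvl++)
			levels_khz[lvl] = vpe_clk_table_khz[lvl * (n - 1) / (VPE_MAX_DPM_LEVEL - 1)];
	}

As the comments above note, the firmware then requests one of the four resulting frequencies from the PMFW depending on the current load.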
vpe_6_1_fw_if.h
29 * VPE OP Codes
65 * VPE NOP
74 * VPE Descriptor
84 * VPE Plane Config
/linux/arch/mips/kernel/
vpe-mt.c
18 #include <asm/vpe.h>
25 /* We are prepared so configure and start the VPE... */
26 int vpe_run(struct vpe *v) in vpe_run()
33 /* check we are the Master VPE */ in vpe_run()
37 pr_warn("VPE loader: only Master VPE's are able to config MT\n"); in vpe_run()
51 pr_warn("VPE loader: No TC's associated with VPE %d\n", in vpe_run()
71 pr_warn("VPE loader: TC %d is already active!\n", in vpe_run()
95 * We don't pass the memsize here, so VPE programs need to be in vpe_run()
103 * bind the TC to VPE 1 as late as possible so we only have the final in vpe_run()
104 * VPE registers to set up, and so an EJTAG probe can trigger on it in vpe_run()
[all …]
vpe.c
9 * VPE support module for loading a MIPS SP program into VPE1. The SP
37 #include <asm/vpe.h>
53 /* get the vpe associated with this minor */
54 struct vpe *get_vpe(int minor) in get_vpe()
56 struct vpe *res, *v; in get_vpe()
74 /* get the vpe associated with this minor */
92 /* allocate a vpe and associate it with this minor (or index) */
93 struct vpe *alloc_vpe(int minor) in alloc_vpe()
95 struct vpe *v; in alloc_vpe()
97 v = kzalloc(sizeof(struct vpe), GFP_KERNEL); in alloc_vpe()
[all …]
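
The vpe.c excerpt keeps a list of VPEs and maps a character-device minor to one of them via get_vpe()/alloc_vpe(). A kernel-style sketch of how such a lookup/allocation pair typically looks; the global vpecontrol list head is an assumption made for illustration, not the loader's exact layout:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <asm/vpe.h>

	/* Assumed bookkeeping: a single global list of VPEs. */
	static struct {
		struct list_head vpe_list;
	} vpecontrol = { .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list) };

	/* Find the vpe associated with this minor. */
	struct vpe *get_vpe(int minor)
	{
		struct vpe *v;

		list_for_each_entry(v, &vpecontrol.vpe_list, list)
			if (v->minor == minor)
				return v;
		return NULL;
	}

	/* Allocate a vpe and associate it with this minor. */
	struct vpe *alloc_vpe(int minor)
	{
		struct vpe *v = kzalloc(sizeof(*v), GFP_KERNEL);

		if (!v)
			return NULL;
		v->minor = minor;
		INIT_LIST_HEAD(&v->tc);		/* TCs later bound to this VPE */
		list_add(&v->list, &vpecontrol.vpe_list);
		return v;
	}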
smp-mt.c
53 /* Deactivate all but VPE 0 */ in smvp_vpe_init()
59 /* master VPE */ in smvp_vpe_init()
88 /* bind a TC to each VPE; may as well put all excess TC's in smvp_tc_init()
89 on the last VPE */ in smvp_tc_init()
141 * assumes a 1:1 mapping of TC => VPE
154 /* enable the tc this vpe/cpu will be running */ in vsmp_boot_secondary()
159 /* enable the VPE */ in vsmp_boot_secondary()
210 /* we'll always have more TC's than VPE's, so loop setting everything in vsmp_smp_setup()
smp-cps.c
216 /* Detect & record VPE topology */ in cps_smp_setup()
219 pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE"); in cps_smp_setup()
254 /* Indicate present CPUs (CPU being synonymous with VPE) */ in cps_smp_setup()
357 /* Allocate VPE boot configuration structs */ in cps_prepare_cpus()
627 /* Boot a VPE on a powered down core */ in cps_boot_secondary()
643 /* Boot a VPE on another powered up core */ in cps_boot_secondary()
665 /* Boot a VPE on this core */ in cps_boot_secondary()
676 /* Disable MT - we only want to run 1 TC per VPE */ in cps_init_secondary()
803 /* Look for another online VPE within the core */ in play_dead()
809 * There is an online VPE within the core. Just halt in play_dead()
[all …]
rtlx-mt.c
18 #include <asm/vpe.h>
74 pr_warn("VPE loader: not a MIPS MT capable processor\n"); in rtlx_module_init()
rtlx.c
22 #include <asm/vpe.h>
68 void rtlx_starting(int vpe) in rtlx_starting() argument
81 void rtlx_stopping(int vpe) in rtlx_stopping() argument
Makefile
65 obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
66 obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o
/linux/drivers/irqchip/
irq-gic-v4.c
55 * with the actual vPE affinity, and not the braindead concept of
65 * The choice made here is that each vcpu (VPE in old northern GICv4
68 * interrupt becomes a handle for the VPE, and that the hypervisor
72 * contain an irq domain where each interrupt maps to a VPE. In
78 * - irq_set_affinity is used to move a VPE from one redistributor to
82 * creating a new sub-API, namely scheduling/descheduling a VPE
119 static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx) in its_alloc_vcpu_sgis() argument
131 vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx); in its_alloc_vcpu_sgis()
132 if (!vpe->fwnode) in its_alloc_vcpu_sgis()
138 vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16, in its_alloc_vcpu_sgis()
[all …]
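
The comment block above summarises the GICv4 model: each vcpu (vPE) owns a doorbell interrupt, that Linux interrupt becomes the handle for the vPE, and moving the vPE between redistributors is just an affinity change on that interrupt. A heavily simplified sketch of the "doorbell as a handle" idea from a hypervisor's point of view; struct my_vcpu and my_vcpu_kick() are placeholders, and only request_irq() and irq_set_affinity() are real kernel APIs:

	#include <linux/interrupt.h>
	#include <linux/cpumask.h>

	struct my_vcpu {
		int doorbell_irq;	/* per-vPE doorbell allocated by the GICv4 layer */
		int target_cpu;		/* redistributor (CPU) the vPE should live on */
	};

	static void my_vcpu_kick(struct my_vcpu *vcpu)
	{
		/* hypothetical: make the vcpu runnable so it can take its pending vLPIs */
	}

	static irqreturn_t vpe_doorbell_handler(int irq, void *data)
	{
		my_vcpu_kick(data);	/* the doorbell fired: a vLPI targets this vPE */
		return IRQ_HANDLED;
	}

	static int attach_vpe_doorbell(struct my_vcpu *vcpu)
	{
		int err = request_irq(vcpu->doorbell_irq, vpe_doorbell_handler, 0,
				      "vpe-doorbell", vcpu);
		if (err)
			return err;

		/* Moving the vPE to another redistributor is "just" an affinity change. */
		return irq_set_affinity(vcpu->doorbell_irq, cpumask_of(vcpu->target_cpu));
	}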
irq-gic-v3-its.c
358 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) in vpe_to_cpuid_lock() argument
360 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); in vpe_to_cpuid_lock()
361 return vpe->col_idx; in vpe_to_cpuid_lock()
364 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) in vpe_to_cpuid_unlock() argument
366 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); in vpe_to_cpuid_unlock()
373 struct its_vpe *vpe = NULL; in irq_to_cpuid_lock() local
377 vpe = irq_data_get_irq_chip_data(d); in irq_to_cpuid_lock()
381 vpe = map->vpe; in irq_to_cpuid_lock()
384 if (vpe) { in irq_to_cpuid_lock()
385 cpu = vpe_to_cpuid_lock(vpe, flags); in irq_to_cpuid_lock()
[all …]
irq-econet-en751221.c
7 * be routed to either VPE but not both, so to support per-CPU interrupts, a
8 * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In
18 * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29,
19 * telling the hardware to mask VPE#1's view of IRQ 30.
79 * irq being manipulated by a thread running on VPE#1. in econet_chmask()
80 * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask. in econet_chmask()
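
The driver comments above describe the shadow-IRQ trick: the hardware can route a line to only one VPE, so a second hwirq's mask bit is borrowed to mask/unmask VPE#1's view of the real interrupt (masking IRQ 29 masks VPE#1's view of IRQ 30). A tiny sketch of the lookup that idea implies; the table and helper names are invented for illustration, not the driver's actual fields:

	#define ECONET_NR_HWIRQ 64

	/* Hypothetical map: real hwirq -> shadow hwirq used for VPE#1 masking, or -1 if not per-CPU. */
	static int econet_shadow[ECONET_NR_HWIRQ];

	/* On VPE#1 a per-CPU interrupt is masked through its shadow hwirq's bit;
	 * everything else (and everything on VPE#0) uses the real hwirq's bit. */
	static unsigned int econet_hwirq_for_mask(unsigned int hwirq, unsigned int this_vpe)
	{
		if (this_vpe == 1 && econet_shadow[hwirq] >= 0)
			return (unsigned int)econet_shadow[hwirq];
		return hwirq;
	}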
/linux/arch/mips/lantiq/
irq.c
50 #define ltq_icu_w32(vpe, m, x, y) \ argument
51 ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))
53 #define ltq_icu_r32(vpe, m, x) \ argument
54 ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))
83 int vpe; in ltq_disable_irq() local
88 for_each_present_cpu(vpe) { in ltq_disable_irq()
89 ltq_icu_w32(vpe, im, in ltq_disable_irq()
90 ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset), in ltq_disable_irq()
101 int vpe; in ltq_mask_and_ack_irq() local
106 for_each_present_cpu(vpe) { in ltq_mask_and_ack_irq()
[all …]
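
The lantiq ICU excerpt shows the driver's pattern: the interrupt-enable register exists once per VPE, so masking walks every present VPE and clears the bit in each copy. The matching unmask path, sketched only from the macros shown above (not the driver's actual unmask function):

	/* Sketch of the unmask counterpart to the ltq_disable_irq() loop shown above:
	 * set this irq's IER bit in every present VPE's copy of the register. */
	static void ltq_enable_irq_sketch(unsigned long offset, int im)
	{
		int vpe;

		for_each_present_cpu(vpe)
			ltq_icu_w32(vpe, im,
				    ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
				    LTQ_ICU_IER);
	}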
/linux/arch/mips/include/asm/
vpe.h
17 #define VPE_MODULE_NAME "vpe"
49 struct vpe { struct
52 /* (device) minor associated with this vpe */
63 /* tc's associated with this vpe */ argument
66 /* The list of vpe's */ argument
82 struct vpe *pvpe; /* parent VPE */ argument
83 struct list_head tc; /* The list of TC's with this VPE */
88 void (*start)(int vpe);
89 void (*stop)(int vpe);
108 struct vpe *get_vpe(int minor);
[all …]
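
The header excerpt shows that VPE loader clients register start/stop callbacks (rtlx_starting()/rtlx_stopping() in the rtlx results are one such pair) and look up a struct vpe by its character-device minor with get_vpe(). A minimal client-side sketch; struct vpe_notifications and vpe_notify() are assumed from this header's context, not verified signatures:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/printk.h>
	#include <asm/vpe.h>

	static void my_vpe_started(int vpe) { pr_info("VPE %d started\n", vpe); }
	static void my_vpe_stopped(int vpe) { pr_info("VPE %d stopped\n", vpe); }

	/* Assumed shape of the start/stop hook registration; not a verified API. */
	static struct vpe_notifications my_hooks = {
		.start = my_vpe_started,
		.stop  = my_vpe_stopped,
	};

	static int __init my_client_init(void)
	{
		vpe_notify(1, &my_hooks);	/* assumed: register hooks for VPE 1 */
		return 0;
	}
	module_init(my_client_init);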
rtlx.h
30 void rtlx_starting(int vpe);
31 void rtlx_stopping(int vpe);
/linux/include/linux/irqchip/
arm-gic-v4.h
43 /* per-vPE VLPI tracking */
48 /* VPE resident */
55 /* VPE proxy mapping */
72 /* Track the VPE being mapped */
77 * vPE and vLPI operations using vpe->col_idx.
82 * redistributor for this VPE. The ID itself isn't involved in
86 /* Unique (system-wide) VPE identifier */
99 * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
102 * @db_enabled: Is the VPE doorbell to be generated?
106 struct its_vpe *vpe; member
[all …]
/linux/Documentation/devicetree/bindings/media/
ti,vpe.yaml
4 $id: http://devicetree.org/schemas/media/ti,vpe.yaml#
7 title: Texas Instruments DRA7x Video Processing Engine (VPE)
13 The Video Processing Engine (VPE) is a key component for image post
14 processing applications. VPE consist of a single memory to memory
20 const: ti,dra7-vpe
24 - description: The VPE main register region
51 vpe: vpe@489d0000 {
52 compatible = "ti,dra7-vpe";
/linux/drivers/media/platform/ti/
Kconfig
6 # These will be selected by VPE and VIP
47 tristate "TI VPE (Video Processing Engine) driver"
57 Support for the TI VPE(Video Processing Engine) block
61 bool "VPE debug messages"
64 Enable debug messages on VPE driver.
Makefile
4 obj-y += vpe/
/linux/Documentation/devicetree/bindings/timer/
econet,en751221-timer.yaml
14 EcoNet SoCs, including the EN751221 and EN751627 families. It provides per-VPE
53 - description: VPE timers 0 and 1
54 - description: VPE timers 2 and 3
59 - description: VPE timers 0 and 1
/linux/Documentation/devicetree/bindings/interrupt-controller/
econet,en751221-intc.yaml
15 be routed to either VPE but not both, so to support per-CPU interrupts, a
16 secondary IRQ number is allocated to control masking/unmasking on VPE#1. For
45 and the second is its shadow IRQ used for VPE#1 control. For example,
47 when VPE#1 requests IRQ 8, it will manipulate the IRQ 3 mask bit.
/linux/drivers/media/platform/ti/vpe/
Makefile
2 obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
7 ti-vpe-y := vpe.o
vpe_regs.h
13 /* VPE register offsets and field selectors */
15 /* VPE top level regs */
121 /* VPE chrominance upsampler regs */
192 /* VPE de-interlacer regs */
/linux/Documentation/admin-guide/media/
platform-cardlist.rst
69 ti-vpe TI VPE (Video Processing Engine)
