/linux/drivers/gpu/drm/amd/amdgpu/

  vpe_v6_1.c
      31  #include "ivsrcid/vpe/irqsrcs_vpe_6_1.h"
      32  #include "vpe/vpe_6_1_0_offset.h"
      33  #include "vpe/vpe_6_1_0_sh_mask.h"
      64  static uint32_t vpe_v6_1_get_reg_offset(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset)
      68  base = vpe->ring.adev->reg_offset[VPE_HWIP][inst][0];
      73  static void vpe_v6_1_halt(struct amdgpu_vpe *vpe, bool halt)
      75  struct amdgpu_device *adev = vpe->ring.adev;
      78  for (i = 0; i < vpe->num_instances; i++) {
      79  f32_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL));
      82  WREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL), f32_cntl);
      …
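The halt path above appears only in fragments. Purely as a sketch of the read-modify-write loop the RREG32()/WREG32() pair implies, and not the driver's actual body, it could look like this; the VPEC_F32_CNTL HALT field name and the REG_SET_FIELD() usage are assumptions:

    /* Sketch: toggle a halt bit on every VPE instance using amdgpu's usual
     * read-modify-write helpers. HALT is an assumed field name. */
    static void vpe_v6_1_halt_sketch(struct amdgpu_vpe *vpe, bool halt)
    {
            uint32_t f32_cntl;
            int i;

            for (i = 0; i < vpe->num_instances; i++) {
                    /* Read the per-instance F32 control register... */
                    f32_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL));
                    /* ...update only the halt bit... */
                    f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, halt ? 1 : 0);
                    /* ...and write it back. */
                    WREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL), f32_cntl);
            }
    }
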
  vpe_6_1_fw_if.h
      29  * VPE OP Codes
      65  * VPE NOP
      74  * VPE Descriptor
      84  * VPE Plane Config

  vpe_v6_1.h
      27  void vpe_v6_1_set_funcs(struct amdgpu_vpe *vpe);
/linux/arch/mips/kernel/

  vpe-mt.c
      18  #include <asm/vpe.h>
      25  /* We are prepared so configure and start the VPE... */
      26  int vpe_run(struct vpe *v)
      33  /* check we are the Master VPE */
      37  pr_warn("VPE loader: only Master VPE's are able to config MT\n");
      51  pr_warn("VPE loader: No TC's associated with VPE %d\n",
      71  pr_warn("VPE loader: TC %d is already active!\n",
      95  * We don't pass the memsize here, so VPE programs need to be
     103  * bind the TC to VPE 1 as late as possible so we only have the final
     104  * VPE registers to set up, and so an EJTAG probe can trigger on it
      …
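Only vpe_run()'s warning sites are matched above. A minimal sketch of the gating they imply, assuming hypothetical helpers (is_master_vpe(), first_tc_of(), tc_is_active()) and illustrative ->minor/->index fields:

    /* Sketch of the early sanity checks before a SP program is started. */
    static int vpe_run_checks_sketch(struct vpe *v)
    {
            struct tc *t;

            /* Only the master VPE may reconfigure MT state. */
            if (!is_master_vpe()) {
                    pr_warn("VPE loader: only Master VPE's are able to config MT\n");
                    return -ENODEV;
            }

            /* The program needs at least one TC to run on. */
            t = first_tc_of(v);
            if (!t) {
                    pr_warn("VPE loader: No TC's associated with VPE %d\n", v->minor);
                    return -ENOEXEC;
            }

            /* Refuse to hijack a TC that is already executing something. */
            if (tc_is_active(t)) {
                    pr_warn("VPE loader: TC %d is already active!\n", t->index);
                    return -ENOEXEC;
            }

            return 0;
    }
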
  vpe.c
       9  * VPE support module for loading a MIPS SP program into VPE1. The SP
      37  #include <asm/vpe.h>
      53  /* get the vpe associated with this minor */
      54  struct vpe *get_vpe(int minor)
      56  struct vpe *res, *v;
      74  /* get the vpe associated with this minor */
      92  /* allocate a vpe and associate it with this minor (or index) */
      93  struct vpe *alloc_vpe(int minor)
      95  struct vpe *v;
      97  v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
      …
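get_vpe() suggests a simple minor-to-device lookup. A sketch of that pattern, with the list head, lock, and struct members below used as assumed names rather than the file's own:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static LIST_HEAD(vpe_list);
    static DEFINE_SPINLOCK(vpe_list_lock);

    /* Sketch: walk the registered VPEs and return the one bound to 'minor'. */
    static struct vpe *get_vpe_sketch(int minor)
    {
            struct vpe *v, *res = NULL;

            spin_lock(&vpe_list_lock);
            list_for_each_entry(v, &vpe_list, list) {
                    if (v->minor == minor) {
                            res = v;
                            break;
                    }
            }
            spin_unlock(&vpe_list_lock);

            return res;
    }
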
  smp-mt.c
      53  /* Deactivate all but VPE 0 */
      59  /* master VPE */
      88  /* bind a TC to each VPE, May as well put all excess TC's
      89     on the last VPE */
     141  * assumes a 1:1 mapping of TC => VPE
     154  /* enable the tc this vpe/cpu will be running */
     159  /* enable the VPE */
     210  /* we'll always have more TC's than VPE's, so loop setting everything
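The comments above describe the binding policy: one TC per VPE, with any excess TCs parked on the last VPE. A sketch of just that policy, where bind_tc_to_vpe() is a hypothetical stand-in for the real TCBIND register writes:

    /* Sketch: distribute ntc thread contexts across nvpe VPEs. */
    static void smvp_bind_tcs_sketch(unsigned int ntc, unsigned int nvpe)
    {
            unsigned int tc;

            for (tc = 0; tc < ntc; tc++) {
                    /* TCs beyond the VPE count all land on the last VPE. */
                    unsigned int vpe = (tc < nvpe) ? tc : nvpe - 1;

                    bind_tc_to_vpe(tc, vpe);
            }
    }
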
  smp-cps.c
     216  /* Detect & record VPE topology */
     219  pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
     254  /* Indicate present CPUs (CPU being synonymous with VPE) */
     368  /* Allocate VPE boot configuration structs */
     641  /* Boot a VPE on a powered down core */
     657  /* Boot a VPE on another powered up core */
     679  /* Boot a VPE on this core */
     690  /* Disable MT - we only want to run 1 TC per VPE */
     817  /* Look for another online VPE within the core */
     823  * There is an online VPE within the core. Just halt
      …
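The three "Boot a VPE ..." comments hint at a three-way dispatch in cps_boot_secondary(). A sketch of that decision only; every helper here is a hypothetical placeholder, not a function from smp-cps.c:

    /* Sketch: pick a boot path depending on where the target VPE lives. */
    static int cps_boot_secondary_sketch(unsigned int core, unsigned int vpe_id,
                                         unsigned int this_core)
    {
            if (!core_is_powered_up(core))
                    return boot_vpe_on_powered_down_core(core, vpe_id);

            if (core != this_core)
                    return boot_vpe_on_other_core(core, vpe_id);

            return boot_vpe_on_this_core(vpe_id);
    }
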
  rtlx-mt.c
      18  #include <asm/vpe.h>
      74  pr_warn("VPE loader: not a MIPS MT capable processor\n");

  rtlx.c
      22  #include <asm/vpe.h>
      68  void rtlx_starting(int vpe)
      81  void rtlx_stopping(int vpe)

  Makefile
      65  obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
      66  obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o

/linux/drivers/irqchip/
  irq-gic-v4.c
      55  * with the actual vPE affinity, and not the braindead concept of
      65  * The choice made here is that each vcpu (VPE in old northern GICv4
      68  * interrupt becomes a handle for the VPE, and that the hypervisor
      72  * contain an irq domain where each interrupt maps to a VPE. In
      78  * - irq_set_affinity is used to move a VPE from one redistributor to
      82  *   creating a new sub-API, namely scheduling/descheduling a VPE
     119  static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
     131  vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
     132  if (!vpe->fwnode)
     138  vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
      …
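From the calls visible above, its_alloc_vcpu_sgis() gives each vPE its own fwnode and a 16-interrupt linear domain, one interrupt per SGI. A hedged sketch of that shape; the sgi_domain_ops name and the cleanup path are assumptions:

    /* Sketch: create a private SGI irq domain for one vPE. */
    static int alloc_vcpu_sgis_sketch(struct its_vpe *vpe, int idx)
    {
            char *name;

            name = kasprintf(GFP_KERNEL, "GICv4-vpe-%d-sgis", idx);
            if (!name)
                    return -ENOMEM;

            vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
            kfree(name);
            if (!vpe->fwnode)
                    return -ENOMEM;

            /* 16 SGIs per vPE, each mapped through this private domain. */
            vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
                                                       &sgi_domain_ops, vpe);
            if (!vpe->sgi_domain) {
                    irq_domain_free_fwnode(vpe->fwnode);
                    return -ENOMEM;
            }

            return 0;
    }
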
  irq-gic-v3-its.c
     359  static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
     361  raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
     362  return vpe->col_idx;
     365  static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
     367  raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
     374  struct its_vpe *vpe = NULL;
     378  vpe = irq_data_get_irq_chip_data(d);
     382  vpe = map->vpe;
     385  if (vpe) {
     386  cpu = vpe_to_cpuid_lock(vpe, flags);
      …
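A short sketch of how such a lock/unlock pair is typically consumed: pin the vPE's current target CPU (col_idx) for the duration of an operation aimed at it. send_command_to_cpu() is a hypothetical stand-in:

    /* Sketch: col_idx stays stable while vpe_lock is held. */
    static void poke_vpe_sketch(struct its_vpe *vpe)
    {
            unsigned long flags;
            int cpu;

            cpu = vpe_to_cpuid_lock(vpe, &flags);
            send_command_to_cpu(cpu, vpe);
            vpe_to_cpuid_unlock(vpe, flags);
    }
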
  irq-econet-en751221.c
       7  * be routed to either VPE but not both, so to support per-CPU interrupts, a
       8  * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In
      18  * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29,
      19  * telling the hardware to mask VPE#1's view of IRQ 30.
      79  * irq being manipulated by a thread running on VPE#1.
      80  * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask.
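A sketch of the shadow selection those comments describe: a per-CPU interrupt is masked through its shadow line when the caller runs on VPE#1, otherwise through the real one. The shadow_of[] table and the "CPU 0 is VPE#0" mapping are illustrative assumptions:

    /* Sketch: pick which mask bit to manipulate for a given hardware irq. */
    static unsigned int econet_mask_line_sketch(unsigned int hwirq,
                                                const unsigned int *shadow_of)
    {
            unsigned int shadow;

            /* VPE#0 always touches the real bit (assume CPU 0 == VPE#0). */
            if (smp_processor_id() == 0)
                    return hwirq;

            /* On VPE#1, a per-CPU irq is controlled via its shadow line,
             * e.g. IRQ 30 -> shadow IRQ 29. 0 means "no shadow". */
            shadow = shadow_of[hwirq];
            return shadow ? shadow : hwirq;
    }
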
/linux/include/linux/irqchip/

  arm-gic-v4.h
      43  /* per-vPE VLPI tracking */
      48  /* VPE resident */
      55  /* VPE proxy mapping */
      72  /* Track the VPE being mapped */
      77  * vPE and vLPI operations using vpe->col_idx.
      82  * redistributor for this VPE. The ID itself isn't involved in
      86  /* Unique (system-wide) VPE identifier */
      99  * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
     102  * @db_enabled: Is the VPE doorbell to be generated?
     106  struct its_vpe *vpe;
      …

/linux/Documentation/devicetree/bindings/media/

  ti,vpe.yaml
       4  $id: http://devicetree.org/schemas/media/ti,vpe.yaml#
       7  title: Texas Instruments DRA7x Video Processing Engine (VPE)
      13  The Video Processing Engine (VPE) is a key component for image post
      14  processing applications. VPE consist of a single memory to memory
      20  const: ti,dra7-vpe
      24  - description: The VPE main register region
      51  vpe: vpe@489d0000 {
      52  compatible = "ti,dra7-vpe";

/linux/arch/arm64/kvm/vgic/
  vgic-v4.c
      94  * The v4.1 doorbell can fire concurrently with the vPE being
     108  static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
     110  vpe->sgi_config[irq->intid].enabled = irq->enabled;
     111  vpe->sgi_config[irq->intid].group = irq->group;
     112  vpe->sgi_config[irq->intid].priority = irq->priority;
     117  struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
     137  irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);
     139  /* Transfer the full irq state to the vPE */
     140  vgic_v4_sync_sgi_config(vpe, irq);
     208  * Must be called with GICv4.1 and the vPE unmapped, which
      …
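Putting the visible pieces together, the vSGI enable path resolves the Linux irq behind each of the 16 SGIs and mirrors the vgic_irq state into the vPE's sgi_config[]. A sketch under those assumptions; locking, reference handling and the vgic_get_irq()/vgic_put_irq() calls are simplified:

    /* Sketch: push the current SGI configuration of a vcpu into its vPE. */
    static void enable_vsgis_sketch(struct kvm_vcpu *vcpu)
    {
            struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
            int i;

            for (i = 0; i < 16; i++) {
                    struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);

                    /* Remember the host irq backing this vSGI. */
                    irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

                    /* Mirror enable/group/priority into the vPE-side table. */
                    vpe->sgi_config[i].enabled  = irq->enabled;
                    vpe->sgi_config[i].group    = irq->group;
                    vpe->sgi_config[i].priority = irq->priority;

                    vgic_put_irq(vcpu->kvm, irq);
            }
    }
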
/linux/Documentation/devicetree/bindings/timer/

  econet,en751221-timer.yaml
      14  EcoNet SoCs, including the EN751221 and EN751627 families. It provides per-VPE
      53  - description: VPE timers 0 and 1
      54  - description: VPE timers 2 and 3
      59  - description: VPE timers 0 and 1

/linux/Documentation/devicetree/bindings/interrupt-controller/

  econet,en751221-intc.yaml
      15  be routed to either VPE but not both, so to support per-CPU interrupts, a
      16  secondary IRQ number is allocated to control masking/unmasking on VPE#1. For
      45  and the second is its shadow IRQ used for VPE#1 control. For example,
      47  when VPE#1 requests IRQ 8, it will manipulate the IRQ 3 mask bit.

/linux/drivers/media/platform/ti/vpe/

  Makefile
       2  obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
       7  ti-vpe-y := vpe.o

  vpe_regs.h
      13  /* VPE register offsets and field selectors */
      15  /* VPE top level regs */
     121  /* VPE chrominance upsampler regs */
     192  /* VPE de-interlacer regs */

/linux/arch/mips/include/asm/

  rtlx.h
      30  void rtlx_starting(int vpe);
      31  void rtlx_stopping(int vpe);

  mipsmtregs.h
     103  /* VPEControl fields (per VPE) */
     124  /* VPEConf0 fields (per VPE) */
     132  /* VPEConf1 fields (per VPE) */

/linux/Documentation/admin-guide/media/

  platform-cardlist.rst
      69  ti-vpe    TI VPE (Video Processing Engine)

/linux/drivers/media/platform/ti/

  Makefile
       4  obj-y += vpe/

/linux/drivers/net/ethernet/intel/iavf/
  iavf_virtchnl.c
      85  struct virtchnl_pf_event *vpe =
      88  if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
    1649  * @vpe: virtchnl_pf_event structure
    1655  struct virtchnl_pf_event *vpe)
    1658  return vpe->event_data.link_event_adv.link_status;
    1660  return vpe->event_data.link_event.link_status;
    1666  * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
    1672  struct virtchnl_pf_event *vpe)
    1676  vpe->event_data.link_event_adv.link_speed;
    1678  adapter->link_speed = vpe->event_data.link_event.link_speed;
      …
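Both helpers above pick between two layouts of the same event payload. A sketch of that capability-gated access, where ADV_LINK_SUPPORT() stands in for however the driver detects that the PF reports the newer link_event_adv format:

    /* Sketch: read link state from whichever event layout the PF uses. */
    static bool get_vpe_link_status_sketch(struct iavf_adapter *adapter,
                                           struct virtchnl_pf_event *vpe)
    {
            if (ADV_LINK_SUPPORT(adapter))
                    return vpe->event_data.link_event_adv.link_status;

            return vpe->event_data.link_event.link_status;
    }
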