/linux/drivers/net/ethernet/marvell/mvpp2/
mvpp2_prs.c
   22  static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)  in mvpp2_prs_hw_write() argument
   26      if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)  in mvpp2_prs_hw_write()
   30      pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;  in mvpp2_prs_hw_write()
   33      mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);  in mvpp2_prs_hw_write()
   35      mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);  in mvpp2_prs_hw_write()
   38      mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);  in mvpp2_prs_hw_write()
   40      mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);  in mvpp2_prs_hw_write()
   46  int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,  in mvpp2_prs_init_from_hw() argument
   54      memset(pe, 0, sizeof(*pe));  in mvpp2_prs_init_from_hw()
   55      pe->index = tid;  in mvpp2_prs_init_from_hw()
  [all …]
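The hits above show the parser's indirect access pattern: software selects an entry via an index register, then streams the per-entry words through data registers. A sketch of the complete writer reconstructed from these fragments; the loop bounds (MVPP2_PRS_SRAM_WORDS, MVPP2_PRS_TCAM_WORDS) and the -EINVAL return are assumptions taken from the same driver, not part of the hits:

    static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
    {
        int i;

        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
            return -EINVAL;

        /* Clear the entry-invalid bit so the entry takes effect */
        pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

        /* Select the SRAM entry, then write its words indirectly */
        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
            mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);

        /* Same indirect scheme for the TCAM side */
        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
            mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);

        return 0;
    }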

/linux/arch/powerpc/kernel/
eeh_pe.c
    3  * The file intends to implement PE based on the information from
    7  * PE is only meaningful in one PHB domain.
   27  * eeh_set_pe_aux_size - Set PE auxiliary data size
   28  * @size: PE auxiliary data size in bytes
   30  * Set PE auxiliary data size.
   41  * eeh_pe_alloc - Allocate PE
   43  * @type: PE type
   45  * Allocate PE instance dynamically.
   49      struct eeh_pe *pe;  in eeh_pe_alloc() local
   58      /* Allocate PHB PE */  in eeh_pe_alloc()
  [all …]

eeh_driver.c
   89      if (eeh_pe_passed(edev->pe))  in eeh_edev_actionable()
  206      if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))  in eeh_dev_save_state()
  218      struct eeh_pe *pe;  in eeh_set_channel_state() local
  221      eeh_for_each_pe(root, pe)  in eeh_set_channel_state()
  222          eeh_pe_for_each_dev(pe, edev, tmp)  in eeh_set_channel_state()
  229      struct eeh_pe *pe;  in eeh_set_irq_state() local
  232      eeh_for_each_pe(root, pe) {  in eeh_set_irq_state()
  233          eeh_pe_for_each_dev(pe, edev, tmp) {  in eeh_set_irq_state()
  292              !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));  in eeh_pe_report_edev()
  303      struct eeh_pe *pe;  in eeh_pe_report() local
  [all …]

eeh.c
   89  * PE would be created there.
   95  * EEH allowed maximal frozen times. If one particular PE's
   96  * frozen count in last hour exceeds this limit, the PE will
  137      u64 slot_resets;  /* PE reset */
  176          edev->pe->phb->global_number, edev->bdfn >> 8,  in eeh_dump_dev_log()
  179          edev->pe->phb->global_number, edev->bdfn >> 8,  in eeh_dump_dev_log()
  269  static void *eeh_dump_pe_log(struct eeh_pe *pe, void *flag)  in eeh_dump_pe_log() argument
  274      eeh_pe_for_each_dev(pe, edev, tmp)  in eeh_dump_pe_log()
  283  * @pe: EEH PE
  291  void eeh_slot_error_detail(struct eeh_pe *pe, int severity)  in eeh_slot_error_detail() argument
  [all …]

eeh_event.c
   60      /* We might have event without binding PE */  in eeh_event_handler()
   61      if (event->pe)  in eeh_event_handler()
   62          eeh_handle_normal_event(event->pe);  in eeh_event_handler()
   96  * @pe: EEH PE
  102  int __eeh_send_failure_event(struct eeh_pe *pe)  in __eeh_send_failure_event() argument
  112      event->pe = pe;  in __eeh_send_failure_event()
  115      * Mark the PE as recovering before inserting it in the queue.  in __eeh_send_failure_event()
  116      * This prevents the PE from being free()ed by a hotplug driver  in __eeh_send_failure_event()
  117      * while the PE is sitting in the event queue.  in __eeh_send_failure_event()
  119      if (pe) {  in __eeh_send_failure_event()
  [all …]
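The comment at lines 115-117 describes an ordering rule: the PE must be flagged as recovering before the event becomes visible on the queue, so a concurrent hotplug path cannot free it while it waits. A minimal sketch of that ordering; eeh_pe_state_mark() and the eeh_eventlist/eeh_eventlist_lock names are assumptions based on the same file, not shown in the hits:

    /* Pin the PE before it becomes visible on the queue */
    if (pe)
        eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

    /* Only now publish the event; a hotplug driver that sees
     * EEH_PE_RECOVERING will not free the PE under us. */
    spin_lock_irqsave(&eeh_eventlist_lock, flags);
    list_add(&event->list, &eeh_eventlist);
    spin_unlock_irqrestore(&eeh_eventlist_lock, flags);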

/linux/net/netfilter/ipvs/
ip_vs_pe.c
   14  /* IPVS pe list */
   20  /* Get pe in the pe list by name */
   23      struct ip_vs_pe *pe;  in __ip_vs_pe_getbyname() local
   29      list_for_each_entry_rcu(pe, &ip_vs_pe, n_list) {  in __ip_vs_pe_getbyname()
   31          if (pe->module &&  in __ip_vs_pe_getbyname()
   32              !try_module_get(pe->module)) {  in __ip_vs_pe_getbyname()
   33              /* This pe is just deleted */  in __ip_vs_pe_getbyname()
   36          if (strcmp(pe_name, pe->name) == 0) {  in __ip_vs_pe_getbyname()
   39              return pe;  in __ip_vs_pe_getbyname()
   41          module_put(pe->module);  in __ip_vs_pe_getbyname()
  [all …]
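These hits trace a common kernel idiom: walking an RCU-protected list while taking a module reference before the entry can be trusted, and dropping the reference again on a miss. A sketch of the whole lookup reconstructed from the fragments; the rcu_read_lock()/rcu_read_unlock() placement and the continue on a dying module are assumptions based on the upstream function, not visible in the hits:

    static struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
    {
        struct ip_vs_pe *pe;

        rcu_read_lock();
        list_for_each_entry_rcu(pe, &ip_vs_pe, n_list) {
            /* Test and take the module reference atomically */
            if (pe->module && !try_module_get(pe->module)) {
                /* This pe is just deleted */
                continue;
            }
            if (strcmp(pe_name, pe->name) == 0) {
                rcu_read_unlock();
                return pe;              /* hit: caller owns the module ref */
            }
            if (pe->module)
                module_put(pe->module); /* miss: drop the ref */
        }
        rcu_read_unlock();

        return NULL;
    }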

/linux/arch/powerpc/platforms/powernv/
pci-ioda.c
   51  static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
   54  void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,  in pe_level_printk() argument
   66      if (pe->flags & PNV_IODA_PE_DEV)  in pe_level_printk()
   67          strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));  in pe_level_printk()
   68      else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))  in pe_level_printk()
   70              pci_domain_nr(pe->pbus), pe->pbus->number);  in pe_level_printk()
   72      else if (pe->flags & PNV_IODA_PE_VF)  in pe_level_printk()
   74              pci_domain_nr(pe->parent_dev->bus),  in pe_level_printk()
   75              (pe->rid & 0xff00) >> 8,  in pe_level_printk()
   76              PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));  in pe_level_printk()
  [all …]

eeh-powernv.c
   70      struct eeh_pe *pe;  in pnv_eeh_ei_write() local
   90      /* Retrieve PE */  in pnv_eeh_ei_write()
   91      pe = eeh_pe_get(hose, pe_no);  in pnv_eeh_ei_write()
   92      if (!pe)  in pnv_eeh_ei_write()
   96      ret = eeh_ops->err_inject(pe, type, func, addr, mask);  in pnv_eeh_ei_write()
  155      * to clear frozen PE during PCI config access.  in pnv_eeh_enable_phbs()
  300      /* for VFs we use the PF's PE as the upstream PE */  in pnv_eeh_get_upstream_pe()
  305      /* otherwise use the PE of our parent bridge */  in pnv_eeh_get_upstream_pe()
  338      if (!edev || edev->pe)  in pnv_eeh_probe()
  378      /* Create PE */  in pnv_eeh_probe()
  [all …]

pci.h
   25  #define PNV_IODA_PE_DEV      (1 << 0)  /* PE has single PCI device   */
   26  #define PNV_IODA_PE_BUS      (1 << 1)  /* PE has primary PCI bus     */
   27  #define PNV_IODA_PE_BUS_ALL  (1 << 2)  /* PE has subordinate buses   */
   28  #define PNV_IODA_PE_MASTER   (1 << 3)  /* Master PE in compound case */
   29  #define PNV_IODA_PE_SLAVE    (1 << 4)  /* Slave PE in compound case  */
   30  #define PNV_IODA_PE_VF       (1 << 5)  /* PE for one VF              */
   37  * (and PE) that initiated a DMA. In legacy PCI individual memory read/write
   47  * bus of the bridge should go into the same PE.
   50  /* Indicates operations are frozen for a PE: MMIO in PESTA & DMA in PESTB. */
   53  /* Data associated with a PE, including IOMMU tracking etc.. */
  [all …]

pci-sriov.c
   14  * the need to put the MMIO space for each VF into a separate PE. Internally
   15  * the PHB maps MMIO addresses to a specific PE using the "Memory BAR Table".
   39  * segments. The n'th segment is mapped to the n'th PE.
   40  * b) An un-segmented BAR that maps the whole address range to a specific PE.
   81  * This is where we actually allocate PE numbers for each VF and setup the
   85  * ability because the PE space is shared by all devices on the same PHB.
   86  * When using mode a) described above, segment 0 maps to PE#0, which might
   89  * As a result we need to allocate a contiguous range of PE numbers, then shift
   92  * PE number. This is handled in pnv_pci_vf_resource_shift().
   97  * PE that we allocated for it rather than the PE associated with the bus.
  [all …]

/linux/drivers/iommu/intel/
pasid.h
   89  static inline void pasid_clear_entry(struct pasid_entry *pe)  in pasid_clear_entry() argument
   91      WRITE_ONCE(pe->val[0], 0);  in pasid_clear_entry()
   92      WRITE_ONCE(pe->val[1], 0);  in pasid_clear_entry()
   93      WRITE_ONCE(pe->val[2], 0);  in pasid_clear_entry()
   94      WRITE_ONCE(pe->val[3], 0);  in pasid_clear_entry()
   95      WRITE_ONCE(pe->val[4], 0);  in pasid_clear_entry()
   96      WRITE_ONCE(pe->val[5], 0);  in pasid_clear_entry()
   97      WRITE_ONCE(pe->val[6], 0);  in pasid_clear_entry()
   98      WRITE_ONCE(pe->val[7], 0);  in pasid_clear_entry()
  101  static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)  in pasid_clear_entry_with_fpd() argument
  [all …]
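The entry is cleared with WRITE_ONCE() per 64-bit word so hardware walking the PASID table never observes a torn word. The _with_fpd variant is truncated above; a sketch of what it most likely contains, assuming (from upstream) a PASID_PTE_FPD bit that keeps Fault Processing Disable set while the rest is cleared:

    static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
    {
        /* Keep the Fault Processing Disable bit while clearing the rest,
         * so in-flight faults on this PASID are suppressed, not reported. */
        WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
        WRITE_ONCE(pe->val[1], 0);
        WRITE_ONCE(pe->val[2], 0);
        WRITE_ONCE(pe->val[3], 0);
        WRITE_ONCE(pe->val[4], 0);
        WRITE_ONCE(pe->val[5], 0);
        WRITE_ONCE(pe->val[6], 0);
        WRITE_ONCE(pe->val[7], 0);
    }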

/linux/arch/powerpc/include/asm/
eeh.h
   34  * Delay for PE reset, all in ms
   44  * The struct is used to trace PE related EEH functionality.
   46  * be created against particular PE. In nature, PEs correlate
   49  * PE has EEH errors.
   51  * Also, one particular PE might be composed of PCI device, PCI
   53  * the information. Furthermore, one particular PE is only meaningful
   58  #define EEH_PE_PHB     (1 << 1)  /* PHB PE    */
   59  #define EEH_PE_DEVICE  (1 << 2)  /* Device PE */
   60  #define EEH_PE_BUS     (1 << 3)  /* Bus PE    */
   61  #define EEH_PE_VF      (1 << 4)  /* VF PE     */
  [all …]

/linux/arch/powerpc/platforms/pseries/
eeh_pseries.c
   74      * parent PE in pseries_eeh_init_edev().  in pseries_pcibios_bus_add_device()
   76      struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe;  in pseries_pcibios_bus_add_device()
   80      eeh_pe_tree_remove(edev);  /* Remove as it is adding to bus pe */  in pseries_pcibios_bus_add_device()
   81      eeh_pe_tree_insert(edev, physfn_pe);  /* Add as VF PE type */  in pseries_pcibios_bus_add_device()
   93  * pe_config_addr) as a handle to a given PE. This function finds the
  110      * part of a PE or not. ret[0] being zero indicates it's not.  in pseries_eeh_get_pe_config_addr()
  118      /* Retrieve the associated PE config address with function 0 */  in pseries_eeh_get_pe_config_addr()
  123      pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",  in pseries_eeh_get_pe_config_addr()
  136      pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",  in pseries_eeh_get_pe_config_addr()
  160  * Reset the specified PHB/PE
  [all …]

/linux/arch/alpha/include/asm/
core_marvel.h
   57  #define EV7_IPE(pe)              ((~((long)(pe)) & EV7_PE_MASK) << 35)  argument
   59  #define EV7_CSR_PHYS(pe, off)    (EV7_IPE(pe) | (0x7FFCUL << 20) | (off))  argument
   60  #define EV7_CSRS_PHYS(pe)        (EV7_CSR_PHYS(pe, 0UL))  argument
   62  #define EV7_CSR_KERN(pe, off)    (EV7_KERN_ADDR(EV7_CSR_PHYS(pe, off)))  argument
   63  #define EV7_CSRS_KERN(pe)        (EV7_KERN_ADDR(EV7_CSRS_PHYS(pe)))  argument
  249  #define IO7_IPE(pe)              (EV7_IPE(pe))  argument
  252  #define IO7_HOSE(pe, port)       (IO7_IPE(pe) | IO7_IPORT(port))  argument
  254  #define IO7_MEM_PHYS(pe, port)   (IO7_HOSE(pe, port) | 0x00000000UL)  argument
  255  #define IO7_CONF_PHYS(pe, port)  (IO7_HOSE(pe, port) | 0xFE000000UL)  argument
  256  #define IO7_IO_PHYS(pe, port)    (IO7_HOSE(pe, port) | 0xFF000000UL)  argument
  [all …]

/linux/drivers/misc/cxl/
trace.h
   20      { CXL_PSL9_DSISR_An_PE, "PE" }, \
   30      { CXL_PSL_DSISR_An_PE, "PE" }, \
   71          __field(u16, pe)
   77          __entry->pe = ctx->pe;
   80      TP_printk("afu%i.%i pe=%i",
   83          __entry->pe
   96          __field(u16, pe)
  106          __entry->pe = ctx->pe;
  113      TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx",
  117          __entry->pe,
  [all …]
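The __field/__entry/TP_printk fragments above are pieces of TRACE_EVENT() definitions. A trimmed skeleton showing how the three parts relate; the event name and prototype here are simplified placeholders, not the driver's full definition:

    TRACE_EVENT(cxl_attach_sketch,              /* hypothetical, trimmed event */
        TP_PROTO(struct cxl_context *ctx),
        TP_ARGS(ctx),

        TP_STRUCT__entry(
            __field(u16, pe)                    /* reserve a slot in the ring buffer */
        ),

        TP_fast_assign(
            __entry->pe = ctx->pe;              /* copied when the event fires */
        ),

        TP_printk("pe=%i", __entry->pe)         /* formatted when the log is read */
    );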

/linux/tools/perf/arch/x86/tests/
intel-cqm.c
   44      struct perf_event_attr pe;  in test__intel_cqm_count_nmi_context() local
   72      memset(&pe, 0, sizeof(pe));  in test__intel_cqm_count_nmi_context()
   73      pe.size = sizeof(pe);  in test__intel_cqm_count_nmi_context()
   75      pe.type = PERF_TYPE_HARDWARE;  in test__intel_cqm_count_nmi_context()
   76      pe.config = PERF_COUNT_HW_CPU_CYCLES;  in test__intel_cqm_count_nmi_context()
   77      pe.read_format = PERF_FORMAT_GROUP;  in test__intel_cqm_count_nmi_context()
   79      pe.sample_period = 128;  in test__intel_cqm_count_nmi_context()
   80      pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;  in test__intel_cqm_count_nmi_context()
   84      fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);  in test__intel_cqm_count_nmi_context()
   90      memset(&pe, 0, sizeof(pe));  in test__intel_cqm_count_nmi_context()
  [all …]

/linux/tools/perf/tests/
bp_signal_overflow.c
   65      struct perf_event_attr pe;  in test__bp_signal_overflow() local
   85      memset(&pe, 0, sizeof(struct perf_event_attr));  in test__bp_signal_overflow()
   86      pe.type = PERF_TYPE_BREAKPOINT;  in test__bp_signal_overflow()
   87      pe.size = sizeof(struct perf_event_attr);  in test__bp_signal_overflow()
   89      pe.config = 0;  in test__bp_signal_overflow()
   90      pe.bp_type = HW_BREAKPOINT_X;  in test__bp_signal_overflow()
   91      pe.bp_addr = (unsigned long) test_function;  in test__bp_signal_overflow()
   92      pe.bp_len = default_breakpoint_len();  in test__bp_signal_overflow()
   94      pe.sample_period = THRESHOLD;  in test__bp_signal_overflow()
   95      pe.sample_type = PERF_SAMPLE_IP;  in test__bp_signal_overflow()
  [all …]
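Both perf tests (intel-cqm.c above and bp_signal_overflow.c here) follow the same userspace recipe: zero a struct perf_event_attr, fill in type/size/config, then open the event through the raw syscall, since glibc provides no wrapper. A minimal self-contained sketch of that recipe; it counts CPU cycles rather than reproducing either test:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr pe;
        long long count;
        int fd;

        memset(&pe, 0, sizeof(pe));
        pe.size = sizeof(pe);
        pe.type = PERF_TYPE_HARDWARE;
        pe.config = PERF_COUNT_HW_CPU_CYCLES;
        pe.disabled = 1;
        pe.exclude_kernel = 1;

        /* No glibc wrapper exists, hence the raw syscall */
        fd = syscall(__NR_perf_event_open, &pe, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (volatile int i = 0; i < 1000000; i++)
            ;                               /* the work being measured */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("cycles: %lld\n", count);
        close(fd);
        return 0;
    }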

pe-file.c
    3  // pe-file.exe and pe-file.exe.debug built with:
    4  // x86_64-w64-mingw32-gcc -o pe-file.exe pe-file.c
    7  //   --compress-debug-sections pe-file.exe pe-file.exe.debug
    9  //   --add-gnu-debuglink=pe-file.exe.debug pe-file.exe

/linux/Documentation/arch/powerpc/
pci_iov_resource_on_powernv.rst
   22  A Partitionable Endpoint (PE) is a way to group the various resources
   28  There is thus, in HW, a table of PE states that contains a pair of "frozen"
   30  cleared independently) for each PE.
   32  When a PE is frozen, all stores in any direction are dropped and all loads
   54  correspondence between a PCIe RID (bus/dev/fn) and a PE number.
   57  - For DMA we then provide an entire address space for each PE that can
   66  bridge being triggered. There's a PE# in the interrupt controller
   67  descriptor table as well which is compared with the PE# obtained from
   96  maps each segment to a PE#. That allows portions of the MMIO space
  103  can be assigned to a PE.
  [all …]
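The segmented-window scheme quoted above (equal segments, segment n mapped to PE# n) reduces PE lookup to index arithmetic. An illustrative sketch, assuming a window split into 256 equal segments as in the powernv M64 case; the function and parameter names are hypothetical, not taken from the document:

    #include <stdint.h>

    /* pe_for_addr() is hypothetical; win_base/win_size describe the window. */
    static inline unsigned int pe_for_addr(uint64_t addr, uint64_t win_base,
                                           uint64_t win_size)
    {
        uint64_t seg_size = win_size / 256;     /* 256 equal segments */

        return (unsigned int)((addr - win_base) / seg_size); /* segment n -> PE n */
    }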

/linux/tools/testing/selftests/user_events/
perf_test.c
   32  static long perf_event_open(struct perf_event_attr *pe, pid_t pid,  in perf_event_open() argument
   35      return syscall(__NR_perf_event_open, pe, pid, cpu, group_fd, flags);  in perf_event_open()
  134      struct perf_event_attr pe = {0};  in TEST_F() local
  159      pe.type = PERF_TYPE_TRACEPOINT;  in TEST_F()
  160      pe.size = sizeof(pe);  in TEST_F()
  161      pe.config = id;  in TEST_F()
  162      pe.sample_type = PERF_SAMPLE_RAW;  in TEST_F()
  163      pe.sample_period = 1;  in TEST_F()
  164      pe.wakeup_events = 1;  in TEST_F()
  167      fd = perf_event_open(&pe, 0, -1, -1, 0);  in TEST_F()
  [all …]

/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_pmu.c
  228      struct amdgpu_pmu_entry *pe = container_of(event->pmu,  in amdgpu_perf_start() local
  236      if ((!pe->adev->df.funcs) ||  in amdgpu_perf_start()
  237          (!pe->adev->df.funcs->pmc_start))  in amdgpu_perf_start()
  247      target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,  in amdgpu_perf_start()
  256      pe->adev->df.funcs->pmc_start(pe->adev, hwc->config,  in amdgpu_perf_start()
  270      struct amdgpu_pmu_entry *pe = container_of(event->pmu,  in amdgpu_perf_read() local
  275      if ((!pe->adev->df.funcs) ||  in amdgpu_perf_read()
  276          (!pe->adev->df.funcs->pmc_get_count))  in amdgpu_perf_read()
  284      pe->adev->df.funcs->pmc_get_count(pe->adev,  in amdgpu_perf_read()
  300      struct amdgpu_pmu_entry *pe = container_of(event->pmu,  in amdgpu_perf_stop() local
  [all …]
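Every callback in this PMU driver recovers its private state the same way: container_of() walks back from the embedded struct pmu to the wrapper structure. A sketch of the idiom; the hits truncate the call before the member argument, so the member name pmu is an assumption:

    struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                               struct amdgpu_pmu_entry,
                                               pmu);    /* assumed member name */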

/linux/drivers/pinctrl/freescale/
pinctrl-imx27.c
   23  #define PE 4  macro
  150      MX27_PAD_USBOTG_NXT = PAD_ID(PE, 0),
  151      MX27_PAD_USBOTG_STP = PAD_ID(PE, 1),
  152      MX27_PAD_USBOTG_DIR = PAD_ID(PE, 2),
  153      MX27_PAD_UART2_CTS = PAD_ID(PE, 3),
  154      MX27_PAD_UART2_RTS = PAD_ID(PE, 4),
  155      MX27_PAD_PWMO = PAD_ID(PE, 5),
  156      MX27_PAD_UART2_TXD = PAD_ID(PE, 6),
  157      MX27_PAD_UART2_RXD = PAD_ID(PE, 7),
  158      MX27_PAD_UART3_TXD = PAD_ID(PE, 8),
  [all …]
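The pad enumerators above pack a GPIO port and a pin index into one number. The PAD_ID macro itself is outside the hits; the definition below is an assumption based on how the i.MX pinctrl drivers usually encode it (32 pins per port):

    #define PAD_ID(port, pad)   ((port) * 32 + (pad))   /* assumed definition */

    /* With PE defined as 4 above:
     *   MX27_PAD_USBOTG_NXT = PAD_ID(PE, 0) = 4 * 32 + 0 = 128
     *   MX27_PAD_UART3_TXD  = PAD_ID(PE, 8) = 4 * 32 + 8 = 136
     */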

/linux/drivers/misc/ocxl/
link.c
   56      struct radix_tree_root pe_tree;  /* Maps PE handles to pe_data */
   71      u64 pe;  member
  108  static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)  in read_irq() argument
  115      *pe = reg & SPA_PE_MASK;  in read_irq()
  131      trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,  in ack_irq()
  195      struct ocxl_process_element *pe;  in xsl_fault_handler() local
  203      pe = spa->spa_mem + pe_handle;  in xsl_fault_handler()
  204      pid = be32_to_cpu(pe->pid);  in xsl_fault_handler()
  205      /* We could be reading all null values here if the PE is being  in xsl_fault_handler()
  223      * AFU about PASID termination before removing the PE,  in xsl_fault_handler()
  [all …]

/linux/drivers/gpu/drm/i915/gt/uc/
selftest_guc.c
   70      gt_err(gt, "Failed to create context %d: %pe\n", i, ce);  in intel_guc_scrub_ctbs()
   91      gt_err(gt, "Failed to create request %d: %pe\n", i, rq);  in intel_guc_scrub_ctbs()
  101      gt_err(gt, "Last request failed to complete: %pe\n", ERR_PTR(ret));  in intel_guc_scrub_ctbs()
  118      gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));  in intel_guc_scrub_ctbs()
  171      guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);  in intel_guc_steal_guc_ids()
  177      guc_err(guc, "Failed to create spinner: %pe\n", ERR_PTR(ret));  in intel_guc_steal_guc_ids()
  184      guc_err(guc, "Failed to create spinner request: %pe\n", spin_rq);  in intel_guc_steal_guc_ids()
  189      guc_err(guc, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));  in intel_guc_steal_guc_ids()
  198      guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);  in intel_guc_steal_guc_ids()
  208      guc_err(guc, "Failed to create %srequest %d: %pe\n",  in intel_guc_steal_guc_ids()
  [all …]
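Every hit here uses printk's %pe format specifier, which renders an ERR_PTR()-encoded pointer as its symbolic errno name instead of a raw address. Minimal sketch:

    void *ce = ERR_PTR(-ENOMEM);

    pr_err("Failed to create context: %pe\n", ce);           /* prints "... -ENOMEM" */
    pr_err("GT failed to idle: %pe\n", ERR_PTR(-ETIMEDOUT)); /* prints "... -ETIMEDOUT" */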

/linux/drivers/net/wireless/ath/ath9k/
dfs.c
  202          struct pulse_event *pe)  in ath9k_postprocess_radar_event() argument
  266      pe->width = dur_to_usecs(sc->sc_ah, dur);  in ath9k_postprocess_radar_event()
  267      pe->rssi = rssi;  in ath9k_postprocess_radar_event()
  274  ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)  in ath9k_dfs_process_radar_pulse() argument
  280      if (!pd->add_pulse(pd, pe, NULL))  in ath9k_dfs_process_radar_pulse()
  295      struct pulse_event pe;  in ath9k_dfs_process_phyerr() local
  331      pe.freq = ah->curchan->channel;  in ath9k_dfs_process_phyerr()
  332      pe.ts = mactime;  in ath9k_dfs_process_phyerr()
  333      if (!ath9k_postprocess_radar_event(sc, &ard, &pe))  in ath9k_dfs_process_phyerr()
  336      if (pe.width > MIN_CHIRP_PULSE_WIDTH &&  in ath9k_dfs_process_phyerr()
  [all …]