
Searched full:pe (Results 1 – 25 of 520) sorted by relevance


/linux/drivers/net/ethernet/marvell/mvpp2/
mvpp2_prs.c
22 static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) in mvpp2_prs_hw_write() argument
28 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) in mvpp2_prs_hw_write()
32 pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; in mvpp2_prs_hw_write()
35 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); in mvpp2_prs_hw_write()
37 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); in mvpp2_prs_hw_write()
40 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); in mvpp2_prs_hw_write()
42 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); in mvpp2_prs_hw_write()
49 struct mvpp2_prs_entry *pe, int tid) in __mvpp2_prs_init_from_hw() argument
58 memset(pe, 0, sizeof(*pe)); in __mvpp2_prs_init_from_hw()
59 pe->index = tid; in __mvpp2_prs_init_from_hw()
[all …]
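
The excerpt above shows a common indexed-register idiom: select an entry by writing its index to an index register, then stream the entry's words through a data register. Below is a minimal userspace model of that idiom; the register names, entry count, and word count are illustrative stand-ins, not the real MVPP2 layout.

/* Toy model of the write path in mvpp2_prs_hw_write(): select the
 * entry, then stream its words. Arrays stand in for the device. */
#include <stdint.h>
#include <stdio.h>

#define WORDS_PER_ENTRY 6
#define NUM_ENTRIES 256

struct toy_entry {
	uint32_t index;
	uint32_t sram[WORDS_PER_ENTRY];
};

static uint32_t hw_sram[NUM_ENTRIES][WORDS_PER_ENTRY]; /* fake SRAM */
static uint32_t hw_idx;                                /* fake index register */

static void reg_write_idx(uint32_t v) { hw_idx = v; }
static void reg_write_data(int i, uint32_t v) { hw_sram[hw_idx][i] = v; }

static int toy_hw_write(const struct toy_entry *e)
{
	int i;

	if (e->index > NUM_ENTRIES - 1)    /* same bounds check as the original */
		return -1;

	reg_write_idx(e->index);           /* 1: select the entry */
	for (i = 0; i < WORDS_PER_ENTRY; i++)
		reg_write_data(i, e->sram[i]); /* 2: stream its words */
	return 0;
}

int main(void)
{
	struct toy_entry e = { .index = 3, .sram = { 0xdeadbeef } };

	printf("rc=%d word0=%#x\n", toy_hw_write(&e), hw_sram[3][0]);
	return 0;
}
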
mvpp2_debugfs.c
248 struct mvpp2_prs_entry pe; in mvpp2_dbgfs_port_vid_show() local
255 mvpp2_prs_init_from_hw(priv, &pe, tid); in mvpp2_dbgfs_port_vid_show()
257 pmap = mvpp2_prs_tcam_port_map_get(&pe); in mvpp2_dbgfs_port_vid_show()
265 mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); in mvpp2_dbgfs_port_vid_show()
266 mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); in mvpp2_dbgfs_port_vid_show()
282 struct mvpp2_prs_entry pe; in mvpp2_dbgfs_port_parser_show() local
287 mvpp2_prs_init_from_hw(port->priv, &pe, i); in mvpp2_dbgfs_port_parser_show()
289 pmap = mvpp2_prs_tcam_port_map_get(&pe); in mvpp2_dbgfs_port_parser_show()
303 struct mvpp2_prs_entry pe; in mvpp2_dbgfs_filter_show() local
316 mvpp2_prs_init_from_hw(priv, &pe, tid); in mvpp2_dbgfs_filter_show()
[all …]
/linux/arch/powerpc/kernel/
eeh_pe.c
3 * This file implements PE support based on the information from
7 * PE is only meaningful in one PHB domain.
27 * eeh_set_pe_aux_size - Set PE auxiliary data size
28 * @size: PE auxiliary data size in bytes
30 * Set PE auxiliary data size.
41 * eeh_pe_alloc - Allocate PE
43 * @type: PE type
45 * Allocate PE instance dynamically.
49 struct eeh_pe *pe; in eeh_pe_alloc() local
58 /* Allocate PHB PE */ in eeh_pe_alloc()
[all …]
eeh_driver.c
89 if (eeh_pe_passed(edev->pe)) in eeh_edev_actionable()
206 if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) in eeh_dev_save_state()
218 struct eeh_pe *pe; in eeh_set_channel_state() local
221 eeh_for_each_pe(root, pe) in eeh_set_channel_state()
222 eeh_pe_for_each_dev(pe, edev, tmp) in eeh_set_channel_state()
229 struct eeh_pe *pe; in eeh_set_irq_state() local
232 eeh_for_each_pe(root, pe) { in eeh_set_irq_state()
233 eeh_pe_for_each_dev(pe, edev, tmp) { in eeh_set_irq_state()
291 !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe)); in eeh_pe_report_edev()
302 struct eeh_pe *pe; in eeh_pe_report() local
[all …]
eeh.c
89 * PE would be created there.
95 * Maximum number of allowed EEH freezes. If one particular PE's
96 * frozen count in the last hour exceeds this limit, the PE will
137 u64 slot_resets; /* PE reset */
176 edev->pe->phb->global_number, edev->bdfn >> 8, in eeh_dump_dev_log()
179 edev->pe->phb->global_number, edev->bdfn >> 8, in eeh_dump_dev_log()
269 static void *eeh_dump_pe_log(struct eeh_pe *pe, void *flag) in eeh_dump_pe_log() argument
274 eeh_pe_for_each_dev(pe, edev, tmp) in eeh_dump_pe_log()
283 * @pe: EEH PE
291 void eeh_slot_error_detail(struct eeh_pe *pe, int severity) in eeh_slot_error_detail() argument
[all …]
eeh_event.c
60 /* We might have an event without a bound PE */ in eeh_event_handler()
61 if (event->pe) in eeh_event_handler()
62 eeh_handle_normal_event(event->pe); in eeh_event_handler()
96 * @pe: EEH PE
102 int __eeh_send_failure_event(struct eeh_pe *pe) in __eeh_send_failure_event() argument
112 event->pe = pe; in __eeh_send_failure_event()
115 * Mark the PE as recovering before inserting it into the queue. in __eeh_send_failure_event()
116 * This prevents the PE from being freed by a hotplug driver in __eeh_send_failure_event()
117 * while the PE is sitting in the event queue. in __eeh_send_failure_event()
119 if (pe) { in __eeh_send_failure_event()
[all …]
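
The ordering hinted at in the comment above is worth spelling out: the PE is flagged as recovering before the event becomes visible on the queue, so a hotplug path that frees PEs can check the flag and back off. A toy single-threaded sketch of that ordering (names invented, locking omitted):

#include <stdio.h>

#define PE_RECOVERING (1 << 0)

struct toy_pe { int state; };
struct toy_event { struct toy_pe *pe; struct toy_event *next; };

static struct toy_event *queue_head;

static void send_failure_event(struct toy_event *ev)
{
	if (ev->pe)
		ev->pe->state |= PE_RECOVERING; /* 1: mark the PE first... */
	ev->next = queue_head;                  /* 2: ...then publish the event */
	queue_head = ev;
}

int main(void)
{
	struct toy_pe pe = { 0 };
	struct toy_event ev = { .pe = &pe, .next = NULL };

	send_failure_event(&ev);
	printf("recovering=%d queued=%d\n",
	       !!(pe.state & PE_RECOVERING), queue_head == &ev);
	return 0;
}
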
/linux/arch/powerpc/platforms/powernv/
pci-ioda.c
49 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
52 void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, in pe_level_printk() argument
64 if (pe->flags & PNV_IODA_PE_DEV) in pe_level_printk()
65 strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); in pe_level_printk()
66 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pe_level_printk()
68 pci_domain_nr(pe->pbus), pe->pbus->number); in pe_level_printk()
70 else if (pe->flags & PNV_IODA_PE_VF) in pe_level_printk()
72 pci_domain_nr(pe->parent_dev->bus), in pe_level_printk()
73 (pe->rid & 0xff00) >> 8, in pe_level_printk()
74 PCI_SLOT(pe->rid), PCI_FUNC(pe->rid)); in pe_level_printk()
[all …]
eeh-powernv.c
70 struct eeh_pe *pe; in pnv_eeh_ei_write() local
90 /* Retrieve PE */ in pnv_eeh_ei_write()
91 pe = eeh_pe_get(hose, pe_no); in pnv_eeh_ei_write()
92 if (!pe) in pnv_eeh_ei_write()
96 ret = eeh_ops->err_inject(pe, type, func, addr, mask); in pnv_eeh_ei_write()
155 * to clear frozen PE during PCI config access. in pnv_eeh_enable_phbs()
300 /* for VFs we use the PF's PE as the upstream PE */ in pnv_eeh_get_upstream_pe()
305 /* otherwise use the PE of our parent bridge */ in pnv_eeh_get_upstream_pe()
338 if (!edev || edev->pe) in pnv_eeh_probe()
378 /* Create PE */ in pnv_eeh_probe()
[all …]
pci-sriov.c
14 * the need to put the MMIO space for each VF into a separate PE. Internally
15 * the PHB maps MMIO addresses to a specific PE using the "Memory BAR Table".
39 * segments. The n'th segment is mapped to the n'th PE.
40 * b) An un-segmented BAR that maps the whole address range to a specific PE.
81 * This is where we actually allocate PE numbers for each VF and set up the
85 * ability because the PE space is shared by all devices on the same PHB.
86 * When using mode a) described above, segment 0 maps to PE#0, which might
89 * As a result we need to allocate a contiguous range of PE numbers, then shift
92 * PE number. This is handled in pnv_pci_vf_resource_shift().
97 * PE that we allocated for it rather than the PE associated with the bus.
[all …]
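
The segmented-BAR arithmetic described above reduces to simple division: cut the IOV BAR into equal segments and map the n'th segment to the n'th PE of a contiguous block. A worked example with made-up sizes and a made-up base PE number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bar_size = 256 << 20;     /* 256 MiB IOV BAR (made up) */
	unsigned int nr_segs = 256;        /* one segment per mappable PE */
	uint64_t seg_size = bar_size / nr_segs;
	unsigned int base_pe = 64;         /* start of the contiguous PE block */
	unsigned int vf = 5;               /* which VF we care about */

	uint64_t vf_offset = vf * seg_size;      /* where VF 5's MMIO lands */
	unsigned int seg = vf_offset / seg_size; /* back to a segment index */

	printf("VF %u -> segment %u -> PE#%u\n", vf, seg, base_pe + seg);
	return 0;
}
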
/linux/net/netfilter/ipvs/
ip_vs_pe.c
14 /* IPVS pe list */
20 /* Get pe in the pe list by name */
23 struct ip_vs_pe *pe; in __ip_vs_pe_getbyname() local
29 list_for_each_entry_rcu(pe, &ip_vs_pe, n_list) { in __ip_vs_pe_getbyname()
31 if (pe->module && in __ip_vs_pe_getbyname()
32 !try_module_get(pe->module)) { in __ip_vs_pe_getbyname()
33 /* This pe was just deleted */ in __ip_vs_pe_getbyname()
36 if (strcmp(pe_name, pe->name)==0) { in __ip_vs_pe_getbyname()
39 return pe; in __ip_vs_pe_getbyname()
41 module_put(pe->module); in __ip_vs_pe_getbyname()
[all …]
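
The lookup above follows the usual pin-then-compare pattern: take a reference on each candidate's owning module before touching it, and drop the reference on a name mismatch. A refcount-only userspace sketch (the counter stands in for try_module_get()/module_put(), and RCU is omitted):

#include <stdio.h>
#include <string.h>

struct toy_pe {
	const char *name;
	int refcnt;            /* stands in for the owning module's refcount */
	struct toy_pe *next;
};

static struct toy_pe *pe_list;

static struct toy_pe *pe_getbyname(const char *name)
{
	struct toy_pe *pe;

	for (pe = pe_list; pe; pe = pe->next) {
		pe->refcnt++;                    /* pin before looking at it */
		if (strcmp(name, pe->name) == 0)
			return pe;               /* caller now holds a reference */
		pe->refcnt--;                    /* not the one: unpin */
	}
	return NULL;
}

int main(void)
{
	struct toy_pe sip = { "sip", 0, NULL };

	pe_list = &sip;
	printf("found=%s refcnt=%d\n",
	       pe_getbyname("sip") ? "yes" : "no", sip.refcnt);
	return 0;
}
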
/linux/drivers/iommu/intel/
pasid.h
87 static inline void pasid_clear_entry(struct pasid_entry *pe) in pasid_clear_entry() argument
89 WRITE_ONCE(pe->val[0], 0); in pasid_clear_entry()
90 WRITE_ONCE(pe->val[1], 0); in pasid_clear_entry()
91 WRITE_ONCE(pe->val[2], 0); in pasid_clear_entry()
92 WRITE_ONCE(pe->val[3], 0); in pasid_clear_entry()
93 WRITE_ONCE(pe->val[4], 0); in pasid_clear_entry()
94 WRITE_ONCE(pe->val[5], 0); in pasid_clear_entry()
95 WRITE_ONCE(pe->val[6], 0); in pasid_clear_entry()
96 WRITE_ONCE(pe->val[7], 0); in pasid_clear_entry()
99 static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe) in pasid_clear_entry_with_fpd() argument
[all …]
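
A PASID entry is 512 bits, i.e. eight 64-bit words, and pasid_clear_entry() zeroes them with WRITE_ONCE() so the stores cannot be torn or merged while hardware may still be walking the table. A userspace approximation using volatile stores (the kernel version is unrolled; a loop is used here for brevity):

#include <stdint.h>
#include <stdio.h>

struct toy_pasid_entry { uint64_t val[8]; };   /* 512 bits, as in pasid.h */

static void write_once_u64(uint64_t *p, uint64_t v)
{
	*(volatile uint64_t *)p = v;   /* rough analogue of WRITE_ONCE() */
}

static void toy_clear_entry(struct toy_pasid_entry *pe)
{
	for (int i = 0; i < 8; i++)
		write_once_u64(&pe->val[i], 0);
}

int main(void)
{
	struct toy_pasid_entry pe = { .val = { ~0ull, ~0ull, ~0ull, ~0ull } };

	toy_clear_entry(&pe);
	printf("val[0]=%llu val[7]=%llu\n",
	       (unsigned long long)pe.val[0], (unsigned long long)pe.val[7]);
	return 0;
}
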
/linux/arch/powerpc/include/asm/
eeh.h
34 * Delay for PE reset, all in ms
44 * The struct is used to track PE-related EEH functionality.
46 * be created against a particular PE. By nature, PEs correlate
49 * PE has EEH errors.
51 * Also, one particular PE might be composed of PCI device, PCI
53 * the information. Furthermore, one particular PE is only meaningful
58 #define EEH_PE_PHB (1 << 1) /* PHB PE */
59 #define EEH_PE_DEVICE (1 << 2) /* Device PE */
60 #define EEH_PE_BUS (1 << 3) /* Bus PE */
61 #define EEH_PE_VF (1 << 4) /* VF PE */
[all …]
ppc-pci.h
56 void eeh_slot_error_detail(struct eeh_pe *pe, int severity);
57 int eeh_pci_enable(struct eeh_pe *pe, int function);
58 int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed);
60 void eeh_pe_state_mark(struct eeh_pe *pe, int state);
61 void eeh_pe_mark_isolated(struct eeh_pe *pe);
62 void eeh_pe_state_clear(struct eeh_pe *pe, int state, bool include_passed);
63 void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state);
64 void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
/linux/arch/alpha/include/asm/
core_marvel.h
57 #define EV7_IPE(pe) ((~((long)(pe)) & EV7_PE_MASK) << 35) argument
59 #define EV7_CSR_PHYS(pe, off) (EV7_IPE(pe) | (0x7FFCUL << 20) | (off)) argument
60 #define EV7_CSRS_PHYS(pe) (EV7_CSR_PHYS(pe, 0UL)) argument
62 #define EV7_CSR_KERN(pe, off) (EV7_KERN_ADDR(EV7_CSR_PHYS(pe, off))) argument
63 #define EV7_CSRS_KERN(pe) (EV7_KERN_ADDR(EV7_CSRS_PHYS(pe))) argument
249 #define IO7_IPE(pe) (EV7_IPE(pe)) argument
252 #define IO7_HOSE(pe, port) (IO7_IPE(pe) | IO7_IPORT(port)) argument
254 #define IO7_MEM_PHYS(pe, port) (IO7_HOSE(pe, port) | 0x00000000UL) argument
255 #define IO7_CONF_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFE000000UL) argument
256 #define IO7_IO_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFF000000UL) argument
[all …]
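
The macros above build physical addresses by complementing and masking the PE number into the high address bits, then OR'ing in a window selector and offset. A worked example on an LP64 host; the mask value here is illustrative, see core_marvel.h for the real EV7_PE_MASK:

#include <stdio.h>

#define TOY_PE_MASK 0x1ffUL   /* illustrative; not necessarily EV7_PE_MASK */
#define TOY_IPE(pe) ((~((long)(pe)) & TOY_PE_MASK) << 35)
#define TOY_CSR_PHYS(pe, off) (TOY_IPE(pe) | (0x7FFCUL << 20) | (off))

int main(void)
{
	/* PE 3, register offset 0x40: complement, mask, shift, then OR in
	 * the CSR window selector and the offset. */
	printf("CSR phys for PE 3 = %#lx\n",
	       (unsigned long)TOY_CSR_PHYS(3, 0x40UL));
	return 0;
}
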
/linux/Documentation/arch/powerpc/
pci_iov_resource_on_powernv.rst
22 A Partitionable Endpoint (PE) is a way to group the various resources
28 There is thus, in HW, a table of PE states that contains a pair of "frozen"
30 cleared independently) for each PE.
32 When a PE is frozen, all stores in any direction are dropped and all loads
54 correspondence between a PCIe RID (bus/dev/fn) with a PE number.
57 - For DMA we then provide an entire address space for each PE that can
66 bridge being triggered. There's a PE# in the interrupt controller
67 descriptor table as well which is compared with the PE# obtained from
96 maps each segment to a PE#. That allows portions of the MMIO space
103 can be assigned to a PE.
[all …]
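
The RID-to-PE# correspondence described above is conceptually just a table indexed by bus/dev/fn. A toy lookup with invented table contents:

#include <stdint.h>
#include <stdio.h>

#define RID(bus, dev, fn) (((bus) << 8) | ((dev) << 3) | (fn))

static uint16_t rid_to_pe[1 << 16];   /* one slot per possible RID */

int main(void)
{
	rid_to_pe[RID(2, 0, 0)] = 7;      /* pretend 02:00.0 belongs to PE#7 */
	printf("PE# for 02:00.0 = %u\n", (unsigned int)rid_to_pe[RID(2, 0, 0)]);
	return 0;
}
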
/linux/tools/perf/tests/
bp_signal_overflow.c
65 struct perf_event_attr pe; in test__bp_signal_overflow() local
85 memset(&pe, 0, sizeof(struct perf_event_attr)); in test__bp_signal_overflow()
86 pe.type = PERF_TYPE_BREAKPOINT; in test__bp_signal_overflow()
87 pe.size = sizeof(struct perf_event_attr); in test__bp_signal_overflow()
89 pe.config = 0; in test__bp_signal_overflow()
90 pe.bp_type = HW_BREAKPOINT_X; in test__bp_signal_overflow()
91 pe.bp_addr = (unsigned long) test_function; in test__bp_signal_overflow()
92 pe.bp_len = default_breakpoint_len(); in test__bp_signal_overflow()
94 pe.sample_period = THRESHOLD; in test__bp_signal_overflow()
95 pe.sample_type = PERF_SAMPLE_IP; in test__bp_signal_overflow()
[all …]
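
The attr setup above can be turned into a standalone program. A minimal sketch that arms an execute breakpoint on a local function and reads the hit count; it assumes debug-register hardware and a permissive perf_event_paranoid setting, and uses sizeof(long) for bp_len since default_breakpoint_len() is a perf-internal helper:

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

__attribute__((noinline)) static void target(void)
{
	asm volatile("");   /* keep the function from being optimized away */
}

int main(void)
{
	struct perf_event_attr pe;
	long long count = 0;
	int fd, i;

	memset(&pe, 0, sizeof(pe));
	pe.type = PERF_TYPE_BREAKPOINT;
	pe.size = sizeof(pe);
	pe.bp_type = HW_BREAKPOINT_X;       /* break on execution... */
	pe.bp_addr = (unsigned long)target; /* ...of this function */
	pe.bp_len = sizeof(long);           /* default_breakpoint_len() on most arches */
	pe.exclude_kernel = 1;
	pe.exclude_hv = 1;

	fd = syscall(__NR_perf_event_open, &pe, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	for (i = 0; i < 3; i++)
		target();
	if (read(fd, &count, sizeof(count)) > 0)
		printf("breakpoint hits: %lld\n", count);
	close(fd);
	return 0;
}
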
pe-file.c
3 // pe-file.exe and pe-file.exe.debug built with:
4 // x86_64-w64-mingw32-gcc -o pe-file.exe pe-file.c
7 // --compress-debug-sections pe-file.exe pe-file.exe.debug
9 // --add-gnu-debuglink=pe-file.exe.debug pe-file.exe
bp_signal.c
105 struct perf_event_attr pe; in __event() local
108 memset(&pe, 0, sizeof(struct perf_event_attr)); in __event()
109 pe.type = PERF_TYPE_BREAKPOINT; in __event()
110 pe.size = sizeof(struct perf_event_attr); in __event()
112 pe.config = 0; in __event()
113 pe.bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W; in __event()
114 pe.bp_addr = (unsigned long) addr; in __event()
115 pe.bp_len = is_x ? default_breakpoint_len() : sizeof(long); in __event()
117 pe.sample_period = 1; in __event()
118 pe.sample_type = PERF_SAMPLE_IP; in __event()
[all …]
/linux/tools/testing/selftests/user_events/
perf_test.c
32 static long perf_event_open(struct perf_event_attr *pe, pid_t pid, in perf_event_open() argument
35 return syscall(__NR_perf_event_open, pe, pid, cpu, group_fd, flags); in perf_event_open()
134 struct perf_event_attr pe = {0}; in TEST_F() local
159 pe.type = PERF_TYPE_TRACEPOINT; in TEST_F()
160 pe.size = sizeof(pe); in TEST_F()
161 pe.config = id; in TEST_F()
162 pe.sample_type = PERF_SAMPLE_RAW; in TEST_F()
163 pe.sample_period = 1; in TEST_F()
164 pe.wakeup_events = 1; in TEST_F()
167 fd = perf_event_open(&pe, 0, -1, -1, 0); in TEST_F()
[all …]
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_pmu.c
228 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_start() local
236 if ((!pe->adev->df.funcs) || in amdgpu_perf_start()
237 (!pe->adev->df.funcs->pmc_start)) in amdgpu_perf_start()
247 target_cntr = pe->adev->df.funcs->pmc_start(pe->adev, in amdgpu_perf_start()
256 pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, in amdgpu_perf_start()
270 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_read() local
275 if ((!pe->adev->df.funcs) || in amdgpu_perf_read()
276 (!pe->adev->df.funcs->pmc_get_count)) in amdgpu_perf_read()
284 pe->adev->df.funcs->pmc_get_count(pe->adev, in amdgpu_perf_read()
300 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_stop() local
[all …]
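
container_of(), used above to get from the embedded struct pmu back to its amdgpu_pmu_entry, recovers the enclosing structure by subtracting the member's offset. The standard kernel macro, reproduced here in simplified form for a self-contained demo with invented structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu { int dummy; };
struct pmu_entry { int id; struct pmu pmu; };

int main(void)
{
	struct pmu_entry e = { .id = 42 };
	struct pmu *p = &e.pmu;   /* all the callback receives is this */
	struct pmu_entry *back = container_of(p, struct pmu_entry, pmu);

	printf("id=%d\n", back->id);   /* prints 42 */
	return 0;
}
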
/linux/drivers/misc/ocxl/
link.c
56 struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
71 u64 pe; member
108 static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe) in read_irq() argument
115 *pe = reg & SPA_PE_MASK; in read_irq()
131 trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe, in ack_irq()
195 struct ocxl_process_element *pe; in xsl_fault_handler() local
203 pe = spa->spa_mem + pe_handle; in xsl_fault_handler()
204 pid = be32_to_cpu(pe->pid); in xsl_fault_handler()
205 /* We could be reading all null values here if the PE is being in xsl_fault_handler()
223 * AFU about PASID termination before removing the PE, in xsl_fault_handler()
[all …]
/linux/drivers/gpu/drm/i915/gt/uc/
selftest_guc.c
70 gt_err(gt, "Failed to create context %d: %pe\n", i, ce); in intel_guc_scrub_ctbs()
91 gt_err(gt, "Failed to create request %d: %pe\n", i, rq); in intel_guc_scrub_ctbs()
101 gt_err(gt, "Last request failed to complete: %pe\n", ERR_PTR(ret)); in intel_guc_scrub_ctbs()
118 gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret)); in intel_guc_scrub_ctbs()
171 guc_err(guc, "Failed to create context: %pe\n", ce[context_index]); in intel_guc_steal_guc_ids()
177 guc_err(guc, "Failed to create spinner: %pe\n", ERR_PTR(ret)); in intel_guc_steal_guc_ids()
184 guc_err(guc, "Failed to create spinner request: %pe\n", spin_rq); in intel_guc_steal_guc_ids()
189 guc_err(guc, "Failed to add Spinner request: %pe\n", ERR_PTR(ret)); in intel_guc_steal_guc_ids()
198 guc_err(guc, "Failed to create context: %pe\n", ce[context_index]); in intel_guc_steal_guc_ids()
208 guc_err(guc, "Failed to create %srequest %d: %pe\n", in intel_guc_steal_guc_ids()
[all …]
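
The %pe specifier in these messages prints an ERR_PTR()-encoded pointer as a symbolic error name (e.g. -ENOMEM) instead of a raw address. A userspace approximation of the encoding, with strerror() standing in for the symbolic name:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_ERRNO 4095   /* same bound the kernel uses for ERR_PTR() */

static void *err_ptr(long err) { return (void *)err; }
static long ptr_err(const void *p) { return (long)p; }
static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = err_ptr(-ENOMEM);   /* an errno folded into a pointer */

	if (is_err(p))
		printf("error: %s\n", strerror(-ptr_err(p)));
	return 0;
}
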
/linux/lib/crypto/
des.c
622 static unsigned long des_ekey(u32 *pe, const u8 *k) in des_ekey() argument
633 pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; in des_ekey()
634 pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; in des_ekey()
635 pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; in des_ekey()
636 pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; in des_ekey()
637 pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; in des_ekey()
638 pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; in des_ekey()
639 pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; in des_ekey()
640 pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; in des_ekey()
641 pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; in des_ekey()
[all …]
/linux/arch/alpha/kernel/
core_marvel.c
56 read_ev7_csr(int pe, unsigned long offset) in read_ev7_csr() argument
58 ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset); in read_ev7_csr()
69 write_ev7_csr(int pe, unsigned long offset, unsigned long q) in write_ev7_csr() argument
71 ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset); in write_ev7_csr()
79 mk_resource_name(int pe, int port, char *str) in mk_resource_name() argument
85 sz = scnprintf(tmp, sizeof(tmp), "PCI %s PE %d PORT %d", str, pe, port); in mk_resource_name()
100 marvel_find_io7(int pe) in marvel_find_io7() argument
104 for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next) in marvel_find_io7()
111 alloc_io7(unsigned int pe) in alloc_io7() argument
117 if (marvel_find_io7(pe)) { in alloc_io7()
[all …]
/linux/drivers/gpu/drm/radeon/
si_dma.c
62 * @pe: addr of the page entry
70 uint64_t pe, uint64_t src, in si_dma_vm_copy_pages() argument
80 ib->ptr[ib->length_dw++] = lower_32_bits(pe); in si_dma_vm_copy_pages()
82 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; in si_dma_vm_copy_pages()
85 pe += bytes; in si_dma_vm_copy_pages()
96 * @pe: addr of the page entry
97 * @addr: dst addr to write into pe
106 uint64_t pe, in si_dma_vm_write_pages() argument
120 ib->ptr[ib->length_dw++] = pe; in si_dma_vm_write_pages()
121 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; in si_dma_vm_write_pages()
[all …]
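
The packets above carry the 64-bit page-entry address as two dwords, with only eight high bits kept, matching SI's 40-bit VM address space. Illustrative equivalents of lower_32_bits()/upper_32_bits():

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t pe = 0x00ffabcd12345678ull;    /* made-up page-entry address */
	uint32_t lo = lower_32_bits(pe);
	uint32_t hi = upper_32_bits(pe) & 0xff; /* keep 8 bits: 40-bit VM space */

	printf("dwords: %#x %#x\n", lo, hi);
	return 0;
}
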
