/linux/arch/nios2/mm/

  tlb.c
     47  unsigned int way;  [in replace_tlb_one_pid() local]
     50  /* remember pid/way until we return. */  [in replace_tlb_one_pid()]
     55  for (way = 0; way < cpuinfo.tlb_num_ways; way++) {  [in replace_tlb_one_pid()]
     60  tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);  [in replace_tlb_one_pid()]
     73  (way << TLBMISC_WAY_SHIFT);  [in replace_tlb_one_pid()]
    126  unsigned int way;  [in flush_tlb_one() local]
    131  /* remember pid/way until we return. */  [in flush_tlb_one()]
    136  for (way = 0; way < cpuinfo.tlb_num_ways; way++) {  [in flush_tlb_one()]
    140  tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);  [in flush_tlb_one()]
    147  pr_debug("Flush entry by writing way=%dl pid=%ld\n",  [in flush_tlb_one()]
    [all …]

/linux/arch/x86/kernel/cpu/

  cacheinfo.c
     60  { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
     61  { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
     62  { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
     63  { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
     64  { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
     65  { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
     66  { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
     67  { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
     68  { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
     69  { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
    [all …]

  intel.c
    656  * One has 256kb of cache, the other 512. We have no way  [in intel_size_cache()]
    664  * Intel Quark SoC X1000 contains a 4-way set associative  [in intel_size_cache()]
    695  { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
    697  { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
    698  { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
    699  { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
    700  { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
    706  { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
    707  { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
    709  { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
    [all …]

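The cacheinfo.c and intel.c hits above are rows of lookup tables keyed by the one-byte descriptors that CPUID leaf 0x2 returns; each returned descriptor byte is matched against such a table to name the cache or TLB it stands for. The sketch below shows only that lookup pattern; the struct layout, table contents and helper names are illustrative assumptions, not the kernel's actual definitions.

#include <stdio.h>

/* Hypothetical cut-down descriptor table in the spirit of the hits above:
 * each CPUID leaf 0x2 descriptor byte maps to a cache/TLB description. */
struct cache_desc {
    unsigned char id;      /* descriptor byte from CPUID leaf 0x2 */
    const char   *what;    /* human-readable meaning */
};

static const struct cache_desc table[] = {
    { 0x06, "L1 I-cache, 8 KB, 4-way, 32-byte lines" },
    { 0x0c, "L1 D-cache, 16 KB, 4-way, 32-byte lines" },
    { 0x21, "L2 cache, 256 KB, 8-way, 64-byte lines" },
    { 0x00, NULL },        /* terminator */
};

/* Look a descriptor byte up in the table; returns NULL if unknown. */
static const char *lookup_desc(unsigned char id)
{
    const struct cache_desc *d;

    for (d = table; d->what; d++)
        if (d->id == id)
            return d->what;
    return NULL;
}

int main(void)
{
    const char *s = lookup_desc(0x21);

    printf("0x21 -> %s\n", s ? s : "unknown descriptor");
    return 0;
}
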
/linux/arch/sh/mm/

  cache-sh2a.c
     26  static void sh2a_flush_oc_line(unsigned long v, int way)  [in sh2a_flush_oc_line() argument]
     28  unsigned long addr = (v & 0x000007f0) | (way << 11);  [in sh2a_flush_oc_line()]
     76  int way;  [in sh2a__flush_wback_region() local]
     77  for (way = 0; way < nr_ways; way++) {  [in sh2a__flush_wback_region()]
     79  sh2a_flush_oc_line(v, way);  [in sh2a__flush_wback_region()]
    106  int way;  [in sh2a__flush_purge_region() local]
    108  for (way = 0; way < nr_ways; way++)  [in sh2a__flush_purge_region()]
    109  sh2a_flush_oc_line(v, way);  [in sh2a__flush_purge_region()]

  cache-sh2.c
     28  int way;  [in sh2__flush_wback_region() local]
     29  for (way = 0; way < 4; way++) {  [in sh2__flush_wback_region()]
     30  unsigned long data = __raw_readl(addr | (way << 12));  [in sh2__flush_wback_region()]
     33  __raw_writel(data, addr | (way << 12));  [in sh2__flush_wback_region()]

  cache-debugfs.c
     29  unsigned int waysize, way;  [in cache_debugfs_show() local]
     66  for (way = 0; way < cache->ways; way++) {  [in cache_debugfs_show()]
     71  seq_printf(file, "Way %d\n", way);  [in cache_debugfs_show()]

/linux/arch/xtensa/include/asm/

  tlbflush.h
    130  static inline void write_dtlb_entry (pte_t entry, int way)  [in write_dtlb_entry() argument]
    133  : : "r" (way), "r" (entry) );  [in write_dtlb_entry()]
    136  static inline void write_itlb_entry (pte_t entry, int way)  [in write_itlb_entry() argument]
    139  : : "r" (way), "r" (entry) );  [in write_itlb_entry()]
    176  static inline unsigned long read_dtlb_virtual (int way)  [in read_dtlb_virtual() argument]
    179  __asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));  [in read_dtlb_virtual()]
    183  static inline unsigned long read_dtlb_translation (int way)  [in read_dtlb_translation() argument]
    186  __asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));  [in read_dtlb_translation()]
    190  static inline unsigned long read_itlb_virtual (int way)  [in read_itlb_virtual() argument]
    193  __asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));  [in read_itlb_virtual()]
    [all …]

/linux/arch/arm/mm/

  cache-xsc3l2.c
     44  int set, way;  [in xsc3_l2_inv_all() local]
     49  for (way = 0; way < CACHE_WAY_PER_SET; way++) {  [in xsc3_l2_inv_all()]
     50  set_way = (way << 29) | (set << 5);  [in xsc3_l2_inv_all()]
    149  * optimize L2 flush all operation by set/way format
    154  int set, way;  [in xsc3_l2_flush_all() local]
    159  for (way = 0; way < CACHE_WAY_PER_SET; way++) {  [in xsc3_l2_flush_all()]
    160  set_way = (way << 29) | (set << 5);  [in xsc3_l2_flush_all()]

  cache-v7m.S
     51  * dcisw: Invalidate data cache by set/way
     58  * dccisw: Clean and invalidate data cache by set/way
    201  ands r4, r4, r1, lsr #3 @ find maximum number on the way size
    202  clz r5, r4 @ find bit position of way size increment
    209  orr r11, r10, r6 @ factor way and cache number into r11
    212  dccisw r11, r6 @ clean/invalidate by set/way
    215  subs r4, r4, #1 @ decrement the way
    234  * working outwards from L1 cache. This is done using Set/Way based cache

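Both ARM entries above build a single word that encodes which cache line a maintenance operation targets: cache-xsc3l2.c hard-codes (way << 29) | (set << 5) for an 8-way L2 with 32-byte lines, while cache-v7m.S computes the shifts at run time from the cache geometry. A hedged C sketch of that composition follows; the helper names are mine, and the level field in bits [3:1] belongs to the v7-M style set/way operation, not to the XScale3 register.

#include <stdint.h>

/* Sketch of the usual ARM "clean/invalidate by set/way" operand layout:
 * the way index lives in the top bits, the set index starts at
 * log2(line size), and the cache level sits in bits [3:1].
 * Assumes power-of-two associativity and line size. */
static inline unsigned int log2_u32(uint32_t x)
{
    unsigned int r = 0;

    while (x >>= 1)
        r++;
    return r;
}

static uint32_t set_way_word(uint32_t set, uint32_t way, uint32_t level,
                             uint32_t ways, uint32_t line_bytes)
{
    uint32_t way_shift = 32 - log2_u32(ways);   /* e.g. 8 ways -> shift 29 */
    uint32_t set_shift = log2_u32(line_bytes);  /* e.g. 32-byte line -> 5  */

    return (way << way_shift) | (set << set_shift) | (level << 1);
}

/* For the XScale3 L2 above (8 ways, 32-byte lines, no level field), this
 * reduces to (way << 29) | (set << 5). */
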
/linux/scripts/tracing/

  ftrace-bisect.sh
     25  # The old (slow) way, for kernels before v5.1.
     27  # [old-way] # cat available_filter_functions > ~/full-file
     29  # [old-way] *** Note *** this process will take several minutes to update the
     30  # [old-way] filters. Setting multiple functions is an O(n^2) operation, and we
     31  # [old-way] are dealing with thousands of functions. So go have coffee, talk
     32  # [old-way] with your coworkers, read facebook. And eventually, this operation
     33  # [old-way] will end.
     35  # The new way (using numbers) is an O(n) operation, and usually takes less than a second.
     45  # For either the new or old way, the rest of the operations remain the same.
     78  # The way to figure out if the problem function is bad, just do:

/linux/arch/x86/crypto/

  twofish_glue_3way.c
      3  * Glue Code for 3-way parallel assembler optimized version of Twofish
     80  .base.cra_driver_name = "ecb-twofish-3way",
     92  .base.cra_driver_name = "cbc-twofish-3way",
    116  * On Atom, twofish-3way is slower than original assembler  [in is_blacklisted_cpu()]
    117  * implementation. Twofish-3way trades off some performance in  [in is_blacklisted_cpu()]
    129  * On Pentium 4, twofish-3way is slower than original assembler  [in is_blacklisted_cpu()]
    148  "twofish-x86_64-3way: performance on this CPU "  [in twofish_3way_init()]
    150  "twofish-x86_64-3way.\n");  [in twofish_3way_init()]
    167  MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");

/linux/arch/openrisc/include/asm/

  spr_defs.h
     72  #define SPR_DTLBMR_BASE(WAY) (SPRGROUP_DMMU + 0x200 + (WAY) * 0x100)  [argument]
     73  #define SPR_DTLBMR_LAST(WAY) (SPRGROUP_DMMU + 0x27f + (WAY) * 0x100)  [argument]
     74  #define SPR_DTLBTR_BASE(WAY) (SPRGROUP_DMMU + 0x280 + (WAY) * 0x100)  [argument]
     75  #define SPR_DTLBTR_LAST(WAY) (SPRGROUP_DMMU + 0x2ff + (WAY) * 0x100)  [argument]
     80  #define SPR_ITLBMR_BASE(WAY) (SPRGROUP_IMMU + 0x200 + (WAY) * 0x100)  [argument]
     81  #define SPR_ITLBMR_LAST(WAY) (SPRGROUP_IMMU + 0x27f + (WAY) * 0x100)  [argument]
     82  #define SPR_ITLBTR_BASE(WAY) (SPRGROUP_IMMU + 0x280 + (WAY) * 0x100)  [argument]
     83  #define SPR_ITLBTR_LAST(WAY) (SPRGROUP_IMMU + 0x2ff + (WAY) * 0x100)  [argument]
     92  #define SPR_DCR_BASE(WAY) (SPRGROUP_DC + 0x200 + (WAY) * 0x200)  [argument]
     93  #define SPR_DCR_LAST(WAY) (SPRGROUP_DC + 0x3ff + (WAY) * 0x200)  [argument]
    [all …]

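The OpenRISC macros above carve each MMU or cache SPR group into per-way windows: TLB match/translate registers advance by 0x100 per way and data-cache control registers by 0x200 per way, so the way number simply selects which window a register index lands in. A brief illustration, keeping the group bases symbolic (the SPR_DTLBMR_SET helper is hypothetical, not part of spr_defs.h):

/*
 * Illustrative expansion of the per-way windows defined above
 * (SPRGROUP_DMMU is kept symbolic because its value is not shown here):
 *
 *   SPR_DTLBMR_BASE(0) == SPRGROUP_DMMU + 0x200    way 0 match registers
 *   SPR_DTLBMR_BASE(1) == SPRGROUP_DMMU + 0x300    way 1 match registers
 *   SPR_DTLBTR_BASE(1) == SPRGROUP_DMMU + 0x380    way 1 translate registers
 *
 * Hypothetical helper: the SPR for entry 'set' of way 'way'.
 */
#define SPR_DTLBMR_SET(way, set) (SPR_DTLBMR_BASE(way) + (set))
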
/linux/arch/arm64/boot/dts/broadcom/

  bcm2712.dtsi
     61  d-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
     64  i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
     71  cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
     85  d-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
     88  i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
     95  cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
    109  d-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
    112  i-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
    119  cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
    133  d-cache-sets = <256>; // 64KiB(size)/64(line-size)=1024ways/4-way set
    [all …]

/linux/arch/arm/boot/dts/broadcom/

  bcm2836.dtsi
     58  d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
     61  i-cache-sets = <512>; // 32KiB(size)/32(line-size)=1024ways/2-way set
     72  d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
     75  i-cache-sets = <512>; // 32KiB(size)/32(line-size)=1024ways/2-way set
     86  d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
     89  i-cache-sets = <512>; // 32KiB(size)/32(line-size)=1024ways/2-way set
    100  d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
    103  i-cache-sets = <512>; // 32KiB(size)/32(line-size)=1024ways/2-way set
    118  cache-sets = <1024>; // 512KiB(size)/64(line-size)=8192ways/8-way set

  bcm2837.dtsi
     57  d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
     60  i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
     72  d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
     75  i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
     87  d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
     90  i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
    102  d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set
    105  i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set
    120  cache-sets = <512>; // 512KiB(size)/64(line-size)=8192ways/16-way set

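The comments in all three Broadcom device trees above use the same arithmetic: cache size divided by line size gives the number of lines (what the comments call "ways"), and dividing that by the associativity gives the value placed in the *-cache-sets property. A minimal check of the quoted figures (the function name is illustrative):

#include <assert.h>

/* cache-sets = size / line_size / ways, per the dtsi comments above. */
static unsigned int cache_sets(unsigned int size, unsigned int line,
                               unsigned int ways)
{
    return size / line / ways;
}

int main(void)
{
    assert(cache_sets(64 * 1024, 64, 4) == 256);    /* 4-way L1: d/i-cache-sets   */
    assert(cache_sets(32 * 1024, 64, 4) == 128);    /* bcm2836/7 L1 D-cache       */
    assert(cache_sets(512 * 1024, 64, 8) == 1024);  /* 8-way L2: cache-sets       */
    assert(cache_sets(512 * 1024, 64, 16) == 512);  /* 16-way L2 (bcm2837)        */
    return 0;
}
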
/linux/arch/arc/mm/

  tlb.c
    175  * Flush the entire MM for userland. The fastest way is to move to Next ASID
    204  * -Here the fastest way (if range is too large) is to move to next ASID
    682  #define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))  [argument]
    709  int is_valid, way;  [in do_tlb_overlap_fault() local]
    713  for (way = 0, is_valid = 0; way < n_ways; way++) {  [in do_tlb_overlap_fault()]
    715  SET_WAY_TO_IDX(mmu, set, way));  [in do_tlb_overlap_fault()]
    717  pd0[way] = read_aux_reg(ARC_REG_TLBPD0);  [in do_tlb_overlap_fault()]
    718  is_valid |= pd0[way] & _PAGE_PRESENT;  [in do_tlb_overlap_fault()]
    719  pd0[way] &= PAGE_MASK;  [in do_tlb_overlap_fault()]
    727  for (way = 0; way < n_ways - 1; way++) {  [in do_tlb_overlap_fault()]
    [all …]

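SET_WAY_TO_IDX() in the ARC hit above linearizes a (set, way) pair into a single TLB entry index: with mmu->ways entries per set, set s occupies indices s*ways through s*ways + ways - 1. A tiny standalone restatement (names are mine):

/* Linear TLB index for a (set, way) pair, as SET_WAY_TO_IDX() does above:
 * set 0 holds indices 0..ways-1, set 1 holds ways..2*ways-1, and so on. */
static inline unsigned int set_way_to_idx(unsigned int set, unsigned int way,
                                          unsigned int ways)
{
    return set * ways + way;
}

/* e.g. a 4-way TLB: set 2, way 3 -> index 2 * 4 + 3 = 11 */
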
/linux/Documentation/arch/x86/

  entry_64.rst
     28  either way.
     36  magically-generated functions that make their way to common_interrupt()
     64  Now, there's a secondary complication: there's a cheap way to test
     65  which mode the CPU is in and an expensive way.
     67  The cheap way is to pick this info off the entry frame on the kernel
     75  The expensive (paranoid) way is to read back the MSR_GS_BASE value
     96  stack but before we executed SWAPGS, then the only safe way to check

/linux/arch/mips/include/asm/octeon/

  cvmx-l2c.h
    183  * Return the L2 Cache way partitioning for a given core.
    199  * a way, while a 1 bit blocks the core from evicting any
    200  * lines from that way. There must be at least one allowed
    201  * way (0 bit) in the mask.
    212  * Return the L2 Cache way partitioning for the hw blocks.
    214  * Returns The mask specifying the reserved way. 0 bits in mask indicates
    225  * a way, while a 1 bit blocks the core from evicting any
    226  * lines from that way. There must be at least one allowed
    227  * way (0 bit) in the mask.
    295  * @index: Which way to read from.
    [all …]

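The cvmx-l2c.h comments describe the partition mask convention: a 0 bit in the mask lets the core (or HW block) evict lines from that way, a 1 bit blocks it, and at least one way must stay allowed. A small hedged sketch of a validity check built on that convention (the helper is illustrative, not part of the cvmx API):

#include <stdbool.h>
#include <stdint.h>

/* One bit per L2 way: 0 = way usable by this core, 1 = way blocked.
 * Per the comments above, a mask is only legal if at least one way
 * is left usable.  Illustrative helper, not a cvmx-l2c.h function. */
static bool l2c_partition_mask_valid(uint32_t mask, unsigned int num_ways)
{
    uint32_t all_ways = (num_ways >= 32) ? 0xffffffffu
                                         : ((1u << num_ways) - 1);

    /* Ignore bits above the implemented ways, then require a 0 bit. */
    return (mask & all_ways) != all_ways;
}

/* e.g. on a 16-way L2, reserving ways 0-3 for another block:
 *   mask = 0x000f  ->  valid (ways 4-15 still allowed)
 *   mask = 0xffff  ->  invalid (every way blocked)            */
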
/linux/arch/mips/kernel/

  bmips_5xxx_init.S
    126  * Determine sets per way: IS
    128  * This field contains the number of sets (i.e., indices) per way of
    137  /* sets per way = (64<<IS) */
    164  /* v0 now have sets per way, multiply it by line size now
    174  * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
    175  * 4-way, v) 0x4 - 0x7: Reserved.
    219  * Determine sets per way: IS
    221  * This field contains the number of sets (i.e., indices) per way of
    230  /* sets per way = (64<<IS) */
    256  /* v0 now have sets per way, multiply it by line size now
    [all …]

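The bmips_5xxx_init.S comments spell out the cache geometry decode used there: sets per way = 64 << IS, associativity field 0 means direct mapped, 1 means 2-way, and so on, and the per-way byte count is sets per way times line size. A worked C version of that arithmetic; the inputs are example values, not reads of a real CP0 register, and the exact field positions should be taken from the core's manual.

/* Cache geometry arithmetic as described in the comments above:
 *   sets per way  = 64 << IS
 *   ways          = assoc_field + 1   (0 = direct mapped, 1 = 2-way, ...)
 *   bytes per way = sets_per_way * line_size
 *   total size    = bytes_per_way * ways
 */
static unsigned int cache_bytes(unsigned int is_field,
                                unsigned int line_size,
                                unsigned int assoc_field)
{
    unsigned int sets_per_way = 64u << is_field;
    unsigned int ways = assoc_field + 1;

    return sets_per_way * line_size * ways;
}

/* e.g. IS = 2 (256 sets/way), 32-byte lines, assoc field 3 (4-way):
 *   256 * 32 * 4 = 32 KiB */
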
/linux/arch/powerpc/mm/nohash/

  tlb_low.S
     77  oris r7,r6,0x8000 /* specify way explicitly */
    104  /* 476 variant. There's not simple way to do this, hopefully we'll
    112  lis r7,0x8000 /* Specify way explicitly */
    117  li r4,0 /* Current way */
    119  andi. r0,r8,1 /* Check if way 0 is bolted */
    120  mtctr r9 /* Load way counter */
    123  2: /* For each way */
    124  or r5,r3,r4 /* Make way|index for tlbre */
    127  3: addis r4,r4,0x2000 /* Next way */
    130  rlwimi r7,r5,0,1,2 /* Insert way number */
    [all …]

/linux/arch/mips/mm/

  cerr-sb1.c
    321  unsigned short way;  [in extract_ic() local]
    330  for (way = 0; way < 4; way++) {  [in extract_ic()]
    344  : "r" ((way << 13) | addr));  [in extract_ic()]
    347  if (way == 0) {  [in extract_ic()]
    377  way, va, valid, taghi, taglo);  [in extract_ic()]
    399  : "r" ((way << 13) | addr | (offset << 3)));  [in extract_ic()]
    477  int valid, way;  [in extract_dc() local]
    485  for (way = 0; way < 4; way++) {  [in extract_dc()]
    498  : "r" ((way << 13) | addr));  [in extract_dc()]
    502  if (way == 0) {  [in extract_dc()]
    [all …]

/linux/tools/testing/selftests/net/forwarding/

  README
     37  between 4-ports LAGs or 8-way ECMP requires many physical links that are
     65  various ways. A number of these variables can be overridden. The way these
     75  One way of overriding these variables is through the environment:
     80  way to pass it through the environment. Its value can instead be given as
     85  A way to customize variables in a persistent fashion is to create a file

/linux/lib/

  dec_and_lock.c
     27  /* Otherwise do it the slow way */  [in _atomic_dec_and_lock()]
     44  /* Otherwise do it the slow way */  [in _atomic_dec_and_lock_irqsave()]
     59  /* Otherwise do it the slow way */  [in _atomic_dec_and_raw_lock()]
     75  /* Otherwise do it the slow way */  [in _atomic_dec_and_raw_lock_irqsave()]

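All four hits in dec_and_lock.c mark the same pattern: try a lock-free decrement first, and only fall back to the "slow way" of taking the spinlock when the counter might be dropping to zero. A sketch of that shape follows; it mirrors the usual kernel logic as suggested by the comments above, so treat it as an approximation rather than the file's exact code.

#include <linux/atomic.h>
#include <linux/spinlock.h>

/* Fast path decrements without the lock when the counter cannot reach
 * zero; the "slow way" takes the lock so that the 1 -> 0 transition and
 * the locked section are atomic with respect to other users.
 * Returns 1 with the lock held if the counter reached zero. */
static int atomic_dec_and_lock_sketch(atomic_t *atomic, spinlock_t *lock)
{
	/* Fast path: subtract 1 unless the counter is exactly 1. */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;			/* did not reach zero */

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;			/* reached zero, lock held */
	spin_unlock(lock);
	return 0;
}
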
/linux/Documentation/process/

  2.Process.rst
     67  As fixes make their way into the mainline, the patch rate will slow over
    154  describes the process in a somewhat idealized way. A much more detailed
    159  - Design. This is where the real requirements for the patch - and the way
    173  all the way to the mainline. The patch will show up in the maintainer's
    217  unassisted. The way the kernel developers have addressed this growth is
    224  subsystem maintainers are the gatekeepers (in a loose way) for the portion
    253  normally the right way to go.
    306  their way into linux-next some time before the merge window opens.
    313  many sub-directories for drivers or filesystems that are on their way to
    316  kernel proper. This is a way to keep track of drivers that aren't
    [all …]

/linux/Documentation/admin-guide/

  devices.txt
    207  Partitions are handled in the same way as for IDE
    509  Partitions are handled the same way as for IDE disks
    520  Partitions are handled the same way as for the first
    628  Partitions are handled in the same way as for IDE
    716  Partitions are handled the same way as for the first
    734  Partitions are handled the same way as for the first
    861  Partitions are handled in the same way as for IDE
    885  Partitions are handled in the same way as for IDE
   1034  Partitions are handled the same way as for the first
   1046  Partitions are handled the same way as for the first
    [all …]