/linux/tools/perf/pmu-events/arch/nds32/n13/ |
H A D | atcpmu.json |
     6  "BriefDescription": "V3 Conditional branch"
    12  "BriefDescription": "V3 Taken Conditional branch"
    18  "BriefDescription": "V3 Prefetch Instruction"
    24  "BriefDescription": "V3 RET Inst"
    30  "BriefDescription": "V3 JR(non-RET) instructions"
    36  "BriefDescription": "V3 JAL/JRAL instructions"
    42  "BriefDescription": "V3 NOP instructions"
    48  "BriefDescription": "V3 SCW instructions"
    54  "BriefDescription": "V3 ISB/DSB instructions"
    60  "BriefDescription": "V3 CCTL instructions"
  [all …]
|
/linux/drivers/pci/controller/ |
H A D | pci-v3-semi.c |
     3  * Support for V3 Semiconductor PCI Local Bus to PCI Bridge
   249  * The V3 PCI interface chip in Integrator provides several windows from
   262  * There are three V3 windows, each described by a pair of V3 registers.
   289  * The V3 chip translates an address by checking its range within
   293  * LB_BASE1/LB_MAP1, the V3 will use the translation from
   313  struct v3_pci *v3 = bus->sysdata;  in v3_map_bus() local
   365  writel(v3_addr_to_lb_base(v3->non_pre_mem) |  in v3_map_bus()
   367  	v3->base + V3_LB_BASE0);  in v3_map_bus()
   373  writel(v3_addr_to_lb_base(v3->config_mem) |  in v3_map_bus()
   375  	v3->base + V3_LB_BASE1);  in v3_map_bus()
  [all …]
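Note: the comments above describe the window scheme: each of the three local-bus windows is programmed through an LB_BASEx/LB_MAPx register pair, and an access that falls inside a window's range is forwarded to PCI at the translated address. A minimal C sketch of that check-and-translate idea (field names and layout are illustrative, not the real V3 register encoding):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Conceptual sketch only: a local-bus window is hit when the address
     * falls inside [lb_base, lb_base + size), and the bridge then rewrites
     * the address using the window's PCI mapping.
     */
    struct lb_window {
            uint32_t lb_base;       /* local-bus window start */
            uint32_t size;          /* window length */
            uint32_t pci_map;       /* PCI address the window translates to */
    };

    static bool lb_window_translate(const struct lb_window *w,
                                    uint32_t lb_addr, uint32_t *pci_addr)
    {
            if (lb_addr < w->lb_base || lb_addr - w->lb_base >= w->size)
                    return false;   /* address not claimed by this window */

            *pci_addr = w->pci_map + (lb_addr - w->lb_base);
            return true;
    }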
|
/linux/include/uapi/linux/ |
H A D | nfs.h |
    47  NFS_OK = 0,		/* v2 v3 v4 */
    48  NFSERR_PERM = 1,	/* v2 v3 v4 */
    49  NFSERR_NOENT = 2,	/* v2 v3 v4 */
    50  NFSERR_IO = 5,		/* v2 v3 v4 */
    51  NFSERR_NXIO = 6,	/* v2 v3 v4 */
    52  NFSERR_EAGAIN = 11,	/* v2 v3 */
    53  NFSERR_ACCES = 13,	/* v2 v3 v4 */
    54  NFSERR_EXIST = 17,	/* v2 v3 v4 */
    55  NFSERR_XDEV = 18,	/* v3 v4 */
    56  NFSERR_NODEV = 19,	/* v2 v3 v4 */
  [all …]
|
/linux/arch/s390/include/asm/ |
H A D | fpu-insn-asm.h |
   103  .ifc \vxr,%v3
   204  * @v3: Vector register designated operand whose MSB is stored in
   211  * Note: In most vector instruction formats [1] V1, V2, V3, and V4 directly
   212  * correspond to @v1, @v2, @v3, and @v4. But there are exceptions, such as but
   219  .macro RXB rxb v1 v2=0 v3=0 v4=0
   227  .if \v3 & 0x10
   240  * @v3: Third vector register designated operand (for RXB)
   243  * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
   246  .macro MRXB m v1 v2=0 v3=0 v4=0
   248  RXB rxb, \v1, \v2, \v3, \v4
  [all …]
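Note: the RXB macro gathers the most-significant bit of each vector register number, since the 4-bit register fields of the instruction can only encode v0-v15 and registers v16-v31 need one extension bit per operand. A C sketch of the same computation, assuming the usual one-bit-per-operand RXB layout:

    #include <stdint.h>

    /*
     * Sketch of the RXB computation: bit 4 of each operand's register number
     * is collected into a separate 4-bit RXB field, one bit per operand
     * position (first operand in the most significant RXB bit).
     */
    static uint8_t rxb(uint8_t v1, uint8_t v2, uint8_t v3, uint8_t v4)
    {
            uint8_t r = 0;

            if (v1 & 0x10)
                    r |= 0x08;      /* extension bit for the first operand */
            if (v2 & 0x10)
                    r |= 0x04;
            if (v3 & 0x10)
                    r |= 0x02;
            if (v4 & 0x10)
                    r |= 0x01;

            return r;
    }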
|
H A D | fpu-insn.h |
   146  static __always_inline void fpu_vab(u8 v1, u8 v2, u8 v3)  in fpu_vab() argument
   148  asm volatile("VAB %[v1],%[v2],%[v3]"  in fpu_vab()
   150  : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)  in fpu_vab()
   154  static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3)  in fpu_vcksm() argument
   156  asm volatile("VCKSM %[v1],%[v2],%[v3]"  in fpu_vcksm()
   158  : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)  in fpu_vcksm()
   162  static __always_inline void fpu_vesravb(u8 v1, u8 v2, u8 v3)  in fpu_vesravb() argument
   164  asm volatile("VESRAVB %[v1],%[v2],%[v3]"  in fpu_vesravb()
   166  : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)  in fpu_vesravb()
   170  static __always_inline void fpu_vgfmag(u8 v1, u8 v2, u8 v3, u8 v4)  in fpu_vgfmag() argument
  [all …]
|
/linux/lib/ |
H A D | siphash.c |
    20  #define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
    26  u64 v3 = SIPHASH_CONST_3; \
    28  v3 ^= key->key[1]; \
    34  v3 ^= b; \
    43  return (v0 ^ v1) ^ (v2 ^ v3);
    54  v3 ^= m;  in __siphash_aligned()
    87  v3 ^= m;  in __siphash_unaligned()
   119  v3 ^= first;  in siphash_1u64()
   136  v3 ^= first;  in siphash_2u64()
   140  v3 ^= second;  in siphash_2u64()
  [all …]
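Note: v0-v3 are the four 64-bit words of the SipHash state and SIPROUND applies the SipHash ARX permutation to them. A scalar C sketch of that permutation (the standard SipHash round, written out here rather than copied from the kernel file):

    #include <stdint.h>

    static inline uint64_t rol64(uint64_t v, unsigned int s)
    {
            return (v << s) | (v >> (64 - s));
    }

    /* One SipHash round over the four-word state v0..v3 (standard definition). */
    static void sipround(uint64_t *v0, uint64_t *v1, uint64_t *v2, uint64_t *v3)
    {
            *v0 += *v1; *v1 = rol64(*v1, 13); *v1 ^= *v0; *v0 = rol64(*v0, 32);
            *v2 += *v3; *v3 = rol64(*v3, 16); *v3 ^= *v2;
            *v0 += *v3; *v3 = rol64(*v3, 21); *v3 ^= *v0;
            *v2 += *v1; *v1 = rol64(*v1, 17); *v1 ^= *v2; *v2 = rol64(*v2, 32);
    }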
|
H A D | xxhash.c |
   112  uint32_t v3 = seed + 0;  in xxh32() local
   120  v3 = xxh32_round(v3, get_unaligned_le32(p));  in xxh32()
   127  xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);  in xxh32()
   182  uint64_t v3 = seed + 0;  in xxh64() local
   190  v3 = xxh64_round(v3, get_unaligned_le64(p));  in xxh64()
   197  xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);  in xxh64()
   200  h64 = xxh64_merge_round(h64, v3);  in xxh64()
   250  state.v3 = seed + 0;  in xxh32_reset()
   264  state.v3 = seed + 0;  in xxh64_reset()
   297  state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32));  in xxh32_update()
  [all …]
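Note: xxh32()/xxh64() keep four accumulator lanes v1-v4, fold one input word into each lane per stripe, and finally merge the lanes with the 1/7/12/18 rotations visible above. A C sketch of the 32-bit lane update and merge, using the published xxHash32 primes (illustrative, not the kernel's exact helpers):

    #include <stdint.h>

    #define PRIME32_1 2654435761U
    #define PRIME32_2 2246822519U

    static inline uint32_t rotl32(uint32_t v, unsigned int s)
    {
            return (v << s) | (v >> (32 - s));
    }

    /* Per-lane accumulation: one 32-bit input word folded into one lane. */
    static uint32_t xxh32_round_sketch(uint32_t lane, uint32_t input)
    {
            lane += input * PRIME32_2;
            lane = rotl32(lane, 13);
            lane *= PRIME32_1;
            return lane;
    }

    /* Folding the four lanes together, matching the rotate amounts 1, 7, 12, 18. */
    static uint32_t xxh32_merge_sketch(uint32_t v1, uint32_t v2,
                                       uint32_t v3, uint32_t v4)
    {
            return rotl32(v1, 1) + rotl32(v2, 7) + rotl32(v3, 12) + rotl32(v4, 18);
    }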
|
/linux/arch/arm64/lib/ |
H A D | xor-neon.c |
    19  register uint64x2_t v0, v1, v2, v3;  in xor_arm64_neon_2() local
    27  v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));  in xor_arm64_neon_2()
    33  vst1q_u64(dp1 + 6, v3);  in xor_arm64_neon_2()
    48  register uint64x2_t v0, v1, v2, v3;  in xor_arm64_neon_3() local
    56  v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));  in xor_arm64_neon_3()
    62  v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));  in xor_arm64_neon_3()
    68  vst1q_u64(dp1 + 6, v3);  in xor_arm64_neon_3()
    86  register uint64x2_t v0, v1, v2, v3;  in xor_arm64_neon_4() local
    94  v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));  in xor_arm64_neon_4()
   100  v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));  in xor_arm64_neon_4()
  [all …]
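Note: each xor_arm64_neon_N() helper processes 64 bytes per iteration in the quad registers v0-v3, XORing in the matching words from every source buffer and storing the result back into the first one. A minimal intrinsics sketch of the two-source case (assumes the byte count is a multiple of 64, as the kernel helpers do):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    /* XOR p2 into p1, 64 bytes (four 128-bit registers) per iteration. */
    static void xor_neon_2_sketch(unsigned long bytes, uint64_t *p1,
                                  const uint64_t *p2)
    {
            size_t lines = bytes / 64;      /* assumes bytes % 64 == 0 */

            while (lines--) {
                    uint64x2_t v0 = veorq_u64(vld1q_u64(p1 + 0), vld1q_u64(p2 + 0));
                    uint64x2_t v1 = veorq_u64(vld1q_u64(p1 + 2), vld1q_u64(p2 + 2));
                    uint64x2_t v2 = veorq_u64(vld1q_u64(p1 + 4), vld1q_u64(p2 + 4));
                    uint64x2_t v3 = veorq_u64(vld1q_u64(p1 + 6), vld1q_u64(p2 + 6));

                    vst1q_u64(p1 + 0, v0);
                    vst1q_u64(p1 + 2, v1);
                    vst1q_u64(p1 + 4, v2);
                    vst1q_u64(p1 + 6, v3);

                    p1 += 8;
                    p2 += 8;
            }
    }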
|
/linux/arch/arm64/crypto/ |
H A D | sm4-ce-core.S |
    64  sm4ekey v3.4s, v2.4s, v27.4s;
    65  sm4ekey v4.4s, v3.4s, v28.4s;
    73  st1 {v0.16b-v3.16b}, [x1], #64;
    80  tbl v20.16b, {v3.16b}, v24.16b
   121  ld1 {v0.16b-v3.16b}, [x2], #64;
   124  SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
   126  st1 {v0.16b-v3.16b}, [x1], #64;
   139  ld1 {v0.16b-v3.16b}, [x2], #64;
   140  SM4_CRYPT_BLK4(v0, v1, v2, v3);
   141  st1 {v0.16b-v3.16b}, [x1], #64;
  [all …]
|
H A D | aes-modes.S |
    26  encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
    31  decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
    37  encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
    42  decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
    62  ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
    66  st1 {v0.16b-v3.16b}, [x0], #64
    92  ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
    96  st1 {v0.16b-v3.16b}, [x0], #64
   143  ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
   150  eor v3.16b, v3.16b, v2.16b
  [all …]
|
H A D | sm3-ce-core.S |
    89  0: ld1 {v0.16b-v3.16b}, [x1], #64
    98  CPU_LE( rev32 v3.16b, v3.16b )
   102  qround a, v0, v1, v2, v3, v4
   103  qround a, v1, v2, v3, v4, v0
   104  qround a, v2, v3, v4, v0, v1
   105  qround a, v3, v4, v0, v1, v2
   109  qround b, v4, v0, v1, v2, v3
   110  qround b, v0, v1, v2, v3, v4
   111  qround b, v1, v2, v3, v4, v0
   112  qround b, v2, v3, v4, v0, v1
  [all …]
|
H A D | chacha-neon-core.S |
    32  * registers v0-v3. It performs matrix operations on four words in parallel,
    47  eor v3.16b, v3.16b, v0.16b
    48  rev32 v3.8h, v3.8h
    51  add v2.4s, v2.4s, v3.4s
    58  eor v3.16b, v3.16b, v0.16b
    59  tbl v3.16b, {v3.16b}, v12.16b
    62  add v2.4s, v2.4s, v3.4s
    72  ext v3.16b, v3.16b, v3.16b, #12
    76  eor v3.16b, v3.16b, v0.16b
    77  rev32 v3.8h, v3.8h
  [all …]
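Note: the comment explains that v0-v3 hold the four rows of the ChaCha state, so each vector instruction performs one step of the quarter-round on four words at once (rev32/tbl implement the 16- and 8-bit rotates, ext re-aligns the rows for the diagonal rounds). For reference, the scalar quarter-round each lane computes (standard ChaCha definition):

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t v, unsigned int s)
    {
            return (v << s) | (v >> (32 - s));
    }

    /* Standard ChaCha quarter-round; the NEON code runs four of these in lock-step. */
    static void chacha_quarter_round(uint32_t *a, uint32_t *b,
                                     uint32_t *c, uint32_t *d)
    {
            *a += *b; *d ^= *a; *d = rotl32(*d, 16);
            *c += *d; *b ^= *c; *b = rotl32(*b, 12);
            *a += *b; *d ^= *a; *d = rotl32(*d, 8);
            *c += *d; *b ^= *c; *b = rotl32(*b, 7);
    }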
|
H A D | aes-ce-core.S |
    18  mov v3.16b, v1.16b
    21  ld1 {v3.4s}, [x0], #16
    25  aese v0.16b, v3.16b
    31  ld1 {v3.4s}, [x0], #16
    34  eor v0.16b, v0.16b, v3.16b
    46  mov v3.16b, v1.16b
    49  ld1 {v3.4s}, [x0], #16
    53  aesd v0.16b, v3.16b
    59  ld1 {v3.4s}, [x0], #16
    62  eor v0.16b, v0.16b, v3.16b
|
H A D | sm4-neon-core.S |
   273  ld4 {v0.4s-v3.4s}, [x2], #64
   276  SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7)
   278  st1 {v0.16b-v3.16b}, [x1], #64
   291  ld4 {v0.4s-v3.4s}, [x2], #64
   293  SM4_CRYPT_BLK4(v0, v1, v2, v3)
   295  st1 {v0.16b-v3.16b}, [x1], #64
   308  transpose_4x4(v0, v1, v2, v3)
   310  SM4_CRYPT_BLK4(v0, v1, v2, v3)
   340  ld4 {v0.4s-v3.4s}, [x2], #64
   343  SM4_CRYPT_BLK8_norotate(v0, v1, v2, v3, v4, v5, v6, v7)
  [all …]
|
/linux/drivers/gpu/drm/amd/display/dc/inc/ |
H A D | reg_helper.h |
    72  #define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \  argument
    76  FN(reg, f3), v3)
    78  #define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \  argument
    82  FN(reg, f3), v3,\
    85  #define REG_SET_5(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \  argument
    90  FN(reg, f3), v3,\
    94  #define REG_SET_6(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \  argument
    99  FN(reg, f3), v3,\
   104  #define REG_SET_7(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \  argument
   109  FN(reg, f3), v3,\
  [all …]
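Note: the REG_SET_N macros expand to a single helper call that shifts and masks each (field, value) pair into one register word, starting from init_value. A simplified C sketch of that composition (the reg_field descriptor and helpers below are illustrative, not the DC driver's FN()/generic register machinery):

    #include <stdint.h>

    /* Illustrative field descriptor: mask is already shifted into position. */
    struct reg_field {
            uint32_t mask;
            uint32_t shift;
    };

    static uint32_t set_field(uint32_t reg, struct reg_field f, uint32_t value)
    {
            return (reg & ~f.mask) | ((value << f.shift) & f.mask);
    }

    /* Rough equivalent of REG_SET_3: start from init_value, fold in three fields. */
    static uint32_t reg_set_3(uint32_t init_value,
                              struct reg_field f1, uint32_t v1,
                              struct reg_field f2, uint32_t v2,
                              struct reg_field f3, uint32_t v3)
    {
            uint32_t reg = init_value;

            reg = set_field(reg, f1, v1);
            reg = set_field(reg, f2, v2);
            reg = set_field(reg, f3, v3);
            return reg;
    }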
|
/linux/arch/loongarch/lib/ |
H A D | xor_template.c |
    38  const unsigned long * __restrict v3)
    46  LD_AND_XOR_LINE(v3)
    48  : : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3) : "memory"
    53  v3 += LINE_WIDTH / sizeof(unsigned long);
    60  const unsigned long * __restrict v3,
    69  LD_AND_XOR_LINE(v3)
    72  : : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3), [v4] "r"(v4)
    78  v3 += LINE_WIDTH / sizeof(unsigned long);
    86  const unsigned long * __restrict v3,
    96  LD_AND_XOR_LINE(v3)
  [all …]
|
/linux/arch/powerpc/lib/ |
H A D | xor_vmx.c |
    78  DEFINE(v3);  in __xor_altivec_3()
    84  LOAD(v3);  in __xor_altivec_3()
    86  XOR(v1, v3);  in __xor_altivec_3()
    91  v3 += 4;  in __xor_altivec_3()
   103  DEFINE(v3);  in __xor_altivec_4()
   110  LOAD(v3);  in __xor_altivec_4()
   113  XOR(v3, v4);  in __xor_altivec_4()
   114  XOR(v1, v3);  in __xor_altivec_4()
   119  v3 += 4;  in __xor_altivec_4()
   133  DEFINE(v3);  in __xor_altivec_5()
  [all …]
|
/linux/drivers/char/mwave/ |
H A D | mwavedd.h |
    89  #define PRINTK_4(f,s,v1,v2,v3) \  argument
    91  printk(s,v1,v2,v3); \
    94  #define PRINTK_5(f,s,v1,v2,v3,v4) \  argument
    96  printk(s,v1,v2,v3,v4); \
    99  #define PRINTK_6(f,s,v1,v2,v3,v4,v5) \  argument
   101  printk(s,v1,v2,v3,v4,v5); \
   104  #define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6) \  argument
   106  printk(s,v1,v2,v3,v4,v5,v6); \
   109  #define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7) \  argument
   111  printk(s,v1,v2,v3,v4,v5,v6,v7); \
  [all …]
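Note: PRINTK_4 through PRINTK_8 differ only in how many arguments they forward to printk(), and each expands to a printk guarded by the trace flag f. A hedged sketch of the same idea as a single variadic macro (modern style, not the driver's actual code; printf stands in for printk so the snippet is self-contained):

    #include <stdio.h>

    /*
     * Sketch only: one variadic macro standing in for the PRINTK_4..PRINTK_8
     * family.  "f" is the runtime trace flag; the format string and values
     * are forwarded unchanged when the flag is set.
     */
    #define DBG_PRINTK(f, fmt, ...)                         \
            do {                                            \
                    if (f)                                  \
                            printf(fmt, ##__VA_ARGS__);     \
            } while (0)

    /* Usage, roughly equivalent to PRINTK_4(tracing, "%s: x=%d y=%d\n", name, x, y):
     *   DBG_PRINTK(tracing, "%s: x=%d y=%d\n", name, x, y);
     */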
|
/linux/include/pcmcia/ |
H A D | device_id.h |
    34  #define PCMCIA_DEVICE_PROD_ID3(v3, vh3) { \  argument
    36  .prod_id = { NULL, NULL, (v3), NULL }, \
    45  #define PCMCIA_DEVICE_PROD_ID13(v1, v3, vh1, vh3) { \  argument
    48  .prod_id = { (v1), NULL, (v3), NULL }, \
    57  #define PCMCIA_DEVICE_PROD_ID123(v1, v2, v3, vh1, vh2, vh3) { \  argument
    61  .prod_id = { (v1), (v2), (v3), NULL },\
    71  #define PCMCIA_DEVICE_PROD_ID134(v1, v3, v4, vh1, vh3, vh4) { \  argument
    75  .prod_id = { (v1), NULL, (v3), (v4) }, \
    78  #define PCMCIA_DEVICE_PROD_ID1234(v1, v2, v3, v4, vh1, vh2, vh3, vh4) { \  argument
    83  .prod_id = { (v1), (v2), (v3), (v4) }, \
  [all …]
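Note: each PCMCIA_DEVICE_PROD_IDx macro initializes a struct pcmcia_device_id with the CIS product-ID strings that must match and their precomputed crc32 hashes; the digits name which of the four ID slots are filled. A usage sketch for a driver match table (the strings and zero hashes below are placeholders, not a real device):

    #include <linux/module.h>
    #include <linux/mod_devicetable.h>
    #include <pcmcia/ds.h>

    /*
     * Placeholder strings and hashes for illustration only; real entries use
     * the card's CIS product-ID strings and their crc32 hashes.
     */
    static const struct pcmcia_device_id example_ids[] = {
            PCMCIA_DEVICE_PROD_ID123("ExampleVendor", "ExampleCard", "1.0",
                                     0x00000000, 0x00000000, 0x00000000),
            PCMCIA_DEVICE_NULL,
    };
    MODULE_DEVICE_TABLE(pcmcia, example_ids);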
|
/linux/Documentation/devicetree/bindings/hwmon/ |
H A D | ltc2990.txt |
    15  2: V1-V2, V3, V4
    16  3: TR1, V3, V4
    17  4: TR1, V3-V4
    19  6: V1-V2, V3-V4
    20  7: V1, V2, V3, V4
    27  2: TR2, V3 or V3-V4 only per mode
    35  lltc,meas-mode = <7 3>; /* V1, V2, V3, V4 */
|
/linux/drivers/gpu/drm/amd/amdgpu/ |
H A D | atombios_crtc.c |
   237  ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;  member
   277  args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);  in amdgpu_atombios_crtc_program_ss()
   278  args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;  in amdgpu_atombios_crtc_program_ss()
   281  args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;  in amdgpu_atombios_crtc_program_ss()
   284  args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;  in amdgpu_atombios_crtc_program_ss()
   287  args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;  in amdgpu_atombios_crtc_program_ss()
   292  args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);  in amdgpu_atombios_crtc_program_ss()
   293  args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);  in amdgpu_atombios_crtc_program_ss()
   294  args.v3.ucEnable = enable;  in amdgpu_atombios_crtc_program_ss()
   301  ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;  member
  [all …]
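Note: the args variables are unions holding every revision of an ATOM command-table parameter block, and the driver fills the member (v1, v2, v3, ...) that matches the table revision reported by the BIOS before executing the call. A minimal C sketch of that versioned-union pattern (structures and fields are illustrative, not the real ATOM definitions):

    #include <stdint.h>

    /* Illustrative parameter blocks for two revisions of one BIOS command. */
    struct enable_ss_v2 {
            uint16_t amount;
            uint8_t  type;
            uint8_t  enable;
    };

    struct enable_ss_v3 {
            uint16_t amount;
            uint16_t step;
            uint8_t  type;
            uint8_t  enable;
    };

    union enable_ss_args {
            struct enable_ss_v2 v2;
            struct enable_ss_v3 v3;
    };

    /* Fill the member that matches the command-table revision, then execute. */
    static void program_ss(union enable_ss_args *args, int table_rev,
                           uint16_t amount, uint16_t step, uint8_t type, int enable)
    {
            if (table_rev >= 3) {
                    args->v3.amount = amount;
                    args->v3.step = step;
                    args->v3.type = type;
                    args->v3.enable = enable;
            } else {
                    args->v2.amount = amount;
                    args->v2.type = type;
                    args->v2.enable = enable;
            }
            /* ... hand "args" to the ATOM interpreter here ... */
    }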
|
H A D | atombios_encoders.c |
   554  DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;  member
   601  args.v3.ucPanelMode = panel_mode;  in amdgpu_atombios_encoder_setup_dig_encoder()
   633  args.v3.ucAction = action;  in amdgpu_atombios_encoder_setup_dig_encoder()
   634  args.v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);  in amdgpu_atombios_encoder_setup_dig_encoder()
   636  args.v3.ucPanelMode = panel_mode;  in amdgpu_atombios_encoder_setup_dig_encoder()
   638  args.v3.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);  in amdgpu_atombios_encoder_setup_dig_encoder()
   640  if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))  in amdgpu_atombios_encoder_setup_dig_encoder()
   641  args.v3.ucLaneNum = dp_lane_count;  in amdgpu_atombios_encoder_setup_dig_encoder()
   643  args.v3.ucLaneNum = 8;  in amdgpu_atombios_encoder_setup_dig_encoder()
   645  args.v3.ucLaneNum = 4;  in amdgpu_atombios_encoder_setup_dig_encoder()
  [all …]
|
/linux/Documentation/hwmon/ |
H A D | peci-dimmtemp.rst |
     8  * Intel Xeon E5/E7 v3 server processors
     9  Intel Xeon E5-14xx v3 family
    10  Intel Xeon E5-24xx v3 family
    11  Intel Xeon E5-16xx v3 family
    12  Intel Xeon E5-26xx v3 family
    13  Intel Xeon E5-46xx v3 family
    14  Intel Xeon E7-48xx v3 family
    15  Intel Xeon E7-88xx v3 family
|
/linux/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/ |
H A D | lib_float_math.c |
    69  double math_max3(double v1, double v2, double v3)  in math_max3() argument
    71  return v3 > math_max2(v1, v2) ? v3 : math_max2(v1, v2);  in math_max3()
    74  double math_max4(double v1, double v2, double v3, double v4)  in math_max4() argument
    76  return v4 > math_max3(v1, v2, v3) ? v4 : math_max3(v1, v2, v3);  in math_max4()
    79  double math_max5(double v1, double v2, double v3, double v4, double v5)  in math_max5() argument
    81  return math_max3(v1, v2, v3) > math_max2(v4, v5) ? math_max3(v1, v2, v3) : math_max2(v4, v5);  in math_max5()
|
/linux/tools/testing/selftests/bpf/progs/ |
H A D | test_siphash.h |
    26  #define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
    32  u64 v3 = SIPHASH_CONST_3; \
    34  v3 ^= key->key[1]; \
    40  v3 ^= b; \
    49  return (v0 ^ v1) ^ (v2 ^ v3);
    54  v3 ^= first;  in siphash_2u64()
    58  v3 ^= second;  in siphash_2u64()
|