/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/cpufeature.h>

#define ARM_SMCCC_TRNG_MIN_VERSION	0x10000UL

extern bool smccc_trng_available;

static inline bool __init smccc_probe_trng(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
	if ((s32)res.a0 < 0)
		return false;

	return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
}

static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline bool __arm64_rndrrs(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDRRS_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}

static inline bool __must_check arch_get_random_long(unsigned long *v)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
		return true;
	return false;
}

static inline bool __must_check arch_get_random_int(unsigned int *v)
{
	if (cpus_have_const_cap(ARM64_HAS_RNG)) {
		unsigned long val;

		if (__arm64_rndr(&val)) {
			*v = val;
			return true;
		}
	}
	return false;
}

static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
	struct arm_smccc_res res;

	/*
	 * We prefer the SMCCC call, since its semantics (returning
	 * actual hardware backed entropy) are closer to the idea behind
	 * this function than what even the RNDRRS register provides
	 * (the output of a pseudo RNG freshly seeded by a TRNG).
	 */
	if (smccc_trng_available) {
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
		if ((int)res.a0 >= 0) {
			*v = res.a3;
			return true;
		}
	}

	/*
	 * RNDRRS is not backed by an entropy source but by a DRBG that is
	 * reseeded after each invocation. This is not a 100% fit but good
	 * enough to implement this API if no other entropy source exists.
	 */
	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
		return true;

	return false;
}

static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
	struct arm_smccc_res res;
	unsigned long val;

	if (smccc_trng_available) {
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res);
		if ((int)res.a0 >= 0) {
			*v = res.a3 & GENMASK(31, 0);
			return true;
		}
	}

	if (cpus_have_const_cap(ARM64_HAS_RNG)) {
		if (__arm64_rndrrs(&val)) {
			*v = val;
			return true;
		}
	}

	return false;
}
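/*
 * Illustrative sketch only, not part of this header's API: it shows how
 * the hooks above are meant to compose for a caller that wants a seed,
 * preferring firmware-backed entropy and falling back to the DRBG-backed
 * RNDR. The function name is hypothetical.
 */
static inline bool __archrandom_example_get_seed(unsigned long *seed)
{
	/* SMCCC TRNG if available, else RNDRRS (a freshly reseeded DRBG). */
	if (arch_get_random_seed_long(seed))
		return true;

	/* Last resort: plain RNDR output, with no per-read reseed guarantee. */
	return arch_get_random_long(seed);
}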
static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);

	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}

static inline bool __init __must_check
arch_get_random_seed_long_early(unsigned long *v)
{
	WARN_ON(system_state != SYSTEM_BOOTING);

	if (smccc_trng_available) {
		struct arm_smccc_res res;

		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
		if ((int)res.a0 >= 0) {
			*v = res.a3;
			return true;
		}
	}

	if (__early_cpu_has_rndr() && __arm64_rndr(v))
		return true;

	return false;
}
#define arch_get_random_seed_long_early arch_get_random_seed_long_early

#endif /* _ASM_ARCHRANDOM_H */
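/*
 * Usage note (a sketch, not defined by this header): smccc_trng_available
 * is only declared above; the arch setup code is expected to populate it
 * once the SMCCC conduit is known, along the lines of:
 *
 *	smccc_trng_available = smccc_probe_trng();
 *
 * The exact call site is an assumption in this note, not something this
 * header specifies.
 */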