// SPDX-License-Identifier: GPL-2.0-only
/*
 * set_id_regs - Test for setting ID registers from userspace.
 *
 * Copyright (c) 2023 Google LLC.
 *
 * Test that KVM supports setting ID registers from userspace and handles the
 * feature set correctly.
 */

#include <stdint.h>
#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"
#include <linux/bitfield.h>

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
	FTR_END,			/* Mark the last ftr bits */
};

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

struct reg_ftr_bits {
	char *name;
	bool sign;
	enum ftr_type type;
	u8 shift;
	u64 mask;
	/*
	 * For FTR_EXACT, safe_val is used as the exact safe value.
	 * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
	 */
	s64 safe_val;

	/* Allowed to be changed by the host after run */
	bool mutable;
};

struct test_feature_reg {
	u32 reg;
	const struct reg_ftr_bits *ftr_bits;
};

#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL, MUT)	\
	{								\
		.name = #NAME,						\
		.sign = SIGNED,						\
		.type = TYPE,						\
		.shift = SHIFT,						\
		.mask = MASK,						\
		.safe_val = SAFE_VAL,					\
		.mutable = MUT,						\
	}

#define REG_FTR_BITS(type, reg, field, safe_val) \
	__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
		       reg##_##field##_MASK, safe_val, false)

#define REG_FTR_BITS_MUTABLE(type, reg, field, safe_val) \
	__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
		       reg##_##field##_MASK, safe_val, true)

#define S_REG_FTR_BITS(type, reg, field, safe_val) \
	__REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
		       reg##_##field##_MASK, safe_val, false)

#define REG_FTR_END			\
	{				\
		.type = FTR_END,	\
	}

static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DoubleLock, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, WRPs, 0),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, ID_AA64DFR0_EL1_DebugVer_IMP),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, ID_DFR0_EL1_PerfMon_PMUv3),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, ID_DFR0_EL1_CopDbg_Armv8),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
	REG_FTR_END,
};

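/*
 * For illustration, the AES entry above expands (via __REG_FTR_BITS) to
 * roughly the initializer below; the *_SHIFT and *_MASK macros are assumed
 * to come from the tools copy of the generated arm64 sysreg headers:
 *
 *	{
 *		.name	  = "ID_AA64ISAR0_EL1_AES",
 *		.sign	  = FTR_UNSIGNED,
 *		.type	  = FTR_LOWER_SAFE,
 *		.shift	  = ID_AA64ISAR0_EL1_AES_SHIFT,
 *		.mask	  = ID_AA64ISAR0_EL1_AES_MASK,
 *		.safe_val = 0,
 *		.mutable  = false,
 *	}
 */
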
static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar3_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FPRCVT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, LSUI, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, LSFE, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FAMINMAX, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
	/* GICv3 support will be forced at run time if available */
	REG_FTR_BITS_MUTABLE(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 1),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 1),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 1),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 1),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64pfr1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, DF2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, CSV2_frac, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, SSBS, ID_AA64PFR1_EL1_SSBS_NI),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, BT, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
	REG_FTR_BITS(FTR_EXACT, ID_AA64MMFR0_EL1, TGRAN4_2, 1),
	REG_FTR_BITS(FTR_EXACT, ID_AA64MMFR0_EL1, TGRAN64_2, 1),
	REG_FTR_BITS(FTR_EXACT, ID_AA64MMFR0_EL1, TGRAN16_2, 1),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HCX, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TWED, 0),
	REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
	REG_FTR_END,
};

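/*
 * ID_AA64MMFR1_EL1.SpecSEI above is the one FTR_HIGHER_SAFE entry: a bigger
 * value (SErrors may be generated speculatively) is the more pessimistic
 * claim, so the safe direction is up rather than down. This is assumed to
 * mirror the arm64_ftr_bits policy in the kernel's cpufeature.c.
 */
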
static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr3_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, S1POE, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, S1PIE, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, SCTLRX, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR3_EL1, TCRX, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
	REG_FTR_END,
};

#define TEST_REG(id, table)			\
	{					\
		.reg = id,			\
		.ftr_bits = &((table)[0]),	\
	}

static struct test_feature_reg test_regs[] = {
	TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
	TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
	TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
	TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
	TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
	TEST_REG(SYS_ID_AA64ISAR3_EL1, ftr_id_aa64isar3_el1),
	TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
	TEST_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1_el1),
	TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
	TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
	TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
	TEST_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3_el1),
	TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
};

#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);

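/*
 * With the GUEST_SYNC_ARGS() layout above, each GUEST_REG_SYNC() exits to
 * the host with the register encoding in uc.args[2] and the value the guest
 * read in uc.args[3]; test_guest_reg_read() below relies on exactly this
 * layout when comparing against test_reg_vals[].
 */
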
static void guest_code(void)
{
	GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ISAR3_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64PFR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64MMFR3_EL1);
	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
	GUEST_REG_SYNC(SYS_MPIDR_EL1);
	GUEST_REG_SYNC(SYS_CLIDR_EL1);
	GUEST_REG_SYNC(SYS_CTR_EL0);
	GUEST_REG_SYNC(SYS_MIDR_EL1);
	GUEST_REG_SYNC(SYS_REVIDR_EL1);
	GUEST_REG_SYNC(SYS_AIDR_EL1);

	GUEST_DONE();
}

/* Return a safe value for the given ftr_bits and ftr value */
u64 get_safe_value(const struct reg_ftr_bits *ftr_bits, u64 ftr)
{
	u64 ftr_max = ftr_bits->mask >> ftr_bits->shift;

	TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");

	if (ftr_bits->sign == FTR_UNSIGNED) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = ftr_bits->safe_val;
			break;
		case FTR_LOWER_SAFE:
			if (ftr > ftr_bits->safe_val)
				ftr--;
			break;
		case FTR_HIGHER_SAFE:
			if (ftr < ftr_max)
				ftr++;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr == ftr_max)
				ftr = 0;
			else if (ftr != 0)
				ftr++;
			break;
		default:
			break;
		}
	} else if (ftr != ftr_max) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = ftr_bits->safe_val;
			break;
		case FTR_LOWER_SAFE:
			if (ftr > ftr_bits->safe_val)
				ftr--;
			break;
		case FTR_HIGHER_SAFE:
			if (ftr < ftr_max - 1)
				ftr++;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr != 0 && ftr != ftr_max - 1)
				ftr++;
			break;
		default:
			break;
		}
	}

	return ftr;
}

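/*
 * Worked example, using the tables above: ID_AA64DFR0_EL1.PMUVer is a
 * signed FTR_LOWER_SAFE field with safe_val 0, so if the vCPU reports
 * PMUVer = 1 (PMUv3), get_safe_value() returns 0, one step towards the
 * safe value. For the unsigned FTR_EXACT GIC field, the predefined
 * safe_val is returned regardless of the current value.
 */
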
/* Return an invalid value for the given ftr_bits and ftr value */
u64 get_invalid_value(const struct reg_ftr_bits *ftr_bits, u64 ftr)
{
	u64 ftr_max = ftr_bits->mask >> ftr_bits->shift;

	TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");

	if (ftr_bits->sign == FTR_UNSIGNED) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1);
			break;
		case FTR_LOWER_SAFE:
			ftr++;
			break;
		case FTR_HIGHER_SAFE:
			ftr--;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr == 0)
				ftr = ftr_max;
			else
				ftr--;
			break;
		default:
			break;
		}
	} else if (ftr != ftr_max) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1);
			break;
		case FTR_LOWER_SAFE:
			ftr++;
			break;
		case FTR_HIGHER_SAFE:
			ftr--;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr == 0)
				ftr = ftr_max - 1;
			else
				ftr--;
			break;
		default:
			break;
		}
	} else {
		ftr = 0;
	}

	return ftr;
}

static u64 test_reg_set_success(struct kvm_vcpu *vcpu, u64 reg,
				const struct reg_ftr_bits *ftr_bits)
{
	u8 shift = ftr_bits->shift;
	u64 mask = ftr_bits->mask;
	u64 val, new_val, ftr;

	val = vcpu_get_reg(vcpu, reg);
	ftr = (val & mask) >> shift;

	ftr = get_safe_value(ftr_bits, ftr);

	ftr <<= shift;
	val &= ~mask;
	val |= ftr;

	vcpu_set_reg(vcpu, reg, val);
	new_val = vcpu_get_reg(vcpu, reg);
	TEST_ASSERT_EQ(new_val, val);

	return new_val;
}

static void test_reg_set_fail(struct kvm_vcpu *vcpu, u64 reg,
			      const struct reg_ftr_bits *ftr_bits)
{
	u8 shift = ftr_bits->shift;
	u64 mask = ftr_bits->mask;
	u64 val, old_val, ftr;
	int r;

	val = vcpu_get_reg(vcpu, reg);
	ftr = (val & mask) >> shift;

	ftr = get_invalid_value(ftr_bits, ftr);

	old_val = val;
	ftr <<= shift;
	val &= ~mask;
	val |= ftr;

	r = __vcpu_set_reg(vcpu, reg, val);
	TEST_ASSERT(r < 0 && errno == EINVAL,
		    "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);

	val = vcpu_get_reg(vcpu, reg);
	TEST_ASSERT_EQ(val, old_val);
}

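/*
 * The two helpers above encode the userspace ABI contract under test: a
 * write that moves a field in the safe direction must round-trip through
 * KVM_SET_ONE_REG/KVM_GET_ONE_REG unchanged, while a write in the unsafe
 * direction must fail with EINVAL and leave the old value in place.
 */
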
static u64 test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];

#define encoding_to_range_idx(encoding)						\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding),	\
				     sys_reg_CRn(encoding), sys_reg_CRm(encoding),	\
				     sys_reg_Op2(encoding))

static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
{
	u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
	struct reg_mask_range range = {
		.addr = (__u64)masks,
	};
	int ret;

	/* KVM should return an error when a reserved field is not zero */
	range.reserved[0] = 1;
	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");

	/* Get writable masks for feature ID registers */
	memset(range.reserved, 0, sizeof(range.reserved));
	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);

	for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
		const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
		u32 reg_id = test_regs[i].reg;
		u64 reg = KVM_ARM64_SYS_REG(reg_id);
		int idx;

		/* Get the index into the masks array for the idreg */
		idx = encoding_to_range_idx(reg_id);

		for (int j = 0; ftr_bits[j].type != FTR_END; j++) {
			/* Skip aarch32 regs on an aarch64-only system, since they are RAZ/WI. */
			if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
				ksft_test_result_skip("%s on AARCH64 only system\n",
						      ftr_bits[j].name);
				continue;
			}

			/* Make sure the feature field is writable */
			TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);

			test_reg_set_fail(vcpu, reg, &ftr_bits[j]);

			test_reg_vals[idx] = test_reg_set_success(vcpu, reg,
								  &ftr_bits[j]);

			ksft_test_result_pass("%s\n", ftr_bits[j].name);
		}
	}
}

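/*
 * A note on the mask discovery above: KVM_ARM_GET_REG_WRITABLE_MASKS fills
 * in one u64 per register in KVM's feature ID register space, with set bits
 * marking what userspace may modify. The TEST_ASSERT_EQ() in the loop
 * cross-checks every field this test knows about against those masks before
 * exercising it.
 */
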
#define MPAM_IDREG_TEST	6
static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
{
	u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
	struct reg_mask_range range = {
		.addr = (__u64)masks,
	};
	u64 val;
	int idx, err;

	/*
	 * If ID_AA64PFR0_EL1.MPAM is _not_ officially modifiable and is zero,
	 * check that, if it can be set to 1 (i.e. MPAM is supported by the
	 * hardware), it can't be set to any other value.
	 */

	/* Get writable masks for feature ID registers */
	memset(range.reserved, 0, sizeof(range.reserved));
	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);

	/* Writeable? Nothing to test! */
	idx = encoding_to_range_idx(SYS_ID_AA64PFR0_EL1);
	if ((masks[idx] & ID_AA64PFR0_EL1_MPAM_MASK) == ID_AA64PFR0_EL1_MPAM_MASK) {
		ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is officially writable, nothing to test\n");
		return;
	}

	/* Get the id register value */
	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));

	/* Try to set MPAM=0. This should always be possible. */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
	val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 0);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
	if (err)
		ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM=0 was not accepted\n");
	else
		ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=0 worked\n");

	/* Try to set MPAM=1 */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
	val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 1);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
	if (err)
		ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is not writable, nothing to test\n");
	else
		ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=1 was writable\n");

	/* Try to set MPAM=2 */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
	val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 2);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
	if (err)
		ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM not arbitrarily modifiable\n");
	else
		ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM value should not be ignored\n");

	/* And again for ID_AA64PFR1_EL1.MPAM_frac */
	idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1);
	if ((masks[idx] & ID_AA64PFR1_EL1_MPAM_frac_MASK) == ID_AA64PFR1_EL1_MPAM_frac_MASK) {
		ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is officially writable, nothing to test\n");
		return;
	}

	/* Get the id register value */
	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));

	/* Try to set MPAM_frac=0. This should always be possible. */
	val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
	val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 0);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
	if (err)
		ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac=0 was not accepted\n");
	else
		ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac=0 worked\n");

	/* Try to set MPAM_frac=1 */
	val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
	val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 1);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
	if (err)
		ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is not writable, nothing to test\n");
	else
		ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac=1 was writable\n");

	/* Try to set MPAM_frac=2 */
	val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
	val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 2);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
	if (err)
		ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac not arbitrarily modifiable\n");
	else
		ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac value should not be ignored\n");
}

#define MTE_IDREG_TEST 1
static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
{
	u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
	struct reg_mask_range range = {
		.addr = (__u64)masks,
	};
	u64 val;
	u64 mte;
	u64 mte_frac;
	int idx, err;

	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
	mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val);
	if (!mte) {
		ksft_test_result_skip("MTE capability not supported, nothing to test\n");
		return;
	}

	/* Get writable masks for feature ID registers */
	memset(range.reserved, 0, sizeof(range.reserved));
	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);

	idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1);
	if ((masks[idx] & ID_AA64PFR1_EL1_MTE_frac_MASK) == ID_AA64PFR1_EL1_MTE_frac_MASK) {
		ksft_test_result_skip("ID_AA64PFR1_EL1.MTE_frac is officially writable, nothing to test\n");
		return;
	}

	/*
	 * When MTE is supported but MTE_ASYMM is not (ID_AA64PFR1_EL1.MTE == 2),
	 * ID_AA64PFR1_EL1.MTE_frac == 0xF indicates that MTE_ASYNC is
	 * unsupported, and MTE_frac == 0 indicates that it is supported.
	 *
	 * As MTE_frac was previously unconditionally read as 0, check
	 * that setting it to 0 succeeds but does not change MTE_frac
	 * from unsupported (0xF) to supported (0).
	 */
	mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
	if (mte != ID_AA64PFR1_EL1_MTE_MTE2 ||
	    mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) {
		ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM is supported, nothing to test\n");
		return;
	}

	/* Try to set MTE_frac=0. */
	val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
	val |= FIELD_PREP(ID_AA64PFR1_EL1_MTE_frac_MASK, 0);
	err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
	if (err) {
		ksft_test_result_fail("ID_AA64PFR1_EL1.MTE_frac=0 was not accepted\n");
		return;
	}

	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
	mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
	if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI)
		ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n");
	else
		ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n");
}

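/*
 * Fields marked mutable (currently only ID_AA64PFR0_EL1.GIC, which KVM
 * forces when a GICv3 is instantiated) may legitimately differ from what
 * the test wrote, so normalise them to safe_val before any comparison.
 */
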
static u64 reset_mutable_bits(u32 id, u64 val)
{
	struct test_feature_reg *reg = NULL;

	for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
		if (test_regs[i].reg == id) {
			reg = &test_regs[i];
			break;
		}
	}

	if (!reg)
		return val;

	for (const struct reg_ftr_bits *bits = reg->ftr_bits; bits->type != FTR_END; bits++) {
		if (bits->mutable) {
			val &= ~bits->mask;
			val |= bits->safe_val << bits->shift;
		}
	}

	return val;
}

static void test_guest_reg_read(struct kvm_vcpu *vcpu)
{
	bool done = false;
	struct ucall uc;

	while (!done) {
		u64 val;

		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_SYNC:
			val = test_reg_vals[encoding_to_range_idx(uc.args[2])];
			val = reset_mutable_bits(uc.args[2], val);

			/* Make sure the written values are seen by the guest */
			TEST_ASSERT_EQ(val, reset_mutable_bits(uc.args[2], uc.args[3]));
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	}
}

/* Politely lifted from arch/arm64/include/asm/cache.h */
/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
#define CLIDR_CTYPE(clidr, level)	\
	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))

static void test_clidr(struct kvm_vcpu *vcpu)
{
	u64 clidr;
	int level;

	clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1));

	/* find the first empty level in the cache hierarchy */
	for (level = 1; level <= 7; level++) {
		if (!CLIDR_CTYPE(clidr, level))
			break;
	}

	/*
	 * If you have a mind-boggling 7 levels of cache, congratulations, you
	 * get to fix this.
	 */
	TEST_ASSERT(level <= 7, "can't find an empty level in cache hierarchy");

	/* stick in a unified cache level */
	clidr |= BIT(2) << CLIDR_CTYPE_SHIFT(level);

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), clidr);
	test_reg_vals[encoding_to_range_idx(SYS_CLIDR_EL1)] = clidr;
}

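/*
 * Sketch of the idea behind the CTR_EL0 tweak below: clearing CTR_EL0.DIC
 * (so instruction cache maintenance is advertised as required) and, when
 * possible, decrementing IminLine (bits [3:0], the minimum instruction
 * cache line size) both move the register in the more conservative
 * direction, which KVM is expected to accept from userspace.
 */
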
static void test_ctr(struct kvm_vcpu *vcpu)
{
	u64 ctr;

	ctr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0));
	ctr &= ~CTR_EL0_DIC_MASK;
	if (ctr & CTR_EL0_IminLine_MASK)
		ctr--;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), ctr);
	test_reg_vals[encoding_to_range_idx(SYS_CTR_EL0)] = ctr;
}

static void test_id_reg(struct kvm_vcpu *vcpu, u32 id)
{
	u64 val;

	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(id));
	val++;
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(id), val);
	test_reg_vals[encoding_to_range_idx(id)] = val;
}

static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
{
	test_clidr(vcpu);
	test_ctr(vcpu);

	test_id_reg(vcpu, SYS_MPIDR_EL1);
	ksft_test_result_pass("%s\n", __func__);
}

static void test_vcpu_non_ftr_id_regs(struct kvm_vcpu *vcpu)
{
	test_id_reg(vcpu, SYS_MIDR_EL1);
	test_id_reg(vcpu, SYS_REVIDR_EL1);
	test_id_reg(vcpu, SYS_AIDR_EL1);

	ksft_test_result_pass("%s\n", __func__);
}

static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, u32 encoding)
{
	size_t idx = encoding_to_range_idx(encoding);
	u64 observed;

	observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
	TEST_ASSERT_EQ(reset_mutable_bits(encoding, test_reg_vals[idx]),
		       reset_mutable_bits(encoding, observed));
}

static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * Calls KVM_ARM_VCPU_INIT behind the scenes, which will do an
	 * architectural reset of the vCPU.
	 */
	aarch64_vcpu_setup(vcpu, NULL);

	for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
		test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);

	test_assert_id_reg_unchanged(vcpu, SYS_MPIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_CTR_EL0);
	test_assert_id_reg_unchanged(vcpu, SYS_MIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_REVIDR_EL1);
	test_assert_id_reg_unchanged(vcpu, SYS_AIDR_EL1);

	ksft_test_result_pass("%s\n", __func__);
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	bool aarch64_only;
	u64 val, el0;
	int test_cnt, i, j;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_WRITABLE_IMP_ID_REGS));

	test_wants_mte();

	vm = vm_create(1);
	vm_enable_cap(vm, KVM_CAP_ARM_WRITABLE_IMP_ID_REGS, 0);
	vcpu = vm_vcpu_add(vm, 0, guest_code);
	kvm_arch_vm_finalize_vcpus(vm);

	/* Check for an AARCH64-only system */
	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
	el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
	aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);

	ksft_print_header();

	test_cnt = 3 + MPAM_IDREG_TEST + MTE_IDREG_TEST;
	for (i = 0; i < ARRAY_SIZE(test_regs); i++)
		for (j = 0; test_regs[i].ftr_bits[j].type != FTR_END; j++)
			test_cnt++;

	ksft_set_plan(test_cnt);

	test_vm_ftr_id_regs(vcpu, aarch64_only);
	test_vcpu_ftr_id_regs(vcpu);
	test_vcpu_non_ftr_id_regs(vcpu);
	test_user_set_mpam_reg(vcpu);
	test_user_set_mte_reg(vcpu);

	test_guest_reg_read(vcpu);

	test_reset_preserves_id_regs(vcpu);

	kvm_vm_free(vm);

	ksft_finished();
}