// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMX tests
 *
 * Copyright (C) 2021, Intel, Inc.
 *
 * Tests for AMX #NM exception and save/restore.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#ifndef __x86_64__
# error This test is 64-bit only
#endif

#define NUM_TILES	8
#define TILE_SIZE	1024
#define XSAVE_SIZE	((NUM_TILES * TILE_SIZE) + PAGE_SIZE)

/* Constants associated with the tile configuration: */
#define PALETTE_TABLE_INDEX	1
#define MAX_TILES		16
#define RESERVED_BYTES		14

#define XSAVE_HDR_OFFSET	512

struct tile_config {
	u8 palette_id;
	u8 start_row;
	u8 reserved[RESERVED_BYTES];
	u16 colsb[MAX_TILES];
	u8 rows[MAX_TILES];
};

struct tile_data {
	u8 data[NUM_TILES * TILE_SIZE];
};

struct xtile_info {
	u16 bytes_per_tile;
	u16 bytes_per_row;
	u16 max_names;
	u16 max_rows;
	u32 xsave_offset;
	u32 xsave_size;
};

static struct xtile_info xtile;
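
/*
 * The AMX instructions used by the guest are emitted as raw bytes so the
 * test builds even with a toolchain whose assembler doesn't know AMX.
 * Decoded, the byte sequences below are ldtilecfg [rax],
 * tileloadd tmm0, [rax+rdx*1] and tilerelease, in that order.
 */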
static inline void __ldtilecfg(void *cfg)
{
	asm volatile(".byte 0xc4,0xe2,0x78,0x49,0x00"
		     : : "a"(cfg));
}

static inline void __tileloadd(void *tile)
{
	asm volatile(".byte 0xc4,0xe2,0x7b,0x4b,0x04,0x10"
		     : : "a"(tile), "d"(0));
}

static inline void __tilerelease(void)
{
	asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::);
}

static inline void __xsavec(struct xstate *xstate, uint64_t rfbm)
{
	uint32_t rfbm_lo = rfbm;
	uint32_t rfbm_hi = rfbm >> 32;

	asm volatile("xsavec (%%rdi)"
		     : : "D" (xstate), "a" (rfbm_lo), "d" (rfbm_hi)
		     : "memory");
}

static void check_xtile_info(void)
{
	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0));
	GUEST_ASSERT(this_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE_XCR0) <= XSAVE_SIZE);

	xtile.xsave_offset = this_cpu_property(X86_PROPERTY_XSTATE_TILE_OFFSET);
	GUEST_ASSERT(xtile.xsave_offset == 2816);
	xtile.xsave_size = this_cpu_property(X86_PROPERTY_XSTATE_TILE_SIZE);
	GUEST_ASSERT(xtile.xsave_size == 8192);
	GUEST_ASSERT(sizeof(struct tile_data) >= xtile.xsave_size);

	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_MAX_PALETTE_TABLES));
	GUEST_ASSERT(this_cpu_property(X86_PROPERTY_AMX_MAX_PALETTE_TABLES) >=
		     PALETTE_TABLE_INDEX);

	GUEST_ASSERT(this_cpu_has_p(X86_PROPERTY_AMX_NR_TILE_REGS));
	xtile.max_names = this_cpu_property(X86_PROPERTY_AMX_NR_TILE_REGS);
	GUEST_ASSERT(xtile.max_names == 8);
	xtile.bytes_per_tile = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_TILE);
	GUEST_ASSERT(xtile.bytes_per_tile == 1024);
	xtile.bytes_per_row = this_cpu_property(X86_PROPERTY_AMX_BYTES_PER_ROW);
	GUEST_ASSERT(xtile.bytes_per_row == 64);
	xtile.max_rows = this_cpu_property(X86_PROPERTY_AMX_MAX_ROWS);
	GUEST_ASSERT(xtile.max_rows == 16);
}

static void set_tilecfg(struct tile_config *cfg)
{
	int i;

	/* Only palette id 1 */
	cfg->palette_id = 1;
	for (i = 0; i < xtile.max_names; i++) {
		cfg->colsb[i] = xtile.bytes_per_row;
		cfg->rows[i] = xtile.max_rows;
	}
}

static void init_regs(void)
{
	uint64_t cr4, xcr0;

	GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE));

	/* Turn on CR4.OSXSAVE */
	cr4 = get_cr4();
	cr4 |= X86_CR4_OSXSAVE;
	set_cr4(cr4);
	GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));

	xcr0 = xgetbv(0);
	xcr0 |= XFEATURE_MASK_XTILE;
	xsetbv(0x0, xcr0);
	GUEST_ASSERT((xgetbv(0) & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE);
}

static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg,
						    struct tile_data *tiledata,
						    struct xstate *xstate)
{
	init_regs();
	check_xtile_info();
	GUEST_SYNC(1);

	/* xfd=0, enable amx */
	wrmsr(MSR_IA32_XFD, 0);
	GUEST_SYNC(2);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == 0);
	set_tilecfg(amx_cfg);
	__ldtilecfg(amx_cfg);
	GUEST_SYNC(3);
	/* Verify save/restore when the vCPU traps to userspace */
	__tileloadd(tiledata);
	GUEST_SYNC(4);
	__tilerelease();
	GUEST_SYNC(5);
	/*
	 * After XSAVEC, XTILEDATA is cleared in the xstate_bv but is set in
	 * the xcomp_bv.
	 */
	xstate->header.xstate_bv = XFEATURE_MASK_XTILE_DATA;
	__xsavec(xstate, XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
	GUEST_ASSERT(xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA);

	/* xfd=0x40000, disable amx tiledata */
	wrmsr(MSR_IA32_XFD, XFEATURE_MASK_XTILE_DATA);

	/*
	 * XTILEDATA is cleared in xstate_bv but set in xcomp_bv; this
	 * property remains the same even when amx tiledata is disabled by
	 * IA32_XFD.
	 */
	xstate->header.xstate_bv = XFEATURE_MASK_XTILE_DATA;
	__xsavec(xstate, XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(!(xstate->header.xstate_bv & XFEATURE_MASK_XTILE_DATA));
	GUEST_ASSERT((xstate->header.xcomp_bv & XFEATURE_MASK_XTILE_DATA));

	GUEST_SYNC(6);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
	set_tilecfg(amx_cfg);
	__ldtilecfg(amx_cfg);
	/* Trigger #NM exception */
	__tileloadd(tiledata);
	GUEST_SYNC(10);

	GUEST_DONE();
}

void guest_nm_handler(struct ex_regs *regs)
{
	/* Verify that #NM was triggered by XFEATURE_MASK_XTILE_DATA */
	GUEST_SYNC(7);
	GUEST_ASSERT(!(get_cr0() & X86_CR0_TS));
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
	GUEST_SYNC(8);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILE_DATA);
	GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILE_DATA);
	/* Clear xfd_err */
	wrmsr(MSR_IA32_XFD_ERR, 0);
	/* xfd=0, enable amx again */
	wrmsr(MSR_IA32_XFD, 0);
	GUEST_SYNC(9);
}
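
/*
 * Host side of the test. The GUEST_SYNC() numbers map to the guest flow
 * above: 1-3 cover feature checks and tile configuration, 4 and 10 arrive
 * with live tile data and trigger the save/restore content check, 5 follows
 * tilerelease, 6 follows disabling XTILEDATA via IA32_XFD, and 7-9 come
 * from the #NM handler. On every exit the host additionally migrates the
 * vCPU state into a freshly recreated VM and verifies the GPRs survive.
 */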
int main(int argc, char *argv[])
{
	struct kvm_regs regs1, regs2;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_x86_state *state;
	int xsave_restore_size;
	vm_vaddr_t amx_cfg, tiledata, xstate;
	struct ucall uc;
	u32 amx_offset;
	int ret;

	/*
	 * Note, all off-by-default features must be enabled before anything
	 * caches KVM_GET_SUPPORTED_CPUID, e.g. before using kvm_cpu_has().
	 */
	vm_xsave_require_permission(XFEATURE_MASK_XTILE_DATA);

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_AMX_TILE));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILECFG));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA_XFD));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	TEST_ASSERT(kvm_cpu_has_p(X86_PROPERTY_XSTATE_MAX_SIZE),
		    "KVM should enumerate max XSAVE size when XSAVE is supported");
	xsave_restore_size = kvm_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE);

	vcpu_regs_get(vcpu, &regs1);

	/* Register #NM handler */
	vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);

	/* amx cfg for guest_code */
	amx_cfg = vm_vaddr_alloc_page(vm);
	memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize());

	/* amx tiledata for guest_code */
	tiledata = vm_vaddr_alloc_pages(vm, 2);
	memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize());

	/* XSAVE state for guest_code */
	xstate = vm_vaddr_alloc_pages(vm, DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
	memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
	vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate);

	for (;;) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			switch (uc.args[1]) {
			case 1:
			case 2:
			case 3:
			case 5:
			case 6:
			case 7:
			case 8:
				fprintf(stderr, "GUEST_SYNC(%ld)\n", uc.args[1]);
				break;
			case 4:
			case 10:
				fprintf(stderr,
					"GUEST_SYNC(%ld), check save/restore status\n", uc.args[1]);

				/*
				 * In compacted format, XTILEDATA is the last
				 * component, so its offset is the total XSAVE
				 * area size minus the 8K of amx tile data.
				 */
				amx_offset = xsave_restore_size - NUM_TILES * TILE_SIZE;
				state = vcpu_save_state(vcpu);
				void *amx_start = (void *)state->xsave + amx_offset;
				void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
				/* Only check the TMM0 register, i.e. one tile */
				ret = memcmp(amx_start, tiles_data, TILE_SIZE);
				TEST_ASSERT(ret == 0, "memcmp failed, ret=%d", ret);
				kvm_x86_state_cleanup(state);
				break;
			case 9:
				fprintf(stderr,
					"GUEST_SYNC(%ld), #NM exception and enable amx\n", uc.args[1]);
				break;
			}
			break;
		case UCALL_DONE:
			fprintf(stderr, "UCALL_DONE\n");
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		state = vcpu_save_state(vcpu);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vcpu, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM. */
		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		kvm_x86_state_cleanup(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vcpu, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}
done:
	kvm_vm_free(vm);
}
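
/*
 * Build/run sketch, assuming the standard kernel selftests layout (the
 * directory and binary names below are conventions, not taken from this
 * file, and may differ across kernel trees):
 *   $ make -C tools/testing/selftests/kvm
 *   $ ./tools/testing/selftests/kvm/x86_64/amx_test
 */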