// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2021 ARM Limited.
//
// Assembly portion of the syscall ABI test

//
// Load values from memory into registers, invoke a syscall and save the
// register values back to memory for later checking. The syscall to be
// invoked is configured in x8 of the input GPR data.
//
// x0:	SVE VL, 0 for FP only
// x1:	SME VL
//
//	GPRs:	gpr_in, gpr_out
//	FPRs:	fpr_in, fpr_out
//	Zn:	z_in, z_out
//	Pn:	p_in, p_out
//	FFR:	ffr_in, ffr_out
//	ZA:	za_in, za_out
//	ZT0:	zt_in, zt_out
//	SVCR:	svcr_in, svcr_out

#include "syscall-abi.h"

.arch_extension sve

#define ID_AA64SMFR0_EL1_SMEver_SHIFT		56
#define ID_AA64SMFR0_EL1_SMEver_WIDTH		4

/*
 * LDR (vector to ZA array):
 *	LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
 */
.macro _ldr_za nw, nxbase, offset=0
	.inst	0xe1000000			\
		| (((\nw) & 3) << 13)		\
		| ((\nxbase) << 5)		\
		| ((\offset) & 7)
.endm

/*
 * STR (vector from ZA array):
 *	STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
 */
.macro _str_za nw, nxbase, offset=0
	.inst	0xe1200000			\
		| (((\nw) & 3) << 13)		\
		| ((\nxbase) << 5)		\
		| ((\offset) & 7)
.endm

/*
 * LDR (ZT0):
 *	LDR ZT0, [X\nx]
 */
.macro _ldr_zt nx
	.inst	0xe11f8000			\
		| (((\nx) & 0x1f) << 5)
.endm

/*
 * STR (ZT0):
 *	STR ZT0, [X\nx]
 */
.macro _str_zt nx
	.inst	0xe13f8000			\
		| (((\nx) & 0x1f) << 5)
.endm
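/*
 * The SME/SME2 operations above are hand-encoded as raw .inst values
 * rather than written as mnemonics, which lets this file build with
 * assemblers that lack SME/SME2 support (note that only the SVE
 * extension is enabled above). The ZA forms encode just two bits of
 * the vector select register ((\nw) & 3) since the architecture only
 * permits w12-w15 there - this is why the ZA load/store loops below
 * use x12 as their scratch/index register.
 */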
.globl do_syscall
do_syscall:
	// Store callee saved registers x19-x30 (96 bytes) plus x0 and x1
	stp	x29, x30, [sp, #-112]!
	mov	x29, sp
	stp	x0, x1, [sp, #16]
	stp	x19, x20, [sp, #32]
	stp	x21, x22, [sp, #48]
	stp	x23, x24, [sp, #64]
	stp	x25, x26, [sp, #80]
	stp	x27, x28, [sp, #96]

	// Set SVCR if we're doing SME, skipping all the SME state if
	// not - x2 holds nothing useful when x1 is zero
	cbz	x1, 1f
	adrp	x2, svcr_in
	ldr	x2, [x2, :lo12:svcr_in]
	msr	S3_3_C4_C2_2, x2

	// Load ZA and ZT0 if enabled - uses x12 as scratch due to SME LDR
	tbz	x2, #SVCR_ZA_SHIFT, 1f
	mov	w12, #0
	ldr	x2, =za_in
2:	_ldr_za 12, 2
	add	x2, x2, x1
	add	x12, x12, #1
	cmp	x1, x12
	bne	2b

	// ZT0 is only present when SME2 is implemented (SMEver non-zero)
	mrs	x2, S3_0_C0_C4_5	// ID_AA64SMFR0_EL1
	ubfx	x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
			 #ID_AA64SMFR0_EL1_SMEver_WIDTH
	cbz	x2, 1f
	adrp	x2, zt_in
	add	x2, x2, :lo12:zt_in
	_ldr_zt 2
1:

	// Load GPRs x8-x28, and save our FP and LR for later comparison
	ldr	x2, =gpr_in
	add	x2, x2, #64
	ldp	x8, x9, [x2], #16
	ldp	x10, x11, [x2], #16
	ldp	x12, x13, [x2], #16
	ldp	x14, x15, [x2], #16
	ldp	x16, x17, [x2], #16
	ldp	x18, x19, [x2], #16
	ldp	x20, x21, [x2], #16
	ldp	x22, x23, [x2], #16
	ldp	x24, x25, [x2], #16
	ldp	x26, x27, [x2], #16
	ldr	x28, [x2], #8
	str	x29, [x2], #8		// FP
	str	x30, [x2], #8		// LR

	// Load FPRs if we're not doing SVE
	cbnz	x0, 1f
	ldr	x2, =fpr_in
	ldp	q0, q1, [x2]
	ldp	q2, q3, [x2, #16 * 2]
	ldp	q4, q5, [x2, #16 * 4]
	ldp	q6, q7, [x2, #16 * 6]
	ldp	q8, q9, [x2, #16 * 8]
	ldp	q10, q11, [x2, #16 * 10]
	ldp	q12, q13, [x2, #16 * 12]
	ldp	q14, q15, [x2, #16 * 14]
	ldp	q16, q17, [x2, #16 * 16]
	ldp	q18, q19, [x2, #16 * 18]
	ldp	q20, q21, [x2, #16 * 20]
	ldp	q22, q23, [x2, #16 * 22]
	ldp	q24, q25, [x2, #16 * 24]
	ldp	q26, q27, [x2, #16 * 26]
	ldp	q28, q29, [x2, #16 * 28]
	ldp	q30, q31, [x2, #16 * 30]
1:

	// Load the SVE registers if we're doing SVE/SME
	cbz	x0, 1f

	ldr	x2, =z_in
	ldr	z0, [x2, #0, MUL VL]
	ldr	z1, [x2, #1, MUL VL]
	ldr	z2, [x2, #2, MUL VL]
	ldr	z3, [x2, #3, MUL VL]
	ldr	z4, [x2, #4, MUL VL]
	ldr	z5, [x2, #5, MUL VL]
	ldr	z6, [x2, #6, MUL VL]
	ldr	z7, [x2, #7, MUL VL]
	ldr	z8, [x2, #8, MUL VL]
	ldr	z9, [x2, #9, MUL VL]
	ldr	z10, [x2, #10, MUL VL]
	ldr	z11, [x2, #11, MUL VL]
	ldr	z12, [x2, #12, MUL VL]
	ldr	z13, [x2, #13, MUL VL]
	ldr	z14, [x2, #14, MUL VL]
	ldr	z15, [x2, #15, MUL VL]
	ldr	z16, [x2, #16, MUL VL]
	ldr	z17, [x2, #17, MUL VL]
	ldr	z18, [x2, #18, MUL VL]
	ldr	z19, [x2, #19, MUL VL]
	ldr	z20, [x2, #20, MUL VL]
	ldr	z21, [x2, #21, MUL VL]
	ldr	z22, [x2, #22, MUL VL]
	ldr	z23, [x2, #23, MUL VL]
	ldr	z24, [x2, #24, MUL VL]
	ldr	z25, [x2, #25, MUL VL]
	ldr	z26, [x2, #26, MUL VL]
	ldr	z27, [x2, #27, MUL VL]
	ldr	z28, [x2, #28, MUL VL]
	ldr	z29, [x2, #29, MUL VL]
	ldr	z30, [x2, #30, MUL VL]
	ldr	z31, [x2, #31, MUL VL]

	// Only write a non-zero FFR - the test patterns must be zero
	// since the syscall should clear FFR, and skipping the write for
	// zero patterns keeps us safe on SME systems without FA64, where
	// FFR is not accessible in streaming mode.
	ldr	x2, =ffr_in
	ldr	p0, [x2]
	ldr	x2, [x2, #0]
	cbz	x2, 2f
	wrffr	p0.b
2:

	ldr	x2, =p_in
	ldr	p0, [x2, #0, MUL VL]
	ldr	p1, [x2, #1, MUL VL]
	ldr	p2, [x2, #2, MUL VL]
	ldr	p3, [x2, #3, MUL VL]
	ldr	p4, [x2, #4, MUL VL]
	ldr	p5, [x2, #5, MUL VL]
	ldr	p6, [x2, #6, MUL VL]
	ldr	p7, [x2, #7, MUL VL]
	ldr	p8, [x2, #8, MUL VL]
	ldr	p9, [x2, #9, MUL VL]
	ldr	p10, [x2, #10, MUL VL]
	ldr	p11, [x2, #11, MUL VL]
	ldr	p12, [x2, #12, MUL VL]
	ldr	p13, [x2, #13, MUL VL]
	ldr	p14, [x2, #14, MUL VL]
	ldr	p15, [x2, #15, MUL VL]
1:
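	// At this point every register class under test holds its input
	// pattern. x8, loaded from gpr_in above, holds the syscall number
	// as per the arm64 convention of passing it in x8 to SVC.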
	// Do the syscall
	svc	#0

	// Save GPRs x8-x30
	ldr	x2, =gpr_out
	add	x2, x2, #64
	stp	x8, x9, [x2], #16
	stp	x10, x11, [x2], #16
	stp	x12, x13, [x2], #16
	stp	x14, x15, [x2], #16
	stp	x16, x17, [x2], #16
	stp	x18, x19, [x2], #16
	stp	x20, x21, [x2], #16
	stp	x22, x23, [x2], #16
	stp	x24, x25, [x2], #16
	stp	x26, x27, [x2], #16
	stp	x28, x29, [x2], #16
	str	x30, [x2]

	// Restore x0 and x1 for feature checks
	ldp	x0, x1, [sp, #16]

	// Save FPSIMD state
	ldr	x2, =fpr_out
	stp	q0, q1, [x2]
	stp	q2, q3, [x2, #16 * 2]
	stp	q4, q5, [x2, #16 * 4]
	stp	q6, q7, [x2, #16 * 6]
	stp	q8, q9, [x2, #16 * 8]
	stp	q10, q11, [x2, #16 * 10]
	stp	q12, q13, [x2, #16 * 12]
	stp	q14, q15, [x2, #16 * 14]
	stp	q16, q17, [x2, #16 * 16]
	stp	q18, q19, [x2, #16 * 18]
	stp	q20, q21, [x2, #16 * 20]
	stp	q22, q23, [x2, #16 * 22]
	stp	q24, q25, [x2, #16 * 24]
	stp	q26, q27, [x2, #16 * 26]
	stp	q28, q29, [x2, #16 * 28]
	stp	q30, q31, [x2, #16 * 30]

	// Save SVCR if we're doing SME, skipping all the SME state if not
	cbz	x1, 1f
	mrs	x2, S3_3_C4_C2_2
	adrp	x3, svcr_out
	str	x2, [x3, :lo12:svcr_out]

	// Save ZA if it's enabled - uses x12 as scratch due to SME STR
	tbz	x2, #SVCR_ZA_SHIFT, 1f
	mov	w12, #0
	ldr	x2, =za_out
2:	_str_za 12, 2
	add	x2, x2, x1
	add	x12, x12, #1
	cmp	x1, x12
	bne	2b

	// ZT0 is only present when SME2 is implemented (SMEver non-zero)
	mrs	x2, S3_0_C0_C4_5	// ID_AA64SMFR0_EL1
	ubfx	x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
			 #ID_AA64SMFR0_EL1_SMEver_WIDTH
	cbz	x2, 1f
	adrp	x2, zt_out
	add	x2, x2, :lo12:zt_out
	_str_zt 2
1:

	// Save the SVE state if we have some
	cbz	x0, 1f

	ldr	x2, =z_out
	str	z0, [x2, #0, MUL VL]
	str	z1, [x2, #1, MUL VL]
	str	z2, [x2, #2, MUL VL]
	str	z3, [x2, #3, MUL VL]
	str	z4, [x2, #4, MUL VL]
	str	z5, [x2, #5, MUL VL]
	str	z6, [x2, #6, MUL VL]
	str	z7, [x2, #7, MUL VL]
	str	z8, [x2, #8, MUL VL]
	str	z9, [x2, #9, MUL VL]
	str	z10, [x2, #10, MUL VL]
	str	z11, [x2, #11, MUL VL]
	str	z12, [x2, #12, MUL VL]
	str	z13, [x2, #13, MUL VL]
	str	z14, [x2, #14, MUL VL]
	str	z15, [x2, #15, MUL VL]
	str	z16, [x2, #16, MUL VL]
	str	z17, [x2, #17, MUL VL]
	str	z18, [x2, #18, MUL VL]
	str	z19, [x2, #19, MUL VL]
	str	z20, [x2, #20, MUL VL]
	str	z21, [x2, #21, MUL VL]
	str	z22, [x2, #22, MUL VL]
	str	z23, [x2, #23, MUL VL]
	str	z24, [x2, #24, MUL VL]
	str	z25, [x2, #25, MUL VL]
	str	z26, [x2, #26, MUL VL]
	str	z27, [x2, #27, MUL VL]
	str	z28, [x2, #28, MUL VL]
	str	z29, [x2, #29, MUL VL]
	str	z30, [x2, #30, MUL VL]
	str	z31, [x2, #31, MUL VL]

	ldr	x2, =p_out
	str	p0, [x2, #0, MUL VL]
	str	p1, [x2, #1, MUL VL]
	str	p2, [x2, #2, MUL VL]
	str	p3, [x2, #3, MUL VL]
	str	p4, [x2, #4, MUL VL]
	str	p5, [x2, #5, MUL VL]
	str	p6, [x2, #6, MUL VL]
	str	p7, [x2, #7, MUL VL]
	str	p8, [x2, #8, MUL VL]
	str	p9, [x2, #9, MUL VL]
	str	p10, [x2, #10, MUL VL]
	str	p11, [x2, #11, MUL VL]
	str	p12, [x2, #12, MUL VL]
	str	p13, [x2, #13, MUL VL]
	str	p14, [x2, #14, MUL VL]
	str	p15, [x2, #15, MUL VL]

	// Only read FFR back if we wrote a value to it above, mirroring
	// the FA64 handling on the load side
	ldr	x2, =ffr_in
	ldr	x2, [x2, #0]
	cbz	x2, 1f
	ldr	x2, =ffr_out
	rdffr	p0.b
	str	p0, [x2]
1:

	// Restore callee saved registers x19-x30
	ldp	x19, x20, [sp, #32]
	ldp	x21, x22, [sp, #48]
	ldp	x23, x24, [sp, #64]
	ldp	x25, x26, [sp, #80]
	ldp	x27, x28, [sp, #96]
	ldp	x29, x30, [sp], #112

	// Clear SVCR if we were doing SME so future tests don't start out
	// with ZA enabled
	cbz	x1, 1f
	msr	S3_3_C4_C2_2, xzr
1:

	ret
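/*
 * The gpr_in/gpr_out, fpr_in/fpr_out, z_in/z_out, p_in/p_out,
 * ffr_in/ffr_out, za_in/za_out, zt_in/zt_out and svcr_in/svcr_out
 * buffers referenced above are deliberately not defined in this file;
 * they are expected to be provided by the C code driving the test and
 * are resolved at link time.
 */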