// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/irq.h>
#include <linux/stringify.h>

#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>
#include <asm/sbi.h>
#include <asm/vector.h>
#include <asm/insn.h>

#ifdef CONFIG_FPU

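/*
 * FP register accessors. The low-level helpers (put_f32_reg(), get_f64_reg(),
 * ...) are implemented out of line in assembly; the wrappers below extract
 * the FP register number from the instruction encoding (rd lives in bits
 * 11:7) and mark the FP state dirty so it is written back on the next
 * context switch.
 */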
#define FP_GET_RD(insn)		(insn >> 7 & 0x1F)

extern void put_f32_reg(unsigned long fp_reg, unsigned long value);

static int set_f32_rd(unsigned long insn, struct pt_regs *regs,
		      unsigned long val)
{
	unsigned long fp_reg = FP_GET_RD(insn);

	put_f32_reg(fp_reg, val);
	regs->status |= SR_FS_DIRTY;

	return 0;
}

extern void put_f64_reg(unsigned long fp_reg, unsigned long value);

static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
{
	unsigned long fp_reg = FP_GET_RD(insn);
	unsigned long value;

#if __riscv_xlen == 32
	value = (unsigned long) &val;
#else
	value = val;
#endif
	put_f64_reg(fp_reg, value);
	regs->status |= SR_FS_DIRTY;

	return 0;
}

#if __riscv_xlen == 32
extern void get_f64_reg(unsigned long fp_reg, u64 *value);

static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
		      struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	u64 val;

	get_f64_reg(fp_reg, &val);
	regs->status |= SR_FS_DIRTY;

	return val;
}
#else

extern unsigned long get_f64_reg(unsigned long fp_reg);

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f64_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#endif

extern unsigned long get_f32_reg(unsigned long fp_reg);

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f32_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#else /* CONFIG_FPU */
static void set_f32_rd(unsigned long insn, struct pt_regs *regs,
		       unsigned long val) {}

static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {}

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}

#endif

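/*
 * rs2 is at bit 20 in the standard 32-bit encoding, and at bit 2 in the
 * compressed stack-relative stores (c.fsdsp/c.fswsp). For the other
 * compressed stores, RVC_RS2S() already expands rs2' into a full register
 * number, hence the offset of 0.
 */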
#define GET_F64_RS2(insn, regs) (get_f64_rs(insn, 20, regs))
#define GET_F64_RS2C(insn, regs) (get_f64_rs(insn, 2, regs))
#define GET_F64_RS2S(insn, regs) (get_f64_rs(RVC_RS2S(insn), 0, regs))

#define GET_F32_RS2(insn, regs) (get_f32_rs(insn, 20, regs))
#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))

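/*
 * Read one instruction parcel of the given type (u16 or u32) from the
 * faulting context: through get_user() when the trap came from user space,
 * or with a plain dereference when it came from the kernel.
 */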
#define __read_insn(regs, insn, insn_addr, type)	\
({							\
	int __ret;					\
							\
	if (user_mode(regs)) {				\
		__ret = get_user(insn, (type __user *) insn_addr); \
	} else {					\
		insn = *(type *)insn_addr;		\
		__ret = 0;				\
	}						\
							\
	__ret;						\
})

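/*
 * Fetch the (possibly compressed) instruction at @epc. When epc is only
 * 2-byte aligned, the instruction may straddle a 4-byte boundary, so read it
 * as two halfwords and only fetch the upper half if the length bits indicate
 * a 32-bit encoding.
 */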
static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
{
	ulong insn = 0;

	if (epc & 0x2) {
		ulong tmp = 0;

		if (__read_insn(regs, insn, epc, u16))
			return -EFAULT;
		/*
		 * __get_user() uses a regular "lw", which sign-extends the
		 * loaded value. Make sure to clear the higher-order bits in
		 * case we "or" it below with the upper 16-bit half.
		 */
		insn &= GENMASK(15, 0);
		if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		epc += sizeof(u16);
		if (__read_insn(regs, tmp, epc, u16))
			return -EFAULT;
		*r_insn = (tmp << 16) | insn;

		return 0;
	} else {
		if (__read_insn(regs, insn, epc, u32))
			return -EFAULT;
		if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		insn &= GENMASK(15, 0);
		*r_insn = insn;

		return 0;
	}
}

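/*
 * Scratch buffer used to move the misaligned data byte by byte while still
 * being able to interpret it as a native register value.
 */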
union reg_data {
	u8 data_bytes[8];
	ulong data_ulong;
	u64 data_u64;
};

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */

#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static int handle_vector_misaligned_load(struct pt_regs *regs)
{
	unsigned long epc = regs->epc;
	unsigned long insn;

	if (get_insn(regs, epc, &insn))
		return -1;

	/* Only return 0 when in check_vector_unaligned_access_emulated */
	if (*this_cpu_ptr(&vector_misaligned_access) == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
		*this_cpu_ptr(&vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
		regs->epc = epc + INSN_LEN(insn);
		return 0;
	}

	/* We don't emulate vector instructions yet */
	regs->epc = epc;
	return -1;
}
#else
static int handle_vector_misaligned_load(struct pt_regs *regs)
{
	return -1;
}
#endif

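/*
 * Emulate a misaligned scalar load: decode the trapping instruction to find
 * the access width and destination register (integer or FP), assemble the
 * value with a byte-wise copy from the misaligned address, sign- or
 * zero-extend it as the instruction requires, then skip over the instruction.
 */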
static int handle_scalar_misaligned_load(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int fp = 0, shift = 0, len = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
		fp = 1;
		len = 8;
	} else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
		fp = 1;
		len = 4;
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
		fp = 1;
		len = 8;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
		fp = 1;
		len = 8;
#if defined(CONFIG_32BIT)
	} else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
		fp = 1;
		len = 4;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
		fp = 1;
		len = 4;
#endif
	} else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
		len = 2;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else {
		regs->epc = epc;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	val.data_u64 = 0;
	if (user_mode(regs)) {
		if (copy_from_user(&val, (u8 __user *)addr, len))
			return -1;
	} else {
		memcpy(&val, (u8 *)addr, len);
	}

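	/*
	 * For sign-extending loads, shifting the value up to the top of the
	 * register and back down with an arithmetic shift replicates the sign
	 * bit; zero-extending variants leave shift at 0.
	 */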
	if (!fp)
		SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift);
	else if (len == 8)
		set_f64_rd(insn, regs, val.data_u64);
	else
		set_f32_rd(insn, regs, val.data_ulong);

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}

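/*
 * Emulate a misaligned scalar store: decode the trapping instruction to find
 * the access width and the source register (integer or FP), then write the
 * value out byte by byte to the misaligned address before skipping over the
 * instruction.
 */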
static int handle_scalar_misaligned_store(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int len = 0, fp = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

	val.data_ulong = GET_RS2(insn, regs);

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2(insn, regs);
	} else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2(insn, regs);
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
		len = 8;
		val.data_ulong = GET_RS2C(insn, regs);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
		len = 4;
		val.data_ulong = GET_RS2C(insn, regs);
	} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2C(insn, regs);
#if !defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
	} else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
		len = 2;
		val.data_ulong = GET_RS2S(insn, regs);
	} else {
		regs->epc = epc;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	if (user_mode(regs)) {
		if (copy_to_user((u8 __user *)addr, &val, len))
			return -1;
	} else {
		memcpy((u8 *)addr, &val, len);
	}

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}

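/*
 * Entry points called from the trap handlers. Vector instructions are
 * dispatched separately since they are only probed, not emulated.
 */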
int handle_misaligned_load(struct pt_regs *regs)
{
	unsigned long epc = regs->epc;
	unsigned long insn;

	if (IS_ENABLED(CONFIG_RISCV_VECTOR_MISALIGNED)) {
		if (get_insn(regs, epc, &insn))
			return -1;

		if (insn_is_vector(insn))
			return handle_vector_misaligned_load(regs);
	}

	if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED))
		return handle_scalar_misaligned_load(regs);

	return -1;
}

int handle_misaligned_store(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED))
		return handle_scalar_misaligned_store(regs);

	return -1;
}

#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
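/*
 * Probe whether this CPU traps on misaligned vector accesses by performing
 * one: if it traps, handle_vector_misaligned_load() flips the per-cpu state
 * from UNKNOWN to UNSUPPORTED; if the hardware handles the access, the state
 * is left as UNKNOWN.
 */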
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused)
{
	long *mas_ptr = this_cpu_ptr(&vector_misaligned_access);
	unsigned long tmp_var;

	*mas_ptr = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	kernel_vector_begin();
	/*
	 * In pre-13.0.0 versions of GCC, vector registers cannot appear in
	 * the clobber list. This inline asm clobbers v0, but since we do not
	 * currently build the kernel with V enabled, the v0 clobber arg is not
	 * needed (as the compiler will not emit vector code itself). If the
	 * kernel is changed to build with V enabled, the clobber arg will need
	 * to be added here.
	 */
	__asm__ __volatile__ (
		".balign 4\n\t"
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"	vsetivli zero, 1, e16, m1, ta, ma\n\t"	// Vectors of 16b
		"	vle16.v v0, (%[ptr])\n\t"		// Load bytes
		".option pop\n\t"
		: : [ptr] "r" ((u8 *)&tmp_var + 1));
	kernel_vector_end();
}

bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
	int cpu;

	/*
	 * While documented as very slow, schedule_on_each_cpu() is used here
	 * because kernel_vector_begin() expects irqs to be enabled or it will
	 * panic().
	 */
	schedule_on_each_cpu(check_vector_unaligned_access_emulated);

	for_each_online_cpu(cpu)
		if (per_cpu(vector_misaligned_access, cpu)
		    == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
			return false;

	return true;
}
#else
bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
	return false;
}
#endif

static bool all_cpus_unaligned_scalar_access_emulated(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (per_cpu(misaligned_access_speed, cpu) !=
		    RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
			return false;

	return true;
}

#ifdef CONFIG_RISCV_SCALAR_MISALIGNED

static bool unaligned_ctl __read_mostly;

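/*
 * Probe whether misaligned scalar accesses trap on this CPU by performing
 * one: if it traps, handle_scalar_misaligned_load() marks the per-cpu speed
 * as EMULATED; otherwise it stays UNKNOWN, meaning the hardware handled the
 * access directly.
 */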
static void check_unaligned_access_emulated(void *arg __always_unused)
{
	int cpu = smp_processor_id();
	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
	unsigned long tmp_var, tmp_val;

	*mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	__asm__ __volatile__ (
		"	"REG_L" %[tmp], 1(%[ptr])\n"
		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
}

static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
{
	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);

	check_unaligned_access_emulated(NULL);

	/*
	 * If unaligned_ctl is already set, it means we detected at boot time
	 * that all CPUs use emulated misaligned accesses. If that changed
	 * when hotplugging the new CPU, this is something we don't handle.
	 */
	if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
		return -EINVAL;
	}

	return 0;
}

bool __init check_unaligned_access_emulated_all_cpus(void)
{
	/*
	 * We can only support PR_UNALIGN controls if all CPUs have misaligned
	 * accesses emulated since tasks requesting such control can run on any
	 * CPU.
	 */
	on_each_cpu(check_unaligned_access_emulated, NULL, 1);

	if (!all_cpus_unaligned_scalar_access_emulated())
		return false;

	unaligned_ctl = true;
	return true;
}

bool unaligned_ctl_available(void)
{
	return unaligned_ctl;
}
#else
bool __init check_unaligned_access_emulated_all_cpus(void)
{
	return false;
}

static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
{
	return 0;
}
#endif

static bool misaligned_traps_delegated;

#ifdef CONFIG_RISCV_SBI

static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
{
	if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) &&
	    misaligned_traps_delegated) {
		pr_crit("Misaligned trap delegation non homogeneous (expected delegated)\n");
		return -EINVAL;
	}

	return 0;
}

void __init unaligned_access_init(void)
{
	int ret;

	ret = sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
	if (ret)
		return;

	misaligned_traps_delegated = true;
	pr_info("SBI misaligned access exception delegation ok\n");
	/*
	 * Note that we don't have to take any specific action here: if the
	 * delegation is successful, check_unaligned_access_emulated() will
	 * verify that the platform indeed traps on misaligned accesses.
	 */
}
#else
void __init unaligned_access_init(void) {}

static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
{
	return 0;
}

#endif

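/*
 * CPU hotplug hook: request misaligned exception delegation through SBI (when
 * available) and verify that the new CPU behaves like the CPUs probed at
 * boot.
 */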
int cpu_online_unaligned_access_init(unsigned int cpu)
{
	int ret;

	ret = cpu_online_sbi_unaligned_setup(cpu);
	if (ret)
		return ret;

	return cpu_online_check_unaligned_access_emulated(cpu);
}

bool misaligned_traps_can_delegate(void)
{
	/*
	 * Either we successfully requested misaligned traps delegation for all
	 * CPUs, or the SBI does not implement the FWFT extension but delegated
	 * the exception by default.
	 */
	return misaligned_traps_delegated ||
	       all_cpus_unaligned_scalar_access_emulated();
}
EXPORT_SYMBOL_GPL(misaligned_traps_can_delegate);