traps_misaligned.c (8d20a739f17a2de9e269db72330f5655d6545dd4) → traps_misaligned.c (d1703dc7bc8ec7adb91f5ceaf1556ff1ed212858)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/irq.h>
#include <linux/stringify.h>

#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>
#include <asm/vector.h>

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
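/*
 * Illustrative sketch (not part of the original file): the MATCH/MASK pairs
 * above identify an instruction by masking out its register and immediate
 * fields and comparing the remaining opcode/funct bits. A hypothetical helper
 * for the LW case would look like this:
 */
static inline bool insn_is_lw_example(unsigned long insn)
{
	return (insn & INSN_MASK_LW) == INSN_MATCH_LW;
}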

--- 290 unchanged lines hidden (view full) ---

}

union reg_data {
	u8 data_bytes[8];
	ulong data_ulong;
	u64 data_u64;
};
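/*
 * Illustrative sketch (not part of the original file): reg_data lets the
 * handlers below move a misaligned value around as raw bytes and then hand it
 * back as one register-sized integer; visibly, the load path ends with
 * set_f32_rd(insn, regs, val.data_ulong) and the store path does
 * memcpy((u8 *)addr, &val, len). A minimal, hypothetical round trip:
 */
static inline unsigned long reg_data_example(const u8 *src, int len)
{
	union reg_data val = { .data_u64 = 0 };

	memcpy(val.data_bytes, src, len);	/* gather the bytes of the value */
	return val.data_ulong;			/* reinterpret as an integer for the register file */
}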
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */

#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static int handle_vector_misaligned_load(struct pt_regs *regs)
{
	unsigned long epc = regs->epc;
	unsigned long insn;

	if (get_insn(regs, epc, &insn))
		return -1;

	/* Only return 0 when in check_vector_unaligned_access_emulated() */
	if (*this_cpu_ptr(&vector_misaligned_access) == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
		*this_cpu_ptr(&vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
		regs->epc = epc + INSN_LEN(insn);
		return 0;
	}

	/* If this is a vector instruction, we don't emulate it yet. */
	regs->epc = epc;
	return -1;
}
#else
static int handle_vector_misaligned_load(struct pt_regs *regs)
{
	return -1;
}
#endif

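/*
 * Probe handshake: check_vector_unaligned_access_emulated() below sets the
 * per-CPU vector_misaligned_access state to UNKNOWN and then issues a
 * deliberately misaligned vector load. If that load traps, the handler above
 * records UNSUPPORTED and skips the instruction; that is the only case in
 * which it returns 0. Any other misaligned vector access is left unemulated
 * and the fault is propagated back to the caller.
 */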
static int handle_scalar_misaligned_load(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int fp = 0, shift = 0, len = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

--- 90 unchanged lines hidden (view full) ---

	else
		set_f32_rd(insn, regs, val.data_ulong);

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}

static int handle_scalar_misaligned_store(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int len = 0, fp = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

--- 74 unchanged lines hidden (view full) ---

		memcpy((u8 *)addr, &val, len);
	}

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}

int handle_misaligned_load(struct pt_regs *regs)
{
	unsigned long epc = regs->epc;
	unsigned long insn;

	if (IS_ENABLED(CONFIG_RISCV_VECTOR_MISALIGNED)) {
		if (get_insn(regs, epc, &insn))
			return -1;

		if (insn_is_vector(insn))
			return handle_vector_misaligned_load(regs);
	}

	if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED))
		return handle_scalar_misaligned_load(regs);

	return -1;
}

int handle_misaligned_store(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED))
		return handle_scalar_misaligned_store(regs);

	return -1;
}
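/*
 * Illustrative sketch (not part of this file): the misaligned-access trap
 * entry points are expected to call the dispatchers above and deliver a fault
 * to the task when they return non-zero. The real caller lives elsewhere in
 * the trap handling code; this hypothetical outline only shows the contract:
 * 0 means the access was emulated and epc was advanced, -1 means it was not
 * handled.
 */
static void example_do_load_misaligned(struct pt_regs *regs)
{
	if (handle_misaligned_load(regs))
		force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->badaddr);
}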

#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused)
{
	long *mas_ptr = this_cpu_ptr(&vector_misaligned_access);
	unsigned long tmp_var;

	*mas_ptr = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	kernel_vector_begin();
	/*
	 * In pre-13.0.0 versions of GCC, vector registers cannot appear in
	 * the clobber list. This inline asm clobbers v0, but since we do not
	 * currently build the kernel with V enabled, the v0 clobber arg is not
	 * needed (as the compiler will not emit vector code itself). If the kernel
	 * is changed to build with V enabled, the clobber arg will need to be
	 * added here.
	 */
	__asm__ __volatile__ (
		".balign 4\n\t"
		".option push\n\t"
		".option arch, +zve32x\n\t"
		"	vsetivli zero, 1, e16, m1, ta, ma\n\t"	// Vectors of 16b
		"	vle16.v v0, (%[ptr])\n\t"		// Load bytes
		".option pop\n\t"
		: : [ptr] "r" ((u8 *)&tmp_var + 1));
	kernel_vector_end();
}

bool check_vector_unaligned_access_emulated_all_cpus(void)
{
	int cpu;

	if (!has_vector()) {
		for_each_online_cpu(cpu)
			per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
		return false;
	}

	schedule_on_each_cpu(check_vector_unaligned_access_emulated);

	for_each_online_cpu(cpu)
		if (per_cpu(vector_misaligned_access, cpu)
		    == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
			return false;

	return true;
}
#else
bool check_vector_unaligned_access_emulated_all_cpus(void)
{
	return false;
}
#endif

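/*
 * Illustrative userspace sketch (not part of this file): the per-CPU
 * vector_misaligned_access state probed above is what hwprobe reports for
 * RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF. Assuming a libc that exposes
 * __NR_riscv_hwprobe and the uapi asm/hwprobe.h header, a program could
 * query it roughly like this:
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF };
 *
 *	if (!syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) &&
 *	    pair.value == RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED)
 *		;	// avoid misaligned vector accesses on this system
 */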
#ifdef CONFIG_RISCV_SCALAR_MISALIGNED

static bool unaligned_ctl __read_mostly;

void check_unaligned_access_emulated(struct work_struct *work __always_unused)
{
	int cpu = smp_processor_id();
	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
	unsigned long tmp_var, tmp_val;

	*mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

--- 32 unchanged lines hidden (view full) ---

	unaligned_ctl = true;
	return true;
}

bool unaligned_ctl_available(void)
{
	return unaligned_ctl;
}
#else
bool check_unaligned_access_emulated_all_cpus(void)
{
	return false;
}
#endif
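/*
 * Illustrative userspace sketch (not part of this file): when every CPU
 * emulates scalar misaligned accesses, unaligned_ctl_available() returns true
 * and, on kernels that wire this into the generic per-task unaligned-access
 * control, a task can opt in to stricter behaviour with prctl(2), e.g.:
 *
 *	#include <sys/prctl.h>
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	// SIGBUS instead of silent emulation
 */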