/*
 * Copyright 2017, Matt Brown, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * vpermxor$#.c
 *
 * Based on H. Peter Anvin's paper - The mathematics of RAID-6
 *
 * $#-way unrolled RAID-6 syndrome calculation using the vpermxor instruction.
 * This file is postprocessed using unroll.awk
 *
 * vpermxor$#.c makes use of the vpermxor instruction to optimise the RAID6 Q
 * syndrome calculations.
 * This can be run on systems which have both Altivec and the vpermxor
 * instruction.
 *
 * This instruction was introduced in POWER8 - ISA v2.07.
 */
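/*
 * As described in the paper above, the Q syndrome over data disks
 * D_0 .. D_z0 is
 *
 *	Q = D_0 + {02}*D_1 + {02}^2*D_2 + ... + {02}^z0*D_z0
 *
 * in GF(2^8), where addition is XOR.  The gen_syndrome routine below
 * evaluates this in Horner form: start with D_z0, then repeatedly
 * multiply the running Q by {02} and XOR in the next lower data disk.
 * vpermxor performs the multiply by {02} on all 16 bytes of a vector
 * at once.
 */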

#include <linux/raid/pq.h>
#ifdef CONFIG_ALTIVEC

#include <altivec.h>
#ifdef __KERNEL__
#include <asm/cputable.h>
#include <asm/ppc-opcode.h>
#include <asm/switch_to.h>
#endif

typedef vector unsigned char unative_t;
#define NSIZE sizeof(unative_t)

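/*
 * Nibble lookup tables for multiplying a byte by {02} in GF(2^8) with the
 * RAID-6 polynomial 0x11d.  gf_low holds {02} * n for each low-nibble
 * value n and gf_high holds {02} * (n << 4) for each high-nibble value n
 * (entries listed from nibble value 15 down to 0).  vpermxor looks up
 * both halves of every byte and XORs the two partial products.
 */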
static const vector unsigned char gf_low = {0x1e, 0x1c, 0x1a, 0x18, 0x16, 0x14,
					    0x12, 0x10, 0x0e, 0x0c, 0x0a, 0x08,
					    0x06, 0x04, 0x02, 0x00};
static const vector unsigned char gf_high = {0xfd, 0xdd, 0xbd, 0x9d, 0x7d, 0x5d,
					     0x3d, 0x1d, 0xe0, 0xc0, 0xa0, 0x80,
					     0x60, 0x40, 0x20, 0x00};

static void noinline raid6_vpermxor$#_gen_syndrome_real(int disks, size_t bytes,
							void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;
	unative_t wp$$, wq$$, wd$$;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	for (d = 0; d < bytes; d += NSIZE*$#) {
		wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];

		for (z = z0-1; z>=0; z--) {
			wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
			/* P syndrome */
			wp$$ = vec_xor(wp$$, wd$$);

			/* Q syndrome: wq = wq * {02}, then XOR in the data */
			asm(VPERMXOR(%0,%1,%2,%3):"=v"(wq$$):"v"(gf_high), "v"(gf_low), "v"(wq$$));
			wq$$ = vec_xor(wq$$, wd$$);
		}
		*(unative_t *)&p[d+NSIZE*$$] = wp$$;
		*(unative_t *)&q[d+NSIZE*$$] = wq$$;
	}
}

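/*
 * Kernel code cannot use the Altivec unit without preparation:
 * enable_kernel_altivec() makes it available, and preemption stays
 * disabled so the vector register state is not lost across a context
 * switch while the syndrome is being computed.
 */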
static void raid6_vpermxor$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	preempt_disable();
	enable_kernel_altivec();

	raid6_vpermxor$#_gen_syndrome_real(disks, bytes, ptrs);

	disable_kernel_altivec();
	preempt_enable();
}

int raid6_have_altivec_vpermxor(void);
#if $# == 1
int raid6_have_altivec_vpermxor(void)
{
	/* Check if arch has both Altivec and the vpermxor instruction */
# ifdef __KERNEL__
	return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
		cpu_has_feature(CPU_FTR_ARCH_207S));
# else
	return 1;
# endif
}
#endif

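/*
 * Routine descriptor: registered in the raid6_algos[] table in
 * lib/raid6/algos.c, which benchmarks the usable implementations and
 * selects one at initialisation time.
 */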
const struct raid6_calls raid6_vpermxor$# = {
	raid6_vpermxor$#_gen_syndrome,
	NULL,			/* XOR not yet implemented */
	raid6_have_altivec_vpermxor,
	"vpermxor$#",
	0
};
#endif