// SPDX-License-Identifier: GPL-2.0-only
/*
 * asynchronous raid6 recovery self test
 * Copyright (c) 2009, Intel Corporation.
 *
 * based on drivers/md/raid6test/test.c:
 * 	Copyright 2002-2007 H. Peter Anvin
 */
#include <linux/async_tx.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/module.h>

#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)

#define NDISKS 64 /* Including P and Q */

static struct page *dataptrs[NDISKS];
static unsigned int dataoffs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
static struct page *data[NDISKS+3];
static struct page *spare;
static struct page *recovi;
static struct page *recovj;

/* completion callback for the async_tx operations issued below */
static void callback(void *param)
{
	struct completion *cmp = param;

	complete(cmp);
}

/* fill the data disks with random bytes and reset the pointer/offset tables */
static void makedata(int disks)
{
	int i;

	for (i = 0; i < disks; i++) {
		get_random_bytes(page_address(data[i]), PAGE_SIZE);
		dataptrs[i] = data[i];
		dataoffs[i] = 0;
	}
}

/* the last two disks hold the P and Q syndromes; the rest hold data */
static char disk_type(int d, int disks)
{
	if (d == disks - 2)
		return 'P';
	else if (d == disks - 1)
		return 'Q';
	else
		return 'D';
}

/* Recover two failed blocks. */
static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
			     struct page **ptrs, unsigned int *offs)
{
	struct async_submit_ctl submit;
	struct completion cmp;
	struct dma_async_tx_descriptor *tx = NULL;
	enum sum_check_flags result = ~0;

	if (faila > failb)
		swap(faila, failb);

	if (failb == disks-1) {
		if (faila == disks-2) {
			/* P+Q failure.  Just rebuild the syndrome. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, offs,
					disks, bytes, &submit);
		} else {
			struct page *blocks[NDISKS];
			struct page *dest;
			int count = 0;
			int i;

			BUG_ON(disks > NDISKS);

			/* data+Q failure.  Reconstruct data from P,
			 * then rebuild syndrome
			 */
			for (i = disks; i-- ; ) {
				if (i == faila || i == failb)
					continue;
				blocks[count++] = ptrs[i];
			}
			dest = ptrs[faila];
			init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, addr_conv);
			tx = async_xor(dest, blocks, 0, count, bytes, &submit);

			init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, offs,
					disks, bytes, &submit);
		}
	} else {
		if (failb == disks-2) {
			/* data+P failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_datap_recov(disks, bytes,
					faila, ptrs, offs, &submit);
		} else {
			/* data+data failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_2data_recov(disks, bytes,
					faila, failb, ptrs, offs, &submit);
		}
	}
	/* check that the recovered data produces a consistent P/Q syndrome */
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
	tx = async_syndrome_val(ptrs, offs,
			disks, bytes, &result, spare, 0, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
		pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
		   __func__, faila, failb, disks);

	if (result != 0)
		pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
		   __func__, faila, failb, result);
}

/* simulate failure of disks i and j, recover them, and verify the result */
static int test_disks(int i, int j, int disks)
{
	int erra, errb;

	memset(page_address(recovi), 0xf0, PAGE_SIZE);
	memset(page_address(recovj), 0xba, PAGE_SIZE);

	dataptrs[i] = recovi;
	dataptrs[j] = recovj;

	raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs, dataoffs);

	erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
	errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);

	pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
	   __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
	   (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");

	dataptrs[i] = data[i];
	dataptrs[j] = data[j];

	return erra || errb;
}

/* exercise dual-failure recovery for every (i, j) disk pair of a given size */
static int test(int disks, int *tests)
{
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct completion cmp;
	int err = 0;
	int i, j;

	recovi = data[disks];
	recovj = data[disks+1];
	spare = data[disks+2];

	makedata(disks);

	/* Nuke syndromes */
	memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
	memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);

	/* Generate assumed good syndrome */
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
	tx = async_gen_syndrome(dataptrs, dataoffs, disks, PAGE_SIZE, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
		pr("error: initial gen_syndrome(%d) timed out\n", disks);
		return 1;
	}

	pr("testing the %d-disk case...\n", disks);
	for (i = 0; i < disks-1; i++)
		for (j = i+1; j < disks; j++) {
			(*tests)++;
			err += test_disks(i, j, disks);
		}

	return err;
}


static int __init raid6_test(void)
{
	int err = 0;
	int tests = 0;
	int i;

	/* allocate NDISKS data pages plus two recovery pages and a spare */
	for (i = 0; i < NDISKS+3; i++) {
		data[i] = alloc_page(GFP_KERNEL);
		if (!data[i]) {
			while (i--)
				put_page(data[i]);
			return -ENOMEM;
		}
	}

	/* the 4-disk and 5-disk cases are special for the recovery code */
	if (NDISKS > 4)
		err += test(4, &tests);
	if (NDISKS > 5)
		err += test(5, &tests);
	/* the 11 and 12 disk cases are special for ioatdma (p-disabled
	 * q-continuation without extended descriptor)
	 */
	if (NDISKS > 12) {
		err += test(11, &tests);
		err += test(12, &tests);
	}

	/* the 24 disk case is special for ioatdma as it is the boundary point
	 * at which it needs to switch from 8-source ops to 16-source
	 * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
	 */
	if (NDISKS > 24)
		err += test(24, &tests);

	err += test(NDISKS, &tests);

	pr("\n");
	pr("complete (%d tests, %d failure%s)\n",
	   tests, err, err == 1 ? "" : "s");

	for (i = 0; i < NDISKS+3; i++)
		put_page(data[i]);

	return 0;
}

static void __exit raid6_test_exit(void)
{
}

/* when compiled-in wait for drivers to load first (assumes dma drivers
 * are also compiled-in)
 */
late_initcall(raid6_test);
module_exit(raid6_test_exit);
MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
MODULE_LICENSE("GPL");