/*
 * Copyright (C) 2017 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"

static void free_extent_map_tree(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	struct rb_node *node;

	while (!RB_EMPTY_ROOT(&em_tree->map)) {
		node = rb_first(&em_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		remove_extent_mapping(em_tree, em);

#ifdef CONFIG_BTRFS_DEBUG
		if (refcount_read(&em->refs) != 1) {
			test_msg(
"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d\n",
				 em->start, em->len, em->block_start,
				 em->block_len, refcount_read(&em->refs));

			refcount_set(&em->refs, 1);
		}
#endif
		free_extent_map(em);
	}
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.  There is a
 * file extent [0, 16K) followed by another file extent [16K, 20K); two dio
 * reads enter btrfs_get_extent() concurrently: t1 is reading [8K, 16K) and
 * t2 is reading [0, 8K).
 *
 *     t1                               t2
 *  btrfs_get_extent()               btrfs_get_extent()
 *    -> lookup_extent_mapping()       -> lookup_extent_mapping()
 *    -> add_extent_mapping(0, 16K)
 *    -> return em
 *                                     -> add_extent_mapping(0, 16K)
 *                                     -> # handle -EEXIST
 */
static void test_case_1(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	u64 start = 0;
	u64 len = SZ_8K;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip the test on error. */
		return;

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	/* Add [16K, 20K) following [0, 16K) */
	em = alloc_extent_map();
	if (!em)
		goto out;

	em->start = SZ_16K;
	em->len = SZ_4K;
	em->block_start = SZ_32K; /* avoid merging */
	em->block_len = SZ_4K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [0, 8K), should return [0, 16K) instead. */
	em->start = start;
	em->len = len;
	em->block_start = start;
	em->block_len = len;
	ret = btrfs_add_extent_mapping(em_tree, &em, em->start, em->len);
	if (ret)
		test_msg("case1 [%llu %llu]: ret %d\n", start, start + len, ret);
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_16K ||
	     em->block_start != 0 || em->block_len != SZ_16K))
		test_msg(
"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu)\n",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}
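
/*
 * Each test case ends with a variation of the same check: when
 * btrfs_add_extent_mapping() hands back an em, that em has to cover the
 * requested [start, start + len) range (cases 1 and 2 additionally pin the
 * exact start/len/block values).  A predicate along the following lines
 * could factor out the common part; the name em_covers_range() is invented
 * for this sketch and it is kept under #if 0 so it does not affect the build.
 */
#if 0
static bool em_covers_range(struct extent_map *em, u64 start, u64 len)
{
	return em && em->start <= start &&
	       start + len <= extent_map_end(em);
}
#endif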

/*
 * Test scenario:
 *
 * Reading an inline extent ends up with -EEXIST, i.e. read an inline
 * extent, discard the page cache, then read it again.
 */
static void test_case_2(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip the test on error. */
		return;

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	/* Add [4K, 8K) following [0, 1K) */
	em = alloc_extent_map();
	if (!em)
		goto out;

	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [0, 1K) */
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	ret = btrfs_add_extent_mapping(em_tree, &em, em->start, em->len);
	if (ret)
		test_msg("case2 [0 1K]: ret %d\n", ret);
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_1K ||
	     em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1))
		test_msg(
"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu)\n",
			 ret, em->start, em->len, em->block_start,
			 em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}
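
/*
 * Cases 3 and 4 below each run their body more than once, varying only the
 * [start, start + 4K) range that is passed to btrfs_add_extent_mapping(),
 * so that the -EEXIST handling is exercised for different overlaps between
 * the em being added and the mappings already in the tree.
 */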

static void __test_case_3(struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip this test on error. */
		return;

	/* Add [4K, 8K) */
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;

	/* Add [0, 16K) */
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
	if (ret)
		test_msg("case3 [0x%llx 0x%llx): ret %d\n",
			 start, start + len, ret);
	/*
	 * Since bytes within em are contiguous, em->block_start is identical
	 * to em->start.
	 */
	if (em &&
	    (start < em->start || start + len > extent_map_end(em) ||
	     em->start != em->block_start || em->len != em->block_len))
		test_msg(
"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)\n",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 16K); two jobs run concurrently against it:
 * t1 is doing a buffered write to [4K, 8K) and t2 is doing a dio read
 * from [0, 4K), [8K, 12K) or [12K, 16K).
 *
 * t1 goes ahead of t2 and adds em [4K, 8K) into the tree.
 *
 *         t1                          t2
 *  cow_file_range()              btrfs_get_extent()
 *                                  -> lookup_extent_mapping()
 *   -> add_extent_mapping()
 *                                  -> add_extent_mapping()
 */
static void test_case_3(struct extent_map_tree *em_tree)
{
	__test_case_3(em_tree, 0);
	__test_case_3(em_tree, SZ_8K);
	__test_case_3(em_tree, (12 * 1024ULL));
}

static void __test_case_4(struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em)
		/* Skip this test on error. */
		return;

	/* Add [0K, 8K) */
	em->start = 0;
	em->len = SZ_8K;
	em->block_start = 0;
	em->block_len = SZ_8K;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	/* Add [8K, 32K), i.e. 24K starting at 8K */
	em = alloc_extent_map();
	if (!em)
		goto out;

	em->start = SZ_8K;
	em->len = 24 * 1024ULL;
	em->block_start = SZ_16K; /* avoid merging */
	em->block_len = 24 * 1024ULL;
	ret = add_extent_mapping(em_tree, em, 0);
	ASSERT(ret == 0);
	free_extent_map(em);

	em = alloc_extent_map();
	if (!em)
		goto out;
	/* Add [0K, 32K) */
	em->start = 0;
	em->len = SZ_32K;
	em->block_start = 0;
	em->block_len = SZ_32K;
	ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
	if (ret)
		test_msg("case4 [0x%llx 0x%llx): ret %d\n",
			 start, start + len, ret);
	if (em &&
	    (start < em->start || start + len > extent_map_end(em)))
		test_msg(
"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)\n",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
	free_extent_map(em);
out:
	/* free memory */
	free_extent_map_tree(em_tree);
}

/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 32K); two jobs run concurrently against it:
 * t1 is doing a dio write to [8K, 32K) and t2 is doing a dio read from
 * [0, 4K) or [4K, 8K).
 *
 * t1 goes ahead of t2 and splits em [0, 32K) into em [0K, 8K) and [8K, 32K).
 *
 *         t1                                 t2
 *  btrfs_get_blocks_direct()            btrfs_get_blocks_direct()
 *   -> btrfs_get_extent()                -> btrfs_get_extent()
 *       -> lookup_extent_mapping()
 *       -> add_extent_mapping()              -> lookup_extent_mapping()
 *          # load [0, 32K)
 *   -> btrfs_new_extent_direct()
 *       -> btrfs_drop_extent_cache()
 *          # split [0, 32K)
 *       -> add_extent_mapping()
 *          # add [8K, 32K)
 *                                            -> add_extent_mapping()
 *                                               # handle -EEXIST when adding
 *                                               # [0, 32K)
 */
static void test_case_4(struct extent_map_tree *em_tree)
{
	__test_case_4(em_tree, 0);
	__test_case_4(em_tree, SZ_4K);
}
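
/*
 * Entry point for the extent map self-tests.  Like the other self-tests in
 * this directory it is presumably run from btrfs_run_sanity_tests() when
 * CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled (that wiring lives outside
 * this file).  The individual cases only report failures through test_msg(),
 * so the return value here stays 0 even when a check trips.
 */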
324 * 325 * t1 t2 326 * btrfs_get_blocks_direct() btrfs_get_blocks_direct() 327 * -> btrfs_get_extent() -> btrfs_get_extent() 328 * -> lookup_extent_mapping() 329 * -> add_extent_mapping() -> lookup_extent_mapping() 330 * # load [0, 32K) 331 * -> btrfs_new_extent_direct() 332 * -> btrfs_drop_extent_cache() 333 * # split [0, 32K) 334 * -> add_extent_mapping() 335 * # add [8K, 32K) 336 * -> add_extent_mapping() 337 * # handle -EEXIST when adding 338 * # [0, 32K) 339 */ 340 static void test_case_4(struct extent_map_tree *em_tree) 341 { 342 __test_case_4(em_tree, 0); 343 __test_case_4(em_tree, SZ_4K); 344 } 345 346 int btrfs_test_extent_map() 347 { 348 struct extent_map_tree *em_tree; 349 350 test_msg("Running extent_map tests\n"); 351 352 em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL); 353 if (!em_tree) 354 /* Skip the test on error. */ 355 return 0; 356 357 extent_map_tree_init(em_tree); 358 359 test_case_1(em_tree); 360 test_case_2(em_tree); 361 test_case_3(em_tree); 362 test_case_4(em_tree); 363 364 kfree(em_tree); 365 return 0; 366 } 367