/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

static int __i915_sw_fence_call
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	switch (state) {
	case FENCE_COMPLETE:
		break;

	case FENCE_FREE:
		/* Leave the fence for the caller to free it after testing */
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_sw_fence *alloc_fence(void)
{
	struct i915_sw_fence *fence;

	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	i915_sw_fence_init(fence, fence_notify);
	return fence;
}

static void free_fence(struct i915_sw_fence *fence)
{
	i915_sw_fence_fini(fence);
	kfree(fence);
}

static int __test_self(struct i915_sw_fence *fence)
{
	if (i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_commit(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_wait(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	return 0;
}

static int test_self(void *arg)
{
	struct i915_sw_fence *fence;
	int ret;

	/* Test i915_sw_fence signaling and completion testing */
	fence = alloc_fence();
	if (!fence)
		return -ENOMEM;

	ret = __test_self(fence);

	free_fence(fence);
	return ret;
}

static int test_dag(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test detection of cycles within the i915_sw_fence graphs */
	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return 0;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
		pr_err("recursive cycle not detected (AA)\n");
		goto err_A;
	}

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (BAB)\n");
		goto err_B;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (CBC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
		pr_err("cycle not detected (BA, CB, AC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
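
/*
 * The ordering tests below expect i915_sw_fence_await_sw_fence_gfp() to
 * return a positive value when the wait has been queued, 0 if the signaler
 * was already complete, and a negative errno on failure.
 */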

static int test_AB(void *arg)
{
	struct i915_sw_fence *A, *B;
	int ret;

	/* Test i915_sw_fence (A) waiting on an event source (B) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_B;
	if (ret == 0) {
		pr_err("Incorrectly reported fence A was complete before await\n");
		ret = -EINVAL;
		goto err_B;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A))
		goto err_B;

	i915_sw_fence_commit(B);
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B is not done\n");
		goto err_B;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A is not done\n");
		goto err_B;
	}

	ret = 0;
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_ABC(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test a chain of fences, A waits on B who waits on C */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence C was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		goto err_C;
	}

	i915_sw_fence_commit(B);
	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		goto err_C;
	}

	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early (after signaling B)\n");
		goto err_C;
	}

	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_AB_C(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test multiple fences (AB) waiting on a single event (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	ret = 0;
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		ret = -EINVAL;
	}

	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(C);
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_C_AB(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test multiple event sources (A,B) for a single fence (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = 0;
	i915_sw_fence_commit(C);
	if (i915_sw_fence_done(C))
		ret = -EINVAL;

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_chain(void *arg)
{
	int nfences = 4096;
	struct i915_sw_fence **fences;
	int ret, i;

	/* Test a long chain of fences */
	fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	for (i = 0; i < nfences; i++) {
		fences[i] = alloc_fence();
		if (!fences[i]) {
			nfences = i;
			ret = -ENOMEM;
			goto err;
		}

		if (i > 0) {
			ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
							       fences[i - 1],
							       GFP_KERNEL);
			if (ret < 0) {
				nfences = i + 1;
				goto err;
			}

			i915_sw_fence_commit(fences[i]);
		}
	}

	ret = 0;
	for (i = nfences; --i; ) {
		if (i915_sw_fence_done(fences[i])) {
			if (ret == 0)
				pr_err("Fence[%d] completed early\n", i);
			ret = -EINVAL;
		}
	}
	i915_sw_fence_commit(fences[0]);
	for (i = 0; ret == 0 && i < nfences; i++) {
		if (!i915_sw_fence_done(fences[i])) {
			pr_err("Fence[%d] is not done\n", i);
			ret = -EINVAL;
		}
	}

err:
	for (i = 0; i < nfences; i++)
		free_fence(fences[i]);
	kfree(fences);
	return ret;
}
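
/*
 * test_ipc uses a pair of fences as a cross-thread handshake: the worker
 * waits on ipc->in, publishes ipc->value with smp_store_mb() and then
 * commits ipc->out, while the parent checks that the value does not become
 * visible before ipc->in has been committed.
 */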

struct task_ipc {
	struct work_struct work;
	struct completion started;
	struct i915_sw_fence *in, *out;
	int value;
};

static void task_ipc(struct work_struct *work)
{
	struct task_ipc *ipc = container_of(work, typeof(*ipc), work);

	complete(&ipc->started);

	i915_sw_fence_wait(ipc->in);
	smp_store_mb(ipc->value, 1);
	i915_sw_fence_commit(ipc->out);
}

static int test_ipc(void *arg)
{
	struct task_ipc ipc;
	int ret = 0;

	/* Test use of i915_sw_fence as an interprocess signaling mechanism */
	ipc.in = alloc_fence();
	if (!ipc.in)
		return -ENOMEM;
	ipc.out = alloc_fence();
	if (!ipc.out) {
		ret = -ENOMEM;
		goto err_in;
	}

	/* use a completion to avoid chicken-and-egg testing */
	init_completion(&ipc.started);

	ipc.value = 0;
	INIT_WORK_ONSTACK(&ipc.work, task_ipc);
	schedule_work(&ipc.work);

	wait_for_completion(&ipc.started);

	usleep_range(1000, 2000);
	if (READ_ONCE(ipc.value)) {
		pr_err("worker updated value before i915_sw_fence was signaled\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(ipc.in);
	i915_sw_fence_wait(ipc.out);

	if (!READ_ONCE(ipc.value)) {
		pr_err("worker signaled i915_sw_fence before value was posted\n");
		ret = -EINVAL;
	}

	flush_work(&ipc.work);
	destroy_work_on_stack(&ipc.work);
	free_fence(ipc.out);
err_in:
	free_fence(ipc.in);
	return ret;
}

static int test_timer(void *arg)
{
	unsigned long target, delay;
	struct timed_fence tf;

	timed_fence_init(&tf, target = jiffies);
	if (!i915_sw_fence_done(&tf.fence)) {
		pr_err("Fence with immediate expiration not signaled\n");
		goto err;
	}
	timed_fence_fini(&tf);

	for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
		timed_fence_init(&tf, target = jiffies + delay);
		if (i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
			goto err;
		}

		i915_sw_fence_wait(&tf.fence);
		if (!i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence not signaled after wait\n");
			goto err;
		}
		if (time_before(jiffies, target)) {
			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
			       target, jiffies);
			goto err;
		}

		timed_fence_fini(&tf);
	}

	return 0;

err:
	timed_fence_fini(&tf);
	return -EINVAL;
}
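
/*
 * A minimal mock dma_fence, signaled only via dma_fence_signal() in
 * test_dma_fence(), is used to exercise i915_sw_fence_await_dma_fence()
 * both with and without a timeout.
 */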

static const char *mock_name(struct dma_fence *fence)
{
	return "mock";
}

static bool mock_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
	.enable_signaling = mock_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = dma_fence_free,
};

static DEFINE_SPINLOCK(mock_fence_lock);

static struct dma_fence *alloc_dma_fence(void)
{
	struct dma_fence *dma;

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (dma)
		dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);

	return dma;
}

static struct i915_sw_fence *
wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
{
	struct i915_sw_fence *fence;
	int err;

	fence = alloc_fence();
	if (!fence)
		return ERR_PTR(-ENOMEM);

	err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
	i915_sw_fence_commit(fence);
	if (err < 0) {
		free_fence(fence);
		return ERR_PTR(err);
	}

	return fence;
}

static int test_dma_fence(void *arg)
{
	struct i915_sw_fence *timeout = NULL, *not = NULL;
	unsigned long delay = i915_selftest.timeout_jiffies;
	unsigned long end, sleep;
	struct dma_fence *dma;
	int err;

	dma = alloc_dma_fence();
	if (!dma)
		return -ENOMEM;

	timeout = wrap_dma_fence(dma, delay);
	if (IS_ERR(timeout)) {
		err = PTR_ERR(timeout);
		goto err;
	}

	not = wrap_dma_fence(dma, 0);
	if (IS_ERR(not)) {
		err = PTR_ERR(not);
		goto err;
	}

	err = -EINVAL;
	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences immediately signaled\n");
		goto err;
	}

	/* We round the timeout for the fence up to the next second */
	end = round_jiffies_up(jiffies + delay);

	sleep = jiffies_to_usecs(delay) / 3;
	usleep_range(sleep, 2 * sleep);
	if (time_after(jiffies, end)) {
		pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
			 delay, end, jiffies);
		goto skip;
	}

	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences signaled too early\n");
		goto err;
	}

	if (!wait_event_timeout(timeout->wait,
				i915_sw_fence_done(timeout),
				2 * (end - jiffies) + 1)) {
		pr_err("Timeout fence unsignaled!\n");
		goto err;
	}

	if (i915_sw_fence_done(not)) {
		pr_err("No timeout fence signaled!\n");
		goto err;
	}

skip:
	dma_fence_signal(dma);

	if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
		pr_err("Fences unsignaled\n");
		goto err;
	}

	free_fence(not);
	free_fence(timeout);
	dma_fence_put(dma);

	return 0;

err:
	dma_fence_signal(dma);
	if (!IS_ERR_OR_NULL(timeout))
		free_fence(timeout);
	if (!IS_ERR_OR_NULL(not))
		free_fence(not);
	dma_fence_put(dma);
	return err;
}

int i915_sw_fence_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(test_self),
		SUBTEST(test_dag),
		SUBTEST(test_AB),
		SUBTEST(test_ABC),
		SUBTEST(test_AB_C),
		SUBTEST(test_C_AB),
		SUBTEST(test_chain),
		SUBTEST(test_ipc),
		SUBTEST(test_timer),
		SUBTEST(test_dma_fence),
	};

	return i915_subtests(tests, NULL);
}
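
/*
 * These subtests need no real hardware; they are typically run by loading
 * the driver with the i915.mock_selftests=-1 module parameter, which
 * invokes i915_sw_fence_mock_selftests() along with the other mock
 * selftests before the driver probes any device.
 */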