// SPDX-License-Identifier: GPL-2.0
/*
 * DMA traffic test driver
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Isaac Hazan <isaac.hazan@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>

#define DMA_TEST_HOPID			8
#define DMA_TEST_TX_RING_SIZE		64
#define DMA_TEST_RX_RING_SIZE		256
#define DMA_TEST_FRAME_SIZE		SZ_4K
#define DMA_TEST_DATA_PATTERN		0x0123456789abcdefLL
#define DMA_TEST_MAX_PACKETS		1000

enum dma_test_frame_pdf {
	DMA_TEST_PDF_FRAME_START = 1,
	DMA_TEST_PDF_FRAME_END,
};

struct dma_test_frame {
	struct dma_test *dma_test;
	void *data;
	struct ring_frame frame;
};

enum dma_test_test_error {
	DMA_TEST_NO_ERROR,
	DMA_TEST_INTERRUPTED,
	DMA_TEST_BUFFER_ERROR,
	DMA_TEST_DMA_ERROR,
	DMA_TEST_CONFIG_ERROR,
	DMA_TEST_SPEED_ERROR,
	DMA_TEST_WIDTH_ERROR,
	DMA_TEST_BONDING_ERROR,
	DMA_TEST_PACKET_ERROR,
};

static const char * const dma_test_error_names[] = {
	[DMA_TEST_NO_ERROR] = "no errors",
	[DMA_TEST_INTERRUPTED] = "interrupted by signal",
	[DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers",
	[DMA_TEST_DMA_ERROR] = "DMA ring setup failed",
	[DMA_TEST_CONFIG_ERROR] = "configuration is not valid",
	[DMA_TEST_SPEED_ERROR] = "unexpected link speed",
	[DMA_TEST_WIDTH_ERROR] = "unexpected link width",
	[DMA_TEST_BONDING_ERROR] = "lane bonding configuration error",
	[DMA_TEST_PACKET_ERROR] = "packet check failed",
};

enum dma_test_result {
	DMA_TEST_NOT_RUN,
	DMA_TEST_SUCCESS,
	DMA_TEST_FAIL,
};

static const char * const dma_test_result_names[] = {
	[DMA_TEST_NOT_RUN] = "not run",
	[DMA_TEST_SUCCESS] = "success",
	[DMA_TEST_FAIL] = "failed",
};

/**
 * struct dma_test - DMA test device driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @rx_ring: Software ring holding RX frames
 * @tx_ring: Software ring holding TX frames
 * @packets_to_send: Number of packets to send
 * @packets_to_receive: Number of packets to receive
 * @packets_sent: Actual number of packets sent
 * @packets_received: Actual number of packets received
 * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated
 * @link_width: Expected link width (lanes), %0 to use whatever is negotiated
 * @crc_errors: Number of CRC errors during the test run
 * @buffer_overflow_errors: Number of buffer overflow errors during the test
 *                          run
 * @result: Result of the last run
 * @error_code: Error code of the last run
 * @complete: Used to wait for the Rx to complete
 * @lock: Lock serializing access to this structure
 * @debugfs_dir: dentry of this dma_test
 */
struct dma_test {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_ring *rx_ring;
	struct tb_ring *tx_ring;
	unsigned int packets_to_send;
	unsigned int packets_to_receive;
	unsigned int packets_sent;
	unsigned int packets_received;
	unsigned int link_speed;
	unsigned int link_width;
	unsigned int crc_errors;
	unsigned int buffer_overflow_errors;
	enum dma_test_result result;
	enum dma_test_test_error error_code;
	struct completion complete;
	struct mutex lock;
	struct dentry *debugfs_dir;
};
/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */
static const uuid_t dma_test_dir_uuid =
	UUID_INIT(0x3188cd10, 0x6523, 0x4a5a,
		  0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8);

static struct tb_property_dir *dma_test_dir;
static void *dma_test_pattern;

static void dma_test_free_rings(struct dma_test *dt)
{
	if (dt->rx_ring) {
		tb_ring_free(dt->rx_ring);
		dt->rx_ring = NULL;
	}
	if (dt->tx_ring) {
		tb_ring_free(dt->tx_ring);
		dt->tx_ring = NULL;
	}
}

static int dma_test_start_rings(struct dma_test *dt)
{
	unsigned int flags = RING_FLAG_FRAME;
	struct tb_xdomain *xd = dt->xd;
	int ret, e2e_tx_hop = 0;
	struct tb_ring *ring;

	/*
	 * If we are both sender and receiver (traffic goes over a
	 * special loopback dongle) enable E2E flow control. This avoids
	 * losing packets.
	 */
	if (dt->packets_to_send && dt->packets_to_receive)
		flags |= RING_FLAG_E2E;

	if (dt->packets_to_send) {
		ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
					flags);
		if (!ring)
			return -ENOMEM;

		dt->tx_ring = ring;
		e2e_tx_hop = ring->hop;
	}

	if (dt->packets_to_receive) {
		u16 sof_mask, eof_mask;

		sof_mask = BIT(DMA_TEST_PDF_FRAME_START);
		eof_mask = BIT(DMA_TEST_PDF_FRAME_END);

		ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
					flags, e2e_tx_hop, sof_mask, eof_mask,
					NULL, NULL);
		if (!ring) {
			dma_test_free_rings(dt);
			return -ENOMEM;
		}

		dt->rx_ring = ring;
	}

	ret = tb_xdomain_enable_paths(dt->xd, DMA_TEST_HOPID,
				      dt->tx_ring ? dt->tx_ring->hop : 0,
				      DMA_TEST_HOPID,
				      dt->rx_ring ? dt->rx_ring->hop : 0);
	if (ret) {
		dma_test_free_rings(dt);
		return ret;
	}

	if (dt->tx_ring)
		tb_ring_start(dt->tx_ring);
	if (dt->rx_ring)
		tb_ring_start(dt->rx_ring);

	return 0;
}

static void dma_test_stop_rings(struct dma_test *dt)
{
	if (dt->rx_ring)
		tb_ring_stop(dt->rx_ring);
	if (dt->tx_ring)
		tb_ring_stop(dt->tx_ring);

	if (tb_xdomain_disable_paths(dt->xd))
		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

	dma_test_free_rings(dt);
}

static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_FROM_DEVICE);
	kfree(tf->data);

	if (canceled) {
		kfree(tf);
		return;
	}

	dt->packets_received++;
	dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
		dt->packets_to_receive);

	if (tf->frame.flags & RING_DESC_CRC_ERROR)
		dt->crc_errors++;
	if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
		dt->buffer_overflow_errors++;

	kfree(tf);

	if (dt->packets_received == dt->packets_to_receive)
		complete(&dt->complete);
}
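/*
 * Allocates and DMA maps @npackets receive buffers and queues them to
 * the RX ring. Each buffer is unmapped and freed again in
 * dma_test_rx_callback() above, once the frame has been received or
 * the ring has been canceled.
 */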
static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_rx_callback;
		tf->dma_test = dt;
		INIT_LIST_HEAD(&tf->frame.list);

		tb_ring_rx(dt->rx_ring, &tf->frame);
	}

	return 0;
}

static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_TO_DEVICE);
	kfree(tf->data);
	kfree(tf);
}

static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->frame.size = 0; /* means 4096 */
		tf->dma_test = dt;

		tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		memcpy(tf->data, dma_test_pattern, DMA_TEST_FRAME_SIZE);

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_tx_callback;
		tf->frame.sof = DMA_TEST_PDF_FRAME_START;
		tf->frame.eof = DMA_TEST_PDF_FRAME_END;
		INIT_LIST_HEAD(&tf->frame.list);

		dt->packets_sent++;
		dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
			dt->packets_to_send);

		tb_ring_tx(dt->tx_ring, &tf->frame);
	}

	return 0;
}

#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set)	\
static int __fops ## _show(void *data, u64 *val)			\
{									\
	struct tb_service *svc = data;					\
	struct dma_test *dt = tb_service_get_drvdata(svc);		\
	int ret;							\
									\
	ret = mutex_lock_interruptible(&dt->lock);			\
	if (ret)							\
		return ret;						\
	__get(dt, val);							\
	mutex_unlock(&dt->lock);					\
	return 0;							\
}									\
static int __fops ## _store(void *data, u64 val)			\
{									\
	struct tb_service *svc = data;					\
	struct dma_test *dt = tb_service_get_drvdata(svc);		\
	int ret;							\
									\
	ret = __validate(val);						\
	if (ret)							\
		return ret;						\
	ret = mutex_lock_interruptible(&dt->lock);			\
	if (ret)							\
		return ret;						\
	__set(dt, val);							\
	mutex_unlock(&dt->lock);					\
	return 0;							\
}									\
DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show,		\
			 __fops ## _store, "%llu\n")
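/*
 * Each DMA_TEST_DEBUGFS_ATTR(name, ...) invocation below generates a
 * name_show() and a name_store() wrapper that take dt->lock around the
 * given getter/setter (running the validator before storing), plus a
 * name_fops via DEFINE_DEBUGFS_ATTRIBUTE() that is later registered in
 * dma_test_debugfs_init(). For example,
 * DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set)
 * produces lanes_show(), lanes_store() and lanes_fops.
 */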
static void lanes_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_width;
}

static int lanes_validate(u64 val)
{
	return val > 2 ? -EINVAL : 0;
}

static void lanes_set(struct dma_test *dt, u64 val)
{
	dt->link_width = val;
}
DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set);

static void speed_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_speed;
}

static int speed_validate(u64 val)
{
	switch (val) {
	case 20:
	case 10:
	case 0:
		return 0;
	default:
		return -EINVAL;
	}
}

static void speed_set(struct dma_test *dt, u64 val)
{
	dt->link_speed = val;
}
DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set);

static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_receive;
}

static int packets_to_receive_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_receive_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_receive = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get,
		      packets_to_receive_validate, packets_to_receive_set);

static void packets_to_send_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_send;
}

static int packets_to_send_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_send_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_send = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
		      packets_to_send_validate, packets_to_send_set);

static int dma_test_set_bonding(struct dma_test *dt)
{
	switch (dt->link_width) {
	case 2:
		return tb_xdomain_lane_bonding_enable(dt->xd);
	case 1:
		tb_xdomain_lane_bonding_disable(dt->xd);
		fallthrough;
	default:
		return 0;
	}
}

static bool dma_test_validate_config(struct dma_test *dt)
{
	if (!dt->packets_to_send && !dt->packets_to_receive)
		return false;
	if (dt->packets_to_send && dt->packets_to_receive &&
	    dt->packets_to_send != dt->packets_to_receive)
		return false;
	return true;
}

static void dma_test_check_errors(struct dma_test *dt, int ret)
{
	if (!dt->error_code) {
		if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
			dt->error_code = DMA_TEST_SPEED_ERROR;
		} else if (dt->link_width &&
			   dt->xd->link_width != dt->link_width) {
			dt->error_code = DMA_TEST_WIDTH_ERROR;
		} else if (dt->packets_to_send != dt->packets_sent ||
			   dt->packets_to_receive != dt->packets_received ||
			   dt->crc_errors || dt->buffer_overflow_errors) {
			dt->error_code = DMA_TEST_PACKET_ERROR;
		} else {
			return;
		}
	}

	dt->result = DMA_TEST_FAIL;
}
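/*
 * Writing 1 to the "test" attribute runs one test iteration: the
 * counters are reset, the configuration is validated, lane bonding is
 * set up, the DMA rings are started, RX buffers and TX packets are
 * queued, and the call blocks until all expected packets have been
 * received (or the wait is interrupted). The rings are then torn down
 * and the outcome is evaluated in dma_test_check_errors().
 */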
static int test_store(void *data, u64 val)
{
	struct tb_service *svc = data;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	if (val != 1)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	dt->packets_sent = 0;
	dt->packets_received = 0;
	dt->crc_errors = 0;
	dt->buffer_overflow_errors = 0;
	dt->result = DMA_TEST_SUCCESS;
	dt->error_code = DMA_TEST_NO_ERROR;

	dev_dbg(&svc->dev, "DMA test starting\n");
	if (dt->link_speed)
		dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
	if (dt->link_width)
		dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
	dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
	dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);

	if (!dma_test_validate_config(dt)) {
		dev_err(&svc->dev, "invalid test configuration\n");
		dt->error_code = DMA_TEST_CONFIG_ERROR;
		goto out_unlock;
	}

	ret = dma_test_set_bonding(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to set lanes\n");
		dt->error_code = DMA_TEST_BONDING_ERROR;
		goto out_unlock;
	}

	ret = dma_test_start_rings(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to enable DMA rings\n");
		dt->error_code = DMA_TEST_DMA_ERROR;
		goto out_unlock;
	}

	if (dt->packets_to_receive) {
		reinit_completion(&dt->complete);
		ret = dma_test_submit_rx(dt, dt->packets_to_receive);
		if (ret) {
			dev_err(&svc->dev, "failed to submit receive buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_send) {
		ret = dma_test_submit_tx(dt, dt->packets_to_send);
		if (ret) {
			dev_err(&svc->dev, "failed to submit transmit buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_receive) {
		ret = wait_for_completion_interruptible(&dt->complete);
		if (ret) {
			dt->error_code = DMA_TEST_INTERRUPTED;
			goto out_stop;
		}
	}

out_stop:
	dma_test_stop_rings(dt);
out_unlock:
	dma_test_check_errors(dt, ret);
	mutex_unlock(&dt->lock);

	dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
	return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n");

static int status_show(struct seq_file *s, void *not_used)
{
	struct tb_service *svc = s->private;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
	if (dt->result == DMA_TEST_NOT_RUN)
		goto out_unlock;

	seq_printf(s, "packets received: %u\n", dt->packets_received);
	seq_printf(s, "packets sent: %u\n", dt->packets_sent);
	seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
	seq_printf(s, "buffer overflow errors: %u\n",
		   dt->buffer_overflow_errors);
	seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);

out_unlock:
	mutex_unlock(&dt->lock);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);
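/*
 * The test is driven entirely through the debugfs files created below.
 * A typical run from userspace (the exact parent directory depends on
 * how the service is enumerated, typically somewhere under
 * /sys/kernel/debug/thunderbolt/) looks roughly like:
 *
 *	# echo 1000 > dma_test/packets_to_send
 *	# echo 1000 > dma_test/packets_to_receive
 *	# echo 1 > dma_test/test
 *	# cat dma_test/status
 */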
static void dma_test_debugfs_init(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);

	debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
	debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
	debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
			    &packets_to_receive_fops);
	debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
			    &packets_to_send_fops);
	debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
	debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
}

static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct dma_test *dt;

	dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	dt->svc = svc;
	dt->xd = xd;
	mutex_init(&dt->lock);
	init_completion(&dt->complete);

	tb_service_set_drvdata(svc, dt);
	dma_test_debugfs_init(svc);

	return 0;
}

static void dma_test_remove(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	mutex_lock(&dt->lock);
	debugfs_remove_recursive(dt->debugfs_dir);
	mutex_unlock(&dt->lock);
}

static int __maybe_unused dma_test_suspend(struct device *dev)
{
	/*
	 * No need to do anything special here. If userspace is writing
	 * to the test attribute when suspend started, it comes out from
	 * wait_for_completion_interruptible() with -ERESTARTSYS and the
	 * DMA test fails tearing down the rings. Once userspace is
	 * thawed the kernel restarts the write syscall effectively
	 * re-running the test.
	 */
	return 0;
}

static int __maybe_unused dma_test_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops dma_test_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume)
};

static const struct tb_service_id dma_test_ids[] = {
	{ TB_SERVICE("dma_test", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, dma_test_ids);

static struct tb_service_driver dma_test_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt_dma_test",
		.pm = &dma_test_pm_ops,
	},
	.probe = dma_test_probe,
	.remove = dma_test_remove,
	.id_table = dma_test_ids,
};

static int __init dma_test_init(void)
{
	u64 data_value = DMA_TEST_DATA_PATTERN;
	int i, ret;

	dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
	if (!dma_test_pattern)
		return -ENOMEM;

	for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++)
		((u32 *)dma_test_pattern)[i] = data_value++;

	dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid);
	if (!dma_test_dir) {
		ret = -ENOMEM;
		goto err_free_pattern;
	}

	tb_property_add_immediate(dma_test_dir, "prtcid", 1);
	tb_property_add_immediate(dma_test_dir, "prtcvers", 1);
	tb_property_add_immediate(dma_test_dir, "prtcrevs", 0);
	tb_property_add_immediate(dma_test_dir, "prtcstns", 0);

	ret = tb_register_property_dir("dma_test", dma_test_dir);
	if (ret)
		goto err_free_dir;

	ret = tb_register_service_driver(&dma_test_driver);
	if (ret)
		goto err_unregister_dir;

	return 0;

err_unregister_dir:
	tb_unregister_property_dir("dma_test", dma_test_dir);
err_free_dir:
	tb_property_free_dir(dma_test_dir);
err_free_pattern:
	kfree(dma_test_pattern);

	return ret;
}
module_init(dma_test_init);

static void __exit dma_test_exit(void)
{
	tb_unregister_service_driver(&dma_test_driver);
	tb_unregister_property_dir("dma_test", dma_test_dir);
	tb_property_free_dir(dma_test_dir);
	kfree(dma_test_pattern);
}
module_exit(dma_test_exit);

MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("DMA traffic test driver");
MODULE_LICENSE("GPL v2");