/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

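/*
 * Example (illustrative only): the parameters above can be tuned at module
 * load time, e.g. with a modprobe.d entry such as
 *
 *	options ntb_transport transport_mtu=0x4000 copy_bytes=2048 use_dma=1
 *
 * use_dma only takes effect if a DMA memcpy channel can actually be
 * allocated, and copy_bytes is the payload size below which the CPU copy
 * path is used even when such a channel is present.
 */
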
static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool link_is_up;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int node;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);

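/*
 * Minimal client sketch (illustrative only; the foo_* names are
 * hypothetical): a transport client registers a named client device on each
 * transport instance and then registers a driver, which the bus match
 * routine binds to those devices by name prefix (e.g. driver "foo" matches
 * device "foo0"):
 *
 *	static int foo_probe(struct device *client_dev)
 *	{
 *		// create queues, allocate private data, etc.
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct device *client_dev)
 *	{
 *		// free queues and private data
 *	}
 *
 *	static struct ntb_transport_client foo_client = {
 *		.driver.name	= "foo",
 *		.driver.owner	= THIS_MODULE,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		int rc = ntb_transport_register_client_dev("foo");
 *		if (rc)
 *			return rc;
 *		rc = ntb_transport_register_client(&foo_client);
 *		if (rc)
 *			ntb_transport_unregister_client_dev("foo");
 *		return rc;
 *	}
 */
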
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)
		return 0;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n",
			       qp->link_is_up ? "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {
		entry = NULL;
	} else {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_move_tail(&entry->entry, to_list);
	}

	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	/* the first (qp_count % mw_count) windows host one extra queue */
	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}

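/*
 * Worked example for the layout above (illustrative numbers): with one
 * 1 MiB inbound window shared by two queue pairs, each queue gets
 * rx_size = 512 KiB, the last sizeof(struct ntb_rx_info) bytes of which
 * hold remote_rx_info.  With the default transport_mtu of 0x10000 this
 * gives rx_max_frame = 0x10000 and rx_max_entry = 7, each frame ending in
 * a struct ntb_payload_header.  remote_rx_info->entry starts out at
 * rx_max_entry - 1, so the transmitting side always leaves one slot unused.
 */
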
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      resource_size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	size_t xlat_size, buff_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}

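/*
 * Worked example for ntb_set_mw() (illustrative numbers): if the peer
 * advertises a 16 MiB window and the hardware reports an xlat_align_size of
 * 4 KiB and an xlat_align of 16 MiB (the BAR size), both xlat_size and
 * buff_size round up to 16 MiB, and the coherent buffer's DMA address must
 * itself be 16 MiB aligned before ntb_mw_set_trans() can program it - hence
 * the CMA recommendation above for the larger BAR sizes.
 */
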
static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)size);
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

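/*
 * Link bring-up at a glance (summary of ntb_transport_link_work() above):
 * each side pushes its MW sizes (MW0_SZ_HIGH/LOW, ...), then NUM_MWS,
 * NUM_QPS and finally VERSION into the peer's scratchpads, and reads back
 * what the peer wrote into the local scratchpads.  Only when VERSION,
 * NUM_QPS and NUM_MWS all match are the inbound translations programmed via
 * ntb_set_mw() and the transport link marked up; otherwise the work is
 * rescheduled for as long as the NTB link itself stays up.
 */
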
static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);

		tasklet_schedule(&qp->rxc_db_work);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	struct ntb_transport_mw *mw;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	/* the first (qp_count % mw_count) windows host one extra queue */
	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * qp_num / mw_count;

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_node_dir) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_node_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_post_q);
	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}

static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;
	u64 qp_bitmap;
	int node;
	int rc, i;

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	node = dev_to_node(&ndev->dev);

	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;

	mw_count = ntb_mw_count(ndev);

	nt->mw_count = mw_count;

	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
				  GFP_KERNEL, node);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)
		qp_count = mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
				  GFP_KERNEL, node);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err2;
	}

	if (nt_debugfs_dir) {
		nt->debugfs_node_dir =
			debugfs_create_dir(pci_name(ndev->pdev),
					   nt_debugfs_dir);
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err4:
	ntb_clear_ctx(ndev);
err3:
	kfree(nt->qp_vec);
err2:
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
	/* free mw_vec only after the mappings above have been torn down */
	kfree(nt->mw_vec);
err:
	kfree(nt);
	return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}

static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	void *cb_data;
	unsigned int len;
	unsigned long irqflags;

	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))
			break;

		entry->rx_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		cb_data = entry->cb_data;
		len = entry->len;

		list_move_tail(&entry->entry, &qp->rx_free_q);

		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

		if (qp->rx_handler && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, cb_data, len);

		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
	}

	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off, len;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	len = entry->len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;
		return -EAGAIN;
	}

	entry->rx_hdr = hdr;
	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		entry->len = -EIO;
		entry->flags |= DESC_DONE_FLAG;

		ntb_complete_rxc(qp);
	} else {
		dev_dbg(&qp->ndev->pdev->dev,
			"RX OK index %u ver %u size %d into buf size %d\n",
			qp->rx_index, hdr->ver, hdr->len, entry->len);

		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;

		entry->len = hdr->len;

		ntb_async_rx(entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (i && qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		tasklet_schedule(&qp->rxc_db_work);
	}
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
	/*
	 * Using non-temporal mov to improve performance on non-cached
	 * writes, even though we aren't actually copying from user space.
	 */
	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
	memcpy_toio(offset, entry->buf, entry->len);
#endif

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

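/*
 * Frame layout shared by the TX path above and the RX path (illustrative):
 *
 *	offset = tx_mw + tx_max_frame * tx_index
 *
 *	+----------------------------------------+---------------------------+
 *	| payload                                | struct ntb_payload_header |
 *	| (up to tx_max_frame - sizeof(header))  | { ver, len, flags }       |
 *	+----------------------------------------+---------------------------+
 *
 * The trailing header is written last: the payload is copied (ordered with
 * wmb() in the CPU path) before ntb_tx_copy_callback() sets DESC_DONE_FLAG,
 * so the receiver only sees the flag once the payload in front of it has
 * been written.
 */
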
static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: &struct device pointer of the NTB transport client device
 * @handlers: receive, transmit and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.   The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	dma_cap_mask_t dma_mask;
	int node;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	node = dev_to_node(&ndev->dev);

	free_queue = ffs(nt->qp_bitmap_free);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);

	if (use_dma) {
		qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
						   (void *)(unsigned long)node);
		if (!qp->dma_chan)
			dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
	} else {
		qp->dma_chan = NULL;
	}
	dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->dma_chan)
		dma_release_channel(qp->dma_chan);
	nt->qp_bitmap_free |= qp_bit;
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);

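/*
 * Usage sketch (illustrative; the foo_* names are hypothetical): a client's
 * probe routine typically creates its queue against the client device,
 * posts receive buffers and then declares itself ready:
 *
 *	static const struct ntb_queue_handlers foo_handlers = {
 *		.rx_handler	= foo_rx_handler,
 *		.tx_handler	= foo_tx_handler,
 *		.event_handler	= foo_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(foo_priv, client_dev, &foo_handlers);
 *	if (!qp)
 *		return -EIO;
 *	// ... post initial rx buffers with ntb_transport_rx_enqueue() ...
 *	ntb_transport_link_up(qp);
 */
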
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;
	u64 qp_bit;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_disable(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	qp->transport->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue to be freed
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL error value on error, or void* for success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which a NTB
 * payload can be received into.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

	tasklet_schedule(&qp->rxc_db_work);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

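/*
 * RX posting sketch (illustrative): receive buffers are enqueued up front
 * and re-posted from the client's rx_handler once each payload has been
 * consumed, e.g.
 *
 *	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 *		buf = kmalloc(ntb_transport_max_size(qp), GFP_KERNEL);
 *		if (!buf)
 *			break;
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf,
 *					      ntb_transport_max_size(qp));
 *		if (rc) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 */
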
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which a NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !qp->link_is_up || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -ENOMEM;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);

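/*
 * TX sketch (illustrative): the payload must fit within
 * ntb_transport_max_size(qp), and the buffer must remain untouched until
 * the client's tx_handler reports completion:
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EINVAL;
 *
 *	rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
 *	// -EAGAIN means the ring is full: back off and retry later.
 *	// On success, buf is handed back through the tx_handler callback.
 */
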
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int val;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;
	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);

static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);