1 /*- 2 * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org> 3 * Copyright (C) 2013 Intel Corporation 4 * Copyright (C) 2015 EMC Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 /* 30 * The Non-Transparent Bridge (NTB) is a device that allows you to connect 31 * two or more systems using a PCI-e links, providing remote memory access. 32 * 33 * This module contains a transport for sending and receiving messages by 34 * writing to remote memory window(s) provided by underlying NTB device. 35 * 36 * NOTE: Much of the code in this module is shared with Linux. Any patches may 37 * be picked up and redistributed in Linux with a dual GPL/BSD license. 38 */ 39 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <sys/param.h> 44 #include <sys/kernel.h> 45 #include <sys/systm.h> 46 #include <sys/bus.h> 47 #include <sys/ktr.h> 48 #include <sys/limits.h> 49 #include <sys/lock.h> 50 #include <sys/malloc.h> 51 #include <sys/mbuf.h> 52 #include <sys/module.h> 53 #include <sys/mutex.h> 54 #include <sys/queue.h> 55 #include <sys/sysctl.h> 56 #include <sys/taskqueue.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 61 #include <machine/bus.h> 62 63 #include "ntb.h" 64 #include "ntb_transport.h" 65 66 #define KTR_NTB KTR_SPARE3 67 68 #define NTB_TRANSPORT_VERSION 4 69 70 static SYSCTL_NODE(_hw, OID_AUTO, ntb_transport, CTLFLAG_RW, 0, "ntb_transport"); 71 72 static unsigned g_ntb_transport_debug_level; 73 SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, debug_level, CTLFLAG_RWTUN, 74 &g_ntb_transport_debug_level, 0, 75 "ntb_transport log level -- higher is more verbose"); 76 #define ntb_printf(lvl, ...) do { \ 77 if ((lvl) <= g_ntb_transport_debug_level) { \ 78 printf(__VA_ARGS__); \ 79 } \ 80 } while (0) 81 82 static unsigned transport_mtu = 0x10000; 83 84 static uint64_t max_mw_size; 85 SYSCTL_UQUAD(_hw_ntb_transport, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0, 86 "If enabled (non-zero), limit the size of large memory windows. " 87 "Both sides of the NTB MUST set the same value here."); 88 89 static unsigned enable_xeon_watchdog; 90 SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN, 91 &enable_xeon_watchdog, 0, "If non-zero, write a register every second to " 92 "keep a watchdog from tearing down the NTB link"); 93 94 STAILQ_HEAD(ntb_queue_list, ntb_queue_entry); 95 96 typedef uint32_t ntb_q_idx_t; 97 98 struct ntb_queue_entry { 99 /* ntb_queue list reference */ 100 STAILQ_ENTRY(ntb_queue_entry) entry; 101 102 /* info on data to be transferred */ 103 void *cb_data; 104 void *buf; 105 uint32_t len; 106 uint32_t flags; 107 108 struct ntb_transport_qp *qp; 109 struct ntb_payload_header *x_hdr; 110 ntb_q_idx_t index; 111 }; 112 113 struct ntb_rx_info { 114 ntb_q_idx_t entry; 115 }; 116 117 struct ntb_transport_qp { 118 struct ntb_transport_ctx *transport; 119 device_t dev; 120 121 void *cb_data; 122 123 bool client_ready; 124 volatile bool link_is_up; 125 uint8_t qp_num; /* Only 64 QPs are allowed. 0-63 */ 126 127 struct ntb_rx_info *rx_info; 128 struct ntb_rx_info *remote_rx_info; 129 130 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, 131 void *data, int len); 132 struct ntb_queue_list tx_free_q; 133 struct mtx ntb_tx_free_q_lock; 134 caddr_t tx_mw; 135 bus_addr_t tx_mw_phys; 136 ntb_q_idx_t tx_index; 137 ntb_q_idx_t tx_max_entry; 138 uint64_t tx_max_frame; 139 140 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 141 void *data, int len); 142 struct ntb_queue_list rx_post_q; 143 struct ntb_queue_list rx_pend_q; 144 /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ 145 struct mtx ntb_rx_q_lock; 146 struct task rxc_db_work; 147 struct taskqueue *rxc_tq; 148 caddr_t rx_buff; 149 ntb_q_idx_t rx_index; 150 ntb_q_idx_t rx_max_entry; 151 uint64_t rx_max_frame; 152 153 void (*event_handler)(void *data, enum ntb_link_event status); 154 struct callout link_work; 155 struct callout rx_full; 156 157 uint64_t last_rx_no_buf; 158 159 /* Stats */ 160 uint64_t rx_bytes; 161 uint64_t rx_pkts; 162 uint64_t rx_ring_empty; 163 uint64_t rx_err_no_buf; 164 uint64_t rx_err_oflow; 165 uint64_t rx_err_ver; 166 uint64_t tx_bytes; 167 uint64_t tx_pkts; 168 uint64_t tx_ring_full; 169 uint64_t tx_err_no_buf; 170 171 struct mtx tx_lock; 172 }; 173 174 struct ntb_transport_mw { 175 vm_paddr_t phys_addr; 176 size_t phys_size; 177 size_t xlat_align; 178 size_t xlat_align_size; 179 bus_addr_t addr_limit; 180 /* Tx buff is off vbase / phys_addr */ 181 caddr_t vbase; 182 size_t xlat_size; 183 size_t buff_size; 184 /* Rx buff is off virt_addr / dma_addr */ 185 caddr_t virt_addr; 186 bus_addr_t dma_addr; 187 }; 188 189 struct ntb_transport_child { 190 device_t dev; 191 int qpoff; 192 int qpcnt; 193 struct ntb_transport_child *next; 194 }; 195 196 struct ntb_transport_ctx { 197 device_t dev; 198 struct ntb_transport_child *child; 199 struct ntb_transport_mw *mw_vec; 200 struct ntb_transport_qp *qp_vec; 201 unsigned mw_count; 202 unsigned qp_count; 203 uint64_t qp_bitmap; 204 volatile bool link_is_up; 205 struct callout link_work; 206 struct callout link_watchdog; 207 struct task link_cleanup; 208 }; 209 210 enum { 211 NTBT_DESC_DONE_FLAG = 1 << 0, 212 NTBT_LINK_DOWN_FLAG = 1 << 1, 213 }; 214 215 struct ntb_payload_header { 216 ntb_q_idx_t ver; 217 uint32_t len; 218 uint32_t flags; 219 }; 220 221 enum { 222 /* 223 * The order of this enum is part of the remote protocol. Do not 224 * reorder without bumping protocol version (and it's probably best 225 * to keep the protocol in lock-step with the Linux NTB driver. 226 */ 227 NTBT_VERSION = 0, 228 NTBT_QP_LINKS, 229 NTBT_NUM_QPS, 230 NTBT_NUM_MWS, 231 /* 232 * N.B.: transport_link_work assumes MW1 enums = MW0 + 2. 233 */ 234 NTBT_MW0_SZ_HIGH, 235 NTBT_MW0_SZ_LOW, 236 NTBT_MW1_SZ_HIGH, 237 NTBT_MW1_SZ_LOW, 238 239 /* 240 * Some NTB-using hardware have a watchdog to work around NTB hangs; if 241 * a register or doorbell isn't written every few seconds, the link is 242 * torn down. Write an otherwise unused register every few seconds to 243 * work around this watchdog. 244 */ 245 NTBT_WATCHDOG_SPAD = 15 246 }; 247 248 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) 249 #define NTB_QP_DEF_NUM_ENTRIES 100 250 #define NTB_LINK_DOWN_TIMEOUT 10 251 252 static int ntb_transport_probe(device_t dev); 253 static int ntb_transport_attach(device_t dev); 254 static int ntb_transport_detach(device_t dev); 255 static void ntb_transport_init_queue(struct ntb_transport_ctx *nt, 256 unsigned int qp_num); 257 static int ntb_process_tx(struct ntb_transport_qp *qp, 258 struct ntb_queue_entry *entry); 259 static void ntb_transport_rxc_db(void *arg, int pending); 260 static int ntb_process_rxc(struct ntb_transport_qp *qp); 261 static void ntb_memcpy_rx(struct ntb_transport_qp *qp, 262 struct ntb_queue_entry *entry, void *offset); 263 static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp, 264 void *data); 265 static void ntb_complete_rxc(struct ntb_transport_qp *qp); 266 static void ntb_transport_doorbell_callback(void *data, uint32_t vector); 267 static void ntb_transport_event_callback(void *data); 268 static void ntb_transport_link_work(void *arg); 269 static int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size); 270 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw); 271 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, 272 unsigned int qp_num); 273 static void ntb_qp_link_work(void *arg); 274 static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt); 275 static void ntb_transport_link_cleanup_work(void *, int); 276 static void ntb_qp_link_down(struct ntb_transport_qp *qp); 277 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp); 278 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp); 279 static void ntb_send_link_down(struct ntb_transport_qp *qp); 280 static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, 281 struct ntb_queue_list *list); 282 static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock, 283 struct ntb_queue_list *list); 284 static struct ntb_queue_entry *ntb_list_mv(struct mtx *lock, 285 struct ntb_queue_list *from, struct ntb_queue_list *to); 286 static void xeon_link_watchdog_hb(void *); 287 288 static const struct ntb_ctx_ops ntb_transport_ops = { 289 .link_event = ntb_transport_event_callback, 290 .db_event = ntb_transport_doorbell_callback, 291 }; 292 293 MALLOC_DEFINE(M_NTB_T, "ntb_transport", "ntb transport driver"); 294 295 static inline void 296 iowrite32(uint32_t val, void *addr) 297 { 298 299 bus_space_write_4(X86_BUS_SPACE_MEM, 0/* HACK */, (uintptr_t)addr, 300 val); 301 } 302 303 /* Transport Init and teardown */ 304 305 static void 306 xeon_link_watchdog_hb(void *arg) 307 { 308 struct ntb_transport_ctx *nt; 309 310 nt = arg; 311 ntb_spad_write(nt->dev, NTBT_WATCHDOG_SPAD, 0); 312 callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt); 313 } 314 315 static int 316 ntb_transport_probe(device_t dev) 317 { 318 319 device_set_desc(dev, "NTB Transport"); 320 return (0); 321 } 322 323 static int 324 ntb_transport_attach(device_t dev) 325 { 326 struct ntb_transport_ctx *nt = device_get_softc(dev); 327 struct ntb_transport_child **cpp = &nt->child; 328 struct ntb_transport_child *nc; 329 struct ntb_transport_mw *mw; 330 uint64_t db_bitmap; 331 int rc, i, db_count, spad_count, qp, qpu, qpo, qpt; 332 char cfg[128] = ""; 333 char buf[32]; 334 char *n, *np, *c, *name; 335 336 nt->dev = dev; 337 nt->mw_count = ntb_mw_count(dev); 338 spad_count = ntb_spad_count(dev); 339 db_bitmap = ntb_db_valid_mask(dev); 340 db_count = flsll(db_bitmap); 341 KASSERT(db_bitmap == (1 << db_count) - 1, 342 ("Doorbells are not sequential (%jx).\n", db_bitmap)); 343 344 device_printf(dev, "%d memory windows, %d scratchpads, " 345 "%d doorbells\n", nt->mw_count, spad_count, db_count); 346 347 if (nt->mw_count == 0) { 348 device_printf(dev, "At least 1 memory window required.\n"); 349 return (ENXIO); 350 } 351 if (spad_count < 6) { 352 device_printf(dev, "At least 6 scratchpads required.\n"); 353 return (ENXIO); 354 } 355 if (spad_count < 4 + 2 * nt->mw_count) { 356 nt->mw_count = (spad_count - 4) / 2; 357 device_printf(dev, "Scratchpads enough only for %d " 358 "memory windows.\n", nt->mw_count); 359 } 360 if (db_bitmap == 0) { 361 device_printf(dev, "At least one doorbell required.\n"); 362 return (ENXIO); 363 } 364 365 nt->mw_vec = malloc(nt->mw_count * sizeof(*nt->mw_vec), M_NTB_T, 366 M_WAITOK | M_ZERO); 367 for (i = 0; i < nt->mw_count; i++) { 368 mw = &nt->mw_vec[i]; 369 370 rc = ntb_mw_get_range(dev, i, &mw->phys_addr, &mw->vbase, 371 &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size, 372 &mw->addr_limit); 373 if (rc != 0) 374 goto err; 375 376 mw->buff_size = 0; 377 mw->xlat_size = 0; 378 mw->virt_addr = NULL; 379 mw->dma_addr = 0; 380 381 rc = ntb_mw_set_wc(dev, i, VM_MEMATTR_WRITE_COMBINING); 382 if (rc) 383 ntb_printf(0, "Unable to set mw%d caching\n", i); 384 } 385 386 qpu = 0; 387 qpo = imin(db_count, nt->mw_count); 388 qpt = db_count; 389 390 snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev), 391 device_get_unit(dev)); 392 TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg)); 393 n = cfg; 394 i = 0; 395 while ((c = strsep(&n, ",")) != NULL) { 396 np = c; 397 name = strsep(&np, ":"); 398 if (name != NULL && name[0] == 0) 399 name = NULL; 400 qp = (np && np[0] != 0) ? strtol(np, NULL, 10) : qpo - qpu; 401 if (qp <= 0) 402 qp = 1; 403 404 if (qp > qpt - qpu) { 405 device_printf(dev, "Not enough resources for config\n"); 406 break; 407 } 408 409 nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO); 410 nc->qpoff = qpu; 411 nc->qpcnt = qp; 412 nc->dev = device_add_child(dev, name, -1); 413 if (nc->dev == NULL) { 414 device_printf(dev, "Can not add child.\n"); 415 break; 416 } 417 device_set_ivars(nc->dev, nc); 418 *cpp = nc; 419 cpp = &nc->next; 420 421 if (bootverbose) { 422 device_printf(dev, "%d \"%s\": queues %d", 423 i, name, qpu); 424 if (qp > 1) 425 printf("-%d", qpu + qp - 1); 426 printf("\n"); 427 } 428 429 qpu += qp; 430 i++; 431 } 432 nt->qp_count = qpu; 433 434 nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_T, 435 M_WAITOK | M_ZERO); 436 437 for (i = 0; i < nt->qp_count; i++) 438 ntb_transport_init_queue(nt, i); 439 440 callout_init(&nt->link_work, 0); 441 callout_init(&nt->link_watchdog, 0); 442 TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt); 443 444 rc = ntb_set_ctx(dev, nt, &ntb_transport_ops); 445 if (rc != 0) 446 goto err; 447 448 nt->link_is_up = false; 449 ntb_link_enable(dev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); 450 451 if (enable_xeon_watchdog != 0) 452 callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt); 453 454 bus_generic_attach(dev); 455 return (0); 456 457 err: 458 free(nt->qp_vec, M_NTB_T); 459 free(nt->mw_vec, M_NTB_T); 460 return (rc); 461 } 462 463 static int 464 ntb_transport_detach(device_t dev) 465 { 466 struct ntb_transport_ctx *nt = device_get_softc(dev); 467 struct ntb_transport_child **cpp = &nt->child; 468 struct ntb_transport_child *nc; 469 int error = 0, i; 470 471 while ((nc = *cpp) != NULL) { 472 *cpp = (*cpp)->next; 473 error = device_delete_child(dev, nc->dev); 474 if (error) 475 break; 476 free(nc, M_DEVBUF); 477 } 478 KASSERT(nt->qp_bitmap == 0, 479 ("Some queues not freed on detach (%jx)", nt->qp_bitmap)); 480 481 ntb_transport_link_cleanup(nt); 482 taskqueue_drain(taskqueue_swi, &nt->link_cleanup); 483 callout_drain(&nt->link_work); 484 callout_drain(&nt->link_watchdog); 485 486 ntb_link_disable(dev); 487 ntb_clear_ctx(dev); 488 489 for (i = 0; i < nt->mw_count; i++) 490 ntb_free_mw(nt, i); 491 492 free(nt->qp_vec, M_NTB_T); 493 free(nt->mw_vec, M_NTB_T); 494 return (0); 495 } 496 497 int 498 ntb_transport_queue_count(device_t dev) 499 { 500 struct ntb_transport_child *nc = device_get_ivars(dev); 501 502 return (nc->qpcnt); 503 } 504 505 static void 506 ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num) 507 { 508 struct ntb_transport_mw *mw; 509 struct ntb_transport_qp *qp; 510 vm_paddr_t mw_base; 511 uint64_t mw_size, qp_offset; 512 size_t tx_size; 513 unsigned num_qps_mw, mw_num, mw_count; 514 515 mw_count = nt->mw_count; 516 mw_num = QP_TO_MW(nt, qp_num); 517 mw = &nt->mw_vec[mw_num]; 518 519 qp = &nt->qp_vec[qp_num]; 520 qp->qp_num = qp_num; 521 qp->transport = nt; 522 qp->dev = nt->dev; 523 qp->client_ready = false; 524 qp->event_handler = NULL; 525 ntb_qp_link_down_reset(qp); 526 527 if (mw_num < nt->qp_count % mw_count) 528 num_qps_mw = nt->qp_count / mw_count + 1; 529 else 530 num_qps_mw = nt->qp_count / mw_count; 531 532 mw_base = mw->phys_addr; 533 mw_size = mw->phys_size; 534 535 tx_size = mw_size / num_qps_mw; 536 qp_offset = tx_size * (qp_num / mw_count); 537 538 qp->tx_mw = mw->vbase + qp_offset; 539 KASSERT(qp->tx_mw != NULL, ("uh oh?")); 540 541 /* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */ 542 qp->tx_mw_phys = mw_base + qp_offset; 543 KASSERT(qp->tx_mw_phys != 0, ("uh oh?")); 544 545 tx_size -= sizeof(struct ntb_rx_info); 546 qp->rx_info = (void *)(qp->tx_mw + tx_size); 547 548 /* Due to house-keeping, there must be at least 2 buffs */ 549 qp->tx_max_frame = qmin(transport_mtu, tx_size / 2); 550 qp->tx_max_entry = tx_size / qp->tx_max_frame; 551 552 callout_init(&qp->link_work, 0); 553 callout_init(&qp->rx_full, 1); 554 555 mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN); 556 mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN); 557 mtx_init(&qp->tx_lock, "ntb transport tx", NULL, MTX_DEF); 558 TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp); 559 qp->rxc_tq = taskqueue_create("ntbt_rx", M_WAITOK, 560 taskqueue_thread_enqueue, &qp->rxc_tq); 561 taskqueue_start_threads(&qp->rxc_tq, 1, PI_NET, "%s rx%d", 562 device_get_nameunit(nt->dev), qp_num); 563 564 STAILQ_INIT(&qp->rx_post_q); 565 STAILQ_INIT(&qp->rx_pend_q); 566 STAILQ_INIT(&qp->tx_free_q); 567 } 568 569 void 570 ntb_transport_free_queue(struct ntb_transport_qp *qp) 571 { 572 struct ntb_transport_ctx *nt = qp->transport; 573 struct ntb_queue_entry *entry; 574 575 callout_drain(&qp->link_work); 576 577 ntb_db_set_mask(qp->dev, 1ull << qp->qp_num); 578 taskqueue_drain_all(qp->rxc_tq); 579 taskqueue_free(qp->rxc_tq); 580 581 qp->cb_data = NULL; 582 qp->rx_handler = NULL; 583 qp->tx_handler = NULL; 584 qp->event_handler = NULL; 585 586 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) 587 free(entry, M_NTB_T); 588 589 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) 590 free(entry, M_NTB_T); 591 592 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 593 free(entry, M_NTB_T); 594 595 nt->qp_bitmap &= ~(1 << qp->qp_num); 596 } 597 598 /** 599 * ntb_transport_create_queue - Create a new NTB transport layer queue 600 * @rx_handler: receive callback function 601 * @tx_handler: transmit callback function 602 * @event_handler: event callback function 603 * 604 * Create a new NTB transport layer queue and provide the queue with a callback 605 * routine for both transmit and receive. The receive callback routine will be 606 * used to pass up data when the transport has received it on the queue. The 607 * transmit callback routine will be called when the transport has completed the 608 * transmission of the data on the queue and the data is ready to be freed. 609 * 610 * RETURNS: pointer to newly created ntb_queue, NULL on error. 611 */ 612 struct ntb_transport_qp * 613 ntb_transport_create_queue(device_t dev, int q, 614 const struct ntb_queue_handlers *handlers, void *data) 615 { 616 struct ntb_transport_child *nc = device_get_ivars(dev); 617 struct ntb_transport_ctx *nt = device_get_softc(device_get_parent(dev)); 618 struct ntb_queue_entry *entry; 619 struct ntb_transport_qp *qp; 620 int i; 621 622 if (q < 0 || q >= nc->qpcnt) 623 return (NULL); 624 625 qp = &nt->qp_vec[nc->qpoff + q]; 626 nt->qp_bitmap |= (1 << qp->qp_num); 627 qp->cb_data = data; 628 qp->rx_handler = handlers->rx_handler; 629 qp->tx_handler = handlers->tx_handler; 630 qp->event_handler = handlers->event_handler; 631 632 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 633 entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO); 634 entry->cb_data = data; 635 entry->buf = NULL; 636 entry->len = transport_mtu; 637 entry->qp = qp; 638 ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q); 639 } 640 641 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 642 entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO); 643 entry->qp = qp; 644 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 645 } 646 647 ntb_db_clear(dev, 1ull << qp->qp_num); 648 return (qp); 649 } 650 651 /** 652 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue 653 * @qp: NTB transport layer queue to be enabled 654 * 655 * Notify NTB transport layer of client readiness to use queue 656 */ 657 void 658 ntb_transport_link_up(struct ntb_transport_qp *qp) 659 { 660 struct ntb_transport_ctx *nt = qp->transport; 661 662 qp->client_ready = true; 663 664 ntb_printf(2, "qp %d client ready\n", qp->qp_num); 665 666 if (nt->link_is_up) 667 callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); 668 } 669 670 671 672 /* Transport Tx */ 673 674 /** 675 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry 676 * @qp: NTB transport layer queue the entry is to be enqueued on 677 * @cb: per buffer pointer for callback function to use 678 * @data: pointer to data buffer that will be sent 679 * @len: length of the data buffer 680 * 681 * Enqueue a new transmit buffer onto the transport queue from which a NTB 682 * payload will be transmitted. This assumes that a lock is being held to 683 * serialize access to the qp. 684 * 685 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 686 */ 687 int 688 ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 689 unsigned int len) 690 { 691 struct ntb_queue_entry *entry; 692 int rc; 693 694 if (!qp->link_is_up || len == 0) { 695 CTR0(KTR_NTB, "TX: link not up"); 696 return (EINVAL); 697 } 698 699 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 700 if (entry == NULL) { 701 CTR0(KTR_NTB, "TX: could not get entry from tx_free_q"); 702 qp->tx_err_no_buf++; 703 return (EBUSY); 704 } 705 CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry); 706 707 entry->cb_data = cb; 708 entry->buf = data; 709 entry->len = len; 710 entry->flags = 0; 711 712 mtx_lock(&qp->tx_lock); 713 rc = ntb_process_tx(qp, entry); 714 mtx_unlock(&qp->tx_lock); 715 if (rc != 0) { 716 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 717 CTR1(KTR_NTB, 718 "TX: process_tx failed. Returning entry %p to tx_free_q", 719 entry); 720 } 721 return (rc); 722 } 723 724 static void 725 ntb_tx_copy_callback(void *data) 726 { 727 struct ntb_queue_entry *entry = data; 728 struct ntb_transport_qp *qp = entry->qp; 729 struct ntb_payload_header *hdr = entry->x_hdr; 730 731 iowrite32(entry->flags | NTBT_DESC_DONE_FLAG, &hdr->flags); 732 CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr); 733 734 ntb_peer_db_set(qp->dev, 1ull << qp->qp_num); 735 736 /* 737 * The entry length can only be zero if the packet is intended to be a 738 * "link down" or similar. Since no payload is being sent in these 739 * cases, there is nothing to add to the completion queue. 740 */ 741 if (entry->len > 0) { 742 qp->tx_bytes += entry->len; 743 744 if (qp->tx_handler) 745 qp->tx_handler(qp, qp->cb_data, entry->buf, 746 entry->len); 747 else 748 m_freem(entry->buf); 749 entry->buf = NULL; 750 } 751 752 CTR3(KTR_NTB, 753 "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning " 754 "to tx_free_q", entry, hdr->ver, hdr->flags); 755 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 756 } 757 758 static void 759 ntb_memcpy_tx(struct ntb_queue_entry *entry, void *offset) 760 { 761 762 CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset); 763 if (entry->buf != NULL) { 764 m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset); 765 766 /* 767 * Ensure that the data is fully copied before setting the 768 * flags 769 */ 770 wmb(); 771 } 772 773 ntb_tx_copy_callback(entry); 774 } 775 776 static void 777 ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) 778 { 779 struct ntb_payload_header *hdr; 780 void *offset; 781 782 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; 783 hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame - 784 sizeof(struct ntb_payload_header)); 785 entry->x_hdr = hdr; 786 787 iowrite32(entry->len, &hdr->len); 788 iowrite32(qp->tx_pkts, &hdr->ver); 789 790 ntb_memcpy_tx(entry, offset); 791 } 792 793 static int 794 ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) 795 { 796 797 CTR3(KTR_NTB, 798 "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u", 799 qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry); 800 if (qp->tx_index == qp->remote_rx_info->entry) { 801 CTR0(KTR_NTB, "TX: ring full"); 802 qp->tx_ring_full++; 803 return (EAGAIN); 804 } 805 806 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { 807 if (qp->tx_handler != NULL) 808 qp->tx_handler(qp, qp->cb_data, entry->buf, 809 EIO); 810 else 811 m_freem(entry->buf); 812 813 entry->buf = NULL; 814 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 815 CTR1(KTR_NTB, 816 "TX: frame too big. returning entry %p to tx_free_q", 817 entry); 818 return (0); 819 } 820 CTR2(KTR_NTB, "TX: copying entry %p to index %u", entry, qp->tx_index); 821 ntb_async_tx(qp, entry); 822 823 qp->tx_index++; 824 qp->tx_index %= qp->tx_max_entry; 825 826 qp->tx_pkts++; 827 828 return (0); 829 } 830 831 /* Transport Rx */ 832 static void 833 ntb_transport_rxc_db(void *arg, int pending __unused) 834 { 835 struct ntb_transport_qp *qp = arg; 836 int rc; 837 838 CTR0(KTR_NTB, "RX: transport_rx"); 839 again: 840 while ((rc = ntb_process_rxc(qp)) == 0) 841 ; 842 CTR1(KTR_NTB, "RX: process_rxc returned %d", rc); 843 844 if ((ntb_db_read(qp->dev) & (1ull << qp->qp_num)) != 0) { 845 /* If db is set, clear it and check queue once more. */ 846 ntb_db_clear(qp->dev, 1ull << qp->qp_num); 847 goto again; 848 } 849 } 850 851 static int 852 ntb_process_rxc(struct ntb_transport_qp *qp) 853 { 854 struct ntb_payload_header *hdr; 855 struct ntb_queue_entry *entry; 856 caddr_t offset; 857 858 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; 859 hdr = (void *)(offset + qp->rx_max_frame - 860 sizeof(struct ntb_payload_header)); 861 862 CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index); 863 if ((hdr->flags & NTBT_DESC_DONE_FLAG) == 0) { 864 CTR0(KTR_NTB, "RX: hdr not done"); 865 qp->rx_ring_empty++; 866 return (EAGAIN); 867 } 868 869 if ((hdr->flags & NTBT_LINK_DOWN_FLAG) != 0) { 870 CTR0(KTR_NTB, "RX: link down"); 871 ntb_qp_link_down(qp); 872 hdr->flags = 0; 873 return (EAGAIN); 874 } 875 876 if (hdr->ver != (uint32_t)qp->rx_pkts) { 877 CTR2(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). " 878 "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts); 879 qp->rx_err_ver++; 880 return (EIO); 881 } 882 883 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); 884 if (entry == NULL) { 885 qp->rx_err_no_buf++; 886 CTR0(KTR_NTB, "RX: No entries in rx_pend_q"); 887 return (EAGAIN); 888 } 889 callout_stop(&qp->rx_full); 890 CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry); 891 892 entry->x_hdr = hdr; 893 entry->index = qp->rx_index; 894 895 if (hdr->len > entry->len) { 896 CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju", 897 (uintmax_t)hdr->len, (uintmax_t)entry->len); 898 qp->rx_err_oflow++; 899 900 entry->len = -EIO; 901 entry->flags |= NTBT_DESC_DONE_FLAG; 902 903 ntb_complete_rxc(qp); 904 } else { 905 qp->rx_bytes += hdr->len; 906 qp->rx_pkts++; 907 908 CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts); 909 910 entry->len = hdr->len; 911 912 ntb_memcpy_rx(qp, entry, offset); 913 } 914 915 qp->rx_index++; 916 qp->rx_index %= qp->rx_max_entry; 917 return (0); 918 } 919 920 static void 921 ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, 922 void *offset) 923 { 924 struct ifnet *ifp = entry->cb_data; 925 unsigned int len = entry->len; 926 927 CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset); 928 929 entry->buf = (void *)m_devget(offset, len, 0, ifp, NULL); 930 if (entry->buf == NULL) 931 entry->len = -ENOMEM; 932 933 /* Ensure that the data is globally visible before clearing the flag */ 934 wmb(); 935 936 CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, entry->buf); 937 ntb_rx_copy_callback(qp, entry); 938 } 939 940 static inline void 941 ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data) 942 { 943 struct ntb_queue_entry *entry; 944 945 entry = data; 946 entry->flags |= NTBT_DESC_DONE_FLAG; 947 ntb_complete_rxc(qp); 948 } 949 950 static void 951 ntb_complete_rxc(struct ntb_transport_qp *qp) 952 { 953 struct ntb_queue_entry *entry; 954 struct mbuf *m; 955 unsigned len; 956 957 CTR0(KTR_NTB, "RX: rx_completion_task"); 958 959 mtx_lock_spin(&qp->ntb_rx_q_lock); 960 961 while (!STAILQ_EMPTY(&qp->rx_post_q)) { 962 entry = STAILQ_FIRST(&qp->rx_post_q); 963 if ((entry->flags & NTBT_DESC_DONE_FLAG) == 0) 964 break; 965 966 entry->x_hdr->flags = 0; 967 iowrite32(entry->index, &qp->rx_info->entry); 968 969 STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry); 970 971 len = entry->len; 972 m = entry->buf; 973 974 /* 975 * Re-initialize queue_entry for reuse; rx_handler takes 976 * ownership of the mbuf. 977 */ 978 entry->buf = NULL; 979 entry->len = transport_mtu; 980 entry->cb_data = qp->cb_data; 981 982 STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry); 983 984 mtx_unlock_spin(&qp->ntb_rx_q_lock); 985 986 CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m); 987 if (qp->rx_handler != NULL && qp->client_ready) 988 qp->rx_handler(qp, qp->cb_data, m, len); 989 else 990 m_freem(m); 991 992 mtx_lock_spin(&qp->ntb_rx_q_lock); 993 } 994 995 mtx_unlock_spin(&qp->ntb_rx_q_lock); 996 } 997 998 static void 999 ntb_transport_doorbell_callback(void *data, uint32_t vector) 1000 { 1001 struct ntb_transport_ctx *nt = data; 1002 struct ntb_transport_qp *qp; 1003 uint64_t vec_mask; 1004 unsigned qp_num; 1005 1006 vec_mask = ntb_db_vector_mask(nt->dev, vector); 1007 vec_mask &= nt->qp_bitmap; 1008 if ((vec_mask & (vec_mask - 1)) != 0) 1009 vec_mask &= ntb_db_read(nt->dev); 1010 while (vec_mask != 0) { 1011 qp_num = ffsll(vec_mask) - 1; 1012 1013 qp = &nt->qp_vec[qp_num]; 1014 if (qp->link_is_up) 1015 taskqueue_enqueue(qp->rxc_tq, &qp->rxc_db_work); 1016 1017 vec_mask &= ~(1ull << qp_num); 1018 } 1019 } 1020 1021 /* Link Event handler */ 1022 static void 1023 ntb_transport_event_callback(void *data) 1024 { 1025 struct ntb_transport_ctx *nt = data; 1026 1027 if (ntb_link_is_up(nt->dev, NULL, NULL)) { 1028 ntb_printf(1, "HW link up\n"); 1029 callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); 1030 } else { 1031 ntb_printf(1, "HW link down\n"); 1032 taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup); 1033 } 1034 } 1035 1036 /* Link bring up */ 1037 static void 1038 ntb_transport_link_work(void *arg) 1039 { 1040 struct ntb_transport_ctx *nt = arg; 1041 device_t dev = nt->dev; 1042 struct ntb_transport_qp *qp; 1043 uint64_t val64, size; 1044 uint32_t val; 1045 unsigned i; 1046 int rc; 1047 1048 /* send the local info, in the opposite order of the way we read it */ 1049 for (i = 0; i < nt->mw_count; i++) { 1050 size = nt->mw_vec[i].phys_size; 1051 1052 if (max_mw_size != 0 && size > max_mw_size) 1053 size = max_mw_size; 1054 1055 ntb_peer_spad_write(dev, NTBT_MW0_SZ_HIGH + (i * 2), 1056 size >> 32); 1057 ntb_peer_spad_write(dev, NTBT_MW0_SZ_LOW + (i * 2), size); 1058 } 1059 ntb_peer_spad_write(dev, NTBT_NUM_MWS, nt->mw_count); 1060 ntb_peer_spad_write(dev, NTBT_NUM_QPS, nt->qp_count); 1061 ntb_peer_spad_write(dev, NTBT_QP_LINKS, 0); 1062 ntb_peer_spad_write(dev, NTBT_VERSION, NTB_TRANSPORT_VERSION); 1063 1064 /* Query the remote side for its info */ 1065 val = 0; 1066 ntb_spad_read(dev, NTBT_VERSION, &val); 1067 if (val != NTB_TRANSPORT_VERSION) 1068 goto out; 1069 1070 ntb_spad_read(dev, NTBT_NUM_QPS, &val); 1071 if (val != nt->qp_count) 1072 goto out; 1073 1074 ntb_spad_read(dev, NTBT_NUM_MWS, &val); 1075 if (val != nt->mw_count) 1076 goto out; 1077 1078 for (i = 0; i < nt->mw_count; i++) { 1079 ntb_spad_read(dev, NTBT_MW0_SZ_HIGH + (i * 2), &val); 1080 val64 = (uint64_t)val << 32; 1081 1082 ntb_spad_read(dev, NTBT_MW0_SZ_LOW + (i * 2), &val); 1083 val64 |= val; 1084 1085 rc = ntb_set_mw(nt, i, val64); 1086 if (rc != 0) 1087 goto free_mws; 1088 } 1089 1090 nt->link_is_up = true; 1091 ntb_printf(1, "transport link up\n"); 1092 1093 for (i = 0; i < nt->qp_count; i++) { 1094 qp = &nt->qp_vec[i]; 1095 1096 ntb_transport_setup_qp_mw(nt, i); 1097 1098 if (qp->client_ready) 1099 callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); 1100 } 1101 1102 return; 1103 1104 free_mws: 1105 for (i = 0; i < nt->mw_count; i++) 1106 ntb_free_mw(nt, i); 1107 out: 1108 if (ntb_link_is_up(dev, NULL, NULL)) 1109 callout_reset(&nt->link_work, 1110 NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt); 1111 } 1112 1113 static int 1114 ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size) 1115 { 1116 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 1117 size_t xlat_size, buff_size; 1118 int rc; 1119 1120 if (size == 0) 1121 return (EINVAL); 1122 1123 xlat_size = roundup(size, mw->xlat_align_size); 1124 buff_size = xlat_size; 1125 1126 /* No need to re-setup */ 1127 if (mw->xlat_size == xlat_size) 1128 return (0); 1129 1130 if (mw->buff_size != 0) 1131 ntb_free_mw(nt, num_mw); 1132 1133 /* Alloc memory for receiving data. Must be aligned */ 1134 mw->xlat_size = xlat_size; 1135 mw->buff_size = buff_size; 1136 1137 mw->virt_addr = contigmalloc(mw->buff_size, M_NTB_T, M_ZERO, 0, 1138 mw->addr_limit, mw->xlat_align, 0); 1139 if (mw->virt_addr == NULL) { 1140 ntb_printf(0, "Unable to allocate MW buffer of size %zu/%zu\n", 1141 mw->buff_size, mw->xlat_size); 1142 mw->xlat_size = 0; 1143 mw->buff_size = 0; 1144 return (ENOMEM); 1145 } 1146 /* TODO: replace with bus_space_* functions */ 1147 mw->dma_addr = vtophys(mw->virt_addr); 1148 1149 /* 1150 * Ensure that the allocation from contigmalloc is aligned as 1151 * requested. XXX: This may not be needed -- brought in for parity 1152 * with the Linux driver. 1153 */ 1154 if (mw->dma_addr % mw->xlat_align != 0) { 1155 ntb_printf(0, 1156 "DMA memory 0x%jx not aligned to BAR size 0x%zx\n", 1157 (uintmax_t)mw->dma_addr, size); 1158 ntb_free_mw(nt, num_mw); 1159 return (ENOMEM); 1160 } 1161 1162 /* Notify HW the memory location of the receive buffer */ 1163 rc = ntb_mw_set_trans(nt->dev, num_mw, mw->dma_addr, mw->xlat_size); 1164 if (rc) { 1165 ntb_printf(0, "Unable to set mw%d translation\n", num_mw); 1166 ntb_free_mw(nt, num_mw); 1167 return (rc); 1168 } 1169 1170 return (0); 1171 } 1172 1173 static void 1174 ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) 1175 { 1176 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 1177 1178 if (mw->virt_addr == NULL) 1179 return; 1180 1181 ntb_mw_clear_trans(nt->dev, num_mw); 1182 contigfree(mw->virt_addr, mw->xlat_size, M_NTB_T); 1183 mw->xlat_size = 0; 1184 mw->buff_size = 0; 1185 mw->virt_addr = NULL; 1186 } 1187 1188 static int 1189 ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num) 1190 { 1191 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; 1192 struct ntb_transport_mw *mw; 1193 void *offset; 1194 ntb_q_idx_t i; 1195 size_t rx_size; 1196 unsigned num_qps_mw, mw_num, mw_count; 1197 1198 mw_count = nt->mw_count; 1199 mw_num = QP_TO_MW(nt, qp_num); 1200 mw = &nt->mw_vec[mw_num]; 1201 1202 if (mw->virt_addr == NULL) 1203 return (ENOMEM); 1204 1205 if (mw_num < nt->qp_count % mw_count) 1206 num_qps_mw = nt->qp_count / mw_count + 1; 1207 else 1208 num_qps_mw = nt->qp_count / mw_count; 1209 1210 rx_size = mw->xlat_size / num_qps_mw; 1211 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); 1212 rx_size -= sizeof(struct ntb_rx_info); 1213 1214 qp->remote_rx_info = (void*)(qp->rx_buff + rx_size); 1215 1216 /* Due to house-keeping, there must be at least 2 buffs */ 1217 qp->rx_max_frame = qmin(transport_mtu, rx_size / 2); 1218 qp->rx_max_entry = rx_size / qp->rx_max_frame; 1219 qp->rx_index = 0; 1220 1221 qp->remote_rx_info->entry = qp->rx_max_entry - 1; 1222 1223 /* Set up the hdr offsets with 0s */ 1224 for (i = 0; i < qp->rx_max_entry; i++) { 1225 offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) - 1226 sizeof(struct ntb_payload_header)); 1227 memset(offset, 0, sizeof(struct ntb_payload_header)); 1228 } 1229 1230 qp->rx_pkts = 0; 1231 qp->tx_pkts = 0; 1232 qp->tx_index = 0; 1233 1234 return (0); 1235 } 1236 1237 static void 1238 ntb_qp_link_work(void *arg) 1239 { 1240 struct ntb_transport_qp *qp = arg; 1241 device_t dev = qp->dev; 1242 struct ntb_transport_ctx *nt = qp->transport; 1243 int i; 1244 uint32_t val; 1245 1246 /* Report queues that are up on our side */ 1247 for (i = 0, val = 0; i < nt->qp_count; i++) { 1248 if (nt->qp_vec[i].client_ready) 1249 val |= (1 << i); 1250 } 1251 ntb_peer_spad_write(dev, NTBT_QP_LINKS, val); 1252 1253 /* See if the remote side is up */ 1254 ntb_spad_read(dev, NTBT_QP_LINKS, &val); 1255 if ((val & (1ull << qp->qp_num)) != 0) { 1256 ntb_printf(2, "qp %d link up\n", qp->qp_num); 1257 qp->link_is_up = true; 1258 1259 if (qp->event_handler != NULL) 1260 qp->event_handler(qp->cb_data, NTB_LINK_UP); 1261 1262 ntb_db_clear_mask(dev, 1ull << qp->qp_num); 1263 } else if (nt->link_is_up) 1264 callout_reset(&qp->link_work, 1265 NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); 1266 } 1267 1268 /* Link down event*/ 1269 static void 1270 ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) 1271 { 1272 struct ntb_transport_qp *qp; 1273 int i; 1274 1275 /* Pass along the info to any clients */ 1276 for (i = 0; i < nt->qp_count; i++) { 1277 if ((nt->qp_bitmap & (1 << i)) != 0) { 1278 qp = &nt->qp_vec[i]; 1279 ntb_qp_link_cleanup(qp); 1280 callout_drain(&qp->link_work); 1281 } 1282 } 1283 1284 if (!nt->link_is_up) 1285 callout_drain(&nt->link_work); 1286 1287 /* 1288 * The scratchpad registers keep the values if the remote side 1289 * goes down, blast them now to give them a sane value the next 1290 * time they are accessed 1291 */ 1292 ntb_spad_clear(nt->dev); 1293 } 1294 1295 static void 1296 ntb_transport_link_cleanup_work(void *arg, int pending __unused) 1297 { 1298 1299 ntb_transport_link_cleanup(arg); 1300 } 1301 1302 static void 1303 ntb_qp_link_down(struct ntb_transport_qp *qp) 1304 { 1305 1306 ntb_qp_link_cleanup(qp); 1307 } 1308 1309 static void 1310 ntb_qp_link_down_reset(struct ntb_transport_qp *qp) 1311 { 1312 1313 qp->link_is_up = false; 1314 ntb_db_set_mask(qp->dev, 1ull << qp->qp_num); 1315 1316 qp->tx_index = qp->rx_index = 0; 1317 qp->tx_bytes = qp->rx_bytes = 0; 1318 qp->tx_pkts = qp->rx_pkts = 0; 1319 1320 qp->rx_ring_empty = 0; 1321 qp->tx_ring_full = 0; 1322 1323 qp->rx_err_no_buf = qp->tx_err_no_buf = 0; 1324 qp->rx_err_oflow = qp->rx_err_ver = 0; 1325 } 1326 1327 static void 1328 ntb_qp_link_cleanup(struct ntb_transport_qp *qp) 1329 { 1330 1331 callout_drain(&qp->link_work); 1332 ntb_qp_link_down_reset(qp); 1333 1334 if (qp->event_handler != NULL) 1335 qp->event_handler(qp->cb_data, NTB_LINK_DOWN); 1336 } 1337 1338 /* Link commanded down */ 1339 /** 1340 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data 1341 * @qp: NTB transport layer queue to be disabled 1342 * 1343 * Notify NTB transport layer of client's desire to no longer receive data on 1344 * transport queue specified. It is the client's responsibility to ensure all 1345 * entries on queue are purged or otherwise handled appropriately. 1346 */ 1347 void 1348 ntb_transport_link_down(struct ntb_transport_qp *qp) 1349 { 1350 struct ntb_transport_ctx *nt = qp->transport; 1351 int i; 1352 uint32_t val; 1353 1354 qp->client_ready = false; 1355 for (i = 0, val = 0; i < nt->qp_count; i++) { 1356 if (nt->qp_vec[i].client_ready) 1357 val |= (1 << i); 1358 } 1359 ntb_peer_spad_write(qp->dev, NTBT_QP_LINKS, val); 1360 1361 if (qp->link_is_up) 1362 ntb_send_link_down(qp); 1363 else 1364 callout_drain(&qp->link_work); 1365 } 1366 1367 /** 1368 * ntb_transport_link_query - Query transport link state 1369 * @qp: NTB transport layer queue to be queried 1370 * 1371 * Query connectivity to the remote system of the NTB transport queue 1372 * 1373 * RETURNS: true for link up or false for link down 1374 */ 1375 bool 1376 ntb_transport_link_query(struct ntb_transport_qp *qp) 1377 { 1378 1379 return (qp->link_is_up); 1380 } 1381 1382 static void 1383 ntb_send_link_down(struct ntb_transport_qp *qp) 1384 { 1385 struct ntb_queue_entry *entry; 1386 int i, rc; 1387 1388 if (!qp->link_is_up) 1389 return; 1390 1391 for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) { 1392 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 1393 if (entry != NULL) 1394 break; 1395 pause("NTB Wait for link down", hz / 10); 1396 } 1397 1398 if (entry == NULL) 1399 return; 1400 1401 entry->cb_data = NULL; 1402 entry->buf = NULL; 1403 entry->len = 0; 1404 entry->flags = NTBT_LINK_DOWN_FLAG; 1405 1406 mtx_lock(&qp->tx_lock); 1407 rc = ntb_process_tx(qp, entry); 1408 mtx_unlock(&qp->tx_lock); 1409 if (rc != 0) 1410 printf("ntb: Failed to send link down\n"); 1411 1412 ntb_qp_link_down_reset(qp); 1413 } 1414 1415 1416 /* List Management */ 1417 1418 static void 1419 ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, 1420 struct ntb_queue_list *list) 1421 { 1422 1423 mtx_lock_spin(lock); 1424 STAILQ_INSERT_TAIL(list, entry, entry); 1425 mtx_unlock_spin(lock); 1426 } 1427 1428 static struct ntb_queue_entry * 1429 ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list) 1430 { 1431 struct ntb_queue_entry *entry; 1432 1433 mtx_lock_spin(lock); 1434 if (STAILQ_EMPTY(list)) { 1435 entry = NULL; 1436 goto out; 1437 } 1438 entry = STAILQ_FIRST(list); 1439 STAILQ_REMOVE_HEAD(list, entry); 1440 out: 1441 mtx_unlock_spin(lock); 1442 1443 return (entry); 1444 } 1445 1446 static struct ntb_queue_entry * 1447 ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from, 1448 struct ntb_queue_list *to) 1449 { 1450 struct ntb_queue_entry *entry; 1451 1452 mtx_lock_spin(lock); 1453 if (STAILQ_EMPTY(from)) { 1454 entry = NULL; 1455 goto out; 1456 } 1457 entry = STAILQ_FIRST(from); 1458 STAILQ_REMOVE_HEAD(from, entry); 1459 STAILQ_INSERT_TAIL(to, entry, entry); 1460 1461 out: 1462 mtx_unlock_spin(lock); 1463 return (entry); 1464 } 1465 1466 /** 1467 * ntb_transport_qp_num - Query the qp number 1468 * @qp: NTB transport layer queue to be queried 1469 * 1470 * Query qp number of the NTB transport queue 1471 * 1472 * RETURNS: a zero based number specifying the qp number 1473 */ 1474 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) 1475 { 1476 1477 return (qp->qp_num); 1478 } 1479 1480 /** 1481 * ntb_transport_max_size - Query the max payload size of a qp 1482 * @qp: NTB transport layer queue to be queried 1483 * 1484 * Query the maximum payload size permissible on the given qp 1485 * 1486 * RETURNS: the max payload size of a qp 1487 */ 1488 unsigned int 1489 ntb_transport_max_size(struct ntb_transport_qp *qp) 1490 { 1491 1492 return (qp->tx_max_frame - sizeof(struct ntb_payload_header)); 1493 } 1494 1495 unsigned int 1496 ntb_transport_tx_free_entry(struct ntb_transport_qp *qp) 1497 { 1498 unsigned int head = qp->tx_index; 1499 unsigned int tail = qp->remote_rx_info->entry; 1500 1501 return (tail >= head ? tail - head : qp->tx_max_entry + tail - head); 1502 } 1503 1504 static device_method_t ntb_transport_methods[] = { 1505 /* Device interface */ 1506 DEVMETHOD(device_probe, ntb_transport_probe), 1507 DEVMETHOD(device_attach, ntb_transport_attach), 1508 DEVMETHOD(device_detach, ntb_transport_detach), 1509 DEVMETHOD_END 1510 }; 1511 1512 devclass_t ntb_transport_devclass; 1513 static DEFINE_CLASS_0(ntb_transport, ntb_transport_driver, 1514 ntb_transport_methods, sizeof(struct ntb_transport_ctx)); 1515 DRIVER_MODULE(ntb_transport, ntb_hw, ntb_transport_driver, 1516 ntb_transport_devclass, NULL, NULL); 1517 MODULE_DEPEND(ntb_transport, ntb, 1, 1, 1); 1518 MODULE_VERSION(ntb_transport, 1); 1519