/*-
 * Copyright (C) 2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/pmap.h>

#include "../ntb_hw/ntb_hw.h"

/*
 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that
 * allows you to connect two systems using a PCI-e link.
 *
 * This module contains a protocol for sending and receiving messages, and
 * exposes that protocol through a simulated ethernet device called ntb.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

/*
 * Minimal bitmap helpers operating on a 64-bit word.
 * TODO: These functions should really be part of the kernel.
 * NOTE(review): these macros are not atomic; callers must serialize access
 * to any shared bitmap themselves (qp_bitmap here is only touched from
 * queue create/free paths).
 */
#define test_bit(pos, bitmap_addr)   (*(bitmap_addr) & 1UL << (pos))
#define set_bit(pos, bitmap_addr)    *(bitmap_addr) |= 1UL << (pos)
#define clear_bit(pos, bitmap_addr)  *(bitmap_addr) &= ~(1UL << (pos))

/* KTR trace class used by all CTRx() calls in this driver. */
#define KTR_NTB KTR_SPARE3

#define NTB_TRANSPORT_VERSION	3	/* wire protocol version (spad) */
#define NTB_RX_MAX_PKTS		64	/* max frames per rx pass (fairness) */
#define NTB_RXQ_SIZE		300

/* Default payload size: 16KB plus ethernet header/CRC overhead. */
static unsigned int transport_mtu = 0x4000 + ETHER_HDR_LEN + ETHER_CRC_LEN;
/* Number of transport queue pairs created at init time. */
static unsigned int max_num_clients = 1;

STAILQ_HEAD(ntb_queue_list, ntb_queue_entry);

/*
 * One buffer descriptor.  Entries circulate between the per-qp free and
 * pending queues on both the tx and rx sides.
 */
struct ntb_queue_entry {
	/* ntb_queue list reference */
	STAILQ_ENTRY(ntb_queue_entry) entry;

	/* info on data to be transferred */
	void *cb_data;		/* opaque pointer handed to the completion cb */
	void *buf;		/* mbuf (tx: outbound chain, rx: m_devget copy) */
	uint64_t len;
	uint64_t flags;
};

/*
 * Consumer index shared with the peer through the memory window; the
 * remote side polls this to know how far we have consumed its ring.
 */
struct ntb_rx_info {
	unsigned int entry;
};

/* Per-queue-pair transport state (one tx ring and one rx ring). */
struct ntb_transport_qp {
	struct ntb_netdev *transport;
	struct ntb_softc *ntb;

	void *cb_data;		/* client cookie passed to all callbacks */

	bool client_ready;
	bool qp_link;		/* per-qp link state (NTB_LINK_UP/DOWN) */
	uint8_t qp_num;	/* Only 64 QP's are allowed.  0-63 */

	struct ntb_rx_info *rx_info;		/* in local MW, read by peer */
	struct ntb_rx_info *remote_rx_info;	/* in rx buffer, written by peer */

	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
	    void *data, int len);
	struct ntb_queue_list tx_free_q;
	struct mtx ntb_tx_free_q_lock;
	void *tx_mw;		/* base of outbound frame slots in the MW */
	uint64_t tx_index;	/* next slot to fill */
	uint64_t tx_max_entry;	/* number of slots in the tx ring */
	uint64_t tx_max_frame;	/* bytes per slot, incl. payload header */

	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
	    void *data, int len);
	struct ntb_queue_list rx_pend_q;	/* buffers awaiting data */
	struct ntb_queue_list rx_free_q;	/* buffers awaiting completion */
	struct mtx ntb_rx_pend_q_lock;
	struct mtx ntb_rx_free_q_lock;
	struct task rx_completion_task;	/* runs on taskqueue_swi */
	void *rx_buff;		/* base of inbound frame slots */
	uint64_t rx_index;	/* next slot to consume */
	uint64_t rx_max_entry;
	uint64_t rx_max_frame;

	void (*event_handler) (void *data, int status);
	struct callout link_work;	/* per-qp link negotiation retry */
	struct callout queue_full;	/* restart tx after ring-full EAGAIN */
	struct callout rx_full;		/* retry rx after pend-q exhaustion */

	uint64_t last_rx_no_buf;

	/* Stats */
	uint64_t rx_bytes;
	uint64_t rx_pkts;
	uint64_t rx_ring_empty;
	uint64_t rx_err_no_buf;
	uint64_t rx_err_oflow;
	uint64_t rx_err_ver;
	uint64_t tx_bytes;
	uint64_t tx_pkts;
	uint64_t tx_ring_full;
};

/* Callback bundle a client supplies when creating a queue. */
struct ntb_queue_handlers {
	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
	    void *data, int len);
	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
	    void *data, int len);
	void (*event_handler) (void *data, int status);
};

/* One locally-allocated receive buffer backing a hardware memory window. */
struct ntb_transport_mw {
	size_t size;
	void *virt_addr;	/* contigmalloc'd buffer */
	vm_paddr_t dma_addr;	/* physical address programmed into the HW */
};

/* Driver-global state: the simulated ethernet device plus the transport. */
struct ntb_netdev {
	struct ntb_softc *ntb;
	struct ifnet *ifp;
	struct ntb_transport_mw mw[NTB_NUM_MW];
	struct ntb_transport_qp *qps;
	uint64_t max_qps;
	uint64_t qp_bitmap;	/* bit set == qp number is free */
	bool transport_link;
	struct callout link_work;
	struct ntb_transport_qp *qp;	/* the qp used by the ntb ifnet */
	uint64_t bufsize;
	u_char eaddr[ETHER_ADDR_LEN];
	struct mtx tx_lock;
	struct mtx rx_lock;
};

/* Single static instance; this driver supports exactly one NTB device. */
static struct ntb_netdev net_softc;

/* Flag bits carried in ntb_payload_header.flags. */
enum {
	IF_NTB_DESC_DONE_FLAG = 1 << 0,	/* slot contains a complete frame */
	IF_NTB_LINK_DOWN_FLAG = 1 << 1,	/* peer is taking its qp link down */
};

/* Trailer written at the end of each frame slot in the memory window. */
struct ntb_payload_header {
	uint64_t ver;	/* sequence number; receiver matches against rx_pkts */
	uint64_t len;	/* payload length in bytes */
	uint64_t flags;	/* IF_NTB_*_FLAG bits */
};

/* Scratchpad register indices used during link negotiation. */
enum {
	IF_NTB_VERSION = 0,
	IF_NTB_MW0_SZ,
	IF_NTB_MW1_SZ,
	IF_NTB_NUM_QPS,
	IF_NTB_QP_LINKS,
	IF_NTB_MAX_SPAD,
};

/* Queue pairs are striped round-robin across the memory windows. */
#define QP_TO_MW(qp)		((qp) % NTB_NUM_MW)
#define NTB_QP_DEF_NUM_ENTRIES	100	/* buffers preallocated per queue */
#define NTB_LINK_DOWN_TIMEOUT	10	/* link retry interval, ms */

static int ntb_handle_module_events(struct module *m, int what, void *arg);
static int ntb_setup_interface(void);
static int ntb_teardown_interface(void);
static void ntb_net_init(void *arg);
static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void ntb_start(struct ifnet *ifp);
static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
    void *data, int len);
static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
    void *data, int len);
static void ntb_net_event_handler(void *data, int status);
static int ntb_transport_init(struct ntb_softc *ntb);
static void ntb_transport_free(void *transport);
static void ntb_transport_init_queue(struct ntb_netdev *nt,
    unsigned int qp_num);
static void ntb_transport_free_queue(struct ntb_transport_qp *qp);
static struct ntb_transport_qp * ntb_transport_create_queue(void *data,
    struct ntb_softc *pdev, const struct ntb_queue_handlers *handlers);
static void ntb_transport_link_up(struct ntb_transport_qp *qp);
static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb,
    void *data, unsigned int len);
static int ntb_process_tx(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry);
static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry, void *offset);
static void ntb_qp_full(void *arg);
static void ntb_transport_rxc_db(void *data, int db_num);
static void ntb_rx_pendq_full(void *arg);
static void ntb_transport_rx(struct ntb_transport_qp *qp);
static int ntb_process_rxc(struct ntb_transport_qp *qp);
static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry, void *offset);
static void ntb_rx_completion_task(void *arg, int pending);
static void ntb_transport_event_callback(void *data, enum ntb_hw_event event);
static void ntb_transport_link_work(void *arg);
static int ntb_set_mw(struct ntb_netdev *nt, int num_mw, unsigned int size);
static void ntb_transport_setup_qp_mw(struct ntb_netdev *nt,
    unsigned int qp_num);
static void ntb_qp_link_work(void *arg);
static void ntb_transport_link_cleanup(struct ntb_netdev *nt);
static void ntb_qp_link_down(struct ntb_transport_qp *qp);
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp);
static void ntb_transport_link_down(struct ntb_transport_qp *qp);
static void ntb_send_link_down(struct ntb_transport_qp *qp);
static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list);
static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock,
    struct ntb_queue_list *list);
static void create_random_local_eui48(u_char *eaddr);
static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);

MALLOC_DEFINE(M_NTB_IF, "if_ntb", "ntb network driver");

/* Module setup and teardown */

/*
 * Module event handler: bring the interface up on load, tear it down on
 * unload.  Returns 0 or an errno.
 */
static int
ntb_handle_module_events(struct module *m, int what, void *arg)
{
	int err = 0;

	switch (what) {
	case MOD_LOAD:
		err = ntb_setup_interface();
		break;
	case MOD_UNLOAD:
		err = ntb_teardown_interface();
		break;
	default:
		err = EOPNOTSUPP;
		break;
	}
	return (err);
}

static moduledata_t ntb_transport_mod = {
	"ntb_transport",
	ntb_handle_module_events,
	NULL
};

DECLARE_MODULE(ntb_transport, ntb_transport_mod, SI_SUB_KLD, SI_ORDER_ANY);
MODULE_DEPEND(ntb_transport, ntb_hw, 1, 1, 1);

/*
 * Locate the ntb_hw device, initialize the transport on it, and attach a
 * simulated ethernet interface ("ntb0") on top of one transport queue.
 * Returns 0 on success or an errno.
 *
 * NOTE(review): the return value of ntb_transport_init() is ignored, and
 * net_softc.qp is not checked for NULL before ntb_transport_link_up() /
 * ntb_transport_max_size() — confirm these cannot fail here.
 */
static int
ntb_setup_interface()
{
	struct ifnet *ifp;
	struct ntb_queue_handlers handlers = { ntb_net_rx_handler,
	    ntb_net_tx_handler, ntb_net_event_handler };

	net_softc.ntb = devclass_get_softc(devclass_find("ntb_hw"), 0);
	if (net_softc.ntb == NULL) {
		printf("ntb: Can't find devclass\n");
		return (ENXIO);
	}

	ntb_transport_init(net_softc.ntb);

	ifp = net_softc.ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		printf("ntb: cannot allocate ifnet structure\n");
		return (ENOMEM);
	}

	net_softc.qp = ntb_transport_create_queue(ifp, net_softc.ntb,
	    &handlers);
	if_initname(ifp, "ntb", 0);
	ifp->if_init = ntb_net_init;
	ifp->if_softc = &net_softc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
	ifp->if_ioctl = ntb_ioctl;
	ifp->if_start = ntb_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	create_random_local_eui48(net_softc.eaddr);
	ether_ifattach(ifp, net_softc.eaddr);
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	ntb_transport_link_up(net_softc.qp);
	net_softc.bufsize = ntb_transport_max_size(net_softc.qp) +
	    sizeof(struct ether_header);
	return (0);
}

/* Detach and free the ifnet, then release the transport queue. */
static int
ntb_teardown_interface()
{
	struct ifnet *ifp = net_softc.ifp;

	ntb_transport_link_down(net_softc.qp);

	ether_ifdetach(ifp);
	if_free(ifp);
	ntb_transport_free_queue(net_softc.qp);
	ntb_transport_free(&net_softc);

	return (0);
}

/* Network device interface */

/* ifnet if_init: mark the interface running and report link up. */
static void
ntb_net_init(void *arg)
{
	struct ntb_netdev *ntb_softc = arg;
	struct ifnet *ifp = ntb_softc->ifp;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_flags |= IFF_UP;
	if_link_state_change(ifp, LINK_STATE_UP);
}

/*
 * ifnet ioctl: only SIOCSIFMTU is handled locally (capped by the
 * transport's max frame size); everything else goes to ether_ioctl().
 */
static int
ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ntb_netdev *nt = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
	{
		if (ifr->ifr_mtu > ntb_transport_max_size(nt->qp) -
		    ETHER_HDR_LEN - ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		break;
	}
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * ifnet if_start: drain the send queue into the transport.  On EAGAIN
 * (tx ring full) the mbuf is re-queued and a callout retries shortly.
 *
 * NOTE(review): for errors other than EAGAIN (e.g. EINVAL when the link
 * is down, or ENOMEM) the dequeued mbuf is neither re-queued nor freed —
 * looks like an mbuf leak; confirm and m_freem() on that path.
 */
static void
ntb_start(struct ifnet *ifp)
{
	struct mbuf *m_head;
	struct ntb_netdev *nt = ifp->if_softc;
	int rc;

	mtx_lock(&nt->tx_lock);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	CTR0(KTR_NTB, "TX: ntb_start");
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		CTR1(KTR_NTB, "TX: start mbuf %p", m_head);
		rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head,
			     m_length(m_head, NULL));
		if (rc != 0) {
			CTR1(KTR_NTB,
			    "TX: couldn't tx mbuf %p. Returning to snd q",
			    m_head);
			if (rc == EAGAIN) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				callout_reset(&nt->qp->queue_full, hz / 1000,
				    ntb_qp_full, ifp);
			}
			break;
		}

	}
	mtx_unlock(&nt->tx_lock);
}

/* Network Device Callbacks */

/* tx completion: the payload has been copied out, free the mbuf. */
static void
ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data,
    int len)
{

	m_freem(data);
	CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", data);
}

/* rx completion: hand the received mbuf to the network stack. */
static void
ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data,
    int len)
{
	struct mbuf *m = data;
	struct ifnet *ifp = qp_data;

	CTR0(KTR_NTB, "RX: rx handler");
	(*ifp->if_input)(ifp, m);
}

/* Link events are ignored by the netdev layer. */
static void
ntb_net_event_handler(void *data, int status)
{

}

/* Transport Init and teardown */

/*
 * Initialize the transport on an NTB device: allocate the queue-pair
 * array, register the HW event callback, and kick off link negotiation
 * if the link is already up.  Returns 0 or an errno.
 *
 * NOTE(review): the shift `1 << nt->max_qps` is undefined for
 * max_qps == 64 (qp_num comments elsewhere allow 0-63) — confirm the
 * upper bound on max_num_clients.
 */
static int
ntb_transport_init(struct ntb_softc *ntb)
{
	struct ntb_netdev *nt = &net_softc;
	int rc, i;

	nt->max_qps = max_num_clients;
	ntb_register_transport(ntb, nt);
	mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF);
	mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF);

	nt->qps = malloc(nt->max_qps * sizeof(struct ntb_transport_qp),
	    M_NTB_IF, M_WAITOK|M_ZERO);

	/* All queue numbers start out free (bit set == free). */
	nt->qp_bitmap = ((uint64_t) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++)
		ntb_transport_init_queue(nt, i);

	callout_init(&nt->link_work, 0);

	rc = ntb_register_event_callback(ntb,
	    ntb_transport_event_callback);
	if (rc != 0)
		goto err;

	if (ntb_query_link_status(ntb))
		callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);

	return (0);

err:
	free(nt->qps, M_NTB_IF);
	ntb_unregister_transport(ntb);
	return (rc);
}

/*
 * Tear down the transport: free still-allocated queues, drop the event
 * callback, and release the memory-window buffers.
 */
static void
ntb_transport_free(void *transport)
{
	struct ntb_netdev *nt = transport;
	struct ntb_softc *ntb =
nt->ntb;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	callout_drain(&nt->link_work);

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);


	ntb_unregister_event_callback(ntb);

	for (i = 0; i < NTB_NUM_MW; i++)
		if (nt->mw[i].virt_addr != NULL)
			contigfree(nt->mw[i].virt_addr, nt->mw[i].size,
			    M_NTB_IF);

	free(nt->qps, M_NTB_IF);
	ntb_unregister_transport(ntb);
}

/*
 * Carve this qp's slice of the local memory window into a shared
 * ntb_rx_info header plus a ring of fixed-size tx frame slots, and
 * initialize the qp's locks, callouts, and queues.
 */
static void
ntb_transport_init_queue(struct ntb_netdev *nt, unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	uint8_t mw_num = QP_TO_MW(qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ntb = nt->ntb;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	/* Spread qps as evenly as possible across the memory windows. */
	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
	else
		num_qps_mw = nt->max_qps / NTB_NUM_MW;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ntb, mw_num) / num_qps_mw;
	qp->rx_info = (struct ntb_rx_info *)
	    ((char *)ntb_get_mw_vbase(qp->ntb, mw_num) +
	    (qp_num / NTB_NUM_MW * tx_size));
	tx_size -= sizeof(struct ntb_rx_info);

	/*
	 * NOTE(review): pointer arithmetic on a struct ntb_rx_info * scales
	 * by the element size, so this advances sizeof(struct ntb_rx_info)^2
	 * bytes — more than the sizeof() just subtracted from tx_size.  Both
	 * peers compute the same offset so they stay in sync, but the ring
	 * can overrun its computed slice; `qp->rx_info + 1` looks intended.
	 */
	qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
	qp->tx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header),
	    tx_size);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;
	qp->tx_index = 0;

	callout_init(&qp->link_work, 0);
	callout_init(&qp->queue_full, CALLOUT_MPSAFE);
	callout_init(&qp->rx_full, CALLOUT_MPSAFE);

	mtx_init(&qp->ntb_rx_pend_q_lock, "ntb rx pend q", NULL, MTX_SPIN);
	mtx_init(&qp->ntb_rx_free_q_lock, "ntb rx free q", NULL, MTX_SPIN);
	mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN);
	TASK_INIT(&qp->rx_completion_task, 0, ntb_rx_completion_task, qp);

	STAILQ_INIT(&qp->rx_pend_q);
	STAILQ_INIT(&qp->rx_free_q);
	STAILQ_INIT(&qp->tx_free_q);
}

/*
 * Release a queue: drop its doorbell callback, free all buffer entries,
 * and return its number to the free bitmap.
 */
static void
ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;

	if (qp == NULL)
		return;

	callout_drain(&qp->link_work);

	ntb_unregister_db_callback(qp->ntb, qp->qp_num);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		free(entry, M_NTB_IF);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q)))
		free(entry, M_NTB_IF);

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		free(entry, M_NTB_IF);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @rx_handler: receive callback function
 * @tx_handler: transmit callback function
 * @event_handler: event callback function
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
 * used to pass up data when the transport has received it on the queue. The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
static struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct ntb_softc *pdev,
    const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_netdev *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (nt == NULL)
		goto err;

	/*
	 * NOTE(review): ffs() takes an int, so only the low 32 bits of the
	 * 64-bit qp_bitmap are searched; fine for max_qps <= 32, but confirm
	 * (ffsl() would cover the full width).
	 */
	free_queue = ffs(nt->qp_bitmap);
	if (free_queue == 0)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	/* Pre-populate the rx pending queue with empty buffers. */
	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF,
		    M_WAITOK|M_ZERO);
		entry->cb_data = nt->ifp;
		entry->buf = NULL;
		entry->len = transport_mtu;
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
	}

	/* Pre-populate the tx free queue. */
	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF,
		    M_WAITOK|M_ZERO);
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
	}

	rc = ntb_register_db_callback(qp->ntb, free_queue, qp,
	    ntb_transport_rxc_db);
	if (rc != 0)
		goto err1;

	return (qp);

err1:
	/*
	 * NOTE(review): the rx entries above were queued on rx_pend_q, but
	 * this cleanup drains rx_free_q — those entries appear to leak here;
	 * confirm the intended list.
	 */
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		free(entry, M_NTB_IF);
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		free(entry, M_NTB_IF);
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return (NULL);
}

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
static void
ntb_transport_link_up(struct ntb_transport_qp *qp)
{

	if (qp == NULL)
		return;

	qp->client_ready = NTB_LINK_UP;

	/* If the hw link is already negotiated, negotiate this qp now. */
	if (qp->transport->transport_link == NTB_LINK_UP)
		callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
}



/* Transport Tx */

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which a NTB
 * payload will be transmitted. This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
static int
ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
    unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (qp == NULL || qp->qp_link != NTB_LINK_UP || len == 0) {
		CTR0(KTR_NTB, "TX: link not up");
		return (EINVAL);
	}

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (entry == NULL) {
		CTR0(KTR_NTB, "TX: couldn't get entry from tx_free_q");
		return (ENOMEM);
	}
	CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc != 0) {
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		CTR1(KTR_NTB,
		    "TX: process_tx failed. Returning entry %p to tx_free_q",
		    entry);
	}
	return (rc);
}

/*
 * Place one entry into the next tx ring slot.  Returns EAGAIN when the
 * ring is full (remote consumer has not caught up); oversized frames are
 * completed with EIO to the client and consume no slot.
 */
static int
ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
{
	void *offset;

	offset = (char *)qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	CTR3(KTR_NTB,
	    "TX: process_tx: tx_pkts=%u, tx_index=%u, remote entry=%u",
	    qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry);
	if (qp->tx_index == qp->remote_rx_info->entry) {
		CTR0(KTR_NTB, "TX: ring full");
		qp->tx_ring_full++;
		return (EAGAIN);
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler != NULL)
			qp->tx_handler(qp, qp->cb_data, entry->buf,
			    EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		CTR1(KTR_NTB,
		    "TX: frame too big. returning entry %p to tx_free_q",
		    entry);
		return (0);
	}
	CTR2(KTR_NTB, "TX: copying entry %p to offset %p", entry, offset);
	ntb_tx_copy_task(qp, entry, offset);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return (0);
}

/*
 * Copy the payload into the memory-window slot, publish the trailer
 * (DONE flag last, after a write barrier), and ring the peer's doorbell.
 * Completes the entry to the client and returns it to tx_free_q.
 */
static void
ntb_tx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
	struct ntb_payload_header *hdr;

	CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset);
	/* entry->buf is NULL for control frames (e.g. link down). */
	if (entry->buf != NULL)
		m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset);

	hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame -
	    sizeof(struct ntb_payload_header));
	hdr->len = entry->len;	/* TODO: replace with bus_space_write */
	hdr->ver = qp->tx_pkts;	/* TODO: replace with bus_space_write */
	wmb();
	/* TODO: replace with bus_space_write */
	hdr->flags = entry->flags | IF_NTB_DESC_DONE_FLAG;

	ntb_ring_sdb(qp->ntb, qp->qp_num);

	/*
	 * The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
			    entry->len);
	}

	CTR2(KTR_NTB,
	    "TX: entry %p sent. hdr->ver = %d, Returning to tx_free_q", entry,
	    hdr->ver);
	ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
}

/* Callout: the tx ring had been full; try to drain the send queue again. */
static void
ntb_qp_full(void *arg)
{

	CTR0(KTR_NTB, "TX: qp_full callout");
	ntb_start(arg);
}

/* Transport Rx */

/* Doorbell interrupt callback: process inbound frames for this qp. */
static void
ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	ntb_transport_rx(qp);
}

/* Callout: rx_pend_q had been empty; poll the rx ring again. */
static void
ntb_rx_pendq_full(void *arg)
{

	CTR0(KTR_NTB, "RX: ntb_rx_pendq_full callout");
	ntb_transport_rx(arg);
}

/* Drain up to NTB_RX_MAX_PKTS frames from the rx ring under rx_lock. */
static void
ntb_transport_rx(struct ntb_transport_qp *qp)
{
	int rc, i;

	/*
	 * Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	mtx_lock(&qp->transport->rx_lock);
	CTR0(KTR_NTB, "RX: transport_rx");
	for (i = 0; i < NTB_RX_MAX_PKTS; i++) {
		rc = ntb_process_rxc(qp);
		if (rc != 0) {
			CTR0(KTR_NTB, "RX: process_rxc failed");
			break;
		}
	}
	mtx_unlock(&qp->transport->rx_lock);
}

/*
 * Consume one frame slot from the rx ring.  Returns 0 when a slot was
 * consumed, EAGAIN when the ring is empty, ENOMEM when no rx buffer is
 * pending, or EIO on a sequence-number mismatch.
 */
static int
ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = (void *)
	    ((char *)qp->rx_buff + qp->rx_max_frame * qp->rx_index);
	hdr = (void *)
	    ((char *)offset + qp->rx_max_frame -
	    sizeof(struct ntb_payload_header));

	CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index);
	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (entry == NULL) {
		qp->rx_err_no_buf++;
		CTR0(KTR_NTB, "RX: No entries in rx_pend_q");
		return
(ENOMEM);
	}
	callout_stop(&qp->rx_full);
	CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry);

	/* Nothing in this slot yet: ring empty. */
	if ((hdr->flags & IF_NTB_DESC_DONE_FLAG) == 0) {
		CTR1(KTR_NTB,
		    "RX: hdr not done. Returning entry %p to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return (EAGAIN);
	}

	/*
	 * NOTE(review): hdr->ver is a full 64-bit counter on the tx side but
	 * is compared against rx_pkts truncated to 32 bits here — confirm
	 * behavior once either counter passes 2^32.
	 */
	if (hdr->ver != (uint32_t) qp->rx_pkts) {
		CTR3(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). "
		    "Returning entry %p to rx_pend_q", hdr->ver, qp->rx_pkts,
		    entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		qp->rx_err_ver++;
		return (EIO);
	}

	/* Control frame: the peer is taking its qp link down. */
	if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) {
		ntb_qp_link_down(qp);
		CTR1(KTR_NTB,
		    "RX: link down. adding entry %p back to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		goto out;
	}

	if (hdr->len <= entry->len) {
		entry->len = hdr->len;
		ntb_rx_copy_task(qp, entry, offset);
	} else {
		CTR1(KTR_NTB,
		    "RX: len too long. Returning entry %p to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);

		qp->rx_err_oflow++;
	}

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;
	CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts);


out:
	/* Ensure that the data is globally visible before clearing the flag */
	wmb();
	hdr->flags = 0;
	/* TODO: replace with bus_space_write */
	qp->rx_info->entry = qp->rx_index;

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return (0);
}

/*
 * Copy a received frame out of the memory window into a fresh mbuf and
 * queue the entry for completion on taskqueue_swi.
 *
 * NOTE(review): m_devget() can return NULL on mbuf exhaustion; the
 * unconditional csum_flags store below would then dereference NULL —
 * confirm and add a drop path.
 */
static void
ntb_rx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
	struct ifnet *ifp = entry->cb_data;
	unsigned int len = entry->len;
	struct mbuf *m;

	CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);
	m = m_devget(offset, len, 0, ifp, NULL);
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

	entry->buf = (void *)m;

	CTR2(KTR_NTB,
	    "RX: copied entry %p to mbuf %p. Adding entry to rx_free_q", entry,
	    m);
	ntb_list_add(&qp->ntb_rx_free_q_lock, entry, &qp->rx_free_q);

	taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task);
}

/*
 * Taskqueue handler: deliver completed rx mbufs to the client callback
 * and recycle their entries back onto rx_pend_q.  If buffers had run
 * out earlier, schedule another rx poll.
 */
static void
ntb_rx_completion_task(void *arg, int pending)
{
	struct ntb_transport_qp *qp = arg;
	struct mbuf *m;
	struct ntb_queue_entry *entry;

	CTR0(KTR_NTB, "RX: rx_completion_task");

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) {
		m = entry->buf;
		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
		if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
			qp->rx_handler(qp, qp->cb_data, m, entry->len);

		entry->buf = NULL;
		entry->len = qp->transport->bufsize;

		CTR1(KTR_NTB,"RX: entry %p removed from rx_free_q "
		    "and added to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		if (qp->rx_err_no_buf > qp->last_rx_no_buf) {
			qp->last_rx_no_buf = qp->rx_err_no_buf;
			CTR0(KTR_NTB, "RX: could spawn rx task");
			callout_reset(&qp->rx_full, hz / 1000, ntb_rx_pendq_full,
			    qp);
		}
	}
}

/* Link Event handler */

/* Hardware link event: start negotiation on link-up, clean up on down. */
static void
ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_netdev *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		ntb_transport_link_cleanup(nt);
		break;
	default:
		panic("ntb: Unknown NTB event");
	}
}

/* Link bring up */

/*
 * Callout: negotiate the transport link via scratchpad registers —
 * publish local version/MW sizes/qp count, then validate the peer's
 * values and allocate the local receive windows.  Re-arms itself while
 * the hardware link stays up but negotiation is incomplete.
 *
 * NOTE(review): MW sizes are u64 but scratchpads hold 32 bits — confirm
 * windows larger than 4GB are not expected here.
 */
static void
ntb_transport_link_work(void *arg)
{
	struct ntb_netdev *nt = arg;
	struct ntb_softc *ntb = nt->ntb;
	struct ntb_transport_qp *qp;
	uint32_t val;
	int rc, i;

	/* send the local info */
	rc = ntb_write_remote_spad(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION);
	if (rc != 0)
		goto out;

	rc = ntb_write_remote_spad(ntb, IF_NTB_MW0_SZ, ntb_get_mw_size(ntb, 0));
	if (rc != 0)
		goto out;

	rc = ntb_write_remote_spad(ntb, IF_NTB_MW1_SZ, ntb_get_mw_size(ntb, 1));
	if (rc != 0)
		goto out;

	rc = ntb_write_remote_spad(ntb, IF_NTB_NUM_QPS, nt->max_qps);
	if (rc != 0)
		goto out;

	rc = ntb_read_remote_spad(ntb, IF_NTB_QP_LINKS, &val);
	if (rc != 0)
		goto out;

	rc = ntb_write_remote_spad(ntb, IF_NTB_QP_LINKS, val);
	if (rc != 0)
		goto out;

	/* Query the remote side for its info */
	rc = ntb_read_local_spad(ntb, IF_NTB_VERSION, &val);
	if (rc != 0)
		goto out;

	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	rc = ntb_read_local_spad(ntb, IF_NTB_NUM_QPS, &val);
	if (rc != 0)
		goto out;

	if (val != nt->max_qps)
		goto out;

	rc = ntb_read_local_spad(ntb, IF_NTB_MW0_SZ, &val);
	if (rc != 0)
		goto out;

	if (val == 0)
		goto out;

	rc = ntb_set_mw(nt, 0, val);
	if (rc != 0)
		return;

	rc = ntb_read_local_spad(ntb, IF_NTB_MW1_SZ, &val);
	if (rc != 0)
		goto out;

	if (val == 0)
		goto out;

	rc = ntb_set_mw(nt, 1, val);
	if (rc != 0)
		return;

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
	}

	return;

out:
	if (ntb_query_link_status(ntb))
		callout_reset(&nt->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt);
}

/*
 * Allocate the local contiguous receive buffer for memory window
 * `num_mw` and program its physical address into the hardware.
 * Returns 0 or ENOMEM.
 */
static int
ntb_set_mw(struct ntb_netdev *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = size;

	mw->virt_addr = contigmalloc(mw->size, M_NTB_IF, M_ZERO, 0,
	    BUS_SPACE_MAXADDR, mw->size, 0);
	if (mw->virt_addr == NULL) {
		printf("ntb: Unable to allocate MW buffer of size %d\n",
		    (int)mw->size);
		return (ENOMEM);
	}
	/* TODO: replace with bus_space_* functions */
	mw->dma_addr = vtophys(mw->virt_addr);

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ntb, num_mw, mw->dma_addr);

	return (0);
}

/*
 * Carve this qp's slice of the local receive buffer into the peer's
 * rx_info header plus the inbound frame ring, mirroring the layout
 * ntb_transport_init_queue() used on the tx side, and zero the slot
 * trailers.
 */
static void
ntb_transport_setup_qp_mw(struct ntb_netdev *nt, unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	void *offset;
	unsigned int rx_size, num_qps_mw;
	uint8_t mw_num = QP_TO_MW(qp_num);
	unsigned int i;

	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
	else
		num_qps_mw = nt->max_qps / NTB_NUM_MW;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->remote_rx_info = (void *)((uint8_t *)nt->mw[mw_num].virt_addr +
	    (qp_num / NTB_NUM_MW * rx_size));
	rx_size -= sizeof(struct ntb_rx_info);

	/*
	 * NOTE(review): same scaled pointer arithmetic as tx_mw in
	 * ntb_transport_init_queue() — advances sizeof(struct ntb_rx_info)
	 * elements, not bytes; consistent with the peer but likely intended
	 * as `qp->remote_rx_info + 1`.
	 */
	qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
	qp->rx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header),
	    rx_size);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;
	qp->tx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		offset = (void *)((uint8_t *)qp->rx_buff +
		    qp->rx_max_frame * (i + 1) -
		    sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
}

/*
 * Callout: advertise this qp as ready in the peer's IF_NTB_QP_LINKS
 * scratchpad, then check whether the peer has done the same; re-arms
 * until both sides agree or the transport link drops.
 *
 * NOTE(review): the rc values of the spad write/read below are not
 * checked before `val` is used — confirm failure here is benign.
 */
static void
ntb_qp_link_work(void *arg)
{
	struct ntb_transport_qp *qp = arg;
	struct ntb_softc *ntb = qp->ntb;
	struct ntb_netdev *nt = qp->transport;
	int rc, val;


	rc = ntb_read_remote_spad(ntb, IF_NTB_QP_LINKS, &val);
	if (rc != 0)
		return;

	rc = ntb_write_remote_spad(ntb, IF_NTB_QP_LINKS, val | 1 << qp->qp_num);

	/* query remote spad for qp ready bits */
	rc = ntb_read_local_spad(ntb, IF_NTB_QP_LINKS, &val);

	/* See if the remote side is up */
	if ((1 << qp->qp_num & val) != 0) {
		qp->qp_link = NTB_LINK_UP;
		if (qp->event_handler != NULL)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP) {
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
	}
}

/* Link down event*/

/*
 * Hardware link went down: mark the transport down, notify every active
 * qp, and wipe the local scratchpads so stale values are not read on
 * the next negotiation.
 */
static void
ntb_transport_link_cleanup(struct ntb_netdev *nt)
{
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		callout_drain(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/*
	 * The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < IF_NTB_MAX_SPAD; i++)
		ntb_write_local_spad(nt->ntb, i, 0);
}


/* Peer-initiated qp link down (received IF_NTB_LINK_DOWN_FLAG). */
static void
ntb_qp_link_down(struct ntb_transport_qp *qp)
{

	ntb_qp_link_cleanup(qp);
}

/*
 * Take one qp link down: notify the client, and re-arm negotiation if
 * the underlying transport link is still up.
 */
static void
ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_netdev *nt = qp->transport;

	if (qp->qp_link == NTB_LINK_DOWN) {
		callout_drain(&qp->link_work);
		return;
	}

	if (qp->event_handler != NULL)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
}

/* Link commanded down */
/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
static void
ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	int rc, val;

	if (qp == NULL)
		return;

	qp->client_ready = NTB_LINK_DOWN;

	/* Clear our ready bit in the peer's IF_NTB_QP_LINKS scratchpad. */
	rc = ntb_read_remote_spad(qp->ntb, IF_NTB_QP_LINKS, &val);
	if (rc != 0)
		return;

	rc = ntb_write_remote_spad(qp->ntb, IF_NTB_QP_LINKS,
	    val & ~(1 << qp->qp_num));

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);
	else
		callout_drain(&qp->link_work);

}

/*
 * Send a zero-length control frame carrying IF_NTB_LINK_DOWN_FLAG so the
 * peer learns this qp is going away.  Waits (bounded) for a free tx
 * entry; gives up silently if none becomes available.
 */
static void
ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry != NULL)
			break;
		pause("NTB Wait for link down", hz / 10);
	}

	if (entry == NULL)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = IF_NTB_LINK_DOWN_FLAG;

	mtx_lock(&qp->transport->tx_lock);
	rc = ntb_process_tx(qp, entry);
	if (rc != 0)
		printf("ntb: Failed to send link down\n");
	mtx_unlock(&qp->transport->tx_lock);
}


/* List Management */

/* Append an entry to a queue list under its spin lock. */
static void
ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list)
1310 { 1311 1312 mtx_lock_spin(lock); 1313 STAILQ_INSERT_TAIL(list, entry, entry); 1314 mtx_unlock_spin(lock); 1315 } 1316 1317 static struct ntb_queue_entry * 1318 ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list) 1319 { 1320 struct ntb_queue_entry *entry; 1321 1322 mtx_lock_spin(lock); 1323 if (STAILQ_EMPTY(list)) { 1324 entry = NULL; 1325 goto out; 1326 } 1327 entry = STAILQ_FIRST(list); 1328 STAILQ_REMOVE_HEAD(list, entry); 1329 out: 1330 mtx_unlock_spin(lock); 1331 1332 return (entry); 1333 } 1334 1335 /* Helper functions */ 1336 /* TODO: This too should really be part of the kernel */ 1337 #define EUI48_MULTICAST 1 << 0 1338 #define EUI48_LOCALLY_ADMINISTERED 1 << 1 1339 static void 1340 create_random_local_eui48(u_char *eaddr) 1341 { 1342 static uint8_t counter = 0; 1343 uint32_t seed = ticks; 1344 1345 eaddr[0] = EUI48_LOCALLY_ADMINISTERED; 1346 memcpy(&eaddr[1], &seed, sizeof(uint32_t)); 1347 eaddr[5] = counter++; 1348 } 1349 1350 /** 1351 * ntb_transport_max_size - Query the max payload size of a qp 1352 * @qp: NTB transport layer queue to be queried 1353 * 1354 * Query the maximum payload size permissible on the given qp 1355 * 1356 * RETURNS: the max payload size of a qp 1357 */ 1358 static unsigned int 1359 ntb_transport_max_size(struct ntb_transport_qp *qp) 1360 { 1361 1362 if (qp == NULL) 1363 return (0); 1364 1365 return (qp->tx_max_frame - sizeof(struct ntb_payload_header)); 1366 } 1367