/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef RSS
#include <net/rss_config.h>
#endif

#include "common/efx.h"

#include "sfxge.h"

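/*
 * Filter routine for legacy line (INTx) interrupts.  This runs in primary
 * (filter) interrupt context, so it only inspects the interrupt status and
 * decides whether the threaded handler, sfxge_intr_line(), needs to run.
 */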
static int
sfxge_intr_line_filter(void *arg)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	efx_nic_t *enp;
	struct sfxge_intr *intr;
	boolean_t fatal;
	uint32_t qmask;

	evq = (struct sfxge_evq *)arg;
	sc = evq->sc;
	enp = sc->enp;
	intr = &sc->intr;

	KASSERT(intr != NULL, ("intr == NULL"));
	KASSERT(intr->type == EFX_INTR_LINE,
	    ("intr->type != EFX_INTR_LINE"));

	if (intr->state != SFXGE_INTR_STARTED)
		return (FILTER_STRAY);

	(void)efx_intr_status_line(enp, &fatal, &qmask);

	if (fatal) {
		(void) efx_intr_disable(enp);
		(void) efx_intr_fatal(enp);
		return (FILTER_HANDLED);
	}

	if (qmask != 0) {
		intr->zero_count = 0;
		return (FILTER_SCHEDULE_THREAD);
	}

	/* SF bug 15783: If the function is not asserting its IRQ and
	 * we read the queue mask on the cycle before a flag is added
	 * to the mask, this inhibits the function from asserting the
	 * IRQ even though we don't see the flag set. To work around
	 * this, we must re-prime all event queues and report the IRQ
	 * as handled when we see a mask of zero. To allow for shared
	 * IRQs, we don't repeat this if we see a mask of zero twice
	 * or more in a row.
	 */
	if (intr->zero_count++ == 0) {
		if (evq->init_state == SFXGE_EVQ_STARTED) {
			if (efx_ev_qpending(evq->common, evq->read_ptr))
				return (FILTER_SCHEDULE_THREAD);
			efx_ev_qprime(evq->common, evq->read_ptr);
			return (FILTER_HANDLED);
		}
	}

	return (FILTER_STRAY);
}

static void
sfxge_intr_line(void *arg)
{
	struct sfxge_evq *evq = arg;

	(void)sfxge_ev_qpoll(evq);
}

static void
sfxge_intr_message(void *arg)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	efx_nic_t *enp;
	struct sfxge_intr *intr;
	unsigned int index;
	boolean_t fatal;

	evq = (struct sfxge_evq *)arg;
	sc = evq->sc;
	enp = sc->enp;
	intr = &sc->intr;
	index = evq->index;

	KASSERT(intr != NULL, ("intr == NULL"));
	KASSERT(intr->type == EFX_INTR_MESSAGE,
	    ("intr->type != EFX_INTR_MESSAGE"));

	if (__predict_false(intr->state != SFXGE_INTR_STARTED))
		return;

	(void)efx_intr_status_message(enp, index, &fatal);

	if (fatal) {
		(void)efx_intr_disable(enp);
		(void)efx_intr_fatal(enp);
		return;
	}

	(void)sfxge_ev_qpoll(evq);
}

static int
sfxge_intr_bus_enable(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	struct sfxge_intr_hdl *table;
	driver_filter_t *filter;
	driver_intr_t *handler;
	int index;
	int err;

	intr = &sc->intr;
	table = intr->table;

	switch (intr->type) {
	case EFX_INTR_MESSAGE:
		filter = NULL; /* not shared */
		handler = sfxge_intr_message;
		break;

	case EFX_INTR_LINE:
		filter = sfxge_intr_line_filter;
		handler = sfxge_intr_line;
		break;

	default:
		KASSERT(0, ("Invalid interrupt type"));
		return (EINVAL);
	}

	/* Try to add the handlers */
	for (index = 0; index < intr->n_alloc; index++) {
		if ((err = bus_setup_intr(sc->dev, table[index].eih_res,
		    INTR_MPSAFE|INTR_TYPE_NET, filter, handler,
		    sc->evq[index], &table[index].eih_tag)) != 0) {
			goto fail;
		}
		if (intr->n_alloc > 1)
			bus_describe_intr(sc->dev, table[index].eih_res,
			    table[index].eih_tag, "%d", index);
#ifdef RSS
		bus_bind_intr(sc->dev, table[index].eih_res,
		    rss_getcpu(index));
#else
		bus_bind_intr(sc->dev, table[index].eih_res, index);
#endif
	}

	return (0);

fail:
	/* Remove remaining handlers */
	while (--index >= 0)
		bus_teardown_intr(sc->dev, table[index].eih_res,
		    table[index].eih_tag);

	return (err);
}

static void
sfxge_intr_bus_disable(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	struct sfxge_intr_hdl *table;
	int i;

	intr = &sc->intr;
	table = intr->table;

	/* Remove all handlers */
	for (i = 0; i < intr->n_alloc; i++)
		bus_teardown_intr(sc->dev, table[i].eih_res,
		    table[i].eih_tag);
}

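/*
 * Allocate 'count' IRQ resources (rids 1..count) for MSI or MSI-X messages
 * and record them in a newly allocated handle table.  On failure, the
 * resources that were already allocated are released again.
 */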
static int
sfxge_intr_alloc(struct sfxge_softc *sc, int count)
{
	device_t dev;
	struct sfxge_intr_hdl *table;
	struct sfxge_intr *intr;
	struct resource *res;
	int rid;
	int error;
	int i;

	dev = sc->dev;
	intr = &sc->intr;
	error = 0;

	table = malloc(count * sizeof(struct sfxge_intr_hdl),
	    M_SFXGE, M_WAITOK);
	intr->table = table;

	for (i = 0; i < count; i++) {
		rid = i + 1;
		res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (res == NULL) {
			device_printf(dev, "Couldn't allocate interrupts for "
			    "message %d\n", rid);
			error = ENOMEM;
			break;
		}
		table[i].eih_rid = rid;
		table[i].eih_res = res;
	}

	if (error != 0) {
		/* Release the resources that were successfully allocated. */
		count = i;
		for (i = 0; i < count; i++)
			bus_release_resource(dev, SYS_RES_IRQ,
			    table[i].eih_rid, table[i].eih_res);
	}

	return (error);
}

static void
sfxge_intr_teardown_msix(struct sfxge_softc *sc)
{
	device_t dev;
	struct resource *resp;
	int rid;

	dev = sc->dev;
	resp = sc->intr.msix_res;

	rid = rman_get_rid(resp);
	bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
}

static int
sfxge_intr_setup_msix(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	struct resource *resp;
	device_t dev;
	int count;
	int rid;

	dev = sc->dev;
	intr = &sc->intr;

	/* Check if MSI-X is available. */
	count = pci_msix_count(dev);
	if (count == 0)
		return (EINVAL);

	/* Do not try to allocate more than the estimated EVQ maximum. */
	KASSERT(sc->evq_max > 0, ("evq_max is zero"));
	count = MIN(count, sc->evq_max);

	rid = PCIR_BAR(4);
	resp = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (resp == NULL)
		return (ENOMEM);

	if (pci_alloc_msix(dev, &count) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
		return (ENOMEM);
	}

	/* Allocate interrupt handlers. */
	if (sfxge_intr_alloc(sc, count) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
		pci_release_msi(dev);
		return (ENOMEM);
	}

	intr->type = EFX_INTR_MESSAGE;
	intr->n_alloc = count;
	intr->msix_res = resp;

	return (0);
}

static int
sfxge_intr_setup_msi(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	device_t dev;
	int count;
	int error;

	dev = sc->dev;
	intr = &sc->intr;

	/*
	 * Check if MSI is available. All messages must be written to
	 * the same address and on x86 this means the IRQs have the
	 * same CPU affinity. So we only ever allocate 1.
	 */
	count = pci_msi_count(dev) ? 1 : 0;
	if (count == 0)
		return (EINVAL);

	if ((error = pci_alloc_msi(dev, &count)) != 0)
		return (ENOMEM);

	/* Allocate interrupt handler. */
	if (sfxge_intr_alloc(sc, count) != 0) {
		pci_release_msi(dev);
		return (ENOMEM);
	}

	intr->type = EFX_INTR_MESSAGE;
	intr->n_alloc = count;

	return (0);
}

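/*
 * Fall back to a legacy line (INTx) interrupt.  The IRQ may be shared with
 * other devices, so the filter routine must tolerate spurious invocations
 * (see sfxge_intr_line_filter()).
 */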
static int
sfxge_intr_setup_fixed(struct sfxge_softc *sc)
{
	struct sfxge_intr_hdl *table;
	struct sfxge_intr *intr;
	struct resource *res;
	device_t dev;
	int rid;

	dev = sc->dev;
	intr = &sc->intr;

	rid = 0;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (res == NULL)
		return (ENOMEM);

	table = malloc(sizeof(struct sfxge_intr_hdl), M_SFXGE, M_WAITOK);
	table[0].eih_rid = rid;
	table[0].eih_res = res;

	intr->type = EFX_INTR_LINE;
	intr->n_alloc = 1;
	intr->table = table;

	return (0);
}

static const char *const __sfxge_err[] = {
	"",
	"SRAM out-of-bounds",
	"Buffer ID out-of-bounds",
	"Internal memory parity",
	"Receive buffer ownership",
	"Transmit buffer ownership",
	"Receive descriptor ownership",
	"Transmit descriptor ownership",
	"Event queue ownership",
	"Event queue FIFO overflow",
	"Illegal address",
	"SRAM parity"
};

void
sfxge_err(efsys_identifier_t *arg, unsigned int code, uint32_t dword0,
    uint32_t dword1)
{
	struct sfxge_softc *sc = (struct sfxge_softc *)arg;
	device_t dev = sc->dev;

	log(LOG_WARNING, "[%s%d] FATAL ERROR: %s (0x%08x%08x)",
	    device_get_name(dev), device_get_unit(dev),
	    __sfxge_err[code], dword1, dword0);
}

void
sfxge_intr_stop(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;

	intr = &sc->intr;

	KASSERT(intr->state == SFXGE_INTR_STARTED,
	    ("Interrupts not started"));

	intr->state = SFXGE_INTR_INITIALIZED;

	/* Disable interrupts at the NIC */
	efx_intr_disable(sc->enp);

	/* Disable interrupts at the bus */
	sfxge_intr_bus_disable(sc);

	/* Tear down common code interrupt bits. */
	efx_intr_fini(sc->enp);
}

int
sfxge_intr_start(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	efsys_mem_t *esmp;
	int rc;

	intr = &sc->intr;
	esmp = &intr->status;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("Interrupts not initialized"));

	/* Zero the memory. */
	(void)memset(esmp->esm_base, 0, EFX_INTR_SIZE);

	/* Initialize common code interrupt bits. */
	(void)efx_intr_init(sc->enp, intr->type, esmp);

	/* Enable interrupts at the bus */
	if ((rc = sfxge_intr_bus_enable(sc)) != 0)
		goto fail;

	intr->state = SFXGE_INTR_STARTED;

	/* Enable interrupts at the NIC */
	efx_intr_enable(sc->enp);

	return (0);

fail:
	/* Tear down common code interrupt bits. */
	efx_intr_fini(sc->enp);

	intr->state = SFXGE_INTR_INITIALIZED;

	return (rc);
}

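/*
 * Release everything acquired by sfxge_intr_init(): the interrupt status
 * DMA memory, the IRQ resources, any MSI/MSI-X allocation and the memory
 * BAR mapped for MSI-X, and the handle table itself.
 */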
void
sfxge_intr_fini(struct sfxge_softc *sc)
{
	struct sfxge_intr_hdl *table;
	struct sfxge_intr *intr;
	efsys_mem_t *esmp;
	device_t dev;
	int i;

	dev = sc->dev;
	intr = &sc->intr;
	esmp = &intr->status;
	table = intr->table;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));

	/* Free DMA memory. */
	sfxge_dma_free(esmp);

	/* Free interrupt handles. */
	for (i = 0; i < intr->n_alloc; i++)
		bus_release_resource(dev, SYS_RES_IRQ,
		    table[i].eih_rid, table[i].eih_res);

	if (table[0].eih_rid != 0)
		pci_release_msi(dev);

	if (intr->msix_res != NULL)
		sfxge_intr_teardown_msix(sc);

	/* Free the handle table */
	free(table, M_SFXGE);
	intr->table = NULL;
	intr->n_alloc = 0;

	/* Clear the interrupt type */
	intr->type = EFX_INTR_INVALID;

	intr->state = SFXGE_INTR_UNINITIALIZED;
}

int
sfxge_intr_init(struct sfxge_softc *sc)
{
	device_t dev;
	struct sfxge_intr *intr;
	efsys_mem_t *esmp;
	int rc;

	dev = sc->dev;
	intr = &sc->intr;
	esmp = &intr->status;

	KASSERT(intr->state == SFXGE_INTR_UNINITIALIZED,
	    ("Interrupts already initialized"));

	/* Try to setup MSI-X or MSI interrupts if available. */
	if ((rc = sfxge_intr_setup_msix(sc)) == 0)
		device_printf(dev, "Using MSI-X interrupts\n");
	else if ((rc = sfxge_intr_setup_msi(sc)) == 0)
		device_printf(dev, "Using MSI interrupts\n");
	else if ((rc = sfxge_intr_setup_fixed(sc)) == 0) {
		device_printf(dev, "Using fixed interrupts\n");
	} else {
		device_printf(dev, "Couldn't setup interrupts\n");
		return (ENOMEM);
	}

	/* Set up DMA for interrupts. */
	if ((rc = sfxge_dma_alloc(sc, EFX_INTR_SIZE, esmp)) != 0)
		return (ENOMEM);

	intr->state = SFXGE_INTR_INITIALIZED;

	return (0);
}