/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/fail.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#ifndef BUS_SPACE_MAXADDR_40BIT
#define	BUS_SPACE_MAXADDR_40BIT	0xFFFFFFFFFFULL
#endif
#define	IOAT_REFLK	(&ioat->submit_lock)

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor *);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static union ioat_hw_descriptor *ioat_get_descriptor(struct ioat_softc *,
    uint32_t index);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *,
    uint32_t index);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_poll_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static inline struct ioat_softc *ioat_get(struct ioat_softc *,
    enum ioat_ref_kind);
static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
static inline void _ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind, boolean_t);
static inline void ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static inline void ioat_putn_locked(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static void ioat_drain_locked(struct ioat_softc *);

#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");

unsigned g_ioat_ring_order = 13;
SYSCTL_UINT(_hw_ioat, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ioat_ring_order,
    0, "Set IOAT ring order. (1 << this) == ring size.");
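/*
 * Worked example: the default order of 13 yields a ring of 1 << 13 == 8192
 * descriptors, so ioat3_attach() below allocates
 * 8192 * sizeof(struct ioat_dma_hw_descriptor) bytes of DMA memory per
 * channel for the hardware descriptor ring.
 */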
/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ioat_probe),
	DEVMETHOD(device_attach, ioat_attach),
	DEVMETHOD(device_detach, ioat_detach),
	DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static unsigned ioat_channel_index = 0;
SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");

static struct _pcsid
{
	u_int32_t	type;
	const char	*desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x6f208086, "BDX IOAT Ch0" },
	{ 0x6f218086, "BDX IOAT Ch1" },
	{ 0x6f228086, "BDX IOAT Ch2" },
	{ 0x6f238086, "BDX IOAT Ch3" },
	{ 0x6f248086, "BDX IOAT Ch4" },
	{ 0x6f258086, "BDX IOAT Ch5" },
	{ 0x6f268086, "BDX IOAT Ch6" },
	{ 0x6f278086, "BDX IOAT Ch7" },
	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },

	{ 0x00000000, NULL }
};
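/*
 * Each 'type' in the pci_ids[] table above is the 32-bit identifier compared
 * against pci_get_devid(), which combines the PCI device ID (upper 16 bits)
 * with the vendor ID (lower 16 bits; 0x8086 is Intel).  For example,
 * 0x34308086 is device 0x3430 from vendor 0x8086.
 */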
/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	u_int32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep->type; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat->chan_idx = ioat_channel_index;
	ioat_channel[ioat_channel_index++] = ioat;
	ioat_test_attach();

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}

static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);

	ioat_test_detach();
	taskqueue_drain(taskqueue_thread, &ioat->reset_task);

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat->destroying = TRUE;
	wakeup(&ioat->quiescing);
	wakeup(&ioat->resetting);

	ioat_channel[ioat->chan_idx] = NULL;

	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->poll_timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	if (ioat->hw_desc_ring != NULL) {
		bus_dmamap_unload(ioat->hw_desc_tag, ioat->hw_desc_map);
		bus_dmamem_free(ioat->hw_desc_tag, ioat->hw_desc_ring,
		    ioat->hw_desc_map);
		bus_dma_tag_destroy(ioat->hw_desc_tag);
	}

	return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}

static int
ioat_start_channel(struct ioat_softc *ioat)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);

	/* Submit 'NULL' operation manually to avoid quiescing flag */
	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = &ioat_get_descriptor(ioat, ioat->head)->dma;
	dmadesc = &desc->bus_dmadesc;

	dmadesc->callback_fn = NULL;
	dmadesc->callback_arg = NULL;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = IOAT_OP_COPY;
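	/*
	 * The remaining fields below describe a 'null' operation: an 8-byte
	 * dummy transfer with the null bit set, so the engine posts a
	 * completion without moving any data; the loop that follows polls
	 * CHANSTS until the channel reports idle.
	 */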
	hw_desc->u.control_generic.completion_update = 1;
	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;
	hw_desc->u.control.null = 1;

	ioat_submit_single(ioat);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
	    IOAT_CHANERR_STR);
	return (ENXIO);
}

/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor *ring;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	void *hw_desc;
	size_t ringsz;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->poll_timer, 1);
	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat->is_submitter_processing = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t),
	    0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = g_ioat_ring_order;
	num_descriptors = 1 << ioat->ring_size_order;
	ringsz = sizeof(struct ioat_dma_hw_descriptor) * num_descriptors;

	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
	    2 * 1024 * 1024, 0x0, BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, ringsz, 1, ringsz, 0, NULL, NULL, &ioat->hw_desc_tag);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(ioat->hw_desc_tag, &hw_desc,
	    BUS_DMA_ZERO | BUS_DMA_WAITOK, &ioat->hw_desc_map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    ringsz, ioat_dmamap_cb, &ioat->hw_desc_bus_addr, BUS_DMA_WAITOK);
	if (error)
		return (error);

	ioat->hw_desc_ring = hw_desc;

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_WAITOK);
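	/*
	 * The two loops below initialize the software ring entries and chain
	 * the hardware descriptors: each descriptor's 'next' field holds the
	 * bus address of its successor, and RING_PHYS_ADDR() wraps the index
	 * modulo the ring size, so the final descriptor points back at the
	 * first, forming a circular chain.
	 */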
	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		memset(&ring[i].bus_dmadesc, 0, sizeof(ring[i].bus_dmadesc));
		ring[i].id = i;
	}

	for (i = 0; i < num_descriptors; i++) {
		dma_hw_desc = &ioat->hw_desc_ring[i].dma;
		dma_hw_desc->next = RING_PHYS_ADDR(ioat, i + 1);
	}

	ioat->head = ioat->hw_head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;
	return (0);
}

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	baddr = arg;
	*baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
	/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
	/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat->stats.interrupts++;
	ioat_process_events(ioat);
}

static int
chanerr_to_errno(uint32_t chanerr)
{

	if (chanerr == 0)
		return (0);
	if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
		return (EFAULT);
	if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
		return (EIO);
	/* This one is probably our fault: */
	if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
		return (EIO);
	return (EIO);
}

static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	boolean_t pending;
	int error;

	mtx_lock(&ioat->cleanup_lock);

	/*
	 * Don't run while the hardware is being reset.  Reset is responsible
	 * for blocking new work and draining & completing existing work, so
	 * there is nothing to do until new work is queued after reset anyway.
	 */
	if (ioat->resetting_cleanup) {
		mtx_unlock(&ioat->cleanup_lock);
		return;
	}

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (status < ioat->hw_desc_bus_addr ||
	    status >= ioat->hw_desc_bus_addr + (1 << ioat->ring_size_order) *
	    sizeof(struct ioat_generic_hw_descriptor))
		panic("Bogus completion address %jx (channel %u)",
		    (uintmax_t)status, ioat->chan_idx);

	if (status == ioat->last_seen) {
		/*
		 * If we landed in process_events and nothing has been
		 * completed, check for a timeout due to channel halt.
		 */
		goto out;
	}
	CTR4(KTR_IOAT, "%s channel=%u hw_status=0x%lx last_seen=0x%lx",
	    __func__, ioat->chan_idx, comp_update, ioat->last_seen);

	while (RING_PHYS_ADDR(ioat, ioat->tail - 1) != status) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) ok cb %p(%p)",
		    ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
		    dmadesc->callback_arg);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
	}
	CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

	if (completed != 0) {
		ioat->last_seen = RING_PHYS_ADDR(ioat, ioat->tail - 1);
		ioat->stats.descriptors_processed += completed;
	}

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);

	/* Perform a racy check first; only take the locks if it passes. */
	pending = (ioat_get_active(ioat) != 0);
	if (!pending && ioat->is_completion_pending) {
		mtx_unlock(&ioat->cleanup_lock);
		mtx_lock(&ioat->submit_lock);
		mtx_lock(&ioat->cleanup_lock);

		pending = (ioat_get_active(ioat) != 0);
		if (!pending && ioat->is_completion_pending) {
			ioat->is_completion_pending = FALSE;
			callout_stop(&ioat->poll_timer);
		}
		mtx_unlock(&ioat->submit_lock);
	}
	mtx_unlock(&ioat->cleanup_lock);

	if (pending)
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);

	if (completed != 0) {
		ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
		wakeup(&ioat->tail);
	}

	/*
	 * The device doesn't seem to reliably push suspend/halt statuses to
	 * the channel completion memory address, so poll the device register
	 * here.
	 */
	comp_update = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	ioat->quiescing = TRUE;
	/*
	 * This is safe to do here because we have both locks and the submit
	 * queue is quiesced.  We know that we will drain all outstanding
	 * events, so ioat_reset_hw can't deadlock.  It is necessary to
	 * protect other ioat_process_event threads from racing ioat_reset_hw,
	 * reading an indeterminate hw state, and attempting to continue
	 * issuing completions.
	 */
	ioat->resetting_cleanup = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (1 <= g_ioat_debug_level)
		ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) err cb %p(%p)",
		    ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
		    dmadesc->callback_arg);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}
	CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

	if (ioat->is_completion_pending) {
		ioat->is_completion_pending = FALSE;
		callout_stop(&ioat->poll_timer);
	}

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
	KASSERT(error == 0,
	    ("%s: taskqueue_enqueue failed: %d", __func__, error));
}

static void
ioat_reset_hw_task(void *ctx, int pending __unused)
{
	struct ioat_softc *ioat;
	int error;

	ioat = ctx;
	ioat_log_message(1, "%s: Resetting channel\n", __func__);

	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
	(void)error;
}

/*
 * User API functions
 */
unsigned
ioat_get_nchannels(void)
{

	return (ioat_channel_index);
}

bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	if (index >= ioat_channel_index)
		return (NULL);

	ioat = ioat_channel[index];
	if (ioat == NULL || ioat->destroying)
		return (NULL);

	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0)
			return (NULL);

		mtx_lock(IOAT_REFLK);
		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0);
		mtx_unlock(IOAT_REFLK);

		if (ioat->destroying)
			return (NULL);
	}

	/*
	 * There's a race here between the quiescing check and HW reset or
	 * module destroy.
	 */
	return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine);
}
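/*
 * Illustrative usage sketch (not compiled; 'my_callback', 'ctx', and the bus
 * addresses are hypothetical).  A consumer takes a channel reference, batches
 * submissions between acquire/release, and drops the reference when it is
 * finished with the engine:
 *
 *	bus_dmaengine_t eng;
 *
 *	eng = ioat_get_dmaengine(0, M_WAITOK);
 *	if (eng != NULL) {
 *		ioat_acquire(eng);
 *		(void)ioat_copy(eng, dst_phys, src_phys, len,
 *		    my_callback, ctx, DMA_INT_EN);
 *		ioat_release(eng);
 *		(wait for my_callback to fire, then:)
 *		ioat_put_dmaengine(eng);
 *	}
 */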
void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_put(ioat, IOAT_DMAENGINE_REF);
}

int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);
}

size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);
}

uint32_t
ioat_get_capabilities(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->capabilities);
}

int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
		return (ENODEV);
	if (delay > ioat->intrdelay_max)
		return (ERANGE);

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
	return (0);
}

uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);
}

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
	ioat->acq_head = ioat->head;
}

int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;
	int error;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);
	return (error);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR4(KTR_IOAT, "%s channel=%u dispatch1 hw_head=%u head=%u", __func__,
	    ioat->chan_idx, ioat->hw_head & UINT16_MAX, ioat->head);
	KFAIL_POINT_CODE(DEBUG_FP, ioat_release, /* do nothing */);
	CTR4(KTR_IOAT, "%s channel=%u dispatch2 hw_head=%u head=%u", __func__,
	    ioat->chan_idx, ioat->hw_head & UINT16_MAX, ioat->head);

	if (ioat->acq_head != ioat->head) {
		ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET,
		    (uint16_t)ioat->hw_head);

		if (!ioat->is_completion_pending) {
			ioat->is_completion_pending = TRUE;
			callout_reset(&ioat->poll_timer, 1,
			    ioat_poll_timer_callback, ioat);
		}
	}
	mtx_unlock(&ioat->submit_lock);
}

static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int mflags;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	if ((flags & DMA_NO_WAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (size > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n",
		    __func__, ioat->max_xfer_size, (unsigned)size);
		return (NULL);
	}

	if (ioat_reserve_space(ioat, 1, mflags) != 0)
		return (NULL);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = &ioat_get_descriptor(ioat, ioat->head)->generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
	return (desc);
}

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);

	if (((src | dst) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	CTR6(KTR_IOAT, "%s channel=%u desc=%p dest=%lx src=%lx len=%lx",
	    __func__, ioat->chan_idx, &desc->bus_dmadesc, dst, src, len);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) {
		ioat_log_message(0, "%s: Addresses must be page-aligned\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
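	/*
	 * An 8KB copy uses a single descriptor covering two pages per side.
	 * When the second source or destination page is not physically
	 * contiguous with the first, the page-break bit plus the
	 * corresponding next_*_addr field tell the engine where the second
	 * page lives.
	 */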
	if (src2 != src1 + PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		hw_desc->next_src_addr = src2;
	}
	if (dst2 != dst1 + PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		hw_desc->next_dest_addr = dst2;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
		ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",
		    __func__);
		return (NULL);
	}
	if (((src | dst) & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_MOVECRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_MOVECRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
		ioat_log_message(0, "%s: Device lacks CRC capability\n",
		    __func__);
		return (NULL);
	}
	if ((src & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_CRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_CRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_CRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
		ioat_log_message(0, "%s: Device lacks BFILL capability\n",
		    __func__);
		return (NULL);
	}

	if ((dst & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->fill;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Ring Management
 */
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}

/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
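/*
 * Illustrative sketch (hypothetical variables): a caller that must queue a
 * pair of operations back-to-back can reserve both slots up front so the
 * second submission cannot fail for lack of ring space:
 *
 *	if (ioat_acquire_reserve(eng, 2, M_NOWAIT) == 0) {
 *		(void)ioat_copy(eng, dst1, src1, len, NULL, NULL, 0);
 *		(void)ioat_copy(eng, dst2, src2, len, my_callback, ctx,
 *		    DMA_INT_EN);
 *		ioat_release(eng);
 *	}
 */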
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	boolean_t dug;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;
	dug = FALSE;

	if (num_descs < 1 || num_descs >= (1 << ioat->ring_size_order)) {
		error = EINVAL;
		goto out;
	}

	for (;;) {
		if (ioat->quiescing) {
			error = ENXIO;
			goto out;
		}

		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		CTR3(KTR_IOAT, "%s channel=%u starved (%u)", __func__,
		    ioat->chan_idx, num_descs);

		if (!dug && !ioat->is_submitter_processing) {
			ioat->is_submitter_processing = TRUE;
			mtx_unlock(&ioat->submit_lock);

			CTR2(KTR_IOAT, "%s channel=%u attempting to process events",
			    __func__, ioat->chan_idx);
			ioat_process_events(ioat);

			mtx_lock(&ioat->submit_lock);
			dug = TRUE;
			KASSERT(ioat->is_submitter_processing == TRUE,
			    ("is_submitter_processing"));
			ioat->is_submitter_processing = FALSE;
			wakeup(&ioat->tail);
			continue;
		}

		if ((mflags & M_WAITOK) == 0) {
			error = EAGAIN;
			break;
		}
		CTR2(KTR_IOAT, "%s channel=%u blocking on completions",
		    __func__, ioat->chan_idx);
		msleep(&ioat->tail, &ioat->submit_lock, 0,
		    "ioat_full", 0);
		continue;
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	KASSERT(!ioat->quiescing || error == ENXIO,
	    ("reserved during quiesce"));
	return (error);
}

static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor *ring)
{

	free(ring, M_IOAT);
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static union ioat_hw_descriptor *
ioat_get_descriptor(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->hw_desc_ring[index % (1 << ioat->ring_size_order)]);
}

static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	union ioat_hw_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
	    IOAT_CHANERR_STR);
	if (chanerr == 0)
		return;

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_descriptor(ioat, ioat->tail + 0);
	dump_descriptor(desc);

	desc = ioat_get_descriptor(ioat, ioat->tail + 1);
	dump_descriptor(desc);
}

static void
ioat_poll_timer_callback(void *arg)
{
	struct ioat_softc *ioat;

	ioat = arg;
	ioat_log_message(3, "%s\n", __func__);

	ioat_process_events(ioat);
}

/*
 * Support Functions
 */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);
	atomic_add_rel_int(&ioat->hw_head, 1);
	CTR5(KTR_IOAT, "%s channel=%u head=%u hw_head=%u tail=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->hw_head & UINT16_MAX,
	    ioat->tail);

	ioat->stats.descriptors_submitted++;
}
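/*
 * Full hardware reset sequence: quiesce the channel and drain references,
 * suspend the engine, issue the reset (saving and restoring MSI-X state on
 * models that lose it across reset), reprogram the completion and chain
 * addresses, and finally restart the channel with a NULL operation.
 */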
static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	mtx_lock(IOAT_REFLK);
	while (ioat->resetting && !ioat->destroying)
		msleep(&ioat->resetting, IOAT_REFLK, 0, "IRH_drain", 0);
	if (ioat->destroying) {
		mtx_unlock(IOAT_REFLK);
		return (ENXIO);
	}
	ioat->resetting = TRUE;

	ioat->quiescing = TRUE;
	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	/*
	 * Suspend ioat_process_events while the hardware and softc are in an
	 * indeterminate state.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = TRUE;
	mtx_unlock(&ioat->cleanup_lock);

	CTR2(KTR_IOAT, "%s channel=%u quiesced and drained", __func__,
	    ioat->chan_idx);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	CTR2(KTR_IOAT, "%s channel=%u hardware suspended", __func__,
	    ioat->chan_idx);

	/*
	 * IOAT v3 workaround - write CHANERRMSK_INT with 3E07h to mask out
	 * errors that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);
	CTR2(KTR_IOAT, "%s channel=%u hardware reset", __func__,
	    ioat->chan_idx);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = EIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = ioat->hw_head = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;
	KASSERT(!ioat->is_completion_pending, ("bogus completion_pending"));

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, RING_PHYS_ADDR(ioat, 0));
	error = 0;
	CTR2(KTR_IOAT, "%s channel=%u configured channel", __func__,
	    ioat->chan_idx);

out:
	/* Enqueues a null operation and ensures it completes. */
	if (error == 0) {
		error = ioat_start_channel(ioat);
		CTR2(KTR_IOAT, "%s channel=%u started channel", __func__,
		    ioat->chan_idx);
	}

	/*
	 * Resume completions now that ring state is consistent.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = FALSE;
	mtx_unlock(&ioat->cleanup_lock);

	/* Unblock submission of new work */
	mtx_lock(IOAT_REFLK);
	ioat->quiescing = FALSE;
	wakeup(&ioat->quiescing);

	ioat->resetting = FALSE;
	wakeup(&ioat->resetting);

	if (ioat->is_completion_pending)
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);
	CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx);
	mtx_unlock(IOAT_REFLK);

	return (error);
}

static int
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
	uint64_t status;
	int error;

	ioat = arg1;

	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);
	switch (status) {
	case IOAT_CHANSTS_ACTIVE:
		sbuf_printf(&sb, "ACTIVE");
		break;
	case IOAT_CHANSTS_IDLE:
		sbuf_printf(&sb, "IDLE");
		break;
	case IOAT_CHANSTS_SUSPENDED:
		sbuf_printf(&sb, "SUSPENDED");
		break;
	case IOAT_CHANSTS_HALTED:
		sbuf_printf(&sb, "HALTED");
		break;
	case IOAT_CHANSTS_ARMED:
		sbuf_printf(&sb, "ARMED");
		break;
	default:
		sbuf_printf(&sb, "UNKNOWN");
		break;
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

static int
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
#define	PRECISION	"1"
	const uintmax_t factor = 10;
	uintmax_t rate;
	int error;

	ioat = arg1;
	sbuf_new_for_sysctl(&sb, NULL, 16, req);

	if (ioat->stats.interrupts == 0) {
		sbuf_printf(&sb, "NaN");
		goto out;
	}
	rate = ioat->stats.descriptors_processed * factor /
	    ioat->stats.interrupts;
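	/*
	 * 'rate' is a fixed-point value scaled by 'factor' with one decimal
	 * digit: e.g., 150 descriptors over 20 interrupts gives
	 * rate == 150 * 10 / 20 == 75, printed below as "7.5".
	 */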
	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
	    rate % factor);
#undef PRECISION
out:
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}

static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}

static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0, "HW version (0xMM form)");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
	    &ioat->intrdelay_max, 0,
	    "Maximum configurable INTRDELAY on this channel (microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
	    "IOAT channel internal state");
	state = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
	    0, "SW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
	    0, "SW descriptor tail pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD,
	    &ioat->hw_head, 0, "HW DMACOUNT");

	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
	    ioat->comp_update, "HW addr of last completion");

	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_submitter_processing",
	    CTLFLAG_RD, &ioat->is_submitter_processing, 0,
	    "submitter processing");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending",
	    CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD,
	    &ioat->is_reset_pending, 0, "reset pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD,
	    &ioat->is_channel_running, 0, "channel running");

	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
	    "String of the channel status");

	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
	    &ioat->cached_intrdelay, 0,
	    "Current INTRDELAY on this channel (cached, microseconds)");
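	/*
	 * All of these OIDs hang off the device's sysctl tree, so with unit 0
	 * the channel state appears under dev.ioat.0.state.* and, e.g., a
	 * reset can be forced by setting dev.ioat.0.hammer.force_hw_reset=1
	 * (the hammer node is created below).
	 */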
	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
	    "Big hammers (mostly for testing)");
	hammer = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
	    &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
	    &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
	    &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
	    &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
	    &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
	    &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
}

static inline struct ioat_softc *
ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refcnt, 1);
	KASSERT(old < UINT32_MAX, ("refcnt overflow"));

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));
#endif

	return (ioat);
}

static inline void
ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, FALSE);
}

static inline void
ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, TRUE);
}

static inline void
_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
    boolean_t locked)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	if (n == 0)
		return;

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(old >= n, ("refcnt kind underflow"));
#endif

	/* Skip acquiring the lock if resulting refcnt > 0. */
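	/*
	 * If the count would drop to zero, fall through to the locked path:
	 * the final decrement must happen under IOAT_REFLK so that a thread
	 * sleeping in ioat_drain_locked() cannot miss the wakeup.
	 */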
	for (;;) {
		old = ioat->refcnt;
		if (old <= n)
			break;
		if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
			return;
	}

	if (locked)
		mtx_assert(IOAT_REFLK, MA_OWNED);
	else
		mtx_lock(IOAT_REFLK);

	old = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(old >= n, ("refcnt error"));

	if (old == n)
		wakeup(IOAT_REFLK);
	if (!locked)
		mtx_unlock(IOAT_REFLK);
}

static inline void
ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{

	ioat_putn(ioat, 1, kind);
}

static void
ioat_drain_locked(struct ioat_softc *ioat)
{

	mtx_assert(IOAT_REFLK, MA_OWNED);
	while (ioat->refcnt > 0)
		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
}

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
DB_SHOW_COMMAND(ioat, db_show_ioat)
{
	struct ioat_softc *sc;
	unsigned idx;

	if (!have_addr)
		goto usage;
	idx = (unsigned)addr;
	if (idx >= ioat_channel_index)
		goto usage;

	sc = ioat_channel[idx];
	db_printf("ioat softc at %p\n", sc);
	if (sc == NULL)
		return;

	db_printf(" version: %d\n", sc->version);
	db_printf(" chan_idx: %u\n", sc->chan_idx);
	db_printf(" submit_lock: ");
	db_show_lock(&sc->submit_lock);

	db_printf(" capabilities: %b\n", (int)sc->capabilities,
	    IOAT_DMACAP_STR);
	db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay);
	db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update);

	db_printf(" poll_timer:\n");
	db_printf("  c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time);
	db_printf("  c_arg: %p\n", sc->poll_timer.c_arg);
	db_printf("  c_func: %p\n", sc->poll_timer.c_func);
	db_printf("  c_lock: %p\n", sc->poll_timer.c_lock);
	db_printf("  c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags);

	db_printf(" quiescing: %d\n", (int)sc->quiescing);
	db_printf(" destroying: %d\n", (int)sc->destroying);
	db_printf(" is_submitter_processing: %d\n",
	    (int)sc->is_submitter_processing);
	db_printf(" is_completion_pending: %d\n", (int)sc->is_completion_pending);
	db_printf(" is_reset_pending: %d\n", (int)sc->is_reset_pending);
	db_printf(" is_channel_running: %d\n", (int)sc->is_channel_running);
	db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported);
	db_printf(" resetting: %d\n", (int)sc->resetting);

	db_printf(" head: %u\n", sc->head);
	db_printf(" tail: %u\n", sc->tail);
	db_printf(" hw_head: %u\n", sc->hw_head);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);
	db_printf(" last_seen: 0x%lx\n", sc->last_seen);
	db_printf(" ring: %p\n", sc->ring);
	db_printf(" descriptors: %p\n", sc->hw_desc_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)sc->hw_desc_bus_addr);

	db_printf(" ring[%u] (tail):\n", sc->tail %
	    (1 << sc->ring_size_order));
	db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id);
	db_printf("  addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->tail));
	db_printf("  next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->tail)->generic.next);

	db_printf(" ring[%u] (head - 1):\n", (sc->head - 1) %
	    (1 << sc->ring_size_order));
	db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->head - 1)->id);
	db_printf("  addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->head - 1));
	db_printf("  next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->head - 1)->generic.next);

	db_printf(" ring[%u] (head):\n", (sc->head) %
	    (1 << sc->ring_size_order));
	db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->head)->id);
	db_printf("  addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->head));
	db_printf("  next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->head)->generic.next);

	for (idx = 0; idx < (1 << sc->ring_size_order); idx++)
		if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK)
		    == RING_PHYS_ADDR(sc, idx))
			db_printf(" ring[%u] == hardware tail\n", idx);

	db_printf(" cleanup_lock: ");
	db_show_lock(&sc->cleanup_lock);

	db_printf(" refcnt: %u\n", sc->refcnt);
#ifdef INVARIANTS
	CTASSERT(IOAT_NUM_REF_KINDS == 2);
	db_printf(" refkinds: [ENG=%u, DESCR=%u]\n", sc->refkinds[0],
	    sc->refkinds[1]);
#endif
	db_printf(" stats:\n");
	db_printf("  interrupts: %lu\n", sc->stats.interrupts);
	db_printf("  descriptors_processed: %lu\n", sc->stats.descriptors_processed);
	db_printf("  descriptors_error: %lu\n", sc->stats.descriptors_error);
	db_printf("  descriptors_submitted: %lu\n", sc->stats.descriptors_submitted);

	db_printf("  channel_halts: %u\n", sc->stats.channel_halts);
	db_printf("  last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr);

	if (db_pager_quit)
		return;

	db_printf(" hw status:\n");
	db_printf("  status: 0x%lx\n", ioat_get_chansts(sc));
	db_printf("  chanctrl: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET));
	db_printf("  chancmd: 0x%x\n",
	    (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET));
	db_printf("  dmacount: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET));
	db_printf("  chainaddr: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW));
	db_printf("  chancmp: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW));
	db_printf("  chanerr: %b\n",
	    (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR);
	return;
usage:
	db_printf("usage: show ioat <0-%u>\n", ioat_channel_index);
	return;
}
#endif /* DDB */