--- ioat.c (5f77bd3e244b85b4bbd5ab7386e7d5fbf4fe4316)
+++ ioat.c (faefad9c125a9478dd46ccadd0b20a2c825de803)
 /*-
  * Copyright (C) 2012 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
--- 24 unchanged lines hidden ---
 #include <sys/conf.h>
 #include <sys/ioccom.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/module.h>
 #include <sys/mutex.h>
 #include <sys/rman.h>
+#include <sys/sbuf.h>
 #include <sys/sysctl.h>
 #include <sys/time.h>
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 #include <machine/bus.h>
 #include <machine/resource.h>
 #include <machine/stdarg.h>
 
--- 11 unchanged lines hidden ---
 static int ioat_teardown_intr(struct ioat_softc *ioat);
 static int ioat3_attach(device_t device);
 static int ioat_start_channel(struct ioat_softc *ioat);
 static int ioat_map_pci_bar(struct ioat_softc *ioat);
 static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
     int error);
 static void ioat_interrupt_handler(void *arg);
 static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
+static int chanerr_to_errno(uint32_t);
 static void ioat_process_events(struct ioat_softc *ioat);
 static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
 static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
 static void ioat_free_ring(struct ioat_softc *, uint32_t size,
     struct ioat_descriptor **);
 static void ioat_free_ring_entry(struct ioat_softc *ioat,
     struct ioat_descriptor *desc);
 static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *,
     int mflags);
 static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
 static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
     uint32_t index);
 static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *,
     uint32_t size, boolean_t need_dscr, int mflags);
 static int ring_grow(struct ioat_softc *, uint32_t oldorder,
     struct ioat_descriptor **);
 static int ring_shrink(struct ioat_softc *, uint32_t oldorder,
     struct ioat_descriptor **);
+static void ioat_halted_debug(struct ioat_softc *, uint32_t);
 static void ioat_timer_callback(void *arg);
 static void dump_descriptor(void *hw_desc);
 static void ioat_submit_single(struct ioat_softc *ioat);
 static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
     int error);
 static int ioat_reset_hw(struct ioat_softc *ioat);
 static void ioat_setup_sysctl(device_t device);
 static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
 static inline struct ioat_softc *ioat_get(struct ioat_softc *,
     enum ioat_ref_kind);
 static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
+static inline void _ioat_putn(struct ioat_softc *, uint32_t,
+    enum ioat_ref_kind, boolean_t);
 static inline void ioat_putn(struct ioat_softc *, uint32_t,
     enum ioat_ref_kind);
+static inline void ioat_putn_locked(struct ioat_softc *, uint32_t,
+    enum ioat_ref_kind);
 static void ioat_drain_locked(struct ioat_softc *);
 
 #define ioat_log_message(v, ...) do { \
         if ((v) <= g_ioat_debug_level) { \
                 device_printf(ioat->device, __VA_ARGS__); \
         } \
 } while (0)
 
--- 276 unchanged lines hidden ---
             IOAT_DMACAP_STR);
 
         xfercap = ioat_read_xfercap(ioat);
         ioat->max_xfer_size = 1 << xfercap;
 
         /* TODO: need to check DCA here if we ever do XOR/PQ */
 
         mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
-        mtx_init(&ioat->cleanup_lock, "ioat_process_events", NULL, MTX_DEF);
+        mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
         callout_init(&ioat->timer, 1);
 
+        /* Establish lock order for Witness */
+        mtx_lock(&ioat->submit_lock);
+        mtx_lock(&ioat->cleanup_lock);
+        mtx_unlock(&ioat->cleanup_lock);
+        mtx_unlock(&ioat->submit_lock);
+
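Note: the four lock/unlock calls above run once at attach, purely so WITNESS records submit_lock -> cleanup_lock as the canonical order and can warn about later violations. A minimal userland sketch of the same idiom (illustrative only; pthread mutexes have no built-in order checker, the point is the one-time ordered acquire/release at init):

    #include <pthread.h>

    static pthread_mutex_t submit_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Take both locks once in the canonical order, then release in
     * reverse.  Under a lock-order checker (WITNESS in the kernel),
     * this registers submit_lock as preceding cleanup_lock.
     */
    static void
    establish_lock_order(void)
    {
            pthread_mutex_lock(&submit_lock);
            pthread_mutex_lock(&cleanup_lock);
            pthread_mutex_unlock(&cleanup_lock);
            pthread_mutex_unlock(&submit_lock);
    }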
         ioat->is_resize_pending = FALSE;
         ioat->is_completion_pending = FALSE;
         ioat->is_reset_pending = FALSE;
         ioat->is_channel_running = FALSE;
 
         bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
             sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
--- 159 unchanged lines hidden ---
 static void
 ioat_interrupt_handler(void *arg)
 {
         struct ioat_softc *ioat = arg;
 
         ioat_process_events(ioat);
 }
 
+static int
+chanerr_to_errno(uint32_t chanerr)
+{
+
+        if (chanerr == 0)
+                return (0);
+        if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
+                return (EFAULT);
+        if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
+                return (EIO);
+        /* This one is probably our fault: */
+        if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
+                return (EIO);
+        return (EIO);
+}
+
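Note: chanerr_to_errno() collapses the CHANERR bit vector into a single errno for completion callbacks: address-validation failures map to EFAULT, data and descriptor errors to EIO. A standalone sketch of the same mapping (the bit positions below are placeholders for illustration, not the real IOAT_CHANERR_* values):

    #include <assert.h>
    #include <errno.h>
    #include <stdint.h>

    /* Placeholder bit assignments, for illustration only. */
    #define CHANERR_XSADDERR (1u << 0)  /* bad transfer source address */
    #define CHANERR_XDADDERR (1u << 1)  /* bad transfer destination address */
    #define CHANERR_RDERR    (1u << 8)  /* read data error */
    #define CHANERR_WDERR    (1u << 9)  /* write data error */

    static int
    chanerr_to_errno(uint32_t chanerr)
    {
            if (chanerr == 0)
                    return (0);
            if ((chanerr & (CHANERR_XSADDERR | CHANERR_XDADDERR)) != 0)
                    return (EFAULT);
            return (EIO);   /* data errors and everything else */
    }

    int
    main(void)
    {
            assert(chanerr_to_errno(0) == 0);
            assert(chanerr_to_errno(CHANERR_XSADDERR) == EFAULT);
            assert(chanerr_to_errno(CHANERR_RDERR | CHANERR_WDERR) == EIO);
            return (0);
    }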
 static void
 ioat_process_events(struct ioat_softc *ioat)
 {
         struct ioat_descriptor *desc;
         struct bus_dmadesc *dmadesc;
         uint64_t comp_update, status;
-        uint32_t completed;
+        uint32_t completed, chanerr;
+        int error;
 
         mtx_lock(&ioat->cleanup_lock);
 
         completed = 0;
         comp_update = *ioat->comp_update;
         status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;
 
         CTR0(KTR_IOAT, __func__);
 
         if (status == ioat->last_seen)
                 goto out;
 
         while (1) {
                 desc = ioat_get_ring_entry(ioat, ioat->tail);
                 dmadesc = &desc->bus_dmadesc;
                 CTR1(KTR_IOAT, "completing desc %d", ioat->tail);
 
-                if (dmadesc->callback_fn)
-                        (*dmadesc->callback_fn)(dmadesc->callback_arg);
+                if (dmadesc->callback_fn != NULL)
+                        dmadesc->callback_fn(dmadesc->callback_arg, 0);
 
                 completed++;
                 ioat->tail++;
                 if (desc->hw_desc_bus_addr == status)
                         break;
         }
 
         ioat->last_seen = desc->hw_desc_bus_addr;
--- 5 unchanged lines hidden ---
         }
 
 out:
         ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
         mtx_unlock(&ioat->cleanup_lock);
 
         ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
         wakeup(&ioat->tail);
+
+        if (!is_ioat_halted(comp_update))
+                return;
+
+        /*
+         * Fatal programming error on this DMA channel.  Flush any outstanding
+         * work with error status and restart the engine.
+         */
+        ioat_log_message(0, "Channel halted due to fatal programming error\n");
+        mtx_lock(&ioat->submit_lock);
+        mtx_lock(&ioat->cleanup_lock);
+        ioat->quiescing = TRUE;
+
+        chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
+        ioat_halted_debug(ioat, chanerr);
+
+        while (ioat_get_active(ioat) > 0) {
+                desc = ioat_get_ring_entry(ioat, ioat->tail);
+                dmadesc = &desc->bus_dmadesc;
+                CTR1(KTR_IOAT, "completing err desc %d", ioat->tail);
+
+                if (dmadesc->callback_fn != NULL)
+                        dmadesc->callback_fn(dmadesc->callback_arg,
+                            chanerr_to_errno(chanerr));
+
+                ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
+                ioat->tail++;
+        }
+
+        /* Clear error status */
+        ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);
+
+        mtx_unlock(&ioat->cleanup_lock);
+        mtx_unlock(&ioat->submit_lock);
+
+        ioat_log_message(0, "Resetting channel to recover from error\n");
+        error = ioat_reset_hw(ioat);
+        KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
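Note: together with the callback-signature change earlier in this function, the recovery block above completes every outstanding descriptor with chanerr_to_errno(chanerr) instead of silently dropping it, then resets the engine. Client completion callbacks now receive an int error as their second argument. A hypothetical client-side callback under the new convention (the struct and names are invented for illustration):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical per-request context a client might pass as callback_arg. */
    struct my_request {
            const char *name;
            int         done;
    };

    /*
     * Matches the new two-argument completion convention: error is 0 on
     * success, or an errno derived from the CHANERR bits when the channel
     * halted and was reset.
     */
    static void
    my_dma_done(void *arg, int error)
    {
            struct my_request *req = arg;

            req->done = 1;
            if (error != 0)
                    fprintf(stderr, "%s: DMA failed: %d\n", req->name, error);
    }

    int
    main(void)
    {
            struct my_request req = { "copy0", 0 };

            my_dma_done(&req, 0);       /* normal completion */
            my_dma_done(&req, EFAULT);  /* flushed after a halt */
            return (0);
    }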
 }
 
 /*
  * User API functions
  */
 bus_dmaengine_t
 ioat_get_dmaengine(uint32_t index)
 {
--- 212 unchanged lines hidden ---
         if (desc == NULL)
                 goto out;
 
         bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
             BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map);
         if (hw_desc == NULL)
                 goto out;
 
+        memset(&desc->bus_dmadesc, 0, sizeof(desc->bus_dmadesc));
         desc->u.generic = hw_desc;
 
         error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
             sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,
             busdmaflag);
         if (error)
                 goto out;
 
--- 311 unchanged lines hidden ---
 {
         struct ioat_descriptor *desc;
 
         ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
             IOAT_CHANERR_STR);
         if (chanerr == 0)
                 return;
 
-        mtx_lock(&ioat->submit_lock);
+        mtx_assert(&ioat->cleanup_lock, MA_OWNED);
+
         desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
         dump_descriptor(desc->u.raw);
 
         desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
         dump_descriptor(desc->u.raw);
-        mtx_unlock(&ioat->submit_lock);
 }
 
 static void
 ioat_timer_callback(void *arg)
 {
         struct ioat_descriptor **newring;
         struct ioat_softc *ioat;
-        uint64_t status;
-        uint32_t chanerr, order;
+        uint32_t order;
 
         ioat = arg;
         ioat_log_message(1, "%s\n", __func__);
 
         if (ioat->is_completion_pending) {
-                status = ioat_get_chansts(ioat);
-
-                /*
-                 * When halted due to errors, check for channel programming
-                 * errors before advancing the completion state.
-                 */
-                if (is_ioat_halted(status)) {
-                        chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
-                        ioat_halted_debug(ioat, chanerr);
-                }
                 ioat_process_events(ioat);
-        } else {
-                mtx_lock(&ioat->submit_lock);
-                order = ioat->ring_size_order;
-                if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) {
-                        mtx_unlock(&ioat->submit_lock);
-                        goto out;
-                }
-                ioat->is_resize_pending = TRUE;
-                mtx_unlock(&ioat->submit_lock);
+                return;
+        }
+
+        /* Slowly scale the ring down if idle. */
+        mtx_lock(&ioat->submit_lock);
+        order = ioat->ring_size_order;
+        if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) {
+                mtx_unlock(&ioat->submit_lock);
+                goto out;
+        }
+        ioat->is_resize_pending = TRUE;
+        mtx_unlock(&ioat->submit_lock);
 
         newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE,
             M_NOWAIT);
 
         mtx_lock(&ioat->submit_lock);
         KASSERT(ioat->ring_size_order == order,
             ("resize_pending protects order"));
 
         if (newring != NULL)
                 ring_shrink(ioat, order, newring);
 
         ioat->is_resize_pending = FALSE;
         mtx_unlock(&ioat->submit_lock);
 
 out:
-                /* Slowly scale the ring down if idle. */
         if (ioat->ring_size_order > IOAT_MIN_ORDER)
                 callout_reset(&ioat->timer, 10 * hz,
                     ioat_timer_callback, ioat);
-        }
 }
 
 /*
  * Support Functions
  */
 static void
 ioat_submit_single(struct ioat_softc *ioat)
 {
--- 81 unchanged lines hidden ---
                 /* So this really shouldn't happen... */
                 ioat_log_message(0, "Device is active after a reset?\n");
                 ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
                 error = 0;
                 goto out;
         }
 
         chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
-        ioat_halted_debug(ioat, chanerr);
         if (chanerr != 0) {
+                mtx_lock(&ioat->cleanup_lock);
+                ioat_halted_debug(ioat, chanerr);
+                mtx_unlock(&ioat->cleanup_lock);
                 error = EIO;
                 goto out;
         }
 
         /*
          * Bring device back online after reset.  Writing CHAINADDR brings the
          * device back to active.
          *
--- 15 unchanged lines hidden ---
 
         if (error == 0)
                 error = ioat_start_channel(ioat);
 
         return (error);
 }
 
 static int
+sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
+{
+        struct ioat_softc *ioat;
+        struct sbuf sb;
+        uint64_t status;
+        int error;
+
+        ioat = arg1;
+
+        status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
+
+        sbuf_new_for_sysctl(&sb, NULL, 256, req);
+        switch (status) {
+        case IOAT_CHANSTS_ACTIVE:
+                sbuf_printf(&sb, "ACTIVE");
+                break;
+        case IOAT_CHANSTS_IDLE:
+                sbuf_printf(&sb, "IDLE");
+                break;
+        case IOAT_CHANSTS_SUSPENDED:
+                sbuf_printf(&sb, "SUSPENDED");
+                break;
+        case IOAT_CHANSTS_HALTED:
+                sbuf_printf(&sb, "HALTED");
+                break;
+        case IOAT_CHANSTS_ARMED:
+                sbuf_printf(&sb, "ARMED");
+                break;
+        default:
+                sbuf_printf(&sb, "UNKNOWN");
+                break;
+        }
+        error = sbuf_finish(&sb);
+        sbuf_delete(&sb);
+
+        if (error != 0 || req->newptr == NULL)
+                return (error);
+        return (EINVAL);
+}
+
+static int
+sysctl_handle_error(SYSCTL_HANDLER_ARGS)
+{
+        struct ioat_descriptor *desc;
+        struct ioat_softc *ioat;
+        int error, arg;
+
+        ioat = arg1;
+
+        arg = 0;
+        error = SYSCTL_OUT(req, &arg, sizeof(arg));
+        if (error != 0 || req->newptr == NULL)
+                return (error);
+
+        error = SYSCTL_IN(req, &arg, sizeof(arg));
+        if (error != 0)
+                return (error);
+
+        if (arg != 0) {
+                ioat_acquire(&ioat->dmaengine);
+                desc = ioat_op_generic(ioat, IOAT_OP_COPY, 1,
+                    0xffff000000000000ull, 0xffff000000000000ull, NULL, NULL,
+                    0);
+                if (desc == NULL)
+                        error = ENOMEM;
+                else
+                        ioat_submit_single(ioat);
+                ioat_release(&ioat->dmaengine);
+        }
+        return (error);
+}
+
+static int
 sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
 {
         struct ioat_softc *ioat;
         int error, arg;
 
         ioat = arg1;
 
         arg = 0;
--- 60 unchanged lines hidden ---
         SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_reset_pending", CTLFLAG_RD,
             &ioat->is_reset_pending, 0, "reset pending");
         SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_channel_running", CTLFLAG_RD,
             &ioat->is_channel_running, 0, "channel running");
 
         SYSCTL_ADD_PROC(ctx, par, OID_AUTO, "force_hw_reset",
             CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
             "Set to non-zero to reset the hardware");
+        SYSCTL_ADD_PROC(ctx, par, OID_AUTO, "force_hw_error",
+            CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_error, "I",
+            "Set to non-zero to inject a recoverable hardware error");
+        SYSCTL_ADD_PROC(ctx, par, OID_AUTO, "chansts",
+            CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
+            "String of the channel status");
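Note: the chansts node registered above is backed by the sbuf-based handler added earlier in this diff: build the reply in an sbuf bound to the sysctl request, finish it, and reject writes since the node is read-only. The minimal shape of such a handler, mirroring the calls used in the hunk (the handler name and reply string here are arbitrary examples):

    static int
    sysctl_handle_example_string(SYSCTL_HANDLER_ARGS)
    {
            struct sbuf sb;
            int error;

            /* Stream sbuf output directly into the sysctl reply buffer. */
            sbuf_new_for_sysctl(&sb, NULL, 256, req);
            sbuf_printf(&sb, "EXAMPLE");
            error = sbuf_finish(&sb);
            sbuf_delete(&sb);

            /* Read-only node: any attempt to set a new value is rejected. */
            if (error != 0 || req->newptr == NULL)
                    return (error);
            return (EINVAL);
    }

From userland the new nodes would be exercised with sysctl(8), e.g. reading dev.ioat.0.chansts or writing 1 to dev.ioat.0.force_hw_error (the exact OID path depends on how the device attaches).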
 }
 
 static inline struct ioat_softc *
 ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
 {
         uint32_t old;
 
         KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));
--- 7 unchanged lines hidden ---
 #endif
 
         return (ioat);
 }
 
 static inline void
 ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
 {
+
+        _ioat_putn(ioat, n, kind, FALSE);
+}
+
+static inline void
+ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
+{
+
+        _ioat_putn(ioat, n, kind, TRUE);
+}
+
+static inline void
+_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
+    boolean_t locked)
+{
         uint32_t old;
 
         KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));
 
         if (n == 0)
                 return;
 
 #ifdef INVARIANTS
--- 5 unchanged lines hidden ---
         for (;;) {
                 old = ioat->refcnt;
                 if (old <= n)
                         break;
                 if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
                         return;
         }
 
-        mtx_lock(IOAT_REFLK);
+        if (locked)
+                mtx_assert(IOAT_REFLK, MA_OWNED);
+        else
+                mtx_lock(IOAT_REFLK);
+
         old = atomic_fetchadd_32(&ioat->refcnt, -n);
         KASSERT(old >= n, ("refcnt error"));
 
         if (old == n)
                 wakeup(IOAT_REFLK);
-        mtx_unlock(IOAT_REFLK);
+        if (!locked)
+                mtx_unlock(IOAT_REFLK);
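Note: _ioat_putn() now takes a locked flag so the halted-channel cleanup path, which already holds the reference lock, can drop references via ioat_putn_locked() without recursing on the mutex, while unlocked callers keep the old behavior. A userland sketch of this wrapper idiom (names invented for illustration):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t ref_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int refcnt;

    /*
     * Core routine: 'locked' says whether the caller already holds
     * ref_lock, so the slow path must not try to acquire it again.
     */
    static void
    _ref_put(unsigned int n, bool locked)
    {
            if (!locked)
                    pthread_mutex_lock(&ref_lock);
            refcnt -= n;
            if (!locked)
                    pthread_mutex_unlock(&ref_lock);
    }

    static void ref_put(unsigned int n)        { _ref_put(n, false); }
    static void ref_put_locked(unsigned int n) { _ref_put(n, true); }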
 }
 
 static inline void
 ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
 {
 
         ioat_putn(ioat, 1, kind);
 }
 
 static void
 ioat_drain_locked(struct ioat_softc *ioat)
 {
 
         mtx_assert(IOAT_REFLK, MA_OWNED);
         while (ioat->refcnt > 0)
                 msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
 }