/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "hfi.h"
#include "common.h"
#include "qp.h"
#include "sdma.h"
#include "iowait.h"
#include "trace.h"

/* must be a power of 2 >= 64 <= 32768 */
#define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff

static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

static uint sdma_idle_cnt = 250;
module_param(sdma_idle_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)");

uint mod_num_sdma;
module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");

static uint sdma_desct_intr = SDMA_DESC_INTR;
module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");

#define SDMA_WAIT_BATCH_SIZE 20

/* max wait time for a SDMA engine to indicate it has halted */
#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */

/* all SDMA engine errors that cause a halt */

#define SD(name) SEND_DMA_##name
#define ALL_SDMA_ENG_HALT_ERRS \
	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))

/* sdma_sendctrl operations */
#define SDMA_SENDCTRL_OP_ENABLE    BIT(0)
#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
#define SDMA_SENDCTRL_OP_HALT      BIT(2)
#define SDMA_SENDCTRL_OP_CLEANUP   BIT(3)

/* handle long defines */
#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT

static const char * const sdma_state_names[] = {
	[sdma_state_s00_hw_down] = "s00_HwDown",
	[sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
	[sdma_state_s20_idle] = "s20_Idle",
	[sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
	[sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
	[sdma_state_s80_hw_freeze] = "s80_HwFreeze",
	[sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
	[sdma_state_s99_running] = "s99_Running",
};

#ifdef CONFIG_SDMA_VERBOSITY
static const char * const sdma_event_names[] = {
	[sdma_event_e00_go_hw_down] = "e00_GoHwDown",
	[sdma_event_e10_go_hw_start] = "e10_GoHwStart",
	[sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
	[sdma_event_e30_go_running] = "e30_GoRunning",
	[sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
	[sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
	[sdma_event_e60_hw_halted] = "e60_HwHalted",
	[sdma_event_e70_go_idle] = "e70_GoIdle",
	[sdma_event_e80_hw_freeze] = "e80_HwFreeze",
	[sdma_event_e81_hw_frozen] = "e81_HwFrozen",
	[sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
	[sdma_event_e85_link_down] = "e85_LinkDown",
	[sdma_event_e90_sw_halted] = "e90_SwHalted",
};
#endif

static const struct sdma_set_state_action sdma_action_table[] = {
	[sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s10_hw_start_up_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s15_hw_start_up_clean_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s20_idle] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s50_hw_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s60_idle_halt_wait] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s80_hw_freeze] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s82_freeze_sw_clean] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
		.go_s99_running_totrue = 1,
	},
};

#define SDMA_TAIL_UPDATE_THRESH 0x1F

/* declare all statics here rather than keep sorting */
static void sdma_complete(struct kref *);
static void sdma_finalput(struct sdma_state *);
static void sdma_get(struct sdma_state *);
static void sdma_hw_clean_up_task(unsigned long);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
static void sdma_sw_clean_up_task(unsigned long);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
static void sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void __sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void dump_sdma_state(struct sdma_engine *sde);
static void sdma_make_progress(struct sdma_engine *sde, u64 status);
static void sdma_desc_avail(struct
			    sdma_engine *sde, unsigned avail);
static void sdma_flush_descq(struct sdma_engine *sde);

/**
 * sdma_state_name() - return state string from enum
 * @state: state
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}

static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}

/*
 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
 * sdma engine 'sde' to drop to 0.
 */
static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
					int pause)
{
	u64 off = 8 * sde->this_idx;
	struct hfi1_devdata *dd = sde->dd;
	int lcnt = 0;
	u64 reg_prev;
	u64 reg = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);

		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != reg_prev)
			lcnt = 0;
		if (lcnt++ > 500) {
			/* timed out - bounce the link */
			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sde->this_idx, (u32)reg);
			queue_work(dd->pport->hfi1_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		udelay(1);
	}
}

/*
 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
 * and pause for credit return.
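 * Each engine is polled in turn via sdma_wait_for_packet_egress() above.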
 */
void sdma_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_sdma; i++) {
		struct sdma_engine *sde = &dd->per_sdma[i];

		sdma_wait_for_packet_egress(sde, 0);
	}
}

static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
{
	u64 reg;

	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
		return;
	reg = cnt;
	reg &= SD(DESC_CNT_CNT_MASK);
	reg <<= SD(DESC_CNT_CNT_SHIFT);
	write_sde_csr(sde, SD(DESC_CNT), reg);
}

static inline void complete_tx(struct sdma_engine *sde,
			       struct sdma_txreq *tx,
			       int res)
{
	/* protect against complete modifying */
	struct iowait *wait = tx->wait;
	callback_t complete = tx->complete;

#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	trace_hfi1_sdma_out_sn(sde, tx->sn);
	if (WARN_ON_ONCE(sde->head_sn != tx->sn))
		dd_dev_err(sde->dd, "expected %llu got %llu\n",
			   sde->head_sn, tx->sn);
	sde->head_sn++;
#endif
	__sdma_txclean(sde->dd, tx);
	if (complete)
		(*complete)(tx, res);
	if (wait && iowait_sdma_dec(wait))
		iowait_drain_wakeup(wait);
}

/*
 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
 *
 * Depending on timing there can be txreqs in two places:
 * - in the descq ring
 * - in the flush list
 *
 * To avoid ordering issues the descq ring needs to be flushed
 * first followed by the flush list.
 *
 * This routine is called from two places
 * - From a work queue item
 * - Directly from the state machine just before setting the
 *   state to running
 *
 * Must be called with head_lock held
 *
 */
static void sdma_flush(struct sdma_engine *sde)
{
	struct sdma_txreq *txp, *txp_next;
	LIST_HEAD(flushlist);
	unsigned long flags;

	/* flush from head to tail */
	sdma_flush_descq(sde);
	spin_lock_irqsave(&sde->flushlist_lock, flags);
	/* copy flush list */
	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
		list_del_init(&txp->list);
		list_add_tail(&txp->list, &flushlist);
	}
	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
	/* flush from flush list */
	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
		complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
}

/*
 * Fields a work request for flushing the descq ring
 * and the flush list
 *
 * If the engine has been brought to running during
 * the scheduling delay, the flush is ignored, assuming
 * that the process of bringing the engine to running
 * would have done this flush prior to going to running.
 *
 */
static void sdma_field_flush(struct work_struct *work)
{
	unsigned long flags;
	struct sdma_engine *sde =
		container_of(work, struct sdma_engine, flush_worker);

	write_seqlock_irqsave(&sde->head_lock, flags);
	if (!__sdma_running(sde))
		sdma_flush(sde);
	write_sequnlock_irqrestore(&sde->head_lock, flags);
}

static void sdma_err_halt_wait(struct work_struct *work)
{
	struct sdma_engine *sde = container_of(work, struct sdma_engine,
					       err_halt_worker);
	u64 statuscsr;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
	while (1) {
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
		if (statuscsr)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(sde->dd,
				   "SDMA engine %d - timeout waiting for engine to halt\n",
				   sde->this_idx);
			/*
			 * Continue anyway.  This could happen if there was
			 * an uncorrectable error in the wrong spot.
			 */
			break;
		}
		usleep_range(80, 120);
	}

	sdma_process_event(sde, sdma_event_e15_hw_halt_done);
}

static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
{
	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
		unsigned index;
		struct hfi1_devdata *dd = sde->dd;

		for (index = 0; index < dd->num_sdma; index++) {
			struct sdma_engine *curr_sdma = &dd->per_sdma[index];

			if (curr_sdma != sde)
				curr_sdma->progress_check_head =
					curr_sdma->descq_head;
		}
		dd_dev_err(sde->dd,
			   "SDMA engine %d - check scheduled\n",
			   sde->this_idx);
		mod_timer(&sde->err_progress_check_timer, jiffies + 10);
	}
}

static void sdma_err_progress_check(unsigned long data)
{
	unsigned index;
	struct sdma_engine *sde = (struct sdma_engine *)data;

	dd_dev_err(sde->dd, "SDE progress check event\n");
	for (index = 0; index < sde->dd->num_sdma; index++) {
		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
		unsigned long flags;

		/* check progress on each engine except the current one */
		if (curr_sde == sde)
			continue;
		/*
		 * We must lock interrupts when acquiring sde->lock,
		 * to avoid a deadlock if interrupt triggers and spins on
		 * the same lock on same CPU
		 */
		spin_lock_irqsave(&curr_sde->tail_lock, flags);
		write_seqlock(&curr_sde->head_lock);

		/* skip non-running queues */
		if (curr_sde->state.current_state != sdma_state_s99_running) {
			write_sequnlock(&curr_sde->head_lock);
			spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
			continue;
		}

		if ((curr_sde->descq_head != curr_sde->descq_tail) &&
		    (curr_sde->descq_head ==
		     curr_sde->progress_check_head))
			__sdma_process_event(curr_sde,
					     sdma_event_e90_sw_halted);
		write_sequnlock(&curr_sde->head_lock);
		spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
	}
	schedule_work(&sde->err_halt_worker);
}

static void sdma_hw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	u64 statuscsr;

	while (1) {
#ifdef CONFIG_SDMA_VERBOSITY
		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
			   sde->this_idx, slashstrip(__FILE__), __LINE__,
			   __func__);
#endif
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
		if (statuscsr)
			break;
		udelay(10);
	}

	sdma_process_event(sde,
			   sdma_event_e25_hw_clean_up_done);
}

static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
	smp_read_barrier_depends(); /* see sdma_update_tail() */
	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}

/*
 * flush ring for recovery
 */
static void sdma_flush_descq(struct sdma_engine *sde)
{
	u16 head, tail;
	int progress = 0;
	struct sdma_txreq *txp = get_txhead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */
	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	while (head != tail) {
		/* advance head, wrap if needed */
		head = ++sde->descq_head & sde->sdma_mask;
		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == head) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
			trace_hfi1_sdma_progress(sde, head, tail, txp);
			txp = get_txhead(sde);
		}
		progress++;
	}
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/*
	 * In the error clean up sequence, software clean must be called
	 * before the hardware clean so we can use the hardware head in
	 * the progress routine.  A hardware clean or SPC unfreeze will
	 * reset the hardware head.
	 *
	 * Process all retired requests. The progress routine will use the
	 * latest physical hardware head - we are not running so speed does
	 * not matter.
	 */
	sdma_make_progress(sde, 0);

	sdma_flush(sde);

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers have been reset via an earlier
	 * clean up.
	 */
	sde->descq_tail = 0;
	sde->descq_head = 0;
	sde->desc_avail = sdma_descq_freecnt(sde);
	*sde->head_dma = 0;

	__sdma_process_event(sde, sdma_event_e40_sw_cleaned);

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void sdma_sw_tear_down(struct sdma_engine *sde)
{
	struct sdma_state *ss = &sde->state;

	/* Releasing this reference means the state machine has stopped.
	 */
	sdma_put(ss);

	/* stop waiting for all unfreeze events to complete */
	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
}

static void sdma_start_hw_clean_up(struct sdma_engine *sde)
{
	tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
}

static void sdma_set_state(struct sdma_engine *sde,
			   enum sdma_states next_state)
{
	struct sdma_state *ss = &sde->state;
	const struct sdma_set_state_action *action = sdma_action_table;
	unsigned op = 0;

	trace_hfi1_sdma_state(
		sde,
		sdma_state_names[ss->current_state],
		sdma_state_names[next_state]);

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;
	ss->current_state = next_state;

	if (ss->previous_state != sdma_state_s99_running &&
	    next_state == sdma_state_s99_running)
		sdma_flush(sde);

	if (action[next_state].op_enable)
		op |= SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_cleanup)
		op |= SDMA_SENDCTRL_OP_CLEANUP;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;
	sdma_sendctrl(sde, ss->current_op);
}

/**
 * sdma_get_descq_cnt() - called when device probed
 *
 * Return a validated descq count.
 *
 * This is currently only used in the verbs initialization to build the tx
 * list.
 *
 * This will probably be deleted in favor of a more scalable approach to
 * alloc tx's.
 *
 */
u16 sdma_get_descq_cnt(void)
{
	u16 count = sdma_descq_cnt;

	if (!count)
		return SDMA_DESCQ_CNT;
	/* count must be a power of 2 greater than 64 and less than
	 * 32768.  Otherwise return default.
	 */
	if (!is_power_of_2(count))
		return SDMA_DESCQ_CNT;
	if (count < 64 || count > 32768)
		return SDMA_DESCQ_CNT;
	return count;
}

/**
 * sdma_engine_get_vl() - return vl for a given sdma engine
 * @sde: sdma engine
 *
 * This function returns the vl mapped to a given engine, or an error if
 * the mapping can't be found. The mapping fields are protected by RCU.
 */
int sdma_engine_get_vl(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	struct sdma_vl_map *m;
	u8 vl;

	if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
		return -EINVAL;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	vl = m->engine_to_vl[sde->this_idx];
	rcu_read_unlock();

	return vl;
}

/**
 * sdma_select_engine_vl() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 *
 * This function returns an engine based on the selector and a vl.  The
 * mapping fields are protected by RCU.
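 * sdma_select_engine_sc() below is a thin wrapper around this routine
 * that first converts a 5 bit SC to a VL.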
 */
struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl)
{
	struct sdma_vl_map *m;
	struct sdma_map_elem *e;
	struct sdma_engine *rval;

	/* NOTE This should only happen if SC->VL changed after the initial
	 * checks on the QP/AH
	 * Default will return engine 0 below
	 */
	if (vl >= num_vls) {
		rval = NULL;
		goto done;
	}

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return &dd->per_sdma[0];
	}
	e = m->map[vl & m->mask];
	rval = e->sde[selector & e->mask];
	rcu_read_unlock();

done:
	rval = !rval ? &dd->per_sdma[0] : rval;
	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
	return rval;
}

/**
 * sdma_select_engine_sc() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @sc5: the 5 bit sc
 *
 *
 * This function returns an engine based on the selector and an sc.
 */
struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5)
{
	u8 vl = sc_to_vlt(dd, sc5);

	return sdma_select_engine_vl(dd, selector, vl);
}

struct sdma_rht_map_elem {
	u32 mask;
	u8 ctr;
	struct sdma_engine *sde[0];
};

struct sdma_rht_node {
	unsigned long cpu_id;
	struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
	struct rhash_head node;
};

#define NR_CPUS_HINT 192

static const struct rhashtable_params sdma_rht_params = {
	.nelem_hint = NR_CPUS_HINT,
	.head_offset = offsetof(struct sdma_rht_node, node),
	.key_offset = offsetof(struct sdma_rht_node, cpu_id),
	.key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
	.max_size = NR_CPUS,
	.min_size = 8,
	.automatic_shrinking = true,
};

/*
 * sdma_select_user_engine() - select sdma engine based on user setup
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an sdma engine for a user sdma request.
 * User defined sdma engine affinity setting is honored when applicable,
 * otherwise system default sdma engine mapping is used. To ensure correct
 * ordering, the mapping from <selector, vl> to sde must remain unchanged.
 */
struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl)
{
	struct sdma_rht_node *rht_node;
	struct sdma_engine *sde = NULL;
	const struct cpumask *current_mask = &current->cpus_allowed;
	unsigned long cpu_id;

	/*
	 * To ensure that always the same sdma engine(s) will be
	 * selected make sure the process is pinned to this CPU only.
	 */
	if (cpumask_weight(current_mask) != 1)
		goto out;

	cpu_id = smp_processor_id();
	rcu_read_lock();
	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
					  sdma_rht_params);

	if (rht_node && rht_node->map[vl]) {
		struct sdma_rht_map_elem *map = rht_node->map[vl];

		sde = map->sde[selector & map->mask];
	}
	rcu_read_unlock();

	if (sde)
		return sde;

out:
	return sdma_select_engine_vl(dd, selector, vl);
}

static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
{
	int i;

	for (i = 0; i < roundup_pow_of_two(map->ctr ?
					   : 1) - map->ctr; i++)
		map->sde[map->ctr + i] = map->sde[i];
}

static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
				 struct sdma_engine *sde)
{
	unsigned int i, pow;

	/* only need to check the first ctr entries for a match */
	for (i = 0; i < map->ctr; i++) {
		if (map->sde[i] == sde) {
			memmove(&map->sde[i], &map->sde[i + 1],
				(map->ctr - i - 1) * sizeof(map->sde[0]));
			map->ctr--;
			pow = roundup_pow_of_two(map->ctr ? : 1);
			map->mask = pow - 1;
			sdma_populate_sde_map(map);
			break;
		}
	}
}

/*
 * Prevents concurrent reads and writes of the sdma engine cpu_mask
 */
static DEFINE_MUTEX(process_to_sde_mutex);

ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count)
{
	struct hfi1_devdata *dd = sde->dd;
	cpumask_var_t mask, new_mask;
	unsigned long cpu;
	int ret, vl, sz;

	vl = sdma_engine_get_vl(sde);
	if (unlikely(vl < 0))
		return -EINVAL;

	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
	if (!ret) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}
	ret = cpulist_parse(buf, mask);
	if (ret)
		goto out_free;

	if (!cpumask_subset(mask, cpu_online_mask)) {
		dd_dev_warn(sde->dd, "Invalid CPU mask\n");
		ret = -EINVAL;
		goto out_free;
	}

	sz = sizeof(struct sdma_rht_map_elem) +
			(TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));

	mutex_lock(&process_to_sde_mutex);

	for_each_cpu(cpu, mask) {
		struct sdma_rht_node *rht_node;

		/* Check if we have this already mapped */
		if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
			cpumask_set_cpu(cpu, new_mask);
			continue;
		}

		if (vl >= ARRAY_SIZE(rht_node->map)) {
			ret = -EINVAL;
			goto out;
		}

		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (!rht_node) {
			rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
			if (!rht_node) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
			if (!rht_node->map[vl]) {
				kfree(rht_node);
				ret = -ENOMEM;
				goto out;
			}
			rht_node->cpu_id = cpu;
			rht_node->map[vl]->mask = 0;
			rht_node->map[vl]->ctr = 1;
			rht_node->map[vl]->sde[0] = sde;

			ret = rhashtable_insert_fast(dd->sdma_rht,
						     &rht_node->node,
						     sdma_rht_params);
			if (ret) {
				kfree(rht_node->map[vl]);
				kfree(rht_node);
				dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
					   cpu);
				goto out;
			}

		} else {
			int ctr, pow;

			/* Add new user mappings */
			if (!rht_node->map[vl])
				rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);

			if (!rht_node->map[vl]) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl]->ctr++;
			ctr = rht_node->map[vl]->ctr;
			rht_node->map[vl]->sde[ctr - 1] = sde;
			pow = roundup_pow_of_two(ctr);
			rht_node->map[vl]->mask = pow - 1;

			/* Populate the sde map table */
			sdma_populate_sde_map(rht_node->map[vl]);
		}
		cpumask_set_cpu(cpu, new_mask);
	}

	/* Clean up old mappings */
	for_each_cpu(cpu, cpu_online_mask) {
		struct sdma_rht_node *rht_node;

		/* Don't cleanup sdes that are set in the new mask */
		if (cpumask_test_cpu(cpu, mask))
			continue;

		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (rht_node) {
			bool empty = true;
			int i;

			/* Remove mappings for old sde */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
				if (rht_node->map[i])
					sdma_cleanup_sde_map(rht_node->map[i],
							     sde);

			/* Free empty hash table entries */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
				if (!rht_node->map[i])
					continue;

				if (rht_node->map[i]->ctr) {
					empty = false;
					break;
				}
			}

			if (empty) {
				ret = rhashtable_remove_fast(dd->sdma_rht,
							     &rht_node->node,
							     sdma_rht_params);
				WARN_ON(ret);

				for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
					kfree(rht_node->map[i]);

				kfree(rht_node);
			}
		}
	}

	cpumask_copy(&sde->cpu_mask, new_mask);
out:
	mutex_unlock(&process_to_sde_mutex);
out_free:
	free_cpumask_var(mask);
	free_cpumask_var(new_mask);
	return ret ? : strnlen(buf, PAGE_SIZE);
}

ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
{
	mutex_lock(&process_to_sde_mutex);
	if (cpumask_empty(&sde->cpu_mask))
		snprintf(buf, PAGE_SIZE, "%s\n", "empty");
	else
		cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
	mutex_unlock(&process_to_sde_mutex);
	return strnlen(buf, PAGE_SIZE);
}

static void sdma_rht_free(void *ptr, void *arg)
{
	struct sdma_rht_node *rht_node = ptr;
	int i;

	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
		kfree(rht_node->map[i]);

	kfree(rht_node);
}

/**
 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
 * @s: seq file
 * @dd: hfi1_devdata
 * @cpuid: cpu id
 *
 * This routine dumps the process to sde mappings per cpu
 */
void sdma_seqfile_dump_cpu_list(struct seq_file *s,
				struct hfi1_devdata *dd,
				unsigned long cpuid)
{
	struct sdma_rht_node *rht_node;
	int i, j;

	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
					  sdma_rht_params);
	if (!rht_node)
		return;

	seq_printf(s, "cpu%3lu: ", cpuid);
	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
		if (!rht_node->map[i] || !rht_node->map[i]->ctr)
			continue;

		seq_printf(s, " vl%d: [", i);

		for (j = 0; j < rht_node->map[i]->ctr; j++) {
			if (!rht_node->map[i]->sde[j])
				continue;

			if (j > 0)
				seq_puts(s, ",");

			seq_printf(s, " sdma%2d",
				   rht_node->map[i]->sde[j]->this_idx);
		}
		seq_puts(s, " ]");
	}

	seq_puts(s, "\n");
}

/*
 * Free the indicated map struct
 */
static void sdma_map_free(struct sdma_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}

/*
 * Handle RCU callback
 */
static void sdma_map_rcu_callback(struct rcu_head *list)
{
	struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);

	sdma_map_free(m);
}

/**
 * sdma_map_init - called when # vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_engines: per vl engine mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
 * implies auto computing the loading and giving each VL a uniform
 * distribution of engines per VL.
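 *
 * As a worked example of the auto computation described below: with 16
 * engines and 3 VLs, VL0 and VL1 each get 5 engines and VL2 gets 6.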
 *
 * The auto algorithm computes the sde_per_vl and the number of extra
 * engines.  Any extra engines are added from the last VL on down.
 *
 * rcu locking is used here to control access to the mapping fields.
 *
 * If either the num_vls or num_sdma are non-power of 2, the array sizes
 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
 * up to the next highest power of 2 and the first entry is reused
 * in a round robin fashion.
 *
 * If an error occurs the map change is not done and the mapping is
 * not changed.
 *
 */
int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
{
	int i, j;
	int extra, sde_per_vl;
	int engine = 0;
	u8 lvl_engines[OPA_MAX_VLS];
	struct sdma_vl_map *oldmap, *newmap;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return 0;

	if (!vl_engines) {
		/* truncate divide */
		sde_per_vl = dd->num_sdma / num_vls;
		/* extras */
		extra = dd->num_sdma % num_vls;
		vl_engines = lvl_engines;
		/* add extras from last vl down */
		for (i = num_vls - 1; i >= 0; i--, extra--)
			vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
	}
	/* build new map */
	newmap = kzalloc(
		sizeof(struct sdma_vl_map) +
			roundup_pow_of_two(num_vls) *
			sizeof(struct sdma_map_elem *),
		GFP_KERNEL);
	if (!newmap)
		goto bail;
	newmap->actual_vls = num_vls;
	newmap->vls = roundup_pow_of_two(num_vls);
	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
	/* initialize back-map */
	for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
		newmap->engine_to_vl[i] = -1;
	for (i = 0; i < newmap->vls; i++) {
		/* save for wrap around */
		int first_engine = engine;

		if (i < newmap->actual_vls) {
			int sz = roundup_pow_of_two(vl_engines[i]);

			/* only allocate once */
			newmap->map[i] = kzalloc(
				sizeof(struct sdma_map_elem) +
					sz * sizeof(struct sdma_engine *),
				GFP_KERNEL);
			if (!newmap->map[i])
				goto bail;
			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
			/* assign engines */
			for (j = 0; j < sz; j++) {
				newmap->map[i]->sde[j] =
					&dd->per_sdma[engine];
				if (++engine >= first_engine + vl_engines[i])
					/* wrap back to first engine */
					engine = first_engine;
			}
			/* assign back-map */
			for (j = 0; j < vl_engines[i]; j++)
				newmap->engine_to_vl[first_engine + j] = i;
		} else {
			/* just re-use entry without allocating */
			newmap->map[i] = newmap->map[i % num_vls];
		}
		engine = first_engine + vl_engines[i];
	}
	/* newmap in hand, save old map */
	spin_lock_irq(&dd->sde_map_lock);
	oldmap = rcu_dereference_protected(dd->sdma_map,
					   lockdep_is_held(&dd->sde_map_lock));

	/* publish newmap */
	rcu_assign_pointer(dd->sdma_map, newmap);

	spin_unlock_irq(&dd->sde_map_lock);
	/* success, free any old map after grace period */
	if (oldmap)
		call_rcu(&oldmap->list, sdma_map_rcu_callback);
	return 0;
bail:
	/* free any partial allocation */
	sdma_map_free(newmap);
	return -ENOMEM;
}

/*
 * Clean up allocated memory.
 *
 * This routine can be called regardless of the success of sdma_init()
 *
 */
static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
{
	size_t i;
	struct sdma_engine *sde;

	if (dd->sdma_pad_dma) {
		dma_free_coherent(&dd->pcidev->dev, 4,
				  (void *)dd->sdma_pad_dma,
				  dd->sdma_pad_phys);
		dd->sdma_pad_dma = NULL;
		dd->sdma_pad_phys = 0;
	}
	if (dd->sdma_heads_dma) {
		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
				  (void *)dd->sdma_heads_dma,
				  dd->sdma_heads_phys);
		dd->sdma_heads_dma = NULL;
		dd->sdma_heads_phys = 0;
	}
	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
		sde = &dd->per_sdma[i];

		sde->head_dma = NULL;
		sde->head_phys = 0;

		if (sde->descq) {
			dma_free_coherent(
				&dd->pcidev->dev,
				sde->descq_cnt * sizeof(u64[2]),
				sde->descq,
				sde->descq_phys
			);
			sde->descq = NULL;
			sde->descq_phys = 0;
		}
		kvfree(sde->tx_ring);
		sde->tx_ring = NULL;
	}
	spin_lock_irq(&dd->sde_map_lock);
	sdma_map_free(rcu_access_pointer(dd->sdma_map));
	RCU_INIT_POINTER(dd->sdma_map, NULL);
	spin_unlock_irq(&dd->sde_map_lock);
	synchronize_rcu();
	kfree(dd->per_sdma);
	dd->per_sdma = NULL;

	if (dd->sdma_rht) {
		rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
		kfree(dd->sdma_rht);
		dd->sdma_rht = NULL;
	}
}

/**
 * sdma_init() - called when device probed
 * @dd: hfi1_devdata
 * @port: port number (currently only zero)
 *
 * sdma_init initializes the specified number of engines.
 *
 * The code initializes each sde and its csrs.  Interrupts
 * are not required to be enabled.
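 *
 * Per-engine send buffer credits are computed by dividing the chip's
 * SDMA memory evenly across the engines being brought up.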
 *
 * Returns:
 * 0 - success, -errno on failure
 */
int sdma_init(struct hfi1_devdata *dd, u8 port)
{
	unsigned this_idx;
	struct sdma_engine *sde;
	struct rhashtable *tmp_sdma_rht;
	u16 descq_cnt;
	void *curr_head;
	struct hfi1_pportdata *ppd = dd->pport + port;
	u32 per_sdma_credits;
	uint idle_cnt = sdma_idle_cnt;
	size_t num_engines = dd->chip_sdma_engines;
	int ret = -ENOMEM;

	if (!HFI1_CAP_IS_KSET(SDMA)) {
		HFI1_CAP_CLEAR(SDMA_AHG);
		return 0;
	}
	if (mod_num_sdma &&
	    /* can't exceed chip support */
	    mod_num_sdma <= dd->chip_sdma_engines &&
	    /* count must be >= vls */
	    mod_num_sdma >= num_vls)
		num_engines = mod_num_sdma;

	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
		    dd->chip_sdma_mem_size);

	per_sdma_credits =
		dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);

	/* set up freeze waitqueue */
	init_waitqueue_head(&dd->sdma_unfreeze_wq);
	atomic_set(&dd->sdma_unfreeze_count, 0);

	descq_cnt = sdma_get_descq_cnt();
	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
		    num_engines, descq_cnt);

	/* alloc memory for array of send engines */
	dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
	if (!dd->per_sdma)
		return ret;

	idle_cnt = ns_to_cclock(dd, idle_cnt);
	if (!sdma_desct_intr)
		sdma_desct_intr = SDMA_DESC_INTR;

	/* Allocate memory for SendDMA descriptor FIFOs */
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		sde->dd = dd;
		sde->ppd = ppd;
		sde->this_idx = this_idx;
		sde->descq_cnt = descq_cnt;
		sde->desc_avail = sdma_descq_freecnt(sde);
		sde->sdma_shift = ilog2(descq_cnt);
		sde->sdma_mask = (1 << sde->sdma_shift) - 1;

		/* Create a mask specifically for each interrupt source */
		sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
					   this_idx);
		sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
						this_idx);
		sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
					    this_idx);
		/* Create a combined mask to cover all 3 interrupt sources */
		sde->imask = sde->int_mask | sde->progress_mask |
			     sde->idle_mask;

		spin_lock_init(&sde->tail_lock);
		seqlock_init(&sde->head_lock);
		spin_lock_init(&sde->senddmactrl_lock);
		spin_lock_init(&sde->flushlist_lock);
		/* ensure there is always a zero bit */
		sde->ahg_bits = 0xfffffffe00000000ULL;

		sdma_set_state(sde, sdma_state_s00_hw_down);

		/* set up reference counting */
		kref_init(&sde->state.kref);
		init_completion(&sde->state.comp);

		INIT_LIST_HEAD(&sde->flushlist);
		INIT_LIST_HEAD(&sde->dmawait);

		sde->tail_csr =
			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));

		if (idle_cnt)
			dd->default_desc1 =
				SDMA_DESC1_HEAD_TO_HOST_FLAG;
		else
			dd->default_desc1 =
				SDMA_DESC1_INT_REQ_FLAG;

		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
			     (unsigned long)sde);

		tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
			     (unsigned long)sde);
		INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
		INIT_WORK(&sde->flush_worker, sdma_field_flush);

		sde->progress_check_head = 0;
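
		/*
		 * The progress-check timer is only initialized here; it is
		 * armed on demand by sdma_err_progress_check_schedule().
		 */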
		setup_timer(&sde->err_progress_check_timer,
			    sdma_err_progress_check, (unsigned long)sde);

		sde->descq = dma_zalloc_coherent(
			&dd->pcidev->dev,
			descq_cnt * sizeof(u64[2]),
			&sde->descq_phys,
			GFP_KERNEL
		);
		if (!sde->descq)
			goto bail;
		sde->tx_ring =
			kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
				GFP_KERNEL);
		if (!sde->tx_ring)
			sde->tx_ring =
				vzalloc(
					sizeof(struct sdma_txreq *) *
					descq_cnt);
		if (!sde->tx_ring)
			goto bail;
	}

	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
	/* Allocate memory for DMA of head registers to memory */
	dd->sdma_heads_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		dd->sdma_heads_size,
		&dd->sdma_heads_phys,
		GFP_KERNEL
	);
	if (!dd->sdma_heads_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
		goto bail;
	}

	/* Allocate memory for pad */
	dd->sdma_pad_dma = dma_zalloc_coherent(
		&dd->pcidev->dev,
		sizeof(u32),
		&dd->sdma_pad_phys,
		GFP_KERNEL
	);
	if (!dd->sdma_pad_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
		goto bail;
	}

	/* assign each engine to different cacheline and init registers */
	curr_head = (void *)dd->sdma_heads_dma;
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		unsigned long phys_offset;

		sde = &dd->per_sdma[this_idx];

		sde->head_dma = curr_head;
		curr_head += L1_CACHE_BYTES;
		phys_offset = (unsigned long)sde->head_dma -
			      (unsigned long)dd->sdma_heads_dma;
		sde->head_phys = dd->sdma_heads_phys + phys_offset;
		init_sdma_regs(sde, per_sdma_credits, idle_cnt);
	}
	dd->flags |= HFI1_HAS_SEND_DMA;
	dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
	dd->num_sdma = num_engines;
	ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
	if (ret < 0)
		goto bail;

	tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
	if (!tmp_sdma_rht) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
	if (ret < 0)
		goto bail;
	dd->sdma_rht = tmp_sdma_rht;

	dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
	return 0;

bail:
	sdma_clean(dd, num_engines);
	return ret;
}

/**
 * sdma_all_running() - called when the link goes up
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the running state.
 */
void sdma_all_running(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* move all engines to running */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e30_go_running);
	}
}

/**
 * sdma_all_idle() - called when the link goes down
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the idle state.
 */
void sdma_all_idle(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* idle all engines */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e70_go_idle);
	}
}

/**
 * sdma_start() - called to kick off state processing for all engines
 * @dd: hfi1_devdata
 *
 * This routine is for kicking off the state processing for all required
 * sdma engines.
 * Interrupts need to be working at this point.
 *
 */
void sdma_start(struct hfi1_devdata *dd)
{
	unsigned i;
	struct sdma_engine *sde;

	/* kick off the engines state processing */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e10_go_hw_start);
	}
}

/**
 * sdma_exit() - used when module is removed
 * @dd: hfi1_devdata
 */
void sdma_exit(struct hfi1_devdata *dd)
{
	unsigned this_idx;
	struct sdma_engine *sde;

	for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
	     ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		if (!list_empty(&sde->dmawait))
			dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
				   sde->this_idx);
		sdma_process_event(sde, sdma_event_e00_go_hw_down);

		del_timer_sync(&sde->err_progress_check_timer);

		/*
		 * This waits for the state machine to exit so it is not
		 * necessary to kill the sdma_sw_clean_up_task to make sure
		 * it is not running.
		 */
		sdma_finalput(&sde->state);
	}
	sdma_clean(dd, dd->num_sdma);
}

/*
 * unmap the indicated descriptor
 */
static inline void sdma_unmap_desc(
	struct hfi1_devdata *dd,
	struct sdma_desc *descp)
{
	switch (sdma_mapping_type(descp)) {
	case SDMA_MAP_SINGLE:
		dma_unmap_single(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	case SDMA_MAP_PAGE:
		dma_unmap_page(
			&dd->pcidev->dev,
			sdma_mapping_addr(descp),
			sdma_mapping_len(descp),
			DMA_TO_DEVICE);
		break;
	}
}

/*
 * return the mode as indicated by the first
 * descriptor in the tx.
 */
static inline u8 ahg_mode(struct sdma_txreq *tx)
{
	return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
		>> SDMA_DESC1_HEADER_MODE_SHIFT;
}

/**
 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
 * @dd: hfi1_devdata for unmapping
 * @tx: tx request to clean
 *
 * This is used in the progress routine to clean the tx or
 * by the ULP to toss an in-process tx build.
 *
 * The code can be called multiple times without issue.
 *
 */
void __sdma_txclean(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx)
{
	u16 i;

	if (tx->num_desc) {
		u8 skip = 0, mode = ahg_mode(tx);

		/* unmap first */
		sdma_unmap_desc(dd, &tx->descp[0]);
		/* determine number of AHG descriptors to skip */
		if (mode > SDMA_AHG_APPLY_UPDATE1)
			skip = mode >> 1;
		for (i = 1 + skip; i < tx->num_desc; i++)
			sdma_unmap_desc(dd, &tx->descp[i]);
		tx->num_desc = 0;
	}
	kfree(tx->coalesce_buf);
	tx->coalesce_buf = NULL;
	/* kmalloc'ed descp */
	if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
		tx->desc_limit = ARRAY_SIZE(tx->descs);
		kfree(tx->descp);
	}
}

static inline u16 sdma_gethead(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	int use_dmahead;
	u16 hwhead;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

retry:
	use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
		      (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
	hwhead = use_dmahead ?
		 (u16)le64_to_cpu(*sde->head_dma) :
		 (u16)read_sde_csr(sde, SD(HEAD));

	if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
		u16 cnt;
		u16 swtail;
		u16 swhead;
		int sane;

		swhead = sde->descq_head & sde->sdma_mask;
		/* this code is really bad for cache line trading */
		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		cnt = sde->descq_cnt;

		if (swhead < swtail)
			/* not wrapped */
			sane = (hwhead >= swhead) & (hwhead <= swtail);
		else if (swhead > swtail)
			/* wrapped around */
			sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			       (hwhead <= swtail);
		else
			/* empty */
			sane = (hwhead == swhead);

		if (unlikely(!sane)) {
			dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
				   sde->this_idx,
				   use_dmahead ? "dma" : "kreg",
				   hwhead, swhead, swtail, cnt);
			if (use_dmahead) {
				/* try one more time, using csr */
				use_dmahead = 0;
				goto retry;
			}
			/* proceed as if no progress */
			hwhead = swhead;
		}
	}
	return hwhead;
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with head_lock held.
 */
static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
{
	struct iowait *wait, *nw;
	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
	unsigned i, n = 0, seq;
	struct sdma_txreq *stx;
	struct hfi1_ibdev *dev = &sde->dd->verbs_dev;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "avail: %u\n", avail);
#endif

	do {
		seq = read_seqbegin(&dev->iowait_lock);
		if (!list_empty(&sde->dmawait)) {
			/* at least one item */
			write_seqlock(&dev->iowait_lock);
			/* Harvest waiters wanting DMA descriptors */
			list_for_each_entry_safe(
				wait,
				nw,
				&sde->dmawait,
				list) {
				u16 num_desc = 0;

				if (!wait->wakeup)
					continue;
				if (n == ARRAY_SIZE(waits))
					break;
				if (!list_empty(&wait->tx_head)) {
					stx = list_first_entry(
						&wait->tx_head,
						struct sdma_txreq,
						list);
					num_desc = stx->num_desc;
				}
				if (num_desc > avail)
					break;
				avail -= num_desc;
				list_del_init(&wait->list);
				waits[n++] = wait;
			}
			write_sequnlock(&dev->iowait_lock);
			break;
		}
	} while (read_seqretry(&dev->iowait_lock, seq));

	for (i = 0; i < n; i++)
		waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}

/* head_lock must be held */
static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
	struct sdma_txreq *txp = NULL;
	int progress = 0;
	u16 hwhead, swhead;
	int idle_check_done = 0;

	hwhead = sdma_gethead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

retry:
	txp = get_txhead(sde);
	swhead = sde->descq_head & sde->sdma_mask;
	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
	while (swhead != hwhead) {
		/* advance head, wrap if needed */
		swhead = ++sde->descq_head & sde->sdma_mask;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == swhead) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
		progress++;
	}

	/*
	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory. The head
	 * value read might not be fully up to date. If there are pending
	 * descriptors and the SDMA idle interrupt fired then read from the
	 * CSR SDMA head instead to get the latest value from the hardware.
	 * The hardware SDMA head should be read at most once in this
	 * invocation of sdma_make_progress(..) which is ensured by the
	 * idle_check_done flag.
	 */
	if ((status & sde->idle_mask) && !idle_check_done) {
		u16 swtail;

		swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
		if (swtail != hwhead) {
			hwhead = (u16)read_sde_csr(sde, SD(HEAD));
			idle_check_done = 1;
			goto retry;
		}
	}

	sde->last_status = status;
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}

/*
 * sdma_engine_interrupt() - interrupt handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 *
 * Status is a mask of the 3 possible interrupts for this engine.  It will
 * contain bits _only_ for this SDMA engine.  It will contain at least one
 * bit, it may contain more.
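 * The three sources are the per-engine SDMA, progress, and idle interrupt
 * masks that sdma_init() combines into sde->imask.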
 */
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
{
	trace_hfi1_sdma_engine_interrupt(sde, status);
	write_seqlock(&sde->head_lock);
	sdma_set_desc_cnt(sde, sdma_desct_intr);
	if (status & sde->idle_mask)
		sde->idle_int_cnt++;
	else if (status & sde->progress_mask)
		sde->progress_int_cnt++;
	else if (status & sde->int_mask)
		sde->sdma_int_cnt++;
	sdma_make_progress(sde, status);
	write_sequnlock(&sde->head_lock);
}

/**
 * sdma_engine_error() - error handler for engine
 * @sde: sdma engine
 * @status: sdma interrupt reason
 */
void sdma_engine_error(struct sdma_engine *sde, u64 status)
{
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
		   sde->this_idx,
		   (unsigned long long)status,
		   sdma_state_names[sde->state.current_state]);
#endif
	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);
	if (status & ALL_SDMA_ENG_HALT_ERRS)
		__sdma_process_event(sde, sdma_event_e60_hw_halted);
	if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
		dd_dev_err(sde->dd,
			   "SDMA (%u) engine error: 0x%llx state %s\n",
			   sde->this_idx,
			   (unsigned long long)status,
			   sdma_state_names[sde->state.current_state]);
		dump_sdma_state(sde);
	}
	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
{
	u64 set_senddmactrl = 0;
	u64 clr_senddmactrl = 0;
	unsigned long flags;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
		   sde->this_idx,
		   (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
		   (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
#endif

	if (op & SDMA_SENDCTRL_OP_ENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_INTENABLE)
		set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);

	if (op & SDMA_SENDCTRL_OP_HALT)
		set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
	else
		clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);

	spin_lock_irqsave(&sde->senddmactrl_lock, flags);

	sde->p_senddmactrl |= set_senddmactrl;
	sde->p_senddmactrl &= ~clr_senddmactrl;

	if (op & SDMA_SENDCTRL_OP_CLEANUP)
		write_sde_csr(sde, SD(CTRL),
			      sde->p_senddmactrl |
			      SD(CTRL_SDMA_CLEANUP_SMASK));
	else
		write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);

	spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);

#ifdef CONFIG_SDMA_VERBOSITY
	sdma_dumpstate(sde);
#endif
}

static void sdma_setlengen(struct sdma_engine *sde)
{
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
		   sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
#endif

	/*
	 * Set SendDmaLenGen and clear-then-set the MSB of the generation
	 * count to enable generation checking and load the internal
	 * generation counter.
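	 * This is done with two writes of the LenGen CSR below: the first
	 * leaves the generation MSB clear, the second sets it.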
1996 */ 1997 write_sde_csr(sde, SD(LEN_GEN), 1998 (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)); 1999 write_sde_csr(sde, SD(LEN_GEN), 2000 ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | 2001 (4ULL << SD(LEN_GEN_GENERATION_SHIFT))); 2002 } 2003 2004 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail) 2005 { 2006 /* Commit writes to memory and advance the tail on the chip */ 2007 smp_wmb(); /* see get_txhead() */ 2008 writeq(tail, sde->tail_csr); 2009 } 2010 2011 /* 2012 * This is called when changing to state s10_hw_start_up_halt_wait as 2013 * a result of send buffer errors or send DMA descriptor errors. 2014 */ 2015 static void sdma_hw_start_up(struct sdma_engine *sde) 2016 { 2017 u64 reg; 2018 2019 #ifdef CONFIG_SDMA_VERBOSITY 2020 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", 2021 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 2022 #endif 2023 2024 sdma_setlengen(sde); 2025 sdma_update_tail(sde, 0); /* Set SendDmaTail */ 2026 *sde->head_dma = 0; 2027 2028 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) << 2029 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT); 2030 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); 2031 } 2032 2033 /* 2034 * set_sdma_integrity 2035 * 2036 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'. 2037 */ 2038 static void set_sdma_integrity(struct sdma_engine *sde) 2039 { 2040 struct hfi1_devdata *dd = sde->dd; 2041 2042 write_sde_csr(sde, SD(CHECK_ENABLE), 2043 hfi1_pkt_base_sdma_integrity(dd)); 2044 } 2045 2046 static void init_sdma_regs( 2047 struct sdma_engine *sde, 2048 u32 credits, 2049 uint idle_cnt) 2050 { 2051 u8 opval, opmask; 2052 #ifdef CONFIG_SDMA_VERBOSITY 2053 struct hfi1_devdata *dd = sde->dd; 2054 2055 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", 2056 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 2057 #endif 2058 2059 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys); 2060 sdma_setlengen(sde); 2061 sdma_update_tail(sde, 0); /* Set SendDmaTail */ 2062 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt); 2063 write_sde_csr(sde, SD(DESC_CNT), 0); 2064 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys); 2065 write_sde_csr(sde, SD(MEMORY), 2066 ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) | 2067 ((u64)(credits * sde->this_idx) << 2068 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT))); 2069 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull); 2070 set_sdma_integrity(sde); 2071 opmask = OPCODE_CHECK_MASK_DISABLED; 2072 opval = OPCODE_CHECK_VAL_DISABLED; 2073 write_sde_csr(sde, SD(CHECK_OPCODE), 2074 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) | 2075 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT)); 2076 } 2077 2078 #ifdef CONFIG_SDMA_VERBOSITY 2079 2080 #define sdma_dumpstate_helper0(reg) do { \ 2081 csr = read_csr(sde->dd, reg); \ 2082 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \ 2083 } while (0) 2084 2085 #define sdma_dumpstate_helper(reg) do { \ 2086 csr = read_sde_csr(sde, reg); \ 2087 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \ 2088 #reg, sde->this_idx, csr); \ 2089 } while (0) 2090 2091 #define sdma_dumpstate_helper2(reg) do { \ 2092 csr = read_csr(sde->dd, reg + (8 * i)); \ 2093 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \ 2094 #reg, i, csr); \ 2095 } while (0) 2096 2097 void sdma_dumpstate(struct sdma_engine *sde) 2098 { 2099 u64 csr; 2100 unsigned i; 2101 2102 sdma_dumpstate_helper(SD(CTRL)); 2103 sdma_dumpstate_helper(SD(STATUS)); 2104 sdma_dumpstate_helper0(SD(ERR_STATUS)); 2105 sdma_dumpstate_helper0(SD(ERR_MASK)); 2106 
sdma_dumpstate_helper(SD(ENG_ERR_STATUS)); 2107 sdma_dumpstate_helper(SD(ENG_ERR_MASK)); 2108 2109 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) { 2110 sdma_dumpstate_helper2(CCE_INT_STATUS); 2111 sdma_dumpstate_helper2(CCE_INT_MASK); 2112 sdma_dumpstate_helper2(CCE_INT_BLOCKED); 2113 } 2114 2115 sdma_dumpstate_helper(SD(TAIL)); 2116 sdma_dumpstate_helper(SD(HEAD)); 2117 sdma_dumpstate_helper(SD(PRIORITY_THLD)); 2118 sdma_dumpstate_helper(SD(IDLE_CNT)); 2119 sdma_dumpstate_helper(SD(RELOAD_CNT)); 2120 sdma_dumpstate_helper(SD(DESC_CNT)); 2121 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT)); 2122 sdma_dumpstate_helper(SD(MEMORY)); 2123 sdma_dumpstate_helper0(SD(ENGINES)); 2124 sdma_dumpstate_helper0(SD(MEM_SIZE)); 2125 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */ 2126 sdma_dumpstate_helper(SD(BASE_ADDR)); 2127 sdma_dumpstate_helper(SD(LEN_GEN)); 2128 sdma_dumpstate_helper(SD(HEAD_ADDR)); 2129 sdma_dumpstate_helper(SD(CHECK_ENABLE)); 2130 sdma_dumpstate_helper(SD(CHECK_VL)); 2131 sdma_dumpstate_helper(SD(CHECK_JOB_KEY)); 2132 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY)); 2133 sdma_dumpstate_helper(SD(CHECK_SLID)); 2134 sdma_dumpstate_helper(SD(CHECK_OPCODE)); 2135 } 2136 #endif 2137 2138 static void dump_sdma_state(struct sdma_engine *sde) 2139 { 2140 struct hw_sdma_desc *descq; 2141 struct hw_sdma_desc *descqp; 2142 u64 desc[2]; 2143 u64 addr; 2144 u8 gen; 2145 u16 len; 2146 u16 head, tail, cnt; 2147 2148 head = sde->descq_head & sde->sdma_mask; 2149 tail = sde->descq_tail & sde->sdma_mask; 2150 cnt = sdma_descq_freecnt(sde); 2151 descq = sde->descq; 2152 2153 dd_dev_err(sde->dd, 2154 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n", 2155 sde->this_idx, head, tail, cnt, 2156 !list_empty(&sde->flushlist)); 2157 2158 /* print info for each entry in the descriptor queue */ 2159 while (head != tail) { 2160 char flags[6] = { 'x', 'x', 'x', 'x', 0 }; 2161 2162 descqp = &sde->descq[head]; 2163 desc[0] = le64_to_cpu(descqp->qw[0]); 2164 desc[1] = le64_to_cpu(descqp->qw[1]); 2165 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; 2166 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 2167 'H' : '-'; 2168 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; 2169 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 
'L' : '-'; 2170 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) 2171 & SDMA_DESC0_PHY_ADDR_MASK; 2172 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) 2173 & SDMA_DESC1_GENERATION_MASK; 2174 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) 2175 & SDMA_DESC0_BYTE_COUNT_MASK; 2176 dd_dev_err(sde->dd, 2177 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", 2178 head, flags, addr, gen, len); 2179 dd_dev_err(sde->dd, 2180 "\tdesc0:0x%016llx desc1 0x%016llx\n", 2181 desc[0], desc[1]); 2182 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) 2183 dd_dev_err(sde->dd, 2184 "\taidx: %u amode: %u alen: %u\n", 2185 (u8)((desc[1] & 2186 SDMA_DESC1_HEADER_INDEX_SMASK) >> 2187 SDMA_DESC1_HEADER_INDEX_SHIFT), 2188 (u8)((desc[1] & 2189 SDMA_DESC1_HEADER_MODE_SMASK) >> 2190 SDMA_DESC1_HEADER_MODE_SHIFT), 2191 (u8)((desc[1] & 2192 SDMA_DESC1_HEADER_DWS_SMASK) >> 2193 SDMA_DESC1_HEADER_DWS_SHIFT)); 2194 head++; 2195 head &= sde->sdma_mask; 2196 } 2197 } 2198 2199 #define SDE_FMT \ 2200 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n" 2201 /** 2202 * sdma_seqfile_dump_sde() - debugfs dump of sde 2203 * @s: seq file 2204 * @sde: send dma engine to dump 2205 * 2206 * This routine dumps the sde to the indicated seq file. 2207 */ 2208 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) 2209 { 2210 u16 head, tail; 2211 struct hw_sdma_desc *descqp; 2212 u64 desc[2]; 2213 u64 addr; 2214 u8 gen; 2215 u16 len; 2216 2217 head = sde->descq_head & sde->sdma_mask; 2218 tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; 2219 seq_printf(s, SDE_FMT, sde->this_idx, 2220 sde->cpu, 2221 sdma_state_name(sde->state.current_state), 2222 (unsigned long long)read_sde_csr(sde, SD(CTRL)), 2223 (unsigned long long)read_sde_csr(sde, SD(STATUS)), 2224 (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)), 2225 (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail, 2226 (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, 2227 (unsigned long long)le64_to_cpu(*sde->head_dma), 2228 (unsigned long long)read_sde_csr(sde, SD(MEMORY)), 2229 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), 2230 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), 2231 (unsigned long long)sde->last_status, 2232 (unsigned long long)sde->ahg_bits, 2233 sde->tx_tail, 2234 sde->tx_head, 2235 sde->descq_tail, 2236 sde->descq_head, 2237 !list_empty(&sde->flushlist), 2238 sde->descq_full_count, 2239 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); 2240 2241 /* print info for each entry in the descriptor queue */ 2242 while (head != tail) { 2243 char flags[6] = { 'x', 'x', 'x', 'x', 0 }; 2244 2245 descqp = &sde->descq[head]; 2246 desc[0] = le64_to_cpu(descqp->qw[0]); 2247 desc[1] = le64_to_cpu(descqp->qw[1]); 2248 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; 2249 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 2250 'H' : '-'; 2251 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; 2252 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 
'L' : '-'; 2253 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) 2254 & SDMA_DESC0_PHY_ADDR_MASK; 2255 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) 2256 & SDMA_DESC1_GENERATION_MASK; 2257 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) 2258 & SDMA_DESC0_BYTE_COUNT_MASK; 2259 seq_printf(s, 2260 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", 2261 head, flags, addr, gen, len); 2262 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) 2263 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", 2264 (u8)((desc[1] & 2265 SDMA_DESC1_HEADER_INDEX_SMASK) >> 2266 SDMA_DESC1_HEADER_INDEX_SHIFT), 2267 (u8)((desc[1] & 2268 SDMA_DESC1_HEADER_MODE_SMASK) >> 2269 SDMA_DESC1_HEADER_MODE_SHIFT)); 2270 head = (head + 1) & sde->sdma_mask; 2271 } 2272 } 2273 2274 /* 2275 * add the generation number into 2276 * the qw1 and return 2277 */ 2278 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1) 2279 { 2280 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3; 2281 2282 qw1 &= ~SDMA_DESC1_GENERATION_SMASK; 2283 qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK) 2284 << SDMA_DESC1_GENERATION_SHIFT; 2285 return qw1; 2286 } 2287 2288 /* 2289 * This routine submits the indicated tx 2290 * 2291 * Space has already been guaranteed and 2292 * tail side of ring is locked. 2293 * 2294 * The hardware tail update is done 2295 * in the caller and that is facilitated 2296 * by returning the new tail. 2297 * 2298 * There is special case logic for ahg 2299 * to not add the generation number for 2300 * up to 2 descriptors that follow the 2301 * first descriptor. 2302 * 2303 */ 2304 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx) 2305 { 2306 int i; 2307 u16 tail; 2308 struct sdma_desc *descp = tx->descp; 2309 u8 skip = 0, mode = ahg_mode(tx); 2310 2311 tail = sde->descq_tail & sde->sdma_mask; 2312 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); 2313 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1])); 2314 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1], 2315 tail, &sde->descq[tail]); 2316 tail = ++sde->descq_tail & sde->sdma_mask; 2317 descp++; 2318 if (mode > SDMA_AHG_APPLY_UPDATE1) 2319 skip = mode >> 1; 2320 for (i = 1; i < tx->num_desc; i++, descp++) { 2321 u64 qw1; 2322 2323 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); 2324 if (skip) { 2325 /* edits don't have generation */ 2326 qw1 = descp->qw[1]; 2327 skip--; 2328 } else { 2329 /* replace generation with real one for non-edits */ 2330 qw1 = add_gen(sde, descp->qw[1]); 2331 } 2332 sde->descq[tail].qw[1] = cpu_to_le64(qw1); 2333 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1, 2334 tail, &sde->descq[tail]); 2335 tail = ++sde->descq_tail & sde->sdma_mask; 2336 } 2337 tx->next_descq_idx = tail; 2338 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2339 tx->sn = sde->tail_sn++; 2340 trace_hfi1_sdma_in_sn(sde, tx->sn); 2341 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]); 2342 #endif 2343 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx; 2344 sde->desc_avail -= tx->num_desc; 2345 return tail; 2346 } 2347 2348 /* 2349 * Check for progress 2350 */ 2351 static int sdma_check_progress( 2352 struct sdma_engine *sde, 2353 struct iowait *wait, 2354 struct sdma_txreq *tx) 2355 { 2356 int ret; 2357 2358 sde->desc_avail = sdma_descq_freecnt(sde); 2359 if (tx->num_desc <= sde->desc_avail) 2360 return -EAGAIN; 2361 /* pulse the head_lock */ 2362 if (wait && wait->sleep) { 2363 unsigned seq; 2364 2365 seq = raw_seqcount_begin( 2366 (const seqcount_t *)&sde->head_lock.seqcount); 2367 ret = wait->sleep(sde, wait, tx, seq); 
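		/*
		 * Editor's note (not in the original source): the seq value
		 * sampled above from the head_lock seqcount is handed to the
		 * ->sleep() callback so it can detect, via a seqcount retry
		 * check, whether the engine made progress (the interrupt
		 * path takes head_lock as a writer) before committing to
		 * queue the wait.
		 */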
2368 if (ret == -EAGAIN) 2369 sde->desc_avail = sdma_descq_freecnt(sde); 2370 } else { 2371 ret = -EBUSY; 2372 } 2373 return ret; 2374 } 2375 2376 /** 2377 * sdma_send_txreq() - submit a tx req to ring 2378 * @sde: sdma engine to use 2379 * @wait: wait structure to use when full (may be NULL) 2380 * @tx: sdma_txreq to submit 2381 * 2382 * The call submits the tx into the ring. If an iowait structure is non-NULL, 2383 * the packet will be queued to the list in wait. 2384 * 2385 * Return: 2386 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in 2387 * ring (wait == NULL) 2388 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state 2389 */ 2390 int sdma_send_txreq(struct sdma_engine *sde, 2391 struct iowait *wait, 2392 struct sdma_txreq *tx) 2393 { 2394 int ret = 0; 2395 u16 tail; 2396 unsigned long flags; 2397 2398 /* user should have supplied entire packet */ 2399 if (unlikely(tx->tlen)) 2400 return -EINVAL; 2401 tx->wait = wait; 2402 spin_lock_irqsave(&sde->tail_lock, flags); 2403 retry: 2404 if (unlikely(!__sdma_running(sde))) 2405 goto unlock_noconn; 2406 if (unlikely(tx->num_desc > sde->desc_avail)) 2407 goto nodesc; 2408 tail = submit_tx(sde, tx); 2409 if (wait) 2410 iowait_sdma_inc(wait); 2411 sdma_update_tail(sde, tail); 2412 unlock: 2413 spin_unlock_irqrestore(&sde->tail_lock, flags); 2414 return ret; 2415 unlock_noconn: 2416 if (wait) 2417 iowait_sdma_inc(wait); 2418 tx->next_descq_idx = 0; 2419 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2420 tx->sn = sde->tail_sn++; 2421 trace_hfi1_sdma_in_sn(sde, tx->sn); 2422 #endif 2423 spin_lock(&sde->flushlist_lock); 2424 list_add_tail(&tx->list, &sde->flushlist); 2425 spin_unlock(&sde->flushlist_lock); 2426 if (wait) { 2427 wait->tx_count++; 2428 wait->count += tx->num_desc; 2429 } 2430 schedule_work(&sde->flush_worker); 2431 ret = -ECOMM; 2432 goto unlock; 2433 nodesc: 2434 ret = sdma_check_progress(sde, wait, tx); 2435 if (ret == -EAGAIN) { 2436 ret = 0; 2437 goto retry; 2438 } 2439 sde->descq_full_count++; 2440 goto unlock; 2441 } 2442 2443 /** 2444 * sdma_send_txlist() - submit a list of tx req to ring 2445 * @sde: sdma engine to use 2446 * @wait: wait structure to use when full (may be NULL) 2447 * @tx_list: list of sdma_txreqs to submit 2448 * @count_out: pointer to a u32 which, after return, will contain the total number of 2449 * sdma_txreqs removed from the tx_list. This will include sdma_txreqs 2450 * whose SDMA descriptors are submitted to the ring and the sdma_txreqs 2451 * which are added to the SDMA engine flush list if the SDMA engine state is 2452 * not running. 2453 * 2454 * The call submits the list into the ring. 2455 * 2456 * If the iowait structure is non-NULL and not equal to the iowait list, 2457 * the unprocessed part of the list will be appended to the list in wait. 2458 * 2459 * In all cases, the tx_list will be updated so the head of the tx_list is 2460 * the list of descriptors that have yet to be transmitted. 2461 * 2462 * The intent of this call is to provide a more efficient 2463 * way of submitting multiple packets to SDMA while holding the tail 2464 * side locking.
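 *
 * Illustrative caller sketch (editor's addition, not from the original
 * source; assumes each txreq was already fully built, e.g. with the
 * sdma_txadd_* helpers, and ignores error handling):
 *
 *	LIST_HEAD(txlist);
 *	u32 count = 0;
 *	int ret;
 *
 *	list_add_tail(&tx->list, &txlist);	(once per sdma_txreq)
 *	ret = sdma_send_txlist(sde, wait, &txlist, &count);
 *	(on return, count holds the number of txreqs consumed from txlist)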
2465 * 2466 * Return: 2467 * 0 - Success, 2468 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) 2469 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state 2470 */ 2471 int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait, 2472 struct list_head *tx_list, u32 *count_out) 2473 { 2474 struct sdma_txreq *tx, *tx_next; 2475 int ret = 0; 2476 unsigned long flags; 2477 u16 tail = INVALID_TAIL; 2478 u32 submit_count = 0, flush_count = 0, total_count; 2479 2480 spin_lock_irqsave(&sde->tail_lock, flags); 2481 retry: 2482 list_for_each_entry_safe(tx, tx_next, tx_list, list) { 2483 tx->wait = wait; 2484 if (unlikely(!__sdma_running(sde))) 2485 goto unlock_noconn; 2486 if (unlikely(tx->num_desc > sde->desc_avail)) 2487 goto nodesc; 2488 if (unlikely(tx->tlen)) { 2489 ret = -EINVAL; 2490 goto update_tail; 2491 } 2492 list_del_init(&tx->list); 2493 tail = submit_tx(sde, tx); 2494 submit_count++; 2495 if (tail != INVALID_TAIL && 2496 (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) { 2497 sdma_update_tail(sde, tail); 2498 tail = INVALID_TAIL; 2499 } 2500 } 2501 update_tail: 2502 total_count = submit_count + flush_count; 2503 if (wait) 2504 iowait_sdma_add(wait, total_count); 2505 if (tail != INVALID_TAIL) 2506 sdma_update_tail(sde, tail); 2507 spin_unlock_irqrestore(&sde->tail_lock, flags); 2508 *count_out = total_count; 2509 return ret; 2510 unlock_noconn: 2511 spin_lock(&sde->flushlist_lock); 2512 list_for_each_entry_safe(tx, tx_next, tx_list, list) { 2513 tx->wait = wait; 2514 list_del_init(&tx->list); 2515 tx->next_descq_idx = 0; 2516 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2517 tx->sn = sde->tail_sn++; 2518 trace_hfi1_sdma_in_sn(sde, tx->sn); 2519 #endif 2520 list_add_tail(&tx->list, &sde->flushlist); 2521 flush_count++; 2522 if (wait) { 2523 wait->tx_count++; 2524 wait->count += tx->num_desc; 2525 } 2526 } 2527 spin_unlock(&sde->flushlist_lock); 2528 schedule_work(&sde->flush_worker); 2529 ret = -ECOMM; 2530 goto update_tail; 2531 nodesc: 2532 ret = sdma_check_progress(sde, wait, tx); 2533 if (ret == -EAGAIN) { 2534 ret = 0; 2535 goto retry; 2536 } 2537 sde->descq_full_count++; 2538 goto update_tail; 2539 } 2540 2541 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) 2542 { 2543 unsigned long flags; 2544 2545 spin_lock_irqsave(&sde->tail_lock, flags); 2546 write_seqlock(&sde->head_lock); 2547 2548 __sdma_process_event(sde, event); 2549 2550 if (sde->state.current_state == sdma_state_s99_running) 2551 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); 2552 2553 write_sequnlock(&sde->head_lock); 2554 spin_unlock_irqrestore(&sde->tail_lock, flags); 2555 } 2556 2557 static void __sdma_process_event(struct sdma_engine *sde, 2558 enum sdma_events event) 2559 { 2560 struct sdma_state *ss = &sde->state; 2561 int need_progress = 0; 2562 2563 /* CONFIG SDMA temporary */ 2564 #ifdef CONFIG_SDMA_VERBOSITY 2565 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx, 2566 sdma_state_names[ss->current_state], 2567 sdma_event_names[event]); 2568 #endif 2569 2570 switch (ss->current_state) { 2571 case sdma_state_s00_hw_down: 2572 switch (event) { 2573 case sdma_event_e00_go_hw_down: 2574 break; 2575 case sdma_event_e30_go_running: 2576 /* 2577 * If down, but running requested (usually result 2578 * of link up, then we need to start up. 2579 * This can happen when hw down is requested while 2580 * bringing the link up with traffic active on 2581 * 7220, e.g. 
2582 */ 2583 ss->go_s99_running = 1; 2584 /* fall through and start dma engine */ 2585 case sdma_event_e10_go_hw_start: 2586 /* This reference means the state machine is started */ 2587 sdma_get(&sde->state); 2588 sdma_set_state(sde, 2589 sdma_state_s10_hw_start_up_halt_wait); 2590 break; 2591 case sdma_event_e15_hw_halt_done: 2592 break; 2593 case sdma_event_e25_hw_clean_up_done: 2594 break; 2595 case sdma_event_e40_sw_cleaned: 2596 sdma_sw_tear_down(sde); 2597 break; 2598 case sdma_event_e50_hw_cleaned: 2599 break; 2600 case sdma_event_e60_hw_halted: 2601 break; 2602 case sdma_event_e70_go_idle: 2603 break; 2604 case sdma_event_e80_hw_freeze: 2605 break; 2606 case sdma_event_e81_hw_frozen: 2607 break; 2608 case sdma_event_e82_hw_unfreeze: 2609 break; 2610 case sdma_event_e85_link_down: 2611 break; 2612 case sdma_event_e90_sw_halted: 2613 break; 2614 } 2615 break; 2616 2617 case sdma_state_s10_hw_start_up_halt_wait: 2618 switch (event) { 2619 case sdma_event_e00_go_hw_down: 2620 sdma_set_state(sde, sdma_state_s00_hw_down); 2621 sdma_sw_tear_down(sde); 2622 break; 2623 case sdma_event_e10_go_hw_start: 2624 break; 2625 case sdma_event_e15_hw_halt_done: 2626 sdma_set_state(sde, 2627 sdma_state_s15_hw_start_up_clean_wait); 2628 sdma_start_hw_clean_up(sde); 2629 break; 2630 case sdma_event_e25_hw_clean_up_done: 2631 break; 2632 case sdma_event_e30_go_running: 2633 ss->go_s99_running = 1; 2634 break; 2635 case sdma_event_e40_sw_cleaned: 2636 break; 2637 case sdma_event_e50_hw_cleaned: 2638 break; 2639 case sdma_event_e60_hw_halted: 2640 schedule_work(&sde->err_halt_worker); 2641 break; 2642 case sdma_event_e70_go_idle: 2643 ss->go_s99_running = 0; 2644 break; 2645 case sdma_event_e80_hw_freeze: 2646 break; 2647 case sdma_event_e81_hw_frozen: 2648 break; 2649 case sdma_event_e82_hw_unfreeze: 2650 break; 2651 case sdma_event_e85_link_down: 2652 break; 2653 case sdma_event_e90_sw_halted: 2654 break; 2655 } 2656 break; 2657 2658 case sdma_state_s15_hw_start_up_clean_wait: 2659 switch (event) { 2660 case sdma_event_e00_go_hw_down: 2661 sdma_set_state(sde, sdma_state_s00_hw_down); 2662 sdma_sw_tear_down(sde); 2663 break; 2664 case sdma_event_e10_go_hw_start: 2665 break; 2666 case sdma_event_e15_hw_halt_done: 2667 break; 2668 case sdma_event_e25_hw_clean_up_done: 2669 sdma_hw_start_up(sde); 2670 sdma_set_state(sde, ss->go_s99_running ? 
2671 sdma_state_s99_running : 2672 sdma_state_s20_idle); 2673 break; 2674 case sdma_event_e30_go_running: 2675 ss->go_s99_running = 1; 2676 break; 2677 case sdma_event_e40_sw_cleaned: 2678 break; 2679 case sdma_event_e50_hw_cleaned: 2680 break; 2681 case sdma_event_e60_hw_halted: 2682 break; 2683 case sdma_event_e70_go_idle: 2684 ss->go_s99_running = 0; 2685 break; 2686 case sdma_event_e80_hw_freeze: 2687 break; 2688 case sdma_event_e81_hw_frozen: 2689 break; 2690 case sdma_event_e82_hw_unfreeze: 2691 break; 2692 case sdma_event_e85_link_down: 2693 break; 2694 case sdma_event_e90_sw_halted: 2695 break; 2696 } 2697 break; 2698 2699 case sdma_state_s20_idle: 2700 switch (event) { 2701 case sdma_event_e00_go_hw_down: 2702 sdma_set_state(sde, sdma_state_s00_hw_down); 2703 sdma_sw_tear_down(sde); 2704 break; 2705 case sdma_event_e10_go_hw_start: 2706 break; 2707 case sdma_event_e15_hw_halt_done: 2708 break; 2709 case sdma_event_e25_hw_clean_up_done: 2710 break; 2711 case sdma_event_e30_go_running: 2712 sdma_set_state(sde, sdma_state_s99_running); 2713 ss->go_s99_running = 1; 2714 break; 2715 case sdma_event_e40_sw_cleaned: 2716 break; 2717 case sdma_event_e50_hw_cleaned: 2718 break; 2719 case sdma_event_e60_hw_halted: 2720 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); 2721 schedule_work(&sde->err_halt_worker); 2722 break; 2723 case sdma_event_e70_go_idle: 2724 break; 2725 case sdma_event_e85_link_down: 2726 /* fall through */ 2727 case sdma_event_e80_hw_freeze: 2728 sdma_set_state(sde, sdma_state_s80_hw_freeze); 2729 atomic_dec(&sde->dd->sdma_unfreeze_count); 2730 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 2731 break; 2732 case sdma_event_e81_hw_frozen: 2733 break; 2734 case sdma_event_e82_hw_unfreeze: 2735 break; 2736 case sdma_event_e90_sw_halted: 2737 break; 2738 } 2739 break; 2740 2741 case sdma_state_s30_sw_clean_up_wait: 2742 switch (event) { 2743 case sdma_event_e00_go_hw_down: 2744 sdma_set_state(sde, sdma_state_s00_hw_down); 2745 break; 2746 case sdma_event_e10_go_hw_start: 2747 break; 2748 case sdma_event_e15_hw_halt_done: 2749 break; 2750 case sdma_event_e25_hw_clean_up_done: 2751 break; 2752 case sdma_event_e30_go_running: 2753 ss->go_s99_running = 1; 2754 break; 2755 case sdma_event_e40_sw_cleaned: 2756 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait); 2757 sdma_start_hw_clean_up(sde); 2758 break; 2759 case sdma_event_e50_hw_cleaned: 2760 break; 2761 case sdma_event_e60_hw_halted: 2762 break; 2763 case sdma_event_e70_go_idle: 2764 ss->go_s99_running = 0; 2765 break; 2766 case sdma_event_e80_hw_freeze: 2767 break; 2768 case sdma_event_e81_hw_frozen: 2769 break; 2770 case sdma_event_e82_hw_unfreeze: 2771 break; 2772 case sdma_event_e85_link_down: 2773 ss->go_s99_running = 0; 2774 break; 2775 case sdma_event_e90_sw_halted: 2776 break; 2777 } 2778 break; 2779 2780 case sdma_state_s40_hw_clean_up_wait: 2781 switch (event) { 2782 case sdma_event_e00_go_hw_down: 2783 sdma_set_state(sde, sdma_state_s00_hw_down); 2784 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2785 break; 2786 case sdma_event_e10_go_hw_start: 2787 break; 2788 case sdma_event_e15_hw_halt_done: 2789 break; 2790 case sdma_event_e25_hw_clean_up_done: 2791 sdma_hw_start_up(sde); 2792 sdma_set_state(sde, ss->go_s99_running ? 
2793 sdma_state_s99_running : 2794 sdma_state_s20_idle); 2795 break; 2796 case sdma_event_e30_go_running: 2797 ss->go_s99_running = 1; 2798 break; 2799 case sdma_event_e40_sw_cleaned: 2800 break; 2801 case sdma_event_e50_hw_cleaned: 2802 break; 2803 case sdma_event_e60_hw_halted: 2804 break; 2805 case sdma_event_e70_go_idle: 2806 ss->go_s99_running = 0; 2807 break; 2808 case sdma_event_e80_hw_freeze: 2809 break; 2810 case sdma_event_e81_hw_frozen: 2811 break; 2812 case sdma_event_e82_hw_unfreeze: 2813 break; 2814 case sdma_event_e85_link_down: 2815 ss->go_s99_running = 0; 2816 break; 2817 case sdma_event_e90_sw_halted: 2818 break; 2819 } 2820 break; 2821 2822 case sdma_state_s50_hw_halt_wait: 2823 switch (event) { 2824 case sdma_event_e00_go_hw_down: 2825 sdma_set_state(sde, sdma_state_s00_hw_down); 2826 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2827 break; 2828 case sdma_event_e10_go_hw_start: 2829 break; 2830 case sdma_event_e15_hw_halt_done: 2831 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); 2832 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2833 break; 2834 case sdma_event_e25_hw_clean_up_done: 2835 break; 2836 case sdma_event_e30_go_running: 2837 ss->go_s99_running = 1; 2838 break; 2839 case sdma_event_e40_sw_cleaned: 2840 break; 2841 case sdma_event_e50_hw_cleaned: 2842 break; 2843 case sdma_event_e60_hw_halted: 2844 schedule_work(&sde->err_halt_worker); 2845 break; 2846 case sdma_event_e70_go_idle: 2847 ss->go_s99_running = 0; 2848 break; 2849 case sdma_event_e80_hw_freeze: 2850 break; 2851 case sdma_event_e81_hw_frozen: 2852 break; 2853 case sdma_event_e82_hw_unfreeze: 2854 break; 2855 case sdma_event_e85_link_down: 2856 ss->go_s99_running = 0; 2857 break; 2858 case sdma_event_e90_sw_halted: 2859 break; 2860 } 2861 break; 2862 2863 case sdma_state_s60_idle_halt_wait: 2864 switch (event) { 2865 case sdma_event_e00_go_hw_down: 2866 sdma_set_state(sde, sdma_state_s00_hw_down); 2867 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2868 break; 2869 case sdma_event_e10_go_hw_start: 2870 break; 2871 case sdma_event_e15_hw_halt_done: 2872 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); 2873 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2874 break; 2875 case sdma_event_e25_hw_clean_up_done: 2876 break; 2877 case sdma_event_e30_go_running: 2878 ss->go_s99_running = 1; 2879 break; 2880 case sdma_event_e40_sw_cleaned: 2881 break; 2882 case sdma_event_e50_hw_cleaned: 2883 break; 2884 case sdma_event_e60_hw_halted: 2885 schedule_work(&sde->err_halt_worker); 2886 break; 2887 case sdma_event_e70_go_idle: 2888 ss->go_s99_running = 0; 2889 break; 2890 case sdma_event_e80_hw_freeze: 2891 break; 2892 case sdma_event_e81_hw_frozen: 2893 break; 2894 case sdma_event_e82_hw_unfreeze: 2895 break; 2896 case sdma_event_e85_link_down: 2897 break; 2898 case sdma_event_e90_sw_halted: 2899 break; 2900 } 2901 break; 2902 2903 case sdma_state_s80_hw_freeze: 2904 switch (event) { 2905 case sdma_event_e00_go_hw_down: 2906 sdma_set_state(sde, sdma_state_s00_hw_down); 2907 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2908 break; 2909 case sdma_event_e10_go_hw_start: 2910 break; 2911 case sdma_event_e15_hw_halt_done: 2912 break; 2913 case sdma_event_e25_hw_clean_up_done: 2914 break; 2915 case sdma_event_e30_go_running: 2916 ss->go_s99_running = 1; 2917 break; 2918 case sdma_event_e40_sw_cleaned: 2919 break; 2920 case sdma_event_e50_hw_cleaned: 2921 break; 2922 case sdma_event_e60_hw_halted: 2923 break; 2924 case sdma_event_e70_go_idle: 2925 ss->go_s99_running = 0; 2926 break; 2927 
case sdma_event_e80_hw_freeze: 2928 break; 2929 case sdma_event_e81_hw_frozen: 2930 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean); 2931 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2932 break; 2933 case sdma_event_e82_hw_unfreeze: 2934 break; 2935 case sdma_event_e85_link_down: 2936 break; 2937 case sdma_event_e90_sw_halted: 2938 break; 2939 } 2940 break; 2941 2942 case sdma_state_s82_freeze_sw_clean: 2943 switch (event) { 2944 case sdma_event_e00_go_hw_down: 2945 sdma_set_state(sde, sdma_state_s00_hw_down); 2946 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2947 break; 2948 case sdma_event_e10_go_hw_start: 2949 break; 2950 case sdma_event_e15_hw_halt_done: 2951 break; 2952 case sdma_event_e25_hw_clean_up_done: 2953 break; 2954 case sdma_event_e30_go_running: 2955 ss->go_s99_running = 1; 2956 break; 2957 case sdma_event_e40_sw_cleaned: 2958 /* notify caller this engine is done cleaning */ 2959 atomic_dec(&sde->dd->sdma_unfreeze_count); 2960 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 2961 break; 2962 case sdma_event_e50_hw_cleaned: 2963 break; 2964 case sdma_event_e60_hw_halted: 2965 break; 2966 case sdma_event_e70_go_idle: 2967 ss->go_s99_running = 0; 2968 break; 2969 case sdma_event_e80_hw_freeze: 2970 break; 2971 case sdma_event_e81_hw_frozen: 2972 break; 2973 case sdma_event_e82_hw_unfreeze: 2974 sdma_hw_start_up(sde); 2975 sdma_set_state(sde, ss->go_s99_running ? 2976 sdma_state_s99_running : 2977 sdma_state_s20_idle); 2978 break; 2979 case sdma_event_e85_link_down: 2980 break; 2981 case sdma_event_e90_sw_halted: 2982 break; 2983 } 2984 break; 2985 2986 case sdma_state_s99_running: 2987 switch (event) { 2988 case sdma_event_e00_go_hw_down: 2989 sdma_set_state(sde, sdma_state_s00_hw_down); 2990 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2991 break; 2992 case sdma_event_e10_go_hw_start: 2993 break; 2994 case sdma_event_e15_hw_halt_done: 2995 break; 2996 case sdma_event_e25_hw_clean_up_done: 2997 break; 2998 case sdma_event_e30_go_running: 2999 break; 3000 case sdma_event_e40_sw_cleaned: 3001 break; 3002 case sdma_event_e50_hw_cleaned: 3003 break; 3004 case sdma_event_e60_hw_halted: 3005 need_progress = 1; 3006 sdma_err_progress_check_schedule(sde); 3007 case sdma_event_e90_sw_halted: 3008 /* 3009 * SW initiated halt does not perform engines 3010 * progress check 3011 */ 3012 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); 3013 schedule_work(&sde->err_halt_worker); 3014 break; 3015 case sdma_event_e70_go_idle: 3016 sdma_set_state(sde, sdma_state_s60_idle_halt_wait); 3017 break; 3018 case sdma_event_e85_link_down: 3019 ss->go_s99_running = 0; 3020 /* fall through */ 3021 case sdma_event_e80_hw_freeze: 3022 sdma_set_state(sde, sdma_state_s80_hw_freeze); 3023 atomic_dec(&sde->dd->sdma_unfreeze_count); 3024 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 3025 break; 3026 case sdma_event_e81_hw_frozen: 3027 break; 3028 case sdma_event_e82_hw_unfreeze: 3029 break; 3030 } 3031 break; 3032 } 3033 3034 ss->last_event = event; 3035 if (need_progress) 3036 sdma_make_progress(sde, 0); 3037 } 3038 3039 /* 3040 * _extend_sdma_tx_descs() - helper to extend txreq 3041 * 3042 * This is called once the initial nominal allocation 3043 * of descriptors in the sdma_txreq is exhausted. 3044 * 3045 * The code will bump the allocation up to the max 3046 * of MAX_DESC (64) descriptors. There doesn't seem 3047 * much point in an interim step. The last descriptor 3048 * is reserved for coalesce buffer in order to support 3049 * cases where input packet has >MAX_DESC iovecs. 
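 *
 * (Editor's summary of the code that follows, for clarity: the first
 * call replaces the inline tx->descs[] array with a kmalloc'ed array of
 * MAX_DESC entries and sets desc_limit to MAX_DESC - 1; once num_desc
 * reaches MAX_DESC - 1 with data still remaining, a coalesce buffer of
 * tlen plus padding is allocated instead, and the rest of the packet is
 * copied into it rather than being described by further descriptors.)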
3050 * 3051 */ 3052 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) 3053 { 3054 int i; 3055 3056 /* Handle last descriptor */ 3057 if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { 3058 /* if tlen is 0, it is for padding, release last descriptor */ 3059 if (!tx->tlen) { 3060 tx->desc_limit = MAX_DESC; 3061 } else if (!tx->coalesce_buf) { 3062 /* allocate coalesce buffer with space for padding */ 3063 tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32), 3064 GFP_ATOMIC); 3065 if (!tx->coalesce_buf) 3066 goto enomem; 3067 tx->coalesce_idx = 0; 3068 } 3069 return 0; 3070 } 3071 3072 if (unlikely(tx->num_desc == MAX_DESC)) 3073 goto enomem; 3074 3075 tx->descp = kmalloc_array( 3076 MAX_DESC, 3077 sizeof(struct sdma_desc), 3078 GFP_ATOMIC); 3079 if (!tx->descp) 3080 goto enomem; 3081 3082 /* reserve last descriptor for coalescing */ 3083 tx->desc_limit = MAX_DESC - 1; 3084 /* copy ones already built */ 3085 for (i = 0; i < tx->num_desc; i++) 3086 tx->descp[i] = tx->descs[i]; 3087 return 0; 3088 enomem: 3089 __sdma_txclean(dd, tx); 3090 return -ENOMEM; 3091 } 3092 3093 /* 3094 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors 3095 * 3096 * This is called once the initial nominal allocation of descriptors 3097 * in the sdma_txreq is exhausted. 3098 * 3099 * This function calls _extend_sdma_tx_descs to extend or allocate a 3100 * coalesce buffer. If there is an allocated coalesce buffer, it will 3101 * copy the input packet data into the coalesce buffer. It also adds the 3102 * coalesce buffer descriptor once the whole packet has been received. 3103 * 3104 * Return: 3105 * <0 - error 3106 * 0 - coalescing, don't populate descriptor 3107 * 1 - continue with populating descriptor 3108 */ 3109 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, 3110 int type, void *kvaddr, struct page *page, 3111 unsigned long offset, u16 len) 3112 { 3113 int pad_len, rval; 3114 dma_addr_t addr; 3115 3116 rval = _extend_sdma_tx_descs(dd, tx); 3117 if (rval) { 3118 __sdma_txclean(dd, tx); 3119 return rval; 3120 } 3121 3122 /* If coalesce buffer is allocated, copy data into it */ 3123 if (tx->coalesce_buf) { 3124 if (type == SDMA_MAP_NONE) { 3125 __sdma_txclean(dd, tx); 3126 return -EINVAL; 3127 } 3128 3129 if (type == SDMA_MAP_PAGE) { 3130 kvaddr = kmap(page); 3131 kvaddr += offset; 3132 } else if (WARN_ON(!kvaddr)) { 3133 __sdma_txclean(dd, tx); 3134 return -EINVAL; 3135 } 3136 3137 memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len); 3138 tx->coalesce_idx += len; 3139 if (type == SDMA_MAP_PAGE) 3140 kunmap(page); 3141 3142 /* If there is more data, return */ 3143 if (tx->tlen - tx->coalesce_idx) 3144 return 0; 3145 3146 /* Whole packet is received; add any padding */ 3147 pad_len = tx->packet_len & (sizeof(u32) - 1); 3148 if (pad_len) { 3149 pad_len = sizeof(u32) - pad_len; 3150 memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len); 3151 /* padding is taken care of for coalescing case */ 3152 tx->packet_len += pad_len; 3153 tx->tlen += pad_len; 3154 } 3155 3156 /* dma map the coalesce buffer */ 3157 addr = dma_map_single(&dd->pcidev->dev, 3158 tx->coalesce_buf, 3159 tx->tlen, 3160 DMA_TO_DEVICE); 3161 3162 if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { 3163 __sdma_txclean(dd, tx); 3164 return -ENOSPC; 3165 } 3166 3167 /* Add descriptor for coalesce buffer */ 3168 tx->desc_limit = MAX_DESC; 3169 return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, 3170 addr, tx->tlen); 3171 } 3172 3173 return 1; 3174 } 3175 3176 /* Update sdes when the lmc
changes */ 3177 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid) 3178 { 3179 struct sdma_engine *sde; 3180 int i; 3181 u64 sreg; 3182 3183 sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) << 3184 SD(CHECK_SLID_MASK_SHIFT)) | 3185 (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) << 3186 SD(CHECK_SLID_VALUE_SHIFT)); 3187 3188 for (i = 0; i < dd->num_sdma; i++) { 3189 hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x", 3190 i, (u32)sreg); 3191 sde = &dd->per_sdma[i]; 3192 write_sde_csr(sde, SD(CHECK_SLID), sreg); 3193 } 3194 } 3195 3196 /* tx not dword sized - pad */ 3197 int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) 3198 { 3199 int rval = 0; 3200 3201 tx->num_desc++; 3202 if ((unlikely(tx->num_desc == tx->desc_limit))) { 3203 rval = _extend_sdma_tx_descs(dd, tx); 3204 if (rval) { 3205 __sdma_txclean(dd, tx); 3206 return rval; 3207 } 3208 } 3209 /* finish the one just added */ 3210 make_tx_sdma_desc( 3211 tx, 3212 SDMA_MAP_NONE, 3213 dd->sdma_pad_phys, 3214 sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); 3215 _sdma_close_tx(dd, tx); 3216 return rval; 3217 } 3218 3219 /* 3220 * Add ahg to the sdma_txreq 3221 * 3222 * The logic will consume up to 3 3223 * descriptors at the beginning of 3224 * sdma_txreq. 3225 */ 3226 void _sdma_txreq_ahgadd( 3227 struct sdma_txreq *tx, 3228 u8 num_ahg, 3229 u8 ahg_entry, 3230 u32 *ahg, 3231 u8 ahg_hlen) 3232 { 3233 u32 i, shift = 0, desc = 0; 3234 u8 mode; 3235 3236 WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4); 3237 /* compute mode */ 3238 if (num_ahg == 1) 3239 mode = SDMA_AHG_APPLY_UPDATE1; 3240 else if (num_ahg <= 5) 3241 mode = SDMA_AHG_APPLY_UPDATE2; 3242 else 3243 mode = SDMA_AHG_APPLY_UPDATE3; 3244 tx->num_desc++; 3245 /* initialize to consumed descriptors to zero */ 3246 switch (mode) { 3247 case SDMA_AHG_APPLY_UPDATE3: 3248 tx->num_desc++; 3249 tx->descs[2].qw[0] = 0; 3250 tx->descs[2].qw[1] = 0; 3251 /* FALLTHROUGH */ 3252 case SDMA_AHG_APPLY_UPDATE2: 3253 tx->num_desc++; 3254 tx->descs[1].qw[0] = 0; 3255 tx->descs[1].qw[1] = 0; 3256 break; 3257 } 3258 ahg_hlen >>= 2; 3259 tx->descs[0].qw[1] |= 3260 (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK) 3261 << SDMA_DESC1_HEADER_INDEX_SHIFT) | 3262 (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK) 3263 << SDMA_DESC1_HEADER_DWS_SHIFT) | 3264 (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK) 3265 << SDMA_DESC1_HEADER_MODE_SHIFT) | 3266 (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK) 3267 << SDMA_DESC1_HEADER_UPDATE1_SHIFT); 3268 for (i = 0; i < (num_ahg - 1); i++) { 3269 if (!shift && !(i & 2)) 3270 desc++; 3271 tx->descs[desc].qw[!!(i & 2)] |= 3272 (((u64)ahg[i + 1]) 3273 << shift); 3274 shift = (shift + 32) & 63; 3275 } 3276 } 3277 3278 /** 3279 * sdma_ahg_alloc - allocate an AHG entry 3280 * @sde: engine to allocate from 3281 * 3282 * Return: 3283 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled, 3284 * -ENOSPC if an entry is not available 3285 */ 3286 int sdma_ahg_alloc(struct sdma_engine *sde) 3287 { 3288 int nr; 3289 int oldbit; 3290 3291 if (!sde) { 3292 trace_hfi1_ahg_allocate(sde, -EINVAL); 3293 return -EINVAL; 3294 } 3295 while (1) { 3296 nr = ffz(ACCESS_ONCE(sde->ahg_bits)); 3297 if (nr > 31) { 3298 trace_hfi1_ahg_allocate(sde, -ENOSPC); 3299 return -ENOSPC; 3300 } 3301 oldbit = test_and_set_bit(nr, &sde->ahg_bits); 3302 if (!oldbit) 3303 break; 3304 cpu_relax(); 3305 } 3306 trace_hfi1_ahg_allocate(sde, nr); 3307 return nr; 3308 } 3309 3310 /** 3311 * sdma_ahg_free - free an AHG entry 3312 * @sde: engine to return AHG entry 3313 * @ahg_index: 
index to free 3314 * 3315 * This routine frees the indicated AHG entry. 3316 */ 3317 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index) 3318 { 3319 if (!sde) 3320 return; 3321 trace_hfi1_ahg_deallocate(sde, ahg_index); 3322 if (ahg_index < 0 || ahg_index > 31) 3323 return; 3324 clear_bit(ahg_index, &sde->ahg_bits); 3325 } 3326 3327 /* 3328 * SPC freeze handling for SDMA engines. Called when the driver knows 3329 * the SPC is going into a freeze but before the freeze is fully 3330 * settled. Generally an error interrupt. 3331 * 3332 * This event will pull the engine out of running so no more entries can be 3333 * added to the engine's queue. 3334 */ 3335 void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down) 3336 { 3337 int i; 3338 enum sdma_events event = link_down ? sdma_event_e85_link_down : 3339 sdma_event_e80_hw_freeze; 3340 3341 /* set up the wait but do not wait here */ 3342 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); 3343 3344 /* tell all engines to stop running and wait */ 3345 for (i = 0; i < dd->num_sdma; i++) 3346 sdma_process_event(&dd->per_sdma[i], event); 3347 3348 /* sdma_freeze() will wait for all engines to have stopped */ 3349 } 3350 3351 /* 3352 * SPC freeze handling for SDMA engines. Called when the driver knows 3353 * the SPC is fully frozen. 3354 */ 3355 void sdma_freeze(struct hfi1_devdata *dd) 3356 { 3357 int i; 3358 int ret; 3359 3360 /* 3361 * Make sure all engines have moved out of the running state before 3362 * continuing. 3363 */ 3364 ret = wait_event_interruptible(dd->sdma_unfreeze_wq, 3365 atomic_read(&dd->sdma_unfreeze_count) <= 3366 0); 3367 /* interrupted or count is negative, then unloading - just exit */ 3368 if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0) 3369 return; 3370 3371 /* set up the count for the next wait */ 3372 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); 3373 3374 /* tell all engines that the SPC is frozen, they can start cleaning */ 3375 for (i = 0; i < dd->num_sdma; i++) 3376 sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen); 3377 3378 /* 3379 * Wait for everyone to finish software clean before exiting. The 3380 * software clean will read engine CSRs, so must be completed before 3381 * the next step, which will clear the engine CSRs. 3382 */ 3383 (void)wait_event_interruptible(dd->sdma_unfreeze_wq, 3384 atomic_read(&dd->sdma_unfreeze_count) <= 0); 3385 /* no need to check results - done no matter what */ 3386 } 3387 3388 /* 3389 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen. 3390 * 3391 * The SPC freeze acts like an SDMA halt and a hardware clean combined. All 3392 * that is left is a software clean. We could do it after the SPC is fully 3393 * frozen, but then we'd have to add another state to wait for the unfreeze. 3394 * Instead, just defer the software clean until the unfreeze step.
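 *
 * (Editor's summary of the sequence implemented above: the expected
 * call order is sdma_freeze_notify() when the freeze is first detected,
 * sdma_freeze() once the SPC is fully frozen, then sdma_unfreeze() after
 * the SPC is released; sdma_unfreeze_count is used to wait for every
 * engine at each of the first two steps.)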
3395 */ 3396 void sdma_unfreeze(struct hfi1_devdata *dd) 3397 { 3398 int i; 3399 3400 /* tell all engines to start freeze clean up */ 3401 for (i = 0; i < dd->num_sdma; i++) 3402 sdma_process_event(&dd->per_sdma[i], 3403 sdma_event_e82_hw_unfreeze); 3404 } 3405 3406 /** 3407 * _sdma_engine_progress_schedule() - schedule progress on engine 3408 * @sde: sdma_engine to schedule progress 3409 * 3410 */ 3411 void _sdma_engine_progress_schedule( 3412 struct sdma_engine *sde) 3413 { 3414 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); 3415 /* assume we have selected a good cpu */ 3416 write_csr(sde->dd, 3417 CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), 3418 sde->progress_mask); 3419 } 3420
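
/*
 * Editor's illustrative example (not part of the original driver): a
 * minimal sketch of the single-packet submit path from the caller's
 * side, assuming sde was picked by an engine-selection helper, wait is
 * an initialized struct iowait (or NULL), and the txreq was fully built
 * beforehand so that tx->tlen is 0 at submit time:
 *
 *	int ret;
 *
 *	ret = sdma_send_txreq(sde, wait, tx);
 *	if (ret == -EIOCBQUEUED)
 *		return;		(the txreq sits on the iowait list and is
 *				 resubmitted when descriptors free up)
 *	if (ret)
 *		caller_cleanup(tx, ret);	(hypothetical helper;
 *				 ret is -EINVAL, -EBUSY or -ECOMM as
 *				 documented above sdma_send_txreq())
 *
 * On success (0) nothing further is required here; the txreq's
 * completion callback is invoked later from the engine's progress path.
 */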