1 /* 2 * Copyright(c) 2015 - 2018 Intel Corporation. 3 * 4 * This file is provided under a dual BSD/GPLv2 license. When using or 5 * redistributing this file, you may do so under either license. 6 * 7 * GPL LICENSE SUMMARY 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * BSD LICENSE 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 24 * - Redistributions of source code must retain the above copyright 25 * notice, this list of conditions and the following disclaimer. 26 * - Redistributions in binary form must reproduce the above copyright 27 * notice, this list of conditions and the following disclaimer in 28 * the documentation and/or other materials provided with the 29 * distribution. 30 * - Neither the name of Intel Corporation nor the names of its 31 * contributors may be used to endorse or promote products derived 32 * from this software without specific prior written permission. 33 * 34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
45 * 46 */ 47 48 #include <linux/spinlock.h> 49 #include <linux/seqlock.h> 50 #include <linux/netdevice.h> 51 #include <linux/moduleparam.h> 52 #include <linux/bitops.h> 53 #include <linux/timer.h> 54 #include <linux/vmalloc.h> 55 #include <linux/highmem.h> 56 57 #include "hfi.h" 58 #include "common.h" 59 #include "qp.h" 60 #include "sdma.h" 61 #include "iowait.h" 62 #include "trace.h" 63 64 /* must be a power of 2 >= 64 <= 32768 */ 65 #define SDMA_DESCQ_CNT 2048 66 #define SDMA_DESC_INTR 64 67 #define INVALID_TAIL 0xffff 68 #define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32)) 69 70 static uint sdma_descq_cnt = SDMA_DESCQ_CNT; 71 module_param(sdma_descq_cnt, uint, S_IRUGO); 72 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries"); 73 74 static uint sdma_idle_cnt = 250; 75 module_param(sdma_idle_cnt, uint, S_IRUGO); 76 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)"); 77 78 uint mod_num_sdma; 79 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO); 80 MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use"); 81 82 static uint sdma_desct_intr = SDMA_DESC_INTR; 83 module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR); 84 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptor before interrupt"); 85 86 #define SDMA_WAIT_BATCH_SIZE 20 87 /* max wait time for a SDMA engine to indicate it has halted */ 88 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */ 89 /* all SDMA engine errors that cause a halt */ 90 91 #define SD(name) SEND_DMA_##name 92 #define ALL_SDMA_ENG_HALT_ERRS \ 93 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \ 94 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \ 95 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \ 96 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \ 97 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \ 98 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \ 99 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \ 100 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \ 101 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \ 102 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \ 103 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \ 104 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \ 105 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \ 106 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \ 107 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \ 108 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \ 109 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \ 110 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK)) 111 112 /* sdma_sendctrl operations */ 113 #define SDMA_SENDCTRL_OP_ENABLE BIT(0) 114 #define SDMA_SENDCTRL_OP_INTENABLE BIT(1) 115 #define SDMA_SENDCTRL_OP_HALT BIT(2) 116 #define SDMA_SENDCTRL_OP_CLEANUP BIT(3) 117 118 /* handle long defines */ 119 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \ 120 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK 121 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \ 122 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT 123 124 static const char * const sdma_state_names[] = { 125 [sdma_state_s00_hw_down] = "s00_HwDown", 126 [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait", 127 [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait", 128 [sdma_state_s20_idle] = "s20_Idle", 129 [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait", 130 [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait", 131 [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait", 132 [sdma_state_s60_idle_halt_wait] = 
"s60_IdleHaltWait", 133 [sdma_state_s80_hw_freeze] = "s80_HwFreeze", 134 [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean", 135 [sdma_state_s99_running] = "s99_Running", 136 }; 137 138 #ifdef CONFIG_SDMA_VERBOSITY 139 static const char * const sdma_event_names[] = { 140 [sdma_event_e00_go_hw_down] = "e00_GoHwDown", 141 [sdma_event_e10_go_hw_start] = "e10_GoHwStart", 142 [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone", 143 [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone", 144 [sdma_event_e30_go_running] = "e30_GoRunning", 145 [sdma_event_e40_sw_cleaned] = "e40_SwCleaned", 146 [sdma_event_e50_hw_cleaned] = "e50_HwCleaned", 147 [sdma_event_e60_hw_halted] = "e60_HwHalted", 148 [sdma_event_e70_go_idle] = "e70_GoIdle", 149 [sdma_event_e80_hw_freeze] = "e80_HwFreeze", 150 [sdma_event_e81_hw_frozen] = "e81_HwFrozen", 151 [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze", 152 [sdma_event_e85_link_down] = "e85_LinkDown", 153 [sdma_event_e90_sw_halted] = "e90_SwHalted", 154 }; 155 #endif 156 157 static const struct sdma_set_state_action sdma_action_table[] = { 158 [sdma_state_s00_hw_down] = { 159 .go_s99_running_tofalse = 1, 160 .op_enable = 0, 161 .op_intenable = 0, 162 .op_halt = 0, 163 .op_cleanup = 0, 164 }, 165 [sdma_state_s10_hw_start_up_halt_wait] = { 166 .op_enable = 0, 167 .op_intenable = 0, 168 .op_halt = 1, 169 .op_cleanup = 0, 170 }, 171 [sdma_state_s15_hw_start_up_clean_wait] = { 172 .op_enable = 0, 173 .op_intenable = 1, 174 .op_halt = 0, 175 .op_cleanup = 1, 176 }, 177 [sdma_state_s20_idle] = { 178 .op_enable = 0, 179 .op_intenable = 1, 180 .op_halt = 0, 181 .op_cleanup = 0, 182 }, 183 [sdma_state_s30_sw_clean_up_wait] = { 184 .op_enable = 0, 185 .op_intenable = 0, 186 .op_halt = 0, 187 .op_cleanup = 0, 188 }, 189 [sdma_state_s40_hw_clean_up_wait] = { 190 .op_enable = 0, 191 .op_intenable = 0, 192 .op_halt = 0, 193 .op_cleanup = 1, 194 }, 195 [sdma_state_s50_hw_halt_wait] = { 196 .op_enable = 0, 197 .op_intenable = 0, 198 .op_halt = 0, 199 .op_cleanup = 0, 200 }, 201 [sdma_state_s60_idle_halt_wait] = { 202 .go_s99_running_tofalse = 1, 203 .op_enable = 0, 204 .op_intenable = 0, 205 .op_halt = 1, 206 .op_cleanup = 0, 207 }, 208 [sdma_state_s80_hw_freeze] = { 209 .op_enable = 0, 210 .op_intenable = 0, 211 .op_halt = 0, 212 .op_cleanup = 0, 213 }, 214 [sdma_state_s82_freeze_sw_clean] = { 215 .op_enable = 0, 216 .op_intenable = 0, 217 .op_halt = 0, 218 .op_cleanup = 0, 219 }, 220 [sdma_state_s99_running] = { 221 .op_enable = 1, 222 .op_intenable = 1, 223 .op_halt = 0, 224 .op_cleanup = 0, 225 .go_s99_running_totrue = 1, 226 }, 227 }; 228 229 #define SDMA_TAIL_UPDATE_THRESH 0x1F 230 231 /* declare all statics here rather than keep sorting */ 232 static void sdma_complete(struct kref *); 233 static void sdma_finalput(struct sdma_state *); 234 static void sdma_get(struct sdma_state *); 235 static void sdma_hw_clean_up_task(struct tasklet_struct *); 236 static void sdma_put(struct sdma_state *); 237 static void sdma_set_state(struct sdma_engine *, enum sdma_states); 238 static void sdma_start_hw_clean_up(struct sdma_engine *); 239 static void sdma_sw_clean_up_task(struct tasklet_struct *); 240 static void sdma_sendctrl(struct sdma_engine *, unsigned); 241 static void init_sdma_regs(struct sdma_engine *, u32, uint); 242 static void sdma_process_event( 243 struct sdma_engine *sde, 244 enum sdma_events event); 245 static void __sdma_process_event( 246 struct sdma_engine *sde, 247 enum sdma_events event); 248 static void dump_sdma_state(struct sdma_engine *sde); 249 static void 
sdma_make_progress(struct sdma_engine *sde, u64 status);
static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
static void sdma_flush_descq(struct sdma_engine *sde);

/**
 * sdma_state_name() - return state string from enum
 * @state: state
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}

static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}

/*
 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
 * sdma engine 'sde' to drop to 0.
 */
static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
					int pause)
{
	u64 off = 8 * sde->this_idx;
	struct hfi1_devdata *dd = sde->dd;
	int lcnt = 0;
	u64 reg_prev;
	u64 reg = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);

		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != reg_prev)
			lcnt = 0;
		if (lcnt++ > 500) {
			/* timed out - bounce the link */
			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sde->this_idx, (u32)reg);
			queue_work(dd->pport->link_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		udelay(1);
	}
}

/*
 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
 * and pause for credit return.
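 * @dd: hfi1_devdata
 *
 * Calls sdma_wait_for_packet_egress() on each engine in dd->per_sdma.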
340 */ 341 void sdma_wait(struct hfi1_devdata *dd) 342 { 343 int i; 344 345 for (i = 0; i < dd->num_sdma; i++) { 346 struct sdma_engine *sde = &dd->per_sdma[i]; 347 348 sdma_wait_for_packet_egress(sde, 0); 349 } 350 } 351 352 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt) 353 { 354 u64 reg; 355 356 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT)) 357 return; 358 reg = cnt; 359 reg &= SD(DESC_CNT_CNT_MASK); 360 reg <<= SD(DESC_CNT_CNT_SHIFT); 361 write_sde_csr(sde, SD(DESC_CNT), reg); 362 } 363 364 static inline void complete_tx(struct sdma_engine *sde, 365 struct sdma_txreq *tx, 366 int res) 367 { 368 /* protect against complete modifying */ 369 struct iowait *wait = tx->wait; 370 callback_t complete = tx->complete; 371 372 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 373 trace_hfi1_sdma_out_sn(sde, tx->sn); 374 if (WARN_ON_ONCE(sde->head_sn != tx->sn)) 375 dd_dev_err(sde->dd, "expected %llu got %llu\n", 376 sde->head_sn, tx->sn); 377 sde->head_sn++; 378 #endif 379 __sdma_txclean(sde->dd, tx); 380 if (complete) 381 (*complete)(tx, res); 382 if (iowait_sdma_dec(wait)) 383 iowait_drain_wakeup(wait); 384 } 385 386 /* 387 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status 388 * 389 * Depending on timing there can be txreqs in two places: 390 * - in the descq ring 391 * - in the flush list 392 * 393 * To avoid ordering issues the descq ring needs to be flushed 394 * first followed by the flush list. 395 * 396 * This routine is called from two places 397 * - From a work queue item 398 * - Directly from the state machine just before setting the 399 * state to running 400 * 401 * Must be called with head_lock held 402 * 403 */ 404 static void sdma_flush(struct sdma_engine *sde) 405 { 406 struct sdma_txreq *txp, *txp_next; 407 LIST_HEAD(flushlist); 408 unsigned long flags; 409 uint seq; 410 411 /* flush from head to tail */ 412 sdma_flush_descq(sde); 413 spin_lock_irqsave(&sde->flushlist_lock, flags); 414 /* copy flush list */ 415 list_splice_init(&sde->flushlist, &flushlist); 416 spin_unlock_irqrestore(&sde->flushlist_lock, flags); 417 /* flush from flush list */ 418 list_for_each_entry_safe(txp, txp_next, &flushlist, list) 419 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); 420 /* wakeup QPs orphaned on the dmawait list */ 421 do { 422 struct iowait *w, *nw; 423 424 seq = read_seqbegin(&sde->waitlock); 425 if (!list_empty(&sde->dmawait)) { 426 write_seqlock(&sde->waitlock); 427 list_for_each_entry_safe(w, nw, &sde->dmawait, list) { 428 if (w->wakeup) { 429 w->wakeup(w, SDMA_AVAIL_REASON); 430 list_del_init(&w->list); 431 } 432 } 433 write_sequnlock(&sde->waitlock); 434 } 435 } while (read_seqretry(&sde->waitlock, seq)); 436 } 437 438 /* 439 * Fields a work request for flushing the descq ring 440 * and the flush list 441 * 442 * If the engine has been brought to running during 443 * the scheduling delay, the flush is ignored, assuming 444 * that the process of bringing the engine to running 445 * would have done this flush prior to going to running. 
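 * (Runs from sde->flush_worker; head_lock serializes it against the
 * state machine.)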
446 * 447 */ 448 static void sdma_field_flush(struct work_struct *work) 449 { 450 unsigned long flags; 451 struct sdma_engine *sde = 452 container_of(work, struct sdma_engine, flush_worker); 453 454 write_seqlock_irqsave(&sde->head_lock, flags); 455 if (!__sdma_running(sde)) 456 sdma_flush(sde); 457 write_sequnlock_irqrestore(&sde->head_lock, flags); 458 } 459 460 static void sdma_err_halt_wait(struct work_struct *work) 461 { 462 struct sdma_engine *sde = container_of(work, struct sdma_engine, 463 err_halt_worker); 464 u64 statuscsr; 465 unsigned long timeout; 466 467 timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT); 468 while (1) { 469 statuscsr = read_sde_csr(sde, SD(STATUS)); 470 statuscsr &= SD(STATUS_ENG_HALTED_SMASK); 471 if (statuscsr) 472 break; 473 if (time_after(jiffies, timeout)) { 474 dd_dev_err(sde->dd, 475 "SDMA engine %d - timeout waiting for engine to halt\n", 476 sde->this_idx); 477 /* 478 * Continue anyway. This could happen if there was 479 * an uncorrectable error in the wrong spot. 480 */ 481 break; 482 } 483 usleep_range(80, 120); 484 } 485 486 sdma_process_event(sde, sdma_event_e15_hw_halt_done); 487 } 488 489 static void sdma_err_progress_check_schedule(struct sdma_engine *sde) 490 { 491 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) { 492 unsigned index; 493 struct hfi1_devdata *dd = sde->dd; 494 495 for (index = 0; index < dd->num_sdma; index++) { 496 struct sdma_engine *curr_sdma = &dd->per_sdma[index]; 497 498 if (curr_sdma != sde) 499 curr_sdma->progress_check_head = 500 curr_sdma->descq_head; 501 } 502 dd_dev_err(sde->dd, 503 "SDMA engine %d - check scheduled\n", 504 sde->this_idx); 505 mod_timer(&sde->err_progress_check_timer, jiffies + 10); 506 } 507 } 508 509 static void sdma_err_progress_check(struct timer_list *t) 510 { 511 unsigned index; 512 struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer); 513 514 dd_dev_err(sde->dd, "SDE progress check event\n"); 515 for (index = 0; index < sde->dd->num_sdma; index++) { 516 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index]; 517 unsigned long flags; 518 519 /* check progress on each engine except the current one */ 520 if (curr_sde == sde) 521 continue; 522 /* 523 * We must lock interrupts when acquiring sde->lock, 524 * to avoid a deadlock if interrupt triggers and spins on 525 * the same lock on same CPU 526 */ 527 spin_lock_irqsave(&curr_sde->tail_lock, flags); 528 write_seqlock(&curr_sde->head_lock); 529 530 /* skip non-running queues */ 531 if (curr_sde->state.current_state != sdma_state_s99_running) { 532 write_sequnlock(&curr_sde->head_lock); 533 spin_unlock_irqrestore(&curr_sde->tail_lock, flags); 534 continue; 535 } 536 537 if ((curr_sde->descq_head != curr_sde->descq_tail) && 538 (curr_sde->descq_head == 539 curr_sde->progress_check_head)) 540 __sdma_process_event(curr_sde, 541 sdma_event_e90_sw_halted); 542 write_sequnlock(&curr_sde->head_lock); 543 spin_unlock_irqrestore(&curr_sde->tail_lock, flags); 544 } 545 schedule_work(&sde->err_halt_worker); 546 } 547 548 static void sdma_hw_clean_up_task(struct tasklet_struct *t) 549 { 550 struct sdma_engine *sde = from_tasklet(sde, t, 551 sdma_hw_clean_up_task); 552 u64 statuscsr; 553 554 while (1) { 555 #ifdef CONFIG_SDMA_VERBOSITY 556 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", 557 sde->this_idx, slashstrip(__FILE__), __LINE__, 558 __func__); 559 #endif 560 statuscsr = read_sde_csr(sde, SD(STATUS)); 561 statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK); 562 if (statuscsr) 563 break; 564 udelay(10); 565 } 566 567 
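	/* hardware reports the engine cleaned up; advance the state machine */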
sdma_process_event(sde, sdma_event_e25_hw_clean_up_done); 568 } 569 570 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde) 571 { 572 return sde->tx_ring[sde->tx_head & sde->sdma_mask]; 573 } 574 575 /* 576 * flush ring for recovery 577 */ 578 static void sdma_flush_descq(struct sdma_engine *sde) 579 { 580 u16 head, tail; 581 int progress = 0; 582 struct sdma_txreq *txp = get_txhead(sde); 583 584 /* The reason for some of the complexity of this code is that 585 * not all descriptors have corresponding txps. So, we have to 586 * be able to skip over descs until we wander into the range of 587 * the next txp on the list. 588 */ 589 head = sde->descq_head & sde->sdma_mask; 590 tail = sde->descq_tail & sde->sdma_mask; 591 while (head != tail) { 592 /* advance head, wrap if needed */ 593 head = ++sde->descq_head & sde->sdma_mask; 594 /* if now past this txp's descs, do the callback */ 595 if (txp && txp->next_descq_idx == head) { 596 /* remove from list */ 597 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; 598 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); 599 trace_hfi1_sdma_progress(sde, head, tail, txp); 600 txp = get_txhead(sde); 601 } 602 progress++; 603 } 604 if (progress) 605 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); 606 } 607 608 static void sdma_sw_clean_up_task(struct tasklet_struct *t) 609 { 610 struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task); 611 unsigned long flags; 612 613 spin_lock_irqsave(&sde->tail_lock, flags); 614 write_seqlock(&sde->head_lock); 615 616 /* 617 * At this point, the following should always be true: 618 * - We are halted, so no more descriptors are getting retired. 619 * - We are not running, so no one is submitting new work. 620 * - Only we can send the e40_sw_cleaned, so we can't start 621 * running again until we say so. So, the active list and 622 * descq are ours to play with. 623 */ 624 625 /* 626 * In the error clean up sequence, software clean must be called 627 * before the hardware clean so we can use the hardware head in 628 * the progress routine. A hardware clean or SPC unfreeze will 629 * reset the hardware head. 630 * 631 * Process all retired requests. The progress routine will use the 632 * latest physical hardware head - we are not running so speed does 633 * not matter. 634 */ 635 sdma_make_progress(sde, 0); 636 637 sdma_flush(sde); 638 639 /* 640 * Reset our notion of head and tail. 641 * Note that the HW registers have been reset via an earlier 642 * clean up. 643 */ 644 sde->descq_tail = 0; 645 sde->descq_head = 0; 646 sde->desc_avail = sdma_descq_freecnt(sde); 647 *sde->head_dma = 0; 648 649 __sdma_process_event(sde, sdma_event_e40_sw_cleaned); 650 651 write_sequnlock(&sde->head_lock); 652 spin_unlock_irqrestore(&sde->tail_lock, flags); 653 } 654 655 static void sdma_sw_tear_down(struct sdma_engine *sde) 656 { 657 struct sdma_state *ss = &sde->state; 658 659 /* Releasing this reference means the state machine has stopped. 
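	 * sdma_finalput() in sdma_exit() waits on the completion that fires
	 * once the last reference is dropped.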
*/ 660 sdma_put(ss); 661 662 /* stop waiting for all unfreeze events to complete */ 663 atomic_set(&sde->dd->sdma_unfreeze_count, -1); 664 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 665 } 666 667 static void sdma_start_hw_clean_up(struct sdma_engine *sde) 668 { 669 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task); 670 } 671 672 static void sdma_set_state(struct sdma_engine *sde, 673 enum sdma_states next_state) 674 { 675 struct sdma_state *ss = &sde->state; 676 const struct sdma_set_state_action *action = sdma_action_table; 677 unsigned op = 0; 678 679 trace_hfi1_sdma_state( 680 sde, 681 sdma_state_names[ss->current_state], 682 sdma_state_names[next_state]); 683 684 /* debugging bookkeeping */ 685 ss->previous_state = ss->current_state; 686 ss->previous_op = ss->current_op; 687 ss->current_state = next_state; 688 689 if (ss->previous_state != sdma_state_s99_running && 690 next_state == sdma_state_s99_running) 691 sdma_flush(sde); 692 693 if (action[next_state].op_enable) 694 op |= SDMA_SENDCTRL_OP_ENABLE; 695 696 if (action[next_state].op_intenable) 697 op |= SDMA_SENDCTRL_OP_INTENABLE; 698 699 if (action[next_state].op_halt) 700 op |= SDMA_SENDCTRL_OP_HALT; 701 702 if (action[next_state].op_cleanup) 703 op |= SDMA_SENDCTRL_OP_CLEANUP; 704 705 if (action[next_state].go_s99_running_tofalse) 706 ss->go_s99_running = 0; 707 708 if (action[next_state].go_s99_running_totrue) 709 ss->go_s99_running = 1; 710 711 ss->current_op = op; 712 sdma_sendctrl(sde, ss->current_op); 713 } 714 715 /** 716 * sdma_get_descq_cnt() - called when device probed 717 * 718 * Return a validated descq count. 719 * 720 * This is currently only used in the verbs initialization to build the tx 721 * list. 722 * 723 * This will probably be deleted in favor of a more scalable approach to 724 * alloc tx's. 725 * 726 */ 727 u16 sdma_get_descq_cnt(void) 728 { 729 u16 count = sdma_descq_cnt; 730 731 if (!count) 732 return SDMA_DESCQ_CNT; 733 /* count must be a power of 2 greater than 64 and less than 734 * 32768. Otherwise return default. 735 */ 736 if (!is_power_of_2(count)) 737 return SDMA_DESCQ_CNT; 738 if (count < 64 || count > 32768) 739 return SDMA_DESCQ_CNT; 740 return count; 741 } 742 743 /** 744 * sdma_engine_get_vl() - return vl for a given sdma engine 745 * @sde: sdma engine 746 * 747 * This function returns the vl mapped to a given engine, or an error if 748 * the mapping can't be found. The mapping fields are protected by RCU. 749 */ 750 int sdma_engine_get_vl(struct sdma_engine *sde) 751 { 752 struct hfi1_devdata *dd = sde->dd; 753 struct sdma_vl_map *m; 754 u8 vl; 755 756 if (sde->this_idx >= TXE_NUM_SDMA_ENGINES) 757 return -EINVAL; 758 759 rcu_read_lock(); 760 m = rcu_dereference(dd->sdma_map); 761 if (unlikely(!m)) { 762 rcu_read_unlock(); 763 return -EINVAL; 764 } 765 vl = m->engine_to_vl[sde->this_idx]; 766 rcu_read_unlock(); 767 768 return vl; 769 } 770 771 /** 772 * sdma_select_engine_vl() - select sdma engine 773 * @dd: devdata 774 * @selector: a spreading factor 775 * @vl: this vl 776 * 777 * 778 * This function returns an engine based on the selector and a vl. The 779 * mapping fields are protected by RCU. 
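 * Return: the selected engine; &dd->per_sdma[0] is used as a fallback when
 * the vl is out of range or the map has not been published yet.
 * Callers include sdma_select_engine_sc() and sdma_select_user_engine().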
780 */ 781 struct sdma_engine *sdma_select_engine_vl( 782 struct hfi1_devdata *dd, 783 u32 selector, 784 u8 vl) 785 { 786 struct sdma_vl_map *m; 787 struct sdma_map_elem *e; 788 struct sdma_engine *rval; 789 790 /* NOTE This should only happen if SC->VL changed after the initial 791 * checks on the QP/AH 792 * Default will return engine 0 below 793 */ 794 if (vl >= num_vls) { 795 rval = NULL; 796 goto done; 797 } 798 799 rcu_read_lock(); 800 m = rcu_dereference(dd->sdma_map); 801 if (unlikely(!m)) { 802 rcu_read_unlock(); 803 return &dd->per_sdma[0]; 804 } 805 e = m->map[vl & m->mask]; 806 rval = e->sde[selector & e->mask]; 807 rcu_read_unlock(); 808 809 done: 810 rval = !rval ? &dd->per_sdma[0] : rval; 811 trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx); 812 return rval; 813 } 814 815 /** 816 * sdma_select_engine_sc() - select sdma engine 817 * @dd: devdata 818 * @selector: a spreading factor 819 * @sc5: the 5 bit sc 820 * 821 * 822 * This function returns an engine based on the selector and an sc. 823 */ 824 struct sdma_engine *sdma_select_engine_sc( 825 struct hfi1_devdata *dd, 826 u32 selector, 827 u8 sc5) 828 { 829 u8 vl = sc_to_vlt(dd, sc5); 830 831 return sdma_select_engine_vl(dd, selector, vl); 832 } 833 834 struct sdma_rht_map_elem { 835 u32 mask; 836 u8 ctr; 837 struct sdma_engine *sde[]; 838 }; 839 840 struct sdma_rht_node { 841 unsigned long cpu_id; 842 struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED]; 843 struct rhash_head node; 844 }; 845 846 #define NR_CPUS_HINT 192 847 848 static const struct rhashtable_params sdma_rht_params = { 849 .nelem_hint = NR_CPUS_HINT, 850 .head_offset = offsetof(struct sdma_rht_node, node), 851 .key_offset = offsetof(struct sdma_rht_node, cpu_id), 852 .key_len = sizeof_field(struct sdma_rht_node, cpu_id), 853 .max_size = NR_CPUS, 854 .min_size = 8, 855 .automatic_shrinking = true, 856 }; 857 858 /* 859 * sdma_select_user_engine() - select sdma engine based on user setup 860 * @dd: devdata 861 * @selector: a spreading factor 862 * @vl: this vl 863 * 864 * This function returns an sdma engine for a user sdma request. 865 * User defined sdma engine affinity setting is honored when applicable, 866 * otherwise system default sdma engine mapping is used. To ensure correct 867 * ordering, the mapping from <selector, vl> to sde must remain unchanged. 868 */ 869 struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, 870 u32 selector, u8 vl) 871 { 872 struct sdma_rht_node *rht_node; 873 struct sdma_engine *sde = NULL; 874 unsigned long cpu_id; 875 876 /* 877 * To ensure that always the same sdma engine(s) will be 878 * selected make sure the process is pinned to this CPU only. 879 */ 880 if (current->nr_cpus_allowed != 1) 881 goto out; 882 883 cpu_id = smp_processor_id(); 884 rcu_read_lock(); 885 rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id, 886 sdma_rht_params); 887 888 if (rht_node && rht_node->map[vl]) { 889 struct sdma_rht_map_elem *map = rht_node->map[vl]; 890 891 sde = map->sde[selector & map->mask]; 892 } 893 rcu_read_unlock(); 894 895 if (sde) 896 return sde; 897 898 out: 899 return sdma_select_engine_vl(dd, selector, vl); 900 } 901 902 static void sdma_populate_sde_map(struct sdma_rht_map_elem *map) 903 { 904 int i; 905 906 for (i = 0; i < roundup_pow_of_two(map->ctr ? 
: 1) - map->ctr; i++) 907 map->sde[map->ctr + i] = map->sde[i]; 908 } 909 910 static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map, 911 struct sdma_engine *sde) 912 { 913 unsigned int i, pow; 914 915 /* only need to check the first ctr entries for a match */ 916 for (i = 0; i < map->ctr; i++) { 917 if (map->sde[i] == sde) { 918 memmove(&map->sde[i], &map->sde[i + 1], 919 (map->ctr - i - 1) * sizeof(map->sde[0])); 920 map->ctr--; 921 pow = roundup_pow_of_two(map->ctr ? : 1); 922 map->mask = pow - 1; 923 sdma_populate_sde_map(map); 924 break; 925 } 926 } 927 } 928 929 /* 930 * Prevents concurrent reads and writes of the sdma engine cpu_mask 931 */ 932 static DEFINE_MUTEX(process_to_sde_mutex); 933 934 ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf, 935 size_t count) 936 { 937 struct hfi1_devdata *dd = sde->dd; 938 cpumask_var_t mask, new_mask; 939 unsigned long cpu; 940 int ret, vl, sz; 941 struct sdma_rht_node *rht_node; 942 943 vl = sdma_engine_get_vl(sde); 944 if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map))) 945 return -EINVAL; 946 947 ret = zalloc_cpumask_var(&mask, GFP_KERNEL); 948 if (!ret) 949 return -ENOMEM; 950 951 ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL); 952 if (!ret) { 953 free_cpumask_var(mask); 954 return -ENOMEM; 955 } 956 ret = cpulist_parse(buf, mask); 957 if (ret) 958 goto out_free; 959 960 if (!cpumask_subset(mask, cpu_online_mask)) { 961 dd_dev_warn(sde->dd, "Invalid CPU mask\n"); 962 ret = -EINVAL; 963 goto out_free; 964 } 965 966 sz = sizeof(struct sdma_rht_map_elem) + 967 (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *)); 968 969 mutex_lock(&process_to_sde_mutex); 970 971 for_each_cpu(cpu, mask) { 972 /* Check if we have this already mapped */ 973 if (cpumask_test_cpu(cpu, &sde->cpu_mask)) { 974 cpumask_set_cpu(cpu, new_mask); 975 continue; 976 } 977 978 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu, 979 sdma_rht_params); 980 if (!rht_node) { 981 rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL); 982 if (!rht_node) { 983 ret = -ENOMEM; 984 goto out; 985 } 986 987 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); 988 if (!rht_node->map[vl]) { 989 kfree(rht_node); 990 ret = -ENOMEM; 991 goto out; 992 } 993 rht_node->cpu_id = cpu; 994 rht_node->map[vl]->mask = 0; 995 rht_node->map[vl]->ctr = 1; 996 rht_node->map[vl]->sde[0] = sde; 997 998 ret = rhashtable_insert_fast(dd->sdma_rht, 999 &rht_node->node, 1000 sdma_rht_params); 1001 if (ret) { 1002 kfree(rht_node->map[vl]); 1003 kfree(rht_node); 1004 dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n", 1005 cpu); 1006 goto out; 1007 } 1008 1009 } else { 1010 int ctr, pow; 1011 1012 /* Add new user mappings */ 1013 if (!rht_node->map[vl]) 1014 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); 1015 1016 if (!rht_node->map[vl]) { 1017 ret = -ENOMEM; 1018 goto out; 1019 } 1020 1021 rht_node->map[vl]->ctr++; 1022 ctr = rht_node->map[vl]->ctr; 1023 rht_node->map[vl]->sde[ctr - 1] = sde; 1024 pow = roundup_pow_of_two(ctr); 1025 rht_node->map[vl]->mask = pow - 1; 1026 1027 /* Populate the sde map table */ 1028 sdma_populate_sde_map(rht_node->map[vl]); 1029 } 1030 cpumask_set_cpu(cpu, new_mask); 1031 } 1032 1033 /* Clean up old mappings */ 1034 for_each_cpu(cpu, cpu_online_mask) { 1035 struct sdma_rht_node *rht_node; 1036 1037 /* Don't cleanup sdes that are set in the new mask */ 1038 if (cpumask_test_cpu(cpu, mask)) 1039 continue; 1040 1041 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu, 1042 sdma_rht_params); 1043 if (rht_node) { 1044 bool empty = true; 
1045 int i; 1046 1047 /* Remove mappings for old sde */ 1048 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) 1049 if (rht_node->map[i]) 1050 sdma_cleanup_sde_map(rht_node->map[i], 1051 sde); 1052 1053 /* Free empty hash table entries */ 1054 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) { 1055 if (!rht_node->map[i]) 1056 continue; 1057 1058 if (rht_node->map[i]->ctr) { 1059 empty = false; 1060 break; 1061 } 1062 } 1063 1064 if (empty) { 1065 ret = rhashtable_remove_fast(dd->sdma_rht, 1066 &rht_node->node, 1067 sdma_rht_params); 1068 WARN_ON(ret); 1069 1070 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) 1071 kfree(rht_node->map[i]); 1072 1073 kfree(rht_node); 1074 } 1075 } 1076 } 1077 1078 cpumask_copy(&sde->cpu_mask, new_mask); 1079 out: 1080 mutex_unlock(&process_to_sde_mutex); 1081 out_free: 1082 free_cpumask_var(mask); 1083 free_cpumask_var(new_mask); 1084 return ret ? : strnlen(buf, PAGE_SIZE); 1085 } 1086 1087 ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf) 1088 { 1089 mutex_lock(&process_to_sde_mutex); 1090 if (cpumask_empty(&sde->cpu_mask)) 1091 snprintf(buf, PAGE_SIZE, "%s\n", "empty"); 1092 else 1093 cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask); 1094 mutex_unlock(&process_to_sde_mutex); 1095 return strnlen(buf, PAGE_SIZE); 1096 } 1097 1098 static void sdma_rht_free(void *ptr, void *arg) 1099 { 1100 struct sdma_rht_node *rht_node = ptr; 1101 int i; 1102 1103 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) 1104 kfree(rht_node->map[i]); 1105 1106 kfree(rht_node); 1107 } 1108 1109 /** 1110 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings 1111 * @s: seq file 1112 * @dd: hfi1_devdata 1113 * @cpuid: cpu id 1114 * 1115 * This routine dumps the process to sde mappings per cpu 1116 */ 1117 void sdma_seqfile_dump_cpu_list(struct seq_file *s, 1118 struct hfi1_devdata *dd, 1119 unsigned long cpuid) 1120 { 1121 struct sdma_rht_node *rht_node; 1122 int i, j; 1123 1124 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid, 1125 sdma_rht_params); 1126 if (!rht_node) 1127 return; 1128 1129 seq_printf(s, "cpu%3lu: ", cpuid); 1130 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) { 1131 if (!rht_node->map[i] || !rht_node->map[i]->ctr) 1132 continue; 1133 1134 seq_printf(s, " vl%d: [", i); 1135 1136 for (j = 0; j < rht_node->map[i]->ctr; j++) { 1137 if (!rht_node->map[i]->sde[j]) 1138 continue; 1139 1140 if (j > 0) 1141 seq_puts(s, ","); 1142 1143 seq_printf(s, " sdma%2d", 1144 rht_node->map[i]->sde[j]->this_idx); 1145 } 1146 seq_puts(s, " ]"); 1147 } 1148 1149 seq_puts(s, "\n"); 1150 } 1151 1152 /* 1153 * Free the indicated map struct 1154 */ 1155 static void sdma_map_free(struct sdma_vl_map *m) 1156 { 1157 int i; 1158 1159 for (i = 0; m && i < m->actual_vls; i++) 1160 kfree(m->map[i]); 1161 kfree(m); 1162 } 1163 1164 /* 1165 * Handle RCU callback 1166 */ 1167 static void sdma_map_rcu_callback(struct rcu_head *list) 1168 { 1169 struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list); 1170 1171 sdma_map_free(m); 1172 } 1173 1174 /** 1175 * sdma_map_init - called when # vls change 1176 * @dd: hfi1_devdata 1177 * @port: port number 1178 * @num_vls: number of vls 1179 * @vl_engines: per vl engine mapping (optional) 1180 * 1181 * This routine changes the mapping based on the number of vls. 1182 * 1183 * vl_engines is used to specify a non-uniform vl/engine loading. NULL 1184 * implies auto computing the loading and giving each VLs a uniform 1185 * distribution of engines per VL. 
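 *
 * Worked example of the auto algorithm described just below: with 16
 * engines and 5 VLs, sde_per_vl is 3 with 1 extra engine, so VL4 gets
 * 4 engines and VLs 0-3 get 3 engines each.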
1186 * 1187 * The auto algorithm computes the sde_per_vl and the number of extra 1188 * engines. Any extra engines are added from the last VL on down. 1189 * 1190 * rcu locking is used here to control access to the mapping fields. 1191 * 1192 * If either the num_vls or num_sdma are non-power of 2, the array sizes 1193 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded 1194 * up to the next highest power of 2 and the first entry is reused 1195 * in a round robin fashion. 1196 * 1197 * If an error occurs the map change is not done and the mapping is 1198 * not changed. 1199 * 1200 */ 1201 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines) 1202 { 1203 int i, j; 1204 int extra, sde_per_vl; 1205 int engine = 0; 1206 u8 lvl_engines[OPA_MAX_VLS]; 1207 struct sdma_vl_map *oldmap, *newmap; 1208 1209 if (!(dd->flags & HFI1_HAS_SEND_DMA)) 1210 return 0; 1211 1212 if (!vl_engines) { 1213 /* truncate divide */ 1214 sde_per_vl = dd->num_sdma / num_vls; 1215 /* extras */ 1216 extra = dd->num_sdma % num_vls; 1217 vl_engines = lvl_engines; 1218 /* add extras from last vl down */ 1219 for (i = num_vls - 1; i >= 0; i--, extra--) 1220 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0); 1221 } 1222 /* build new map */ 1223 newmap = kzalloc( 1224 sizeof(struct sdma_vl_map) + 1225 roundup_pow_of_two(num_vls) * 1226 sizeof(struct sdma_map_elem *), 1227 GFP_KERNEL); 1228 if (!newmap) 1229 goto bail; 1230 newmap->actual_vls = num_vls; 1231 newmap->vls = roundup_pow_of_two(num_vls); 1232 newmap->mask = (1 << ilog2(newmap->vls)) - 1; 1233 /* initialize back-map */ 1234 for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++) 1235 newmap->engine_to_vl[i] = -1; 1236 for (i = 0; i < newmap->vls; i++) { 1237 /* save for wrap around */ 1238 int first_engine = engine; 1239 1240 if (i < newmap->actual_vls) { 1241 int sz = roundup_pow_of_two(vl_engines[i]); 1242 1243 /* only allocate once */ 1244 newmap->map[i] = kzalloc( 1245 sizeof(struct sdma_map_elem) + 1246 sz * sizeof(struct sdma_engine *), 1247 GFP_KERNEL); 1248 if (!newmap->map[i]) 1249 goto bail; 1250 newmap->map[i]->mask = (1 << ilog2(sz)) - 1; 1251 /* assign engines */ 1252 for (j = 0; j < sz; j++) { 1253 newmap->map[i]->sde[j] = 1254 &dd->per_sdma[engine]; 1255 if (++engine >= first_engine + vl_engines[i]) 1256 /* wrap back to first engine */ 1257 engine = first_engine; 1258 } 1259 /* assign back-map */ 1260 for (j = 0; j < vl_engines[i]; j++) 1261 newmap->engine_to_vl[first_engine + j] = i; 1262 } else { 1263 /* just re-use entry without allocating */ 1264 newmap->map[i] = newmap->map[i % num_vls]; 1265 } 1266 engine = first_engine + vl_engines[i]; 1267 } 1268 /* newmap in hand, save old map */ 1269 spin_lock_irq(&dd->sde_map_lock); 1270 oldmap = rcu_dereference_protected(dd->sdma_map, 1271 lockdep_is_held(&dd->sde_map_lock)); 1272 1273 /* publish newmap */ 1274 rcu_assign_pointer(dd->sdma_map, newmap); 1275 1276 spin_unlock_irq(&dd->sde_map_lock); 1277 /* success, free any old map after grace period */ 1278 if (oldmap) 1279 call_rcu(&oldmap->list, sdma_map_rcu_callback); 1280 return 0; 1281 bail: 1282 /* free any partial allocation */ 1283 sdma_map_free(newmap); 1284 return -ENOMEM; 1285 } 1286 1287 /** 1288 * sdma_clean() Clean up allocated memory 1289 * @dd: struct hfi1_devdata 1290 * @num_engines: num sdma engines 1291 * 1292 * This routine can be called regardless of the success of 1293 * sdma_init() 1294 */ 1295 void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) 1296 { 1297 size_t i; 1298 struct sdma_engine *sde; 
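	/*
	 * Frees the coherent pad and head DMA regions, each engine's descq
	 * and tx_ring, the published vl->engine map, the per_sdma array,
	 * and the cpu->sde rhashtable.
	 */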
1299 1300 if (dd->sdma_pad_dma) { 1301 dma_free_coherent(&dd->pcidev->dev, SDMA_PAD, 1302 (void *)dd->sdma_pad_dma, 1303 dd->sdma_pad_phys); 1304 dd->sdma_pad_dma = NULL; 1305 dd->sdma_pad_phys = 0; 1306 } 1307 if (dd->sdma_heads_dma) { 1308 dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size, 1309 (void *)dd->sdma_heads_dma, 1310 dd->sdma_heads_phys); 1311 dd->sdma_heads_dma = NULL; 1312 dd->sdma_heads_phys = 0; 1313 } 1314 for (i = 0; dd->per_sdma && i < num_engines; ++i) { 1315 sde = &dd->per_sdma[i]; 1316 1317 sde->head_dma = NULL; 1318 sde->head_phys = 0; 1319 1320 if (sde->descq) { 1321 dma_free_coherent( 1322 &dd->pcidev->dev, 1323 sde->descq_cnt * sizeof(u64[2]), 1324 sde->descq, 1325 sde->descq_phys 1326 ); 1327 sde->descq = NULL; 1328 sde->descq_phys = 0; 1329 } 1330 kvfree(sde->tx_ring); 1331 sde->tx_ring = NULL; 1332 } 1333 spin_lock_irq(&dd->sde_map_lock); 1334 sdma_map_free(rcu_access_pointer(dd->sdma_map)); 1335 RCU_INIT_POINTER(dd->sdma_map, NULL); 1336 spin_unlock_irq(&dd->sde_map_lock); 1337 synchronize_rcu(); 1338 kfree(dd->per_sdma); 1339 dd->per_sdma = NULL; 1340 1341 if (dd->sdma_rht) { 1342 rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL); 1343 kfree(dd->sdma_rht); 1344 dd->sdma_rht = NULL; 1345 } 1346 } 1347 1348 /** 1349 * sdma_init() - called when device probed 1350 * @dd: hfi1_devdata 1351 * @port: port number (currently only zero) 1352 * 1353 * Initializes each sde and its csrs. 1354 * Interrupts are not required to be enabled. 1355 * 1356 * Returns: 1357 * 0 - success, -errno on failure 1358 */ 1359 int sdma_init(struct hfi1_devdata *dd, u8 port) 1360 { 1361 unsigned this_idx; 1362 struct sdma_engine *sde; 1363 struct rhashtable *tmp_sdma_rht; 1364 u16 descq_cnt; 1365 void *curr_head; 1366 struct hfi1_pportdata *ppd = dd->pport + port; 1367 u32 per_sdma_credits; 1368 uint idle_cnt = sdma_idle_cnt; 1369 size_t num_engines = chip_sdma_engines(dd); 1370 int ret = -ENOMEM; 1371 1372 if (!HFI1_CAP_IS_KSET(SDMA)) { 1373 HFI1_CAP_CLEAR(SDMA_AHG); 1374 return 0; 1375 } 1376 if (mod_num_sdma && 1377 /* can't exceed chip support */ 1378 mod_num_sdma <= chip_sdma_engines(dd) && 1379 /* count must be >= vls */ 1380 mod_num_sdma >= num_vls) 1381 num_engines = mod_num_sdma; 1382 1383 dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma); 1384 dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd)); 1385 dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n", 1386 chip_sdma_mem_size(dd)); 1387 1388 per_sdma_credits = 1389 chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE); 1390 1391 /* set up freeze waitqueue */ 1392 init_waitqueue_head(&dd->sdma_unfreeze_wq); 1393 atomic_set(&dd->sdma_unfreeze_count, 0); 1394 1395 descq_cnt = sdma_get_descq_cnt(); 1396 dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n", 1397 num_engines, descq_cnt); 1398 1399 /* alloc memory for array of send engines */ 1400 dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma), 1401 GFP_KERNEL, dd->node); 1402 if (!dd->per_sdma) 1403 return ret; 1404 1405 idle_cnt = ns_to_cclock(dd, idle_cnt); 1406 if (idle_cnt) 1407 dd->default_desc1 = 1408 SDMA_DESC1_HEAD_TO_HOST_FLAG; 1409 else 1410 dd->default_desc1 = 1411 SDMA_DESC1_INT_REQ_FLAG; 1412 1413 if (!sdma_desct_intr) 1414 sdma_desct_intr = SDMA_DESC_INTR; 1415 1416 /* Allocate memory for SendDMA descriptor FIFOs */ 1417 for (this_idx = 0; this_idx < num_engines; ++this_idx) { 1418 sde = &dd->per_sdma[this_idx]; 1419 sde->dd = dd; 1420 sde->ppd = ppd; 1421 sde->this_idx = this_idx; 1422 sde->descq_cnt = descq_cnt; 
1423 sde->desc_avail = sdma_descq_freecnt(sde); 1424 sde->sdma_shift = ilog2(descq_cnt); 1425 sde->sdma_mask = (1 << sde->sdma_shift) - 1; 1426 1427 /* Create a mask specifically for each interrupt source */ 1428 sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES + 1429 this_idx); 1430 sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES + 1431 this_idx); 1432 sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES + 1433 this_idx); 1434 /* Create a combined mask to cover all 3 interrupt sources */ 1435 sde->imask = sde->int_mask | sde->progress_mask | 1436 sde->idle_mask; 1437 1438 spin_lock_init(&sde->tail_lock); 1439 seqlock_init(&sde->head_lock); 1440 spin_lock_init(&sde->senddmactrl_lock); 1441 spin_lock_init(&sde->flushlist_lock); 1442 seqlock_init(&sde->waitlock); 1443 /* insure there is always a zero bit */ 1444 sde->ahg_bits = 0xfffffffe00000000ULL; 1445 1446 sdma_set_state(sde, sdma_state_s00_hw_down); 1447 1448 /* set up reference counting */ 1449 kref_init(&sde->state.kref); 1450 init_completion(&sde->state.comp); 1451 1452 INIT_LIST_HEAD(&sde->flushlist); 1453 INIT_LIST_HEAD(&sde->dmawait); 1454 1455 sde->tail_csr = 1456 get_kctxt_csr_addr(dd, this_idx, SD(TAIL)); 1457 1458 tasklet_setup(&sde->sdma_hw_clean_up_task, 1459 sdma_hw_clean_up_task); 1460 tasklet_setup(&sde->sdma_sw_clean_up_task, 1461 sdma_sw_clean_up_task); 1462 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait); 1463 INIT_WORK(&sde->flush_worker, sdma_field_flush); 1464 1465 sde->progress_check_head = 0; 1466 1467 timer_setup(&sde->err_progress_check_timer, 1468 sdma_err_progress_check, 0); 1469 1470 sde->descq = dma_alloc_coherent(&dd->pcidev->dev, 1471 descq_cnt * sizeof(u64[2]), 1472 &sde->descq_phys, GFP_KERNEL); 1473 if (!sde->descq) 1474 goto bail; 1475 sde->tx_ring = 1476 kvzalloc_node(array_size(descq_cnt, 1477 sizeof(struct sdma_txreq *)), 1478 GFP_KERNEL, dd->node); 1479 if (!sde->tx_ring) 1480 goto bail; 1481 } 1482 1483 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; 1484 /* Allocate memory for DMA of head registers to memory */ 1485 dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev, 1486 dd->sdma_heads_size, 1487 &dd->sdma_heads_phys, 1488 GFP_KERNEL); 1489 if (!dd->sdma_heads_dma) { 1490 dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); 1491 goto bail; 1492 } 1493 1494 /* Allocate memory for pad */ 1495 dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD, 1496 &dd->sdma_pad_phys, GFP_KERNEL); 1497 if (!dd->sdma_pad_dma) { 1498 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); 1499 goto bail; 1500 } 1501 1502 /* assign each engine to different cacheline and init registers */ 1503 curr_head = (void *)dd->sdma_heads_dma; 1504 for (this_idx = 0; this_idx < num_engines; ++this_idx) { 1505 unsigned long phys_offset; 1506 1507 sde = &dd->per_sdma[this_idx]; 1508 1509 sde->head_dma = curr_head; 1510 curr_head += L1_CACHE_BYTES; 1511 phys_offset = (unsigned long)sde->head_dma - 1512 (unsigned long)dd->sdma_heads_dma; 1513 sde->head_phys = dd->sdma_heads_phys + phys_offset; 1514 init_sdma_regs(sde, per_sdma_credits, idle_cnt); 1515 } 1516 dd->flags |= HFI1_HAS_SEND_DMA; 1517 dd->flags |= idle_cnt ? 
HFI1_HAS_SDMA_TIMEOUT : 0; 1518 dd->num_sdma = num_engines; 1519 ret = sdma_map_init(dd, port, ppd->vls_operational, NULL); 1520 if (ret < 0) 1521 goto bail; 1522 1523 tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL); 1524 if (!tmp_sdma_rht) { 1525 ret = -ENOMEM; 1526 goto bail; 1527 } 1528 1529 ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params); 1530 if (ret < 0) { 1531 kfree(tmp_sdma_rht); 1532 goto bail; 1533 } 1534 1535 dd->sdma_rht = tmp_sdma_rht; 1536 1537 dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma); 1538 return 0; 1539 1540 bail: 1541 sdma_clean(dd, num_engines); 1542 return ret; 1543 } 1544 1545 /** 1546 * sdma_all_running() - called when the link goes up 1547 * @dd: hfi1_devdata 1548 * 1549 * This routine moves all engines to the running state. 1550 */ 1551 void sdma_all_running(struct hfi1_devdata *dd) 1552 { 1553 struct sdma_engine *sde; 1554 unsigned int i; 1555 1556 /* move all engines to running */ 1557 for (i = 0; i < dd->num_sdma; ++i) { 1558 sde = &dd->per_sdma[i]; 1559 sdma_process_event(sde, sdma_event_e30_go_running); 1560 } 1561 } 1562 1563 /** 1564 * sdma_all_idle() - called when the link goes down 1565 * @dd: hfi1_devdata 1566 * 1567 * This routine moves all engines to the idle state. 1568 */ 1569 void sdma_all_idle(struct hfi1_devdata *dd) 1570 { 1571 struct sdma_engine *sde; 1572 unsigned int i; 1573 1574 /* idle all engines */ 1575 for (i = 0; i < dd->num_sdma; ++i) { 1576 sde = &dd->per_sdma[i]; 1577 sdma_process_event(sde, sdma_event_e70_go_idle); 1578 } 1579 } 1580 1581 /** 1582 * sdma_start() - called to kick off state processing for all engines 1583 * @dd: hfi1_devdata 1584 * 1585 * This routine is for kicking off the state processing for all required 1586 * sdma engines. Interrupts need to be working at this point. 1587 * 1588 */ 1589 void sdma_start(struct hfi1_devdata *dd) 1590 { 1591 unsigned i; 1592 struct sdma_engine *sde; 1593 1594 /* kick off the engines state processing */ 1595 for (i = 0; i < dd->num_sdma; ++i) { 1596 sde = &dd->per_sdma[i]; 1597 sdma_process_event(sde, sdma_event_e10_go_hw_start); 1598 } 1599 } 1600 1601 /** 1602 * sdma_exit() - used when module is removed 1603 * @dd: hfi1_devdata 1604 */ 1605 void sdma_exit(struct hfi1_devdata *dd) 1606 { 1607 unsigned this_idx; 1608 struct sdma_engine *sde; 1609 1610 for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma; 1611 ++this_idx) { 1612 sde = &dd->per_sdma[this_idx]; 1613 if (!list_empty(&sde->dmawait)) 1614 dd_dev_err(dd, "sde %u: dmawait list not empty!\n", 1615 sde->this_idx); 1616 sdma_process_event(sde, sdma_event_e00_go_hw_down); 1617 1618 del_timer_sync(&sde->err_progress_check_timer); 1619 1620 /* 1621 * This waits for the state machine to exit so it is not 1622 * necessary to kill the sdma_sw_clean_up_task to make sure 1623 * it is not running. 1624 */ 1625 sdma_finalput(&sde->state); 1626 } 1627 } 1628 1629 /* 1630 * unmap the indicated descriptor 1631 */ 1632 static inline void sdma_unmap_desc( 1633 struct hfi1_devdata *dd, 1634 struct sdma_desc *descp) 1635 { 1636 switch (sdma_mapping_type(descp)) { 1637 case SDMA_MAP_SINGLE: 1638 dma_unmap_single( 1639 &dd->pcidev->dev, 1640 sdma_mapping_addr(descp), 1641 sdma_mapping_len(descp), 1642 DMA_TO_DEVICE); 1643 break; 1644 case SDMA_MAP_PAGE: 1645 dma_unmap_page( 1646 &dd->pcidev->dev, 1647 sdma_mapping_addr(descp), 1648 sdma_mapping_len(descp), 1649 DMA_TO_DEVICE); 1650 break; 1651 } 1652 } 1653 1654 /* 1655 * return the mode as indicated by the first 1656 * descriptor in the tx. 
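 * (The mode field lives in qw[1] of descriptor 0; see
 * SDMA_DESC1_HEADER_MODE_SMASK/_SHIFT.)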
1657 */ 1658 static inline u8 ahg_mode(struct sdma_txreq *tx) 1659 { 1660 return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK) 1661 >> SDMA_DESC1_HEADER_MODE_SHIFT; 1662 } 1663 1664 /** 1665 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's 1666 * @dd: hfi1_devdata for unmapping 1667 * @tx: tx request to clean 1668 * 1669 * This is used in the progress routine to clean the tx or 1670 * by the ULP to toss an in-process tx build. 1671 * 1672 * The code can be called multiple times without issue. 1673 * 1674 */ 1675 void __sdma_txclean( 1676 struct hfi1_devdata *dd, 1677 struct sdma_txreq *tx) 1678 { 1679 u16 i; 1680 1681 if (tx->num_desc) { 1682 u8 skip = 0, mode = ahg_mode(tx); 1683 1684 /* unmap first */ 1685 sdma_unmap_desc(dd, &tx->descp[0]); 1686 /* determine number of AHG descriptors to skip */ 1687 if (mode > SDMA_AHG_APPLY_UPDATE1) 1688 skip = mode >> 1; 1689 for (i = 1 + skip; i < tx->num_desc; i++) 1690 sdma_unmap_desc(dd, &tx->descp[i]); 1691 tx->num_desc = 0; 1692 } 1693 kfree(tx->coalesce_buf); 1694 tx->coalesce_buf = NULL; 1695 /* kmalloc'ed descp */ 1696 if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) { 1697 tx->desc_limit = ARRAY_SIZE(tx->descs); 1698 kfree(tx->descp); 1699 } 1700 } 1701 1702 static inline u16 sdma_gethead(struct sdma_engine *sde) 1703 { 1704 struct hfi1_devdata *dd = sde->dd; 1705 int use_dmahead; 1706 u16 hwhead; 1707 1708 #ifdef CONFIG_SDMA_VERBOSITY 1709 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", 1710 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 1711 #endif 1712 1713 retry: 1714 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) && 1715 (dd->flags & HFI1_HAS_SDMA_TIMEOUT); 1716 hwhead = use_dmahead ? 1717 (u16)le64_to_cpu(*sde->head_dma) : 1718 (u16)read_sde_csr(sde, SD(HEAD)); 1719 1720 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) { 1721 u16 cnt; 1722 u16 swtail; 1723 u16 swhead; 1724 int sane; 1725 1726 swhead = sde->descq_head & sde->sdma_mask; 1727 /* this code is really bad for cache line trading */ 1728 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; 1729 cnt = sde->descq_cnt; 1730 1731 if (swhead < swtail) 1732 /* not wrapped */ 1733 sane = (hwhead >= swhead) & (hwhead <= swtail); 1734 else if (swhead > swtail) 1735 /* wrapped around */ 1736 sane = ((hwhead >= swhead) && (hwhead < cnt)) || 1737 (hwhead <= swtail); 1738 else 1739 /* empty */ 1740 sane = (hwhead == swhead); 1741 1742 if (unlikely(!sane)) { 1743 dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%u swhd=%u swtl=%u cnt=%u\n", 1744 sde->this_idx, 1745 use_dmahead ? "dma" : "kreg", 1746 hwhead, swhead, swtail, cnt); 1747 if (use_dmahead) { 1748 /* try one more time, using csr */ 1749 use_dmahead = 0; 1750 goto retry; 1751 } 1752 /* proceed as if no progress */ 1753 hwhead = swhead; 1754 } 1755 } 1756 return hwhead; 1757 } 1758 1759 /* 1760 * This is called when there are send DMA descriptors that might be 1761 * available. 1762 * 1763 * This is called with head_lock held. 
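 *
 * Waiters on sde->dmawait are harvested in batches of up to
 * SDMA_WAIT_BATCH_SIZE and the top-priority waiter is woken first.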
 */
static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
{
	struct iowait *wait, *nw, *twait;
	struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
	uint i, n = 0, seq, tidx = 0;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "avail: %u\n", avail);
#endif

	do {
		seq = read_seqbegin(&sde->waitlock);
		if (!list_empty(&sde->dmawait)) {
			/* at least one item */
			write_seqlock(&sde->waitlock);
			/* Harvest waiters wanting DMA descriptors */
			list_for_each_entry_safe(
				wait,
				nw,
				&sde->dmawait,
				list) {
				u32 num_desc;

				if (!wait->wakeup)
					continue;
				if (n == ARRAY_SIZE(waits))
					break;
				iowait_init_priority(wait);
				num_desc = iowait_get_all_desc(wait);
				if (num_desc > avail)
					break;
				avail -= num_desc;
				/* Find the top-priority wait member */
				if (n) {
					twait = waits[tidx];
					tidx =
					    iowait_priority_update_top(wait,
								       twait,
								       n,
								       tidx);
				}
				list_del_init(&wait->list);
				waits[n++] = wait;
			}
			write_sequnlock(&sde->waitlock);
			break;
		}
	} while (read_seqretry(&sde->waitlock, seq));

	/* Schedule the top-priority entry first */
	if (n)
		waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);

	for (i = 0; i < n; i++)
		if (i != tidx)
			waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}

/* head_lock must be held */
static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
	struct sdma_txreq *txp = NULL;
	int progress = 0;
	u16 hwhead, swhead;
	int idle_check_done = 0;

	hwhead = sdma_gethead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps. So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

retry:
	txp = get_txhead(sde);
	swhead = sde->descq_head & sde->sdma_mask;
	trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
	while (swhead != hwhead) {
		/* advance head, wrap if needed */
		swhead = ++sde->descq_head & sde->sdma_mask;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == swhead) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			txp = get_txhead(sde);
		}
		trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
		progress++;
	}

	/*
	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
	 * to updates to the dma_head location in host memory. The head
	 * value read might not be fully up to date. If there are pending
	 * descriptors and the SDMA idle interrupt fired then read from the
	 * CSR SDMA head instead to get the latest value from the hardware.
	 * The hardware SDMA head should be read at most once in this invocation
	 * of sdma_make_progress(..)
which is ensured by idle_check_done flag 1869 */ 1870 if ((status & sde->idle_mask) && !idle_check_done) { 1871 u16 swtail; 1872 1873 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; 1874 if (swtail != hwhead) { 1875 hwhead = (u16)read_sde_csr(sde, SD(HEAD)); 1876 idle_check_done = 1; 1877 goto retry; 1878 } 1879 } 1880 1881 sde->last_status = status; 1882 if (progress) 1883 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); 1884 } 1885 1886 /* 1887 * sdma_engine_interrupt() - interrupt handler for engine 1888 * @sde: sdma engine 1889 * @status: sdma interrupt reason 1890 * 1891 * Status is a mask of the 3 possible interrupts for this engine. It will 1892 * contain bits _only_ for this SDMA engine. It will contain at least one 1893 * bit, it may contain more. 1894 */ 1895 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status) 1896 { 1897 trace_hfi1_sdma_engine_interrupt(sde, status); 1898 write_seqlock(&sde->head_lock); 1899 sdma_set_desc_cnt(sde, sdma_desct_intr); 1900 if (status & sde->idle_mask) 1901 sde->idle_int_cnt++; 1902 else if (status & sde->progress_mask) 1903 sde->progress_int_cnt++; 1904 else if (status & sde->int_mask) 1905 sde->sdma_int_cnt++; 1906 sdma_make_progress(sde, status); 1907 write_sequnlock(&sde->head_lock); 1908 } 1909 1910 /** 1911 * sdma_engine_error() - error handler for engine 1912 * @sde: sdma engine 1913 * @status: sdma interrupt reason 1914 */ 1915 void sdma_engine_error(struct sdma_engine *sde, u64 status) 1916 { 1917 unsigned long flags; 1918 1919 #ifdef CONFIG_SDMA_VERBOSITY 1920 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n", 1921 sde->this_idx, 1922 (unsigned long long)status, 1923 sdma_state_names[sde->state.current_state]); 1924 #endif 1925 spin_lock_irqsave(&sde->tail_lock, flags); 1926 write_seqlock(&sde->head_lock); 1927 if (status & ALL_SDMA_ENG_HALT_ERRS) 1928 __sdma_process_event(sde, sdma_event_e60_hw_halted); 1929 if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) { 1930 dd_dev_err(sde->dd, 1931 "SDMA (%u) engine error: 0x%llx state %s\n", 1932 sde->this_idx, 1933 (unsigned long long)status, 1934 sdma_state_names[sde->state.current_state]); 1935 dump_sdma_state(sde); 1936 } 1937 write_sequnlock(&sde->head_lock); 1938 spin_unlock_irqrestore(&sde->tail_lock, flags); 1939 } 1940 1941 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op) 1942 { 1943 u64 set_senddmactrl = 0; 1944 u64 clr_senddmactrl = 0; 1945 unsigned long flags; 1946 1947 #ifdef CONFIG_SDMA_VERBOSITY 1948 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n", 1949 sde->this_idx, 1950 (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0, 1951 (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0, 1952 (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0, 1953 (op & SDMA_SENDCTRL_OP_CLEANUP) ? 
1 : 0); 1954 #endif 1955 1956 if (op & SDMA_SENDCTRL_OP_ENABLE) 1957 set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); 1958 else 1959 clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); 1960 1961 if (op & SDMA_SENDCTRL_OP_INTENABLE) 1962 set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); 1963 else 1964 clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); 1965 1966 if (op & SDMA_SENDCTRL_OP_HALT) 1967 set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); 1968 else 1969 clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); 1970 1971 spin_lock_irqsave(&sde->senddmactrl_lock, flags); 1972 1973 sde->p_senddmactrl |= set_senddmactrl; 1974 sde->p_senddmactrl &= ~clr_senddmactrl; 1975 1976 if (op & SDMA_SENDCTRL_OP_CLEANUP) 1977 write_sde_csr(sde, SD(CTRL), 1978 sde->p_senddmactrl | 1979 SD(CTRL_SDMA_CLEANUP_SMASK)); 1980 else 1981 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl); 1982 1983 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags); 1984 1985 #ifdef CONFIG_SDMA_VERBOSITY 1986 sdma_dumpstate(sde); 1987 #endif 1988 } 1989 1990 static void sdma_setlengen(struct sdma_engine *sde) 1991 { 1992 #ifdef CONFIG_SDMA_VERBOSITY 1993 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", 1994 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 1995 #endif 1996 1997 /* 1998 * Set SendDmaLenGen and clear-then-set the MSB of the generation 1999 * count to enable generation checking and load the internal 2000 * generation counter. 2001 */ 2002 write_sde_csr(sde, SD(LEN_GEN), 2003 (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)); 2004 write_sde_csr(sde, SD(LEN_GEN), 2005 ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | 2006 (4ULL << SD(LEN_GEN_GENERATION_SHIFT))); 2007 } 2008 2009 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail) 2010 { 2011 /* Commit writes to memory and advance the tail on the chip */ 2012 smp_wmb(); /* see get_txhead() */ 2013 writeq(tail, sde->tail_csr); 2014 } 2015 2016 /* 2017 * This is called when changing to state s10_hw_start_up_halt_wait as 2018 * a result of send buffer errors or send DMA descriptor errors. 2019 */ 2020 static void sdma_hw_start_up(struct sdma_engine *sde) 2021 { 2022 u64 reg; 2023 2024 #ifdef CONFIG_SDMA_VERBOSITY 2025 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", 2026 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 2027 #endif 2028 2029 sdma_setlengen(sde); 2030 sdma_update_tail(sde, 0); /* Set SendDmaTail */ 2031 *sde->head_dma = 0; 2032 2033 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) << 2034 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT); 2035 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); 2036 } 2037 2038 /* 2039 * set_sdma_integrity 2040 * 2041 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'. 
2042 */ 2043 static void set_sdma_integrity(struct sdma_engine *sde) 2044 { 2045 struct hfi1_devdata *dd = sde->dd; 2046 2047 write_sde_csr(sde, SD(CHECK_ENABLE), 2048 hfi1_pkt_base_sdma_integrity(dd)); 2049 } 2050 2051 static void init_sdma_regs( 2052 struct sdma_engine *sde, 2053 u32 credits, 2054 uint idle_cnt) 2055 { 2056 u8 opval, opmask; 2057 #ifdef CONFIG_SDMA_VERBOSITY 2058 struct hfi1_devdata *dd = sde->dd; 2059 2060 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", 2061 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); 2062 #endif 2063 2064 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys); 2065 sdma_setlengen(sde); 2066 sdma_update_tail(sde, 0); /* Set SendDmaTail */ 2067 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt); 2068 write_sde_csr(sde, SD(DESC_CNT), 0); 2069 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys); 2070 write_sde_csr(sde, SD(MEMORY), 2071 ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) | 2072 ((u64)(credits * sde->this_idx) << 2073 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT))); 2074 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull); 2075 set_sdma_integrity(sde); 2076 opmask = OPCODE_CHECK_MASK_DISABLED; 2077 opval = OPCODE_CHECK_VAL_DISABLED; 2078 write_sde_csr(sde, SD(CHECK_OPCODE), 2079 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) | 2080 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT)); 2081 } 2082 2083 #ifdef CONFIG_SDMA_VERBOSITY 2084 2085 #define sdma_dumpstate_helper0(reg) do { \ 2086 csr = read_csr(sde->dd, reg); \ 2087 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \ 2088 } while (0) 2089 2090 #define sdma_dumpstate_helper(reg) do { \ 2091 csr = read_sde_csr(sde, reg); \ 2092 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \ 2093 #reg, sde->this_idx, csr); \ 2094 } while (0) 2095 2096 #define sdma_dumpstate_helper2(reg) do { \ 2097 csr = read_csr(sde->dd, reg + (8 * i)); \ 2098 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \ 2099 #reg, i, csr); \ 2100 } while (0) 2101 2102 void sdma_dumpstate(struct sdma_engine *sde) 2103 { 2104 u64 csr; 2105 unsigned i; 2106 2107 sdma_dumpstate_helper(SD(CTRL)); 2108 sdma_dumpstate_helper(SD(STATUS)); 2109 sdma_dumpstate_helper0(SD(ERR_STATUS)); 2110 sdma_dumpstate_helper0(SD(ERR_MASK)); 2111 sdma_dumpstate_helper(SD(ENG_ERR_STATUS)); 2112 sdma_dumpstate_helper(SD(ENG_ERR_MASK)); 2113 2114 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) { 2115 sdma_dumpstate_helper2(CCE_INT_STATUS); 2116 sdma_dumpstate_helper2(CCE_INT_MASK); 2117 sdma_dumpstate_helper2(CCE_INT_BLOCKED); 2118 } 2119 2120 sdma_dumpstate_helper(SD(TAIL)); 2121 sdma_dumpstate_helper(SD(HEAD)); 2122 sdma_dumpstate_helper(SD(PRIORITY_THLD)); 2123 sdma_dumpstate_helper(SD(IDLE_CNT)); 2124 sdma_dumpstate_helper(SD(RELOAD_CNT)); 2125 sdma_dumpstate_helper(SD(DESC_CNT)); 2126 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT)); 2127 sdma_dumpstate_helper(SD(MEMORY)); 2128 sdma_dumpstate_helper0(SD(ENGINES)); 2129 sdma_dumpstate_helper0(SD(MEM_SIZE)); 2130 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */ 2131 sdma_dumpstate_helper(SD(BASE_ADDR)); 2132 sdma_dumpstate_helper(SD(LEN_GEN)); 2133 sdma_dumpstate_helper(SD(HEAD_ADDR)); 2134 sdma_dumpstate_helper(SD(CHECK_ENABLE)); 2135 sdma_dumpstate_helper(SD(CHECK_VL)); 2136 sdma_dumpstate_helper(SD(CHECK_JOB_KEY)); 2137 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY)); 2138 sdma_dumpstate_helper(SD(CHECK_SLID)); 2139 sdma_dumpstate_helper(SD(CHECK_OPCODE)); 2140 } 2141 #endif 2142 2143 static void dump_sdma_state(struct sdma_engine *sde) 2144 { 2145 struct hw_sdma_desc *descqp; 2146 u64 desc[2]; 2147 u64 addr; 2148 u8 gen; 2149 u16 
len; 2150 u16 head, tail, cnt; 2151 2152 head = sde->descq_head & sde->sdma_mask; 2153 tail = sde->descq_tail & sde->sdma_mask; 2154 cnt = sdma_descq_freecnt(sde); 2155 2156 dd_dev_err(sde->dd, 2157 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n", 2158 sde->this_idx, head, tail, cnt, 2159 !list_empty(&sde->flushlist)); 2160 2161 /* print info for each entry in the descriptor queue */ 2162 while (head != tail) { 2163 char flags[6] = { 'x', 'x', 'x', 'x', 0 }; 2164 2165 descqp = &sde->descq[head]; 2166 desc[0] = le64_to_cpu(descqp->qw[0]); 2167 desc[1] = le64_to_cpu(descqp->qw[1]); 2168 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; 2169 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 2170 'H' : '-'; 2171 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; 2172 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-'; 2173 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) 2174 & SDMA_DESC0_PHY_ADDR_MASK; 2175 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) 2176 & SDMA_DESC1_GENERATION_MASK; 2177 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) 2178 & SDMA_DESC0_BYTE_COUNT_MASK; 2179 dd_dev_err(sde->dd, 2180 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", 2181 head, flags, addr, gen, len); 2182 dd_dev_err(sde->dd, 2183 "\tdesc0:0x%016llx desc1 0x%016llx\n", 2184 desc[0], desc[1]); 2185 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) 2186 dd_dev_err(sde->dd, 2187 "\taidx: %u amode: %u alen: %u\n", 2188 (u8)((desc[1] & 2189 SDMA_DESC1_HEADER_INDEX_SMASK) >> 2190 SDMA_DESC1_HEADER_INDEX_SHIFT), 2191 (u8)((desc[1] & 2192 SDMA_DESC1_HEADER_MODE_SMASK) >> 2193 SDMA_DESC1_HEADER_MODE_SHIFT), 2194 (u8)((desc[1] & 2195 SDMA_DESC1_HEADER_DWS_SMASK) >> 2196 SDMA_DESC1_HEADER_DWS_SHIFT)); 2197 head++; 2198 head &= sde->sdma_mask; 2199 } 2200 } 2201 2202 #define SDE_FMT \ 2203 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n" 2204 /** 2205 * sdma_seqfile_dump_sde() - debugfs dump of sde 2206 * @s: seq file 2207 * @sde: send dma engine to dump 2208 * 2209 * This routine dumps the sde to the indicated seq file. 
2210 */ 2211 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) 2212 { 2213 u16 head, tail; 2214 struct hw_sdma_desc *descqp; 2215 u64 desc[2]; 2216 u64 addr; 2217 u8 gen; 2218 u16 len; 2219 2220 head = sde->descq_head & sde->sdma_mask; 2221 tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; 2222 seq_printf(s, SDE_FMT, sde->this_idx, 2223 sde->cpu, 2224 sdma_state_name(sde->state.current_state), 2225 (unsigned long long)read_sde_csr(sde, SD(CTRL)), 2226 (unsigned long long)read_sde_csr(sde, SD(STATUS)), 2227 (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)), 2228 (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail, 2229 (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, 2230 (unsigned long long)le64_to_cpu(*sde->head_dma), 2231 (unsigned long long)read_sde_csr(sde, SD(MEMORY)), 2232 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), 2233 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), 2234 (unsigned long long)sde->last_status, 2235 (unsigned long long)sde->ahg_bits, 2236 sde->tx_tail, 2237 sde->tx_head, 2238 sde->descq_tail, 2239 sde->descq_head, 2240 !list_empty(&sde->flushlist), 2241 sde->descq_full_count, 2242 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); 2243 2244 /* print info for each entry in the descriptor queue */ 2245 while (head != tail) { 2246 char flags[6] = { 'x', 'x', 'x', 'x', 0 }; 2247 2248 descqp = &sde->descq[head]; 2249 desc[0] = le64_to_cpu(descqp->qw[0]); 2250 desc[1] = le64_to_cpu(descqp->qw[1]); 2251 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; 2252 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 2253 'H' : '-'; 2254 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; 2255 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-'; 2256 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) 2257 & SDMA_DESC0_PHY_ADDR_MASK; 2258 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) 2259 & SDMA_DESC1_GENERATION_MASK; 2260 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) 2261 & SDMA_DESC0_BYTE_COUNT_MASK; 2262 seq_printf(s, 2263 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", 2264 head, flags, addr, gen, len); 2265 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) 2266 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", 2267 (u8)((desc[1] & 2268 SDMA_DESC1_HEADER_INDEX_SMASK) >> 2269 SDMA_DESC1_HEADER_INDEX_SHIFT), 2270 (u8)((desc[1] & 2271 SDMA_DESC1_HEADER_MODE_SMASK) >> 2272 SDMA_DESC1_HEADER_MODE_SHIFT)); 2273 head = (head + 1) & sde->sdma_mask; 2274 } 2275 } 2276 2277 /* 2278 * add the generation number into 2279 * the qw1 and return 2280 */ 2281 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1) 2282 { 2283 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3; 2284 2285 qw1 &= ~SDMA_DESC1_GENERATION_SMASK; 2286 qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK) 2287 << SDMA_DESC1_GENERATION_SHIFT; 2288 return qw1; 2289 } 2290 2291 /* 2292 * This routine submits the indicated tx 2293 * 2294 * Space has already been guaranteed and 2295 * tail side of ring is locked. 2296 * 2297 * The hardware tail update is done 2298 * in the caller and that is facilitated 2299 * by returning the new tail. 2300 * 2301 * There is special case logic for ahg 2302 * to not add the generation number for 2303 * up to 2 descriptors that follow the 2304 * first descriptor. 
2305 * 2306 */ 2307 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx) 2308 { 2309 int i; 2310 u16 tail; 2311 struct sdma_desc *descp = tx->descp; 2312 u8 skip = 0, mode = ahg_mode(tx); 2313 2314 tail = sde->descq_tail & sde->sdma_mask; 2315 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); 2316 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1])); 2317 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1], 2318 tail, &sde->descq[tail]); 2319 tail = ++sde->descq_tail & sde->sdma_mask; 2320 descp++; 2321 if (mode > SDMA_AHG_APPLY_UPDATE1) 2322 skip = mode >> 1; 2323 for (i = 1; i < tx->num_desc; i++, descp++) { 2324 u64 qw1; 2325 2326 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); 2327 if (skip) { 2328 /* edits don't have generation */ 2329 qw1 = descp->qw[1]; 2330 skip--; 2331 } else { 2332 /* replace generation with real one for non-edits */ 2333 qw1 = add_gen(sde, descp->qw[1]); 2334 } 2335 sde->descq[tail].qw[1] = cpu_to_le64(qw1); 2336 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1, 2337 tail, &sde->descq[tail]); 2338 tail = ++sde->descq_tail & sde->sdma_mask; 2339 } 2340 tx->next_descq_idx = tail; 2341 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2342 tx->sn = sde->tail_sn++; 2343 trace_hfi1_sdma_in_sn(sde, tx->sn); 2344 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]); 2345 #endif 2346 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx; 2347 sde->desc_avail -= tx->num_desc; 2348 return tail; 2349 } 2350 2351 /* 2352 * Check for progress 2353 */ 2354 static int sdma_check_progress( 2355 struct sdma_engine *sde, 2356 struct iowait_work *wait, 2357 struct sdma_txreq *tx, 2358 bool pkts_sent) 2359 { 2360 int ret; 2361 2362 sde->desc_avail = sdma_descq_freecnt(sde); 2363 if (tx->num_desc <= sde->desc_avail) 2364 return -EAGAIN; 2365 /* pulse the head_lock */ 2366 if (wait && iowait_ioww_to_iow(wait)->sleep) { 2367 unsigned seq; 2368 2369 seq = raw_seqcount_begin( 2370 (const seqcount_t *)&sde->head_lock.seqcount); 2371 ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent); 2372 if (ret == -EAGAIN) 2373 sde->desc_avail = sdma_descq_freecnt(sde); 2374 } else { 2375 ret = -EBUSY; 2376 } 2377 return ret; 2378 } 2379 2380 /** 2381 * sdma_send_txreq() - submit a tx req to ring 2382 * @sde: sdma engine to use 2383 * @wait: SE wait structure to use when full (may be NULL) 2384 * @tx: sdma_txreq to submit 2385 * @pkts_sent: has any packet been sent yet? 2386 * 2387 * The call submits the tx into the ring. If a iowait structure is non-NULL 2388 * the packet will be queued to the list in wait. 
2389 * 2390 * Return: 2391 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in 2392 * ring (wait == NULL) 2393 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state 2394 */ 2395 int sdma_send_txreq(struct sdma_engine *sde, 2396 struct iowait_work *wait, 2397 struct sdma_txreq *tx, 2398 bool pkts_sent) 2399 { 2400 int ret = 0; 2401 u16 tail; 2402 unsigned long flags; 2403 2404 /* user should have supplied entire packet */ 2405 if (unlikely(tx->tlen)) 2406 return -EINVAL; 2407 tx->wait = iowait_ioww_to_iow(wait); 2408 spin_lock_irqsave(&sde->tail_lock, flags); 2409 retry: 2410 if (unlikely(!__sdma_running(sde))) 2411 goto unlock_noconn; 2412 if (unlikely(tx->num_desc > sde->desc_avail)) 2413 goto nodesc; 2414 tail = submit_tx(sde, tx); 2415 if (wait) 2416 iowait_sdma_inc(iowait_ioww_to_iow(wait)); 2417 sdma_update_tail(sde, tail); 2418 unlock: 2419 spin_unlock_irqrestore(&sde->tail_lock, flags); 2420 return ret; 2421 unlock_noconn: 2422 if (wait) 2423 iowait_sdma_inc(iowait_ioww_to_iow(wait)); 2424 tx->next_descq_idx = 0; 2425 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2426 tx->sn = sde->tail_sn++; 2427 trace_hfi1_sdma_in_sn(sde, tx->sn); 2428 #endif 2429 spin_lock(&sde->flushlist_lock); 2430 list_add_tail(&tx->list, &sde->flushlist); 2431 spin_unlock(&sde->flushlist_lock); 2432 iowait_inc_wait_count(wait, tx->num_desc); 2433 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); 2434 ret = -ECOMM; 2435 goto unlock; 2436 nodesc: 2437 ret = sdma_check_progress(sde, wait, tx, pkts_sent); 2438 if (ret == -EAGAIN) { 2439 ret = 0; 2440 goto retry; 2441 } 2442 sde->descq_full_count++; 2443 goto unlock; 2444 } 2445 2446 /** 2447 * sdma_send_txlist() - submit a list of tx req to ring 2448 * @sde: sdma engine to use 2449 * @wait: SE wait structure to use when full (may be NULL) 2450 * @tx_list: list of sdma_txreqs to submit 2451 * @count_out: pointer to a u16 which, after return will contain the total number of 2452 * sdma_txreqs removed from the tx_list. This will include sdma_txreqs 2453 * whose SDMA descriptors are submitted to the ring and the sdma_txreqs 2454 * which are added to SDMA engine flush list if the SDMA engine state is 2455 * not running. 2456 * 2457 * The call submits the list into the ring. 2458 * 2459 * If the iowait structure is non-NULL and not equal to the iowait list 2460 * the unprocessed part of the list will be appended to the list in wait. 2461 * 2462 * In all cases, the tx_list will be updated so the head of the tx_list is 2463 * the list of descriptors that have yet to be transmitted. 2464 * 2465 * The intent of this call is to provide a more efficient 2466 * way of submitting multiple packets to SDMA while holding the tail 2467 * side locking. 
2468 * 2469 * Return: 2470 * 0 - Success, 2471 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) 2472 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state 2473 */ 2474 int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait, 2475 struct list_head *tx_list, u16 *count_out) 2476 { 2477 struct sdma_txreq *tx, *tx_next; 2478 int ret = 0; 2479 unsigned long flags; 2480 u16 tail = INVALID_TAIL; 2481 u32 submit_count = 0, flush_count = 0, total_count; 2482 2483 spin_lock_irqsave(&sde->tail_lock, flags); 2484 retry: 2485 list_for_each_entry_safe(tx, tx_next, tx_list, list) { 2486 tx->wait = iowait_ioww_to_iow(wait); 2487 if (unlikely(!__sdma_running(sde))) 2488 goto unlock_noconn; 2489 if (unlikely(tx->num_desc > sde->desc_avail)) 2490 goto nodesc; 2491 if (unlikely(tx->tlen)) { 2492 ret = -EINVAL; 2493 goto update_tail; 2494 } 2495 list_del_init(&tx->list); 2496 tail = submit_tx(sde, tx); 2497 submit_count++; 2498 if (tail != INVALID_TAIL && 2499 (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) { 2500 sdma_update_tail(sde, tail); 2501 tail = INVALID_TAIL; 2502 } 2503 } 2504 update_tail: 2505 total_count = submit_count + flush_count; 2506 if (wait) { 2507 iowait_sdma_add(iowait_ioww_to_iow(wait), total_count); 2508 iowait_starve_clear(submit_count > 0, 2509 iowait_ioww_to_iow(wait)); 2510 } 2511 if (tail != INVALID_TAIL) 2512 sdma_update_tail(sde, tail); 2513 spin_unlock_irqrestore(&sde->tail_lock, flags); 2514 *count_out = total_count; 2515 return ret; 2516 unlock_noconn: 2517 spin_lock(&sde->flushlist_lock); 2518 list_for_each_entry_safe(tx, tx_next, tx_list, list) { 2519 tx->wait = iowait_ioww_to_iow(wait); 2520 list_del_init(&tx->list); 2521 tx->next_descq_idx = 0; 2522 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER 2523 tx->sn = sde->tail_sn++; 2524 trace_hfi1_sdma_in_sn(sde, tx->sn); 2525 #endif 2526 list_add_tail(&tx->list, &sde->flushlist); 2527 flush_count++; 2528 iowait_inc_wait_count(wait, tx->num_desc); 2529 } 2530 spin_unlock(&sde->flushlist_lock); 2531 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); 2532 ret = -ECOMM; 2533 goto update_tail; 2534 nodesc: 2535 ret = sdma_check_progress(sde, wait, tx, submit_count > 0); 2536 if (ret == -EAGAIN) { 2537 ret = 0; 2538 goto retry; 2539 } 2540 sde->descq_full_count++; 2541 goto update_tail; 2542 } 2543 2544 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) 2545 { 2546 unsigned long flags; 2547 2548 spin_lock_irqsave(&sde->tail_lock, flags); 2549 write_seqlock(&sde->head_lock); 2550 2551 __sdma_process_event(sde, event); 2552 2553 if (sde->state.current_state == sdma_state_s99_running) 2554 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); 2555 2556 write_sequnlock(&sde->head_lock); 2557 spin_unlock_irqrestore(&sde->tail_lock, flags); 2558 } 2559 2560 static void __sdma_process_event(struct sdma_engine *sde, 2561 enum sdma_events event) 2562 { 2563 struct sdma_state *ss = &sde->state; 2564 int need_progress = 0; 2565 2566 /* CONFIG SDMA temporary */ 2567 #ifdef CONFIG_SDMA_VERBOSITY 2568 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx, 2569 sdma_state_names[ss->current_state], 2570 sdma_event_names[event]); 2571 #endif 2572 2573 switch (ss->current_state) { 2574 case sdma_state_s00_hw_down: 2575 switch (event) { 2576 case sdma_event_e00_go_hw_down: 2577 break; 2578 case sdma_event_e30_go_running: 2579 /* 2580 * If down, but running requested (usually result 2581 * of link up, then we need to start up. 
2582 * This can happen when hw down is requested while 2583 * bringing the link up with traffic active on 2584 * 7220, e.g. 2585 */ 2586 ss->go_s99_running = 1; 2587 fallthrough; /* and start dma engine */ 2588 case sdma_event_e10_go_hw_start: 2589 /* This reference means the state machine is started */ 2590 sdma_get(&sde->state); 2591 sdma_set_state(sde, 2592 sdma_state_s10_hw_start_up_halt_wait); 2593 break; 2594 case sdma_event_e15_hw_halt_done: 2595 break; 2596 case sdma_event_e25_hw_clean_up_done: 2597 break; 2598 case sdma_event_e40_sw_cleaned: 2599 sdma_sw_tear_down(sde); 2600 break; 2601 case sdma_event_e50_hw_cleaned: 2602 break; 2603 case sdma_event_e60_hw_halted: 2604 break; 2605 case sdma_event_e70_go_idle: 2606 break; 2607 case sdma_event_e80_hw_freeze: 2608 break; 2609 case sdma_event_e81_hw_frozen: 2610 break; 2611 case sdma_event_e82_hw_unfreeze: 2612 break; 2613 case sdma_event_e85_link_down: 2614 break; 2615 case sdma_event_e90_sw_halted: 2616 break; 2617 } 2618 break; 2619 2620 case sdma_state_s10_hw_start_up_halt_wait: 2621 switch (event) { 2622 case sdma_event_e00_go_hw_down: 2623 sdma_set_state(sde, sdma_state_s00_hw_down); 2624 sdma_sw_tear_down(sde); 2625 break; 2626 case sdma_event_e10_go_hw_start: 2627 break; 2628 case sdma_event_e15_hw_halt_done: 2629 sdma_set_state(sde, 2630 sdma_state_s15_hw_start_up_clean_wait); 2631 sdma_start_hw_clean_up(sde); 2632 break; 2633 case sdma_event_e25_hw_clean_up_done: 2634 break; 2635 case sdma_event_e30_go_running: 2636 ss->go_s99_running = 1; 2637 break; 2638 case sdma_event_e40_sw_cleaned: 2639 break; 2640 case sdma_event_e50_hw_cleaned: 2641 break; 2642 case sdma_event_e60_hw_halted: 2643 schedule_work(&sde->err_halt_worker); 2644 break; 2645 case sdma_event_e70_go_idle: 2646 ss->go_s99_running = 0; 2647 break; 2648 case sdma_event_e80_hw_freeze: 2649 break; 2650 case sdma_event_e81_hw_frozen: 2651 break; 2652 case sdma_event_e82_hw_unfreeze: 2653 break; 2654 case sdma_event_e85_link_down: 2655 break; 2656 case sdma_event_e90_sw_halted: 2657 break; 2658 } 2659 break; 2660 2661 case sdma_state_s15_hw_start_up_clean_wait: 2662 switch (event) { 2663 case sdma_event_e00_go_hw_down: 2664 sdma_set_state(sde, sdma_state_s00_hw_down); 2665 sdma_sw_tear_down(sde); 2666 break; 2667 case sdma_event_e10_go_hw_start: 2668 break; 2669 case sdma_event_e15_hw_halt_done: 2670 break; 2671 case sdma_event_e25_hw_clean_up_done: 2672 sdma_hw_start_up(sde); 2673 sdma_set_state(sde, ss->go_s99_running ? 
2674 sdma_state_s99_running : 2675 sdma_state_s20_idle); 2676 break; 2677 case sdma_event_e30_go_running: 2678 ss->go_s99_running = 1; 2679 break; 2680 case sdma_event_e40_sw_cleaned: 2681 break; 2682 case sdma_event_e50_hw_cleaned: 2683 break; 2684 case sdma_event_e60_hw_halted: 2685 break; 2686 case sdma_event_e70_go_idle: 2687 ss->go_s99_running = 0; 2688 break; 2689 case sdma_event_e80_hw_freeze: 2690 break; 2691 case sdma_event_e81_hw_frozen: 2692 break; 2693 case sdma_event_e82_hw_unfreeze: 2694 break; 2695 case sdma_event_e85_link_down: 2696 break; 2697 case sdma_event_e90_sw_halted: 2698 break; 2699 } 2700 break; 2701 2702 case sdma_state_s20_idle: 2703 switch (event) { 2704 case sdma_event_e00_go_hw_down: 2705 sdma_set_state(sde, sdma_state_s00_hw_down); 2706 sdma_sw_tear_down(sde); 2707 break; 2708 case sdma_event_e10_go_hw_start: 2709 break; 2710 case sdma_event_e15_hw_halt_done: 2711 break; 2712 case sdma_event_e25_hw_clean_up_done: 2713 break; 2714 case sdma_event_e30_go_running: 2715 sdma_set_state(sde, sdma_state_s99_running); 2716 ss->go_s99_running = 1; 2717 break; 2718 case sdma_event_e40_sw_cleaned: 2719 break; 2720 case sdma_event_e50_hw_cleaned: 2721 break; 2722 case sdma_event_e60_hw_halted: 2723 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); 2724 schedule_work(&sde->err_halt_worker); 2725 break; 2726 case sdma_event_e70_go_idle: 2727 break; 2728 case sdma_event_e85_link_down: 2729 case sdma_event_e80_hw_freeze: 2730 sdma_set_state(sde, sdma_state_s80_hw_freeze); 2731 atomic_dec(&sde->dd->sdma_unfreeze_count); 2732 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 2733 break; 2734 case sdma_event_e81_hw_frozen: 2735 break; 2736 case sdma_event_e82_hw_unfreeze: 2737 break; 2738 case sdma_event_e90_sw_halted: 2739 break; 2740 } 2741 break; 2742 2743 case sdma_state_s30_sw_clean_up_wait: 2744 switch (event) { 2745 case sdma_event_e00_go_hw_down: 2746 sdma_set_state(sde, sdma_state_s00_hw_down); 2747 break; 2748 case sdma_event_e10_go_hw_start: 2749 break; 2750 case sdma_event_e15_hw_halt_done: 2751 break; 2752 case sdma_event_e25_hw_clean_up_done: 2753 break; 2754 case sdma_event_e30_go_running: 2755 ss->go_s99_running = 1; 2756 break; 2757 case sdma_event_e40_sw_cleaned: 2758 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait); 2759 sdma_start_hw_clean_up(sde); 2760 break; 2761 case sdma_event_e50_hw_cleaned: 2762 break; 2763 case sdma_event_e60_hw_halted: 2764 break; 2765 case sdma_event_e70_go_idle: 2766 ss->go_s99_running = 0; 2767 break; 2768 case sdma_event_e80_hw_freeze: 2769 break; 2770 case sdma_event_e81_hw_frozen: 2771 break; 2772 case sdma_event_e82_hw_unfreeze: 2773 break; 2774 case sdma_event_e85_link_down: 2775 ss->go_s99_running = 0; 2776 break; 2777 case sdma_event_e90_sw_halted: 2778 break; 2779 } 2780 break; 2781 2782 case sdma_state_s40_hw_clean_up_wait: 2783 switch (event) { 2784 case sdma_event_e00_go_hw_down: 2785 sdma_set_state(sde, sdma_state_s00_hw_down); 2786 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2787 break; 2788 case sdma_event_e10_go_hw_start: 2789 break; 2790 case sdma_event_e15_hw_halt_done: 2791 break; 2792 case sdma_event_e25_hw_clean_up_done: 2793 sdma_hw_start_up(sde); 2794 sdma_set_state(sde, ss->go_s99_running ? 
2795 sdma_state_s99_running : 2796 sdma_state_s20_idle); 2797 break; 2798 case sdma_event_e30_go_running: 2799 ss->go_s99_running = 1; 2800 break; 2801 case sdma_event_e40_sw_cleaned: 2802 break; 2803 case sdma_event_e50_hw_cleaned: 2804 break; 2805 case sdma_event_e60_hw_halted: 2806 break; 2807 case sdma_event_e70_go_idle: 2808 ss->go_s99_running = 0; 2809 break; 2810 case sdma_event_e80_hw_freeze: 2811 break; 2812 case sdma_event_e81_hw_frozen: 2813 break; 2814 case sdma_event_e82_hw_unfreeze: 2815 break; 2816 case sdma_event_e85_link_down: 2817 ss->go_s99_running = 0; 2818 break; 2819 case sdma_event_e90_sw_halted: 2820 break; 2821 } 2822 break; 2823 2824 case sdma_state_s50_hw_halt_wait: 2825 switch (event) { 2826 case sdma_event_e00_go_hw_down: 2827 sdma_set_state(sde, sdma_state_s00_hw_down); 2828 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2829 break; 2830 case sdma_event_e10_go_hw_start: 2831 break; 2832 case sdma_event_e15_hw_halt_done: 2833 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); 2834 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2835 break; 2836 case sdma_event_e25_hw_clean_up_done: 2837 break; 2838 case sdma_event_e30_go_running: 2839 ss->go_s99_running = 1; 2840 break; 2841 case sdma_event_e40_sw_cleaned: 2842 break; 2843 case sdma_event_e50_hw_cleaned: 2844 break; 2845 case sdma_event_e60_hw_halted: 2846 schedule_work(&sde->err_halt_worker); 2847 break; 2848 case sdma_event_e70_go_idle: 2849 ss->go_s99_running = 0; 2850 break; 2851 case sdma_event_e80_hw_freeze: 2852 break; 2853 case sdma_event_e81_hw_frozen: 2854 break; 2855 case sdma_event_e82_hw_unfreeze: 2856 break; 2857 case sdma_event_e85_link_down: 2858 ss->go_s99_running = 0; 2859 break; 2860 case sdma_event_e90_sw_halted: 2861 break; 2862 } 2863 break; 2864 2865 case sdma_state_s60_idle_halt_wait: 2866 switch (event) { 2867 case sdma_event_e00_go_hw_down: 2868 sdma_set_state(sde, sdma_state_s00_hw_down); 2869 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2870 break; 2871 case sdma_event_e10_go_hw_start: 2872 break; 2873 case sdma_event_e15_hw_halt_done: 2874 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); 2875 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2876 break; 2877 case sdma_event_e25_hw_clean_up_done: 2878 break; 2879 case sdma_event_e30_go_running: 2880 ss->go_s99_running = 1; 2881 break; 2882 case sdma_event_e40_sw_cleaned: 2883 break; 2884 case sdma_event_e50_hw_cleaned: 2885 break; 2886 case sdma_event_e60_hw_halted: 2887 schedule_work(&sde->err_halt_worker); 2888 break; 2889 case sdma_event_e70_go_idle: 2890 ss->go_s99_running = 0; 2891 break; 2892 case sdma_event_e80_hw_freeze: 2893 break; 2894 case sdma_event_e81_hw_frozen: 2895 break; 2896 case sdma_event_e82_hw_unfreeze: 2897 break; 2898 case sdma_event_e85_link_down: 2899 break; 2900 case sdma_event_e90_sw_halted: 2901 break; 2902 } 2903 break; 2904 2905 case sdma_state_s80_hw_freeze: 2906 switch (event) { 2907 case sdma_event_e00_go_hw_down: 2908 sdma_set_state(sde, sdma_state_s00_hw_down); 2909 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2910 break; 2911 case sdma_event_e10_go_hw_start: 2912 break; 2913 case sdma_event_e15_hw_halt_done: 2914 break; 2915 case sdma_event_e25_hw_clean_up_done: 2916 break; 2917 case sdma_event_e30_go_running: 2918 ss->go_s99_running = 1; 2919 break; 2920 case sdma_event_e40_sw_cleaned: 2921 break; 2922 case sdma_event_e50_hw_cleaned: 2923 break; 2924 case sdma_event_e60_hw_halted: 2925 break; 2926 case sdma_event_e70_go_idle: 2927 ss->go_s99_running = 0; 2928 break; 2929 
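/* already in the freeze sequence: a repeated e80_hw_freeze event is a no-op here */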
case sdma_event_e80_hw_freeze: 2930 break; 2931 case sdma_event_e81_hw_frozen: 2932 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean); 2933 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2934 break; 2935 case sdma_event_e82_hw_unfreeze: 2936 break; 2937 case sdma_event_e85_link_down: 2938 break; 2939 case sdma_event_e90_sw_halted: 2940 break; 2941 } 2942 break; 2943 2944 case sdma_state_s82_freeze_sw_clean: 2945 switch (event) { 2946 case sdma_event_e00_go_hw_down: 2947 sdma_set_state(sde, sdma_state_s00_hw_down); 2948 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2949 break; 2950 case sdma_event_e10_go_hw_start: 2951 break; 2952 case sdma_event_e15_hw_halt_done: 2953 break; 2954 case sdma_event_e25_hw_clean_up_done: 2955 break; 2956 case sdma_event_e30_go_running: 2957 ss->go_s99_running = 1; 2958 break; 2959 case sdma_event_e40_sw_cleaned: 2960 /* notify caller this engine is done cleaning */ 2961 atomic_dec(&sde->dd->sdma_unfreeze_count); 2962 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 2963 break; 2964 case sdma_event_e50_hw_cleaned: 2965 break; 2966 case sdma_event_e60_hw_halted: 2967 break; 2968 case sdma_event_e70_go_idle: 2969 ss->go_s99_running = 0; 2970 break; 2971 case sdma_event_e80_hw_freeze: 2972 break; 2973 case sdma_event_e81_hw_frozen: 2974 break; 2975 case sdma_event_e82_hw_unfreeze: 2976 sdma_hw_start_up(sde); 2977 sdma_set_state(sde, ss->go_s99_running ? 2978 sdma_state_s99_running : 2979 sdma_state_s20_idle); 2980 break; 2981 case sdma_event_e85_link_down: 2982 break; 2983 case sdma_event_e90_sw_halted: 2984 break; 2985 } 2986 break; 2987 2988 case sdma_state_s99_running: 2989 switch (event) { 2990 case sdma_event_e00_go_hw_down: 2991 sdma_set_state(sde, sdma_state_s00_hw_down); 2992 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); 2993 break; 2994 case sdma_event_e10_go_hw_start: 2995 break; 2996 case sdma_event_e15_hw_halt_done: 2997 break; 2998 case sdma_event_e25_hw_clean_up_done: 2999 break; 3000 case sdma_event_e30_go_running: 3001 break; 3002 case sdma_event_e40_sw_cleaned: 3003 break; 3004 case sdma_event_e50_hw_cleaned: 3005 break; 3006 case sdma_event_e60_hw_halted: 3007 need_progress = 1; 3008 sdma_err_progress_check_schedule(sde); 3009 fallthrough; 3010 case sdma_event_e90_sw_halted: 3011 /* 3012 * SW initiated halt does not perform engines 3013 * progress check 3014 */ 3015 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); 3016 schedule_work(&sde->err_halt_worker); 3017 break; 3018 case sdma_event_e70_go_idle: 3019 sdma_set_state(sde, sdma_state_s60_idle_halt_wait); 3020 break; 3021 case sdma_event_e85_link_down: 3022 ss->go_s99_running = 0; 3023 fallthrough; 3024 case sdma_event_e80_hw_freeze: 3025 sdma_set_state(sde, sdma_state_s80_hw_freeze); 3026 atomic_dec(&sde->dd->sdma_unfreeze_count); 3027 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); 3028 break; 3029 case sdma_event_e81_hw_frozen: 3030 break; 3031 case sdma_event_e82_hw_unfreeze: 3032 break; 3033 } 3034 break; 3035 } 3036 3037 ss->last_event = event; 3038 if (need_progress) 3039 sdma_make_progress(sde, 0); 3040 } 3041 3042 /* 3043 * _extend_sdma_tx_descs() - helper to extend txreq 3044 * 3045 * This is called once the initial nominal allocation 3046 * of descriptors in the sdma_txreq is exhausted. 3047 * 3048 * The code will bump the allocation up to the max 3049 * of MAX_DESC (64) descriptors. There doesn't seem 3050 * much point in an interim step. 
The last descriptor 3051 * is reserved for the coalesce buffer in order to support 3052 * cases where the input packet has >MAX_DESC iovecs. 3053 * 3054 */ 3055 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) 3056 { 3057 int i; 3058 3059 /* Handle last descriptor */ 3060 if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { 3061 /* if tlen is 0, it is for padding, release last descriptor */ 3062 if (!tx->tlen) { 3063 tx->desc_limit = MAX_DESC; 3064 } else if (!tx->coalesce_buf) { 3065 /* allocate coalesce buffer with space for padding */ 3066 tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32), 3067 GFP_ATOMIC); 3068 if (!tx->coalesce_buf) 3069 goto enomem; 3070 tx->coalesce_idx = 0; 3071 } 3072 return 0; 3073 } 3074 3075 if (unlikely(tx->num_desc == MAX_DESC)) 3076 goto enomem; 3077 3078 tx->descp = kmalloc_array( 3079 MAX_DESC, 3080 sizeof(struct sdma_desc), 3081 GFP_ATOMIC); 3082 if (!tx->descp) 3083 goto enomem; 3084 3085 /* reserve last descriptor for coalescing */ 3086 tx->desc_limit = MAX_DESC - 1; 3087 /* copy ones already built */ 3088 for (i = 0; i < tx->num_desc; i++) 3089 tx->descp[i] = tx->descs[i]; 3090 return 0; 3091 enomem: 3092 __sdma_txclean(dd, tx); 3093 return -ENOMEM; 3094 } 3095 3096 /* 3097 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors 3098 * 3099 * This is called once the initial nominal allocation of descriptors 3100 * in the sdma_txreq is exhausted. 3101 * 3102 * This function calls _extend_sdma_tx_descs to extend or allocate 3103 * a coalesce buffer. If there is an allocated coalesce buffer, it will 3104 * copy the input packet data into the coalesce buffer. It also adds 3105 * the coalesce buffer descriptor once the whole packet is received. 3106 * 3107 * Return: 3108 * <0 - error 3109 * 0 - coalescing, don't populate descriptor 3110 * 1 - continue with populating descriptor 3111 */ 3112 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, 3113 int type, void *kvaddr, struct page *page, 3114 unsigned long offset, u16 len) 3115 { 3116 int pad_len, rval; 3117 dma_addr_t addr; 3118 3119 rval = _extend_sdma_tx_descs(dd, tx); 3120 if (rval) { 3121 __sdma_txclean(dd, tx); 3122 return rval; 3123 } 3124 3125 /* If coalesce buffer is allocated, copy data into it */ 3126 if (tx->coalesce_buf) { 3127 if (type == SDMA_MAP_NONE) { 3128 __sdma_txclean(dd, tx); 3129 return -EINVAL; 3130 } 3131 3132 if (type == SDMA_MAP_PAGE) { 3133 kvaddr = kmap(page); 3134 kvaddr += offset; 3135 } else if (WARN_ON(!kvaddr)) { 3136 __sdma_txclean(dd, tx); 3137 return -EINVAL; 3138 } 3139 3140 memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len); 3141 tx->coalesce_idx += len; 3142 if (type == SDMA_MAP_PAGE) 3143 kunmap(page); 3144 3145 /* If there is more data, return */ 3146 if (tx->tlen - tx->coalesce_idx) 3147 return 0; 3148 3149 /* Whole packet is received; add any padding */ 3150 pad_len = tx->packet_len & (sizeof(u32) - 1); 3151 if (pad_len) { 3152 pad_len = sizeof(u32) - pad_len; 3153 memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len); 3154 /* padding is taken care of for coalescing case */ 3155 tx->packet_len += pad_len; 3156 tx->tlen += pad_len; 3157 } 3158 3159 /* dma map the coalesce buffer */ 3160 addr = dma_map_single(&dd->pcidev->dev, 3161 tx->coalesce_buf, 3162 tx->tlen, 3163 DMA_TO_DEVICE); 3164 3165 if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { 3166 __sdma_txclean(dd, tx); 3167 return -ENOSPC; 3168 } 3169 3170 /* Add descriptor for coalesce buffer */ 3171 tx->desc_limit = MAX_DESC; 3172 return
_sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, 3173 addr, tx->tlen); 3174 } 3175 3176 return 1; 3177 } 3178 3179 /* Update sdes when the lmc changes */ 3180 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid) 3181 { 3182 struct sdma_engine *sde; 3183 int i; 3184 u64 sreg; 3185 3186 sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) << 3187 SD(CHECK_SLID_MASK_SHIFT)) | 3188 (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) << 3189 SD(CHECK_SLID_VALUE_SHIFT)); 3190 3191 for (i = 0; i < dd->num_sdma; i++) { 3192 hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x", 3193 i, (u32)sreg); 3194 sde = &dd->per_sdma[i]; 3195 write_sde_csr(sde, SD(CHECK_SLID), sreg); 3196 } 3197 } 3198 3199 /* tx not dword sized - pad */ 3200 int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) 3201 { 3202 int rval = 0; 3203 3204 tx->num_desc++; 3205 if ((unlikely(tx->num_desc == tx->desc_limit))) { 3206 rval = _extend_sdma_tx_descs(dd, tx); 3207 if (rval) { 3208 __sdma_txclean(dd, tx); 3209 return rval; 3210 } 3211 } 3212 /* finish the one just added */ 3213 make_tx_sdma_desc( 3214 tx, 3215 SDMA_MAP_NONE, 3216 dd->sdma_pad_phys, 3217 sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); 3218 _sdma_close_tx(dd, tx); 3219 return rval; 3220 } 3221 3222 /* 3223 * Add ahg to the sdma_txreq 3224 * 3225 * The logic will consume up to 3 3226 * descriptors at the beginning of 3227 * sdma_txreq. 3228 */ 3229 void _sdma_txreq_ahgadd( 3230 struct sdma_txreq *tx, 3231 u8 num_ahg, 3232 u8 ahg_entry, 3233 u32 *ahg, 3234 u8 ahg_hlen) 3235 { 3236 u32 i, shift = 0, desc = 0; 3237 u8 mode; 3238 3239 WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4); 3240 /* compute mode */ 3241 if (num_ahg == 1) 3242 mode = SDMA_AHG_APPLY_UPDATE1; 3243 else if (num_ahg <= 5) 3244 mode = SDMA_AHG_APPLY_UPDATE2; 3245 else 3246 mode = SDMA_AHG_APPLY_UPDATE3; 3247 tx->num_desc++; 3248 /* initialize to consumed descriptors to zero */ 3249 switch (mode) { 3250 case SDMA_AHG_APPLY_UPDATE3: 3251 tx->num_desc++; 3252 tx->descs[2].qw[0] = 0; 3253 tx->descs[2].qw[1] = 0; 3254 fallthrough; 3255 case SDMA_AHG_APPLY_UPDATE2: 3256 tx->num_desc++; 3257 tx->descs[1].qw[0] = 0; 3258 tx->descs[1].qw[1] = 0; 3259 break; 3260 } 3261 ahg_hlen >>= 2; 3262 tx->descs[0].qw[1] |= 3263 (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK) 3264 << SDMA_DESC1_HEADER_INDEX_SHIFT) | 3265 (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK) 3266 << SDMA_DESC1_HEADER_DWS_SHIFT) | 3267 (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK) 3268 << SDMA_DESC1_HEADER_MODE_SHIFT) | 3269 (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK) 3270 << SDMA_DESC1_HEADER_UPDATE1_SHIFT); 3271 for (i = 0; i < (num_ahg - 1); i++) { 3272 if (!shift && !(i & 2)) 3273 desc++; 3274 tx->descs[desc].qw[!!(i & 2)] |= 3275 (((u64)ahg[i + 1]) 3276 << shift); 3277 shift = (shift + 32) & 63; 3278 } 3279 } 3280 3281 /** 3282 * sdma_ahg_alloc - allocate an AHG entry 3283 * @sde: engine to allocate from 3284 * 3285 * Return: 3286 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled, 3287 * -ENOSPC if an entry is not available 3288 */ 3289 int sdma_ahg_alloc(struct sdma_engine *sde) 3290 { 3291 int nr; 3292 int oldbit; 3293 3294 if (!sde) { 3295 trace_hfi1_ahg_allocate(sde, -EINVAL); 3296 return -EINVAL; 3297 } 3298 while (1) { 3299 nr = ffz(READ_ONCE(sde->ahg_bits)); 3300 if (nr > 31) { 3301 trace_hfi1_ahg_allocate(sde, -ENOSPC); 3302 return -ENOSPC; 3303 } 3304 oldbit = test_and_set_bit(nr, &sde->ahg_bits); 3305 if (!oldbit) 3306 break; 3307 cpu_relax(); 3308 } 3309 trace_hfi1_ahg_allocate(sde, nr); 3310 
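/* success: nr is the newly reserved AHG index (0-31) */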
return nr; 3311 } 3312 3313 /** 3314 * sdma_ahg_free - free an AHG entry 3315 * @sde: engine to return AHG entry 3316 * @ahg_index: index to free 3317 * 3318 * This routine frees the indicated AHG entry. 3319 */ 3320 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index) 3321 { 3322 if (!sde) 3323 return; 3324 trace_hfi1_ahg_deallocate(sde, ahg_index); 3325 if (ahg_index < 0 || ahg_index > 31) 3326 return; 3327 clear_bit(ahg_index, &sde->ahg_bits); 3328 } 3329 3330 /* 3331 * SPC freeze handling for SDMA engines. Called when the driver knows 3332 * the SPC is going into a freeze but before the freeze is fully 3333 * settled. Generally an error interrupt. 3334 * 3335 * This event will pull the engine out of running so no more entries can be 3336 * added to the engine's queue. 3337 */ 3338 void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down) 3339 { 3340 int i; 3341 enum sdma_events event = link_down ? sdma_event_e85_link_down : 3342 sdma_event_e80_hw_freeze; 3343 3344 /* set up the wait but do not wait here */ 3345 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); 3346 3347 /* tell all engines to stop running and wait */ 3348 for (i = 0; i < dd->num_sdma; i++) 3349 sdma_process_event(&dd->per_sdma[i], event); 3350 3351 /* sdma_freeze() will wait for all engines to have stopped */ 3352 } 3353 3354 /* 3355 * SPC freeze handling for SDMA engines. Called when the driver knows 3356 * the SPC is fully frozen. 3357 */ 3358 void sdma_freeze(struct hfi1_devdata *dd) 3359 { 3360 int i; 3361 int ret; 3362 3363 /* 3364 * Make sure all engines have moved out of the running state before 3365 * continuing. 3366 */ 3367 ret = wait_event_interruptible(dd->sdma_unfreeze_wq, 3368 atomic_read(&dd->sdma_unfreeze_count) <= 3369 0); 3370 /* interrupted or count is negative, then unloading - just exit */ 3371 if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0) 3372 return; 3373 3374 /* set up the count for the next wait */ 3375 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); 3376 3377 /* tell all engines that the SPC is frozen, they can start cleaning */ 3378 for (i = 0; i < dd->num_sdma; i++) 3379 sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen); 3380 3381 /* 3382 * Wait for everyone to finish software clean before exiting. The 3383 * software clean will read engine CSRs, so must be completed before 3384 * the next step, which will clear the engine CSRs. 3385 */ 3386 (void)wait_event_interruptible(dd->sdma_unfreeze_wq, 3387 atomic_read(&dd->sdma_unfreeze_count) <= 0); 3388 /* no need to check results - done no matter what */ 3389 } 3390 3391 /* 3392 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen. 3393 * 3394 * The SPC freeze acts like an SDMA halt and a hardware clean combined. All 3395 * that is left is a software clean. We could do it after the SPC is fully 3396 * frozen, but then we'd have to add another state to wait for the unfreeze. 3397 * Instead, just defer the software clean until the unfreeze step.
3398 */ 3399 void sdma_unfreeze(struct hfi1_devdata *dd) 3400 { 3401 int i; 3402 3403 /* tell all engines start freeze clean up */ 3404 for (i = 0; i < dd->num_sdma; i++) 3405 sdma_process_event(&dd->per_sdma[i], 3406 sdma_event_e82_hw_unfreeze); 3407 } 3408 3409 /** 3410 * _sdma_engine_progress_schedule() - schedule progress on engine 3411 * @sde: sdma_engine to schedule progress 3412 * 3413 */ 3414 void _sdma_engine_progress_schedule( 3415 struct sdma_engine *sde) 3416 { 3417 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); 3418 /* assume we have selected a good cpu */ 3419 write_csr(sde->dd, 3420 CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), 3421 sde->progress_mask); 3422 } 3423
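/*
 * Illustrative sketch, not a driver code path: per the comments on
 * sdma_freeze_notify(), sdma_freeze() and sdma_unfreeze() above, the
 * SPC freeze helpers are expected to be called in this order by the
 * chip error handling; the SPC "settle" and "unfreeze" steps themselves
 * are chip specific and only hinted at here.
 *
 *	sdma_freeze_notify(dd, link_down);  -- engines leave the running state
 *	(wait for the SPC freeze to fully settle)
 *	sdma_freeze(dd);                    -- engines perform their software clean
 *	(unfreeze the SPC)
 *	sdma_unfreeze(dd);                  -- engines restart via sdma_hw_start_up()
 */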