1 /* 2 * linux/drivers/s390/cio/qdio_main.c 3 * 4 * Linux for s390 qdio support, buffer handling, qdio API and module support. 5 * 6 * Copyright 2000,2008 IBM Corp. 7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 8 * Jan Glauber <jang@linux.vnet.ibm.com> 9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> 10 */ 11 #include <linux/module.h> 12 #include <linux/init.h> 13 #include <linux/kernel.h> 14 #include <linux/timer.h> 15 #include <linux/delay.h> 16 #include <asm/atomic.h> 17 #include <asm/debug.h> 18 #include <asm/qdio.h> 19 20 #include "cio.h" 21 #include "css.h" 22 #include "device.h" 23 #include "qdio.h" 24 #include "qdio_debug.h" 25 #include "qdio_perf.h" 26 27 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ 28 "Jan Glauber <jang@linux.vnet.ibm.com>"); 29 MODULE_DESCRIPTION("QDIO base support"); 30 MODULE_LICENSE("GPL"); 31 32 static inline int do_siga_sync(struct subchannel_id schid, 33 unsigned int out_mask, unsigned int in_mask) 34 { 35 register unsigned long __fc asm ("0") = 2; 36 register struct subchannel_id __schid asm ("1") = schid; 37 register unsigned long out asm ("2") = out_mask; 38 register unsigned long in asm ("3") = in_mask; 39 int cc; 40 41 asm volatile( 42 " siga 0\n" 43 " ipm %0\n" 44 " srl %0,28\n" 45 : "=d" (cc) 46 : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc"); 47 return cc; 48 } 49 50 static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) 51 { 52 register unsigned long __fc asm ("0") = 1; 53 register struct subchannel_id __schid asm ("1") = schid; 54 register unsigned long __mask asm ("2") = mask; 55 int cc; 56 57 asm volatile( 58 " siga 0\n" 59 " ipm %0\n" 60 " srl %0,28\n" 61 : "=d" (cc) 62 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory"); 63 return cc; 64 } 65 66 /** 67 * do_siga_output - perform SIGA-w/wt function 68 * @schid: subchannel id or in case of QEBSM the subchannel token 69 * @mask: which output queues to process 70 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer 71 * @fc: function code to perform 72 * 73 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION. 74 * Note: For IQDC unicast queues only the highest priority queue is processed. 75 */ 76 static inline int do_siga_output(unsigned long schid, unsigned long mask, 77 u32 *bb, unsigned int fc) 78 { 79 register unsigned long __fc asm("0") = fc; 80 register unsigned long __schid asm("1") = schid; 81 register unsigned long __mask asm("2") = mask; 82 int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION; 83 84 asm volatile( 85 " siga 0\n" 86 "0: ipm %0\n" 87 " srl %0,28\n" 88 "1:\n" 89 EX_TABLE(0b, 1b) 90 : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) 91 : : "cc", "memory"); 92 *bb = ((unsigned int) __fc) >> 31; 93 return cc; 94 } 95 96 static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) 97 { 98 char dbf_text[15]; 99 100 /* all done or next buffer state different */ 101 if (ccq == 0 || ccq == 32) 102 return 0; 103 /* not all buffers processed */ 104 if (ccq == 96 || ccq == 97) 105 return 1; 106 /* notify devices immediately */ 107 sprintf(dbf_text, "%d", ccq); 108 QDIO_DBF_TEXT2(1, trace, dbf_text); 109 return -EIO; 110 } 111 112 /** 113 * qdio_do_eqbs - extract buffer states for QEBSM 114 * @q: queue to manipulate 115 * @state: state of the extracted buffers 116 * @start: buffer number to start at 117 * @count: count of buffers to examine 118 * 119 * Returns the number of successfull extracted equal buffer states. 120 * Stops processing if a state is different from the last buffers state. 121 */ 122 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 123 int start, int count) 124 { 125 unsigned int ccq = 0; 126 int tmp_count = count, tmp_start = start; 127 int nr = q->nr; 128 int rc; 129 char dbf_text[15]; 130 131 BUG_ON(!q->irq_ptr->sch_token); 132 133 if (!q->is_input_q) 134 nr += q->irq_ptr->nr_input_qs; 135 again: 136 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); 137 rc = qdio_check_ccq(q, ccq); 138 139 /* At least one buffer was processed, return and extract the remaining 140 * buffers later. 141 */ 142 if ((ccq == 96) && (count != tmp_count)) 143 return (count - tmp_count); 144 if (rc == 1) { 145 QDIO_DBF_TEXT5(1, trace, "eqAGAIN"); 146 goto again; 147 } 148 149 if (rc < 0) { 150 QDIO_DBF_TEXT2(1, trace, "eqberr"); 151 sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr); 152 QDIO_DBF_TEXT2(1, trace, dbf_text); 153 q->handler(q->irq_ptr->cdev, 154 QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 155 0, -1, -1, q->irq_ptr->int_parm); 156 return 0; 157 } 158 return count - tmp_count; 159 } 160 161 /** 162 * qdio_do_sqbs - set buffer states for QEBSM 163 * @q: queue to manipulate 164 * @state: new state of the buffers 165 * @start: first buffer number to change 166 * @count: how many buffers to change 167 * 168 * Returns the number of successfully changed buffers. 169 * Does retrying until the specified count of buffer states is set or an 170 * error occurs. 171 */ 172 static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, 173 int count) 174 { 175 unsigned int ccq = 0; 176 int tmp_count = count, tmp_start = start; 177 int nr = q->nr; 178 int rc; 179 char dbf_text[15]; 180 181 BUG_ON(!q->irq_ptr->sch_token); 182 183 if (!q->is_input_q) 184 nr += q->irq_ptr->nr_input_qs; 185 again: 186 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); 187 rc = qdio_check_ccq(q, ccq); 188 if (rc == 1) { 189 QDIO_DBF_TEXT5(1, trace, "sqAGAIN"); 190 goto again; 191 } 192 if (rc < 0) { 193 QDIO_DBF_TEXT3(1, trace, "sqberr"); 194 sprintf(dbf_text, "%2x,%2x", count, tmp_count); 195 QDIO_DBF_TEXT3(1, trace, dbf_text); 196 sprintf(dbf_text, "%d,%d", ccq, nr); 197 QDIO_DBF_TEXT3(1, trace, dbf_text); 198 199 q->handler(q->irq_ptr->cdev, 200 QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 201 0, -1, -1, q->irq_ptr->int_parm); 202 return 0; 203 } 204 WARN_ON(tmp_count); 205 return count - tmp_count; 206 } 207 208 /* returns number of examined buffers and their common state in *state */ 209 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, 210 unsigned char *state, unsigned int count) 211 { 212 unsigned char __state = 0; 213 int i; 214 215 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); 216 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); 217 218 if (is_qebsm(q)) 219 return qdio_do_eqbs(q, state, bufnr, count); 220 221 for (i = 0; i < count; i++) { 222 if (!__state) 223 __state = q->slsb.val[bufnr]; 224 else if (q->slsb.val[bufnr] != __state) 225 break; 226 bufnr = next_buf(bufnr); 227 } 228 *state = __state; 229 return i; 230 } 231 232 inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, 233 unsigned char *state) 234 { 235 return get_buf_states(q, bufnr, state, 1); 236 } 237 238 /* wrap-around safe setting of slsb states, returns number of changed buffers */ 239 static inline int set_buf_states(struct qdio_q *q, int bufnr, 240 unsigned char state, int count) 241 { 242 int i; 243 244 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); 245 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); 246 247 if (is_qebsm(q)) 248 return qdio_do_sqbs(q, state, bufnr, count); 249 250 for (i = 0; i < count; i++) { 251 xchg(&q->slsb.val[bufnr], state); 252 bufnr = next_buf(bufnr); 253 } 254 return count; 255 } 256 257 static inline int set_buf_state(struct qdio_q *q, int bufnr, 258 unsigned char state) 259 { 260 return set_buf_states(q, bufnr, state, 1); 261 } 262 263 /* set slsb states to initial state */ 264 void qdio_init_buf_states(struct qdio_irq *irq_ptr) 265 { 266 struct qdio_q *q; 267 int i; 268 269 for_each_input_queue(irq_ptr, q, i) 270 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, 271 QDIO_MAX_BUFFERS_PER_Q); 272 for_each_output_queue(irq_ptr, q, i) 273 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, 274 QDIO_MAX_BUFFERS_PER_Q); 275 } 276 277 static int qdio_siga_sync(struct qdio_q *q, unsigned int output, 278 unsigned int input) 279 { 280 int cc; 281 282 if (!need_siga_sync(q)) 283 return 0; 284 285 qdio_perf_stat_inc(&perf_stats.siga_sync); 286 287 cc = do_siga_sync(q->irq_ptr->schid, output, input); 288 if (cc) { 289 QDIO_DBF_TEXT4(0, trace, "sigasync"); 290 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); 291 QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); 292 } 293 return cc; 294 } 295 296 inline int qdio_siga_sync_q(struct qdio_q *q) 297 { 298 if (q->is_input_q) 299 return qdio_siga_sync(q, 0, q->mask); 300 else 301 return qdio_siga_sync(q, q->mask, 0); 302 } 303 304 static inline int qdio_siga_sync_out(struct qdio_q *q) 305 { 306 return qdio_siga_sync(q, ~0U, 0); 307 } 308 309 static inline int qdio_siga_sync_all(struct qdio_q *q) 310 { 311 return qdio_siga_sync(q, ~0U, ~0U); 312 } 313 314 static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit) 315 { 316 unsigned int fc = 0; 317 unsigned long schid; 318 319 if (q->u.out.use_enh_siga) { 320 fc = 3; 321 } 322 if (!is_qebsm(q)) 323 schid = *((u32 *)&q->irq_ptr->schid); 324 else { 325 schid = q->irq_ptr->sch_token; 326 fc |= 0x80; 327 } 328 return do_siga_output(schid, q->mask, busy_bit, fc); 329 } 330 331 static int qdio_siga_output(struct qdio_q *q) 332 { 333 int cc; 334 u32 busy_bit; 335 u64 start_time = 0; 336 char dbf_text[15]; 337 338 QDIO_DBF_TEXT5(0, trace, "sigaout"); 339 QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); 340 341 qdio_perf_stat_inc(&perf_stats.siga_out); 342 again: 343 cc = qdio_do_siga_output(q, &busy_bit); 344 if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { 345 sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr); 346 QDIO_DBF_TEXT3(0, trace, dbf_text); 347 348 if (!start_time) 349 start_time = get_usecs(); 350 else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) 351 goto again; 352 } 353 354 if (cc == 2 && busy_bit) 355 cc |= QDIO_ERROR_SIGA_BUSY; 356 if (cc) 357 QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); 358 return cc; 359 } 360 361 static inline int qdio_siga_input(struct qdio_q *q) 362 { 363 int cc; 364 365 QDIO_DBF_TEXT4(0, trace, "sigain"); 366 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); 367 368 qdio_perf_stat_inc(&perf_stats.siga_in); 369 370 cc = do_siga_input(q->irq_ptr->schid, q->mask); 371 if (cc) 372 QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); 373 return cc; 374 } 375 376 /* called from thinint inbound handler */ 377 void qdio_sync_after_thinint(struct qdio_q *q) 378 { 379 if (pci_out_supported(q)) { 380 if (need_siga_sync_thinint(q)) 381 qdio_siga_sync_all(q); 382 else if (need_siga_sync_out_thinint(q)) 383 qdio_siga_sync_out(q); 384 } else 385 qdio_siga_sync_q(q); 386 } 387 388 inline void qdio_stop_polling(struct qdio_q *q) 389 { 390 spin_lock_bh(&q->u.in.lock); 391 if (!q->u.in.polling) { 392 spin_unlock_bh(&q->u.in.lock); 393 return; 394 } 395 q->u.in.polling = 0; 396 qdio_perf_stat_inc(&perf_stats.debug_stop_polling); 397 398 /* show the card that we are not polling anymore */ 399 set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); 400 spin_unlock_bh(&q->u.in.lock); 401 } 402 403 static void announce_buffer_error(struct qdio_q *q) 404 { 405 char dbf_text[15]; 406 407 if (q->is_input_q) 408 QDIO_DBF_TEXT3(1, trace, "inperr"); 409 else 410 QDIO_DBF_TEXT3(0, trace, "outperr"); 411 412 sprintf(dbf_text, "%x-%x-%x", q->first_to_check, 413 q->sbal[q->first_to_check]->element[14].flags, 414 q->sbal[q->first_to_check]->element[15].flags); 415 QDIO_DBF_TEXT3(1, trace, dbf_text); 416 QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256); 417 418 q->qdio_error = QDIO_ERROR_SLSB_STATE; 419 } 420 421 static int get_inbound_buffer_frontier(struct qdio_q *q) 422 { 423 int count, stop; 424 unsigned char state; 425 426 /* 427 * If we still poll don't update last_move_ftc, keep the 428 * previously ACK buffer there. 429 */ 430 if (!q->u.in.polling) 431 q->last_move_ftc = q->first_to_check; 432 433 /* 434 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 435 * would return 0. 436 */ 437 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); 438 stop = add_buf(q->first_to_check, count); 439 440 /* 441 * No siga sync here, as a PCI or we after a thin interrupt 442 * will sync the queues. 443 */ 444 445 /* need to set count to 1 for non-qebsm */ 446 if (!is_qebsm(q)) 447 count = 1; 448 449 check_next: 450 if (q->first_to_check == stop) 451 goto out; 452 453 count = get_buf_states(q, q->first_to_check, &state, count); 454 if (!count) 455 goto out; 456 457 switch (state) { 458 case SLSB_P_INPUT_PRIMED: 459 QDIO_DBF_TEXT5(0, trace, "inptprim"); 460 461 /* 462 * Only ACK the first buffer. The ACK will be removed in 463 * qdio_stop_polling. 464 */ 465 if (q->u.in.polling) 466 state = SLSB_P_INPUT_NOT_INIT; 467 else { 468 q->u.in.polling = 1; 469 state = SLSB_P_INPUT_ACK; 470 } 471 set_buf_state(q, q->first_to_check, state); 472 473 /* 474 * Need to change all PRIMED buffers to NOT_INIT, otherwise 475 * we're loosing initiative in the thinint code. 476 */ 477 if (count > 1) 478 set_buf_states(q, next_buf(q->first_to_check), 479 SLSB_P_INPUT_NOT_INIT, count - 1); 480 481 /* 482 * No siga-sync needed for non-qebsm here, as the inbound queue 483 * will be synced on the next siga-r, resp. 484 * tiqdio_is_inbound_q_done will do the siga-sync. 485 */ 486 q->first_to_check = add_buf(q->first_to_check, count); 487 atomic_sub(count, &q->nr_buf_used); 488 goto check_next; 489 case SLSB_P_INPUT_ERROR: 490 announce_buffer_error(q); 491 /* process the buffer, the upper layer will take care of it */ 492 q->first_to_check = add_buf(q->first_to_check, count); 493 atomic_sub(count, &q->nr_buf_used); 494 break; 495 case SLSB_CU_INPUT_EMPTY: 496 case SLSB_P_INPUT_NOT_INIT: 497 case SLSB_P_INPUT_ACK: 498 QDIO_DBF_TEXT5(0, trace, "inpnipro"); 499 break; 500 default: 501 BUG(); 502 } 503 out: 504 QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int)); 505 return q->first_to_check; 506 } 507 508 int qdio_inbound_q_moved(struct qdio_q *q) 509 { 510 int bufnr; 511 512 bufnr = get_inbound_buffer_frontier(q); 513 514 if ((bufnr != q->last_move_ftc) || q->qdio_error) { 515 if (!need_siga_sync(q) && !pci_out_supported(q)) 516 q->u.in.timestamp = get_usecs(); 517 518 QDIO_DBF_TEXT4(0, trace, "inhasmvd"); 519 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); 520 return 1; 521 } else 522 return 0; 523 } 524 525 static int qdio_inbound_q_done(struct qdio_q *q) 526 { 527 unsigned char state; 528 #ifdef CONFIG_QDIO_DEBUG 529 char dbf_text[15]; 530 #endif 531 532 if (!atomic_read(&q->nr_buf_used)) 533 return 1; 534 535 /* 536 * We need that one for synchronization with the adapter, as it 537 * does a kind of PCI avoidance. 538 */ 539 qdio_siga_sync_q(q); 540 541 get_buf_state(q, q->first_to_check, &state); 542 if (state == SLSB_P_INPUT_PRIMED) 543 /* we got something to do */ 544 return 0; 545 546 /* on VM, we don't poll, so the q is always done here */ 547 if (need_siga_sync(q) || pci_out_supported(q)) 548 return 1; 549 550 /* 551 * At this point we know, that inbound first_to_check 552 * has (probably) not moved (see qdio_inbound_processing). 553 */ 554 if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 555 #ifdef CONFIG_QDIO_DEBUG 556 QDIO_DBF_TEXT4(0, trace, "inqisdon"); 557 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); 558 sprintf(dbf_text, "pf%02x", q->first_to_check); 559 QDIO_DBF_TEXT4(0, trace, dbf_text); 560 #endif /* CONFIG_QDIO_DEBUG */ 561 return 1; 562 } else { 563 #ifdef CONFIG_QDIO_DEBUG 564 QDIO_DBF_TEXT4(0, trace, "inqisntd"); 565 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); 566 sprintf(dbf_text, "pf%02x", q->first_to_check); 567 QDIO_DBF_TEXT4(0, trace, dbf_text); 568 #endif /* CONFIG_QDIO_DEBUG */ 569 return 0; 570 } 571 } 572 573 void qdio_kick_inbound_handler(struct qdio_q *q) 574 { 575 int count, start, end; 576 #ifdef CONFIG_QDIO_DEBUG 577 char dbf_text[15]; 578 #endif 579 580 qdio_perf_stat_inc(&perf_stats.inbound_handler); 581 582 start = q->first_to_kick; 583 end = q->first_to_check; 584 if (end >= start) 585 count = end - start; 586 else 587 count = end + QDIO_MAX_BUFFERS_PER_Q - start; 588 589 #ifdef CONFIG_QDIO_DEBUG 590 sprintf(dbf_text, "s=%2xc=%2x", start, count); 591 QDIO_DBF_TEXT4(0, trace, dbf_text); 592 #endif /* CONFIG_QDIO_DEBUG */ 593 594 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) 595 return; 596 597 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, 598 start, count, q->irq_ptr->int_parm); 599 600 /* for the next time */ 601 q->first_to_kick = q->first_to_check; 602 q->qdio_error = 0; 603 } 604 605 static void __qdio_inbound_processing(struct qdio_q *q) 606 { 607 qdio_perf_stat_inc(&perf_stats.tasklet_inbound); 608 again: 609 if (!qdio_inbound_q_moved(q)) 610 return; 611 612 qdio_kick_inbound_handler(q); 613 614 if (!qdio_inbound_q_done(q)) 615 /* means poll time is not yet over */ 616 goto again; 617 618 qdio_stop_polling(q); 619 /* 620 * We need to check again to not lose initiative after 621 * resetting the ACK state. 622 */ 623 if (!qdio_inbound_q_done(q)) 624 goto again; 625 } 626 627 /* inbound tasklet */ 628 void qdio_inbound_processing(unsigned long data) 629 { 630 struct qdio_q *q = (struct qdio_q *)data; 631 __qdio_inbound_processing(q); 632 } 633 634 static int get_outbound_buffer_frontier(struct qdio_q *q) 635 { 636 int count, stop; 637 unsigned char state; 638 639 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || 640 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) 641 qdio_siga_sync_q(q); 642 643 /* 644 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 645 * would return 0. 646 */ 647 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); 648 stop = add_buf(q->first_to_check, count); 649 650 /* need to set count to 1 for non-qebsm */ 651 if (!is_qebsm(q)) 652 count = 1; 653 654 check_next: 655 if (q->first_to_check == stop) 656 return q->first_to_check; 657 658 count = get_buf_states(q, q->first_to_check, &state, count); 659 if (!count) 660 return q->first_to_check; 661 662 switch (state) { 663 case SLSB_P_OUTPUT_EMPTY: 664 /* the adapter got it */ 665 QDIO_DBF_TEXT5(0, trace, "outpempt"); 666 667 atomic_sub(count, &q->nr_buf_used); 668 q->first_to_check = add_buf(q->first_to_check, count); 669 /* 670 * We fetch all buffer states at once. get_buf_states may 671 * return count < stop. For QEBSM we do not loop. 672 */ 673 if (is_qebsm(q)) 674 break; 675 goto check_next; 676 case SLSB_P_OUTPUT_ERROR: 677 announce_buffer_error(q); 678 /* process the buffer, the upper layer will take care of it */ 679 q->first_to_check = add_buf(q->first_to_check, count); 680 atomic_sub(count, &q->nr_buf_used); 681 break; 682 case SLSB_CU_OUTPUT_PRIMED: 683 /* the adapter has not fetched the output yet */ 684 QDIO_DBF_TEXT5(0, trace, "outpprim"); 685 break; 686 case SLSB_P_OUTPUT_NOT_INIT: 687 case SLSB_P_OUTPUT_HALTED: 688 break; 689 default: 690 BUG(); 691 } 692 return q->first_to_check; 693 } 694 695 /* all buffers processed? */ 696 static inline int qdio_outbound_q_done(struct qdio_q *q) 697 { 698 return atomic_read(&q->nr_buf_used) == 0; 699 } 700 701 static inline int qdio_outbound_q_moved(struct qdio_q *q) 702 { 703 int bufnr; 704 705 bufnr = get_outbound_buffer_frontier(q); 706 707 if ((bufnr != q->last_move_ftc) || q->qdio_error) { 708 q->last_move_ftc = bufnr; 709 QDIO_DBF_TEXT4(0, trace, "oqhasmvd"); 710 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); 711 return 1; 712 } else 713 return 0; 714 } 715 716 /* 717 * VM could present us cc=2 and busy bit set on SIGA-write 718 * during reconfiguration of their Guest LAN (only in iqdio mode, 719 * otherwise qdio is asynchronous and cc=2 and busy bit there will take 720 * the queues down immediately). 721 * 722 * Therefore qdio_siga_output will try for a short time constantly, 723 * if such a condition occurs. If it doesn't change, it will 724 * increase the busy_siga_counter and save the timestamp, and 725 * schedule the queue for later processing. qdio_outbound_processing 726 * will check out the counter. If non-zero, it will call qdio_kick_outbound_q 727 * as often as the value of the counter. This will attempt further SIGA 728 * instructions. For each successful SIGA, the counter is 729 * decreased, for failing SIGAs the counter remains the same, after 730 * all. After some time of no movement, qdio_kick_outbound_q will 731 * finally fail and reflect corresponding error codes to call 732 * the upper layer module and have it take the queues down. 733 * 734 * Note that this is a change from the original HiperSockets design 735 * (saying cc=2 and busy bit means take the queues down), but in 736 * these days Guest LAN didn't exist... excessive cc=2 with busy bit 737 * conditions will still take the queues down, but the threshold is 738 * higher due to the Guest LAN environment. 739 * 740 * Called from outbound tasklet and do_QDIO handler. 741 */ 742 static void qdio_kick_outbound_q(struct qdio_q *q) 743 { 744 int rc; 745 #ifdef CONFIG_QDIO_DEBUG 746 char dbf_text[15]; 747 748 QDIO_DBF_TEXT5(0, trace, "kickoutq"); 749 QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); 750 #endif /* CONFIG_QDIO_DEBUG */ 751 752 if (!need_siga_out(q)) 753 return; 754 755 rc = qdio_siga_output(q); 756 switch (rc) { 757 case 0: 758 /* TODO: improve error handling for CC=0 case */ 759 #ifdef CONFIG_QDIO_DEBUG 760 if (q->u.out.timestamp) { 761 QDIO_DBF_TEXT3(0, trace, "cc2reslv"); 762 sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, 763 q->nr, 764 atomic_read(&q->u.out.busy_siga_counter)); 765 QDIO_DBF_TEXT3(0, trace, dbf_text); 766 } 767 #endif /* CONFIG_QDIO_DEBUG */ 768 /* went smooth this time, reset timestamp */ 769 q->u.out.timestamp = 0; 770 break; 771 /* cc=2 and busy bit */ 772 case (2 | QDIO_ERROR_SIGA_BUSY): 773 atomic_inc(&q->u.out.busy_siga_counter); 774 775 /* if the last siga was successful, save timestamp here */ 776 if (!q->u.out.timestamp) 777 q->u.out.timestamp = get_usecs(); 778 779 /* if we're in time, don't touch qdio_error */ 780 if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) { 781 tasklet_schedule(&q->tasklet); 782 break; 783 } 784 QDIO_DBF_TEXT2(0, trace, "cc2REPRT"); 785 #ifdef CONFIG_QDIO_DEBUG 786 sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, 787 atomic_read(&q->u.out.busy_siga_counter)); 788 QDIO_DBF_TEXT3(0, trace, dbf_text); 789 #endif /* CONFIG_QDIO_DEBUG */ 790 default: 791 /* for plain cc=1, 2 or 3 */ 792 q->qdio_error = rc; 793 } 794 } 795 796 static void qdio_kick_outbound_handler(struct qdio_q *q) 797 { 798 int start, end, count; 799 #ifdef CONFIG_QDIO_DEBUG 800 char dbf_text[15]; 801 #endif 802 803 start = q->first_to_kick; 804 end = q->last_move_ftc; 805 if (end >= start) 806 count = end - start; 807 else 808 count = end + QDIO_MAX_BUFFERS_PER_Q - start; 809 810 #ifdef CONFIG_QDIO_DEBUG 811 QDIO_DBF_TEXT4(0, trace, "kickouth"); 812 QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); 813 814 sprintf(dbf_text, "s=%2xc=%2x", start, count); 815 QDIO_DBF_TEXT4(0, trace, dbf_text); 816 #endif /* CONFIG_QDIO_DEBUG */ 817 818 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) 819 return; 820 821 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, 822 q->irq_ptr->int_parm); 823 824 /* for the next time: */ 825 q->first_to_kick = q->last_move_ftc; 826 q->qdio_error = 0; 827 } 828 829 static void __qdio_outbound_processing(struct qdio_q *q) 830 { 831 int siga_attempts; 832 833 qdio_perf_stat_inc(&perf_stats.tasklet_outbound); 834 835 /* see comment in qdio_kick_outbound_q */ 836 siga_attempts = atomic_read(&q->u.out.busy_siga_counter); 837 while (siga_attempts--) { 838 atomic_dec(&q->u.out.busy_siga_counter); 839 qdio_kick_outbound_q(q); 840 } 841 842 BUG_ON(atomic_read(&q->nr_buf_used) < 0); 843 844 if (qdio_outbound_q_moved(q)) 845 qdio_kick_outbound_handler(q); 846 847 if (queue_type(q) == QDIO_ZFCP_QFMT) { 848 if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) 849 tasklet_schedule(&q->tasklet); 850 return; 851 } 852 853 /* bail out for HiperSockets unicast queues */ 854 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) 855 return; 856 857 if ((queue_type(q) == QDIO_IQDIO_QFMT) && 858 (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) { 859 tasklet_schedule(&q->tasklet); 860 return; 861 } 862 863 if (q->u.out.pci_out_enabled) 864 return; 865 866 /* 867 * Now we know that queue type is either qeth without pci enabled 868 * or HiperSockets multicast. Make sure buffer switch from PRIMED to 869 * EMPTY is noticed and outbound_handler is called after some time. 870 */ 871 if (qdio_outbound_q_done(q)) 872 del_timer(&q->u.out.timer); 873 else { 874 if (!timer_pending(&q->u.out.timer)) { 875 mod_timer(&q->u.out.timer, jiffies + 10 * HZ); 876 qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer); 877 } 878 } 879 } 880 881 /* outbound tasklet */ 882 void qdio_outbound_processing(unsigned long data) 883 { 884 struct qdio_q *q = (struct qdio_q *)data; 885 __qdio_outbound_processing(q); 886 } 887 888 void qdio_outbound_timer(unsigned long data) 889 { 890 struct qdio_q *q = (struct qdio_q *)data; 891 tasklet_schedule(&q->tasklet); 892 } 893 894 /* called from thinint inbound tasklet */ 895 void qdio_check_outbound_after_thinint(struct qdio_q *q) 896 { 897 struct qdio_q *out; 898 int i; 899 900 if (!pci_out_supported(q)) 901 return; 902 903 for_each_output_queue(q->irq_ptr, out, i) 904 if (!qdio_outbound_q_done(out)) 905 tasklet_schedule(&out->tasklet); 906 } 907 908 static inline void qdio_set_state(struct qdio_irq *irq_ptr, 909 enum qdio_irq_states state) 910 { 911 #ifdef CONFIG_QDIO_DEBUG 912 char dbf_text[15]; 913 914 QDIO_DBF_TEXT5(0, trace, "newstate"); 915 sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state); 916 QDIO_DBF_TEXT5(0, trace, dbf_text); 917 #endif /* CONFIG_QDIO_DEBUG */ 918 919 irq_ptr->state = state; 920 mb(); 921 } 922 923 static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) 924 { 925 char dbf_text[15]; 926 927 if (irb->esw.esw0.erw.cons) { 928 sprintf(dbf_text, "sens%4x", schid.sch_no); 929 QDIO_DBF_TEXT2(1, trace, dbf_text); 930 QDIO_DBF_HEX0(0, trace, irb, 64); 931 QDIO_DBF_HEX0(0, trace, irb->ecw, 64); 932 } 933 } 934 935 /* PCI interrupt handler */ 936 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) 937 { 938 int i; 939 struct qdio_q *q; 940 941 qdio_perf_stat_inc(&perf_stats.pci_int); 942 943 for_each_input_queue(irq_ptr, q, i) 944 tasklet_schedule(&q->tasklet); 945 946 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 947 return; 948 949 for_each_output_queue(irq_ptr, q, i) { 950 if (qdio_outbound_q_done(q)) 951 continue; 952 953 if (!siga_syncs_out_pci(q)) 954 qdio_siga_sync_q(q); 955 956 tasklet_schedule(&q->tasklet); 957 } 958 } 959 960 static void qdio_handle_activate_check(struct ccw_device *cdev, 961 unsigned long intparm, int cstat, int dstat) 962 { 963 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 964 struct qdio_q *q; 965 char dbf_text[15]; 966 967 QDIO_DBF_TEXT2(1, trace, "ick2"); 968 sprintf(dbf_text, "%s", dev_name(&cdev->dev)); 969 QDIO_DBF_TEXT2(1, trace, dbf_text); 970 QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int)); 971 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); 972 QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); 973 974 if (irq_ptr->nr_input_qs) { 975 q = irq_ptr->input_qs[0]; 976 } else if (irq_ptr->nr_output_qs) { 977 q = irq_ptr->output_qs[0]; 978 } else { 979 dump_stack(); 980 goto no_handler; 981 } 982 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 983 0, -1, -1, irq_ptr->int_parm); 984 no_handler: 985 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 986 } 987 988 static void qdio_call_shutdown(struct work_struct *work) 989 { 990 struct ccw_device_private *priv; 991 struct ccw_device *cdev; 992 993 priv = container_of(work, struct ccw_device_private, kick_work); 994 cdev = priv->cdev; 995 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 996 put_device(&cdev->dev); 997 } 998 999 static void qdio_int_error(struct ccw_device *cdev) 1000 { 1001 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1002 1003 switch (irq_ptr->state) { 1004 case QDIO_IRQ_STATE_INACTIVE: 1005 case QDIO_IRQ_STATE_CLEANUP: 1006 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 1007 break; 1008 case QDIO_IRQ_STATE_ESTABLISHED: 1009 case QDIO_IRQ_STATE_ACTIVE: 1010 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 1011 if (get_device(&cdev->dev)) { 1012 /* Can't call shutdown from interrupt context. */ 1013 PREPARE_WORK(&cdev->private->kick_work, 1014 qdio_call_shutdown); 1015 queue_work(ccw_device_work, &cdev->private->kick_work); 1016 } 1017 break; 1018 default: 1019 WARN_ON(1); 1020 } 1021 wake_up(&cdev->private->wait_q); 1022 } 1023 1024 static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, 1025 int dstat) 1026 { 1027 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1028 1029 if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { 1030 QDIO_DBF_TEXT2(1, setup, "eq:ckcon"); 1031 goto error; 1032 } 1033 1034 if (!(dstat & DEV_STAT_DEV_END)) { 1035 QDIO_DBF_TEXT2(1, setup, "eq:no de"); 1036 goto error; 1037 } 1038 1039 if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { 1040 QDIO_DBF_TEXT2(1, setup, "eq:badio"); 1041 goto error; 1042 } 1043 return 0; 1044 error: 1045 QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); 1046 QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); 1047 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 1048 return 1; 1049 } 1050 1051 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, 1052 int dstat) 1053 { 1054 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1055 char dbf_text[15]; 1056 1057 sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no); 1058 QDIO_DBF_TEXT0(0, setup, dbf_text); 1059 QDIO_DBF_TEXT0(0, trace, dbf_text); 1060 1061 if (!qdio_establish_check_errors(cdev, cstat, dstat)) 1062 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); 1063 } 1064 1065 /* qdio interrupt handler */ 1066 void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, 1067 struct irb *irb) 1068 { 1069 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1070 int cstat, dstat; 1071 char dbf_text[15]; 1072 1073 qdio_perf_stat_inc(&perf_stats.qdio_int); 1074 1075 if (!intparm || !irq_ptr) { 1076 sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no); 1077 QDIO_DBF_TEXT2(1, setup, dbf_text); 1078 return; 1079 } 1080 1081 if (IS_ERR(irb)) { 1082 switch (PTR_ERR(irb)) { 1083 case -EIO: 1084 sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); 1085 QDIO_DBF_TEXT2(1, setup, dbf_text); 1086 return; 1087 case -ETIMEDOUT: 1088 sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no); 1089 QDIO_DBF_TEXT2(1, setup, dbf_text); 1090 qdio_int_error(cdev); 1091 return; 1092 default: 1093 WARN_ON(1); 1094 return; 1095 } 1096 } 1097 qdio_irq_check_sense(irq_ptr->schid, irb); 1098 1099 cstat = irb->scsw.cmd.cstat; 1100 dstat = irb->scsw.cmd.dstat; 1101 1102 switch (irq_ptr->state) { 1103 case QDIO_IRQ_STATE_INACTIVE: 1104 qdio_establish_handle_irq(cdev, cstat, dstat); 1105 break; 1106 1107 case QDIO_IRQ_STATE_CLEANUP: 1108 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1109 break; 1110 1111 case QDIO_IRQ_STATE_ESTABLISHED: 1112 case QDIO_IRQ_STATE_ACTIVE: 1113 if (cstat & SCHN_STAT_PCI) { 1114 qdio_int_handler_pci(irq_ptr); 1115 /* no state change so no need to wake up wait_q */ 1116 return; 1117 } 1118 if ((cstat & ~SCHN_STAT_PCI) || dstat) { 1119 qdio_handle_activate_check(cdev, intparm, cstat, 1120 dstat); 1121 break; 1122 } 1123 default: 1124 WARN_ON(1); 1125 } 1126 wake_up(&cdev->private->wait_q); 1127 } 1128 1129 /** 1130 * qdio_get_ssqd_desc - get qdio subchannel description 1131 * @cdev: ccw device to get description for 1132 * 1133 * Returns a pointer to the saved qdio subchannel description, 1134 * or NULL for not setup qdio devices. 1135 */ 1136 struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) 1137 { 1138 struct qdio_irq *irq_ptr; 1139 char dbf_text[15]; 1140 1141 sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no); 1142 QDIO_DBF_TEXT0(0, setup, dbf_text); 1143 1144 irq_ptr = cdev->private->qdio_data; 1145 if (!irq_ptr) 1146 return NULL; 1147 1148 return &irq_ptr->ssqd_desc; 1149 } 1150 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); 1151 1152 /** 1153 * qdio_cleanup - shutdown queues and free data structures 1154 * @cdev: associated ccw device 1155 * @how: use halt or clear to shutdown 1156 * 1157 * This function calls qdio_shutdown() for @cdev with method @how 1158 * and on success qdio_free() for @cdev. 1159 */ 1160 int qdio_cleanup(struct ccw_device *cdev, int how) 1161 { 1162 struct qdio_irq *irq_ptr; 1163 char dbf_text[15]; 1164 int rc; 1165 1166 sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no); 1167 QDIO_DBF_TEXT0(0, setup, dbf_text); 1168 1169 irq_ptr = cdev->private->qdio_data; 1170 if (!irq_ptr) 1171 return -ENODEV; 1172 1173 rc = qdio_shutdown(cdev, how); 1174 if (rc == 0) 1175 rc = qdio_free(cdev); 1176 return rc; 1177 } 1178 EXPORT_SYMBOL_GPL(qdio_cleanup); 1179 1180 static void qdio_shutdown_queues(struct ccw_device *cdev) 1181 { 1182 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1183 struct qdio_q *q; 1184 int i; 1185 1186 for_each_input_queue(irq_ptr, q, i) 1187 tasklet_disable(&q->tasklet); 1188 1189 for_each_output_queue(irq_ptr, q, i) { 1190 tasklet_disable(&q->tasklet); 1191 del_timer(&q->u.out.timer); 1192 } 1193 } 1194 1195 /** 1196 * qdio_shutdown - shut down a qdio subchannel 1197 * @cdev: associated ccw device 1198 * @how: use halt or clear to shutdown 1199 */ 1200 int qdio_shutdown(struct ccw_device *cdev, int how) 1201 { 1202 struct qdio_irq *irq_ptr; 1203 int rc; 1204 unsigned long flags; 1205 char dbf_text[15]; 1206 1207 sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no); 1208 QDIO_DBF_TEXT0(0, setup, dbf_text); 1209 1210 irq_ptr = cdev->private->qdio_data; 1211 if (!irq_ptr) 1212 return -ENODEV; 1213 1214 mutex_lock(&irq_ptr->setup_mutex); 1215 /* 1216 * Subchannel was already shot down. We cannot prevent being called 1217 * twice since cio may trigger a shutdown asynchronously. 1218 */ 1219 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { 1220 mutex_unlock(&irq_ptr->setup_mutex); 1221 return 0; 1222 } 1223 1224 tiqdio_remove_input_queues(irq_ptr); 1225 qdio_shutdown_queues(cdev); 1226 qdio_shutdown_debug_entries(irq_ptr, cdev); 1227 1228 /* cleanup subchannel */ 1229 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1230 1231 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) 1232 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); 1233 else 1234 /* default behaviour is halt */ 1235 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); 1236 if (rc) { 1237 sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no); 1238 QDIO_DBF_TEXT0(0, setup, dbf_text); 1239 sprintf(dbf_text, "rc=%d", rc); 1240 QDIO_DBF_TEXT0(0, setup, dbf_text); 1241 goto no_cleanup; 1242 } 1243 1244 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); 1245 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1246 wait_event_interruptible_timeout(cdev->private->wait_q, 1247 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || 1248 irq_ptr->state == QDIO_IRQ_STATE_ERR, 1249 10 * HZ); 1250 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1251 1252 no_cleanup: 1253 qdio_shutdown_thinint(irq_ptr); 1254 1255 /* restore interrupt handler */ 1256 if ((void *)cdev->handler == (void *)qdio_int_handler) 1257 cdev->handler = irq_ptr->orig_handler; 1258 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1259 1260 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1261 mutex_unlock(&irq_ptr->setup_mutex); 1262 if (rc) 1263 return rc; 1264 return 0; 1265 } 1266 EXPORT_SYMBOL_GPL(qdio_shutdown); 1267 1268 /** 1269 * qdio_free - free data structures for a qdio subchannel 1270 * @cdev: associated ccw device 1271 */ 1272 int qdio_free(struct ccw_device *cdev) 1273 { 1274 struct qdio_irq *irq_ptr; 1275 char dbf_text[15]; 1276 1277 sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no); 1278 QDIO_DBF_TEXT0(0, setup, dbf_text); 1279 1280 irq_ptr = cdev->private->qdio_data; 1281 if (!irq_ptr) 1282 return -ENODEV; 1283 1284 mutex_lock(&irq_ptr->setup_mutex); 1285 cdev->private->qdio_data = NULL; 1286 mutex_unlock(&irq_ptr->setup_mutex); 1287 1288 qdio_release_memory(irq_ptr); 1289 return 0; 1290 } 1291 EXPORT_SYMBOL_GPL(qdio_free); 1292 1293 /** 1294 * qdio_initialize - allocate and establish queues for a qdio subchannel 1295 * @init_data: initialization data 1296 * 1297 * This function first allocates queues via qdio_allocate() and on success 1298 * establishes them via qdio_establish(). 1299 */ 1300 int qdio_initialize(struct qdio_initialize *init_data) 1301 { 1302 int rc; 1303 char dbf_text[15]; 1304 1305 sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); 1306 QDIO_DBF_TEXT0(0, setup, dbf_text); 1307 1308 rc = qdio_allocate(init_data); 1309 if (rc) 1310 return rc; 1311 1312 rc = qdio_establish(init_data); 1313 if (rc) 1314 qdio_free(init_data->cdev); 1315 return rc; 1316 } 1317 EXPORT_SYMBOL_GPL(qdio_initialize); 1318 1319 /** 1320 * qdio_allocate - allocate qdio queues and associated data 1321 * @init_data: initialization data 1322 */ 1323 int qdio_allocate(struct qdio_initialize *init_data) 1324 { 1325 struct qdio_irq *irq_ptr; 1326 char dbf_text[15]; 1327 1328 sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); 1329 QDIO_DBF_TEXT0(0, setup, dbf_text); 1330 1331 if ((init_data->no_input_qs && !init_data->input_handler) || 1332 (init_data->no_output_qs && !init_data->output_handler)) 1333 return -EINVAL; 1334 1335 if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) || 1336 (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)) 1337 return -EINVAL; 1338 1339 if ((!init_data->input_sbal_addr_array) || 1340 (!init_data->output_sbal_addr_array)) 1341 return -EINVAL; 1342 1343 qdio_allocate_do_dbf(init_data); 1344 1345 /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ 1346 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 1347 if (!irq_ptr) 1348 goto out_err; 1349 QDIO_DBF_TEXT0(0, setup, "irq_ptr:"); 1350 QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *)); 1351 1352 mutex_init(&irq_ptr->setup_mutex); 1353 1354 /* 1355 * Allocate a page for the chsc calls in qdio_establish. 1356 * Must be pre-allocated since a zfcp recovery will call 1357 * qdio_establish. In case of low memory and swap on a zfcp disk 1358 * we may not be able to allocate memory otherwise. 1359 */ 1360 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); 1361 if (!irq_ptr->chsc_page) 1362 goto out_rel; 1363 1364 /* qdr is used in ccw1.cda which is u32 */ 1365 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 1366 if (!irq_ptr->qdr) 1367 goto out_rel; 1368 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); 1369 1370 QDIO_DBF_TEXT0(0, setup, "qdr:"); 1371 QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *)); 1372 1373 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, 1374 init_data->no_output_qs)) 1375 goto out_rel; 1376 1377 init_data->cdev->private->qdio_data = irq_ptr; 1378 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1379 return 0; 1380 out_rel: 1381 qdio_release_memory(irq_ptr); 1382 out_err: 1383 return -ENOMEM; 1384 } 1385 EXPORT_SYMBOL_GPL(qdio_allocate); 1386 1387 /** 1388 * qdio_establish - establish queues on a qdio subchannel 1389 * @init_data: initialization data 1390 */ 1391 int qdio_establish(struct qdio_initialize *init_data) 1392 { 1393 char dbf_text[20]; 1394 struct qdio_irq *irq_ptr; 1395 struct ccw_device *cdev = init_data->cdev; 1396 unsigned long saveflags; 1397 int rc; 1398 1399 sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); 1400 QDIO_DBF_TEXT0(0, setup, dbf_text); 1401 1402 irq_ptr = cdev->private->qdio_data; 1403 if (!irq_ptr) 1404 return -ENODEV; 1405 1406 if (cdev->private->state != DEV_STATE_ONLINE) 1407 return -EINVAL; 1408 1409 mutex_lock(&irq_ptr->setup_mutex); 1410 qdio_setup_irq(init_data); 1411 1412 rc = qdio_establish_thinint(irq_ptr); 1413 if (rc) { 1414 mutex_unlock(&irq_ptr->setup_mutex); 1415 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1416 return rc; 1417 } 1418 1419 /* establish q */ 1420 irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; 1421 irq_ptr->ccw.flags = CCW_FLAG_SLI; 1422 irq_ptr->ccw.count = irq_ptr->equeue.count; 1423 irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); 1424 1425 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); 1426 ccw_device_set_options_mask(cdev, 0); 1427 1428 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); 1429 if (rc) { 1430 sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no); 1431 QDIO_DBF_TEXT2(1, setup, dbf_text); 1432 sprintf(dbf_text, "eq:rc%4x", rc); 1433 QDIO_DBF_TEXT2(1, setup, dbf_text); 1434 } 1435 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); 1436 1437 if (rc) { 1438 mutex_unlock(&irq_ptr->setup_mutex); 1439 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1440 return rc; 1441 } 1442 1443 wait_event_interruptible_timeout(cdev->private->wait_q, 1444 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || 1445 irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); 1446 1447 if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { 1448 mutex_unlock(&irq_ptr->setup_mutex); 1449 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1450 return -EIO; 1451 } 1452 1453 qdio_setup_ssqd_info(irq_ptr); 1454 sprintf(dbf_text, "qDmmwc%2x", irq_ptr->ssqd_desc.mmwc); 1455 QDIO_DBF_TEXT2(0, setup, dbf_text); 1456 sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac); 1457 QDIO_DBF_TEXT2(0, setup, dbf_text); 1458 1459 /* qebsm is now setup if available, initialize buffer states */ 1460 qdio_init_buf_states(irq_ptr); 1461 1462 mutex_unlock(&irq_ptr->setup_mutex); 1463 qdio_print_subchannel_info(irq_ptr, cdev); 1464 qdio_setup_debug_entries(irq_ptr, cdev); 1465 return 0; 1466 } 1467 EXPORT_SYMBOL_GPL(qdio_establish); 1468 1469 /** 1470 * qdio_activate - activate queues on a qdio subchannel 1471 * @cdev: associated cdev 1472 */ 1473 int qdio_activate(struct ccw_device *cdev) 1474 { 1475 struct qdio_irq *irq_ptr; 1476 int rc; 1477 unsigned long saveflags; 1478 char dbf_text[20]; 1479 1480 sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no); 1481 QDIO_DBF_TEXT0(0, setup, dbf_text); 1482 1483 irq_ptr = cdev->private->qdio_data; 1484 if (!irq_ptr) 1485 return -ENODEV; 1486 1487 if (cdev->private->state != DEV_STATE_ONLINE) 1488 return -EINVAL; 1489 1490 mutex_lock(&irq_ptr->setup_mutex); 1491 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { 1492 rc = -EBUSY; 1493 goto out; 1494 } 1495 1496 irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; 1497 irq_ptr->ccw.flags = CCW_FLAG_SLI; 1498 irq_ptr->ccw.count = irq_ptr->aqueue.count; 1499 irq_ptr->ccw.cda = 0; 1500 1501 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); 1502 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); 1503 1504 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 1505 0, DOIO_DENY_PREFETCH); 1506 if (rc) { 1507 sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no); 1508 QDIO_DBF_TEXT2(1, setup, dbf_text); 1509 sprintf(dbf_text, "aq:rc%4x", rc); 1510 QDIO_DBF_TEXT2(1, setup, dbf_text); 1511 } 1512 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); 1513 1514 if (rc) 1515 goto out; 1516 1517 if (is_thinint_irq(irq_ptr)) 1518 tiqdio_add_input_queues(irq_ptr); 1519 1520 /* wait for subchannel to become active */ 1521 msleep(5); 1522 1523 switch (irq_ptr->state) { 1524 case QDIO_IRQ_STATE_STOPPED: 1525 case QDIO_IRQ_STATE_ERR: 1526 mutex_unlock(&irq_ptr->setup_mutex); 1527 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1528 return -EIO; 1529 default: 1530 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); 1531 rc = 0; 1532 } 1533 out: 1534 mutex_unlock(&irq_ptr->setup_mutex); 1535 return rc; 1536 } 1537 EXPORT_SYMBOL_GPL(qdio_activate); 1538 1539 static inline int buf_in_between(int bufnr, int start, int count) 1540 { 1541 int end = add_buf(start, count); 1542 1543 if (end > start) { 1544 if (bufnr >= start && bufnr < end) 1545 return 1; 1546 else 1547 return 0; 1548 } 1549 1550 /* wrap-around case */ 1551 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || 1552 (bufnr < end)) 1553 return 1; 1554 else 1555 return 0; 1556 } 1557 1558 /** 1559 * handle_inbound - reset processed input buffers 1560 * @q: queue containing the buffers 1561 * @callflags: flags 1562 * @bufnr: first buffer to process 1563 * @count: how many buffers are emptied 1564 */ 1565 static void handle_inbound(struct qdio_q *q, unsigned int callflags, 1566 int bufnr, int count) 1567 { 1568 unsigned long flags; 1569 int used, rc; 1570 1571 /* 1572 * do_QDIO could run in parallel with the queue tasklet so the 1573 * upper-layer programm could empty the ACK'ed buffer here. 1574 * If that happens we must clear the polling flag, otherwise 1575 * qdio_stop_polling() could set the buffer to NOT_INIT after 1576 * it was set to EMPTY which would kill us. 1577 */ 1578 spin_lock_irqsave(&q->u.in.lock, flags); 1579 if (q->u.in.polling) 1580 if (buf_in_between(q->last_move_ftc, bufnr, count)) 1581 q->u.in.polling = 0; 1582 1583 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); 1584 spin_unlock_irqrestore(&q->u.in.lock, flags); 1585 1586 used = atomic_add_return(count, &q->nr_buf_used) - count; 1587 BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); 1588 1589 /* no need to signal as long as the adapter had free buffers */ 1590 if (used) 1591 return; 1592 1593 if (need_siga_in(q)) { 1594 rc = qdio_siga_input(q); 1595 if (rc) 1596 q->qdio_error = rc; 1597 } 1598 } 1599 1600 /** 1601 * handle_outbound - process filled outbound buffers 1602 * @q: queue containing the buffers 1603 * @callflags: flags 1604 * @bufnr: first buffer to process 1605 * @count: how many buffers are filled 1606 */ 1607 static void handle_outbound(struct qdio_q *q, unsigned int callflags, 1608 int bufnr, int count) 1609 { 1610 unsigned char state; 1611 int used; 1612 1613 qdio_perf_stat_inc(&perf_stats.outbound_handler); 1614 1615 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); 1616 used = atomic_add_return(count, &q->nr_buf_used); 1617 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); 1618 1619 if (callflags & QDIO_FLAG_PCI_OUT) 1620 q->u.out.pci_out_enabled = 1; 1621 else 1622 q->u.out.pci_out_enabled = 0; 1623 1624 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1625 if (multicast_outbound(q)) 1626 qdio_kick_outbound_q(q); 1627 else 1628 if ((q->irq_ptr->ssqd_desc.mmwc > 1) && 1629 (count > 1) && 1630 (count <= q->irq_ptr->ssqd_desc.mmwc)) { 1631 /* exploit enhanced SIGA */ 1632 q->u.out.use_enh_siga = 1; 1633 qdio_kick_outbound_q(q); 1634 } else { 1635 /* 1636 * One siga-w per buffer required for unicast 1637 * HiperSockets. 1638 */ 1639 q->u.out.use_enh_siga = 0; 1640 while (count--) 1641 qdio_kick_outbound_q(q); 1642 } 1643 goto out; 1644 } 1645 1646 if (need_siga_sync(q)) { 1647 qdio_siga_sync_q(q); 1648 goto out; 1649 } 1650 1651 /* try to fast requeue buffers */ 1652 get_buf_state(q, prev_buf(bufnr), &state); 1653 if (state != SLSB_CU_OUTPUT_PRIMED) 1654 qdio_kick_outbound_q(q); 1655 else { 1656 QDIO_DBF_TEXT5(0, trace, "fast-req"); 1657 qdio_perf_stat_inc(&perf_stats.fast_requeue); 1658 } 1659 out: 1660 /* Fixme: could wait forever if called from process context */ 1661 tasklet_schedule(&q->tasklet); 1662 } 1663 1664 /** 1665 * do_QDIO - process input or output buffers 1666 * @cdev: associated ccw_device for the qdio subchannel 1667 * @callflags: input or output and special flags from the program 1668 * @q_nr: queue number 1669 * @bufnr: buffer number 1670 * @count: how many buffers to process 1671 */ 1672 int do_QDIO(struct ccw_device *cdev, unsigned int callflags, 1673 int q_nr, int bufnr, int count) 1674 { 1675 struct qdio_irq *irq_ptr; 1676 #ifdef CONFIG_QDIO_DEBUG 1677 char dbf_text[20]; 1678 1679 sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no); 1680 QDIO_DBF_TEXT3(0, trace, dbf_text); 1681 #endif /* CONFIG_QDIO_DEBUG */ 1682 1683 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || 1684 (count > QDIO_MAX_BUFFERS_PER_Q) || 1685 (q_nr > QDIO_MAX_QUEUES_PER_IRQ)) 1686 return -EINVAL; 1687 1688 if (!count) 1689 return 0; 1690 1691 irq_ptr = cdev->private->qdio_data; 1692 if (!irq_ptr) 1693 return -ENODEV; 1694 1695 #ifdef CONFIG_QDIO_DEBUG 1696 if (callflags & QDIO_FLAG_SYNC_INPUT) 1697 QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr], 1698 sizeof(void *)); 1699 else 1700 QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr], 1701 sizeof(void *)); 1702 1703 sprintf(dbf_text, "flag%04x", callflags); 1704 QDIO_DBF_TEXT3(0, trace, dbf_text); 1705 sprintf(dbf_text, "qi%02xct%02x", bufnr, count); 1706 QDIO_DBF_TEXT3(0, trace, dbf_text); 1707 #endif /* CONFIG_QDIO_DEBUG */ 1708 1709 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) 1710 return -EBUSY; 1711 1712 if (callflags & QDIO_FLAG_SYNC_INPUT) 1713 handle_inbound(irq_ptr->input_qs[q_nr], 1714 callflags, bufnr, count); 1715 else if (callflags & QDIO_FLAG_SYNC_OUTPUT) 1716 handle_outbound(irq_ptr->output_qs[q_nr], 1717 callflags, bufnr, count); 1718 else { 1719 QDIO_DBF_TEXT3(1, trace, "doQD:inv"); 1720 return -EINVAL; 1721 } 1722 return 0; 1723 } 1724 EXPORT_SYMBOL_GPL(do_QDIO); 1725 1726 static int __init init_QDIO(void) 1727 { 1728 int rc; 1729 1730 rc = qdio_setup_init(); 1731 if (rc) 1732 return rc; 1733 rc = tiqdio_allocate_memory(); 1734 if (rc) 1735 goto out_cache; 1736 rc = qdio_debug_init(); 1737 if (rc) 1738 goto out_ti; 1739 rc = qdio_setup_perf_stats(); 1740 if (rc) 1741 goto out_debug; 1742 rc = tiqdio_register_thinints(); 1743 if (rc) 1744 goto out_perf; 1745 return 0; 1746 1747 out_perf: 1748 qdio_remove_perf_stats(); 1749 out_debug: 1750 qdio_debug_exit(); 1751 out_ti: 1752 tiqdio_free_memory(); 1753 out_cache: 1754 qdio_setup_exit(); 1755 return rc; 1756 } 1757 1758 static void __exit exit_QDIO(void) 1759 { 1760 tiqdio_unregister_thinints(); 1761 tiqdio_free_memory(); 1762 qdio_remove_perf_stats(); 1763 qdio_debug_exit(); 1764 qdio_setup_exit(); 1765 } 1766 1767 module_init(init_QDIO); 1768 module_exit(exit_QDIO); 1769