// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_send_control_data_cb(struct qeth_card *card,
				      struct qeth_channel *channel,
				      struct qeth_cmd_buffer *iob);
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_buffers(struct qeth_card *);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification);
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

static struct workqueue_struct *qeth_wq;

int qeth_card_hw_is_reachable(struct qeth_card *card)
{
	return (card->state == CARD_STATE_SOFTSETUP) ||
		(card->state == CARD_STATE_UP);
}
EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	rtnl_lock();
	dev_close(card->dev);
	rtnl_unlock();
	ccwgroup_set_offline(card->gdev);
}

void qeth_close_dev(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cldevsubm");
	queue_work(qeth_wq, &card->close_dev_work);
}
EXPORT_SYMBOL_GPL(qeth_close_dev);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (card->info.guestlan) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (card->info.guestlan) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list) {
		list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
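
/* Pre-allocate the inbound buffer pool: each pool entry holds one page per
 * buffer element, so inbound queues can be refilled without page allocations
 * in the hot path. All entries are anchored on init_pool.entry_list; the
 * subset currently available for refilling is tracked on
 * in_buf_pool.entry_list (the "working pool list" cleared above).
 */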
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;
	void *ptr;
	int i, j;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				while (j > 0)
					free_page((unsigned long)
						  pool_entry->elements[--j]);
				kfree(pool_entry);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_CARD_TEXT(card, 2, "realcbp");

	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER))
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_DBF_TEXT(SETUP, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0,
			     127);
		if (rc) {
			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_DBF_TEXT(SETUP, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_DBF_TEXT(SETUP, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}
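
/* Map the SBALF 15 completion code of an asynchronously completed output
 * buffer to the af_iucv TX notification sent to the owning socket: 0 means
 * success, 4/16/17/18 report the peer as unreachable, everything else is a
 * general error. The "delayed" variants are used when the completion only
 * arrives later via the completion queue.
 */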
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;
				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_release_skbs(c);

				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}

		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}
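
/* Process one asynchronous output buffer (QAOB) completion: look up the
 * output buffer via the opaque user1 tag, move its state from PRIMED or
 * PENDING to IN_CQ, notify any af_iucv sockets owning the queued skbs,
 * free header elements still attached to the buffer, and finally mark it
 * HANDLED_DELAYED so that qeth_cleanup_handled_pending() can reclaim it.
 */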
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
							QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		if (aob->sba[i] && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					(void *) aob->sba[i]);
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);
}

static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
{
	return card->options.cq == QETH_CQ_ENABLED &&
	    card->qdio.c_q != NULL &&
	    queue != 0 &&
	    queue == card->qdio.no_in_queues - 1;
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;
	iob = qeth_get_buffer(channel);
	if (!iob) {
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
				 CARD_DEVID(card));
		return -ENOMEM;
	}
	qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, channel->ccw,
			      (addr_t) iob, 0, 0);
	if (rc) {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		atomic_set(&channel->irq_pending, 0);
		qeth_release_buffer(channel, iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
	struct qeth_reply *reply;

	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
	if (reply) {
		refcount_set(&reply->refcnt, 1);
		atomic_set(&reply->received, 0);
		init_waitqueue_head(&reply->wait_q);
	}
	return reply;
}

static void qeth_get_reply(struct qeth_reply *reply)
{
	refcount_inc(&reply->refcnt);
}

static void qeth_put_reply(struct qeth_reply *reply)
{
	if (refcount_dec_and_test(&reply->refcnt))
		kfree(reply);
}

static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&reply->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
{
	spin_lock_irq(&card->lock);
	list_del(&reply->list);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_reply(struct qeth_reply *reply)
{
	atomic_inc(&reply->received);
	wake_up(&reply->wait_q);
}

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;
	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			qeth_close_dev(card);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_reply *reply;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(reply, &card->cmd_waiter_list, list) {
		reply->rc = -EIO;
		qeth_notify_reply(reply);
	}
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	if (!buffer)
		return 0;

	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & 0xc0) == 0xc0) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		if (buffer[4] == 0xf6) {
			dev_err(&card->gdev->dev,
				"The qeth device is not configured "
				"for the OSI layer required by z/VM\n");
			return -EPERM;
		}
		return -EIO;
	}
	return 0;
}
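
/* Each channel owns a small ring of QETH_CMD_BUFFER_NO pre-allocated command
 * buffers. __qeth_get_buffer() scans the ring for an entry in BUF_STATE_FREE
 * (the caller holds iob_lock), marks it LOCKED and hands it out;
 * qeth_release_buffer() puts it back and wakes waiters, which is what lets
 * qeth_wait_for_buffer() sleep until a buffer becomes available again.
 */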
static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
	__u8 index;

	index = channel->io_buf_no;
	do {
		if (channel->iob[index].state == BUF_STATE_FREE) {
			channel->iob[index].state = BUF_STATE_LOCKED;
			channel->io_buf_no = (channel->io_buf_no + 1) %
				QETH_CMD_BUFFER_NO;
			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
			return channel->iob + index;
		}
		index = (index + 1) % QETH_CMD_BUFFER_NO;
	} while (index != channel->io_buf_no);

	return NULL;
}

void qeth_release_buffer(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	unsigned long flags;

	spin_lock_irqsave(&channel->iob_lock, flags);
	iob->state = BUF_STATE_FREE;
	iob->callback = qeth_send_control_data_cb;
	if (iob->reply) {
		qeth_put_reply(iob->reply);
		iob->reply = NULL;
	}
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	wake_up(&channel->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_release_buffer);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_channel *channel,
				   struct qeth_cmd_buffer *iob)
{
	qeth_release_buffer(channel, iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	struct qeth_reply *reply = iob->reply;

	if (reply) {
		reply->rc = rc;
		qeth_notify_reply(reply);
	}
	qeth_release_buffer(iob->channel, iob);
}

static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->iob_lock, flags);
	buffer = __qeth_get_buffer(channel);
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	return buffer;
}

struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer;
	wait_event(channel->wait_q,
		   ((buffer = qeth_get_buffer(channel)) != NULL));
	return buffer;
}
EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);

void qeth_clear_cmd_buffers(struct qeth_channel *channel)
{
	int cnt;

	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		qeth_release_buffer(channel, &channel->iob[cnt]);
	channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
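
/* Default callback for data received on the READ channel. After the IDX
 * response is validated, an IPA reply is matched against the waiters on
 * cmd_waiter_list by sequence number (IDX commands all use
 * QETH_IDX_COMMAND_SEQNO), the registered reply callback is invoked, and
 * the waiter is notified once the callback signals that no more reply
 * blocks are expected.
 */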
static void qeth_send_control_data_cb(struct qeth_card *card,
				      struct qeth_channel *channel,
				      struct qeth_cmd_buffer *iob)
{
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_reply *r;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		goto out;
	}

	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	} else {
		/* non-IPA commands should only flow during initialization */
		if (card->state != CARD_STATE_DOWN)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(r, &card->cmd_waiter_list, list) {
		if ((r->seqno == QETH_IDX_COMMAND_SEQNO) ||
		    (cmd && (r->seqno == cmd->hdr.seqno))) {
			reply = r;
			/* take the object outside the lock */
			qeth_get_reply(reply);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!reply)
		goto out;

	if (!reply->callback) {
		rc = 0;
	} else {
		if (cmd) {
			reply->offset = (u16)((char *)cmd - (char *)iob->data);
			rc = reply->callback(card, reply, (unsigned long)cmd);
		} else {
			rc = reply->callback(card, reply, (unsigned long)iob);
		}
	}

	if (rc <= 0) {
		reply->rc = rc;
		qeth_notify_reply(reply);
	}

	qeth_put_reply(reply);

out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	qeth_release_buffer(channel, iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	      (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return 0;
}

void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);

void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
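
/* Evaluate a channel interrupt's status and sense data. Returns non-zero if
 * the condition is fatal for the current I/O (channel checks, a resetting
 * event, command reject, or other unit-check sense), and 0 if it can be
 * ignored (e.g. a unit check with all-zero sense bytes).
 */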
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
		struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return 1;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return 1;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return 1;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return 1;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return 1;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				unsigned long intparm, struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		if (intparm == QETH_RCD_PARM) {
			if (card->data.ccwdev == cdev) {
				card->data.state = CH_STATE_DOWN;
				wake_up(&card->wait_q);
			}
		}
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}
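
/* Common interrupt handler for all three CCW channels of a card. It maps
 * the interrupting subchannel to the read, write or data channel, clears
 * the channel's irq_pending flag, reacts to halt/clear completions and to
 * error conditions, and finally runs the command buffer's callback (and,
 * on the read channel, immediately starts the next READ CCW).
 */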
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);
	if (!card)
		return;

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (qeth_intparm_is_iob(intparm))
		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);

	rc = qeth_check_irb_error(card, cdev, intparm, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return;
	}

	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	/*let's wake up immediately on data channel*/
	if ((channel == &card->data) && (intparm != 0) &&
	    (intparm != QETH_RCD_PARM))
		goto out;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}
		if (intparm == QETH_RCD_PARM) {
			channel->state = CH_STATE_DOWN;
			goto out;
		}
		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (intparm == QETH_RCD_PARM) {
		channel->state = CH_STATE_RCD_DONE;
		goto out;
	}
	if (channel == &card->data)
		return;
	if (channel == &card->read &&
	    channel->state == CH_STATE_UP)
		__qeth_issue_next_read(card);

	if (iob && iob->callback)
		iob->callback(card, iob->channel, iob);

out:
	wake_up(&card->wait_q);
	return;
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}

static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);

	__skb_queue_purge(&buf->skb_list);
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_release_skbs(buf);

	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
				buf->buffer->element[i].addr);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer,
			       QETH_MAX_BUFFER_ELEMENTS(queue->card));
	buf->next_element_to_fill = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j]);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

void qeth_clear_qdio_buffers(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i]) {
			qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
		}
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	int i = 0;
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list) {
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
		kfree(pool_entry);
	}
}

static void qeth_clean_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "freech");

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		kfree(channel->iob[cnt].data);
	kfree(channel->ccw);
}

static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
{
	struct ccw_device *cdev = channel->ccwdev;
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "setupch");

	channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!channel->ccw)
		return -ENOMEM;
	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);
	init_waitqueue_head(&channel->wait_q);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	if (!alloc_buffers)
		return 0;

	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
		channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
						 GFP_KERNEL | GFP_DMA);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = BUF_STATE_FREE;
		channel->iob[cnt].channel = channel;
		channel->iob[cnt].callback = qeth_send_control_data_cb;
	}
	if (cnt < QETH_CMD_BUFFER_NO) {
		qeth_clean_channel(channel);
		return -ENOMEM;
	}
	channel->io_buf_no = 0;
	spin_lock_init(&channel->iob_lock);

	return 0;
}
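
/* The number of outbound queues is derived from the channel-path descriptor
 * in qeth_update_from_chp_desc(): CHPP bit 6 selects between a single
 * output queue (no priority queueing) and the usual four. If the queue
 * count changes while QDIO structures already exist, they are freed so
 * that they get re-allocated with the new geometry.
 */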
static void qeth_set_single_write_queues(struct qeth_card *card)
{
	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
	    (card->qdio.no_out_queues == 4))
		qeth_free_qdio_buffers(card);

	card->qdio.no_out_queues = 1;
	if (card->qdio.default_out_queue != 0)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.default_out_queue = 0;
}

static void qeth_set_multiple_write_queues(struct qeth_card *card)
{
	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
	    (card->qdio.no_out_queues == 1)) {
		qeth_free_qdio_buffers(card);
		card->qdio.default_out_queue = 2;
	}
	card->qdio.no_out_queues = 4;
}

static void qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_DBF_TEXT(SETUP, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		goto out;

	card->info.func_level = 0x4100 + chp_dsc->desc;
	if (card->info.type == QETH_CARD_TYPE_IQD)
		goto out;

	/* CHPP field bit 6 == 1 -> single queue */
	if ((chp_dsc->chpp & 0x02) == 0x02)
		qeth_set_single_write_queues(card);
	else
		qeth_set_multiple_write_queues(card);
out:
	kfree(chp_dsc);
	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
	card->qdio.no_out_queues = QETH_MAX_QUEUES;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (card->info.type == QETH_CARD_TYPE_IQD)
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(card->discipline->recover, (void *)card,
				 "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 2, "setupcrd");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->mclock);
	spin_lock_init(&card->lock);
	spin_lock_init(&card->ip_lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;
	if (qeth_setup_channel(&card->read, true))
		goto out_ip;
	if (qeth_setup_channel(&card->write, true))
		goto out_channel;
	if (qeth_setup_channel(&card->data, false))
		goto out_data;
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_data:
	qeth_clean_channel(&card->write);
out_channel:
	qeth_clean_channel(&card->read);
out_ip:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}
static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (card->info.type == QETH_CARD_TYPE_IQD)
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);

static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
			       int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
	channel->state = CH_STATE_RCD;
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
				       QETH_RCD_PARM, LPM_ANYPATH, 0,
				       QETH_RCD_TIMEOUT);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (!ret)
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}

static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgunit");
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
}
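
/* On z/VM, ask CP via DIAG 0x26c (VNIC info) whether the virtual NIC is
 * layer-2 or layer-3, so that the matching discipline can be selected
 * without user configuration. The request is keyed by the guest's user ID
 * and the read device's device number.
 */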
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_DBF_TEXT(SETUP, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (card->info.type == QETH_CARD_TYPE_OSM ||
	    card->info.type == QETH_CARD_TYPE_OSN)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (card->info.guestlan)
		disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
				QETH_DISCIPLINE_LAYER3 :
				qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_DBF_TEXT(SETUP, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_DBF_TEXT(SETUP, 3, "force l3");
		break;
	default:
		QETH_DBF_TEXT(SETUP, 3, "force no");
	}

	return disc;
}

static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");

	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
	    prcd[76] >= 0xF1 && prcd[76] <= 0xF4) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}

static void qeth_init_func_level(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static int qeth_idx_activate_get_answer(struct qeth_card *card,
					struct qeth_channel *channel,
					void (*reply_cb)(struct qeth_card *,
							 struct qeth_channel *,
							 struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
	iob = qeth_get_buffer(channel);
	if (!iob)
		return -ENOMEM;
	iob->callback = reply_cb;
	qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		qeth_release_buffer(channel, iob);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP) {
		rc = -ETIME;
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
	} else
		rc = 0;
	return rc;
}
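
/* Send an IDX ACTIVATE request on the given channel. The request carries
 * the port number, the issuer token and the expected function level; the
 * QDIO device number and unit address are taken from the data channel.
 * Once the write completes and the channel reaches CH_STATE_ACTIVATING,
 * qeth_idx_activate_get_answer() reads back the adapter's reply.
 */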
static int qeth_idx_activate_channel(struct qeth_card *card,
				     struct qeth_channel *channel,
				     void (*reply_cb)(struct qeth_card *,
						      struct qeth_channel *,
						      struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	__u16 temp;
	__u8 tmp;
	int rc;
	struct ccw_dev_id temp_devid;

	QETH_DBF_TEXT(SETUP, 2, "idxactch");

	iob = qeth_get_buffer(channel);
	if (!iob)
		return -ENOMEM;
	iob->callback = reply_cb;
	qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
		       iob->data);
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	tmp = ((u8)card->dev->dev_port) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, sizeof(__u16));
	ccw_device_get_id(CARD_DDEV(card), &temp_devid);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
			rc);
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		qeth_release_buffer(channel, iob);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		dev_warn(&channel->ccwdev->dev, "The qeth device driver"
			" failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
				 CCW_DEVID(channel->ccwdev));
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(card, channel, reply_cb);
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_idx_write_cb(struct qeth_card *card,
			      struct qeth_channel *channel,
			      struct qeth_cmd_buffer *iob)
{
	__u16 temp;

	QETH_DBF_TEXT(SETUP, 2, "idxwrcb");

	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
			dev_err(&channel->ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
		else
			QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
					 CCW_DEVID(channel->ccwdev));
		goto out;
	}
	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, temp);
		goto out;
	}
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}

static void qeth_idx_read_cb(struct qeth_card *card,
struct qeth_channel *channel, 1954 struct qeth_cmd_buffer *iob) 1955 { 1956 __u16 temp; 1957 1958 QETH_DBF_TEXT(SETUP , 2, "idxrdcb"); 1959 if (channel->state == CH_STATE_DOWN) { 1960 channel->state = CH_STATE_ACTIVATING; 1961 goto out; 1962 } 1963 1964 if (qeth_check_idx_response(card, iob->data)) 1965 goto out; 1966 1967 if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { 1968 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { 1969 case QETH_IDX_ACT_ERR_EXCL: 1970 dev_err(&channel->ccwdev->dev, 1971 "The adapter is used exclusively by another " 1972 "host\n"); 1973 break; 1974 case QETH_IDX_ACT_ERR_AUTH: 1975 case QETH_IDX_ACT_ERR_AUTH_USER: 1976 dev_err(&channel->ccwdev->dev, 1977 "Setting the device online failed because of " 1978 "insufficient authorization\n"); 1979 break; 1980 default: 1981 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", 1982 CCW_DEVID(channel->ccwdev)); 1983 } 1984 QETH_CARD_TEXT_(card, 2, "idxread%c", 1985 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 1986 goto out; 1987 } 1988 1989 memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 1990 if (temp != qeth_peer_func_level(card->info.func_level)) { 1991 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 1992 CCW_DEVID(channel->ccwdev), 1993 card->info.func_level, temp); 1994 goto out; 1995 } 1996 memcpy(&card->token.issuer_rm_r, 1997 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 1998 QETH_MPC_TOKEN_LENGTH); 1999 memcpy(&card->info.mcl_level[0], 2000 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); 2001 channel->state = CH_STATE_UP; 2002 out: 2003 qeth_release_buffer(channel, iob); 2004 } 2005 2006 void qeth_prepare_control_data(struct qeth_card *card, int len, 2007 struct qeth_cmd_buffer *iob) 2008 { 2009 qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data); 2010 iob->callback = qeth_release_buffer_cb; 2011 2012 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), 2013 &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); 2014 card->seqno.trans_hdr++; 2015 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), 2016 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); 2017 card->seqno.pdu_hdr++; 2018 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), 2019 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); 2020 QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); 2021 } 2022 EXPORT_SYMBOL_GPL(qeth_prepare_control_data); 2023 2024 /** 2025 * qeth_send_control_data() - send control command to the card 2026 * @card: qeth_card structure pointer 2027 * @len: size of the command buffer 2028 * @iob: qeth_cmd_buffer pointer 2029 * @reply_cb: callback function pointer 2030 * @cb_card: pointer to the qeth_card structure 2031 * @cb_reply: pointer to the qeth_reply structure 2032 * @cb_cmd: pointer to the original iob for non-IPA 2033 * commands, or to the qeth_ipa_cmd structure 2034 * for the IPA commands. 2035 * @reply_param: private pointer passed to the callback 2036 * 2037 * Callback function gets called one or more times, with cb_cmd 2038 * pointing to the response returned by the hardware. Callback 2039 * function must return 2040 * > 0 if more reply blocks are expected, 2041 * 0 if the last or only reply block is received, and 2042 * < 0 on error. 2043 * Callback function can get the value of the reply_param pointer from the 2044 * field 'param' of the structure qeth_reply. 
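 *
 * A minimal usage sketch (mirroring qeth_cm_enable() further down in this
 * file): the caller obtains a write-channel buffer, copies in the MPC
 * template and hands it off together with its reply callback:
 *
 *	iob = qeth_wait_for_buffer(&card->write);
 *	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
 *	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
 *				    qeth_cm_enable_cb, NULL);
 *
 * On success the callback then runs with cb_cmd pointing at the response.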
2045 */ 2046 2047 static int qeth_send_control_data(struct qeth_card *card, int len, 2048 struct qeth_cmd_buffer *iob, 2049 int (*reply_cb)(struct qeth_card *cb_card, 2050 struct qeth_reply *cb_reply, 2051 unsigned long cb_cmd), 2052 void *reply_param) 2053 { 2054 struct qeth_channel *channel = iob->channel; 2055 int rc; 2056 struct qeth_reply *reply = NULL; 2057 unsigned long timeout, event_timeout; 2058 struct qeth_ipa_cmd *cmd = NULL; 2059 2060 QETH_CARD_TEXT(card, 2, "sendctl"); 2061 2062 if (card->read_or_write_problem) { 2063 qeth_release_buffer(channel, iob); 2064 return -EIO; 2065 } 2066 reply = qeth_alloc_reply(card); 2067 if (!reply) { 2068 qeth_release_buffer(channel, iob); 2069 return -ENOMEM; 2070 } 2071 reply->callback = reply_cb; 2072 reply->param = reply_param; 2073 2074 /* pairs with qeth_release_buffer(): */ 2075 qeth_get_reply(reply); 2076 iob->reply = reply; 2077 2078 while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ; 2079 2080 if (IS_IPA(iob->data)) { 2081 cmd = __ipa_cmd(iob); 2082 cmd->hdr.seqno = card->seqno.ipa++; 2083 reply->seqno = cmd->hdr.seqno; 2084 event_timeout = QETH_IPA_TIMEOUT; 2085 } else { 2086 reply->seqno = QETH_IDX_COMMAND_SEQNO; 2087 event_timeout = QETH_TIMEOUT; 2088 } 2089 qeth_prepare_control_data(card, len, iob); 2090 2091 qeth_enqueue_reply(card, reply); 2092 2093 timeout = jiffies + event_timeout; 2094 2095 QETH_CARD_TEXT(card, 6, "noirqpnd"); 2096 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 2097 rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, 2098 (addr_t) iob, 0, 0, event_timeout); 2099 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 2100 if (rc) { 2101 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", 2102 CARD_DEVID(card), rc); 2103 QETH_CARD_TEXT_(card, 2, " err%d", rc); 2104 qeth_dequeue_reply(card, reply); 2105 qeth_put_reply(reply); 2106 qeth_release_buffer(channel, iob); 2107 atomic_set(&channel->irq_pending, 0); 2108 wake_up(&card->wait_q); 2109 return rc; 2110 } 2111 2112 /* we have only one long-running ipassist; since we can ensure 2113 process context for this command, we can sleep */ 2114 if (cmd && cmd->hdr.command == IPA_CMD_SETIP && 2115 cmd->hdr.prot_version == QETH_PROT_IPV4) { 2116 if (!wait_event_timeout(reply->wait_q, 2117 atomic_read(&reply->received), event_timeout)) 2118 goto time_err; 2119 } else { 2120 while (!atomic_read(&reply->received)) { 2121 if (time_after(jiffies, timeout)) 2122 goto time_err; 2123 cpu_relax(); 2124 } 2125 } 2126 2127 qeth_dequeue_reply(card, reply); 2128 rc = reply->rc; 2129 qeth_put_reply(reply); 2130 return rc; 2131 2132 time_err: 2133 qeth_dequeue_reply(card, reply); 2134 qeth_put_reply(reply); 2135 return -ETIME; 2136 } 2137 2138 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2139 unsigned long data) 2140 { 2141 struct qeth_cmd_buffer *iob; 2142 2143 QETH_DBF_TEXT(SETUP, 2, "cmenblcb"); 2144 2145 iob = (struct qeth_cmd_buffer *) data; 2146 memcpy(&card->token.cm_filter_r, 2147 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2148 QETH_MPC_TOKEN_LENGTH); 2149 return 0; 2150 } 2151 2152 static int qeth_cm_enable(struct qeth_card *card) 2153 { 2154 int rc; 2155 struct qeth_cmd_buffer *iob; 2156 2157 QETH_DBF_TEXT(SETUP, 2, "cmenable"); 2158 2159 iob = qeth_wait_for_buffer(&card->write); 2160 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); 2161 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2162 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2163 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2164
&card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2165 2166 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob, 2167 qeth_cm_enable_cb, NULL); 2168 return rc; 2169 } 2170 2171 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2172 unsigned long data) 2173 { 2174 struct qeth_cmd_buffer *iob; 2175 2176 QETH_DBF_TEXT(SETUP, 2, "cmsetpcb"); 2177 2178 iob = (struct qeth_cmd_buffer *) data; 2179 memcpy(&card->token.cm_connection_r, 2180 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2181 QETH_MPC_TOKEN_LENGTH); 2182 return 0; 2183 } 2184 2185 static int qeth_cm_setup(struct qeth_card *card) 2186 { 2187 int rc; 2188 struct qeth_cmd_buffer *iob; 2189 2190 QETH_DBF_TEXT(SETUP, 2, "cmsetup"); 2191 2192 iob = qeth_wait_for_buffer(&card->write); 2193 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); 2194 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2195 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2196 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2197 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2198 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2199 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2200 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob, 2201 qeth_cm_setup_cb, NULL); 2202 return rc; 2203 } 2204 2205 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) 2206 { 2207 struct net_device *dev = card->dev; 2208 unsigned int new_mtu; 2209 2210 if (!max_mtu) { 2211 /* IQD needs accurate max MTU to set up its RX buffers: */ 2212 if (IS_IQD(card)) 2213 return -EINVAL; 2214 /* tolerate quirky HW: */ 2215 max_mtu = ETH_MAX_MTU; 2216 } 2217 2218 rtnl_lock(); 2219 if (IS_IQD(card)) { 2220 /* move any device with default MTU to new max MTU: */ 2221 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; 2222 2223 /* adjust RX buffer size to new max MTU: */ 2224 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2225 if (dev->max_mtu && dev->max_mtu != max_mtu) 2226 qeth_free_qdio_buffers(card); 2227 } else { 2228 if (dev->mtu) 2229 new_mtu = dev->mtu; 2230 /* default MTUs for first setup: */ 2231 else if (IS_LAYER2(card)) 2232 new_mtu = ETH_DATA_LEN; 2233 else 2234 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2235 } 2236 2237 dev->max_mtu = max_mtu; 2238 dev->mtu = min(new_mtu, max_mtu); 2239 rtnl_unlock(); 2240 return 0; 2241 } 2242 2243 static int qeth_get_mtu_outof_framesize(int framesize) 2244 { 2245 switch (framesize) { 2246 case 0x4000: 2247 return 8192; 2248 case 0x6000: 2249 return 16384; 2250 case 0xa000: 2251 return 32768; 2252 case 0xffff: 2253 return 57344; 2254 default: 2255 return 0; 2256 } 2257 } 2258 2259 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2260 unsigned long data) 2261 { 2262 __u16 mtu, framesize; 2263 __u16 len; 2264 __u8 link_type; 2265 struct qeth_cmd_buffer *iob; 2266 2267 QETH_DBF_TEXT(SETUP, 2, "ulpenacb"); 2268 2269 iob = (struct qeth_cmd_buffer *) data; 2270 memcpy(&card->token.ulp_filter_r, 2271 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2272 QETH_MPC_TOKEN_LENGTH); 2273 if (card->info.type == QETH_CARD_TYPE_IQD) { 2274 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2275 mtu = qeth_get_mtu_outof_framesize(framesize); 2276 } else { 2277 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2278 } 2279 *(u16 *)reply->param = mtu; 2280 2281 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2282 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2283 memcpy(&link_type, 2284 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2285 
card->info.link_type = link_type; 2286 } else 2287 card->info.link_type = 0; 2288 QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type); 2289 return 0; 2290 } 2291 2292 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2293 { 2294 if (IS_OSN(card)) 2295 return QETH_PROT_OSN2; 2296 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP; 2297 } 2298 2299 static int qeth_ulp_enable(struct qeth_card *card) 2300 { 2301 u8 prot_type = qeth_mpc_select_prot_type(card); 2302 struct qeth_cmd_buffer *iob; 2303 u16 max_mtu; 2304 int rc; 2305 2306 /*FIXME: trace view callbacks*/ 2307 QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); 2308 2309 iob = qeth_wait_for_buffer(&card->write); 2310 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); 2311 2312 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2313 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2314 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2315 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2316 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2317 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2318 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob, 2319 qeth_ulp_enable_cb, &max_mtu); 2320 if (rc) 2321 return rc; 2322 return qeth_update_max_mtu(card, max_mtu); 2323 } 2324 2325 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2326 unsigned long data) 2327 { 2328 struct qeth_cmd_buffer *iob; 2329 2330 QETH_DBF_TEXT(SETUP, 2, "ulpstpcb"); 2331 2332 iob = (struct qeth_cmd_buffer *) data; 2333 memcpy(&card->token.ulp_connection_r, 2334 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2335 QETH_MPC_TOKEN_LENGTH); 2336 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2337 3)) { 2338 QETH_DBF_TEXT(SETUP, 2, "olmlimit"); 2339 dev_err(&card->gdev->dev, "A connection could not be " 2340 "established because of an OLM limit\n"); 2341 return -EMLINK; 2342 } 2343 return 0; 2344 } 2345 2346 static int qeth_ulp_setup(struct qeth_card *card) 2347 { 2348 int rc; 2349 __u16 temp; 2350 struct qeth_cmd_buffer *iob; 2351 struct ccw_dev_id dev_id; 2352 2353 QETH_DBF_TEXT(SETUP, 2, "ulpsetup"); 2354 2355 iob = qeth_wait_for_buffer(&card->write); 2356 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE); 2357 2358 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2359 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2360 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2361 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2362 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2363 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2364 2365 ccw_device_get_id(CARD_DDEV(card), &dev_id); 2366 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2); 2367 temp = (card->info.cula << 8) + card->info.unit_addr2; 2368 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2369 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob, 2370 qeth_ulp_setup_cb, NULL); 2371 return rc; 2372 } 2373 2374 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) 2375 { 2376 struct qeth_qdio_out_buffer *newbuf; 2377 2378 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); 2379 if (!newbuf) 2380 return -ENOMEM; 2381 2382 newbuf->buffer = q->qdio_bufs[bidx]; 2383 skb_queue_head_init(&newbuf->skb_list); 2384 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2385 newbuf->q = q; 2386 newbuf->next_pending = q->bufs[bidx]; 2387 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2388 q->bufs[bidx] = newbuf; 2389 return 0; 2390 } 2391 2392 static void 
qeth_free_output_queue(struct qeth_qdio_out_q *q) 2393 { 2394 if (!q) 2395 return; 2396 2397 qeth_clear_outq_buffers(q, 1); 2398 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2399 kfree(q); 2400 } 2401 2402 static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void) 2403 { 2404 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2405 2406 if (!q) 2407 return NULL; 2408 2409 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { 2410 kfree(q); 2411 return NULL; 2412 } 2413 return q; 2414 } 2415 2416 static int qeth_alloc_qdio_buffers(struct qeth_card *card) 2417 { 2418 int i, j; 2419 2420 QETH_DBF_TEXT(SETUP, 2, "allcqdbf"); 2421 2422 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, 2423 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2424 return 0; 2425 2426 QETH_DBF_TEXT(SETUP, 2, "inq"); 2427 card->qdio.in_q = qeth_alloc_qdio_queue(); 2428 if (!card->qdio.in_q) 2429 goto out_nomem; 2430 2431 /* inbound buffer pool */ 2432 if (qeth_alloc_buffer_pool(card)) 2433 goto out_freeinq; 2434 2435 /* outbound */ 2436 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2437 card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf(); 2438 if (!card->qdio.out_qs[i]) 2439 goto out_freeoutq; 2440 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); 2441 QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *)); 2442 card->qdio.out_qs[i]->queue_no = i; 2443 /* give outbound qeth_qdio_buffers their qdio_buffers */ 2444 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2445 WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL); 2446 if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j)) 2447 goto out_freeoutqbufs; 2448 } 2449 } 2450 2451 /* completion */ 2452 if (qeth_alloc_cq(card)) 2453 goto out_freeoutq; 2454 2455 return 0; 2456 2457 out_freeoutqbufs: 2458 while (j > 0) { 2459 --j; 2460 kmem_cache_free(qeth_qdio_outbuf_cache, 2461 card->qdio.out_qs[i]->bufs[j]); 2462 card->qdio.out_qs[i]->bufs[j] = NULL; 2463 } 2464 out_freeoutq: 2465 while (i > 0) { 2466 qeth_free_output_queue(card->qdio.out_qs[--i]); 2467 card->qdio.out_qs[i] = NULL; 2468 } 2469 qeth_free_buffer_pool(card); 2470 out_freeinq: 2471 qeth_free_qdio_queue(card->qdio.in_q); 2472 card->qdio.in_q = NULL; 2473 out_nomem: 2474 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2475 return -ENOMEM; 2476 } 2477 2478 static void qeth_free_qdio_buffers(struct qeth_card *card) 2479 { 2480 int i, j; 2481 2482 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 2483 QETH_QDIO_UNINITIALIZED) 2484 return; 2485 2486 qeth_free_cq(card); 2487 cancel_delayed_work_sync(&card->buffer_reclaim_work); 2488 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2489 if (card->qdio.in_q->bufs[j].rx_skb) 2490 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); 2491 } 2492 qeth_free_qdio_queue(card->qdio.in_q); 2493 card->qdio.in_q = NULL; 2494 /* inbound buffer pool */ 2495 qeth_free_buffer_pool(card); 2496 /* free outbound qdio_qs */ 2497 for (i = 0; i < card->qdio.no_out_queues; i++) { 2498 qeth_free_output_queue(card->qdio.out_qs[i]); 2499 card->qdio.out_qs[i] = NULL; 2500 } 2501 } 2502 2503 static void qeth_create_qib_param_field(struct qeth_card *card, 2504 char *param_field) 2505 { 2506 2507 param_field[0] = _ascebc['P']; 2508 param_field[1] = _ascebc['C']; 2509 param_field[2] = _ascebc['I']; 2510 param_field[3] = _ascebc['T']; 2511 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card); 2512 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card); 2513 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card); 2514 }
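/*
 * Taken together, qeth_create_qib_param_field() above and
 * qeth_create_qib_param_field_blkt() below lay out the QIB parameter
 * field as two EBCDIC-tagged records (byte offsets):
 *	 0..15	"PCIT": QETH_PCI_THRESHOLD_A/_B and QETH_PCI_TIMER_VALUE
 *	16..31	"BLKT": blkt.time_total, blkt.inter_packet and
 *		blkt.inter_packet_jumbo
 */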
2515 2516 static void qeth_create_qib_param_field_blkt(struct qeth_card *card, 2517 char *param_field) 2518 { 2519 param_field[16] = _ascebc['B']; 2520 param_field[17] = _ascebc['L']; 2521 param_field[18] = _ascebc['K']; 2522 param_field[19] = _ascebc['T']; 2523 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total; 2524 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet; 2525 *((unsigned int *) (&param_field[28])) = 2526 card->info.blkt.inter_packet_jumbo; 2527 } 2528 2529 static int qeth_qdio_activate(struct qeth_card *card) 2530 { 2531 QETH_DBF_TEXT(SETUP, 3, "qdioact"); 2532 return qdio_activate(CARD_DDEV(card)); 2533 } 2534 2535 static int qeth_dm_act(struct qeth_card *card) 2536 { 2537 int rc; 2538 struct qeth_cmd_buffer *iob; 2539 2540 QETH_DBF_TEXT(SETUP, 2, "dmact"); 2541 2542 iob = qeth_wait_for_buffer(&card->write); 2543 memcpy(iob->data, DM_ACT, DM_ACT_SIZE); 2544 2545 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), 2546 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2547 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), 2548 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2549 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL); 2550 return rc; 2551 } 2552 2553 static int qeth_mpc_initialize(struct qeth_card *card) 2554 { 2555 int rc; 2556 2557 QETH_DBF_TEXT(SETUP, 2, "mpcinit"); 2558 2559 rc = qeth_issue_next_read(card); 2560 if (rc) { 2561 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2562 return rc; 2563 } 2564 rc = qeth_cm_enable(card); 2565 if (rc) { 2566 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 2567 goto out_qdio; 2568 } 2569 rc = qeth_cm_setup(card); 2570 if (rc) { 2571 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 2572 goto out_qdio; 2573 } 2574 rc = qeth_ulp_enable(card); 2575 if (rc) { 2576 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 2577 goto out_qdio; 2578 } 2579 rc = qeth_ulp_setup(card); 2580 if (rc) { 2581 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 2582 goto out_qdio; 2583 } 2584 rc = qeth_alloc_qdio_buffers(card); 2585 if (rc) { 2586 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 2587 goto out_qdio; 2588 } 2589 rc = qeth_qdio_establish(card); 2590 if (rc) { 2591 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 2592 qeth_free_qdio_buffers(card); 2593 goto out_qdio; 2594 } 2595 rc = qeth_qdio_activate(card); 2596 if (rc) { 2597 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); 2598 goto out_qdio; 2599 } 2600 rc = qeth_dm_act(card); 2601 if (rc) { 2602 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); 2603 goto out_qdio; 2604 } 2605 2606 return 0; 2607 out_qdio: 2608 qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 2609 qdio_free(CARD_DDEV(card)); 2610 return rc; 2611 } 2612 2613 void qeth_print_status_message(struct qeth_card *card) 2614 { 2615 switch (card->info.type) { 2616 case QETH_CARD_TYPE_OSD: 2617 case QETH_CARD_TYPE_OSM: 2618 case QETH_CARD_TYPE_OSX: 2619 /* VM will use a non-zero first character to indicate 2620 * a HiperSockets-like reporting of the level; 2621 * OSA sets the first character to zero. 2622 */ 2623 if (!card->info.mcl_level[0]) { 2624 sprintf(card->info.mcl_level, "%02x%02x", 2625 card->info.mcl_level[2], 2626 card->info.mcl_level[3]); 2627 break; 2628 } 2629 /* fallthrough */ 2630 case QETH_CARD_TYPE_IQD: 2631 if ((card->info.guestlan) || 2632 (card->info.mcl_level[0] & 0x80)) { 2633 card->info.mcl_level[0] = (char) _ebcasc[(__u8) 2634 card->info.mcl_level[0]]; 2635 card->info.mcl_level[1] = (char) _ebcasc[(__u8) 2636 card->info.mcl_level[1]]; 2637 card->info.mcl_level[2] = (char) _ebcasc[(__u8) 2638 card->info.mcl_level[2]]; 2639
card->info.mcl_level[3] = (char) _ebcasc[(__u8) 2640 card->info.mcl_level[3]]; 2641 card->info.mcl_level[QETH_MCL_LENGTH] = 0; 2642 } 2643 break; 2644 default: 2645 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); 2646 } 2647 dev_info(&card->gdev->dev, 2648 "Device is a%s card%s%s%s\nwith link type %s.\n", 2649 qeth_get_cardname(card), 2650 (card->info.mcl_level[0]) ? " (level: " : "", 2651 (card->info.mcl_level[0]) ? card->info.mcl_level : "", 2652 (card->info.mcl_level[0]) ? ")" : "", 2653 qeth_get_cardname_short(card)); 2654 } 2655 EXPORT_SYMBOL_GPL(qeth_print_status_message); 2656 2657 static void qeth_initialize_working_pool_list(struct qeth_card *card) 2658 { 2659 struct qeth_buffer_pool_entry *entry; 2660 2661 QETH_CARD_TEXT(card, 5, "inwrklst"); 2662 2663 list_for_each_entry(entry, 2664 &card->qdio.init_pool.entry_list, init_list) { 2665 qeth_put_buffer_pool_entry(card, entry); 2666 } 2667 } 2668 2669 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( 2670 struct qeth_card *card) 2671 { 2672 struct list_head *plh; 2673 struct qeth_buffer_pool_entry *entry; 2674 int i, free; 2675 struct page *page; 2676 2677 if (list_empty(&card->qdio.in_buf_pool.entry_list)) 2678 return NULL; 2679 2680 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) { 2681 entry = list_entry(plh, struct qeth_buffer_pool_entry, list); 2682 free = 1; 2683 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2684 if (page_count(virt_to_page(entry->elements[i])) > 1) { 2685 free = 0; 2686 break; 2687 } 2688 } 2689 if (free) { 2690 list_del_init(&entry->list); 2691 return entry; 2692 } 2693 } 2694 2695 /* no free buffer in pool, so take the first one and swap pages */ 2696 entry = list_entry(card->qdio.in_buf_pool.entry_list.next, 2697 struct qeth_buffer_pool_entry, list); 2698 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2699 if (page_count(virt_to_page(entry->elements[i])) > 1) { 2700 page = alloc_page(GFP_ATOMIC); 2701 if (!page) { 2702 return NULL; 2703 } else { 2704 free_page((unsigned long)entry->elements[i]); 2705 entry->elements[i] = page_address(page); 2706 QETH_CARD_STAT_INC(card, rx_sg_alloc_page); 2707 } 2708 } 2709 } 2710 list_del_init(&entry->list); 2711 return entry; 2712 } 2713 2714 static int qeth_init_input_buffer(struct qeth_card *card, 2715 struct qeth_qdio_buffer *buf) 2716 { 2717 struct qeth_buffer_pool_entry *pool_entry; 2718 int i; 2719 2720 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { 2721 buf->rx_skb = netdev_alloc_skb(card->dev, 2722 QETH_RX_PULL_LEN + ETH_HLEN); 2723 if (!buf->rx_skb) 2724 return 1; 2725 } 2726 2727 pool_entry = qeth_find_free_buffer_pool_entry(card); 2728 if (!pool_entry) 2729 return 1; 2730 2731 /* 2732 * since the buffer is accessed only from the input_tasklet 2733 * there shouldn't be a need to synchronize; also, since we use 2734 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of 2735 * buffers 2736 */ 2737 2738 buf->pool_entry = pool_entry; 2739 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2740 buf->buffer->element[i].length = PAGE_SIZE; 2741 buf->buffer->element[i].addr = pool_entry->elements[i]; 2742 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2743 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; 2744 else 2745 buf->buffer->element[i].eflags = 0; 2746 buf->buffer->element[i].sflags = 0; 2747 } 2748 return 0; 2749 } 2750 2751 int qeth_init_qdio_queues(struct qeth_card *card) 2752 { 2753 int i, j; 2754 int rc; 2755 2756 QETH_DBF_TEXT(SETUP, 2, "initqdqs"); 2757 2758 /* inbound
queue */ 2759 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2760 memset(&card->rx, 0, sizeof(struct qeth_rx)); 2761 qeth_initialize_working_pool_list(card); 2762 /*give only as many buffers to hardware as we have buffer pool entries*/ 2763 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) 2764 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 2765 card->qdio.in_q->next_buf_to_init = 2766 card->qdio.in_buf_pool.buf_count - 1; 2767 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, 2768 card->qdio.in_buf_pool.buf_count - 1); 2769 if (rc) { 2770 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2771 return rc; 2772 } 2773 2774 /* completion */ 2775 rc = qeth_cq_init(card); 2776 if (rc) { 2777 return rc; 2778 } 2779 2780 /* outbound queue */ 2781 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2782 qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs, 2783 QDIO_MAX_BUFFERS_PER_Q); 2784 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2785 qeth_clear_output_buffer(card->qdio.out_qs[i], 2786 card->qdio.out_qs[i]->bufs[j]); 2787 } 2788 card->qdio.out_qs[i]->card = card; 2789 card->qdio.out_qs[i]->next_buf_to_fill = 0; 2790 card->qdio.out_qs[i]->do_pack = 0; 2791 atomic_set(&card->qdio.out_qs[i]->used_buffers, 0); 2792 atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0); 2793 atomic_set(&card->qdio.out_qs[i]->state, 2794 QETH_OUT_Q_UNLOCKED); 2795 } 2796 return 0; 2797 } 2798 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues); 2799 2800 static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) 2801 { 2802 switch (link_type) { 2803 case QETH_LINK_TYPE_HSTR: 2804 return 2; 2805 default: 2806 return 1; 2807 } 2808 } 2809 2810 static void qeth_fill_ipacmd_header(struct qeth_card *card, 2811 struct qeth_ipa_cmd *cmd, 2812 enum qeth_ipa_cmds command, 2813 enum qeth_prot_versions prot) 2814 { 2815 cmd->hdr.command = command; 2816 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; 2817 /* cmd->hdr.seqno is set by qeth_send_control_data() */ 2818 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); 2819 cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port; 2820 cmd->hdr.prim_version_no = IS_LAYER2(card) ? 
2 : 1; 2821 cmd->hdr.param_count = 1; 2822 cmd->hdr.prot_version = prot; 2823 } 2824 2825 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2826 u16 cmd_length) 2827 { 2828 u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length; 2829 u8 prot_type = qeth_mpc_select_prot_type(card); 2830 2831 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 2832 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 2833 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 2834 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 2835 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 2836 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 2837 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2838 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 2839 } 2840 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); 2841 2842 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, 2843 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot) 2844 { 2845 struct qeth_cmd_buffer *iob; 2846 2847 iob = qeth_get_buffer(&card->write); 2848 if (iob) { 2849 qeth_prepare_ipa_cmd(card, iob, sizeof(struct qeth_ipa_cmd)); 2850 qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot); 2851 } else { 2852 dev_warn(&card->gdev->dev, 2853 "The qeth driver ran out of channel command buffers\n"); 2854 QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers", 2855 CARD_DEVID(card)); 2856 } 2857 2858 return iob; 2859 } 2860 EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer); 2861 2862 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 2863 struct qeth_reply *reply, unsigned long data) 2864 { 2865 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2866 2867 return (cmd->hdr.return_code) ? -EIO : 0; 2868 } 2869 2870 /** 2871 * qeth_send_ipa_cmd() - send an IPA command 2872 * 2873 * See qeth_send_control_data() for explanation of the arguments. 2874 */ 2875 2876 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2877 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 2878 unsigned long), 2879 void *reply_param) 2880 { 2881 u16 length; 2882 int rc; 2883 2884 QETH_CARD_TEXT(card, 4, "sendipa"); 2885 2886 if (reply_cb == NULL) 2887 reply_cb = qeth_send_ipa_cmd_cb; 2888 memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2); 2889 rc = qeth_send_control_data(card, length, iob, reply_cb, reply_param); 2890 if (rc == -ETIME) { 2891 qeth_clear_ipacmd_list(card); 2892 qeth_schedule_recovery(card); 2893 } 2894 return rc; 2895 } 2896 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 2897 2898 static int qeth_send_startlan_cb(struct qeth_card *card, 2899 struct qeth_reply *reply, unsigned long data) 2900 { 2901 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2902 2903 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 2904 return -ENETDOWN; 2905 2906 return (cmd->hdr.return_code) ? 
-EIO : 0; 2907 } 2908 2909 static int qeth_send_startlan(struct qeth_card *card) 2910 { 2911 struct qeth_cmd_buffer *iob; 2912 2913 QETH_DBF_TEXT(SETUP, 2, "strtlan"); 2914 2915 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); 2916 if (!iob) 2917 return -ENOMEM; 2918 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 2919 } 2920 2921 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 2922 { 2923 if (!cmd->hdr.return_code) 2924 cmd->hdr.return_code = 2925 cmd->data.setadapterparms.hdr.return_code; 2926 return cmd->hdr.return_code; 2927 } 2928 2929 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 2930 struct qeth_reply *reply, unsigned long data) 2931 { 2932 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2933 2934 QETH_CARD_TEXT(card, 3, "quyadpcb"); 2935 if (qeth_setadpparms_inspect_rc(cmd)) 2936 return -EIO; 2937 2938 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { 2939 card->info.link_type = 2940 cmd->data.setadapterparms.data.query_cmds_supp.lan_type; 2941 QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type); 2942 } 2943 card->options.adp.supported_funcs = 2944 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; 2945 return 0; 2946 } 2947 2948 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 2949 __u32 command, __u32 cmdlen) 2950 { 2951 struct qeth_cmd_buffer *iob; 2952 struct qeth_ipa_cmd *cmd; 2953 2954 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, 2955 QETH_PROT_IPV4); 2956 if (iob) { 2957 cmd = __ipa_cmd(iob); 2958 cmd->data.setadapterparms.hdr.cmdlength = cmdlen; 2959 cmd->data.setadapterparms.hdr.command_code = command; 2960 cmd->data.setadapterparms.hdr.used_total = 1; 2961 cmd->data.setadapterparms.hdr.seq_no = 1; 2962 } 2963 2964 return iob; 2965 } 2966 2967 static int qeth_query_setadapterparms(struct qeth_card *card) 2968 { 2969 int rc; 2970 struct qeth_cmd_buffer *iob; 2971 2972 QETH_CARD_TEXT(card, 3, "queryadp"); 2973 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 2974 sizeof(struct qeth_ipacmd_setadpparms)); 2975 if (!iob) 2976 return -ENOMEM; 2977 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 2978 return rc; 2979 } 2980 2981 static int qeth_query_ipassists_cb(struct qeth_card *card, 2982 struct qeth_reply *reply, unsigned long data) 2983 { 2984 struct qeth_ipa_cmd *cmd; 2985 2986 QETH_DBF_TEXT(SETUP, 2, "qipasscb"); 2987 2988 cmd = (struct qeth_ipa_cmd *) data; 2989 2990 switch (cmd->hdr.return_code) { 2991 case IPA_RC_SUCCESS: 2992 break; 2993 case IPA_RC_NOTSUPP: 2994 case IPA_RC_L2_UNSUPPORTED_CMD: 2995 QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); 2996 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; 2997 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; 2998 return -EOPNOTSUPP; 2999 default: 3000 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 3001 CARD_DEVID(card), cmd->hdr.return_code); 3002 return -EIO; 3003 } 3004 3005 if (cmd->hdr.prot_version == QETH_PROT_IPV4) { 3006 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; 3007 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 3008 } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { 3009 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; 3010 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 3011 } else 3012 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 3013 CARD_DEVID(card)); 3014 return 0; 3015 } 3016 3017 static int 
qeth_query_ipassists(struct qeth_card *card, 3018 enum qeth_prot_versions prot) 3019 { 3020 int rc; 3021 struct qeth_cmd_buffer *iob; 3022 3023 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); 3024 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); 3025 if (!iob) 3026 return -ENOMEM; 3027 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 3028 return rc; 3029 } 3030 3031 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3032 struct qeth_reply *reply, unsigned long data) 3033 { 3034 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3035 struct qeth_query_switch_attributes *attrs; 3036 struct qeth_switch_info *sw_info; 3037 3038 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3039 if (qeth_setadpparms_inspect_rc(cmd)) 3040 return -EIO; 3041 3042 sw_info = (struct qeth_switch_info *)reply->param; 3043 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 3044 sw_info->capabilities = attrs->capabilities; 3045 sw_info->settings = attrs->settings; 3046 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 3047 sw_info->settings); 3048 return 0; 3049 } 3050 3051 int qeth_query_switch_attributes(struct qeth_card *card, 3052 struct qeth_switch_info *sw_info) 3053 { 3054 struct qeth_cmd_buffer *iob; 3055 3056 QETH_CARD_TEXT(card, 2, "qswiattr"); 3057 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 3058 return -EOPNOTSUPP; 3059 if (!netif_carrier_ok(card->dev)) 3060 return -ENOMEDIUM; 3061 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 3062 sizeof(struct qeth_ipacmd_setadpparms_hdr)); 3063 if (!iob) 3064 return -ENOMEM; 3065 return qeth_send_ipa_cmd(card, iob, 3066 qeth_query_switch_attributes_cb, sw_info); 3067 } 3068 3069 static int qeth_query_setdiagass_cb(struct qeth_card *card, 3070 struct qeth_reply *reply, unsigned long data) 3071 { 3072 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3073 u16 rc = cmd->hdr.return_code; 3074 3075 if (rc) { 3076 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 3077 return -EIO; 3078 } 3079 3080 card->info.diagass_support = cmd->data.diagass.ext; 3081 return 0; 3082 } 3083 3084 static int qeth_query_setdiagass(struct qeth_card *card) 3085 { 3086 struct qeth_cmd_buffer *iob; 3087 struct qeth_ipa_cmd *cmd; 3088 3089 QETH_DBF_TEXT(SETUP, 2, "qdiagass"); 3090 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 3091 if (!iob) 3092 return -ENOMEM; 3093 cmd = __ipa_cmd(iob); 3094 cmd->data.diagass.subcmd_len = 16; 3095 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; 3096 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3097 } 3098 3099 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3100 { 3101 unsigned long info = get_zeroed_page(GFP_KERNEL); 3102 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3103 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3104 struct ccw_dev_id ccwid; 3105 int level; 3106 3107 tid->chpid = card->info.chpid; 3108 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3109 tid->ssid = ccwid.ssid; 3110 tid->devno = ccwid.devno; 3111 if (!info) 3112 return; 3113 level = stsi(NULL, 0, 0, 0); 3114 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) 3115 tid->lparnr = info222->lpar_number; 3116 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { 3117 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); 3118 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); 3119 } 3120 free_page(info); 3121 return; 3122 } 3123 3124 static int qeth_hw_trap_cb(struct qeth_card *card, 
3125 struct qeth_reply *reply, unsigned long data) 3126 { 3127 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3128 u16 rc = cmd->hdr.return_code; 3129 3130 if (rc) { 3131 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); 3132 return -EIO; 3133 } 3134 return 0; 3135 } 3136 3137 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) 3138 { 3139 struct qeth_cmd_buffer *iob; 3140 struct qeth_ipa_cmd *cmd; 3141 3142 QETH_DBF_TEXT(SETUP, 2, "diagtrap"); 3143 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 3144 if (!iob) 3145 return -ENOMEM; 3146 cmd = __ipa_cmd(iob); 3147 cmd->data.diagass.subcmd_len = 80; 3148 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; 3149 cmd->data.diagass.type = 1; 3150 cmd->data.diagass.action = action; 3151 switch (action) { 3152 case QETH_DIAGS_TRAP_ARM: 3153 cmd->data.diagass.options = 0x0003; 3154 cmd->data.diagass.ext = 0x00010000 + 3155 sizeof(struct qeth_trap_id); 3156 qeth_get_trap_id(card, 3157 (struct qeth_trap_id *)cmd->data.diagass.cdata); 3158 break; 3159 case QETH_DIAGS_TRAP_DISARM: 3160 cmd->data.diagass.options = 0x0001; 3161 break; 3162 case QETH_DIAGS_TRAP_CAPTURE: 3163 break; 3164 } 3165 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); 3166 } 3167 EXPORT_SYMBOL_GPL(qeth_hw_trap); 3168 3169 static int qeth_check_qdio_errors(struct qeth_card *card, 3170 struct qdio_buffer *buf, 3171 unsigned int qdio_error, 3172 const char *dbftext) 3173 { 3174 if (qdio_error) { 3175 QETH_CARD_TEXT(card, 2, dbftext); 3176 QETH_CARD_TEXT_(card, 2, " F15=%02X", 3177 buf->element[15].sflags); 3178 QETH_CARD_TEXT_(card, 2, " F14=%02X", 3179 buf->element[14].sflags); 3180 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 3181 if ((buf->element[15].sflags) == 0x12) { 3182 QETH_CARD_STAT_INC(card, rx_dropped); 3183 return 0; 3184 } else 3185 return 1; 3186 } 3187 return 0; 3188 } 3189 3190 static void qeth_queue_input_buffer(struct qeth_card *card, int index) 3191 { 3192 struct qeth_qdio_q *queue = card->qdio.in_q; 3193 struct list_head *lh; 3194 int count; 3195 int i; 3196 int rc; 3197 int newcount = 0; 3198 3199 count = (index < queue->next_buf_to_init)? 3200 card->qdio.in_buf_pool.buf_count - 3201 (queue->next_buf_to_init - index) : 3202 card->qdio.in_buf_pool.buf_count - 3203 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index); 3204 /* only requeue at a certain threshold to avoid SIGAs */ 3205 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { 3206 for (i = queue->next_buf_to_init; 3207 i < queue->next_buf_to_init + count; ++i) { 3208 if (qeth_init_input_buffer(card, 3209 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) { 3210 break; 3211 } else { 3212 newcount++; 3213 } 3214 } 3215 3216 if (newcount < count) { 3217 /* we are in a memory shortage, so we switch back to 3218 traditional skb allocation and drop packets */ 3219 atomic_set(&card->force_alloc_skb, 3); 3220 count = newcount; 3221 } else { 3222 atomic_add_unless(&card->force_alloc_skb, -1, 0); 3223 } 3224 3225 if (!count) { 3226 i = 0; 3227 list_for_each(lh, &card->qdio.in_buf_pool.entry_list) 3228 i++; 3229 if (i == card->qdio.in_buf_pool.buf_count) { 3230 QETH_CARD_TEXT(card, 2, "qsarbw"); 3231 card->reclaim_index = index; 3232 schedule_delayed_work( 3233 &card->buffer_reclaim_work, 3234 QETH_RECLAIM_WORK_TIME); 3235 } 3236 return; 3237 } 3238 3239 /* 3240 * according to old code, requeueing all 128 buffers should be 3241 * avoided in order to benefit from PCI avoidance.
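 * (as a worked example of the count computation above: with an
 * in_buf_pool of buf_count = 64 entries and a wrap-around distance of
 * (next_buf_to_init - index) mod 128 = 32 slots, count = 64 - 32 = 32
 * buffers are re-initialized and handed back in one do_QDIO() call)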
3242 * this function keeps at least one buffer (the buffer at 3243 * 'index') un-requeued -> this buffer is the first buffer that 3244 * will be requeued the next time 3245 */ 3246 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 3247 queue->next_buf_to_init, count); 3248 if (rc) { 3249 QETH_CARD_TEXT(card, 2, "qinberr"); 3250 } 3251 queue->next_buf_to_init = (queue->next_buf_to_init + count) % 3252 QDIO_MAX_BUFFERS_PER_Q; 3253 } 3254 } 3255 3256 static void qeth_buffer_reclaim_work(struct work_struct *work) 3257 { 3258 struct qeth_card *card = container_of(work, struct qeth_card, 3259 buffer_reclaim_work.work); 3260 3261 QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); 3262 qeth_queue_input_buffer(card, card->reclaim_index); 3263 } 3264 3265 static void qeth_handle_send_error(struct qeth_card *card, 3266 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 3267 { 3268 int sbalf15 = buffer->buffer->element[15].sflags; 3269 3270 QETH_CARD_TEXT(card, 6, "hdsnderr"); 3271 if (card->info.type == QETH_CARD_TYPE_IQD) { 3272 if (sbalf15 == 0) { 3273 qdio_err = 0; 3274 } else { 3275 qdio_err = 1; 3276 } 3277 } 3278 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); 3279 3280 if (!qdio_err) 3281 return; 3282 3283 if ((sbalf15 >= 15) && (sbalf15 <= 31)) 3284 return; 3285 3286 QETH_CARD_TEXT(card, 1, "lnkfail"); 3287 QETH_CARD_TEXT_(card, 1, "%04x %02x", 3288 (u16)qdio_err, (u8)sbalf15); 3289 } 3290 3291 /** 3292 * qeth_prep_flush_pack_buffer() - Prepares flushing of a packing buffer. 3293 * @queue: queue to check for packing buffer 3294 * 3295 * Returns number of buffers that were prepared for flush. 3296 */ 3297 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) 3298 { 3299 struct qeth_qdio_out_buffer *buffer; 3300 3301 buffer = queue->bufs[queue->next_buf_to_fill]; 3302 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && 3303 (buffer->next_element_to_fill > 0)) { 3304 /* it's a packing buffer */ 3305 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3306 queue->next_buf_to_fill = 3307 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; 3308 return 1; 3309 } 3310 return 0; 3311 } 3312 3313 /* 3314 * Switches to packing state if the number of used buffers on a queue 3315 * reaches a certain limit. 3316 */ 3317 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 3318 { 3319 if (!queue->do_pack) { 3320 if (atomic_read(&queue->used_buffers) 3321 >= QETH_HIGH_WATERMARK_PACK) { 3322 /* switch non-PACKING -> PACKING */ 3323 QETH_CARD_TEXT(queue->card, 6, "np->pack"); 3324 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3325 queue->do_pack = 1; 3326 } 3327 } 3328 } 3329 3330 /* 3331 * Switches from packing to non-packing mode. If there is a packing 3332 * buffer on the queue, this buffer will be prepared to be flushed. 3333 * In that case 1 is returned to inform the caller. If no buffer 3334 * has to be flushed, zero is returned.
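 * Together with qeth_switch_to_packing_if_needed() above this forms a
 * hysteresis: packing starts once used_buffers reaches
 * QETH_HIGH_WATERMARK_PACK and only stops again when it has dropped to
 * QETH_LOW_WATERMARK_PACK, so a queue hovering around a single
 * threshold does not flip modes on every sent packet.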
3335 */ 3336 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 3337 { 3338 if (queue->do_pack) { 3339 if (atomic_read(&queue->used_buffers) 3340 <= QETH_LOW_WATERMARK_PACK) { 3341 /* switch PACKING -> non-PACKING */ 3342 QETH_CARD_TEXT(queue->card, 6, "pack->np"); 3343 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3344 queue->do_pack = 0; 3345 return qeth_prep_flush_pack_buffer(queue); 3346 } 3347 } 3348 return 0; 3349 } 3350 3351 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, 3352 int count) 3353 { 3354 struct qeth_qdio_out_buffer *buf; 3355 int rc; 3356 int i; 3357 unsigned int qdio_flags; 3358 3359 for (i = index; i < index + count; ++i) { 3360 int bidx = i % QDIO_MAX_BUFFERS_PER_Q; 3361 buf = queue->bufs[bidx]; 3362 buf->buffer->element[buf->next_element_to_fill - 1].eflags |= 3363 SBAL_EFLAGS_LAST_ENTRY; 3364 3365 if (queue->bufstates) 3366 queue->bufstates[bidx].user = buf; 3367 3368 if (queue->card->info.type == QETH_CARD_TYPE_IQD) 3369 continue; 3370 3371 if (!queue->do_pack) { 3372 if ((atomic_read(&queue->used_buffers) >= 3373 (QETH_HIGH_WATERMARK_PACK - 3374 QETH_WATERMARK_PACK_FUZZ)) && 3375 !atomic_read(&queue->set_pci_flags_count)) { 3376 /* it's likely that we'll go to packing 3377 * mode soon */ 3378 atomic_inc(&queue->set_pci_flags_count); 3379 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3380 } 3381 } else { 3382 if (!atomic_read(&queue->set_pci_flags_count)) { 3383 /* 3384 * there's no outstanding PCI any more, so we 3385 * have to request a PCI to be sure that the PCI 3386 * will wake at some time in the future; then we 3387 * can flush packed buffers that might still be 3388 * hanging around, which can happen if no 3389 * further send was requested by the stack 3390 */ 3391 atomic_inc(&queue->set_pci_flags_count); 3392 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3393 } 3394 } 3395 } 3396 3397 QETH_TXQ_STAT_ADD(queue, bufs, count); 3398 netif_trans_update(queue->card->dev); 3399 qdio_flags = QDIO_FLAG_SYNC_OUTPUT; 3400 if (atomic_read(&queue->set_pci_flags_count)) 3401 qdio_flags |= QDIO_FLAG_PCI_OUT; 3402 atomic_add(count, &queue->used_buffers); 3403 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, 3404 queue->queue_no, index, count); 3405 if (rc) { 3406 QETH_TXQ_STAT_ADD(queue, tx_errors, count); 3407 /* ignore temporary SIGA errors without busy condition */ 3408 if (rc == -ENOBUFS) 3409 return; 3410 QETH_CARD_TEXT(queue->card, 2, "flushbuf"); 3411 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); 3412 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); 3413 QETH_CARD_TEXT_(queue->card, 2, " c%d", count); 3414 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); 3415 3416 /* this must not happen under normal circumstances. if it 3417 * happens something is really wrong -> recover */ 3418 qeth_schedule_recovery(queue->card); 3419 return; 3420 } 3421 } 3422 3423 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 3424 { 3425 int index; 3426 int flush_cnt = 0; 3427 int q_was_packing = 0; 3428 3429 /* 3430 * check if we have to switch to non-packing mode or if 3431 * we have to get a pci flag out on the queue 3432 */ 3433 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || 3434 !atomic_read(&queue->set_pci_flags_count)) { 3435 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == 3436 QETH_OUT_Q_UNLOCKED) { 3437 /* 3438 * If we get in here, there was no action in 3439 * do_send_packet. So, we check if there is a 3440 * packing buffer to be flushed here.
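 * (Moving the queue state from UNLOCKED to LOCKED_FLUSH here also
 * excludes qeth_do_send_packet() below, which spins on the
 * UNLOCKED -> LOCKED transition, so the flush cannot race with a
 * concurrent fill of the same buffer.)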
3441 */ 3442 netif_stop_queue(queue->card->dev); 3443 index = queue->next_buf_to_fill; 3444 q_was_packing = queue->do_pack; 3445 /* queue->do_pack may change */ 3446 barrier(); 3447 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue); 3448 if (!flush_cnt && 3449 !atomic_read(&queue->set_pci_flags_count)) 3450 flush_cnt += qeth_prep_flush_pack_buffer(queue); 3451 if (q_was_packing) 3452 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); 3453 if (flush_cnt) 3454 qeth_flush_buffers(queue, index, flush_cnt); 3455 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3456 } 3457 } 3458 } 3459 3460 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, 3461 unsigned long card_ptr) 3462 { 3463 struct qeth_card *card = (struct qeth_card *)card_ptr; 3464 3465 if (card->dev->flags & IFF_UP) 3466 napi_schedule(&card->napi); 3467 } 3468 3469 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) 3470 { 3471 int rc; 3472 3473 if (card->options.cq == QETH_CQ_NOTAVAILABLE) { 3474 rc = -1; 3475 goto out; 3476 } else { 3477 if (card->options.cq == cq) { 3478 rc = 0; 3479 goto out; 3480 } 3481 3482 if (card->state != CARD_STATE_DOWN && 3483 card->state != CARD_STATE_RECOVER) { 3484 rc = -1; 3485 goto out; 3486 } 3487 3488 qeth_free_qdio_buffers(card); 3489 card->options.cq = cq; 3490 rc = 0; 3491 } 3492 out: 3493 return rc; 3494 3495 } 3496 EXPORT_SYMBOL_GPL(qeth_configure_cq); 3497 3498 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, 3499 unsigned int queue, int first_element, 3500 int count) 3501 { 3502 struct qeth_qdio_q *cq = card->qdio.c_q; 3503 int i; 3504 int rc; 3505 3506 if (!qeth_is_cq(card, queue)) 3507 return; 3508 3509 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); 3510 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); 3511 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); 3512 3513 if (qdio_err) { 3514 netif_stop_queue(card->dev); 3515 qeth_schedule_recovery(card); 3516 return; 3517 } 3518 3519 for (i = first_element; i < first_element + count; ++i) { 3520 int bidx = i % QDIO_MAX_BUFFERS_PER_Q; 3521 struct qdio_buffer *buffer = cq->qdio_bufs[bidx]; 3522 int e = 0; 3523 3524 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && 3525 buffer->element[e].addr) { 3526 unsigned long phys_aob_addr; 3527 3528 phys_aob_addr = (unsigned long) buffer->element[e].addr; 3529 qeth_qdio_handle_aob(card, phys_aob_addr); 3530 ++e; 3531 } 3532 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); 3533 } 3534 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, 3535 card->qdio.c_q->next_buf_to_init, 3536 count); 3537 if (rc) { 3538 dev_warn(&card->gdev->dev, 3539 "QDIO reported an error, rc=%i\n", rc); 3540 QETH_CARD_TEXT(card, 2, "qcqherr"); 3541 } 3542 card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init 3543 + count) % QDIO_MAX_BUFFERS_PER_Q; 3544 } 3545 3546 static void qeth_qdio_input_handler(struct ccw_device *ccwdev, 3547 unsigned int qdio_err, int queue, 3548 int first_elem, int count, 3549 unsigned long card_ptr) 3550 { 3551 struct qeth_card *card = (struct qeth_card *)card_ptr; 3552 3553 QETH_CARD_TEXT_(card, 2, "qihq%d", queue); 3554 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); 3555 3556 if (qeth_is_cq(card, queue)) 3557 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); 3558 else if (qdio_err) 3559 qeth_schedule_recovery(card); 3560 } 3561 3562 static void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3563 unsigned int qdio_error, int __queue, 3564 int first_element, int count, 3565 unsigned long card_ptr) 3566 
{ 3567 struct qeth_card *card = (struct qeth_card *) card_ptr; 3568 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 3569 struct qeth_qdio_out_buffer *buffer; 3570 int i; 3571 3572 QETH_CARD_TEXT(card, 6, "qdouhdl"); 3573 if (qdio_error & QDIO_ERROR_FATAL) { 3574 QETH_CARD_TEXT(card, 2, "achkcond"); 3575 netif_stop_queue(card->dev); 3576 qeth_schedule_recovery(card); 3577 return; 3578 } 3579 3580 for (i = first_element; i < (first_element + count); ++i) { 3581 int bidx = i % QDIO_MAX_BUFFERS_PER_Q; 3582 buffer = queue->bufs[bidx]; 3583 qeth_handle_send_error(card, buffer, qdio_error); 3584 3585 if (queue->bufstates && 3586 (queue->bufstates[bidx].flags & 3587 QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) { 3588 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); 3589 3590 if (atomic_cmpxchg(&buffer->state, 3591 QETH_QDIO_BUF_PRIMED, 3592 QETH_QDIO_BUF_PENDING) == 3593 QETH_QDIO_BUF_PRIMED) { 3594 qeth_notify_skbs(queue, buffer, 3595 TX_NOTIFY_PENDING); 3596 } 3597 QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); 3598 3599 /* prepare the queue slot for re-use: */ 3600 qeth_scrub_qdio_buffer(buffer->buffer, 3601 QETH_MAX_BUFFER_ELEMENTS(card)); 3602 if (qeth_init_qdio_out_buf(queue, bidx)) { 3603 QETH_CARD_TEXT(card, 2, "outofbuf"); 3604 qeth_schedule_recovery(card); 3605 } 3606 } else { 3607 if (card->options.cq == QETH_CQ_ENABLED) { 3608 enum iucv_tx_notify n; 3609 3610 n = qeth_compute_cq_notification( 3611 buffer->buffer->element[15].sflags, 0); 3612 qeth_notify_skbs(queue, buffer, n); 3613 } 3614 3615 qeth_clear_output_buffer(queue, buffer); 3616 } 3617 qeth_cleanup_handled_pending(queue, bidx, 0); 3618 } 3619 atomic_sub(count, &queue->used_buffers); 3620 /* check if we need to do something on this outbound queue */ 3621 if (card->info.type != QETH_CARD_TYPE_IQD) 3622 qeth_check_outbound_queue(queue); 3623 3624 netif_wake_queue(queue->card->dev); 3625 } 3626 3627 /* We cannot use outbound queue 3 for unicast packets on HiperSockets */ 3628 static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) 3629 { 3630 if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3)) 3631 return 2; 3632 return queue_num; 3633 } 3634 3635 /** 3636 * Note: Function assumes that we have 4 outbound queues. 
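 * For QETH_PRIO_Q_ING_PREC, for instance, the IP precedence bits select
 * the queue via (~tos >> 6) & 3: precedence 0/1/2/3 maps to queue
 * 3/2/1/0, so higher-precedence traffic uses a lower (= more preferred)
 * queue number; qeth_cut_iqd_prio() then folds queue 3 onto queue 2 for
 * HiperSockets devices.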
3637 */ 3638 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, 3639 int ipv) 3640 { 3641 __be16 *tci; 3642 u8 tos; 3643 3644 switch (card->qdio.do_prio_queueing) { 3645 case QETH_PRIO_Q_ING_TOS: 3646 case QETH_PRIO_Q_ING_PREC: 3647 switch (ipv) { 3648 case 4: 3649 tos = ipv4_get_dsfield(ip_hdr(skb)); 3650 break; 3651 case 6: 3652 tos = ipv6_get_dsfield(ipv6_hdr(skb)); 3653 break; 3654 default: 3655 return card->qdio.default_out_queue; 3656 } 3657 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) 3658 return qeth_cut_iqd_prio(card, ~tos >> 6 & 3); 3659 if (tos & IPTOS_MINCOST) 3660 return qeth_cut_iqd_prio(card, 3); 3661 if (tos & IPTOS_RELIABILITY) 3662 return 2; 3663 if (tos & IPTOS_THROUGHPUT) 3664 return 1; 3665 if (tos & IPTOS_LOWDELAY) 3666 return 0; 3667 break; 3668 case QETH_PRIO_Q_ING_SKB: 3669 if (skb->priority > 5) 3670 return 0; 3671 return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3); 3672 case QETH_PRIO_Q_ING_VLAN: 3673 tci = &((struct ethhdr *)skb->data)->h_proto; 3674 if (be16_to_cpu(*tci) == ETH_P_8021Q) 3675 return qeth_cut_iqd_prio(card, 3676 ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3); 3677 break; 3678 default: 3679 break; 3680 } 3681 return card->qdio.default_out_queue; 3682 } 3683 EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3684 3685 /** 3686 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. 3687 * @skb: SKB address 3688 * 3689 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3690 * fragmented part of the SKB. Returns zero for linear SKB. 3691 */ 3692 static int qeth_get_elements_for_frags(struct sk_buff *skb) 3693 { 3694 int cnt, elements = 0; 3695 3696 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3697 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt]; 3698 3699 elements += qeth_get_elements_for_range( 3700 (addr_t)skb_frag_address(frag), 3701 (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); 3702 } 3703 return elements; 3704 } 3705 3706 /** 3707 * qeth_count_elements() - Counts the number of QDIO buffer elements needed 3708 * to transmit an skb. 3709 * @skb: the skb to operate on. 3710 * @data_offset: skip this part of the skb's linear data 3711 * 3712 * Returns the number of pages, and thus QDIO buffer elements, needed to map the 3713 * skb's data (both its linear part and paged fragments). 3714 */ 3715 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset) 3716 { 3717 unsigned int elements = qeth_get_elements_for_frags(skb); 3718 addr_t end = (addr_t)skb->data + skb_headlen(skb); 3719 addr_t start = (addr_t)skb->data + data_offset; 3720 3721 if (start != end) 3722 elements += qeth_get_elements_for_range(start, end); 3723 return elements; 3724 } 3725 EXPORT_SYMBOL_GPL(qeth_count_elements); 3726 3727 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ 3728 MAX_TCP_HEADER) 3729 3730 /** 3731 * qeth_add_hw_header() - add a HW header to an skb. 3732 * @skb: skb that the HW header should be added to. 3733 * @hdr: double pointer to a qeth_hdr. When returning with >= 0, 3734 * it contains a valid pointer to a qeth_hdr. 3735 * @hdr_len: length of the HW header. 3736 * @proto_len: length of protocol headers that need to be in same page as the 3737 * HW header. 3738 * 3739 * Returns the pushed length. If the header can't be pushed on 3740 * (eg. because it would cross a page boundary), it is allocated from 3741 * the cache instead and 0 is returned. 3742 * The number of needed buffer elements is returned in @elements. 
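 * Three layouts are tried in order: pushing the HW header into the same
 * page as the (contiguous) protocol headers, pushing it as the sole
 * user of a page, and finally falling back to an object from
 * qeth_core_header_cache with @proto_len bytes of protocol headers
 * copied in behind the HW header.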
3743 * Errors during creation of the hdr are indicated by a return value < 0. 3744 */ 3745 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, 3746 struct sk_buff *skb, struct qeth_hdr **hdr, 3747 unsigned int hdr_len, unsigned int proto_len, 3748 unsigned int *elements) 3749 { 3750 const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card); 3751 const unsigned int contiguous = proto_len ? proto_len : 1; 3752 unsigned int __elements; 3753 addr_t start, end; 3754 bool push_ok; 3755 int rc; 3756 3757 check_layout: 3758 start = (addr_t)skb->data - hdr_len; 3759 end = (addr_t)skb->data; 3760 3761 if (qeth_get_elements_for_range(start, end + contiguous) == 1) { 3762 /* Push HW header into same page as first protocol header. */ 3763 push_ok = true; 3764 /* ... but TSO always needs a separate element for headers: */ 3765 if (skb_is_gso(skb)) 3766 __elements = 1 + qeth_count_elements(skb, proto_len); 3767 else 3768 __elements = qeth_count_elements(skb, 0); 3769 } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) { 3770 /* Push HW header into a new page. */ 3771 push_ok = true; 3772 __elements = 1 + qeth_count_elements(skb, 0); 3773 } else { 3774 /* Use header cache, copy protocol headers up. */ 3775 push_ok = false; 3776 __elements = 1 + qeth_count_elements(skb, proto_len); 3777 } 3778 3779 /* Compress skb to fit into one IO buffer: */ 3780 if (__elements > max_elements) { 3781 if (!skb_is_nonlinear(skb)) { 3782 /* Drop it, no easy way of shrinking it further. */ 3783 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 3784 max_elements, __elements, skb->len); 3785 return -E2BIG; 3786 } 3787 3788 rc = skb_linearize(skb); 3789 if (rc) { 3790 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 3791 return rc; 3792 } 3793 3794 QETH_TXQ_STAT_INC(queue, skbs_linearized); 3795 /* Linearization changed the layout, re-evaluate: */ 3796 goto check_layout; 3797 } 3798 3799 *elements = __elements; 3800 /* Add the header: */ 3801 if (push_ok) { 3802 *hdr = skb_push(skb, hdr_len); 3803 return hdr_len; 3804 } 3805 /* fall back */ 3806 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 3807 return -E2BIG; 3808 *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 3809 if (!*hdr) 3810 return -ENOMEM; 3811 /* Copy protocol headers behind HW header: */ 3812 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 3813 return 0; 3814 } 3815 3816 static void __qeth_fill_buffer(struct sk_buff *skb, 3817 struct qeth_qdio_out_buffer *buf, 3818 bool is_first_elem, unsigned int offset) 3819 { 3820 struct qdio_buffer *buffer = buf->buffer; 3821 int element = buf->next_element_to_fill; 3822 int length = skb_headlen(skb) - offset; 3823 char *data = skb->data + offset; 3824 int length_here, cnt; 3825 3826 /* map linear part into buffer element(s) */ 3827 while (length > 0) { 3828 /* length_here is the remaining amount of data in this page */ 3829 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); 3830 if (length < length_here) 3831 length_here = length; 3832 3833 buffer->element[element].addr = data; 3834 buffer->element[element].length = length_here; 3835 length -= length_here; 3836 if (is_first_elem) { 3837 is_first_elem = false; 3838 if (length || skb_is_nonlinear(skb)) 3839 /* skb needs additional elements */ 3840 buffer->element[element].eflags = 3841 SBAL_EFLAGS_FIRST_FRAG; 3842 else 3843 buffer->element[element].eflags = 0; 3844 } else { 3845 buffer->element[element].eflags = 3846 SBAL_EFLAGS_MIDDLE_FRAG; 3847 } 3848 data
+= length_here; 3849 element++; 3850 } 3851 3852 /* map page frags into buffer element(s) */ 3853 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3854 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3855 3856 data = skb_frag_address(frag); 3857 length = skb_frag_size(frag); 3858 while (length > 0) { 3859 length_here = PAGE_SIZE - 3860 ((unsigned long) data % PAGE_SIZE); 3861 if (length < length_here) 3862 length_here = length; 3863 3864 buffer->element[element].addr = data; 3865 buffer->element[element].length = length_here; 3866 buffer->element[element].eflags = 3867 SBAL_EFLAGS_MIDDLE_FRAG; 3868 length -= length_here; 3869 data += length_here; 3870 element++; 3871 } 3872 } 3873 3874 if (buffer->element[element - 1].eflags) 3875 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 3876 buf->next_element_to_fill = element; 3877 } 3878 3879 /** 3880 * qeth_fill_buffer() - map skb into an output buffer 3881 * @queue: QDIO queue to submit the buffer on 3882 * @buf: buffer to transport the skb 3883 * @skb: skb to map into the buffer 3884 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 3885 * from qeth_core_header_cache. 3886 * @offset: when mapping the skb, start at skb->data + offset 3887 * @hd_len: if > 0, build a dedicated header element of this size 3888 */ 3889 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, 3890 struct qeth_qdio_out_buffer *buf, 3891 struct sk_buff *skb, struct qeth_hdr *hdr, 3892 unsigned int offset, unsigned int hd_len) 3893 { 3894 struct qdio_buffer *buffer = buf->buffer; 3895 bool is_first_elem = true; 3896 3897 __skb_queue_tail(&buf->skb_list, skb); 3898 3899 /* build dedicated header element */ 3900 if (hd_len) { 3901 int element = buf->next_element_to_fill; 3902 is_first_elem = false; 3903 3904 buffer->element[element].addr = hdr; 3905 buffer->element[element].length = hd_len; 3906 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 3907 /* remember to free cache-allocated qeth_hdr: */ 3908 buf->is_header[element] = ((void *)hdr != skb->data); 3909 buf->next_element_to_fill++; 3910 } 3911 3912 __qeth_fill_buffer(skb, buf, is_first_elem, offset); 3913 3914 if (!queue->do_pack) { 3915 QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); 3916 } else { 3917 QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); 3918 3919 QETH_TXQ_STAT_INC(queue, skbs_pack); 3920 /* If the buffer still has free elements, keep using it. 
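		 * A partially-filled packing buffer is flushed later: when it
		 * fills up, when the queue switches back to non-packing mode,
		 * or when a flush is forced to get a PCI interrupt flag out.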
*/ 3921 if (buf->next_element_to_fill < 3922 QETH_MAX_BUFFER_ELEMENTS(queue->card)) 3923 return 0; 3924 } 3925 3926 /* flush out the buffer */ 3927 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); 3928 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % 3929 QDIO_MAX_BUFFERS_PER_Q; 3930 return 1; 3931 } 3932 3933 static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, 3934 struct sk_buff *skb, struct qeth_hdr *hdr, 3935 unsigned int offset, unsigned int hd_len) 3936 { 3937 int index = queue->next_buf_to_fill; 3938 struct qeth_qdio_out_buffer *buffer = queue->bufs[index]; 3939 3940 /* 3941 * check if buffer is empty to make sure that we do not 'overtake' 3942 * ourselves and try to fill a buffer that is already primed 3943 */ 3944 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 3945 return -EBUSY; 3946 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); 3947 qeth_flush_buffers(queue, index, 1); 3948 return 0; 3949 } 3950 3951 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 3952 struct sk_buff *skb, struct qeth_hdr *hdr, 3953 unsigned int offset, unsigned int hd_len, 3954 int elements_needed) 3955 { 3956 struct qeth_qdio_out_buffer *buffer; 3957 int start_index; 3958 int flush_count = 0; 3959 int do_pack = 0; 3960 int tmp; 3961 int rc = 0; 3962 3963 /* spin until we get the queue ... */ 3964 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, 3965 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); 3966 start_index = queue->next_buf_to_fill; 3967 buffer = queue->bufs[queue->next_buf_to_fill]; 3968 /* 3969 * check if buffer is empty to make sure that we do not 'overtake' 3970 * ourselves and try to fill a buffer that is already primed 3971 */ 3972 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { 3973 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3974 return -EBUSY; 3975 } 3976 /* check if we need to switch packing state of this queue */ 3977 qeth_switch_to_packing_if_needed(queue); 3978 if (queue->do_pack) { 3979 do_pack = 1; 3980 /* does packet fit in current buffer? */ 3981 if ((QETH_MAX_BUFFER_ELEMENTS(card) - 3982 buffer->next_element_to_fill) < elements_needed) { 3983 /* ... no -> set state PRIMED */ 3984 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3985 flush_count++; 3986 queue->next_buf_to_fill = 3987 (queue->next_buf_to_fill + 1) % 3988 QDIO_MAX_BUFFERS_PER_Q; 3989 buffer = queue->bufs[queue->next_buf_to_fill]; 3990 /* we did a step forward, so check buffer state 3991 * again */ 3992 if (atomic_read(&buffer->state) != 3993 QETH_QDIO_BUF_EMPTY) { 3994 qeth_flush_buffers(queue, start_index, 3995 flush_count); 3996 atomic_set(&queue->state, 3997 QETH_OUT_Q_UNLOCKED); 3998 rc = -EBUSY; 3999 goto out; 4000 } 4001 } 4002 } 4003 4004 flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, 4005 hd_len); 4006 if (flush_count) 4007 qeth_flush_buffers(queue, start_index, flush_count); 4008 else if (!atomic_read(&queue->set_pci_flags_count)) 4009 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); 4010 /* 4011 * queue->state will go from LOCKED -> UNLOCKED or from 4012 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us 4013 * (switch packing state or flush buffer to get another pci flag out). 
4014 * In that case we will enter this loop 4015 */ 4016 while (atomic_dec_return(&queue->state)) { 4017 start_index = queue->next_buf_to_fill; 4018 /* check if we can go back to non-packing state */ 4019 tmp = qeth_switch_to_nonpacking_if_needed(queue); 4020 /* 4021 * check if we need to flush a packing buffer to get a pci 4022 * flag out on the queue 4023 */ 4024 if (!tmp && !atomic_read(&queue->set_pci_flags_count)) 4025 tmp = qeth_prep_flush_pack_buffer(queue); 4026 if (tmp) { 4027 qeth_flush_buffers(queue, start_index, tmp); 4028 flush_count += tmp; 4029 } 4030 } 4031 out: 4032 /* at this point the queue is UNLOCKED again */ 4033 if (do_pack) 4034 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 4035 4036 return rc; 4037 } 4038 EXPORT_SYMBOL_GPL(qeth_do_send_packet); 4039 4040 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 4041 unsigned int payload_len, struct sk_buff *skb, 4042 unsigned int proto_len) 4043 { 4044 struct qeth_hdr_ext_tso *ext = &hdr->ext; 4045 4046 ext->hdr_tot_len = sizeof(*ext); 4047 ext->imb_hdr_no = 1; 4048 ext->hdr_type = 1; 4049 ext->hdr_version = 1; 4050 ext->hdr_len = 28; 4051 ext->payload_len = payload_len; 4052 ext->mss = skb_shinfo(skb)->gso_size; 4053 ext->dg_hdr_len = proto_len; 4054 } 4055 4056 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 4057 struct qeth_qdio_out_q *queue, int ipv, int cast_type, 4058 void (*fill_header)(struct qeth_qdio_out_q *queue, 4059 struct qeth_hdr *hdr, struct sk_buff *skb, 4060 int ipv, int cast_type, 4061 unsigned int data_len)) 4062 { 4063 unsigned int proto_len, hw_hdr_len; 4064 unsigned int frame_len = skb->len; 4065 bool is_tso = skb_is_gso(skb); 4066 unsigned int data_offset = 0; 4067 struct qeth_hdr *hdr = NULL; 4068 unsigned int hd_len = 0; 4069 unsigned int elements; 4070 int push_len, rc; 4071 bool is_sg; 4072 4073 if (is_tso) { 4074 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4075 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4076 } else { 4077 hw_hdr_len = sizeof(struct qeth_hdr); 4078 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4079 } 4080 4081 rc = skb_cow_head(skb, hw_hdr_len); 4082 if (rc) 4083 return rc; 4084 4085 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4086 &elements); 4087 if (push_len < 0) 4088 return push_len; 4089 if (is_tso || !push_len) { 4090 /* HW header needs its own buffer element. 
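		 * This is the case for TSO, and whenever the header was built
		 * in cache memory (push_len == 0) rather than pushed onto the
		 * skb.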
*/ 4091 hd_len = hw_hdr_len + proto_len; 4092 data_offset = push_len + proto_len; 4093 } 4094 memset(hdr, 0, hw_hdr_len); 4095 fill_header(queue, hdr, skb, ipv, cast_type, frame_len); 4096 if (is_tso) 4097 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4098 frame_len - proto_len, skb, proto_len); 4099 4100 is_sg = skb_is_nonlinear(skb); 4101 if (IS_IQD(card)) { 4102 rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, 4103 hd_len); 4104 } else { 4105 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4106 skb_orphan(skb); 4107 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4108 hd_len, elements); 4109 } 4110 4111 if (!rc) { 4112 QETH_TXQ_STAT_ADD(queue, buf_elements, elements); 4113 if (is_sg) 4114 QETH_TXQ_STAT_INC(queue, skbs_sg); 4115 if (is_tso) { 4116 QETH_TXQ_STAT_INC(queue, skbs_tso); 4117 QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len); 4118 } 4119 } else { 4120 if (!push_len) 4121 kmem_cache_free(qeth_core_header_cache, hdr); 4122 if (rc == -EBUSY) 4123 /* roll back to ETH header */ 4124 skb_pull(skb, push_len); 4125 } 4126 return rc; 4127 } 4128 EXPORT_SYMBOL_GPL(qeth_xmit); 4129 4130 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4131 struct qeth_reply *reply, unsigned long data) 4132 { 4133 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4134 struct qeth_ipacmd_setadpparms *setparms; 4135 4136 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4137 4138 setparms = &(cmd->data.setadapterparms); 4139 if (qeth_setadpparms_inspect_rc(cmd)) { 4140 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4141 setparms->data.mode = SET_PROMISC_MODE_OFF; 4142 } 4143 card->info.promisc_mode = setparms->data.mode; 4144 return (cmd->hdr.return_code) ? -EIO : 0; 4145 } 4146 4147 void qeth_setadp_promisc_mode(struct qeth_card *card) 4148 { 4149 enum qeth_ipa_promisc_modes mode; 4150 struct net_device *dev = card->dev; 4151 struct qeth_cmd_buffer *iob; 4152 struct qeth_ipa_cmd *cmd; 4153 4154 QETH_CARD_TEXT(card, 4, "setprom"); 4155 4156 if (((dev->flags & IFF_PROMISC) && 4157 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || 4158 (!(dev->flags & IFF_PROMISC) && 4159 (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) 4160 return; 4161 mode = SET_PROMISC_MODE_OFF; 4162 if (dev->flags & IFF_PROMISC) 4163 mode = SET_PROMISC_MODE_ON; 4164 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4165 4166 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4167 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8); 4168 if (!iob) 4169 return; 4170 cmd = __ipa_cmd(iob); 4171 cmd->data.setadapterparms.data.mode = mode; 4172 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4173 } 4174 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4175 4176 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4177 struct qeth_reply *reply, unsigned long data) 4178 { 4179 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4180 struct qeth_ipacmd_setadpparms *adp_cmd; 4181 4182 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4183 if (qeth_setadpparms_inspect_rc(cmd)) 4184 return -EIO; 4185 4186 adp_cmd = &cmd->data.setadapterparms; 4187 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4188 return -EADDRNOTAVAIL; 4189 4190 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4191 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4192 return -EADDRNOTAVAIL; 4193 4194 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); 4195 return 0; 4196 } 4197 4198 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4199 { 
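	/* Despite its name, this issues a READ_MAC subcommand; on success the
	 * callback copies the adapter-provided MAC into card->dev->dev_addr.
	 */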
4200 	int rc;
4201 	struct qeth_cmd_buffer *iob;
4202 	struct qeth_ipa_cmd *cmd;
4203 
4204 	QETH_CARD_TEXT(card, 4, "chgmac");
4205 
4206 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4207 				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4208 				   sizeof(struct qeth_change_addr));
4209 	if (!iob)
4210 		return -ENOMEM;
4211 	cmd = __ipa_cmd(iob);
4212 	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4213 	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4214 	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4215 			card->dev->dev_addr);
4216 	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4217 			       NULL);
4218 	return rc;
4219 }
4220 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4221 
4222 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4223 		struct qeth_reply *reply, unsigned long data)
4224 {
4225 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4226 	struct qeth_set_access_ctrl *access_ctrl_req;
4227 	int fallback = *(int *)reply->param;
4228 
4229 	QETH_CARD_TEXT(card, 4, "setaccb");
4230 	if (cmd->hdr.return_code)
4231 		return -EIO;
4232 	qeth_setadpparms_inspect_rc(cmd);
4233 
4234 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4235 	QETH_DBF_TEXT_(SETUP, 2, "setaccb");
4236 	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4237 	QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
4238 		       cmd->data.setadapterparms.hdr.return_code);
4239 	if (cmd->data.setadapterparms.hdr.return_code !=
4240 	    SET_ACCESS_CTRL_RC_SUCCESS)
4241 		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4242 				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4243 				 cmd->data.setadapterparms.hdr.return_code);
4244 	switch (cmd->data.setadapterparms.hdr.return_code) {
4245 	case SET_ACCESS_CTRL_RC_SUCCESS:
4246 		if (card->options.isolation == ISOLATION_MODE_NONE) {
4247 			dev_info(&card->gdev->dev,
4248 				 "QDIO data connection isolation is deactivated\n");
4249 		} else {
4250 			dev_info(&card->gdev->dev,
4251 				 "QDIO data connection isolation is activated\n");
4252 		}
4253 		break;
4254 	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4255 		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4256 				 CARD_DEVID(card));
4257 		if (fallback)
4258 			card->options.isolation = card->options.prev_isolation;
4259 		break;
4260 	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4261 		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4262 				 CARD_DEVID(card));
4263 		if (fallback)
4264 			card->options.isolation = card->options.prev_isolation;
4265 		break;
4266 	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4267 		dev_err(&card->gdev->dev, "Adapter does not "
4268 			"support QDIO data connection isolation\n");
4269 		break;
4270 	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4271 		dev_err(&card->gdev->dev,
4272 			"Adapter is dedicated. "
4273 			"QDIO data connection isolation not supported\n");
4274 		if (fallback)
4275 			card->options.isolation = card->options.prev_isolation;
4276 		break;
4277 	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4278 		dev_err(&card->gdev->dev,
4279 			"TSO does not permit QDIO data connection isolation\n");
4280 		if (fallback)
4281 			card->options.isolation = card->options.prev_isolation;
4282 		break;
4283 	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4284 		dev_err(&card->gdev->dev, "The adjacent switch port does not "
4285 			"support reflective relay mode\n");
4286 		if (fallback)
4287 			card->options.isolation = card->options.prev_isolation;
4288 		break;
4289 	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4290 		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4291 			"enabled at the adjacent switch port\n");
4292 		if (fallback)
4293 			card->options.isolation = card->options.prev_isolation;
4294 		break;
4295 	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4296 		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4297 			 "at the adjacent switch failed\n");
4298 		break;
4299 	default:
4300 		/* this should never happen */
4301 		if (fallback)
4302 			card->options.isolation = card->options.prev_isolation;
4303 		break;
4304 	}
4305 	return (cmd->hdr.return_code) ? -EIO : 0;
4306 }
4307 
4308 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4309 		enum qeth_ipa_isolation_modes isolation, int fallback)
4310 {
4311 	int rc;
4312 	struct qeth_cmd_buffer *iob;
4313 	struct qeth_ipa_cmd *cmd;
4314 	struct qeth_set_access_ctrl *access_ctrl_req;
4315 
4316 	QETH_CARD_TEXT(card, 4, "setacctl");
4317 
4318 	QETH_DBF_TEXT_(SETUP, 2, "setacctl");
4319 	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
4320 
4321 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4322 				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
4323 				   sizeof(struct qeth_set_access_ctrl));
4324 	if (!iob)
4325 		return -ENOMEM;
4326 	cmd = __ipa_cmd(iob);
4327 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4328 	access_ctrl_req->subcmd_code = isolation;
4329 
4330 	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4331 			       &fallback);
4332 	QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
4333 	return rc;
4334 }
4335 
4336 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4337 {
4338 	int rc = 0;
4339 
4340 	QETH_CARD_TEXT(card, 4, "setactlo");
4341 
4342 	if ((card->info.type == QETH_CARD_TYPE_OSD ||
4343 	     card->info.type == QETH_CARD_TYPE_OSX) &&
4344 	     qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4345 		rc = qeth_setadpparms_set_access_ctrl(card,
4346 			card->options.isolation, fallback);
4347 		if (rc) {
4348 			QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: send failed\n",
4349 					 rc, CARD_DEVID(card));
4350 			rc = -EOPNOTSUPP;
4351 		}
4352 	} else if (card->options.isolation != ISOLATION_MODE_NONE) {
4353 		card->options.isolation = ISOLATION_MODE_NONE;
4354 
4355 		dev_err(&card->gdev->dev, "Adapter does not "
4356 			"support QDIO data connection isolation\n");
4357 		rc = -EOPNOTSUPP;
4358 	}
4359 	return rc;
4360 }
4361 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
4362 
4363 void qeth_tx_timeout(struct net_device *dev)
4364 {
4365 	struct qeth_card *card;
4366 
4367 	card = dev->ml_priv;
4368 	QETH_CARD_TEXT(card, 4, "txtimeo");
4369 	QETH_CARD_STAT_INC(card, tx_errors);
4370 	qeth_schedule_recovery(card);
4371 }
4372 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4373 
4374 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4375 {
4376 	struct qeth_card *card = dev->ml_priv;
4377 	int rc = 0;
4378 
4379
switch (regnum) { 4380 case MII_BMCR: /* Basic mode control register */ 4381 rc = BMCR_FULLDPLX; 4382 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4383 (card->info.link_type != QETH_LINK_TYPE_OSN) && 4384 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4385 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4386 rc |= BMCR_SPEED100; 4387 break; 4388 case MII_BMSR: /* Basic mode status register */ 4389 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4390 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4391 BMSR_100BASE4; 4392 break; 4393 case MII_PHYSID1: /* PHYS ID 1 */ 4394 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4395 dev->dev_addr[2]; 4396 rc = (rc >> 5) & 0xFFFF; 4397 break; 4398 case MII_PHYSID2: /* PHYS ID 2 */ 4399 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4400 break; 4401 case MII_ADVERTISE: /* Advertisement control reg */ 4402 rc = ADVERTISE_ALL; 4403 break; 4404 case MII_LPA: /* Link partner ability reg */ 4405 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4406 LPA_100BASE4 | LPA_LPACK; 4407 break; 4408 case MII_EXPANSION: /* Expansion register */ 4409 break; 4410 case MII_DCOUNTER: /* disconnect counter */ 4411 break; 4412 case MII_FCSCOUNTER: /* false carrier counter */ 4413 break; 4414 case MII_NWAYTEST: /* N-way auto-neg test register */ 4415 break; 4416 case MII_RERRCOUNTER: /* rx error counter */ 4417 rc = card->stats.rx_errors; 4418 break; 4419 case MII_SREVISION: /* silicon revision */ 4420 break; 4421 case MII_RESV1: /* reserved 1 */ 4422 break; 4423 case MII_LBRERROR: /* loopback, rx, bypass error */ 4424 break; 4425 case MII_PHYADDR: /* physical address */ 4426 break; 4427 case MII_RESV2: /* reserved 2 */ 4428 break; 4429 case MII_TPISTATUS: /* TPI status for 10mbps */ 4430 break; 4431 case MII_NCONFIG: /* network interface config */ 4432 break; 4433 default: 4434 break; 4435 } 4436 return rc; 4437 } 4438 4439 static int qeth_snmp_command_cb(struct qeth_card *card, 4440 struct qeth_reply *reply, unsigned long sdata) 4441 { 4442 struct qeth_ipa_cmd *cmd; 4443 struct qeth_arp_query_info *qinfo; 4444 unsigned char *data; 4445 void *snmp_data; 4446 __u16 data_len; 4447 4448 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4449 4450 cmd = (struct qeth_ipa_cmd *) sdata; 4451 data = (unsigned char *)((char *)cmd - reply->offset); 4452 qinfo = (struct qeth_arp_query_info *) reply->param; 4453 4454 if (cmd->hdr.return_code) { 4455 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4456 return -EIO; 4457 } 4458 if (cmd->data.setadapterparms.hdr.return_code) { 4459 cmd->hdr.return_code = 4460 cmd->data.setadapterparms.hdr.return_code; 4461 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4462 return -EIO; 4463 } 4464 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); 4465 if (cmd->data.setadapterparms.hdr.seq_no == 1) { 4466 snmp_data = &cmd->data.setadapterparms.data.snmp; 4467 data_len -= offsetof(struct qeth_ipa_cmd, 4468 data.setadapterparms.data.snmp); 4469 } else { 4470 snmp_data = &cmd->data.setadapterparms.data.snmp.request; 4471 data_len -= offsetof(struct qeth_ipa_cmd, 4472 data.setadapterparms.data.snmp.request); 4473 } 4474 4475 /* check if there is enough room in userspace */ 4476 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4477 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4478 return -ENOSPC; 4479 } 4480 QETH_CARD_TEXT_(card, 4, "snore%i", 4481 cmd->data.setadapterparms.hdr.used_total); 4482 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4483 cmd->data.setadapterparms.hdr.seq_no); 4484 
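	/* A reply may span several IPA commands; append this chunk to the
	 * user buffer and keep the command running (return 1) until seq_no
	 * reaches used_total.
	 */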
/*copy entries to user buffer*/ 4485 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4486 qinfo->udata_offset += data_len; 4487 4488 /* check if all replies received ... */ 4489 QETH_CARD_TEXT_(card, 4, "srtot%i", 4490 cmd->data.setadapterparms.hdr.used_total); 4491 QETH_CARD_TEXT_(card, 4, "srseq%i", 4492 cmd->data.setadapterparms.hdr.seq_no); 4493 if (cmd->data.setadapterparms.hdr.seq_no < 4494 cmd->data.setadapterparms.hdr.used_total) 4495 return 1; 4496 return 0; 4497 } 4498 4499 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4500 { 4501 struct qeth_cmd_buffer *iob; 4502 struct qeth_ipa_cmd *cmd; 4503 struct qeth_snmp_ureq *ureq; 4504 unsigned int req_len; 4505 struct qeth_arp_query_info qinfo = {0, }; 4506 int rc = 0; 4507 4508 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4509 4510 if (card->info.guestlan) 4511 return -EOPNOTSUPP; 4512 4513 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4514 IS_LAYER3(card)) 4515 return -EOPNOTSUPP; 4516 4517 /* skip 4 bytes (data_len struct member) to get req_len */ 4518 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) 4519 return -EFAULT; 4520 if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE - 4521 sizeof(struct qeth_ipacmd_hdr) - 4522 sizeof(struct qeth_ipacmd_setadpparms_hdr))) 4523 return -EINVAL; 4524 ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); 4525 if (IS_ERR(ureq)) { 4526 QETH_CARD_TEXT(card, 2, "snmpnome"); 4527 return PTR_ERR(ureq); 4528 } 4529 qinfo.udata_len = ureq->hdr.data_len; 4530 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4531 if (!qinfo.udata) { 4532 kfree(ureq); 4533 return -ENOMEM; 4534 } 4535 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4536 4537 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, 4538 QETH_SNMP_SETADP_CMDLENGTH + req_len); 4539 if (!iob) { 4540 rc = -ENOMEM; 4541 goto out; 4542 } 4543 4544 /* for large requests, fix-up the length fields: */ 4545 qeth_prepare_ipa_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len); 4546 4547 cmd = __ipa_cmd(iob); 4548 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); 4549 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4550 if (rc) 4551 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4552 CARD_DEVID(card), rc); 4553 else { 4554 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4555 rc = -EFAULT; 4556 } 4557 out: 4558 kfree(ureq); 4559 kfree(qinfo.udata); 4560 return rc; 4561 } 4562 4563 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4564 struct qeth_reply *reply, unsigned long data) 4565 { 4566 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4567 struct qeth_qoat_priv *priv; 4568 char *resdata; 4569 int resdatalen; 4570 4571 QETH_CARD_TEXT(card, 3, "qoatcb"); 4572 if (qeth_setadpparms_inspect_rc(cmd)) 4573 return -EIO; 4574 4575 priv = (struct qeth_qoat_priv *)reply->param; 4576 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4577 resdata = (char *)data + 28; 4578 4579 if (resdatalen > (priv->buffer_len - priv->response_len)) 4580 return -ENOSPC; 4581 4582 memcpy((priv->buffer + priv->response_len), resdata, 4583 resdatalen); 4584 priv->response_len += resdatalen; 4585 4586 if (cmd->data.setadapterparms.hdr.seq_no < 4587 cmd->data.setadapterparms.hdr.used_total) 4588 return 1; 4589 return 0; 4590 } 4591 4592 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4593 { 4594 int rc = 0; 4595 struct qeth_cmd_buffer *iob; 4596 struct qeth_ipa_cmd *cmd; 4597 
struct qeth_query_oat *oat_req; 4598 struct qeth_query_oat_data oat_data; 4599 struct qeth_qoat_priv priv; 4600 void __user *tmp; 4601 4602 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4603 4604 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 4605 rc = -EOPNOTSUPP; 4606 goto out; 4607 } 4608 4609 if (copy_from_user(&oat_data, udata, 4610 sizeof(struct qeth_query_oat_data))) { 4611 rc = -EFAULT; 4612 goto out; 4613 } 4614 4615 priv.buffer_len = oat_data.buffer_len; 4616 priv.response_len = 0; 4617 priv.buffer = vzalloc(oat_data.buffer_len); 4618 if (!priv.buffer) { 4619 rc = -ENOMEM; 4620 goto out; 4621 } 4622 4623 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4624 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 4625 sizeof(struct qeth_query_oat)); 4626 if (!iob) { 4627 rc = -ENOMEM; 4628 goto out_free; 4629 } 4630 cmd = __ipa_cmd(iob); 4631 oat_req = &cmd->data.setadapterparms.data.query_oat; 4632 oat_req->subcmd_code = oat_data.command; 4633 4634 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, 4635 &priv); 4636 if (!rc) { 4637 if (is_compat_task()) 4638 tmp = compat_ptr(oat_data.ptr); 4639 else 4640 tmp = (void __user *)(unsigned long)oat_data.ptr; 4641 4642 if (copy_to_user(tmp, priv.buffer, 4643 priv.response_len)) { 4644 rc = -EFAULT; 4645 goto out_free; 4646 } 4647 4648 oat_data.response_len = priv.response_len; 4649 4650 if (copy_to_user(udata, &oat_data, 4651 sizeof(struct qeth_query_oat_data))) 4652 rc = -EFAULT; 4653 } 4654 4655 out_free: 4656 vfree(priv.buffer); 4657 out: 4658 return rc; 4659 } 4660 4661 static int qeth_query_card_info_cb(struct qeth_card *card, 4662 struct qeth_reply *reply, unsigned long data) 4663 { 4664 struct carrier_info *carrier_info = (struct carrier_info *)reply->param; 4665 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4666 struct qeth_query_card_info *card_info; 4667 4668 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4669 if (qeth_setadpparms_inspect_rc(cmd)) 4670 return -EIO; 4671 4672 card_info = &cmd->data.setadapterparms.data.card_info; 4673 carrier_info->card_type = card_info->card_type; 4674 carrier_info->port_mode = card_info->port_mode; 4675 carrier_info->port_speed = card_info->port_speed; 4676 return 0; 4677 } 4678 4679 int qeth_query_card_info(struct qeth_card *card, 4680 struct carrier_info *carrier_info) 4681 { 4682 struct qeth_cmd_buffer *iob; 4683 4684 QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4685 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4686 return -EOPNOTSUPP; 4687 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 4688 sizeof(struct qeth_ipacmd_setadpparms_hdr)); 4689 if (!iob) 4690 return -ENOMEM; 4691 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, 4692 (void *)carrier_info); 4693 } 4694 4695 /** 4696 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 4697 * @card: pointer to a qeth_card 4698 * 4699 * Returns 4700 * 0, if a MAC address has been set for the card's netdevice 4701 * a return code, for various error conditions 4702 */ 4703 int qeth_vm_request_mac(struct qeth_card *card) 4704 { 4705 struct diag26c_mac_resp *response; 4706 struct diag26c_mac_req *request; 4707 struct ccw_dev_id id; 4708 int rc; 4709 4710 QETH_DBF_TEXT(SETUP, 2, "vmreqmac"); 4711 4712 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 4713 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 4714 if (!request || !response) { 4715 rc = -ENOMEM; 4716 goto out; 4717 } 4718 4719 ccw_device_get_id(CARD_DDEV(card), &id); 4720 request->resp_buf_len = sizeof(*response); 4721 
request->resp_version = DIAG26C_VERSION2; 4722 request->op_code = DIAG26C_GET_MAC; 4723 request->devno = id.devno; 4724 4725 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4726 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 4727 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4728 if (rc) 4729 goto out; 4730 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 4731 4732 if (request->resp_buf_len < sizeof(*response) || 4733 response->version != request->resp_version) { 4734 rc = -EIO; 4735 QETH_DBF_TEXT(SETUP, 2, "badresp"); 4736 QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len, 4737 sizeof(request->resp_buf_len)); 4738 } else if (!is_valid_ether_addr(response->mac)) { 4739 rc = -EINVAL; 4740 QETH_DBF_TEXT(SETUP, 2, "badmac"); 4741 QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN); 4742 } else { 4743 ether_addr_copy(card->dev->dev_addr, response->mac); 4744 } 4745 4746 out: 4747 kfree(response); 4748 kfree(request); 4749 return rc; 4750 } 4751 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 4752 4753 static int qeth_get_qdio_q_format(struct qeth_card *card) 4754 { 4755 if (card->info.type == QETH_CARD_TYPE_IQD) 4756 return QDIO_IQDIO_QFMT; 4757 else 4758 return QDIO_QETH_QFMT; 4759 } 4760 4761 static void qeth_determine_capabilities(struct qeth_card *card) 4762 { 4763 int rc; 4764 int length; 4765 char *prcd; 4766 struct ccw_device *ddev; 4767 int ddev_offline = 0; 4768 4769 QETH_DBF_TEXT(SETUP, 2, "detcapab"); 4770 ddev = CARD_DDEV(card); 4771 if (!ddev->online) { 4772 ddev_offline = 1; 4773 rc = ccw_device_set_online(ddev); 4774 if (rc) { 4775 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 4776 goto out; 4777 } 4778 } 4779 4780 rc = qeth_read_conf_data(card, (void **) &prcd, &length); 4781 if (rc) { 4782 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 4783 CARD_DEVID(card), rc); 4784 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 4785 goto out_offline; 4786 } 4787 qeth_configure_unitaddr(card, prcd); 4788 if (ddev_offline) 4789 qeth_configure_blkt_default(card, prcd); 4790 kfree(prcd); 4791 4792 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 4793 if (rc) 4794 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 4795 4796 QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt); 4797 QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1); 4798 QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2); 4799 QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3); 4800 QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt); 4801 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || 4802 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 4803 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 4804 dev_info(&card->gdev->dev, 4805 "Completion Queueing supported\n"); 4806 } else { 4807 card->options.cq = QETH_CQ_NOTAVAILABLE; 4808 } 4809 4810 4811 out_offline: 4812 if (ddev_offline == 1) 4813 ccw_device_set_offline(ddev); 4814 out: 4815 return; 4816 } 4817 4818 static void qeth_qdio_establish_cq(struct qeth_card *card, 4819 struct qdio_buffer **in_sbal_ptrs, 4820 void (**queue_start_poll) 4821 (struct ccw_device *, int, 4822 unsigned long)) 4823 { 4824 int i; 4825 4826 if (card->options.cq == QETH_CQ_ENABLED) { 4827 int offset = QDIO_MAX_BUFFERS_PER_Q * 4828 (card->qdio.no_in_queues - 1); 4829 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { 4830 in_sbal_ptrs[offset + i] = (struct qdio_buffer *) 4831 virt_to_phys(card->qdio.c_q->bufs[i].buffer); 4832 } 4833 4834 queue_start_poll[card->qdio.no_in_queues - 1] = NULL; 4835 } 4836 } 4837 4838 static int qeth_qdio_establish(struct qeth_card *card) 4839 { 
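	/* Collect the SBAL addresses of all input queues (plus the completion
	 * queue, if enabled) and all output queues into a single
	 * qdio_initialize descriptor, then activate the queues via
	 * qdio_allocate() + qdio_establish().
	 */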
4840 struct qdio_initialize init_data; 4841 char *qib_param_field; 4842 struct qdio_buffer **in_sbal_ptrs; 4843 void (**queue_start_poll) (struct ccw_device *, int, unsigned long); 4844 struct qdio_buffer **out_sbal_ptrs; 4845 int i, j, k; 4846 int rc = 0; 4847 4848 QETH_DBF_TEXT(SETUP, 2, "qdioest"); 4849 4850 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q, 4851 GFP_KERNEL); 4852 if (!qib_param_field) { 4853 rc = -ENOMEM; 4854 goto out_free_nothing; 4855 } 4856 4857 qeth_create_qib_param_field(card, qib_param_field); 4858 qeth_create_qib_param_field_blkt(card, qib_param_field); 4859 4860 in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q, 4861 sizeof(void *), 4862 GFP_KERNEL); 4863 if (!in_sbal_ptrs) { 4864 rc = -ENOMEM; 4865 goto out_free_qib_param; 4866 } 4867 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { 4868 in_sbal_ptrs[i] = (struct qdio_buffer *) 4869 virt_to_phys(card->qdio.in_q->bufs[i].buffer); 4870 } 4871 4872 queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *), 4873 GFP_KERNEL); 4874 if (!queue_start_poll) { 4875 rc = -ENOMEM; 4876 goto out_free_in_sbals; 4877 } 4878 for (i = 0; i < card->qdio.no_in_queues; ++i) 4879 queue_start_poll[i] = qeth_qdio_start_poll; 4880 4881 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); 4882 4883 out_sbal_ptrs = 4884 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q, 4885 sizeof(void *), 4886 GFP_KERNEL); 4887 if (!out_sbal_ptrs) { 4888 rc = -ENOMEM; 4889 goto out_free_queue_start_poll; 4890 } 4891 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) 4892 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { 4893 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( 4894 card->qdio.out_qs[i]->bufs[j]->buffer); 4895 } 4896 4897 memset(&init_data, 0, sizeof(struct qdio_initialize)); 4898 init_data.cdev = CARD_DDEV(card); 4899 init_data.q_format = qeth_get_qdio_q_format(card); 4900 init_data.qib_param_field_format = 0; 4901 init_data.qib_param_field = qib_param_field; 4902 init_data.no_input_qs = card->qdio.no_in_queues; 4903 init_data.no_output_qs = card->qdio.no_out_queues; 4904 init_data.input_handler = qeth_qdio_input_handler; 4905 init_data.output_handler = qeth_qdio_output_handler; 4906 init_data.queue_start_poll_array = queue_start_poll; 4907 init_data.int_parm = (unsigned long) card; 4908 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; 4909 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; 4910 init_data.output_sbal_state_array = card->qdio.out_bufstates; 4911 init_data.scan_threshold = 4912 (card->info.type == QETH_CARD_TYPE_IQD) ? 
1 : 32; 4913 4914 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 4915 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 4916 rc = qdio_allocate(&init_data); 4917 if (rc) { 4918 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 4919 goto out; 4920 } 4921 rc = qdio_establish(&init_data); 4922 if (rc) { 4923 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 4924 qdio_free(CARD_DDEV(card)); 4925 } 4926 } 4927 4928 switch (card->options.cq) { 4929 case QETH_CQ_ENABLED: 4930 dev_info(&card->gdev->dev, "Completion Queue support enabled"); 4931 break; 4932 case QETH_CQ_DISABLED: 4933 dev_info(&card->gdev->dev, "Completion Queue support disabled"); 4934 break; 4935 default: 4936 break; 4937 } 4938 out: 4939 kfree(out_sbal_ptrs); 4940 out_free_queue_start_poll: 4941 kfree(queue_start_poll); 4942 out_free_in_sbals: 4943 kfree(in_sbal_ptrs); 4944 out_free_qib_param: 4945 kfree(qib_param_field); 4946 out_free_nothing: 4947 return rc; 4948 } 4949 4950 static void qeth_core_free_card(struct qeth_card *card) 4951 { 4952 QETH_DBF_TEXT(SETUP, 2, "freecrd"); 4953 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 4954 qeth_clean_channel(&card->read); 4955 qeth_clean_channel(&card->write); 4956 qeth_clean_channel(&card->data); 4957 destroy_workqueue(card->event_wq); 4958 qeth_free_qdio_buffers(card); 4959 unregister_service_level(&card->qeth_service_level); 4960 dev_set_drvdata(&card->gdev->dev, NULL); 4961 kfree(card); 4962 } 4963 4964 void qeth_trace_features(struct qeth_card *card) 4965 { 4966 QETH_CARD_TEXT(card, 2, "features"); 4967 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 4968 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 4969 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 4970 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 4971 sizeof(card->info.diagass_support)); 4972 } 4973 EXPORT_SYMBOL_GPL(qeth_trace_features); 4974 4975 static struct ccw_device_id qeth_ids[] = { 4976 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 4977 .driver_info = QETH_CARD_TYPE_OSD}, 4978 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 4979 .driver_info = QETH_CARD_TYPE_IQD}, 4980 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), 4981 .driver_info = QETH_CARD_TYPE_OSN}, 4982 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 4983 .driver_info = QETH_CARD_TYPE_OSM}, 4984 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 4985 .driver_info = QETH_CARD_TYPE_OSX}, 4986 {}, 4987 }; 4988 MODULE_DEVICE_TABLE(ccw, qeth_ids); 4989 4990 static struct ccw_driver qeth_ccw_driver = { 4991 .driver = { 4992 .owner = THIS_MODULE, 4993 .name = "qeth", 4994 }, 4995 .ids = qeth_ids, 4996 .probe = ccwgroup_probe_ccwdev, 4997 .remove = ccwgroup_remove_ccwdev, 4998 }; 4999 5000 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 5001 { 5002 int retries = 3; 5003 int rc; 5004 5005 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 5006 atomic_set(&card->force_alloc_skb, 0); 5007 qeth_update_from_chp_desc(card); 5008 retry: 5009 if (retries < 3) 5010 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 5011 CARD_DEVID(card)); 5012 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 5013 ccw_device_set_offline(CARD_DDEV(card)); 5014 ccw_device_set_offline(CARD_WDEV(card)); 5015 ccw_device_set_offline(CARD_RDEV(card)); 5016 qdio_free(CARD_DDEV(card)); 5017 rc = ccw_device_set_online(CARD_RDEV(card)); 5018 if (rc) 5019 goto retriable; 5020 rc = ccw_device_set_online(CARD_WDEV(card)); 5021 if (rc) 5022 goto retriable; 
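	/* The data channel comes online last; qeth_qdio_establish() will later
	 * run the QDIO queues on top of it.
	 */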
5023 rc = ccw_device_set_online(CARD_DDEV(card)); 5024 if (rc) 5025 goto retriable; 5026 retriable: 5027 if (rc == -ERESTARTSYS) { 5028 QETH_DBF_TEXT(SETUP, 2, "break1"); 5029 return rc; 5030 } else if (rc) { 5031 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 5032 if (--retries < 0) 5033 goto out; 5034 else 5035 goto retry; 5036 } 5037 qeth_determine_capabilities(card); 5038 qeth_init_tokens(card); 5039 qeth_init_func_level(card); 5040 rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb); 5041 if (rc == -ERESTARTSYS) { 5042 QETH_DBF_TEXT(SETUP, 2, "break2"); 5043 return rc; 5044 } else if (rc) { 5045 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 5046 if (--retries < 0) 5047 goto out; 5048 else 5049 goto retry; 5050 } 5051 rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb); 5052 if (rc == -ERESTARTSYS) { 5053 QETH_DBF_TEXT(SETUP, 2, "break3"); 5054 return rc; 5055 } else if (rc) { 5056 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 5057 if (--retries < 0) 5058 goto out; 5059 else 5060 goto retry; 5061 } 5062 card->read_or_write_problem = 0; 5063 rc = qeth_mpc_initialize(card); 5064 if (rc) { 5065 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 5066 goto out; 5067 } 5068 5069 rc = qeth_send_startlan(card); 5070 if (rc) { 5071 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 5072 if (rc == -ENETDOWN) { 5073 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 5074 *carrier_ok = false; 5075 } else { 5076 goto out; 5077 } 5078 } else { 5079 *carrier_ok = true; 5080 } 5081 5082 card->options.ipa4.supported_funcs = 0; 5083 card->options.ipa6.supported_funcs = 0; 5084 card->options.adp.supported_funcs = 0; 5085 card->options.sbp.supported_funcs = 0; 5086 card->info.diagass_support = 0; 5087 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5088 if (rc == -ENOMEM) 5089 goto out; 5090 if (qeth_is_supported(card, IPA_IPV6)) { 5091 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 5092 if (rc == -ENOMEM) 5093 goto out; 5094 } 5095 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5096 rc = qeth_query_setadapterparms(card); 5097 if (rc < 0) { 5098 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); 5099 goto out; 5100 } 5101 } 5102 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5103 rc = qeth_query_setdiagass(card); 5104 if (rc < 0) { 5105 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); 5106 goto out; 5107 } 5108 } 5109 return 0; 5110 out: 5111 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5112 "an error on the device\n"); 5113 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! 
rc=%d\n", 5114 CARD_DEVID(card), rc); 5115 return rc; 5116 } 5117 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); 5118 5119 static void qeth_create_skb_frag(struct qdio_buffer_element *element, 5120 struct sk_buff *skb, int offset, int data_len) 5121 { 5122 struct page *page = virt_to_page(element->addr); 5123 unsigned int next_frag; 5124 5125 /* first fill the linear space */ 5126 if (!skb->len) { 5127 unsigned int linear = min(data_len, skb_tailroom(skb)); 5128 5129 skb_put_data(skb, element->addr + offset, linear); 5130 data_len -= linear; 5131 if (!data_len) 5132 return; 5133 offset += linear; 5134 /* fall through to add page frag for remaining data */ 5135 } 5136 5137 next_frag = skb_shinfo(skb)->nr_frags; 5138 get_page(page); 5139 skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len); 5140 } 5141 5142 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5143 { 5144 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5145 } 5146 5147 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, 5148 struct qeth_qdio_buffer *qethbuffer, 5149 struct qdio_buffer_element **__element, int *__offset, 5150 struct qeth_hdr **hdr) 5151 { 5152 struct qdio_buffer_element *element = *__element; 5153 struct qdio_buffer *buffer = qethbuffer->buffer; 5154 int offset = *__offset; 5155 struct sk_buff *skb; 5156 int skb_len = 0; 5157 void *data_ptr; 5158 int data_len; 5159 int headroom = 0; 5160 int use_rx_sg = 0; 5161 5162 /* qeth_hdr must not cross element boundaries */ 5163 while (element->length < offset + sizeof(struct qeth_hdr)) { 5164 if (qeth_is_last_sbale(element)) 5165 return NULL; 5166 element++; 5167 offset = 0; 5168 } 5169 *hdr = element->addr + offset; 5170 5171 offset += sizeof(struct qeth_hdr); 5172 switch ((*hdr)->hdr.l2.id) { 5173 case QETH_HEADER_TYPE_LAYER2: 5174 skb_len = (*hdr)->hdr.l2.pkt_length; 5175 break; 5176 case QETH_HEADER_TYPE_LAYER3: 5177 skb_len = (*hdr)->hdr.l3.length; 5178 headroom = ETH_HLEN; 5179 break; 5180 case QETH_HEADER_TYPE_OSN: 5181 skb_len = (*hdr)->hdr.osn.pdu_length; 5182 headroom = sizeof(struct qeth_hdr); 5183 break; 5184 default: 5185 break; 5186 } 5187 5188 if (!skb_len) 5189 return NULL; 5190 5191 if (((skb_len >= card->options.rx_sg_cb) && 5192 (!(card->info.type == QETH_CARD_TYPE_OSN)) && 5193 (!atomic_read(&card->force_alloc_skb))) || 5194 (card->options.cq == QETH_CQ_ENABLED)) 5195 use_rx_sg = 1; 5196 5197 if (use_rx_sg && qethbuffer->rx_skb) { 5198 /* QETH_CQ_ENABLED only: */ 5199 skb = qethbuffer->rx_skb; 5200 qethbuffer->rx_skb = NULL; 5201 } else { 5202 unsigned int linear = (use_rx_sg) ? 
QETH_RX_PULL_LEN : skb_len; 5203 5204 skb = napi_alloc_skb(&card->napi, linear + headroom); 5205 } 5206 if (!skb) 5207 goto no_mem; 5208 if (headroom) 5209 skb_reserve(skb, headroom); 5210 5211 data_ptr = element->addr + offset; 5212 while (skb_len) { 5213 data_len = min(skb_len, (int)(element->length - offset)); 5214 if (data_len) { 5215 if (use_rx_sg) 5216 qeth_create_skb_frag(element, skb, offset, 5217 data_len); 5218 else 5219 skb_put_data(skb, data_ptr, data_len); 5220 } 5221 skb_len -= data_len; 5222 if (skb_len) { 5223 if (qeth_is_last_sbale(element)) { 5224 QETH_CARD_TEXT(card, 4, "unexeob"); 5225 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5226 dev_kfree_skb_any(skb); 5227 QETH_CARD_STAT_INC(card, rx_errors); 5228 return NULL; 5229 } 5230 element++; 5231 offset = 0; 5232 data_ptr = element->addr; 5233 } else { 5234 offset += data_len; 5235 } 5236 } 5237 *__element = element; 5238 *__offset = offset; 5239 if (use_rx_sg) { 5240 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5241 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5242 skb_shinfo(skb)->nr_frags); 5243 } 5244 return skb; 5245 no_mem: 5246 if (net_ratelimit()) { 5247 QETH_CARD_TEXT(card, 2, "noskbmem"); 5248 } 5249 QETH_CARD_STAT_INC(card, rx_dropped); 5250 return NULL; 5251 } 5252 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); 5253 5254 int qeth_poll(struct napi_struct *napi, int budget) 5255 { 5256 struct qeth_card *card = container_of(napi, struct qeth_card, napi); 5257 int work_done = 0; 5258 struct qeth_qdio_buffer *buffer; 5259 int done; 5260 int new_budget = budget; 5261 5262 while (1) { 5263 if (!card->rx.b_count) { 5264 card->rx.qdio_err = 0; 5265 card->rx.b_count = qdio_get_next_buffers( 5266 card->data.ccwdev, 0, &card->rx.b_index, 5267 &card->rx.qdio_err); 5268 if (card->rx.b_count <= 0) { 5269 card->rx.b_count = 0; 5270 break; 5271 } 5272 card->rx.b_element = 5273 &card->qdio.in_q->bufs[card->rx.b_index] 5274 .buffer->element[0]; 5275 card->rx.e_offset = 0; 5276 } 5277 5278 while (card->rx.b_count) { 5279 buffer = &card->qdio.in_q->bufs[card->rx.b_index]; 5280 if (!(card->rx.qdio_err && 5281 qeth_check_qdio_errors(card, buffer->buffer, 5282 card->rx.qdio_err, "qinerr"))) 5283 work_done += 5284 card->discipline->process_rx_buffer( 5285 card, new_budget, &done); 5286 else 5287 done = 1; 5288 5289 if (done) { 5290 QETH_CARD_STAT_INC(card, rx_bufs); 5291 qeth_put_buffer_pool_entry(card, 5292 buffer->pool_entry); 5293 qeth_queue_input_buffer(card, card->rx.b_index); 5294 card->rx.b_count--; 5295 if (card->rx.b_count) { 5296 card->rx.b_index = 5297 (card->rx.b_index + 1) % 5298 QDIO_MAX_BUFFERS_PER_Q; 5299 card->rx.b_element = 5300 &card->qdio.in_q 5301 ->bufs[card->rx.b_index] 5302 .buffer->element[0]; 5303 card->rx.e_offset = 0; 5304 } 5305 } 5306 5307 if (work_done >= budget) 5308 goto out; 5309 else 5310 new_budget = budget - work_done; 5311 } 5312 } 5313 5314 napi_complete_done(napi, work_done); 5315 if (qdio_start_irq(card->data.ccwdev, 0)) 5316 napi_schedule(&card->napi); 5317 out: 5318 return work_done; 5319 } 5320 EXPORT_SYMBOL_GPL(qeth_poll); 5321 5322 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) 5323 { 5324 if (!cmd->hdr.return_code) 5325 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5326 return cmd->hdr.return_code; 5327 } 5328 5329 static int qeth_setassparms_get_caps_cb(struct qeth_card *card, 5330 struct qeth_reply *reply, 5331 unsigned long data) 5332 { 5333 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5334 struct qeth_ipa_caps *caps = reply->param; 5335 5336 if 
(qeth_setassparms_inspect_rc(cmd)) 5337 return -EIO; 5338 5339 caps->supported = cmd->data.setassparms.data.caps.supported; 5340 caps->enabled = cmd->data.setassparms.data.caps.enabled; 5341 return 0; 5342 } 5343 5344 int qeth_setassparms_cb(struct qeth_card *card, 5345 struct qeth_reply *reply, unsigned long data) 5346 { 5347 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5348 5349 QETH_CARD_TEXT(card, 4, "defadpcb"); 5350 5351 if (cmd->hdr.return_code) 5352 return -EIO; 5353 5354 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5355 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 5356 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 5357 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 5358 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 5359 return 0; 5360 } 5361 EXPORT_SYMBOL_GPL(qeth_setassparms_cb); 5362 5363 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, 5364 enum qeth_ipa_funcs ipa_func, 5365 __u16 cmd_code, __u16 len, 5366 enum qeth_prot_versions prot) 5367 { 5368 struct qeth_cmd_buffer *iob; 5369 struct qeth_ipa_cmd *cmd; 5370 5371 QETH_CARD_TEXT(card, 4, "getasscm"); 5372 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); 5373 5374 if (iob) { 5375 cmd = __ipa_cmd(iob); 5376 cmd->data.setassparms.hdr.assist_no = ipa_func; 5377 cmd->data.setassparms.hdr.length = 8 + len; 5378 cmd->data.setassparms.hdr.command_code = cmd_code; 5379 } 5380 5381 return iob; 5382 } 5383 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); 5384 5385 int qeth_send_simple_setassparms_prot(struct qeth_card *card, 5386 enum qeth_ipa_funcs ipa_func, 5387 u16 cmd_code, long data, 5388 enum qeth_prot_versions prot) 5389 { 5390 int length = 0; 5391 struct qeth_cmd_buffer *iob; 5392 5393 QETH_CARD_TEXT_(card, 4, "simassp%i", prot); 5394 if (data) 5395 length = sizeof(__u32); 5396 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); 5397 if (!iob) 5398 return -ENOMEM; 5399 5400 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data; 5401 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); 5402 } 5403 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); 5404 5405 static void qeth_unregister_dbf_views(void) 5406 { 5407 int x; 5408 for (x = 0; x < QETH_DBF_INFOS; x++) { 5409 debug_unregister(qeth_dbf[x].id); 5410 qeth_dbf[x].id = NULL; 5411 } 5412 } 5413 5414 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 
5415 { 5416 char dbf_txt_buf[32]; 5417 va_list args; 5418 5419 if (!debug_level_enabled(id, level)) 5420 return; 5421 va_start(args, fmt); 5422 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); 5423 va_end(args); 5424 debug_text_event(id, level, dbf_txt_buf); 5425 } 5426 EXPORT_SYMBOL_GPL(qeth_dbf_longtext); 5427 5428 static int qeth_register_dbf_views(void) 5429 { 5430 int ret; 5431 int x; 5432 5433 for (x = 0; x < QETH_DBF_INFOS; x++) { 5434 /* register the areas */ 5435 qeth_dbf[x].id = debug_register(qeth_dbf[x].name, 5436 qeth_dbf[x].pages, 5437 qeth_dbf[x].areas, 5438 qeth_dbf[x].len); 5439 if (qeth_dbf[x].id == NULL) { 5440 qeth_unregister_dbf_views(); 5441 return -ENOMEM; 5442 } 5443 5444 /* register a view */ 5445 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); 5446 if (ret) { 5447 qeth_unregister_dbf_views(); 5448 return ret; 5449 } 5450 5451 /* set a passing level */ 5452 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); 5453 } 5454 5455 return 0; 5456 } 5457 5458 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ 5459 5460 int qeth_core_load_discipline(struct qeth_card *card, 5461 enum qeth_discipline_id discipline) 5462 { 5463 mutex_lock(&qeth_mod_mutex); 5464 switch (discipline) { 5465 case QETH_DISCIPLINE_LAYER3: 5466 card->discipline = try_then_request_module( 5467 symbol_get(qeth_l3_discipline), "qeth_l3"); 5468 break; 5469 case QETH_DISCIPLINE_LAYER2: 5470 card->discipline = try_then_request_module( 5471 symbol_get(qeth_l2_discipline), "qeth_l2"); 5472 break; 5473 default: 5474 break; 5475 } 5476 mutex_unlock(&qeth_mod_mutex); 5477 5478 if (!card->discipline) { 5479 dev_err(&card->gdev->dev, "There is no kernel module to " 5480 "support discipline %d\n", discipline); 5481 return -EINVAL; 5482 } 5483 5484 card->options.layer = discipline; 5485 return 0; 5486 } 5487 5488 void qeth_core_free_discipline(struct qeth_card *card) 5489 { 5490 if (IS_LAYER2(card)) 5491 symbol_put(qeth_l2_discipline); 5492 else 5493 symbol_put(qeth_l3_discipline); 5494 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 5495 card->discipline = NULL; 5496 } 5497 5498 const struct device_type qeth_generic_devtype = { 5499 .name = "qeth_generic", 5500 .groups = qeth_generic_attr_groups, 5501 }; 5502 EXPORT_SYMBOL_GPL(qeth_generic_devtype); 5503 5504 static const struct device_type qeth_osn_devtype = { 5505 .name = "qeth_osn", 5506 .groups = qeth_osn_attr_groups, 5507 }; 5508 5509 #define DBF_NAME_LEN 20 5510 5511 struct qeth_dbf_entry { 5512 char dbf_name[DBF_NAME_LEN]; 5513 debug_info_t *dbf_info; 5514 struct list_head dbf_list; 5515 }; 5516 5517 static LIST_HEAD(qeth_dbf_list); 5518 static DEFINE_MUTEX(qeth_dbf_list_mutex); 5519 5520 static debug_info_t *qeth_get_dbf_entry(char *name) 5521 { 5522 struct qeth_dbf_entry *entry; 5523 debug_info_t *rc = NULL; 5524 5525 mutex_lock(&qeth_dbf_list_mutex); 5526 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { 5527 if (strcmp(entry->dbf_name, name) == 0) { 5528 rc = entry->dbf_info; 5529 break; 5530 } 5531 } 5532 mutex_unlock(&qeth_dbf_list_mutex); 5533 return rc; 5534 } 5535 5536 static int qeth_add_dbf_entry(struct qeth_card *card, char *name) 5537 { 5538 struct qeth_dbf_entry *new_entry; 5539 5540 card->debug = debug_register(name, 2, 1, 8); 5541 if (!card->debug) { 5542 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); 5543 goto err; 5544 } 5545 if (debug_register_view(card->debug, &debug_hex_ascii_view)) 5546 goto err_dbg; 5547 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); 5548 if (!new_entry) 
5549 goto err_dbg; 5550 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); 5551 new_entry->dbf_info = card->debug; 5552 mutex_lock(&qeth_dbf_list_mutex); 5553 list_add(&new_entry->dbf_list, &qeth_dbf_list); 5554 mutex_unlock(&qeth_dbf_list_mutex); 5555 5556 return 0; 5557 5558 err_dbg: 5559 debug_unregister(card->debug); 5560 err: 5561 return -ENOMEM; 5562 } 5563 5564 static void qeth_clear_dbf_list(void) 5565 { 5566 struct qeth_dbf_entry *entry, *tmp; 5567 5568 mutex_lock(&qeth_dbf_list_mutex); 5569 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { 5570 list_del(&entry->dbf_list); 5571 debug_unregister(entry->dbf_info); 5572 kfree(entry); 5573 } 5574 mutex_unlock(&qeth_dbf_list_mutex); 5575 } 5576 5577 static struct net_device *qeth_alloc_netdev(struct qeth_card *card) 5578 { 5579 struct net_device *dev; 5580 5581 switch (card->info.type) { 5582 case QETH_CARD_TYPE_IQD: 5583 dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup); 5584 break; 5585 case QETH_CARD_TYPE_OSN: 5586 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); 5587 break; 5588 default: 5589 dev = alloc_etherdev(0); 5590 } 5591 5592 if (!dev) 5593 return NULL; 5594 5595 dev->ml_priv = card; 5596 dev->watchdog_timeo = QETH_TX_TIMEOUT; 5597 dev->min_mtu = IS_OSN(card) ? 64 : 576; 5598 /* initialized when device first goes online: */ 5599 dev->max_mtu = 0; 5600 dev->mtu = 0; 5601 SET_NETDEV_DEV(dev, &card->gdev->dev); 5602 netif_carrier_off(dev); 5603 5604 if (IS_OSN(card)) { 5605 dev->ethtool_ops = &qeth_osn_ethtool_ops; 5606 } else { 5607 dev->ethtool_ops = &qeth_ethtool_ops; 5608 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 5609 dev->hw_features |= NETIF_F_SG; 5610 dev->vlan_features |= NETIF_F_SG; 5611 if (IS_IQD(card)) 5612 dev->features |= NETIF_F_SG; 5613 } 5614 5615 return dev; 5616 } 5617 5618 struct net_device *qeth_clone_netdev(struct net_device *orig) 5619 { 5620 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); 5621 5622 if (!clone) 5623 return NULL; 5624 5625 clone->dev_port = orig->dev_port; 5626 return clone; 5627 } 5628 5629 static int qeth_core_probe_device(struct ccwgroup_device *gdev) 5630 { 5631 struct qeth_card *card; 5632 struct device *dev; 5633 int rc; 5634 enum qeth_discipline_id enforced_disc; 5635 char dbf_name[DBF_NAME_LEN]; 5636 5637 QETH_DBF_TEXT(SETUP, 2, "probedev"); 5638 5639 dev = &gdev->dev; 5640 if (!get_device(dev)) 5641 return -ENODEV; 5642 5643 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); 5644 5645 card = qeth_alloc_card(gdev); 5646 if (!card) { 5647 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); 5648 rc = -ENOMEM; 5649 goto err_dev; 5650 } 5651 5652 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", 5653 dev_name(&gdev->dev)); 5654 card->debug = qeth_get_dbf_entry(dbf_name); 5655 if (!card->debug) { 5656 rc = qeth_add_dbf_entry(card, dbf_name); 5657 if (rc) 5658 goto err_card; 5659 } 5660 5661 qeth_setup_card(card); 5662 qeth_update_from_chp_desc(card); 5663 5664 card->dev = qeth_alloc_netdev(card); 5665 if (!card->dev) { 5666 rc = -ENOMEM; 5667 goto err_card; 5668 } 5669 5670 qeth_determine_capabilities(card); 5671 enforced_disc = qeth_enforce_discipline(card); 5672 switch (enforced_disc) { 5673 case QETH_DISCIPLINE_UNDETERMINED: 5674 gdev->dev.type = &qeth_generic_devtype; 5675 break; 5676 default: 5677 card->info.layer_enforced = true; 5678 rc = qeth_core_load_discipline(card, enforced_disc); 5679 if (rc) 5680 goto err_load; 5681 5682 gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN) 5683 ? 
card->discipline->devtype 5684 : &qeth_osn_devtype; 5685 rc = card->discipline->setup(card->gdev); 5686 if (rc) 5687 goto err_disc; 5688 break; 5689 } 5690 5691 return 0; 5692 5693 err_disc: 5694 qeth_core_free_discipline(card); 5695 err_load: 5696 free_netdev(card->dev); 5697 err_card: 5698 qeth_core_free_card(card); 5699 err_dev: 5700 put_device(dev); 5701 return rc; 5702 } 5703 5704 static void qeth_core_remove_device(struct ccwgroup_device *gdev) 5705 { 5706 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5707 5708 QETH_DBF_TEXT(SETUP, 2, "removedv"); 5709 5710 if (card->discipline) { 5711 card->discipline->remove(gdev); 5712 qeth_core_free_discipline(card); 5713 } 5714 5715 free_netdev(card->dev); 5716 qeth_core_free_card(card); 5717 put_device(&gdev->dev); 5718 } 5719 5720 static int qeth_core_set_online(struct ccwgroup_device *gdev) 5721 { 5722 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5723 int rc = 0; 5724 enum qeth_discipline_id def_discipline; 5725 5726 if (!card->discipline) { 5727 if (card->info.type == QETH_CARD_TYPE_IQD) 5728 def_discipline = QETH_DISCIPLINE_LAYER3; 5729 else 5730 def_discipline = QETH_DISCIPLINE_LAYER2; 5731 rc = qeth_core_load_discipline(card, def_discipline); 5732 if (rc) 5733 goto err; 5734 rc = card->discipline->setup(card->gdev); 5735 if (rc) { 5736 qeth_core_free_discipline(card); 5737 goto err; 5738 } 5739 } 5740 rc = card->discipline->set_online(gdev); 5741 err: 5742 return rc; 5743 } 5744 5745 static int qeth_core_set_offline(struct ccwgroup_device *gdev) 5746 { 5747 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5748 return card->discipline->set_offline(gdev); 5749 } 5750 5751 static void qeth_core_shutdown(struct ccwgroup_device *gdev) 5752 { 5753 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5754 qeth_set_allowed_threads(card, 0, 1); 5755 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) 5756 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5757 qeth_qdio_clear_card(card, 0); 5758 qeth_clear_qdio_buffers(card); 5759 qdio_free(CARD_DDEV(card)); 5760 } 5761 5762 static int qeth_core_freeze(struct ccwgroup_device *gdev) 5763 { 5764 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5765 if (card->discipline && card->discipline->freeze) 5766 return card->discipline->freeze(gdev); 5767 return 0; 5768 } 5769 5770 static int qeth_core_thaw(struct ccwgroup_device *gdev) 5771 { 5772 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5773 if (card->discipline && card->discipline->thaw) 5774 return card->discipline->thaw(gdev); 5775 return 0; 5776 } 5777 5778 static int qeth_core_restore(struct ccwgroup_device *gdev) 5779 { 5780 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5781 if (card->discipline && card->discipline->restore) 5782 return card->discipline->restore(gdev); 5783 return 0; 5784 } 5785 5786 static ssize_t group_store(struct device_driver *ddrv, const char *buf, 5787 size_t count) 5788 { 5789 int err; 5790 5791 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, 5792 buf); 5793 5794 return err ? 
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
	.prepare = NULL,
	.complete = NULL,
	.freeze = qeth_core_freeze,
	.thaw = qeth_core_thaw,
	.restore = qeth_core_restore,
};

struct qeth_card *qeth_get_card_by_busid(char *bus_id)
{
	struct ccwgroup_device *gdev;
	struct qeth_card *card;

	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
	if (!gdev)
		return NULL;

	card = dev_get_drvdata(&gdev->dev);
	put_device(&gdev->dev);
	return card;
}
EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((card->info.type == QETH_CARD_TYPE_OSD ||
		     card->info.type == QETH_CARD_TYPE_OSM ||
		     card->info.type == QETH_CARD_TYPE_OSX) &&
		    !card->info.guestlan)
			return 1;
		else
			return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype,
						 IPA_CMD_ASS_STOP, 0, prot);
}
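/*
 * Enabling checksum offload is a two-step IPA conversation; a rough sketch
 * of qeth_set_csum_on() below (error unwinding omitted):
 *
 *	IPA_CMD_ASS_START  -> reply carries the supported checksum flags
 *	                      (parsed by qeth_start_csum_cb())
 *	IPA_CMD_ASS_ENABLE -> reply carries the supported and enabled caps
 *
 * If either step fails, or the reported capabilities don't cover the
 * required TCP/UDP (and, on some L3 setups, IP header) flags, the assist
 * is switched off again via qeth_set_csum_off().
 */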
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 4,
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
	if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		dev_warn(&card->gdev->dev,
			 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
			 QETH_CARD_IFNAME(card));
	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot)
{
	return on ? qeth_set_csum_on(card, cstype, prot) :
		    qeth_set_csum_off(card, cstype, prot);
}
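/*
 * TSO is negotiated with the same START/ENABLE pattern as checksum offload.
 * The START reply additionally reports the MSS supported by the device
 * (parsed by qeth_start_tso_cb() below); a zero MSS or a missing
 * QETH_IPA_LARGE_SEND_TCP bit turns the assist off again.
 */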
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, 0, prot);
}

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}
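/*
 * RX checksum offload exists separately for IPv4 and IPv6, so
 * qeth_set_ipa_rx_csum() below has to fold two return codes into one.
 * The rules, as implemented:
 *
 *	enable:  report success if at least one Assist is active
 *	disable: report failure if at least one Assist is still active
 */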
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_DBF_TEXT(SETUP, 2, "setfeat");
	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));

	if (changed & NETIF_F_IP_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
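/*
 * Worked example for the XOR bookkeeping in qeth_set_features() above,
 * using made-up values: suppose the caller toggles NETIF_F_IP_CSUM and
 * NETIF_F_TSO, and only the TSO toggle fails. Then:
 *
 *	changed = CSUM | TSO		(requested delta)
 *	changed ^= TSO			(TSO toggle failed, drop it)
 *	dev->features ^= changed	(CSUM flipped, TSO left as-is)
 *
 * and -EIO tells the stack that the features were not applied in full.
 */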
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_DBF_TEXT(SETUP, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_errors;
	stats->rx_dropped = card->stats.rx_dropped;
	stats->multicast = card->stats.rx_multicast;
	stats->tx_errors = card->stats.tx_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
		stats->tx_carrier_errors += queue->stats.tx_carrier_errors;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");
	if (card->state == CARD_STATE_UP)
		return 0;
	if (card->state != CARD_STATE_SOFTSETUP)
		return -ENODEV;

	if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
		return -EIO;

	card->data.state = CH_STATE_UP;
	card->state = CARD_STATE_UP;
	netif_start_queue(dev);

	napi_enable(&card->napi);
	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	netif_tx_disable(dev);
	if (card->state == CARD_STATE_UP) {
		card->state = CARD_STATE_SOFTSETUP;
		napi_disable(&card->napi);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
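/*
 * Module init acquires its resources in a fixed order (workqueue, dbf
 * views, root device, slab caches, ccw and ccwgroup drivers); the error
 * labels in qeth_core_init() below unwind them in exactly the reverse
 * order, and qeth_core_exit() releases the same resources on module
 * removal.
 */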
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_wq = create_singlethread_workqueue("qeth_wq");
	if (!qeth_wq) {
		rc = -ENOMEM;
		goto out_err;
	}

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	destroy_workqueue(qeth_wq);
out_err:
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	destroy_workqueue(qeth_wq);
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");