// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct kmem_cache *qeth_qaob_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until pool is allocated: */
	if (list_empty(&pool->entry_list))
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
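
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * such as a sysfs store handler for the inbound buffer count would do
 *
 *	rc = qeth_resize_buffer_pool(card, new_count);
 *
 * Shrinking frees surplus entries right away; growing allocates the
 * whole delta on a side list first and only splices it in once every
 * allocation has succeeded, so the pool is never left partially
 * resized.
 */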

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;

		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		if (!card->qdio.c_q) {
			card->qdio.c_q = qeth_alloc_qdio_queue();
			if (!card->qdio.c_q) {
				dev_err(&card->gdev->dev,
					"Failed to create completion queue\n");
				return -ENOMEM;
			}
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		qeth_free_cq(card);
	}
	return 0;
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			      TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			      TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = virt_to_dma32(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

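/*
 * The add/del/lookup helpers below share one entry layout for both
 * protocols: an IPv4 address is stored in the last 32-bit word of an
 * in6_addr with the remaining words zeroed, which is why the v4 paths
 * compare against addr.s6_addr32[3].
 */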
static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static void qeth_default_link_info(struct qeth_card *card)
{
	struct qeth_link_info *link_info = &card->info.link_info;

	QETH_CARD_TEXT(card, 2, "dftlinfo");
	link_info->duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			link_info->speed = SPEED_100;
			link_info->port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			link_info->speed = SPEED_1000;
			link_info->port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			link_info->speed = SPEED_10000;
			link_info->port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			link_info->speed = SPEED_25000;
			link_info->port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev,
				 "Unknown link type %x\n",
				 card->info.link_type);
			link_info->speed = SPEED_UNKNOWN;
			link_info->port = PORT_OTHER;
		}

		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
	}
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
				netdev_name(card->dev));
			/* Set offline, then probably fail to set online: */
			qeth_schedule_recovery(card);
		} else {
			/* stay online for subsequent STARTLAN */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
			qeth_default_link_info(card);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
						       (unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

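/*
 * Note on the thread masks used above, all serialized by
 * thread_mask_lock: thread_allowed_mask gates whether a thread may run
 * at all, thread_start_mask marks a pending start request, and
 * thread_running_mask marks active execution. qeth_schedule_recovery()
 * sets the start bit and kicks kernel_thread_starter; the started
 * worker then moves the bit from "start" to "running" via
 * __qeth_do_run_thread(), and "running" is cleared again when the
 * recovery thread exits.
 */
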
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			 "failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			 " on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}

/**
 * qeth_irq() - qeth interrupt handler
 * @cdev: ccw device
 * @intparm: expect pointer to iob
 * @irb: Interruption Response Block
 *
 * In the good path:
 * corresponding qeth channel is locked with last used iob as active_cmd.
 * But this function is also called for error interrupts.
 *
 * Caller ensures that:
 * Interrupts are disabled; ccw device lock is held;
 *
 */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		     struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		qeth_unlock_channel(card, channel);
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				 "The qeth device driver failed to recover "
				 "an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			qeth_unlock_channel(card, channel);
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
		/* channel command hasn't started: retry.
		 * active_cmd is still set to last iob
		 */
		QETH_CARD_TEXT(card, 2, "irqcc1");
		rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
					      (addr_t)iob, 0, 0, iob->timeout);
		if (rc) {
			QETH_DBF_MESSAGE(2,
					 "ccw retry on %x failed, rc = %i\n",
					 CARD_DEVID(card), rc);
			QETH_CARD_TEXT_(card, 2, " err%d", rc);
			qeth_unlock_channel(card, channel);
			qeth_cancel_cmd(iob, rc);
		}
		return;
	}

	qeth_unlock_channel(card, channel);

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = dma64_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		kmem_cache_free(qeth_qaob_cache, buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = dma64_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					      kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
						      QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
					      channel->state == CH_STATE_STOPPED,
					      QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
					      channel->state == CH_STATE_HALTED,
					      QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
			       QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

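/*
 * Worked examples for the mapping above (input levels are
 * hypothetical): 0x4108 has low byte 8 and maps to 0x0408, while
 * 0x0101 has bits 9:8 == 1 and maps to 0x0201; any other level is
 * returned unchanged.
 */
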
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() - send control command to the card
 * @card:	qeth_card structure pointer
 * @iob:	qeth_cmd_buffer pointer
 * @reply_cb:	callback function pointer
 *  cb_card:	pointer to the qeth_card structure
 *  cb_reply:	pointer to the qeth_reply structure
 *  cb_cmd:	pointer to the original iob for non-IPA
 *		commands, or to the qeth_ipa_cmd structure
 *		for the IPA commands.
 * @reply_param: private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */

static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
/**
 * qeth_send_control_data() - send control command to the card
 * @card:	qeth_card structure pointer
 * @iob:	qeth_cmd_buffer pointer
 * @reply_cb:	callback function pointer
 *  cb_card:	pointer to the qeth_card structure
 *  cb_reply:	pointer to the qeth_reply structure
 *  cb_cmd:	pointer to the original iob for non-IPA
 *		commands, or to the qeth_ipa_cmd structure
 *		for the IPA commands.
 * @reply_param: private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}
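/*
 * Illustrative sketch (not part of the driver) of a reply callback that
 * follows the contract documented above; the names and the "more blocks
 * pending" condition are hypothetical:
 *
 *	static int example_reply_cb(struct qeth_card *card,
 *				    struct qeth_reply *reply,
 *				    unsigned long data)
 *	{
 *		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 *
 *		if (cmd->hdr.return_code)
 *			return -EIO;	(error: abort the command)
 *		if (example_more_blocks_pending(cmd))
 *			return 1;	(> 0: wait for further reply blocks)
 *		*(u16 *)reply->param = 0;	(reply_param is reachable here)
 *		return 0;	(last or only reply block)
 *	}
 */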
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}
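/*
 * Example for the node-descriptor checks above (illustrative only): the
 * plant field is EBCDIC, so a z/VM guest NIC reports plant bytes
 * 0xE5 0xD4 ('V' and 'M' in EBCDIC), which is exactly what
 * _ascebc['V'] / _ascebc['M'] translate to. Likewise the 0xF0-0xF4
 * model comparisons match the EBCDIC digits '0' through '4'.
 */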
static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	iob->finalize = qeth_idx_finalize_cmd;

	port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
			    unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;
}

static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}

static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
{
	if (link_type == QETH_LINK_TYPE_LANE_TR ||
	    link_type == QETH_LINK_TYPE_HSTR) {
		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
		return false;
	}

	return true;
}
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}

static int qeth_get_mtu_outof_framesize(int framesize)
{
	switch (framesize) {
	case 0x4000:
		return 8192;
	case 0x6000:
		return 16384;
	case 0xa000:
		return 32768;
	case 0xffff:
		return 57344;
	default:
		return 0;
	}
}
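/*
 * Worked example for the IQD path above (illustrative only): a reported
 * frame size of 0x4000 maps to an MTU of 8192, so with 4 KB pages the RX
 * buffer size becomes 8192 + 2 * 4096 = 16384 bytes. Frame size 0xffff
 * (MTU 57344) yields 57344 + 8192 = 65536 bytes.
 */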
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	struct qeth_cmd_buffer *iob;
	u8 link_type = 0;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		if (!qeth_is_supported_link_type(card, link_type))
			return -EPROTONOSUPPORT;
	}

	card->info.link_type = link_type;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}

static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev,
			"A connection could not be established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}

static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}

static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
			      gfp_t gfp)
{
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
	if (!q)
		return;

	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	unsigned int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
		goto err_qdio_bufs;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
			goto err_out_bufs;
	}

	return q;

err_out_bufs:
	while (i > 0)
		qeth_free_out_buf(q->bufs[--i]);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
err_qdio_bufs:
	kfree(q);
	return NULL;
}

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = timer_container_of(queue, timer,
							   timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	unsigned int i;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_err;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
			   QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_buffer_pool;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		INIT_LIST_HEAD(&queue->pending_bufs);
		spin_lock_init(&queue->lock);
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
		if (IS_IQD(card)) {
			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
			queue->rescan_usecs = QETH_TX_TIMER_USECS;
		} else {
			queue->coalesce_usecs = USEC_PER_SEC;
			queue->max_coalesced_frames = 0;
			queue->rescan_usecs = 10 * USEC_PER_SEC;
		}
		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
	}

	return 0;

out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_buffer_pool:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	qeth_free_cq(card);
out_err:
	return -ENOMEM;
}
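/*
 * Note on the defaults chosen above (illustrative): IQD queues start with
 * fine-grained TX coalescing (QETH_TX_COALESCE_USECS /
 * QETH_TX_MAX_COALESCED_FRAMES) and a short rescan interval, while all
 * other card types effectively fall back to a 1 s coalescing timer and a
 * 10 s rescan. qeth_flush_buffers() later reads these values to decide
 * when to fake a TX completion interrupt via napi_schedule().
 */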
static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	qeth_free_cq(card);

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb) {
			consume_skb(card->qdio.in_q->bufs[j].rx_skb);
			card->qdio.in_q->bufs[j].rx_skb = NULL;
		}
	}

	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}

static void qeth_fill_qib_parms(struct qeth_card *card,
				struct qeth_qib_parms *parms)
{
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	parms->pcit_magic[0] = 'P';
	parms->pcit_magic[1] = 'C';
	parms->pcit_magic[2] = 'I';
	parms->pcit_magic[3] = 'T';
	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
}

static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}
static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}

static void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM uses a non-zero first character to indicate a
		 * HiperSockets-like reporting of the level; OSA sets the
		 * first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			scnprintf(card->info.mcl_level,
				  sizeof(card->info.mcl_level),
				  "%02x%02x",
				  card->info.mcl_level[2],
				  card->info.mcl_level[3]);
			break;
		}
		fallthrough;
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(entry->elements[i]) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(entry->elements[i]) > 1) {
			struct page *page = dev_alloc_page();

			if (!page)
				return NULL;

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
				  struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
		if (!buf->rx_skb)
			return -ENOMEM;
	}

	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr = u64_to_dma64(
			page_to_phys(pool_entry->elements[i]));
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}
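/*
 * Note on the page_count() checks above (illustrative): a pool page with
 * page_count() > 1 is still referenced by an skb that was handed up the
 * stack, so it must not be reused for RX. The fallback path therefore
 * swaps such pages out of the first pool entry via dev_alloc_page()
 * instead of waiting for them to be released.
 */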
static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
					    struct qeth_qdio_out_q *queue)
{
	if (!IS_IQD(card) ||
	    qeth_iqd_is_mcast_queue(card, queue) ||
	    card->options.cq == QETH_CQ_ENABLED ||
	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
		return 1;

	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
}

static int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));

	qeth_initialize_working_pool_list(card);
	/* give only as many buffers to hardware as we have buffer pool entries */
	for (i = 0; i < rx_bufs; i++) {
		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
		if (rc)
			return rc;
	}

	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc)
		return rc;

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];

		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
		queue->next_buf_to_fill = 0;
		queue->do_pack = 0;
		queue->prev_hdr = NULL;
		queue->coalesced_frames = 0;
		queue->bulk_start = 0;
		queue->bulk_count = 0;
		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
		atomic_set(&queue->used_buffers, 0);
		atomic_set(&queue->set_pci_flags_count, 0);
		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
	}
	return 0;
}

static void qeth_ipa_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_mpc_finalize_cmd(card, iob);

	/* override with IPA-specific values: */
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}

static void qeth_prepare_ipa_cmd(struct qeth_card *card,
				 struct qeth_cmd_buffer *iob, u16 cmd_length)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}

static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);

	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
}
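/*
 * Matching example (illustrative): an IPA command sent with
 * hdr.seqno == 42 is completed only by a reply whose IPA header carries
 * seqno 42, whereas MPC commands (qeth_mpc_match_reply) simply claim the
 * next non-IPA reply, since MPC commands are issued strictly in sequence.
 */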
struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
					   enum qeth_ipa_cmds cmd_code,
					   enum qeth_prot_versions prot,
					   unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipacmd_hdr *hdr;

	data_length += offsetof(struct qeth_ipa_cmd, data);
	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
			     QETH_IPA_TIMEOUT);
	if (!iob)
		return NULL;

	qeth_prepare_ipa_cmd(card, iob, data_length);
	iob->match = qeth_ipa_match_reply;

	hdr = &__ipa_cmd(iob)->hdr;
	hdr->command = cmd_code;
	hdr->initiator = IPA_CMD_INITIATOR_HOST;
	/* hdr->seqno is set by qeth_send_control_data() */
	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
	hdr->rel_adapter_no = (u8) card->dev->dev_port;
	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
	hdr->param_count = 1;
	hdr->prot_version = prot;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);

static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

/*
 * qeth_send_ipa_cmd() - send an IPA command
 *
 * See qeth_send_control_data() for explanation of the arguments.
 */
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		      int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
				      unsigned long),
		      void *reply_param)
{
	int rc;

	QETH_CARD_TEXT(card, 4, "sendipa");

	if (card->read_or_write_problem) {
		qeth_put_cmd(iob);
		return -EIO;
	}

	if (reply_cb == NULL)
		reply_cb = qeth_send_ipa_cmd_cb;
	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
static int qeth_send_startlan_cb(struct qeth_card *card,
				 struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
		return -ENETDOWN;

	return (cmd->hdr.return_code) ? -EIO : 0;
}

static int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "strtlan");

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}

static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_cmds_supp *query_cmd;

	QETH_CARD_TEXT(card, 3, "quyadpcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
	if (query_cmd->lan_type & 0x7f) {
		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
			return -EPROTONOSUPPORT;

		card->info.link_type = query_cmd->lan_type;
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
	}

	card->options.adp.supported = query_cmd->supported_cmds;
	return 0;
}

static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
{
	struct qeth_ipacmd_setadpparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	hdr->used_total = 1;
	hdr->seq_no = 1;
	return iob;
}

static int qeth_query_setadapterparms(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
				   SETADP_DATA_SIZEOF(query_cmds_supp));
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
	return rc;
}

static int qeth_query_ipassists_cb(struct qeth_card *card,
				   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		break;
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_CARD_TEXT(card, 2, "ipaunsup");
		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
		return -EOPNOTSUPP;
	default:
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
	}

	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4 = cmd->hdr.assists;
	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6 = cmd->hdr.assists;
	else
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
	return 0;
}
static int qeth_query_ipassists(struct qeth_card *card,
				enum qeth_prot_versions prot)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
	if (!iob)
		return -ENOMEM;
	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
	return rc;
}

static int qeth_query_switch_attributes_cb(struct qeth_card *card,
					   struct qeth_reply *reply,
					   unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_switch_attributes *attrs;
	struct qeth_switch_info *sw_info;

	QETH_CARD_TEXT(card, 2, "qswiatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
	return 0;
}

int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob,
				 qeth_query_switch_attributes_cb, sw_info);
}

struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);

static int qeth_query_setdiagass_cb(struct qeth_card *card,
				    struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
		return -EIO;
	}

	card->info.diagass_support = cmd->data.diagass.ext;
	return 0;
}

static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qdiagass");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}

static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
	int level;

	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	if (!info)
		return;
	level = stsi(NULL, 0, 0, 0);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}
static int qeth_hw_trap_cb(struct qeth_card *card,
			   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
				 (struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
				buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
				buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}

static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
						   &queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are short on memory, so we switch back to
			 * traditional skb allocation and drop packets
			 */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0,
						  queue->next_buf_to_init,
						  count);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}

	return 0;
}
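/*
 * Refill example (illustrative, the exact threshold depends on the
 * configured pool size via QETH_IN_BUF_REQUEUE_THRESHOLD()): with a
 * hypothetical threshold of 32, completed RX buffers are re-posted to
 * the device in batches of at least 32 rather than one by one, so only
 * every 32nd completion issues a SIGA.
 */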
static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}

static void qeth_handle_send_error(struct qeth_card *card,
				   struct qeth_qdio_out_buffer *buffer,
				   unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switch to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue, this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}
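/*
 * Packing hysteresis example (illustrative): a queue switches to packing
 * mode once used_buffers climbs to QETH_HIGH_WATERMARK_PACK, and back to
 * non-packing only after it drains to QETH_LOW_WATERMARK_PACK. A load
 * hovering between the two watermarks therefore does not toggle the mode
 * on every packet.
 */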
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	struct qeth_card *card = queue->card;
	unsigned int frames, usecs;
	struct qaob *aob = NULL;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
			SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (IS_IQD(card)) {
		if (card->options.cq == QETH_CQ_ENABLED &&
		    !qeth_iqd_is_mcast_queue(card, queue) &&
		    count == 1) {
			if (!buf->aob)
				buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
							     GFP_ATOMIC);
			if (buf->aob) {
				struct qeth_qaob_priv1 *priv;

				aob = buf->aob;
				priv = (struct qeth_qaob_priv1 *)&aob->user1;
				priv->state = QETH_QAOB_ISSUED;
				priv->queue_no = queue->queue_no;
			}
		}
	} else {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
			     (QETH_HIGH_WATERMARK_PACK -
			      QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * There is no outstanding PCI any more, so we
				 * have to request one to be sure that the PCI
				 * will wake us at some time in the future.
				 * Then we can flush packed buffers that might
				 * still be hanging around, which can happen if
				 * no further send was requested by the stack.
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no,
					   index, count, aob);

	switch (rc) {
	case 0:
	case -ENOBUFS:
		/* ignore temporary SIGA errors without busy condition */

		/* Fake the TX completion interrupt: */
		frames = READ_ONCE(queue->max_coalesced_frames);
		usecs = READ_ONCE(queue->coalesce_usecs);

		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (qeth_use_tx_irqs(card) &&
			   atomic_read(&queue->used_buffers) >= 32) {
			/* Old behaviour carried over from the qdio layer: */
			napi_schedule(&queue->napi);
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
		}

		break;
	default:
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover
		 */
		qeth_schedule_recovery(queue->card);
	}
}

static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}

static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
		return -1;

	card->options.cq = cq;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
	unsigned int queue_no = priv->queue_no;

	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));

	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
	    queue_no < card->qdio.no_out_queues)
		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}

static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			dma64_t phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, dma64_to_virt(phys_aob_addr));
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue,
					  cq->next_buf_to_init, count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;

	QETH_CARD_TEXT(card, 2, "achkcond");
	netif_tx_stop_all_queues(card->dev);
	qeth_schedule_recovery(card);
}

/*
 * Note: Function assumes that we have 4 outbound queues.
 */
static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case htons(ETH_P_IPV6):
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
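/*
 * Worked example for the precedence mapping above (illustrative): for
 * QETH_PRIO_Q_ING_PREC, ~tos >> 6 & 3 maps the top two TOS bits t to
 * queue 3 - t. Best-effort traffic (t = 0) lands on queue 3, tos values
 * of 0xc0 and up (t = 3) on queue 0, and e.g. a DSCP EF packet
 * (tos 0xb8, t = 2) on queue 1.
 */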
/**
 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
 * @skb: SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
 *			   to transmit an skb.
 * @skb: the skb to operate on.
 * @data_offset: skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
static unsigned int qeth_count_elements(struct sk_buff *skb,
					unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}

#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)
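/*
 * Counting example (illustrative, 4 KB pages): a linear skb whose 1500
 * bytes of data start 200 bytes before a page boundary spans two pages
 * and thus needs two buffer elements, while the same skb placed fully
 * inside one page needs a single element. Page fragments are counted the
 * same way by qeth_get_elements_for_frags().
 */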
*/ 3991 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 3992 max_elements, __elements, skb->len); 3993 return -E2BIG; 3994 } 3995 3996 rc = skb_linearize(skb); 3997 if (rc) { 3998 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 3999 return rc; 4000 } 4001 4002 QETH_TXQ_STAT_INC(queue, skbs_linearized); 4003 /* Linearization changed the layout, re-evaluate: */ 4004 goto check_layout; 4005 } 4006 4007 *elements = __elements; 4008 /* Add the header: */ 4009 if (push_ok) { 4010 *hdr = skb_push(skb, hdr_len); 4011 return hdr_len; 4012 } 4013 4014 /* Fall back to cache element with known-good alignment: */ 4015 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 4016 return -E2BIG; 4017 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp); 4018 if (!*hdr) 4019 return -ENOMEM; 4020 /* Copy protocol headers behind HW header: */ 4021 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 4022 return 0; 4023 } 4024 4025 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, 4026 struct sk_buff *curr_skb, 4027 struct qeth_hdr *curr_hdr) 4028 { 4029 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; 4030 struct qeth_hdr *prev_hdr = queue->prev_hdr; 4031 4032 if (!prev_hdr) 4033 return true; 4034 4035 /* All packets must have the same target: */ 4036 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 4037 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); 4038 4039 return ether_addr_equal(eth_hdr(prev_skb)->h_dest, 4040 eth_hdr(curr_skb)->h_dest) && 4041 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); 4042 } 4043 4044 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && 4045 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); 4046 } 4047 4048 /** 4049 * qeth_fill_buffer() - map skb into an output buffer 4050 * @buf: buffer to transport the skb 4051 * @skb: skb to map into the buffer 4052 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 4053 * from qeth_core_header_cache. 
4054 * @offset: when mapping the skb, start at skb->data + offset 4055 * @hd_len: if > 0, build a dedicated header element of this size 4056 */ 4057 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, 4058 struct sk_buff *skb, struct qeth_hdr *hdr, 4059 unsigned int offset, unsigned int hd_len) 4060 { 4061 struct qdio_buffer *buffer = buf->buffer; 4062 int element = buf->next_element_to_fill; 4063 int length = skb_headlen(skb) - offset; 4064 char *data = skb->data + offset; 4065 unsigned int elem_length, cnt; 4066 bool is_first_elem = true; 4067 4068 __skb_queue_tail(&buf->skb_list, skb); 4069 4070 /* build dedicated element for HW Header */ 4071 if (hd_len) { 4072 is_first_elem = false; 4073 4074 buffer->element[element].addr = virt_to_dma64(hdr); 4075 buffer->element[element].length = hd_len; 4076 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 4077 4078 /* HW header is allocated from cache: */ 4079 if ((void *)hdr != skb->data) 4080 __set_bit(element, buf->from_kmem_cache); 4081 /* HW header was pushed and is contiguous with linear part: */ 4082 else if (length > 0 && !PAGE_ALIGNED(data) && 4083 (data == (char *)hdr + hd_len)) 4084 buffer->element[element].eflags |= 4085 SBAL_EFLAGS_CONTIGUOUS; 4086 4087 element++; 4088 } 4089 4090 /* map linear part into buffer element(s) */ 4091 while (length > 0) { 4092 elem_length = min_t(unsigned int, length, 4093 PAGE_SIZE - offset_in_page(data)); 4094 4095 buffer->element[element].addr = virt_to_dma64(data); 4096 buffer->element[element].length = elem_length; 4097 length -= elem_length; 4098 if (is_first_elem) { 4099 is_first_elem = false; 4100 if (length || skb_is_nonlinear(skb)) 4101 /* skb needs additional elements */ 4102 buffer->element[element].eflags = 4103 SBAL_EFLAGS_FIRST_FRAG; 4104 else 4105 buffer->element[element].eflags = 0; 4106 } else { 4107 buffer->element[element].eflags = 4108 SBAL_EFLAGS_MIDDLE_FRAG; 4109 } 4110 4111 data += elem_length; 4112 element++; 4113 } 4114 4115 /* map page frags into buffer element(s) */ 4116 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 4117 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 4118 4119 data = skb_frag_address(frag); 4120 length = skb_frag_size(frag); 4121 while (length > 0) { 4122 elem_length = min_t(unsigned int, length, 4123 PAGE_SIZE - offset_in_page(data)); 4124 4125 buffer->element[element].addr = virt_to_dma64(data); 4126 buffer->element[element].length = elem_length; 4127 buffer->element[element].eflags = 4128 SBAL_EFLAGS_MIDDLE_FRAG; 4129 4130 length -= elem_length; 4131 data += elem_length; 4132 element++; 4133 } 4134 } 4135 4136 if (buffer->element[element - 1].eflags) 4137 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 4138 buf->next_element_to_fill = element; 4139 return element; 4140 } 4141 4142 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4143 struct sk_buff *skb, unsigned int elements, 4144 struct qeth_hdr *hdr, unsigned int offset, 4145 unsigned int hd_len) 4146 { 4147 unsigned int bytes = qdisc_pkt_len(skb); 4148 struct qeth_qdio_out_buffer *buffer; 4149 unsigned int next_element; 4150 struct netdev_queue *txq; 4151 bool stopped = false; 4152 bool flush; 4153 4154 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; 4155 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4156 4157 /* Just a sanity check, the wake/stop logic should ensure that we always 4158 * get a free buffer. 
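	 * If it is not EMPTY we bail out with -EBUSY rather than touch a
	 * buffer that may still be owned by the HW.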
4159 */ 4160 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4161 return -EBUSY; 4162 4163 flush = !qeth_iqd_may_bulk(queue, skb, hdr); 4164 4165 if (flush || 4166 (buffer->next_element_to_fill + elements > queue->max_elements)) { 4167 if (buffer->next_element_to_fill > 0) { 4168 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4169 queue->bulk_count++; 4170 } 4171 4172 if (queue->bulk_count >= queue->bulk_max) 4173 flush = true; 4174 4175 if (flush) 4176 qeth_flush_queue(queue); 4177 4178 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + 4179 queue->bulk_count)]; 4180 4181 /* Sanity-check again: */ 4182 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4183 return -EBUSY; 4184 } 4185 4186 if (buffer->next_element_to_fill == 0 && 4187 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4188 /* If a TX completion happens right _here_ and misses to wake 4189 * the txq, then our re-check below will catch the race. 4190 */ 4191 QETH_TXQ_STAT_INC(queue, stopped); 4192 netif_tx_stop_queue(txq); 4193 stopped = true; 4194 } 4195 4196 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4197 buffer->bytes += bytes; 4198 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4199 queue->prev_hdr = hdr; 4200 4201 flush = __netdev_tx_sent_queue(txq, bytes, 4202 !stopped && netdev_xmit_more()); 4203 4204 if (flush || next_element >= queue->max_elements) { 4205 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4206 queue->bulk_count++; 4207 4208 if (queue->bulk_count >= queue->bulk_max) 4209 flush = true; 4210 4211 if (flush) 4212 qeth_flush_queue(queue); 4213 } 4214 4215 if (stopped && !qeth_out_queue_is_full(queue)) 4216 netif_tx_start_queue(txq); 4217 return 0; 4218 } 4219 4220 static int qeth_do_send_packet(struct qeth_card *card, 4221 struct qeth_qdio_out_q *queue, 4222 struct sk_buff *skb, struct qeth_hdr *hdr, 4223 unsigned int offset, unsigned int hd_len, 4224 unsigned int elements_needed) 4225 { 4226 unsigned int start_index = queue->next_buf_to_fill; 4227 struct qeth_qdio_out_buffer *buffer; 4228 unsigned int next_element; 4229 struct netdev_queue *txq; 4230 bool stopped = false; 4231 int flush_count = 0; 4232 int do_pack = 0; 4233 int rc = 0; 4234 4235 buffer = queue->bufs[queue->next_buf_to_fill]; 4236 4237 /* Just a sanity check, the wake/stop logic should ensure that we always 4238 * get a free buffer. 4239 */ 4240 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4241 return -EBUSY; 4242 4243 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4244 4245 /* check if we need to switch packing state of this queue */ 4246 qeth_switch_to_packing_if_needed(queue); 4247 if (queue->do_pack) { 4248 do_pack = 1; 4249 /* does packet fit in current buffer? */ 4250 if (buffer->next_element_to_fill + elements_needed > 4251 queue->max_elements) { 4252 /* ... 
no -> set state PRIMED */ 4253 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4254 flush_count++; 4255 queue->next_buf_to_fill = 4256 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4257 buffer = queue->bufs[queue->next_buf_to_fill]; 4258 4259 /* We stepped forward, so sanity-check again: */ 4260 if (atomic_read(&buffer->state) != 4261 QETH_QDIO_BUF_EMPTY) { 4262 qeth_flush_buffers(queue, start_index, 4263 flush_count); 4264 rc = -EBUSY; 4265 goto out; 4266 } 4267 } 4268 } 4269 4270 if (buffer->next_element_to_fill == 0 && 4271 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4272 /* If a TX completion happens right _here_ and misses to wake 4273 * the txq, then our re-check below will catch the race. 4274 */ 4275 QETH_TXQ_STAT_INC(queue, stopped); 4276 netif_tx_stop_queue(txq); 4277 stopped = true; 4278 } 4279 4280 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4281 buffer->bytes += qdisc_pkt_len(skb); 4282 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4283 4284 if (queue->do_pack) 4285 QETH_TXQ_STAT_INC(queue, skbs_pack); 4286 if (!queue->do_pack || stopped || next_element >= queue->max_elements) { 4287 flush_count++; 4288 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4289 queue->next_buf_to_fill = 4290 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4291 } 4292 4293 if (flush_count) 4294 qeth_flush_buffers(queue, start_index, flush_count); 4295 4296 out: 4297 if (do_pack) 4298 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 4299 4300 if (stopped && !qeth_out_queue_is_full(queue)) 4301 netif_tx_start_queue(txq); 4302 return rc; 4303 } 4304 4305 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 4306 unsigned int payload_len, struct sk_buff *skb, 4307 unsigned int proto_len) 4308 { 4309 struct qeth_hdr_ext_tso *ext = &hdr->ext; 4310 4311 ext->hdr_tot_len = sizeof(*ext); 4312 ext->imb_hdr_no = 1; 4313 ext->hdr_type = 1; 4314 ext->hdr_version = 1; 4315 ext->hdr_len = 28; 4316 ext->payload_len = payload_len; 4317 ext->mss = skb_shinfo(skb)->gso_size; 4318 ext->dg_hdr_len = proto_len; 4319 } 4320 4321 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 4322 struct qeth_qdio_out_q *queue, __be16 proto, 4323 void (*fill_header)(struct qeth_qdio_out_q *queue, 4324 struct qeth_hdr *hdr, struct sk_buff *skb, 4325 __be16 proto, unsigned int data_len)) 4326 { 4327 unsigned int proto_len, hw_hdr_len; 4328 unsigned int frame_len = skb->len; 4329 bool is_tso = skb_is_gso(skb); 4330 unsigned int data_offset = 0; 4331 struct qeth_hdr *hdr = NULL; 4332 unsigned int hd_len = 0; 4333 unsigned int elements; 4334 int push_len, rc; 4335 4336 if (is_tso) { 4337 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4338 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4339 } else { 4340 hw_hdr_len = sizeof(struct qeth_hdr); 4341 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4342 } 4343 4344 rc = skb_cow_head(skb, hw_hdr_len); 4345 if (rc) 4346 return rc; 4347 4348 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4349 &elements); 4350 if (push_len < 0) 4351 return push_len; 4352 if (is_tso || !push_len) { 4353 /* HW header needs its own buffer element. 
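		 * This is the case for TSO, and whenever the header was
		 * allocated from the header cache (push_len == 0) and is
		 * therefore not contiguous with the skb's linear data.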
*/ 4354 hd_len = hw_hdr_len + proto_len; 4355 data_offset = push_len + proto_len; 4356 } 4357 memset(hdr, 0, hw_hdr_len); 4358 fill_header(queue, hdr, skb, proto, frame_len); 4359 if (is_tso) 4360 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4361 frame_len - proto_len, skb, proto_len); 4362 4363 if (IS_IQD(card)) { 4364 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, 4365 hd_len); 4366 } else { 4367 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4368 skb_orphan(skb); 4369 spin_lock(&queue->lock); 4370 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4371 hd_len, elements); 4372 spin_unlock(&queue->lock); 4373 } 4374 4375 if (rc && !push_len) 4376 kmem_cache_free(qeth_core_header_cache, hdr); 4377 4378 return rc; 4379 } 4380 EXPORT_SYMBOL_GPL(qeth_xmit); 4381 4382 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4383 struct qeth_reply *reply, unsigned long data) 4384 { 4385 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4386 struct qeth_ipacmd_setadpparms *setparms; 4387 4388 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4389 4390 setparms = &(cmd->data.setadapterparms); 4391 if (qeth_setadpparms_inspect_rc(cmd)) { 4392 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4393 setparms->data.mode = SET_PROMISC_MODE_OFF; 4394 } 4395 card->info.promisc_mode = setparms->data.mode; 4396 return (cmd->hdr.return_code) ? -EIO : 0; 4397 } 4398 4399 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) 4400 { 4401 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : 4402 SET_PROMISC_MODE_OFF; 4403 struct qeth_cmd_buffer *iob; 4404 struct qeth_ipa_cmd *cmd; 4405 4406 QETH_CARD_TEXT(card, 4, "setprom"); 4407 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4408 4409 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4410 SETADP_DATA_SIZEOF(mode)); 4411 if (!iob) 4412 return; 4413 cmd = __ipa_cmd(iob); 4414 cmd->data.setadapterparms.data.mode = mode; 4415 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4416 } 4417 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4418 4419 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4420 struct qeth_reply *reply, unsigned long data) 4421 { 4422 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4423 struct qeth_ipacmd_setadpparms *adp_cmd; 4424 4425 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4426 if (qeth_setadpparms_inspect_rc(cmd)) 4427 return -EIO; 4428 4429 adp_cmd = &cmd->data.setadapterparms; 4430 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4431 return -EADDRNOTAVAIL; 4432 4433 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4434 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4435 return -EADDRNOTAVAIL; 4436 4437 eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr); 4438 return 0; 4439 } 4440 4441 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4442 { 4443 int rc; 4444 struct qeth_cmd_buffer *iob; 4445 struct qeth_ipa_cmd *cmd; 4446 4447 QETH_CARD_TEXT(card, 4, "chgmac"); 4448 4449 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4450 SETADP_DATA_SIZEOF(change_addr)); 4451 if (!iob) 4452 return -ENOMEM; 4453 cmd = __ipa_cmd(iob); 4454 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4455 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4456 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4457 card->dev->dev_addr); 4458 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4459 NULL); 4460 return 
rc; 4461 } 4462 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 4463 4464 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4465 struct qeth_reply *reply, unsigned long data) 4466 { 4467 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4468 struct qeth_set_access_ctrl *access_ctrl_req; 4469 4470 QETH_CARD_TEXT(card, 4, "setaccb"); 4471 4472 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4473 QETH_CARD_TEXT_(card, 2, "rc=%d", 4474 cmd->data.setadapterparms.hdr.return_code); 4475 if (cmd->data.setadapterparms.hdr.return_code != 4476 SET_ACCESS_CTRL_RC_SUCCESS) 4477 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", 4478 access_ctrl_req->subcmd_code, CARD_DEVID(card), 4479 cmd->data.setadapterparms.hdr.return_code); 4480 switch (qeth_setadpparms_inspect_rc(cmd)) { 4481 case SET_ACCESS_CTRL_RC_SUCCESS: 4482 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE) 4483 dev_info(&card->gdev->dev, 4484 "QDIO data connection isolation is deactivated\n"); 4485 else 4486 dev_info(&card->gdev->dev, 4487 "QDIO data connection isolation is activated\n"); 4488 return 0; 4489 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4490 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", 4491 CARD_DEVID(card)); 4492 return 0; 4493 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4494 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", 4495 CARD_DEVID(card)); 4496 return 0; 4497 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: 4498 dev_err(&card->gdev->dev, "Adapter does not " 4499 "support QDIO data connection isolation\n"); 4500 return -EOPNOTSUPP; 4501 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: 4502 dev_err(&card->gdev->dev, 4503 "Adapter is dedicated. 
" 4504 "QDIO data connection isolation not supported\n"); 4505 return -EOPNOTSUPP; 4506 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: 4507 dev_err(&card->gdev->dev, 4508 "TSO does not permit QDIO data connection isolation\n"); 4509 return -EPERM; 4510 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: 4511 dev_err(&card->gdev->dev, "The adjacent switch port does not " 4512 "support reflective relay mode\n"); 4513 return -EOPNOTSUPP; 4514 case SET_ACCESS_CTRL_RC_REFLREL_FAILED: 4515 dev_err(&card->gdev->dev, "The reflective relay mode cannot be " 4516 "enabled at the adjacent switch port"); 4517 return -EREMOTEIO; 4518 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: 4519 dev_warn(&card->gdev->dev, "Turning off reflective relay mode " 4520 "at the adjacent switch failed\n"); 4521 /* benign error while disabling ISOLATION_MODE_FWD */ 4522 return 0; 4523 default: 4524 return -EIO; 4525 } 4526 } 4527 4528 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, 4529 enum qeth_ipa_isolation_modes mode) 4530 { 4531 int rc; 4532 struct qeth_cmd_buffer *iob; 4533 struct qeth_ipa_cmd *cmd; 4534 struct qeth_set_access_ctrl *access_ctrl_req; 4535 4536 QETH_CARD_TEXT(card, 4, "setacctl"); 4537 4538 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { 4539 dev_err(&card->gdev->dev, 4540 "Adapter does not support QDIO data connection isolation\n"); 4541 return -EOPNOTSUPP; 4542 } 4543 4544 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4545 SETADP_DATA_SIZEOF(set_access_ctrl)); 4546 if (!iob) 4547 return -ENOMEM; 4548 cmd = __ipa_cmd(iob); 4549 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4550 access_ctrl_req->subcmd_code = mode; 4551 4552 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, 4553 NULL); 4554 if (rc) { 4555 QETH_CARD_TEXT_(card, 2, "rc=%d", rc); 4556 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", 4557 rc, CARD_DEVID(card)); 4558 } 4559 4560 return rc; 4561 } 4562 4563 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue) 4564 { 4565 struct qeth_card *card; 4566 4567 card = dev->ml_priv; 4568 QETH_CARD_TEXT(card, 4, "txtimeo"); 4569 qeth_schedule_recovery(card); 4570 } 4571 EXPORT_SYMBOL_GPL(qeth_tx_timeout); 4572 4573 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 4574 { 4575 struct qeth_card *card = dev->ml_priv; 4576 int rc = 0; 4577 4578 switch (regnum) { 4579 case MII_BMCR: /* Basic mode control register */ 4580 rc = BMCR_FULLDPLX; 4581 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4582 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4583 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4584 rc |= BMCR_SPEED100; 4585 break; 4586 case MII_BMSR: /* Basic mode status register */ 4587 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4588 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4589 BMSR_100BASE4; 4590 break; 4591 case MII_PHYSID1: /* PHYS ID 1 */ 4592 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4593 dev->dev_addr[2]; 4594 rc = (rc >> 5) & 0xFFFF; 4595 break; 4596 case MII_PHYSID2: /* PHYS ID 2 */ 4597 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4598 break; 4599 case MII_ADVERTISE: /* Advertisement control reg */ 4600 rc = ADVERTISE_ALL; 4601 break; 4602 case MII_LPA: /* Link partner ability reg */ 4603 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4604 LPA_100BASE4 | LPA_LPACK; 4605 break; 4606 case MII_EXPANSION: /* Expansion register */ 4607 break; 4608 case MII_DCOUNTER: /* disconnect 
counter */ 4609 break; 4610 case MII_FCSCOUNTER: /* false carrier counter */ 4611 break; 4612 case MII_NWAYTEST: /* N-way auto-neg test register */ 4613 break; 4614 case MII_RERRCOUNTER: /* rx error counter */ 4615 rc = card->stats.rx_length_errors + 4616 card->stats.rx_frame_errors + 4617 card->stats.rx_fifo_errors; 4618 break; 4619 case MII_SREVISION: /* silicon revision */ 4620 break; 4621 case MII_RESV1: /* reserved 1 */ 4622 break; 4623 case MII_LBRERROR: /* loopback, rx, bypass error */ 4624 break; 4625 case MII_PHYADDR: /* physical address */ 4626 break; 4627 case MII_RESV2: /* reserved 2 */ 4628 break; 4629 case MII_TPISTATUS: /* TPI status for 10mbps */ 4630 break; 4631 case MII_NCONFIG: /* network interface config */ 4632 break; 4633 default: 4634 break; 4635 } 4636 return rc; 4637 } 4638 4639 static int qeth_snmp_command_cb(struct qeth_card *card, 4640 struct qeth_reply *reply, unsigned long data) 4641 { 4642 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4643 struct qeth_arp_query_info *qinfo = reply->param; 4644 struct qeth_ipacmd_setadpparms *adp_cmd; 4645 unsigned int data_len; 4646 void *snmp_data; 4647 4648 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4649 4650 if (cmd->hdr.return_code) { 4651 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4652 return -EIO; 4653 } 4654 if (cmd->data.setadapterparms.hdr.return_code) { 4655 cmd->hdr.return_code = 4656 cmd->data.setadapterparms.hdr.return_code; 4657 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4658 return -EIO; 4659 } 4660 4661 adp_cmd = &cmd->data.setadapterparms; 4662 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); 4663 if (adp_cmd->hdr.seq_no == 1) { 4664 snmp_data = &adp_cmd->data.snmp; 4665 } else { 4666 snmp_data = &adp_cmd->data.snmp.request; 4667 data_len -= offsetof(struct qeth_snmp_cmd, request); 4668 } 4669 4670 /* check if there is enough room in userspace */ 4671 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4672 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4673 return -ENOSPC; 4674 } 4675 QETH_CARD_TEXT_(card, 4, "snore%i", 4676 cmd->data.setadapterparms.hdr.used_total); 4677 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4678 cmd->data.setadapterparms.hdr.seq_no); 4679 /*copy entries to user buffer*/ 4680 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4681 qinfo->udata_offset += data_len; 4682 4683 if (cmd->data.setadapterparms.hdr.seq_no < 4684 cmd->data.setadapterparms.hdr.used_total) 4685 return 1; 4686 return 0; 4687 } 4688 4689 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4690 { 4691 struct qeth_snmp_ureq __user *ureq; 4692 struct qeth_cmd_buffer *iob; 4693 unsigned int req_len; 4694 struct qeth_arp_query_info qinfo = {0, }; 4695 int rc = 0; 4696 4697 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4698 4699 if (IS_VM_NIC(card)) 4700 return -EOPNOTSUPP; 4701 4702 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4703 IS_LAYER3(card)) 4704 return -EOPNOTSUPP; 4705 4706 ureq = (struct qeth_snmp_ureq __user *) udata; 4707 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || 4708 get_user(req_len, &ureq->hdr.req_len)) 4709 return -EFAULT; 4710 4711 /* Sanitize user input, to avoid overflows in iob size calculation: */ 4712 if (req_len > QETH_BUFSIZE) 4713 return -EINVAL; 4714 4715 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4716 if (!iob) 4717 return -ENOMEM; 4718 4719 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, 4720 &ureq->cmd, req_len)) { 4721 qeth_put_cmd(iob); 4722 
return -EFAULT; 4723 } 4724 4725 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4726 if (!qinfo.udata) { 4727 qeth_put_cmd(iob); 4728 return -ENOMEM; 4729 } 4730 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4731 4732 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4733 if (rc) 4734 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4735 CARD_DEVID(card), rc); 4736 else { 4737 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4738 rc = -EFAULT; 4739 } 4740 4741 kfree(qinfo.udata); 4742 return rc; 4743 } 4744 4745 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4746 struct qeth_reply *reply, 4747 unsigned long data) 4748 { 4749 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4750 struct qeth_qoat_priv *priv = reply->param; 4751 int resdatalen; 4752 4753 QETH_CARD_TEXT(card, 3, "qoatcb"); 4754 if (qeth_setadpparms_inspect_rc(cmd)) 4755 return -EIO; 4756 4757 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4758 4759 if (resdatalen > (priv->buffer_len - priv->response_len)) 4760 return -ENOSPC; 4761 4762 memcpy(priv->buffer + priv->response_len, 4763 &cmd->data.setadapterparms.hdr, resdatalen); 4764 priv->response_len += resdatalen; 4765 4766 if (cmd->data.setadapterparms.hdr.seq_no < 4767 cmd->data.setadapterparms.hdr.used_total) 4768 return 1; 4769 return 0; 4770 } 4771 4772 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4773 { 4774 int rc = 0; 4775 struct qeth_cmd_buffer *iob; 4776 struct qeth_ipa_cmd *cmd; 4777 struct qeth_query_oat *oat_req; 4778 struct qeth_query_oat_data oat_data; 4779 struct qeth_qoat_priv priv; 4780 void __user *tmp; 4781 4782 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4783 4784 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) 4785 return -EOPNOTSUPP; 4786 4787 if (copy_from_user(&oat_data, udata, sizeof(oat_data))) 4788 return -EFAULT; 4789 4790 priv.buffer_len = oat_data.buffer_len; 4791 priv.response_len = 0; 4792 priv.buffer = vzalloc(oat_data.buffer_len); 4793 if (!priv.buffer) 4794 return -ENOMEM; 4795 4796 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4797 SETADP_DATA_SIZEOF(query_oat)); 4798 if (!iob) { 4799 rc = -ENOMEM; 4800 goto out_free; 4801 } 4802 cmd = __ipa_cmd(iob); 4803 oat_req = &cmd->data.setadapterparms.data.query_oat; 4804 oat_req->subcmd_code = oat_data.command; 4805 4806 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); 4807 if (!rc) { 4808 tmp = is_compat_task() ? 
compat_ptr(oat_data.ptr) : 4809 u64_to_user_ptr(oat_data.ptr); 4810 oat_data.response_len = priv.response_len; 4811 4812 if (copy_to_user(tmp, priv.buffer, priv.response_len) || 4813 copy_to_user(udata, &oat_data, sizeof(oat_data))) 4814 rc = -EFAULT; 4815 } 4816 4817 out_free: 4818 vfree(priv.buffer); 4819 return rc; 4820 } 4821 4822 static int qeth_init_link_info_oat_cb(struct qeth_card *card, 4823 struct qeth_reply *reply_priv, 4824 unsigned long data) 4825 { 4826 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4827 struct qeth_link_info *link_info = reply_priv->param; 4828 struct qeth_query_oat_physical_if *phys_if; 4829 struct qeth_query_oat_reply *reply; 4830 4831 QETH_CARD_TEXT(card, 2, "qoatincb"); 4832 if (qeth_setadpparms_inspect_rc(cmd)) 4833 return -EIO; 4834 4835 /* Multi-part reply is unexpected, don't bother: */ 4836 if (cmd->data.setadapterparms.hdr.used_total > 1) 4837 return -EINVAL; 4838 4839 /* Expect the reply to start with phys_if data: */ 4840 reply = &cmd->data.setadapterparms.data.query_oat.reply[0]; 4841 if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF || 4842 reply->length < sizeof(*reply)) 4843 return -EINVAL; 4844 4845 phys_if = &reply->phys_if; 4846 4847 switch (phys_if->speed_duplex) { 4848 case QETH_QOAT_PHYS_SPEED_10M_HALF: 4849 link_info->speed = SPEED_10; 4850 link_info->duplex = DUPLEX_HALF; 4851 break; 4852 case QETH_QOAT_PHYS_SPEED_10M_FULL: 4853 link_info->speed = SPEED_10; 4854 link_info->duplex = DUPLEX_FULL; 4855 break; 4856 case QETH_QOAT_PHYS_SPEED_100M_HALF: 4857 link_info->speed = SPEED_100; 4858 link_info->duplex = DUPLEX_HALF; 4859 break; 4860 case QETH_QOAT_PHYS_SPEED_100M_FULL: 4861 link_info->speed = SPEED_100; 4862 link_info->duplex = DUPLEX_FULL; 4863 break; 4864 case QETH_QOAT_PHYS_SPEED_1000M_HALF: 4865 link_info->speed = SPEED_1000; 4866 link_info->duplex = DUPLEX_HALF; 4867 break; 4868 case QETH_QOAT_PHYS_SPEED_1000M_FULL: 4869 link_info->speed = SPEED_1000; 4870 link_info->duplex = DUPLEX_FULL; 4871 break; 4872 case QETH_QOAT_PHYS_SPEED_10G_FULL: 4873 link_info->speed = SPEED_10000; 4874 link_info->duplex = DUPLEX_FULL; 4875 break; 4876 case QETH_QOAT_PHYS_SPEED_25G_FULL: 4877 link_info->speed = SPEED_25000; 4878 link_info->duplex = DUPLEX_FULL; 4879 break; 4880 case QETH_QOAT_PHYS_SPEED_UNKNOWN: 4881 default: 4882 link_info->speed = SPEED_UNKNOWN; 4883 link_info->duplex = DUPLEX_UNKNOWN; 4884 break; 4885 } 4886 4887 switch (phys_if->media_type) { 4888 case QETH_QOAT_PHYS_MEDIA_COPPER: 4889 link_info->port = PORT_TP; 4890 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4891 break; 4892 case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT: 4893 link_info->port = PORT_FIBRE; 4894 link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT; 4895 break; 4896 case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG: 4897 link_info->port = PORT_FIBRE; 4898 link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG; 4899 break; 4900 default: 4901 link_info->port = PORT_OTHER; 4902 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4903 break; 4904 } 4905 4906 return 0; 4907 } 4908 4909 static void qeth_init_link_info(struct qeth_card *card) 4910 { 4911 qeth_default_link_info(card); 4912 4913 /* Get more accurate data via QUERY OAT: */ 4914 if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 4915 struct qeth_link_info link_info; 4916 struct qeth_cmd_buffer *iob; 4917 4918 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4919 SETADP_DATA_SIZEOF(query_oat)); 4920 if (iob) { 4921 struct qeth_ipa_cmd *cmd = __ipa_cmd(iob); 4922 struct qeth_query_oat *oat_req; 4923 4924 oat_req = 
&cmd->data.setadapterparms.data.query_oat; 4925 oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE; 4926 4927 if (!qeth_send_ipa_cmd(card, iob, 4928 qeth_init_link_info_oat_cb, 4929 &link_info)) { 4930 if (link_info.speed != SPEED_UNKNOWN) 4931 card->info.link_info.speed = link_info.speed; 4932 if (link_info.duplex != DUPLEX_UNKNOWN) 4933 card->info.link_info.duplex = link_info.duplex; 4934 if (link_info.port != PORT_OTHER) 4935 card->info.link_info.port = link_info.port; 4936 if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN) 4937 card->info.link_info.link_mode = link_info.link_mode; 4938 } 4939 } 4940 } 4941 } 4942 4943 /** 4944 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 4945 * @card: pointer to a qeth_card 4946 * 4947 * Returns 4948 * 0, if a MAC address has been set for the card's netdevice 4949 * a return code, for various error conditions 4950 */ 4951 int qeth_vm_request_mac(struct qeth_card *card) 4952 { 4953 struct diag26c_mac_resp *response; 4954 struct diag26c_mac_req *request; 4955 int rc; 4956 4957 QETH_CARD_TEXT(card, 2, "vmreqmac"); 4958 4959 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 4960 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 4961 if (!request || !response) { 4962 rc = -ENOMEM; 4963 goto out; 4964 } 4965 4966 request->resp_buf_len = sizeof(*response); 4967 request->resp_version = DIAG26C_VERSION2; 4968 request->op_code = DIAG26C_GET_MAC; 4969 request->devno = card->info.ddev_devno; 4970 4971 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4972 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 4973 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4974 if (rc) 4975 goto out; 4976 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 4977 4978 if (request->resp_buf_len < sizeof(*response) || 4979 response->version != request->resp_version) { 4980 rc = -EIO; 4981 QETH_CARD_TEXT(card, 2, "badresp"); 4982 QETH_CARD_HEX(card, 2, &request->resp_buf_len, 4983 sizeof(request->resp_buf_len)); 4984 } else if (!is_valid_ether_addr(response->mac)) { 4985 rc = -EINVAL; 4986 QETH_CARD_TEXT(card, 2, "badmac"); 4987 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); 4988 } else { 4989 eth_hw_addr_set(card->dev, response->mac); 4990 } 4991 4992 out: 4993 kfree(response); 4994 kfree(request); 4995 return rc; 4996 } 4997 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 4998 4999 static void qeth_determine_capabilities(struct qeth_card *card) 5000 { 5001 struct qeth_channel *channel = &card->data; 5002 struct ccw_device *ddev = channel->ccwdev; 5003 int rc; 5004 int ddev_offline = 0; 5005 5006 QETH_CARD_TEXT(card, 2, "detcapab"); 5007 if (!ddev->online) { 5008 ddev_offline = 1; 5009 rc = qeth_start_channel(channel); 5010 if (rc) { 5011 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5012 goto out; 5013 } 5014 } 5015 5016 rc = qeth_read_conf_data(card); 5017 if (rc) { 5018 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 5019 CARD_DEVID(card), rc); 5020 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5021 goto out_offline; 5022 } 5023 5024 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 5025 if (rc) 5026 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5027 5028 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); 5029 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); 5030 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); 5031 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); 5032 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); 5033 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || 5034 ((card->ssqd.qdioac1 & 
CHSC_AC1_INITIATE_INPUTQ) == 0) || 5035 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 5036 dev_info(&card->gdev->dev, 5037 "Completion Queueing supported\n"); 5038 } else { 5039 card->options.cq = QETH_CQ_NOTAVAILABLE; 5040 } 5041 5042 out_offline: 5043 if (ddev_offline == 1) 5044 qeth_stop_channel(channel); 5045 out: 5046 return; 5047 } 5048 5049 static void qeth_read_ccw_conf_data(struct qeth_card *card) 5050 { 5051 struct qeth_card_info *info = &card->info; 5052 struct ccw_device *cdev = CARD_DDEV(card); 5053 struct ccw_dev_id dev_id; 5054 5055 QETH_CARD_TEXT(card, 2, "ccwconfd"); 5056 ccw_device_get_id(cdev, &dev_id); 5057 5058 info->ddev_devno = dev_id.devno; 5059 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) && 5060 !ccw_device_get_iid(cdev, &info->iid) && 5061 !ccw_device_get_chid(cdev, 0, &info->chid); 5062 info->ssid = dev_id.ssid; 5063 5064 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n", 5065 info->chid, info->chpid); 5066 5067 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno); 5068 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid); 5069 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid); 5070 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid); 5071 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid); 5072 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid); 5073 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid); 5074 } 5075 5076 static int qeth_qdio_establish(struct qeth_card *card) 5077 { 5078 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES]; 5079 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES]; 5080 struct qeth_qib_parms *qib_parms = NULL; 5081 struct qdio_initialize init_data; 5082 unsigned int no_input_qs = 1; 5083 unsigned int i; 5084 int rc = 0; 5085 5086 QETH_CARD_TEXT(card, 2, "qdioest"); 5087 5088 if (!IS_IQD(card) && !IS_VM_NIC(card)) { 5089 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); 5090 if (!qib_parms) 5091 return -ENOMEM; 5092 5093 qeth_fill_qib_parms(card, qib_parms); 5094 } 5095 5096 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs; 5097 if (card->options.cq == QETH_CQ_ENABLED) { 5098 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs; 5099 no_input_qs++; 5100 } 5101 5102 for (i = 0; i < card->qdio.no_out_queues; i++) 5103 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs; 5104 5105 memset(&init_data, 0, sizeof(struct qdio_initialize)); 5106 init_data.q_format = IS_IQD(card) ? 
QDIO_IQDIO_QFMT : 5107 QDIO_QETH_QFMT; 5108 init_data.qib_param_field_format = 0; 5109 init_data.qib_param_field = (void *)qib_parms; 5110 init_data.no_input_qs = no_input_qs; 5111 init_data.no_output_qs = card->qdio.no_out_queues; 5112 init_data.input_handler = qeth_qdio_input_handler; 5113 init_data.output_handler = qeth_qdio_output_handler; 5114 init_data.irq_poll = qeth_qdio_poll; 5115 init_data.int_parm = (unsigned long) card; 5116 init_data.input_sbal_addr_array = in_sbal_ptrs; 5117 init_data.output_sbal_addr_array = out_sbal_ptrs; 5118 5119 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 5120 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 5121 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs, 5122 init_data.no_output_qs); 5123 if (rc) { 5124 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5125 goto out; 5126 } 5127 rc = qdio_establish(CARD_DDEV(card), &init_data); 5128 if (rc) { 5129 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5130 qdio_free(CARD_DDEV(card)); 5131 } 5132 } 5133 5134 switch (card->options.cq) { 5135 case QETH_CQ_ENABLED: 5136 dev_info(&card->gdev->dev, "Completion Queue support enabled"); 5137 break; 5138 case QETH_CQ_DISABLED: 5139 dev_info(&card->gdev->dev, "Completion Queue support disabled"); 5140 break; 5141 default: 5142 break; 5143 } 5144 5145 out: 5146 kfree(qib_parms); 5147 return rc; 5148 } 5149 5150 static void qeth_core_free_card(struct qeth_card *card) 5151 { 5152 QETH_CARD_TEXT(card, 2, "freecrd"); 5153 5154 unregister_service_level(&card->qeth_service_level); 5155 debugfs_remove_recursive(card->debugfs); 5156 qeth_put_cmd(card->read_cmd); 5157 destroy_workqueue(card->event_wq); 5158 dev_set_drvdata(&card->gdev->dev, NULL); 5159 kfree(card); 5160 } 5161 5162 static void qeth_trace_features(struct qeth_card *card) 5163 { 5164 QETH_CARD_TEXT(card, 2, "features"); 5165 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 5166 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 5167 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 5168 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 5169 sizeof(card->info.diagass_support)); 5170 } 5171 5172 static struct ccw_device_id qeth_ids[] = { 5173 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 5174 .driver_info = QETH_CARD_TYPE_OSD}, 5175 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 5176 .driver_info = QETH_CARD_TYPE_IQD}, 5177 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 5178 .driver_info = QETH_CARD_TYPE_OSM}, 5179 #ifdef CONFIG_QETH_OSX 5180 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 5181 .driver_info = QETH_CARD_TYPE_OSX}, 5182 #endif 5183 {}, 5184 }; 5185 MODULE_DEVICE_TABLE(ccw, qeth_ids); 5186 5187 static struct ccw_driver qeth_ccw_driver = { 5188 .driver = { 5189 .owner = THIS_MODULE, 5190 .name = "qeth", 5191 }, 5192 .ids = qeth_ids, 5193 .probe = ccwgroup_probe_ccwdev, 5194 .remove = ccwgroup_remove_ccwdev, 5195 }; 5196 5197 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 5198 { 5199 int retries = 3; 5200 int rc; 5201 5202 QETH_CARD_TEXT(card, 2, "hrdsetup"); 5203 atomic_set(&card->force_alloc_skb, 0); 5204 rc = qeth_update_from_chp_desc(card); 5205 if (rc) 5206 return rc; 5207 retry: 5208 if (retries < 3) 5209 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 5210 CARD_DEVID(card)); 5211 rc = qeth_qdio_clear_card(card, !IS_IQD(card)); 5212 qeth_stop_channel(&card->data); 5213 qeth_stop_channel(&card->write); 5214 qeth_stop_channel(&card->read); 5215 
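	/* The channels were stopped above and the QDIO structures are freed
	 * below, so that each pass through the retry loop starts over from a
	 * clean slate.
	 */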
qdio_free(CARD_DDEV(card)); 5216 5217 rc = qeth_start_channel(&card->read); 5218 if (rc) 5219 goto retriable; 5220 rc = qeth_start_channel(&card->write); 5221 if (rc) 5222 goto retriable; 5223 rc = qeth_start_channel(&card->data); 5224 if (rc) 5225 goto retriable; 5226 retriable: 5227 if (rc == -ERESTARTSYS) { 5228 QETH_CARD_TEXT(card, 2, "break1"); 5229 return rc; 5230 } else if (rc) { 5231 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5232 if (--retries < 0) 5233 goto out; 5234 else 5235 goto retry; 5236 } 5237 5238 qeth_determine_capabilities(card); 5239 qeth_read_ccw_conf_data(card); 5240 qeth_idx_init(card); 5241 5242 rc = qeth_idx_activate_read_channel(card); 5243 if (rc == -EINTR) { 5244 QETH_CARD_TEXT(card, 2, "break2"); 5245 return rc; 5246 } else if (rc) { 5247 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5248 if (--retries < 0) 5249 goto out; 5250 else 5251 goto retry; 5252 } 5253 5254 rc = qeth_idx_activate_write_channel(card); 5255 if (rc == -EINTR) { 5256 QETH_CARD_TEXT(card, 2, "break3"); 5257 return rc; 5258 } else if (rc) { 5259 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 5260 if (--retries < 0) 5261 goto out; 5262 else 5263 goto retry; 5264 } 5265 card->read_or_write_problem = 0; 5266 rc = qeth_mpc_initialize(card); 5267 if (rc) { 5268 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5269 goto out; 5270 } 5271 5272 rc = qeth_send_startlan(card); 5273 if (rc) { 5274 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5275 if (rc == -ENETDOWN) { 5276 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 5277 *carrier_ok = false; 5278 } else { 5279 goto out; 5280 } 5281 } else { 5282 *carrier_ok = true; 5283 } 5284 5285 card->options.ipa4.supported = 0; 5286 card->options.ipa6.supported = 0; 5287 card->options.adp.supported = 0; 5288 card->options.sbp.supported_funcs = 0; 5289 card->info.diagass_support = 0; 5290 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5291 if (rc == -ENOMEM) 5292 goto out; 5293 if (qeth_is_supported(card, IPA_IPV6)) { 5294 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 5295 if (rc == -ENOMEM) 5296 goto out; 5297 } 5298 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5299 rc = qeth_query_setadapterparms(card); 5300 if (rc < 0) { 5301 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 5302 goto out; 5303 } 5304 } 5305 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5306 rc = qeth_query_setdiagass(card); 5307 if (rc) 5308 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 5309 } 5310 5311 qeth_trace_features(card); 5312 5313 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || 5314 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) 5315 card->info.hwtrap = 0; 5316 5317 if (card->options.isolation != ISOLATION_MODE_NONE) { 5318 rc = qeth_setadpparms_set_access_ctrl(card, 5319 card->options.isolation); 5320 if (rc) 5321 goto out; 5322 } 5323 5324 qeth_init_link_info(card); 5325 5326 rc = qeth_init_qdio_queues(card); 5327 if (rc) { 5328 QETH_CARD_TEXT_(card, 2, "9err%d", rc); 5329 goto out; 5330 } 5331 5332 return 0; 5333 out: 5334 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5335 "an error on the device\n"); 5336 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! 
rc=%d\n", 5337 CARD_DEVID(card), rc); 5338 return rc; 5339 } 5340 5341 static int qeth_set_online(struct qeth_card *card, 5342 const struct qeth_discipline *disc) 5343 { 5344 bool carrier_ok; 5345 int rc; 5346 5347 mutex_lock(&card->conf_mutex); 5348 QETH_CARD_TEXT(card, 2, "setonlin"); 5349 5350 rc = qeth_hardsetup_card(card, &carrier_ok); 5351 if (rc) { 5352 QETH_CARD_TEXT_(card, 2, "2err%04x", rc); 5353 rc = -ENODEV; 5354 goto err_hardsetup; 5355 } 5356 5357 qeth_print_status_message(card); 5358 5359 if (card->dev->reg_state != NETREG_REGISTERED) 5360 /* no need for locking / error handling at this early stage: */ 5361 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card)); 5362 5363 rc = disc->set_online(card, carrier_ok); 5364 if (rc) 5365 goto err_online; 5366 5367 /* let user_space know that device is online */ 5368 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5369 5370 mutex_unlock(&card->conf_mutex); 5371 return 0; 5372 5373 err_online: 5374 err_hardsetup: 5375 qeth_qdio_clear_card(card, 0); 5376 qeth_clear_working_pool_list(card); 5377 qeth_flush_local_addrs(card); 5378 5379 qeth_stop_channel(&card->data); 5380 qeth_stop_channel(&card->write); 5381 qeth_stop_channel(&card->read); 5382 qdio_free(CARD_DDEV(card)); 5383 5384 mutex_unlock(&card->conf_mutex); 5385 return rc; 5386 } 5387 5388 int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, 5389 bool resetting) 5390 { 5391 int rc, rc2, rc3; 5392 5393 mutex_lock(&card->conf_mutex); 5394 QETH_CARD_TEXT(card, 3, "setoffl"); 5395 5396 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { 5397 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5398 card->info.hwtrap = 1; 5399 } 5400 5401 /* cancel any stalled cmd that might block the rtnl: */ 5402 qeth_clear_ipacmd_list(card); 5403 5404 rtnl_lock(); 5405 netif_device_detach(card->dev); 5406 netif_carrier_off(card->dev); 5407 rtnl_unlock(); 5408 5409 cancel_work_sync(&card->rx_mode_work); 5410 5411 disc->set_offline(card); 5412 5413 qeth_qdio_clear_card(card, 0); 5414 qeth_drain_output_queues(card); 5415 qeth_clear_working_pool_list(card); 5416 qeth_flush_local_addrs(card); 5417 card->info.promisc_mode = 0; 5418 qeth_default_link_info(card); 5419 5420 rc = qeth_stop_channel(&card->data); 5421 rc2 = qeth_stop_channel(&card->write); 5422 rc3 = qeth_stop_channel(&card->read); 5423 if (!rc) 5424 rc = (rc2) ? rc2 : rc3; 5425 if (rc) 5426 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5427 qdio_free(CARD_DDEV(card)); 5428 5429 /* let user_space know that device is offline */ 5430 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5431 5432 mutex_unlock(&card->conf_mutex); 5433 return 0; 5434 } 5435 EXPORT_SYMBOL_GPL(qeth_set_offline); 5436 5437 static int qeth_do_reset(void *data) 5438 { 5439 const struct qeth_discipline *disc; 5440 struct qeth_card *card = data; 5441 int rc; 5442 5443 /* Lock-free, other users will block until we are done. 
*/ 5444 disc = card->discipline; 5445 5446 QETH_CARD_TEXT(card, 2, "recover1"); 5447 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 5448 return 0; 5449 QETH_CARD_TEXT(card, 2, "recover2"); 5450 dev_warn(&card->gdev->dev, 5451 "A recovery process has been started for the device\n"); 5452 5453 qeth_set_offline(card, disc, true); 5454 rc = qeth_set_online(card, disc); 5455 if (!rc) { 5456 dev_info(&card->gdev->dev, 5457 "Device successfully recovered!\n"); 5458 } else { 5459 qeth_set_offline(card, disc, true); 5460 ccwgroup_set_offline(card->gdev, false); 5461 dev_warn(&card->gdev->dev, 5462 "The qeth device driver failed to recover an error on the device\n"); 5463 } 5464 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 5465 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 5466 return 0; 5467 } 5468 5469 #if IS_ENABLED(CONFIG_QETH_L3) 5470 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 5471 struct qeth_hdr *hdr) 5472 { 5473 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; 5474 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 5475 struct net_device *dev = skb->dev; 5476 5477 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { 5478 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, 5479 "FAKELL", skb->len); 5480 return; 5481 } 5482 5483 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) { 5484 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : 5485 ETH_P_IP; 5486 unsigned char tg_addr[ETH_ALEN]; 5487 5488 skb_reset_network_header(skb); 5489 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) { 5490 case QETH_CAST_MULTICAST: 5491 if (prot == ETH_P_IP) 5492 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 5493 else 5494 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 5495 QETH_CARD_STAT_INC(card, rx_multicast); 5496 break; 5497 case QETH_CAST_BROADCAST: 5498 ether_addr_copy(tg_addr, dev->broadcast); 5499 QETH_CARD_STAT_INC(card, rx_multicast); 5500 break; 5501 default: 5502 if (card->options.sniffer) 5503 skb->pkt_type = PACKET_OTHERHOST; 5504 ether_addr_copy(tg_addr, dev->dev_addr); 5505 } 5506 5507 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 5508 dev_hard_header(skb, dev, prot, tg_addr, 5509 &l3_hdr->next_hop.rx.src_mac, skb->len); 5510 else 5511 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL", 5512 skb->len); 5513 } 5514 5515 /* copy VLAN tag from hdr into skb */ 5516 if (!card->options.sniffer && 5517 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 5518 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 5519 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 
5520 l3_hdr->vlan_id : 5521 l3_hdr->next_hop.rx.vlan_id; 5522 5523 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 5524 } 5525 } 5526 #endif 5527 5528 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, 5529 bool uses_frags, bool is_cso) 5530 { 5531 struct napi_struct *napi = &card->napi; 5532 5533 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { 5534 skb->ip_summed = CHECKSUM_UNNECESSARY; 5535 QETH_CARD_STAT_INC(card, rx_skb_csum); 5536 } else { 5537 skb->ip_summed = CHECKSUM_NONE; 5538 } 5539 5540 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5541 QETH_CARD_STAT_INC(card, rx_packets); 5542 if (skb_is_nonlinear(skb)) { 5543 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5544 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5545 skb_shinfo(skb)->nr_frags); 5546 } 5547 5548 if (uses_frags) { 5549 napi_gro_frags(napi); 5550 } else { 5551 skb->protocol = eth_type_trans(skb, skb->dev); 5552 napi_gro_receive(napi, skb); 5553 } 5554 } 5555 5556 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) 5557 { 5558 struct page *page = virt_to_page(data); 5559 unsigned int next_frag; 5560 5561 next_frag = skb_shinfo(skb)->nr_frags; 5562 get_page(page); 5563 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, 5564 data_len); 5565 } 5566 5567 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5568 { 5569 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5570 } 5571 5572 static int qeth_extract_skb(struct qeth_card *card, 5573 struct qeth_qdio_buffer *qethbuffer, u8 *element_no, 5574 int *__offset) 5575 { 5576 struct qeth_priv *priv = netdev_priv(card->dev); 5577 struct qdio_buffer *buffer = qethbuffer->buffer; 5578 struct napi_struct *napi = &card->napi; 5579 struct qdio_buffer_element *element; 5580 unsigned int linear_len = 0; 5581 bool uses_frags = false; 5582 int offset = *__offset; 5583 bool use_rx_sg = false; 5584 unsigned int headroom; 5585 struct qeth_hdr *hdr; 5586 struct sk_buff *skb; 5587 int skb_len = 0; 5588 bool is_cso; 5589 5590 element = &buffer->element[*element_no]; 5591 5592 next_packet: 5593 /* qeth_hdr must not cross element boundaries */ 5594 while (element->length < offset + sizeof(struct qeth_hdr)) { 5595 if (qeth_is_last_sbale(element)) 5596 return -ENODATA; 5597 element++; 5598 offset = 0; 5599 } 5600 5601 hdr = dma64_to_virt(element->addr) + offset; 5602 offset += sizeof(*hdr); 5603 skb = NULL; 5604 5605 switch (hdr->hdr.l2.id) { 5606 case QETH_HEADER_TYPE_LAYER2: 5607 skb_len = hdr->hdr.l2.pkt_length; 5608 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5609 5610 linear_len = ETH_HLEN; 5611 headroom = 0; 5612 break; 5613 case QETH_HEADER_TYPE_LAYER3: 5614 skb_len = hdr->hdr.l3.length; 5615 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5616 5617 if (!IS_LAYER3(card)) { 5618 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5619 goto walk_packet; 5620 } 5621 5622 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { 5623 linear_len = ETH_HLEN; 5624 headroom = 0; 5625 break; 5626 } 5627 5628 if (hdr->hdr.l3.flags & QETH_HDR_IPV6) 5629 linear_len = sizeof(struct ipv6hdr); 5630 else 5631 linear_len = sizeof(struct iphdr); 5632 headroom = ETH_HLEN; 5633 break; 5634 default: 5635 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) 5636 QETH_CARD_STAT_INC(card, rx_frame_errors); 5637 else 5638 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5639 5640 /* Can't determine packet length, drop the whole buffer. 
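		 * Without a valid header type there is no way to tell where
		 * the next packet in this buffer starts.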
*/ 5641 return -EPROTONOSUPPORT; 5642 } 5643 5644 if (skb_len < linear_len) { 5645 QETH_CARD_STAT_INC(card, rx_dropped_runt); 5646 goto walk_packet; 5647 } 5648 5649 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || 5650 (skb_len > READ_ONCE(priv->rx_copybreak) && 5651 !atomic_read(&card->force_alloc_skb)); 5652 5653 if (use_rx_sg) { 5654 /* QETH_CQ_ENABLED only: */ 5655 if (qethbuffer->rx_skb && 5656 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { 5657 skb = qethbuffer->rx_skb; 5658 qethbuffer->rx_skb = NULL; 5659 goto use_skb; 5660 } 5661 5662 skb = napi_get_frags(napi); 5663 if (!skb) { 5664 /* -ENOMEM, no point in falling back further. */ 5665 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5666 goto walk_packet; 5667 } 5668 5669 if (skb_tailroom(skb) >= linear_len + headroom) { 5670 uses_frags = true; 5671 goto use_skb; 5672 } 5673 5674 netdev_info_once(card->dev, 5675 "Insufficient linear space in NAPI frags skb, need %u but have %u\n", 5676 linear_len + headroom, skb_tailroom(skb)); 5677 /* Shouldn't happen. Don't optimize, fall back to linear skb. */ 5678 } 5679 5680 linear_len = skb_len; 5681 skb = napi_alloc_skb(napi, linear_len + headroom); 5682 if (!skb) { 5683 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5684 goto walk_packet; 5685 } 5686 5687 use_skb: 5688 if (headroom) 5689 skb_reserve(skb, headroom); 5690 walk_packet: 5691 while (skb_len) { 5692 int data_len = min(skb_len, (int)(element->length - offset)); 5693 char *data = dma64_to_virt(element->addr) + offset; 5694 5695 skb_len -= data_len; 5696 offset += data_len; 5697 5698 /* Extract data from current element: */ 5699 if (skb && data_len) { 5700 if (linear_len) { 5701 unsigned int copy_len; 5702 5703 copy_len = min_t(unsigned int, linear_len, 5704 data_len); 5705 5706 skb_put_data(skb, data, copy_len); 5707 linear_len -= copy_len; 5708 data_len -= copy_len; 5709 data += copy_len; 5710 } 5711 5712 if (data_len) 5713 qeth_create_skb_frag(skb, data, data_len); 5714 } 5715 5716 /* Step forward to next element: */ 5717 if (skb_len) { 5718 if (qeth_is_last_sbale(element)) { 5719 QETH_CARD_TEXT(card, 4, "unexeob"); 5720 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5721 if (skb) { 5722 if (uses_frags) 5723 napi_free_frags(napi); 5724 else 5725 kfree_skb(skb); 5726 QETH_CARD_STAT_INC(card, 5727 rx_length_errors); 5728 } 5729 return -EMSGSIZE; 5730 } 5731 element++; 5732 offset = 0; 5733 } 5734 } 5735 5736 /* This packet was skipped, go get another one: */ 5737 if (!skb) 5738 goto next_packet; 5739 5740 *element_no = element - &buffer->element[0]; 5741 *__offset = offset; 5742 5743 #if IS_ENABLED(CONFIG_QETH_L3) 5744 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3) 5745 qeth_l3_rebuild_skb(card, skb, hdr); 5746 #endif 5747 5748 qeth_receive_skb(card, skb, uses_frags, is_cso); 5749 return 0; 5750 } 5751 5752 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget, 5753 struct qeth_qdio_buffer *buf, bool *done) 5754 { 5755 unsigned int work_done = 0; 5756 5757 while (budget) { 5758 if (qeth_extract_skb(card, buf, &card->rx.buf_element, 5759 &card->rx.e_offset)) { 5760 *done = true; 5761 break; 5762 } 5763 5764 work_done++; 5765 budget--; 5766 } 5767 5768 return work_done; 5769 } 5770 5771 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) 5772 { 5773 struct qeth_rx *ctx = &card->rx; 5774 unsigned int work_done = 0; 5775 5776 while (budget > 0) { 5777 struct qeth_qdio_buffer *buffer; 5778 unsigned int skbs_done = 0; 5779 bool done = false; 5780 5781 /* Fetch completed RX buffers: */ 
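		/* b_count carries over between poll calls; only ask QDIO for
		 * newly completed buffers once the previous batch is consumed.
		 */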
5782 if (!card->rx.b_count) { 5783 card->rx.qdio_err = 0; 5784 card->rx.b_count = 5785 qdio_inspect_input_queue(CARD_DDEV(card), 0, 5786 &card->rx.b_index, 5787 &card->rx.qdio_err); 5788 if (card->rx.b_count <= 0) { 5789 card->rx.b_count = 0; 5790 break; 5791 } 5792 } 5793 5794 /* Process one completed RX buffer: */ 5795 buffer = &card->qdio.in_q->bufs[card->rx.b_index]; 5796 if (!(card->rx.qdio_err && 5797 qeth_check_qdio_errors(card, buffer->buffer, 5798 card->rx.qdio_err, "qinerr"))) 5799 skbs_done = qeth_extract_skbs(card, budget, buffer, 5800 &done); 5801 else 5802 done = true; 5803 5804 work_done += skbs_done; 5805 budget -= skbs_done; 5806 5807 if (done) { 5808 QETH_CARD_STAT_INC(card, rx_bufs); 5809 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 5810 buffer->pool_entry = NULL; 5811 card->rx.b_count--; 5812 ctx->bufs_refill++; 5813 ctx->bufs_refill -= qeth_rx_refill_queue(card, 5814 ctx->bufs_refill); 5815 5816 /* Step forward to next buffer: */ 5817 card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1); 5818 card->rx.buf_element = 0; 5819 card->rx.e_offset = 0; 5820 } 5821 } 5822 5823 return work_done; 5824 } 5825 5826 static void qeth_cq_poll(struct qeth_card *card) 5827 { 5828 unsigned int work_done = 0; 5829 5830 while (work_done < QDIO_MAX_BUFFERS_PER_Q) { 5831 unsigned int start, error; 5832 int completed; 5833 5834 completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start, 5835 &error); 5836 if (completed <= 0) 5837 return; 5838 5839 qeth_qdio_cq_handler(card, error, 1, start, completed); 5840 work_done += completed; 5841 } 5842 } 5843 5844 int qeth_poll(struct napi_struct *napi, int budget) 5845 { 5846 struct qeth_card *card = container_of(napi, struct qeth_card, napi); 5847 unsigned int work_done; 5848 5849 work_done = qeth_rx_poll(card, budget); 5850 5851 if (qeth_use_tx_irqs(card)) { 5852 struct qeth_qdio_out_q *queue; 5853 unsigned int i; 5854 5855 qeth_for_each_output_queue(card, queue, i) { 5856 if (!qeth_out_queue_is_empty(queue)) 5857 napi_schedule(&queue->napi); 5858 } 5859 } 5860 5861 if (card->options.cq == QETH_CQ_ENABLED) 5862 qeth_cq_poll(card); 5863 5864 if (budget) { 5865 struct qeth_rx *ctx = &card->rx; 5866 5867 /* Process any substantial refill backlog: */ 5868 ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill); 5869 5870 /* Exhausted the RX budget. Keep IRQ disabled, we get called again. 
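		 * Returning the full budget tells NAPI to keep polling us
		 * instead of completing and re-arming the interrupt.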
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called
		 * again.
		 */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
	bool error = !!qdio_error;

	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
		struct qaob *aob = buffer->aob;
		struct qeth_qaob_priv1 *priv;
		enum iucv_tx_notify notify;

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer,
					       queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
			}

			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
		}

		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
		memset(aob, 0, sizeof(*aob));
	} else if (card->options.cq == QETH_CQ_ENABLED) {
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	}

	qeth_clear_output_buffer(queue, buffer, error, budget);
}

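/* qeth_tx_poll() - per-TX-queue NAPI handler for completion processing.
 *
 * Loops until the queue is empty, yielding back to the scheduler after a
 * full ring's worth of completions. When qdio reports nothing completed,
 * the rescan timer is armed (unless a PCI request is outstanding) so that
 * pending buffers are still reaped without a TX completion interrupt.
 */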
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		qeth_tx_complete_pending_bufs(card, queue, false, budget);

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no,
						      &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);

struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);

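/* Convenience wrapper around qeth_get_setassparms_cmd() for assist commands
 * that carry at most a single 32-bit flags word. An illustrative call (the
 * constants below are just an example, not taken from this function):
 *
 *	u32 features = QETH_IPA_CHECKSUM_TCP | QETH_IPA_CHECKSUM_UDP;
 *
 *	rc = qeth_send_simple_setassparms_prot(card, IPA_INBOUND_CHECKSUM,
 *					       IPA_CMD_ASS_ENABLE, &features,
 *					       QETH_PROT_IPV4);
 *
 * Passing data == NULL sends the command without a payload.
 */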
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);

static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vscnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}

static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
{
	int rc;

	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev,
			"There is no kernel module to support discipline %d\n",
			discipline);
		return -EINVAL;
	}

	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_remove_discipline(struct qeth_card *card)
{
	card->discipline->remove(card->gdev);

	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}

static const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
};

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

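/* Per-card dbf areas are kept on a global list so that a card which is
 * re-probed under the same bus ID can look up and re-use its existing debug
 * entry instead of registering a duplicate one.
 */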
static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

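/* Allocate the net_device that matches the card type: HiperSockets (IQD)
 * devices get the "hsi%d" name, OSM is single-queue, and everything else is
 * a regular multiqueue Ethernet device. max_mtu/mtu stay 0 until the device
 * first goes online and the real limits are known.
 */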
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;

	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

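/* Probe sequence for a freshly grouped ccwgroup device: allocate the card
 * and its net_device, query the card's capabilities, set up the RX queue,
 * and bind a discipline right away if the card type enforces one (otherwise
 * the discipline is chosen when the device is first set online).
 */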
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	scnprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		  dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q) {
		rc = -ENOMEM;
		goto err_rx_queue;
	}

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	gdev->dev.groups = qeth_dev_groups;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		/* It's so early that we don't need the discipline_mutex yet. */
		rc = qeth_setup_discipline(card, enforced_disc);
		if (rc)
			goto err_setup_disc;

		break;
	}

	return 0;

err_setup_disc:
err_chp_desc:
	qeth_free_qdio_queue(card->qdio.in_q);
err_rx_queue:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	mutex_lock(&card->discipline_mutex);
	if (card->discipline)
		qeth_remove_discipline(card);
	mutex_unlock(&card->discipline_mutex);

	qeth_free_qdio_queues(card);

	qeth_free_qdio_queue(card->qdio.in_q);
	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	mutex_lock(&card->discipline_mutex);
	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_setup_discipline(card, def_discipline);
		if (rc)
			goto err;
	}

	rc = qeth_set_online(card, card->discipline);

err:
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}

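/* Writing three ccw bus IDs (read, write and data channel) to this driver
 * attribute groups them into one qeth device, e.g.:
 *
 *	echo 0.0.f500,0.0.f501,0.0.f502 > /sys/bus/ccwgroup/drivers/qeth/group
 *
 * (The bus IDs above are only an example.)
 */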
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, data);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

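/* Enabling checksum offload is a three-step handshake with the card:
 * 1. IPA_CMD_ASS_START, which reports the supported checksum flags,
 * 2. a sanity check that TCP/UDP (and, for L3 IPv4 TX, the IP header)
 *    checksumming is covered,
 * 3. IPA_CMD_ASS_ENABLE with the required flags, verifying the returned
 *    caps. Any failure rolls back via qeth_set_csum_off().
 */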
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

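/* RX csum offload is a single netdev feature bit, but is backed by two
 * independent assists (IPv4 and IPv6). Enabling succeeds if at least one
 * assist comes up; disabling fails if either assist stays active.
 */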
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

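/* ndo_set_features callback. 'changed' starts out as the diff between the
 * current and the requested feature set; each offload that fails to toggle
 * is XOR-ed back out, so at the end 'changed' holds exactly the bits that
 * were applied successfully and dev->features can be fixed up to match.
 */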
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST	0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

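/* .ndo_select_queue for IQD devices: multicast always goes to the dedicated
 * QETH_IQD_MCAST_TXQ, while unicast is spread across the remaining queues by
 * the stack (remapping a stray mcast-queue pick to the first ucast queue).
 */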
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
			  struct net_device *sb_dev)
{
	struct qeth_card *card = dev->ml_priv;

	if (qeth_uses_tx_prio_queueing(card))
		return qeth_get_priority_queue(card, skb);

	return netdev_pick_tx(dev, skb, sb_dev);
}
EXPORT_SYMBOL_GPL(qeth_osa_select_queue);

int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	qeth_for_each_output_queue(card, queue, i) {
		netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
		napi_enable(&queue->napi);
	}
	napi_enable(&card->napi);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		napi_schedule(&queue->napi);
	}
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		timer_delete_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

	qeth_qaob_cache = kmem_cache_create("qeth_qaob",
					    sizeof(struct qaob),
					    sizeof(struct qaob),
					    0, NULL);
	if (!qeth_qaob_cache) {
		rc = -ENOMEM;
		goto qaob_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qaob_cache);
qaob_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qaob_cache);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");