1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright IBM Corp. 2007, 2009 4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, 5 * Frank Pavlic <fpavlic@de.ibm.com>, 6 * Thomas Spatzier <tspat@de.ibm.com>, 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 */ 9 10 #define KMSG_COMPONENT "qeth" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/module.h> 14 #include <linux/moduleparam.h> 15 #include <linux/bitops.h> 16 #include <linux/string.h> 17 #include <linux/errno.h> 18 #include <linux/kernel.h> 19 #include <linux/etherdevice.h> 20 #include <linux/ip.h> 21 #include <linux/in.h> 22 #include <linux/ipv6.h> 23 #include <linux/inetdevice.h> 24 #include <linux/igmp.h> 25 #include <linux/slab.h> 26 #include <linux/if_ether.h> 27 #include <linux/if_vlan.h> 28 #include <linux/skbuff.h> 29 30 #include <net/ip.h> 31 #include <net/arp.h> 32 #include <net/route.h> 33 #include <net/ipv6.h> 34 #include <net/ip6_route.h> 35 #include <net/ip6_fib.h> 36 #include <net/iucv/af_iucv.h> 37 #include <linux/hashtable.h> 38 39 #include "qeth_l3.h" 40 41 42 static int qeth_l3_set_offline(struct ccwgroup_device *); 43 static void qeth_l3_set_rx_mode(struct net_device *dev); 44 static int qeth_l3_register_addr_entry(struct qeth_card *, 45 struct qeth_ipaddr *); 46 static int qeth_l3_deregister_addr_entry(struct qeth_card *, 47 struct qeth_ipaddr *); 48 49 static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) 50 { 51 sprintf(buf, "%pI4", addr); 52 } 53 54 static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) 55 { 56 sprintf(buf, "%pI6", addr); 57 } 58 59 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, 60 char *buf) 61 { 62 if (proto == QETH_PROT_IPV4) 63 qeth_l3_ipaddr4_to_string(addr, buf); 64 else if (proto == QETH_PROT_IPV6) 65 qeth_l3_ipaddr6_to_string(addr, buf); 66 } 67 68 static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot) 69 { 70 struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC); 71 72 if (addr) 73 qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot); 74 return addr; 75 } 76 77 static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, 78 struct qeth_ipaddr *query) 79 { 80 u64 key = qeth_l3_ipaddr_hash(query); 81 struct qeth_ipaddr *addr; 82 83 if (query->is_multicast) { 84 hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) 85 if (qeth_l3_addr_match_ip(addr, query)) 86 return addr; 87 } else { 88 hash_for_each_possible(card->ip_htable, addr, hnode, key) 89 if (qeth_l3_addr_match_ip(addr, query)) 90 return addr; 91 } 92 return NULL; 93 } 94 95 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) 96 { 97 int i, j; 98 u8 octet; 99 100 for (i = 0; i < len; ++i) { 101 octet = addr[i]; 102 for (j = 7; j >= 0; --j) { 103 bits[i*8 + j] = octet & 1; 104 octet >>= 1; 105 } 106 } 107 } 108 109 static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, 110 struct qeth_ipaddr *addr) 111 { 112 struct qeth_ipato_entry *ipatoe; 113 u8 addr_bits[128] = {0, }; 114 u8 ipatoe_bits[128] = {0, }; 115 int rc = 0; 116 117 if (!card->ipato.enabled) 118 return false; 119 if (addr->type != QETH_IP_TYPE_NORMAL) 120 return false; 121 122 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, 123 (addr->proto == QETH_PROT_IPV4)? 4:16); 124 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 125 if (addr->proto != ipatoe->proto) 126 continue; 127 qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits, 128 (ipatoe->proto == QETH_PROT_IPV4) ? 
129 4 : 16); 130 if (addr->proto == QETH_PROT_IPV4) 131 rc = !memcmp(addr_bits, ipatoe_bits, 132 min(32, ipatoe->mask_bits)); 133 else 134 rc = !memcmp(addr_bits, ipatoe_bits, 135 min(128, ipatoe->mask_bits)); 136 if (rc) 137 break; 138 } 139 /* invert? */ 140 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4) 141 rc = !rc; 142 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6) 143 rc = !rc; 144 145 return rc; 146 } 147 148 static int qeth_l3_delete_ip(struct qeth_card *card, 149 struct qeth_ipaddr *tmp_addr) 150 { 151 int rc = 0; 152 struct qeth_ipaddr *addr; 153 154 if (tmp_addr->type == QETH_IP_TYPE_RXIP) 155 QETH_CARD_TEXT(card, 2, "delrxip"); 156 else if (tmp_addr->type == QETH_IP_TYPE_VIPA) 157 QETH_CARD_TEXT(card, 2, "delvipa"); 158 else 159 QETH_CARD_TEXT(card, 2, "delip"); 160 161 if (tmp_addr->proto == QETH_PROT_IPV4) 162 QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); 163 else { 164 QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); 165 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); 166 } 167 168 addr = qeth_l3_find_addr_by_ip(card, tmp_addr); 169 if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr)) 170 return -ENOENT; 171 172 addr->ref_counter--; 173 if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) 174 return rc; 175 if (addr->in_progress) 176 return -EINPROGRESS; 177 178 if (qeth_card_hw_is_reachable(card)) 179 rc = qeth_l3_deregister_addr_entry(card, addr); 180 181 hash_del(&addr->hnode); 182 kfree(addr); 183 184 return rc; 185 } 186 187 static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) 188 { 189 int rc = 0; 190 struct qeth_ipaddr *addr; 191 char buf[40]; 192 193 if (tmp_addr->type == QETH_IP_TYPE_RXIP) 194 QETH_CARD_TEXT(card, 2, "addrxip"); 195 else if (tmp_addr->type == QETH_IP_TYPE_VIPA) 196 QETH_CARD_TEXT(card, 2, "addvipa"); 197 else 198 QETH_CARD_TEXT(card, 2, "addip"); 199 200 if (tmp_addr->proto == QETH_PROT_IPV4) 201 QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); 202 else { 203 QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); 204 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); 205 } 206 207 addr = qeth_l3_find_addr_by_ip(card, tmp_addr); 208 if (addr) { 209 if (tmp_addr->type != QETH_IP_TYPE_NORMAL) 210 return -EADDRINUSE; 211 if (qeth_l3_addr_match_all(addr, tmp_addr)) { 212 addr->ref_counter++; 213 return 0; 214 } 215 qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, 216 buf); 217 dev_warn(&card->gdev->dev, 218 "Registering IP address %s failed\n", buf); 219 return -EADDRINUSE; 220 } else { 221 addr = qeth_l3_get_addr_buffer(tmp_addr->proto); 222 if (!addr) 223 return -ENOMEM; 224 225 memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); 226 addr->ref_counter = 1; 227 228 if (qeth_l3_is_addr_covered_by_ipato(card, addr)) { 229 QETH_CARD_TEXT(card, 2, "tkovaddr"); 230 addr->ipato = 1; 231 } 232 hash_add(card->ip_htable, &addr->hnode, 233 qeth_l3_ipaddr_hash(addr)); 234 235 if (!qeth_card_hw_is_reachable(card)) { 236 addr->disp_flag = QETH_DISP_ADDR_ADD; 237 return 0; 238 } 239 240 /* qeth_l3_register_addr_entry can go to sleep 241 * if we add a IPV4 addr. It is caused by the reason 242 * that SETIP ipa cmd starts ARP staff for IPV4 addr. 
243 * Thus we should unlock spinlock, and make a protection 244 * using in_progress variable to indicate that there is 245 * an hardware operation with this IPV4 address 246 */ 247 if (addr->proto == QETH_PROT_IPV4) { 248 addr->in_progress = 1; 249 mutex_unlock(&card->ip_lock); 250 rc = qeth_l3_register_addr_entry(card, addr); 251 mutex_lock(&card->ip_lock); 252 addr->in_progress = 0; 253 } else 254 rc = qeth_l3_register_addr_entry(card, addr); 255 256 if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { 257 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 258 if (addr->ref_counter < 1) { 259 qeth_l3_deregister_addr_entry(card, addr); 260 hash_del(&addr->hnode); 261 kfree(addr); 262 } 263 } else { 264 hash_del(&addr->hnode); 265 kfree(addr); 266 } 267 } 268 return rc; 269 } 270 271 static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr, 272 bool add) 273 { 274 int rc; 275 276 mutex_lock(&card->ip_lock); 277 rc = add ? qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr); 278 mutex_unlock(&card->ip_lock); 279 280 return rc; 281 } 282 283 static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card) 284 { 285 struct qeth_ipaddr *addr; 286 struct hlist_node *tmp; 287 int i; 288 289 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { 290 hash_del(&addr->hnode); 291 kfree(addr); 292 } 293 } 294 295 static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) 296 { 297 struct qeth_ipaddr *addr; 298 struct hlist_node *tmp; 299 int i; 300 301 QETH_CARD_TEXT(card, 4, "clearip"); 302 303 mutex_lock(&card->ip_lock); 304 305 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { 306 if (!recover) { 307 hash_del(&addr->hnode); 308 kfree(addr); 309 continue; 310 } 311 addr->disp_flag = QETH_DISP_ADDR_ADD; 312 } 313 314 mutex_unlock(&card->ip_lock); 315 } 316 317 static void qeth_l3_recover_ip(struct qeth_card *card) 318 { 319 struct qeth_ipaddr *addr; 320 struct hlist_node *tmp; 321 int i; 322 int rc; 323 324 QETH_CARD_TEXT(card, 4, "recovrip"); 325 326 mutex_lock(&card->ip_lock); 327 328 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { 329 if (addr->disp_flag == QETH_DISP_ADDR_ADD) { 330 if (addr->proto == QETH_PROT_IPV4) { 331 addr->in_progress = 1; 332 mutex_unlock(&card->ip_lock); 333 rc = qeth_l3_register_addr_entry(card, addr); 334 mutex_lock(&card->ip_lock); 335 addr->in_progress = 0; 336 } else 337 rc = qeth_l3_register_addr_entry(card, addr); 338 339 if (!rc) { 340 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 341 if (addr->ref_counter < 1) 342 qeth_l3_delete_ip(card, addr); 343 } else { 344 hash_del(&addr->hnode); 345 kfree(addr); 346 } 347 } 348 } 349 350 mutex_unlock(&card->ip_lock); 351 } 352 353 static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply, 354 unsigned long data) 355 { 356 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 357 358 switch (cmd->hdr.return_code) { 359 case IPA_RC_SUCCESS: 360 return 0; 361 case IPA_RC_DUPLICATE_IP_ADDRESS: 362 return -EADDRINUSE; 363 case IPA_RC_MC_ADDR_NOT_FOUND: 364 return -ENOENT; 365 case IPA_RC_LAN_OFFLINE: 366 return -ENETDOWN; 367 default: 368 return -EIO; 369 } 370 } 371 372 static int qeth_l3_send_setdelmc(struct qeth_card *card, 373 struct qeth_ipaddr *addr, int ipacmd) 374 { 375 struct qeth_cmd_buffer *iob; 376 struct qeth_ipa_cmd *cmd; 377 378 QETH_CARD_TEXT(card, 4, "setdelmc"); 379 380 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 381 if (!iob) 382 return -ENOMEM; 383 cmd = __ipa_cmd(iob); 384 ether_addr_copy(cmd->data.setdelipm.mac, 
addr->mac); 385 if (addr->proto == QETH_PROT_IPV6) 386 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr, 387 sizeof(struct in6_addr)); 388 else 389 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4); 390 391 return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); 392 } 393 394 static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len) 395 { 396 int i, j; 397 for (i = 0; i < 16; i++) { 398 j = (len) - (i * 8); 399 if (j >= 8) 400 netmask[i] = 0xff; 401 else if (j > 0) 402 netmask[i] = (u8)(0xFF00 >> j); 403 else 404 netmask[i] = 0; 405 } 406 } 407 408 static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set) 409 { 410 switch (addr->type) { 411 case QETH_IP_TYPE_RXIP: 412 return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; 413 case QETH_IP_TYPE_VIPA: 414 return (set) ? QETH_IPA_SETIP_VIPA_FLAG : 415 QETH_IPA_DELIP_VIPA_FLAG; 416 default: 417 return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; 418 } 419 } 420 421 static int qeth_l3_send_setdelip(struct qeth_card *card, 422 struct qeth_ipaddr *addr, 423 enum qeth_ipa_cmds ipacmd) 424 { 425 struct qeth_cmd_buffer *iob; 426 struct qeth_ipa_cmd *cmd; 427 __u8 netmask[16]; 428 u32 flags; 429 430 QETH_CARD_TEXT(card, 4, "setdelip"); 431 432 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 433 if (!iob) 434 return -ENOMEM; 435 cmd = __ipa_cmd(iob); 436 437 flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP); 438 QETH_CARD_TEXT_(card, 4, "flags%02X", flags); 439 440 if (addr->proto == QETH_PROT_IPV6) { 441 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, 442 sizeof(struct in6_addr)); 443 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen); 444 memcpy(cmd->data.setdelip6.mask, netmask, 445 sizeof(struct in6_addr)); 446 cmd->data.setdelip6.flags = flags; 447 } else { 448 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4); 449 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4); 450 cmd->data.setdelip4.flags = flags; 451 } 452 453 return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); 454 } 455 456 static int qeth_l3_send_setrouting(struct qeth_card *card, 457 enum qeth_routing_types type, enum qeth_prot_versions prot) 458 { 459 int rc; 460 struct qeth_ipa_cmd *cmd; 461 struct qeth_cmd_buffer *iob; 462 463 QETH_CARD_TEXT(card, 4, "setroutg"); 464 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); 465 if (!iob) 466 return -ENOMEM; 467 cmd = __ipa_cmd(iob); 468 cmd->data.setrtg.type = (type); 469 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 470 471 return rc; 472 } 473 474 static int qeth_l3_correct_routing_type(struct qeth_card *card, 475 enum qeth_routing_types *type, enum qeth_prot_versions prot) 476 { 477 if (IS_IQD(card)) { 478 switch (*type) { 479 case NO_ROUTER: 480 case PRIMARY_CONNECTOR: 481 case SECONDARY_CONNECTOR: 482 case MULTICAST_ROUTER: 483 return 0; 484 default: 485 goto out_inval; 486 } 487 } else { 488 switch (*type) { 489 case NO_ROUTER: 490 case PRIMARY_ROUTER: 491 case SECONDARY_ROUTER: 492 return 0; 493 case MULTICAST_ROUTER: 494 if (qeth_is_ipafunc_supported(card, prot, 495 IPA_OSA_MC_ROUTER)) 496 return 0; 497 default: 498 goto out_inval; 499 } 500 } 501 out_inval: 502 *type = NO_ROUTER; 503 return -EINVAL; 504 } 505 506 int qeth_l3_setrouting_v4(struct qeth_card *card) 507 { 508 int rc; 509 510 QETH_CARD_TEXT(card, 3, "setrtg4"); 511 512 rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, 513 QETH_PROT_IPV4); 514 if (rc) 515 return rc; 516 517 rc = qeth_l3_send_setrouting(card, card->options.route4.type, 518 QETH_PROT_IPV4); 
519 if (rc) { 520 card->options.route4.type = NO_ROUTER; 521 QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", 522 rc, CARD_DEVID(card)); 523 } 524 return rc; 525 } 526 527 int qeth_l3_setrouting_v6(struct qeth_card *card) 528 { 529 int rc = 0; 530 531 QETH_CARD_TEXT(card, 3, "setrtg6"); 532 533 if (!qeth_is_supported(card, IPA_IPV6)) 534 return 0; 535 rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, 536 QETH_PROT_IPV6); 537 if (rc) 538 return rc; 539 540 rc = qeth_l3_send_setrouting(card, card->options.route6.type, 541 QETH_PROT_IPV6); 542 if (rc) { 543 card->options.route6.type = NO_ROUTER; 544 QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", 545 rc, CARD_DEVID(card)); 546 } 547 return rc; 548 } 549 550 /* 551 * IP address takeover related functions 552 */ 553 554 /** 555 * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. 556 * 557 * Caller must hold ip_lock. 558 */ 559 void qeth_l3_update_ipato(struct qeth_card *card) 560 { 561 struct qeth_ipaddr *addr; 562 unsigned int i; 563 564 hash_for_each(card->ip_htable, i, addr, hnode) { 565 if (addr->type != QETH_IP_TYPE_NORMAL) 566 continue; 567 addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr); 568 } 569 } 570 571 static void qeth_l3_clear_ipato_list(struct qeth_card *card) 572 { 573 struct qeth_ipato_entry *ipatoe, *tmp; 574 575 mutex_lock(&card->ip_lock); 576 577 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 578 list_del(&ipatoe->entry); 579 kfree(ipatoe); 580 } 581 582 qeth_l3_update_ipato(card); 583 mutex_unlock(&card->ip_lock); 584 } 585 586 int qeth_l3_add_ipato_entry(struct qeth_card *card, 587 struct qeth_ipato_entry *new) 588 { 589 struct qeth_ipato_entry *ipatoe; 590 int rc = 0; 591 592 QETH_CARD_TEXT(card, 2, "addipato"); 593 594 mutex_lock(&card->ip_lock); 595 596 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 597 if (ipatoe->proto != new->proto) 598 continue; 599 if (!memcmp(ipatoe->addr, new->addr, 600 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && 601 (ipatoe->mask_bits == new->mask_bits)) { 602 rc = -EEXIST; 603 break; 604 } 605 } 606 607 if (!rc) { 608 list_add_tail(&new->entry, &card->ipato.entries); 609 qeth_l3_update_ipato(card); 610 } 611 612 mutex_unlock(&card->ip_lock); 613 614 return rc; 615 } 616 617 int qeth_l3_del_ipato_entry(struct qeth_card *card, 618 enum qeth_prot_versions proto, u8 *addr, 619 int mask_bits) 620 { 621 struct qeth_ipato_entry *ipatoe, *tmp; 622 int rc = -ENOENT; 623 624 QETH_CARD_TEXT(card, 2, "delipato"); 625 626 mutex_lock(&card->ip_lock); 627 628 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 629 if (ipatoe->proto != proto) 630 continue; 631 if (!memcmp(ipatoe->addr, addr, 632 (proto == QETH_PROT_IPV4)? 
4:16) && 633 (ipatoe->mask_bits == mask_bits)) { 634 list_del(&ipatoe->entry); 635 qeth_l3_update_ipato(card); 636 kfree(ipatoe); 637 rc = 0; 638 } 639 } 640 641 mutex_unlock(&card->ip_lock); 642 return rc; 643 } 644 645 int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, 646 enum qeth_ip_types type, 647 enum qeth_prot_versions proto) 648 { 649 struct qeth_ipaddr addr; 650 651 qeth_l3_init_ipaddr(&addr, type, proto); 652 if (proto == QETH_PROT_IPV4) 653 memcpy(&addr.u.a4.addr, ip, 4); 654 else 655 memcpy(&addr.u.a6.addr, ip, 16); 656 657 return qeth_l3_modify_ip(card, &addr, add); 658 } 659 660 int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) 661 { 662 struct qeth_ipaddr addr; 663 unsigned int i; 664 665 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); 666 addr.u.a6.addr.s6_addr[0] = 0xfe; 667 addr.u.a6.addr.s6_addr[1] = 0x80; 668 for (i = 0; i < 8; i++) 669 addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i]; 670 671 return qeth_l3_modify_ip(card, &addr, add); 672 } 673 674 static int qeth_l3_register_addr_entry(struct qeth_card *card, 675 struct qeth_ipaddr *addr) 676 { 677 char buf[50]; 678 int rc = 0; 679 int cnt = 3; 680 681 if (card->options.sniffer) 682 return 0; 683 684 if (addr->proto == QETH_PROT_IPV4) { 685 QETH_CARD_TEXT(card, 2, "setaddr4"); 686 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); 687 } else if (addr->proto == QETH_PROT_IPV6) { 688 QETH_CARD_TEXT(card, 2, "setaddr6"); 689 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); 690 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); 691 } else { 692 QETH_CARD_TEXT(card, 2, "setaddr?"); 693 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); 694 } 695 do { 696 if (addr->is_multicast) 697 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM); 698 else 699 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP); 700 if (rc) 701 QETH_CARD_TEXT(card, 2, "failed"); 702 } while ((--cnt > 0) && rc); 703 if (rc) { 704 QETH_CARD_TEXT(card, 2, "FAILED"); 705 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); 706 dev_warn(&card->gdev->dev, 707 "Registering IP address %s failed\n", buf); 708 } 709 return rc; 710 } 711 712 static int qeth_l3_deregister_addr_entry(struct qeth_card *card, 713 struct qeth_ipaddr *addr) 714 { 715 int rc = 0; 716 717 if (card->options.sniffer) 718 return 0; 719 720 if (addr->proto == QETH_PROT_IPV4) { 721 QETH_CARD_TEXT(card, 2, "deladdr4"); 722 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); 723 } else if (addr->proto == QETH_PROT_IPV6) { 724 QETH_CARD_TEXT(card, 2, "deladdr6"); 725 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); 726 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); 727 } else { 728 QETH_CARD_TEXT(card, 2, "deladdr?"); 729 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); 730 } 731 if (addr->is_multicast) 732 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); 733 else 734 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP); 735 if (rc) 736 QETH_CARD_TEXT(card, 2, "failed"); 737 738 return rc; 739 } 740 741 static int qeth_l3_setadapter_parms(struct qeth_card *card) 742 { 743 int rc = 0; 744 745 QETH_CARD_TEXT(card, 2, "setadprm"); 746 747 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { 748 rc = qeth_setadpparms_change_macaddr(card); 749 if (rc) 750 dev_warn(&card->gdev->dev, "Reading the adapter MAC" 751 " address failed\n"); 752 } 753 754 return rc; 755 } 756 757 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) 758 { 759 int rc; 760 761 
QETH_CARD_TEXT(card, 3, "ipaarp"); 762 763 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 764 dev_info(&card->gdev->dev, 765 "ARP processing not supported on %s!\n", 766 QETH_CARD_IFNAME(card)); 767 return 0; 768 } 769 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, 770 IPA_CMD_ASS_START, 0); 771 if (rc) { 772 dev_warn(&card->gdev->dev, 773 "Starting ARP processing support for %s failed\n", 774 QETH_CARD_IFNAME(card)); 775 } 776 return rc; 777 } 778 779 static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) 780 { 781 int rc; 782 783 QETH_CARD_TEXT(card, 3, "stsrcmac"); 784 785 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { 786 dev_info(&card->gdev->dev, 787 "Inbound source MAC-address not supported on %s\n", 788 QETH_CARD_IFNAME(card)); 789 return -EOPNOTSUPP; 790 } 791 792 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC, 793 IPA_CMD_ASS_START, 0); 794 if (rc) 795 dev_warn(&card->gdev->dev, 796 "Starting source MAC-address support for %s failed\n", 797 QETH_CARD_IFNAME(card)); 798 return rc; 799 } 800 801 static int qeth_l3_start_ipa_vlan(struct qeth_card *card) 802 { 803 int rc = 0; 804 805 QETH_CARD_TEXT(card, 3, "strtvlan"); 806 807 if (!qeth_is_supported(card, IPA_FULL_VLAN)) { 808 dev_info(&card->gdev->dev, 809 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); 810 return -EOPNOTSUPP; 811 } 812 813 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO, 814 IPA_CMD_ASS_START, 0); 815 if (rc) { 816 dev_warn(&card->gdev->dev, 817 "Starting VLAN support for %s failed\n", 818 QETH_CARD_IFNAME(card)); 819 } else { 820 dev_info(&card->gdev->dev, "VLAN enabled\n"); 821 } 822 return rc; 823 } 824 825 static int qeth_l3_start_ipa_multicast(struct qeth_card *card) 826 { 827 int rc; 828 829 QETH_CARD_TEXT(card, 3, "stmcast"); 830 831 if (!qeth_is_supported(card, IPA_MULTICASTING)) { 832 dev_info(&card->gdev->dev, 833 "Multicast not supported on %s\n", 834 QETH_CARD_IFNAME(card)); 835 return -EOPNOTSUPP; 836 } 837 838 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING, 839 IPA_CMD_ASS_START, 0); 840 if (rc) { 841 dev_warn(&card->gdev->dev, 842 "Starting multicast support for %s failed\n", 843 QETH_CARD_IFNAME(card)); 844 } else { 845 dev_info(&card->gdev->dev, "Multicast enabled\n"); 846 card->dev->flags |= IFF_MULTICAST; 847 } 848 return rc; 849 } 850 851 static int qeth_l3_softsetup_ipv6(struct qeth_card *card) 852 { 853 int rc; 854 855 QETH_CARD_TEXT(card, 3, "softipv6"); 856 857 if (IS_IQD(card)) 858 goto out; 859 860 rc = qeth_send_simple_setassparms(card, IPA_IPV6, 861 IPA_CMD_ASS_START, 3); 862 if (rc) { 863 dev_err(&card->gdev->dev, 864 "Activating IPv6 support for %s failed\n", 865 QETH_CARD_IFNAME(card)); 866 return rc; 867 } 868 rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, 869 IPA_CMD_ASS_START, 0); 870 if (rc) { 871 dev_err(&card->gdev->dev, 872 "Activating IPv6 support for %s failed\n", 873 QETH_CARD_IFNAME(card)); 874 return rc; 875 } 876 rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU, 877 IPA_CMD_ASS_START, 0); 878 if (rc) { 879 dev_warn(&card->gdev->dev, 880 "Enabling the passthrough mode for %s failed\n", 881 QETH_CARD_IFNAME(card)); 882 return rc; 883 } 884 out: 885 dev_info(&card->gdev->dev, "IPV6 enabled\n"); 886 return 0; 887 } 888 889 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) 890 { 891 QETH_CARD_TEXT(card, 3, "strtipv6"); 892 893 if (!qeth_is_supported(card, IPA_IPV6)) { 894 dev_info(&card->gdev->dev, 895 "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); 896 return 0; 897 } 898 return 
qeth_l3_softsetup_ipv6(card); 899 } 900 901 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) 902 { 903 int rc; 904 905 QETH_CARD_TEXT(card, 3, "stbrdcst"); 906 card->info.broadcast_capable = 0; 907 if (!qeth_is_supported(card, IPA_FILTERING)) { 908 dev_info(&card->gdev->dev, 909 "Broadcast not supported on %s\n", 910 QETH_CARD_IFNAME(card)); 911 rc = -EOPNOTSUPP; 912 goto out; 913 } 914 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 915 IPA_CMD_ASS_START, 0); 916 if (rc) { 917 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " 918 "%s failed\n", QETH_CARD_IFNAME(card)); 919 goto out; 920 } 921 922 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 923 IPA_CMD_ASS_CONFIGURE, 1); 924 if (rc) { 925 dev_warn(&card->gdev->dev, 926 "Setting up broadcast filtering for %s failed\n", 927 QETH_CARD_IFNAME(card)); 928 goto out; 929 } 930 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; 931 dev_info(&card->gdev->dev, "Broadcast enabled\n"); 932 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 933 IPA_CMD_ASS_ENABLE, 1); 934 if (rc) { 935 dev_warn(&card->gdev->dev, "Setting up broadcast echo " 936 "filtering for %s failed\n", QETH_CARD_IFNAME(card)); 937 goto out; 938 } 939 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; 940 out: 941 if (card->info.broadcast_capable) 942 card->dev->flags |= IFF_BROADCAST; 943 else 944 card->dev->flags &= ~IFF_BROADCAST; 945 return rc; 946 } 947 948 static int qeth_l3_start_ipassists(struct qeth_card *card) 949 { 950 QETH_CARD_TEXT(card, 3, "strtipas"); 951 952 if (qeth_set_access_ctrl_online(card, 0)) 953 return -EIO; 954 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 955 qeth_l3_start_ipa_source_mac(card); /* go on*/ 956 qeth_l3_start_ipa_vlan(card); /* go on*/ 957 qeth_l3_start_ipa_multicast(card); /* go on*/ 958 qeth_l3_start_ipa_ipv6(card); /* go on*/ 959 qeth_l3_start_ipa_broadcast(card); /* go on*/ 960 return 0; 961 } 962 963 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, 964 struct qeth_reply *reply, unsigned long data) 965 { 966 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 967 968 if (cmd->hdr.return_code) 969 return -EIO; 970 971 ether_addr_copy(card->dev->dev_addr, 972 cmd->data.create_destroy_addr.unique_id); 973 return 0; 974 } 975 976 static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) 977 { 978 int rc = 0; 979 struct qeth_cmd_buffer *iob; 980 struct qeth_ipa_cmd *cmd; 981 982 QETH_CARD_TEXT(card, 2, "hsrmac"); 983 984 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 985 QETH_PROT_IPV6); 986 if (!iob) 987 return -ENOMEM; 988 cmd = __ipa_cmd(iob); 989 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 990 card->info.unique_id; 991 992 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, 993 NULL); 994 return rc; 995 } 996 997 static int qeth_l3_get_unique_id_cb(struct qeth_card *card, 998 struct qeth_reply *reply, unsigned long data) 999 { 1000 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 1001 1002 if (cmd->hdr.return_code == 0) { 1003 card->info.unique_id = *((__u16 *) 1004 &cmd->data.create_destroy_addr.unique_id[6]); 1005 return 0; 1006 } 1007 1008 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1009 UNIQUE_ID_NOT_BY_CARD; 1010 dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n"); 1011 return -EIO; 1012 } 1013 1014 static int qeth_l3_get_unique_id(struct qeth_card *card) 1015 { 1016 int rc = 0; 1017 struct qeth_cmd_buffer *iob; 1018 struct qeth_ipa_cmd *cmd; 1019 
1020 QETH_CARD_TEXT(card, 2, "guniqeid"); 1021 1022 if (!qeth_is_supported(card, IPA_IPV6)) { 1023 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1024 UNIQUE_ID_NOT_BY_CARD; 1025 return 0; 1026 } 1027 1028 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1029 QETH_PROT_IPV6); 1030 if (!iob) 1031 return -ENOMEM; 1032 cmd = __ipa_cmd(iob); 1033 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1034 card->info.unique_id; 1035 1036 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL); 1037 return rc; 1038 } 1039 1040 static int 1041 qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, 1042 unsigned long data) 1043 { 1044 struct qeth_ipa_cmd *cmd; 1045 __u16 rc; 1046 1047 QETH_CARD_TEXT(card, 2, "diastrcb"); 1048 1049 cmd = (struct qeth_ipa_cmd *)data; 1050 rc = cmd->hdr.return_code; 1051 if (rc) 1052 QETH_CARD_TEXT_(card, 2, "dxter%x", rc); 1053 switch (cmd->data.diagass.action) { 1054 case QETH_DIAGS_CMD_TRACE_QUERY: 1055 break; 1056 case QETH_DIAGS_CMD_TRACE_DISABLE: 1057 switch (rc) { 1058 case 0: 1059 case IPA_RC_INVALID_SUBCMD: 1060 card->info.promisc_mode = SET_PROMISC_MODE_OFF; 1061 dev_info(&card->gdev->dev, "The HiperSockets network " 1062 "traffic analyzer is deactivated\n"); 1063 break; 1064 default: 1065 break; 1066 } 1067 break; 1068 case QETH_DIAGS_CMD_TRACE_ENABLE: 1069 switch (rc) { 1070 case 0: 1071 card->info.promisc_mode = SET_PROMISC_MODE_ON; 1072 dev_info(&card->gdev->dev, "The HiperSockets network " 1073 "traffic analyzer is activated\n"); 1074 break; 1075 case IPA_RC_HARDWARE_AUTH_ERROR: 1076 dev_warn(&card->gdev->dev, "The device is not " 1077 "authorized to run as a HiperSockets network " 1078 "traffic analyzer\n"); 1079 break; 1080 case IPA_RC_TRACE_ALREADY_ACTIVE: 1081 dev_warn(&card->gdev->dev, "A HiperSockets " 1082 "network traffic analyzer is already " 1083 "active in the HiperSockets LAN\n"); 1084 break; 1085 default: 1086 break; 1087 } 1088 break; 1089 default: 1090 QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n", 1091 cmd->data.diagass.action, CARD_DEVID(card)); 1092 } 1093 1094 return rc ? 
-EIO : 0; 1095 } 1096 1097 static int 1098 qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) 1099 { 1100 struct qeth_cmd_buffer *iob; 1101 struct qeth_ipa_cmd *cmd; 1102 1103 QETH_CARD_TEXT(card, 2, "diagtrac"); 1104 1105 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 1106 if (!iob) 1107 return -ENOMEM; 1108 cmd = __ipa_cmd(iob); 1109 cmd->data.diagass.subcmd_len = 16; 1110 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; 1111 cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET; 1112 cmd->data.diagass.action = diags_cmd; 1113 return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); 1114 } 1115 1116 static void 1117 qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) 1118 { 1119 struct ip_mc_list *im4; 1120 struct qeth_ipaddr *tmp, *ipm; 1121 1122 QETH_CARD_TEXT(card, 4, "addmc"); 1123 1124 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1125 if (!tmp) 1126 return; 1127 1128 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; 1129 im4 = rcu_dereference(im4->next_rcu)) { 1130 ip_eth_mc_map(im4->multiaddr, tmp->mac); 1131 tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); 1132 tmp->is_multicast = 1; 1133 1134 ipm = qeth_l3_find_addr_by_ip(card, tmp); 1135 if (ipm) { 1136 /* for mcast, by-IP match means full match */ 1137 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 1138 } else { 1139 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1140 if (!ipm) 1141 continue; 1142 ether_addr_copy(ipm->mac, tmp->mac); 1143 ipm->u.a4.addr = be32_to_cpu(im4->multiaddr); 1144 ipm->is_multicast = 1; 1145 ipm->disp_flag = QETH_DISP_ADDR_ADD; 1146 hash_add(card->ip_mc_htable, 1147 &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); 1148 } 1149 } 1150 1151 kfree(tmp); 1152 } 1153 1154 /* called with rcu_read_lock */ 1155 static void qeth_l3_add_vlan_mc(struct qeth_card *card) 1156 { 1157 struct in_device *in_dev; 1158 u16 vid; 1159 1160 QETH_CARD_TEXT(card, 4, "addmcvl"); 1161 1162 if (!qeth_is_supported(card, IPA_FULL_VLAN)) 1163 return; 1164 1165 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { 1166 struct net_device *netdev; 1167 1168 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), 1169 vid); 1170 if (netdev == NULL || 1171 !(netdev->flags & IFF_UP)) 1172 continue; 1173 in_dev = __in_dev_get_rcu(netdev); 1174 if (!in_dev) 1175 continue; 1176 qeth_l3_add_mc_to_hash(card, in_dev); 1177 } 1178 } 1179 1180 static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) 1181 { 1182 struct in_device *in4_dev; 1183 1184 QETH_CARD_TEXT(card, 4, "chkmcv4"); 1185 1186 rcu_read_lock(); 1187 in4_dev = __in_dev_get_rcu(card->dev); 1188 if (in4_dev == NULL) 1189 goto unlock; 1190 qeth_l3_add_mc_to_hash(card, in4_dev); 1191 qeth_l3_add_vlan_mc(card); 1192 unlock: 1193 rcu_read_unlock(); 1194 } 1195 1196 static void qeth_l3_add_mc6_to_hash(struct qeth_card *card, 1197 struct inet6_dev *in6_dev) 1198 { 1199 struct qeth_ipaddr *ipm; 1200 struct ifmcaddr6 *im6; 1201 struct qeth_ipaddr *tmp; 1202 1203 QETH_CARD_TEXT(card, 4, "addmc6"); 1204 1205 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1206 if (!tmp) 1207 return; 1208 1209 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { 1210 ipv6_eth_mc_map(&im6->mca_addr, tmp->mac); 1211 memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr, 1212 sizeof(struct in6_addr)); 1213 tmp->is_multicast = 1; 1214 1215 ipm = qeth_l3_find_addr_by_ip(card, tmp); 1216 if (ipm) { 1217 /* for mcast, by-IP match means full match */ 1218 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 1219 continue; 1220 } 1221 1222 ipm = 
qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1223 if (!ipm) 1224 continue; 1225 1226 ether_addr_copy(ipm->mac, tmp->mac); 1227 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr, 1228 sizeof(struct in6_addr)); 1229 ipm->is_multicast = 1; 1230 ipm->disp_flag = QETH_DISP_ADDR_ADD; 1231 hash_add(card->ip_mc_htable, 1232 &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); 1233 1234 } 1235 kfree(tmp); 1236 } 1237 1238 /* called with rcu_read_lock */ 1239 static void qeth_l3_add_vlan_mc6(struct qeth_card *card) 1240 { 1241 struct inet6_dev *in_dev; 1242 u16 vid; 1243 1244 QETH_CARD_TEXT(card, 4, "admc6vl"); 1245 1246 if (!qeth_is_supported(card, IPA_FULL_VLAN)) 1247 return; 1248 1249 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { 1250 struct net_device *netdev; 1251 1252 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), 1253 vid); 1254 if (netdev == NULL || 1255 !(netdev->flags & IFF_UP)) 1256 continue; 1257 in_dev = in6_dev_get(netdev); 1258 if (!in_dev) 1259 continue; 1260 read_lock_bh(&in_dev->lock); 1261 qeth_l3_add_mc6_to_hash(card, in_dev); 1262 read_unlock_bh(&in_dev->lock); 1263 in6_dev_put(in_dev); 1264 } 1265 } 1266 1267 static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) 1268 { 1269 struct inet6_dev *in6_dev; 1270 1271 QETH_CARD_TEXT(card, 4, "chkmcv6"); 1272 1273 if (!qeth_is_supported(card, IPA_IPV6)) 1274 return ; 1275 in6_dev = in6_dev_get(card->dev); 1276 if (!in6_dev) 1277 return; 1278 1279 rcu_read_lock(); 1280 read_lock_bh(&in6_dev->lock); 1281 qeth_l3_add_mc6_to_hash(card, in6_dev); 1282 qeth_l3_add_vlan_mc6(card); 1283 read_unlock_bh(&in6_dev->lock); 1284 rcu_read_unlock(); 1285 in6_dev_put(in6_dev); 1286 } 1287 1288 static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, 1289 __be16 proto, u16 vid) 1290 { 1291 struct qeth_card *card = dev->ml_priv; 1292 1293 set_bit(vid, card->active_vlans); 1294 return 0; 1295 } 1296 1297 static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, 1298 __be16 proto, u16 vid) 1299 { 1300 struct qeth_card *card = dev->ml_priv; 1301 1302 QETH_CARD_TEXT_(card, 4, "kid:%d", vid); 1303 1304 clear_bit(vid, card->active_vlans); 1305 qeth_l3_set_rx_mode(dev); 1306 return 0; 1307 } 1308 1309 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 1310 struct qeth_hdr *hdr) 1311 { 1312 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { 1313 u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? 
ETH_P_IPV6 : 1314 ETH_P_IP; 1315 unsigned char tg_addr[ETH_ALEN]; 1316 1317 skb_reset_network_header(skb); 1318 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) { 1319 case QETH_CAST_MULTICAST: 1320 if (prot == ETH_P_IP) 1321 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 1322 else 1323 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 1324 QETH_CARD_STAT_INC(card, rx_multicast); 1325 break; 1326 case QETH_CAST_BROADCAST: 1327 ether_addr_copy(tg_addr, card->dev->broadcast); 1328 QETH_CARD_STAT_INC(card, rx_multicast); 1329 break; 1330 default: 1331 if (card->options.sniffer) 1332 skb->pkt_type = PACKET_OTHERHOST; 1333 ether_addr_copy(tg_addr, card->dev->dev_addr); 1334 } 1335 1336 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 1337 card->dev->header_ops->create(skb, card->dev, prot, 1338 tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac, 1339 skb->len); 1340 else 1341 card->dev->header_ops->create(skb, card->dev, prot, 1342 tg_addr, "FAKELL", skb->len); 1343 } 1344 1345 skb->protocol = eth_type_trans(skb, card->dev); 1346 1347 /* copy VLAN tag from hdr into skb */ 1348 if (!card->options.sniffer && 1349 (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 1350 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 1351 u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 1352 hdr->hdr.l3.vlan_id : 1353 hdr->hdr.l3.next_hop.rx.vlan_id; 1354 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 1355 } 1356 1357 qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags); 1358 } 1359 1360 static int qeth_l3_process_inbound_buffer(struct qeth_card *card, 1361 int budget, int *done) 1362 { 1363 struct net_device *dev = card->dev; 1364 int work_done = 0; 1365 struct sk_buff *skb; 1366 struct qeth_hdr *hdr; 1367 unsigned int len; 1368 __u16 magic; 1369 1370 *done = 0; 1371 WARN_ON_ONCE(!budget); 1372 while (budget) { 1373 skb = qeth_core_get_next_skb(card, 1374 &card->qdio.in_q->bufs[card->rx.b_index], 1375 &card->rx.b_element, &card->rx.e_offset, &hdr); 1376 if (!skb) { 1377 *done = 1; 1378 break; 1379 } 1380 switch (hdr->hdr.l3.id) { 1381 case QETH_HEADER_TYPE_LAYER3: 1382 magic = *(__u16 *)skb->data; 1383 if (IS_IQD(card) && magic == ETH_P_AF_IUCV) { 1384 len = skb->len; 1385 dev_hard_header(skb, dev, ETH_P_AF_IUCV, 1386 dev->dev_addr, "FAKELL", len); 1387 skb->protocol = eth_type_trans(skb, dev); 1388 netif_receive_skb(skb); 1389 } else { 1390 qeth_l3_rebuild_skb(card, skb, hdr); 1391 len = skb->len; 1392 napi_gro_receive(&card->napi, skb); 1393 } 1394 break; 1395 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ 1396 skb->protocol = eth_type_trans(skb, skb->dev); 1397 len = skb->len; 1398 netif_receive_skb(skb); 1399 break; 1400 default: 1401 dev_kfree_skb_any(skb); 1402 QETH_CARD_TEXT(card, 3, "inbunkno"); 1403 QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); 1404 continue; 1405 } 1406 work_done++; 1407 budget--; 1408 QETH_CARD_STAT_INC(card, rx_packets); 1409 QETH_CARD_STAT_ADD(card, rx_bytes, len); 1410 } 1411 return work_done; 1412 } 1413 1414 static void qeth_l3_stop_card(struct qeth_card *card) 1415 { 1416 QETH_CARD_TEXT(card, 2, "stopcard"); 1417 1418 qeth_set_allowed_threads(card, 0, 1); 1419 1420 cancel_work_sync(&card->rx_mode_work); 1421 qeth_l3_drain_rx_mode_cache(card); 1422 1423 if (card->options.sniffer && 1424 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) 1425 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); 1426 1427 if (card->state == CARD_STATE_SOFTSETUP) { 1428 qeth_l3_clear_ip_htable(card, 1); 1429 qeth_clear_ipacmd_list(card); 1430 card->state = CARD_STATE_HARDSETUP; 1431 } 1432 if 
(card->state == CARD_STATE_HARDSETUP) { 1433 qeth_qdio_clear_card(card, 0); 1434 qeth_drain_output_queues(card); 1435 qeth_clear_working_pool_list(card); 1436 card->state = CARD_STATE_DOWN; 1437 } 1438 1439 qeth_clear_cmd_buffers(&card->write); 1440 flush_workqueue(card->event_wq); 1441 } 1442 1443 /* 1444 * test for and Switch promiscuous mode (on or off) 1445 * either for guestlan or HiperSocket Sniffer 1446 */ 1447 static void 1448 qeth_l3_handle_promisc_mode(struct qeth_card *card) 1449 { 1450 struct net_device *dev = card->dev; 1451 1452 if (((dev->flags & IFF_PROMISC) && 1453 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || 1454 (!(dev->flags & IFF_PROMISC) && 1455 (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) 1456 return; 1457 1458 if (IS_VM_NIC(card)) { /* Guestlan trace */ 1459 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 1460 qeth_setadp_promisc_mode(card); 1461 } else if (card->options.sniffer && /* HiperSockets trace */ 1462 qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 1463 if (dev->flags & IFF_PROMISC) { 1464 QETH_CARD_TEXT(card, 3, "+promisc"); 1465 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); 1466 } else { 1467 QETH_CARD_TEXT(card, 3, "-promisc"); 1468 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); 1469 } 1470 } 1471 } 1472 1473 static void qeth_l3_rx_mode_work(struct work_struct *work) 1474 { 1475 struct qeth_card *card = container_of(work, struct qeth_card, 1476 rx_mode_work); 1477 struct qeth_ipaddr *addr; 1478 struct hlist_node *tmp; 1479 int i, rc; 1480 1481 QETH_CARD_TEXT(card, 3, "setmulti"); 1482 1483 if (!card->options.sniffer) { 1484 qeth_l3_add_multicast_ipv4(card); 1485 qeth_l3_add_multicast_ipv6(card); 1486 1487 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { 1488 switch (addr->disp_flag) { 1489 case QETH_DISP_ADDR_DELETE: 1490 rc = qeth_l3_deregister_addr_entry(card, addr); 1491 if (!rc || rc == -ENOENT) { 1492 hash_del(&addr->hnode); 1493 kfree(addr); 1494 } 1495 break; 1496 case QETH_DISP_ADDR_ADD: 1497 rc = qeth_l3_register_addr_entry(card, addr); 1498 if (rc && rc != -ENETDOWN) { 1499 hash_del(&addr->hnode); 1500 kfree(addr); 1501 break; 1502 } 1503 addr->ref_counter = 1; 1504 /* fall through */ 1505 default: 1506 /* for next call to set_rx_mode(): */ 1507 addr->disp_flag = QETH_DISP_ADDR_DELETE; 1508 } 1509 } 1510 1511 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 1512 return; 1513 } 1514 qeth_l3_handle_promisc_mode(card); 1515 } 1516 1517 static int qeth_l3_arp_makerc(u16 rc) 1518 { 1519 switch (rc) { 1520 case IPA_RC_SUCCESS: 1521 return 0; 1522 case QETH_IPA_ARP_RC_NOTSUPP: 1523 case QETH_IPA_ARP_RC_Q_NOTSUPP: 1524 return -EOPNOTSUPP; 1525 case QETH_IPA_ARP_RC_OUT_OF_RANGE: 1526 return -EINVAL; 1527 case QETH_IPA_ARP_RC_Q_NO_DATA: 1528 return -ENOENT; 1529 default: 1530 return -EIO; 1531 } 1532 } 1533 1534 static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply, 1535 unsigned long data) 1536 { 1537 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 1538 1539 qeth_setassparms_cb(card, reply, data); 1540 return qeth_l3_arp_makerc(cmd->hdr.return_code); 1541 } 1542 1543 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) 1544 { 1545 struct qeth_cmd_buffer *iob; 1546 int rc; 1547 1548 QETH_CARD_TEXT(card, 3, "arpstnoe"); 1549 1550 /* 1551 * currently GuestLAN only supports the ARP assist function 1552 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; 1553 * thus we say EOPNOTSUPP for this ARP function 1554 
*/ 1555 if (IS_VM_NIC(card)) 1556 return -EOPNOTSUPP; 1557 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1558 return -EOPNOTSUPP; 1559 } 1560 1561 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1562 IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 4, 1563 QETH_PROT_IPV4); 1564 if (!iob) 1565 return -ENOMEM; 1566 1567 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries; 1568 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); 1569 if (rc) 1570 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n", 1571 CARD_DEVID(card), rc); 1572 return rc; 1573 } 1574 1575 static __u32 get_arp_entry_size(struct qeth_card *card, 1576 struct qeth_arp_query_data *qdata, 1577 struct qeth_arp_entrytype *type, __u8 strip_entries) 1578 { 1579 __u32 rc; 1580 __u8 is_hsi; 1581 1582 is_hsi = qdata->reply_bits == 5; 1583 if (type->ip == QETHARP_IP_ADDR_V4) { 1584 QETH_CARD_TEXT(card, 4, "arpev4"); 1585 if (strip_entries) { 1586 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) : 1587 sizeof(struct qeth_arp_qi_entry7_short); 1588 } else { 1589 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) : 1590 sizeof(struct qeth_arp_qi_entry7); 1591 } 1592 } else if (type->ip == QETHARP_IP_ADDR_V6) { 1593 QETH_CARD_TEXT(card, 4, "arpev6"); 1594 if (strip_entries) { 1595 rc = is_hsi ? 1596 sizeof(struct qeth_arp_qi_entry5_short_ipv6) : 1597 sizeof(struct qeth_arp_qi_entry7_short_ipv6); 1598 } else { 1599 rc = is_hsi ? 1600 sizeof(struct qeth_arp_qi_entry5_ipv6) : 1601 sizeof(struct qeth_arp_qi_entry7_ipv6); 1602 } 1603 } else { 1604 QETH_CARD_TEXT(card, 4, "arpinv"); 1605 rc = 0; 1606 } 1607 1608 return rc; 1609 } 1610 1611 static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot) 1612 { 1613 return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) || 1614 (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6); 1615 } 1616 1617 static int qeth_l3_arp_query_cb(struct qeth_card *card, 1618 struct qeth_reply *reply, unsigned long data) 1619 { 1620 struct qeth_ipa_cmd *cmd; 1621 struct qeth_arp_query_data *qdata; 1622 struct qeth_arp_query_info *qinfo; 1623 int e; 1624 int entrybytes_done; 1625 int stripped_bytes; 1626 __u8 do_strip_entries; 1627 1628 QETH_CARD_TEXT(card, 3, "arpquecb"); 1629 1630 qinfo = (struct qeth_arp_query_info *) reply->param; 1631 cmd = (struct qeth_ipa_cmd *) data; 1632 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version); 1633 if (cmd->hdr.return_code) { 1634 QETH_CARD_TEXT(card, 4, "arpcberr"); 1635 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); 1636 return qeth_l3_arp_makerc(cmd->hdr.return_code); 1637 } 1638 if (cmd->data.setassparms.hdr.return_code) { 1639 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 1640 QETH_CARD_TEXT(card, 4, "setaperr"); 1641 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); 1642 return qeth_l3_arp_makerc(cmd->hdr.return_code); 1643 } 1644 qdata = &cmd->data.setassparms.data.query_arp; 1645 QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); 1646 1647 do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0; 1648 stripped_bytes = do_strip_entries ? 
QETH_QARP_MEDIASPECIFIC_BYTES : 0; 1649 entrybytes_done = 0; 1650 for (e = 0; e < qdata->no_entries; ++e) { 1651 char *cur_entry; 1652 __u32 esize; 1653 struct qeth_arp_entrytype *etype; 1654 1655 cur_entry = &qdata->data + entrybytes_done; 1656 etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type; 1657 if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) { 1658 QETH_CARD_TEXT(card, 4, "pmis"); 1659 QETH_CARD_TEXT_(card, 4, "%i", etype->ip); 1660 break; 1661 } 1662 esize = get_arp_entry_size(card, qdata, etype, 1663 do_strip_entries); 1664 QETH_CARD_TEXT_(card, 5, "esz%i", esize); 1665 if (!esize) 1666 break; 1667 1668 if ((qinfo->udata_len - qinfo->udata_offset) < esize) { 1669 QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC); 1670 memset(qinfo->udata, 0, 4); 1671 return -ENOSPC; 1672 } 1673 1674 memcpy(qinfo->udata + qinfo->udata_offset, 1675 &qdata->data + entrybytes_done + stripped_bytes, 1676 esize); 1677 entrybytes_done += esize + stripped_bytes; 1678 qinfo->udata_offset += esize; 1679 ++qinfo->no_entries; 1680 } 1681 /* check if all replies received ... */ 1682 if (cmd->data.setassparms.hdr.seq_no < 1683 cmd->data.setassparms.hdr.number_of_replies) 1684 return 1; 1685 QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries); 1686 memcpy(qinfo->udata, &qinfo->no_entries, 4); 1687 /* keep STRIP_ENTRIES flag so the user program can distinguish 1688 * stripped entries from normal ones */ 1689 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 1690 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; 1691 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); 1692 QETH_CARD_TEXT_(card, 4, "rc%i", 0); 1693 return 0; 1694 } 1695 1696 static int qeth_l3_query_arp_cache_info(struct qeth_card *card, 1697 enum qeth_prot_versions prot, 1698 struct qeth_arp_query_info *qinfo) 1699 { 1700 struct qeth_cmd_buffer *iob; 1701 struct qeth_ipa_cmd *cmd; 1702 int rc; 1703 1704 QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); 1705 1706 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1707 IPA_CMD_ASS_ARP_QUERY_INFO, 1708 sizeof(struct qeth_arp_query_data) 1709 - sizeof(char), 1710 prot); 1711 if (!iob) 1712 return -ENOMEM; 1713 cmd = __ipa_cmd(iob); 1714 cmd->data.setassparms.data.query_arp.request_bits = 0x000F; 1715 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo); 1716 if (rc) 1717 QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n", 1718 CARD_DEVID(card), rc); 1719 return rc; 1720 } 1721 1722 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) 1723 { 1724 struct qeth_arp_query_info qinfo = {0, }; 1725 int rc; 1726 1727 QETH_CARD_TEXT(card, 3, "arpquery"); 1728 1729 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ 1730 IPA_ARP_PROCESSING)) { 1731 QETH_CARD_TEXT(card, 3, "arpqnsup"); 1732 rc = -EOPNOTSUPP; 1733 goto out; 1734 } 1735 /* get size of userspace buffer and mask_bits -> 6 bytes */ 1736 if (copy_from_user(&qinfo, udata, 6)) { 1737 rc = -EFAULT; 1738 goto out; 1739 } 1740 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 1741 if (!qinfo.udata) { 1742 rc = -ENOMEM; 1743 goto out; 1744 } 1745 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; 1746 rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo); 1747 if (rc) { 1748 if (copy_to_user(udata, qinfo.udata, 4)) 1749 rc = -EFAULT; 1750 goto free_and_out; 1751 } 1752 if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { 1753 /* fails in case of GuestLAN QDIO mode */ 1754 qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo); 1755 } 1756 if (copy_to_user(udata, 
qinfo.udata, qinfo.udata_len)) { 1757 QETH_CARD_TEXT(card, 4, "qactf"); 1758 rc = -EFAULT; 1759 goto free_and_out; 1760 } 1761 QETH_CARD_TEXT(card, 4, "qacts"); 1762 1763 free_and_out: 1764 kfree(qinfo.udata); 1765 out: 1766 return rc; 1767 } 1768 1769 static int qeth_l3_arp_modify_entry(struct qeth_card *card, 1770 struct qeth_arp_cache_entry *entry, 1771 enum qeth_arp_process_subcmds arp_cmd) 1772 { 1773 struct qeth_arp_cache_entry *cmd_entry; 1774 struct qeth_cmd_buffer *iob; 1775 int rc; 1776 1777 if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY) 1778 QETH_CARD_TEXT(card, 3, "arpadd"); 1779 else 1780 QETH_CARD_TEXT(card, 3, "arpdel"); 1781 1782 /* 1783 * currently GuestLAN only supports the ARP assist function 1784 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; 1785 * thus we say EOPNOTSUPP for this ARP function 1786 */ 1787 if (IS_VM_NIC(card)) 1788 return -EOPNOTSUPP; 1789 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1790 return -EOPNOTSUPP; 1791 } 1792 1793 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd, 1794 sizeof(*cmd_entry), QETH_PROT_IPV4); 1795 if (!iob) 1796 return -ENOMEM; 1797 1798 cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry; 1799 ether_addr_copy(cmd_entry->macaddr, entry->macaddr); 1800 memcpy(cmd_entry->ipaddr, entry->ipaddr, 4); 1801 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); 1802 if (rc) 1803 QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n", 1804 arp_cmd, CARD_DEVID(card), rc); 1805 return rc; 1806 } 1807 1808 static int qeth_l3_arp_flush_cache(struct qeth_card *card) 1809 { 1810 struct qeth_cmd_buffer *iob; 1811 int rc; 1812 1813 QETH_CARD_TEXT(card, 3, "arpflush"); 1814 1815 /* 1816 * currently GuestLAN only supports the ARP assist function 1817 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; 1818 * thus we say EOPNOTSUPP for this ARP function 1819 */ 1820 if (IS_VM_NIC(card) || IS_IQD(card)) 1821 return -EOPNOTSUPP; 1822 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1823 return -EOPNOTSUPP; 1824 } 1825 1826 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1827 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0, 1828 QETH_PROT_IPV4); 1829 if (!iob) 1830 return -ENOMEM; 1831 1832 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); 1833 if (rc) 1834 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n", 1835 CARD_DEVID(card), rc); 1836 return rc; 1837 } 1838 1839 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1840 { 1841 struct qeth_card *card = dev->ml_priv; 1842 struct qeth_arp_cache_entry arp_entry; 1843 enum qeth_arp_process_subcmds arp_cmd; 1844 int rc = 0; 1845 1846 switch (cmd) { 1847 case SIOC_QETH_ARP_SET_NO_ENTRIES: 1848 if (!capable(CAP_NET_ADMIN)) { 1849 rc = -EPERM; 1850 break; 1851 } 1852 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue); 1853 break; 1854 case SIOC_QETH_ARP_QUERY_INFO: 1855 if (!capable(CAP_NET_ADMIN)) { 1856 rc = -EPERM; 1857 break; 1858 } 1859 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); 1860 break; 1861 case SIOC_QETH_ARP_ADD_ENTRY: 1862 case SIOC_QETH_ARP_REMOVE_ENTRY: 1863 if (!capable(CAP_NET_ADMIN)) 1864 return -EPERM; 1865 if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry))) 1866 return -EFAULT; 1867 1868 arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ? 
1869 IPA_CMD_ASS_ARP_ADD_ENTRY : 1870 IPA_CMD_ASS_ARP_REMOVE_ENTRY; 1871 return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd); 1872 case SIOC_QETH_ARP_FLUSH_CACHE: 1873 if (!capable(CAP_NET_ADMIN)) { 1874 rc = -EPERM; 1875 break; 1876 } 1877 rc = qeth_l3_arp_flush_cache(card); 1878 break; 1879 default: 1880 rc = -EOPNOTSUPP; 1881 } 1882 return rc; 1883 } 1884 1885 static int qeth_l3_get_cast_type(struct sk_buff *skb) 1886 { 1887 int ipv = qeth_get_ip_version(skb); 1888 struct neighbour *n = NULL; 1889 struct dst_entry *dst; 1890 1891 rcu_read_lock(); 1892 dst = skb_dst(skb); 1893 if (dst) { 1894 struct rt6_info *rt = (struct rt6_info *) dst; 1895 1896 dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0); 1897 if (dst) 1898 n = dst_neigh_lookup_skb(dst, skb); 1899 } 1900 1901 if (n) { 1902 int cast_type = n->type; 1903 1904 rcu_read_unlock(); 1905 neigh_release(n); 1906 if ((cast_type == RTN_BROADCAST) || 1907 (cast_type == RTN_MULTICAST) || 1908 (cast_type == RTN_ANYCAST)) 1909 return cast_type; 1910 return RTN_UNICAST; 1911 } 1912 rcu_read_unlock(); 1913 1914 /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ 1915 switch (ipv) { 1916 case 4: 1917 if (ipv4_is_lbcast(ip_hdr(skb)->daddr)) 1918 return RTN_BROADCAST; 1919 return ipv4_is_multicast(ip_hdr(skb)->daddr) ? 1920 RTN_MULTICAST : RTN_UNICAST; 1921 case 6: 1922 return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? 1923 RTN_MULTICAST : RTN_UNICAST; 1924 default: 1925 /* ... and MAC address */ 1926 return qeth_get_ether_cast_type(skb); 1927 } 1928 } 1929 1930 static u8 qeth_l3_cast_type_to_flag(int cast_type) 1931 { 1932 if (cast_type == RTN_MULTICAST) 1933 return QETH_CAST_MULTICAST; 1934 if (cast_type == RTN_ANYCAST) 1935 return QETH_CAST_ANYCAST; 1936 if (cast_type == RTN_BROADCAST) 1937 return QETH_CAST_BROADCAST; 1938 return QETH_CAST_UNICAST; 1939 } 1940 1941 static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, 1942 struct qeth_hdr *hdr, struct sk_buff *skb, 1943 int ipv, int cast_type, unsigned int data_len) 1944 { 1945 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 1946 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 1947 struct qeth_card *card = queue->card; 1948 struct dst_entry *dst; 1949 1950 hdr->hdr.l3.length = data_len; 1951 1952 if (skb_is_gso(skb)) { 1953 hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO; 1954 } else { 1955 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; 1956 1957 if (skb->protocol == htons(ETH_P_AF_IUCV)) { 1958 l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; 1959 l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80); 1960 memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2], 1961 iucv_trans_hdr(skb)->destUserID, 8); 1962 return; 1963 } 1964 1965 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1966 qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); 1967 /* some HW requires combined L3+L4 csum offload: */ 1968 if (ipv == 4) 1969 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; 1970 QETH_TXQ_STAT_INC(queue, skbs_csum); 1971 } 1972 } 1973 1974 if (ipv == 4 || IS_IQD(card)) { 1975 /* NETIF_F_HW_VLAN_CTAG_TX */ 1976 if (skb_vlan_tag_present(skb)) { 1977 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME; 1978 hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); 1979 } 1980 } else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) { 1981 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG; 1982 hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI); 1983 } 1984 1985 l3_hdr->flags = qeth_l3_cast_type_to_flag(cast_type); 1986 1987 /* OSA only: */ 1988 if (!ipv) { 1989 l3_hdr->flags |= QETH_HDR_PASSTHRU; 1990 
return; 1991 } 1992 1993 rcu_read_lock(); 1994 dst = skb_dst(skb); 1995 1996 if (ipv == 4) { 1997 struct rtable *rt; 1998 1999 if (dst) 2000 dst = dst_check(dst, 0); 2001 rt = (struct rtable *) dst; 2002 2003 *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ? 2004 rt_nexthop(rt, ip_hdr(skb)->daddr) : 2005 ip_hdr(skb)->daddr; 2006 } else { 2007 /* IPv6 */ 2008 struct rt6_info *rt; 2009 2010 if (dst) { 2011 rt = (struct rt6_info *) dst; 2012 dst = dst_check(dst, rt6_get_cookie(rt)); 2013 } 2014 rt = (struct rt6_info *) dst; 2015 2016 if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) 2017 l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway; 2018 else 2019 l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr; 2020 2021 hdr->hdr.l3.flags |= QETH_HDR_IPV6; 2022 if (!IS_IQD(card)) 2023 hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU; 2024 } 2025 rcu_read_unlock(); 2026 } 2027 2028 static void qeth_l3_fixup_headers(struct sk_buff *skb) 2029 { 2030 struct iphdr *iph = ip_hdr(skb); 2031 2032 /* this is safe, IPv6 traffic takes a different path */ 2033 if (skb->ip_summed == CHECKSUM_PARTIAL) 2034 iph->check = 0; 2035 if (skb_is_gso(skb)) { 2036 iph->tot_len = 0; 2037 tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr, 2038 iph->daddr, 0); 2039 } 2040 } 2041 2042 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, 2043 struct qeth_qdio_out_q *queue, int ipv, int cast_type) 2044 { 2045 unsigned int hw_hdr_len; 2046 int rc; 2047 2048 /* re-use the L2 header area for the HW header: */ 2049 hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) : 2050 sizeof(struct qeth_hdr); 2051 rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); 2052 if (rc) 2053 return rc; 2054 skb_pull(skb, ETH_HLEN); 2055 2056 qeth_l3_fixup_headers(skb); 2057 return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header); 2058 } 2059 2060 static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, 2061 struct net_device *dev) 2062 { 2063 struct qeth_card *card = dev->ml_priv; 2064 u16 txq = skb_get_queue_mapping(skb); 2065 int ipv = qeth_get_ip_version(skb); 2066 struct qeth_qdio_out_q *queue; 2067 int tx_bytes = skb->len; 2068 int cast_type, rc; 2069 2070 if (IS_IQD(card)) { 2071 queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)]; 2072 2073 if (card->options.sniffer) 2074 goto tx_drop; 2075 if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || 2076 (card->options.cq == QETH_CQ_ENABLED && 2077 skb->protocol != htons(ETH_P_AF_IUCV))) 2078 goto tx_drop; 2079 2080 if (txq == QETH_IQD_MCAST_TXQ) 2081 cast_type = qeth_l3_get_cast_type(skb); 2082 else 2083 cast_type = RTN_UNICAST; 2084 } else { 2085 queue = card->qdio.out_qs[txq]; 2086 cast_type = qeth_l3_get_cast_type(skb); 2087 } 2088 2089 if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable) 2090 goto tx_drop; 2091 2092 if (ipv == 4 || IS_IQD(card)) 2093 rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); 2094 else 2095 rc = qeth_xmit(card, skb, queue, ipv, cast_type, 2096 qeth_l3_fill_header); 2097 2098 if (!rc) { 2099 QETH_TXQ_STAT_INC(queue, tx_packets); 2100 QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); 2101 return NETDEV_TX_OK; 2102 } 2103 2104 tx_drop: 2105 QETH_TXQ_STAT_INC(queue, tx_dropped); 2106 kfree_skb(skb); 2107 return NETDEV_TX_OK; 2108 } 2109 2110 static void qeth_l3_set_rx_mode(struct net_device *dev) 2111 { 2112 struct qeth_card *card = dev->ml_priv; 2113 2114 schedule_work(&card->rx_mode_work); 2115 } 2116 2117 /* 2118 * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. 
static void qeth_l3_set_rx_mode(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	schedule_work(&card->rx_mode_work);
}

/*
 * We need NOARP for IPv4, but we want neighbor solicitation for IPv6. Setting
 * NOARP on the netdevice is not an option, because it also turns off neighbor
 * solicitation. So for IPv4 we install a neighbor_setup function instead: we
 * don't want ARP resolution, but we still want the hard header so that packet
 * sockets (e.g. tcpdump) keep working.
 */
static int qeth_l3_neigh_setup_noarp(struct neighbour *n)
{
	n->nud_state = NUD_NOARP;
	memcpy(n->ha, "FAKELL", 6);
	n->output = n->ops->connected_output;
	return 0;
}

static int
qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
{
	if (np->tbl->family == AF_INET)
		np->neigh_setup = qeth_l3_neigh_setup_noarp;

	return 0;
}

static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
						    struct net_device *dev,
						    netdev_features_t features)
{
	if (qeth_get_ip_version(skb) != 4)
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
	return qeth_features_check(skb, dev, features);
}

static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev)
{
	return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
				     sb_dev);
}

static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev)
{
	struct qeth_card *card = dev->ml_priv;

	return qeth_get_priority_queue(card, skb);
}

static const struct net_device_ops qeth_l3_netdev_ops = {
	.ndo_open		= qeth_open,
	.ndo_stop		= qeth_stop,
	.ndo_get_stats64	= qeth_get_stats64,
	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
	.ndo_select_queue	= qeth_l3_iqd_select_queue,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
	.ndo_do_ioctl		= qeth_do_ioctl,
	.ndo_fix_features	= qeth_fix_features,
	.ndo_set_features	= qeth_set_features,
	.ndo_vlan_rx_add_vid	= qeth_l3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qeth_l3_vlan_rx_kill_vid,
	.ndo_tx_timeout		= qeth_tx_timeout,
};

static const struct net_device_ops qeth_l3_osa_netdev_ops = {
	.ndo_open		= qeth_open,
	.ndo_stop		= qeth_stop,
	.ndo_get_stats64	= qeth_get_stats64,
	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
	.ndo_features_check	= qeth_l3_osa_features_check,
	.ndo_select_queue	= qeth_l3_osa_select_queue,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
	.ndo_do_ioctl		= qeth_do_ioctl,
	.ndo_fix_features	= qeth_fix_features,
	.ndo_set_features	= qeth_set_features,
	.ndo_vlan_rx_add_vid	= qeth_l3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qeth_l3_vlan_rx_kill_vid,
	.ndo_tx_timeout		= qeth_tx_timeout,
	.ndo_neigh_setup	= qeth_l3_neigh_setup,
};

static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
	unsigned int headroom;
	int rc;

	if (IS_OSD(card) || IS_OSX(card)) {
		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
		    (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
			pr_info("qeth_l3: ignoring TR device\n");
			return -ENODEV;
		}

		card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;

		/* IPv6 address autoconfiguration */
		qeth_l3_get_unique_id(card);
		if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
			card->dev->dev_id = card->info.unique_id & 0xffff;

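		/* Don't advertise scatter-gather and the checksum/TSO
		 * offloads for z/VM virtual NICs, only for real hardware.
		 */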
		if (!IS_VM_NIC(card)) {
			card->dev->features |= NETIF_F_SG;
			card->dev->hw_features |= NETIF_F_TSO |
				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
			card->dev->vlan_features |= NETIF_F_TSO |
				NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
		}

		if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
			card->dev->hw_features |= NETIF_F_IPV6_CSUM;
			card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
		}
		if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
			card->dev->hw_features |= NETIF_F_TSO6;
			card->dev->vlan_features |= NETIF_F_TSO6;
		}

		/* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
		if (card->dev->hw_features & NETIF_F_TSO6)
			headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
		else if (card->dev->hw_features & NETIF_F_TSO)
			headroom = sizeof(struct qeth_hdr_tso);
		else
			headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
	} else if (IS_IQD(card)) {
		card->dev->flags |= IFF_NOARP;
		card->dev->netdev_ops = &qeth_l3_netdev_ops;
		headroom = sizeof(struct qeth_hdr) - ETH_HLEN;

		rc = qeth_l3_iqd_read_initial_mac(card);
		if (rc)
			goto out;
	} else
		return -ENODEV;

	card->dev->needed_headroom = headroom;
	card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_CTAG_FILTER;

	netif_keep_dst(card->dev);
	if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6))
		netif_set_gso_max_size(card->dev,
				       PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));

	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
	rc = register_netdev(card->dev);
	if (!rc && carrier_ok)
		netif_carrier_on(card->dev);

out:
	if (rc)
		card->dev->netdev_ops = NULL;
	return rc;
}

static const struct device_type qeth_l3_devtype = {
	.name = "qeth_layer3",
	.groups = qeth_l3_attr_groups,
};

static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	hash_init(card->ip_htable);
	mutex_init(&card->ip_lock);
	card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0,
					       dev_name(&gdev->dev));
	if (!card->cmd_wq)
		return -ENOMEM;

	if (gdev->dev.type == &qeth_generic_devtype) {
		rc = qeth_l3_create_device_attributes(&gdev->dev);
		if (rc) {
			destroy_workqueue(card->cmd_wq);
			return rc;
		}
	}

	hash_init(card->ip_mc_htable);
	INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
	return 0;
}

static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);

	if (cgdev->dev.type == &qeth_generic_devtype)
		qeth_l3_remove_device_attributes(&cgdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (cgdev->state == CCWGROUP_ONLINE)
		qeth_l3_set_offline(cgdev);

	cancel_work_sync(&card->close_dev_work);
	if (qeth_netdev_is_registered(card->dev))
		unregister_netdev(card->dev);

	flush_workqueue(card->cmd_wq);
	destroy_workqueue(card->cmd_wq);
	qeth_l3_clear_ip_htable(card, 0);
	qeth_l3_clear_ipato_list(card);
}

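/* Bring the device online: set up the channels and QDIO (hardsetup), then the
 * adapter parameters, IP assists and routing (softsetup), and finally register
 * or re-attach the netdevice.
 */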
static int qeth_l3_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	struct net_device *dev = card->dev;
	int rc = 0;
	bool carrier_ok;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_core_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto out_remove;
	}

	if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
		if (card->info.hwtrap &&
		    qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
			card->info.hwtrap = 0;
	} else
		card->info.hwtrap = 0;

	card->state = CARD_STATE_HARDSETUP;
	qeth_print_status_message(card);

	/* softsetup */
	QETH_CARD_TEXT(card, 2, "softsetp");

	rc = qeth_l3_setadapter_parms(card);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
	if (!card->options.sniffer) {
		rc = qeth_l3_start_ipassists(card);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out_remove;
		}
		rc = qeth_l3_setrouting_v4(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "4err%04x", rc);
		rc = qeth_l3_setrouting_v6(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "5err%04x", rc);
	}

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		rc = -ENODEV;
		goto out_remove;
	}
	card->state = CARD_STATE_SOFTSETUP;

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	qeth_l3_recover_ip(card);

	if (!qeth_netdev_is_registered(dev)) {
		rc = qeth_l3_setup_netdev(card, carrier_ok);
		if (rc)
			goto out_remove;
	} else {
		rtnl_lock();
		if (carrier_ok)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		netif_device_attach(dev);
		qeth_enable_hw_features(dev);

		if (card->info.open_when_online) {
			card->info.open_when_online = 0;
			dev_open(dev, NULL);
		}
		rtnl_unlock();
	}
	qeth_trace_features(card);
	/* let user space know that the device is online */
	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
out_remove:
	qeth_l3_stop_card(card);
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
				 int recovery_mode)
{
	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
	int rc = 0, rc2 = 0, rc3 = 0;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	rtnl_lock();
	card->info.open_when_online = card->dev->flags & IFF_UP;
	dev_close(card->dev);
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	qeth_l3_stop_card(card);
	if (card->options.cq == QETH_CQ_ENABLED) {
		rtnl_lock();
		call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
		rtnl_unlock();
	}
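	/* Take the data, write and read channels offline and report the
	 * first error that occurred, if any.
	 */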
	rc = ccw_device_set_offline(CARD_DDEV(card));
	rc2 = ccw_device_set_offline(CARD_WDEV(card));
	rc3 = ccw_device_set_offline(CARD_RDEV(card));
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user space know that the device is offline */
	kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return 0;
}

static int qeth_l3_set_offline(struct ccwgroup_device *cgdev)
{
	return __qeth_l3_set_offline(cgdev, 0);
}

static int qeth_l3_recover(void *ptr)
{
	struct qeth_card *card;
	int rc = 0;

	card = (struct qeth_card *) ptr;
	QETH_CARD_TEXT(card, 2, "recover1");
	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");
	__qeth_l3_set_offline(card->gdev, 1);
	rc = qeth_l3_set_online(card->gdev);
	if (!rc)
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	else {
		ccwgroup_set_offline(card->gdev);
		dev_warn(&card->gdev->dev, "The qeth device driver "
			 "failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;

	qeth_l3_set_offline(gdev);
	return 0;
}

static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	rc = qeth_l3_set_online(gdev);

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (rc)
		dev_warn(&card->gdev->dev, "The qeth device driver "
			 "failed to recover an error on the device\n");
	return rc;
}

/* Returns zero if the command is successfully "consumed" */
static int qeth_l3_control_event(struct qeth_card *card,
				 struct qeth_ipa_cmd *cmd)
{
	return 1;
}

struct qeth_discipline qeth_l3_discipline = {
	.devtype = &qeth_l3_devtype,
	.process_rx_buffer = qeth_l3_process_inbound_buffer,
	.recover = qeth_l3_recover,
	.setup = qeth_l3_probe_device,
	.remove = qeth_l3_remove_device,
	.set_online = qeth_l3_set_online,
	.set_offline = qeth_l3_set_offline,
	.freeze = qeth_l3_pm_suspend,
	.thaw = qeth_l3_pm_resume,
	.restore = qeth_l3_pm_resume,
	.do_ioctl = qeth_l3_do_ioctl,
	.control_event_handler = qeth_l3_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l3_discipline);

static int qeth_l3_handle_ip_event(struct qeth_card *card,
				   struct qeth_ipaddr *addr,
				   unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		qeth_l3_modify_ip(card, addr, true);
		return NOTIFY_OK;
	case NETDEV_DOWN:
		qeth_l3_modify_ip(card, addr, false);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

struct qeth_l3_ip_event_work {
	struct work_struct work;
	struct qeth_card *card;
	struct qeth_ipaddr addr;
};

#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work)

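/* Deferred add/delete handlers for IPv6 address events: the inet6addr
 * notifier may run in atomic context, while qeth_l3_modify_ip() needs to
 * sleep, so the events are queued to the card's cmd_wq instead.
 */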
static void qeth_l3_add_ip_worker(struct work_struct *work)
{
	struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);

	qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true);
	kfree(work);
}

static void qeth_l3_delete_ip_worker(struct work_struct *work)
{
	struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);

	qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false);
	kfree(work);
}

static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
{
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	if (dev->netdev_ops == &qeth_l3_osa_netdev_ops ||
	    dev->netdev_ops == &qeth_l3_netdev_ops)
		return (struct qeth_card *) dev->ml_priv;
	return NULL;
}

static int qeth_l3_ip_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct qeth_ipaddr addr;
	struct qeth_card *card;

	if (dev_net(dev) != &init_net)
		return NOTIFY_DONE;

	card = qeth_l3_get_card_from_dev(dev);
	if (!card)
		return NOTIFY_DONE;
	QETH_CARD_TEXT(card, 3, "ipevent");

	qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
	addr.u.a4.addr = be32_to_cpu(ifa->ifa_address);
	addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);

	return qeth_l3_handle_ip_event(card, &addr, event);
}

static struct notifier_block qeth_l3_ip_notifier = {
	qeth_l3_ip_event,
	NULL,
};

static int qeth_l3_ip6_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = ifa->idev->dev;
	struct qeth_l3_ip_event_work *ip_work;
	struct qeth_card *card;

	if (event != NETDEV_UP && event != NETDEV_DOWN)
		return NOTIFY_DONE;

	card = qeth_l3_get_card_from_dev(dev);
	if (!card)
		return NOTIFY_DONE;
	QETH_CARD_TEXT(card, 3, "ip6event");
	if (!qeth_is_supported(card, IPA_IPV6))
		return NOTIFY_DONE;

	ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC);
	if (!ip_work)
		return NOTIFY_DONE;

	if (event == NETDEV_UP)
		INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker);
	else
		INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker);

	ip_work->card = card;
	qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL,
			    QETH_PROT_IPV6);
	ip_work->addr.u.a6.addr = ifa->addr;
	ip_work->addr.u.a6.pfxlen = ifa->prefix_len;

	queue_work(card->cmd_wq, &ip_work->work);
	return NOTIFY_OK;
}

static struct notifier_block qeth_l3_ip6_notifier = {
	qeth_l3_ip6_event,
	NULL,
};

static int qeth_l3_register_notifiers(void)
{
	int rc;

	QETH_DBF_TEXT(SETUP, 5, "regnotif");
	rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
	if (rc)
		return rc;
	rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
	if (rc) {
		unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
		return rc;
	}
	return 0;
}

static void qeth_l3_unregister_notifiers(void)
{
	QETH_DBF_TEXT(SETUP, 5, "unregnot");
	WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
	WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
}

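/* Module init/exit only (un)register the address notifiers; the discipline
 * itself is bound to individual devices through qeth_l3_discipline above.
 */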
static int __init qeth_l3_init(void)
{
	pr_info("register layer 3 discipline\n");
	return qeth_l3_register_notifiers();
}

static void __exit qeth_l3_exit(void)
{
	qeth_l3_unregister_notifiers();
	pr_info("unregister layer 3 discipline\n");
}

module_init(qeth_l3_init);
module_exit(qeth_l3_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 3 discipline");
MODULE_LICENSE("GPL");