1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright IBM Corp. 2007, 2009 4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, 5 * Frank Pavlic <fpavlic@de.ibm.com>, 6 * Thomas Spatzier <tspat@de.ibm.com>, 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 */ 9 10 #define KMSG_COMPONENT "qeth" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/module.h> 14 #include <linux/moduleparam.h> 15 #include <linux/bitops.h> 16 #include <linux/string.h> 17 #include <linux/errno.h> 18 #include <linux/kernel.h> 19 #include <linux/etherdevice.h> 20 #include <linux/ip.h> 21 #include <linux/in.h> 22 #include <linux/ipv6.h> 23 #include <linux/inetdevice.h> 24 #include <linux/igmp.h> 25 #include <linux/slab.h> 26 #include <linux/if_ether.h> 27 #include <linux/if_vlan.h> 28 #include <linux/skbuff.h> 29 30 #include <net/ip.h> 31 #include <net/arp.h> 32 #include <net/route.h> 33 #include <net/ipv6.h> 34 #include <net/ip6_route.h> 35 #include <net/iucv/af_iucv.h> 36 #include <linux/hashtable.h> 37 38 #include "qeth_l3.h" 39 40 41 static int qeth_l3_set_offline(struct ccwgroup_device *); 42 static void qeth_l3_set_rx_mode(struct net_device *dev); 43 static int qeth_l3_register_addr_entry(struct qeth_card *, 44 struct qeth_ipaddr *); 45 static int qeth_l3_deregister_addr_entry(struct qeth_card *, 46 struct qeth_ipaddr *); 47 48 static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) 49 { 50 sprintf(buf, "%pI4", addr); 51 } 52 53 static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) 54 { 55 sprintf(buf, "%pI6", addr); 56 } 57 58 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, 59 char *buf) 60 { 61 if (proto == QETH_PROT_IPV4) 62 qeth_l3_ipaddr4_to_string(addr, buf); 63 else if (proto == QETH_PROT_IPV6) 64 qeth_l3_ipaddr6_to_string(addr, buf); 65 } 66 67 static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot) 68 { 69 struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC); 70 71 if (addr) 72 qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot); 73 return addr; 74 } 75 76 static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, 77 struct qeth_ipaddr *query) 78 { 79 u32 key = qeth_l3_ipaddr_hash(query); 80 struct qeth_ipaddr *addr; 81 82 if (query->is_multicast) { 83 hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) 84 if (qeth_l3_addr_match_ip(addr, query)) 85 return addr; 86 } else { 87 hash_for_each_possible(card->ip_htable, addr, hnode, key) 88 if (qeth_l3_addr_match_ip(addr, query)) 89 return addr; 90 } 91 return NULL; 92 } 93 94 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) 95 { 96 int i, j; 97 u8 octet; 98 99 for (i = 0; i < len; ++i) { 100 octet = addr[i]; 101 for (j = 7; j >= 0; --j) { 102 bits[i*8 + j] = octet & 1; 103 octet >>= 1; 104 } 105 } 106 } 107 108 static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, 109 struct qeth_ipaddr *addr) 110 { 111 struct qeth_ipato_entry *ipatoe; 112 u8 addr_bits[128] = {0, }; 113 u8 ipatoe_bits[128] = {0, }; 114 int rc = 0; 115 116 if (!card->ipato.enabled) 117 return false; 118 if (addr->type != QETH_IP_TYPE_NORMAL) 119 return false; 120 121 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, 122 (addr->proto == QETH_PROT_IPV4)? 4:16); 123 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 124 if (addr->proto != ipatoe->proto) 125 continue; 126 qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits, 127 (ipatoe->proto == QETH_PROT_IPV4) ? 128 4 : 16); 129 if (addr->proto == QETH_PROT_IPV4) 130 rc = !memcmp(addr_bits, ipatoe_bits, 131 min(32, ipatoe->mask_bits)); 132 else 133 rc = !memcmp(addr_bits, ipatoe_bits, 134 min(128, ipatoe->mask_bits)); 135 if (rc) 136 break; 137 } 138 /* invert? */ 139 if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4) 140 rc = !rc; 141 else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6) 142 rc = !rc; 143 144 return rc; 145 } 146 147 static int qeth_l3_delete_ip(struct qeth_card *card, 148 struct qeth_ipaddr *tmp_addr) 149 { 150 int rc = 0; 151 struct qeth_ipaddr *addr; 152 153 if (tmp_addr->type == QETH_IP_TYPE_RXIP) 154 QETH_CARD_TEXT(card, 2, "delrxip"); 155 else if (tmp_addr->type == QETH_IP_TYPE_VIPA) 156 QETH_CARD_TEXT(card, 2, "delvipa"); 157 else 158 QETH_CARD_TEXT(card, 2, "delip"); 159 160 if (tmp_addr->proto == QETH_PROT_IPV4) 161 QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); 162 else { 163 QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); 164 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); 165 } 166 167 addr = qeth_l3_find_addr_by_ip(card, tmp_addr); 168 if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr)) 169 return -ENOENT; 170 171 addr->ref_counter--; 172 if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) 173 return rc; 174 if (addr->in_progress) 175 return -EINPROGRESS; 176 177 if (qeth_card_hw_is_reachable(card)) 178 rc = qeth_l3_deregister_addr_entry(card, addr); 179 180 hash_del(&addr->hnode); 181 kfree(addr); 182 183 return rc; 184 } 185 186 static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) 187 { 188 int rc = 0; 189 struct qeth_ipaddr *addr; 190 char buf[40]; 191 192 if (tmp_addr->type == QETH_IP_TYPE_RXIP) 193 QETH_CARD_TEXT(card, 2, "addrxip"); 194 else if (tmp_addr->type == QETH_IP_TYPE_VIPA) 195 QETH_CARD_TEXT(card, 2, "addvipa"); 196 else 197 QETH_CARD_TEXT(card, 2, "addip"); 198 199 if (tmp_addr->proto == QETH_PROT_IPV4) 200 QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); 201 else { 202 QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); 203 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); 204 } 205 206 addr = qeth_l3_find_addr_by_ip(card, tmp_addr); 207 if (addr) { 208 if (tmp_addr->type != QETH_IP_TYPE_NORMAL) 209 return -EADDRINUSE; 210 if (qeth_l3_addr_match_all(addr, tmp_addr)) { 211 addr->ref_counter++; 212 return 0; 213 } 214 qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, 215 buf); 216 dev_warn(&card->gdev->dev, 217 "Registering IP address %s failed\n", buf); 218 return -EADDRINUSE; 219 } else { 220 addr = qeth_l3_get_addr_buffer(tmp_addr->proto); 221 if (!addr) 222 return -ENOMEM; 223 224 memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); 225 addr->ref_counter = 1; 226 227 if (qeth_l3_is_addr_covered_by_ipato(card, addr)) { 228 QETH_CARD_TEXT(card, 2, "tkovaddr"); 229 addr->ipato = 1; 230 } 231 hash_add(card->ip_htable, &addr->hnode, 232 qeth_l3_ipaddr_hash(addr)); 233 234 if (!qeth_card_hw_is_reachable(card)) { 235 addr->disp_flag = QETH_DISP_ADDR_ADD; 236 return 0; 237 } 238 239 /* qeth_l3_register_addr_entry can go to sleep 240 * if we add a IPV4 addr. It is caused by the reason 241 * that SETIP ipa cmd starts ARP staff for IPV4 addr. 242 * Thus we should unlock spinlock, and make a protection 243 * using in_progress variable to indicate that there is 244 * an hardware operation with this IPV4 address 245 */ 246 if (addr->proto == QETH_PROT_IPV4) { 247 addr->in_progress = 1; 248 mutex_unlock(&card->ip_lock); 249 rc = qeth_l3_register_addr_entry(card, addr); 250 mutex_lock(&card->ip_lock); 251 addr->in_progress = 0; 252 } else 253 rc = qeth_l3_register_addr_entry(card, addr); 254 255 if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { 256 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 257 if (addr->ref_counter < 1) { 258 qeth_l3_deregister_addr_entry(card, addr); 259 hash_del(&addr->hnode); 260 kfree(addr); 261 } 262 } else { 263 hash_del(&addr->hnode); 264 kfree(addr); 265 } 266 } 267 return rc; 268 } 269 270 static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr, 271 bool add) 272 { 273 int rc; 274 275 mutex_lock(&card->ip_lock); 276 rc = add ? qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr); 277 mutex_unlock(&card->ip_lock); 278 279 return rc; 280 } 281 282 static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card) 283 { 284 struct qeth_ipaddr *addr; 285 struct hlist_node *tmp; 286 int i; 287 288 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { 289 hash_del(&addr->hnode); 290 kfree(addr); 291 } 292 } 293 294 static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) 295 { 296 struct qeth_ipaddr *addr; 297 struct hlist_node *tmp; 298 int i; 299 300 QETH_CARD_TEXT(card, 4, "clearip"); 301 302 mutex_lock(&card->ip_lock); 303 304 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { 305 if (!recover) { 306 hash_del(&addr->hnode); 307 kfree(addr); 308 continue; 309 } 310 addr->disp_flag = QETH_DISP_ADDR_ADD; 311 } 312 313 mutex_unlock(&card->ip_lock); 314 } 315 316 static void qeth_l3_recover_ip(struct qeth_card *card) 317 { 318 struct qeth_ipaddr *addr; 319 struct hlist_node *tmp; 320 int i; 321 int rc; 322 323 QETH_CARD_TEXT(card, 4, "recovrip"); 324 325 mutex_lock(&card->ip_lock); 326 327 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { 328 if (addr->disp_flag == QETH_DISP_ADDR_ADD) { 329 if (addr->proto == QETH_PROT_IPV4) { 330 addr->in_progress = 1; 331 mutex_unlock(&card->ip_lock); 332 rc = qeth_l3_register_addr_entry(card, addr); 333 mutex_lock(&card->ip_lock); 334 addr->in_progress = 0; 335 } else 336 rc = qeth_l3_register_addr_entry(card, addr); 337 338 if (!rc) { 339 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 340 if (addr->ref_counter < 1) 341 qeth_l3_delete_ip(card, addr); 342 } else { 343 hash_del(&addr->hnode); 344 kfree(addr); 345 } 346 } 347 } 348 349 mutex_unlock(&card->ip_lock); 350 } 351 352 static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply, 353 unsigned long data) 354 { 355 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 356 357 switch (cmd->hdr.return_code) { 358 case IPA_RC_SUCCESS: 359 return 0; 360 case IPA_RC_DUPLICATE_IP_ADDRESS: 361 return -EADDRINUSE; 362 case IPA_RC_MC_ADDR_NOT_FOUND: 363 return -ENOENT; 364 case IPA_RC_LAN_OFFLINE: 365 return -ENETDOWN; 366 default: 367 return -EIO; 368 } 369 } 370 371 static int qeth_l3_send_setdelmc(struct qeth_card *card, 372 struct qeth_ipaddr *addr, int ipacmd) 373 { 374 struct qeth_cmd_buffer *iob; 375 struct qeth_ipa_cmd *cmd; 376 377 QETH_CARD_TEXT(card, 4, "setdelmc"); 378 379 iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto, 380 IPA_DATA_SIZEOF(setdelipm)); 381 if (!iob) 382 return -ENOMEM; 383 cmd = __ipa_cmd(iob); 384 if (addr->proto == QETH_PROT_IPV6) { 385 cmd->data.setdelipm.ip = addr->u.a6.addr; 386 ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac); 387 } else { 388 cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr; 389 ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac); 390 } 391 392 return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); 393 } 394 395 static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len) 396 { 397 int i, j; 398 for (i = 0; i < 16; i++) { 399 j = (len) - (i * 8); 400 if (j >= 8) 401 netmask[i] = 0xff; 402 else if (j > 0) 403 netmask[i] = (u8)(0xFF00 >> j); 404 else 405 netmask[i] = 0; 406 } 407 } 408 409 static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set) 410 { 411 switch (addr->type) { 412 case QETH_IP_TYPE_RXIP: 413 return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; 414 case QETH_IP_TYPE_VIPA: 415 return (set) ? QETH_IPA_SETIP_VIPA_FLAG : 416 QETH_IPA_DELIP_VIPA_FLAG; 417 default: 418 return (set && addr->ipato) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; 419 } 420 } 421 422 static int qeth_l3_send_setdelip(struct qeth_card *card, 423 struct qeth_ipaddr *addr, 424 enum qeth_ipa_cmds ipacmd) 425 { 426 struct qeth_cmd_buffer *iob; 427 struct qeth_ipa_cmd *cmd; 428 __u8 netmask[16]; 429 u32 flags; 430 431 QETH_CARD_TEXT(card, 4, "setdelip"); 432 433 iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto, 434 IPA_DATA_SIZEOF(setdelip6)); 435 if (!iob) 436 return -ENOMEM; 437 cmd = __ipa_cmd(iob); 438 439 flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP); 440 QETH_CARD_TEXT_(card, 4, "flags%02X", flags); 441 442 if (addr->proto == QETH_PROT_IPV6) { 443 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, 444 sizeof(struct in6_addr)); 445 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen); 446 memcpy(cmd->data.setdelip6.mask, netmask, 447 sizeof(struct in6_addr)); 448 cmd->data.setdelip6.flags = flags; 449 } else { 450 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4); 451 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4); 452 cmd->data.setdelip4.flags = flags; 453 } 454 455 return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); 456 } 457 458 static int qeth_l3_send_setrouting(struct qeth_card *card, 459 enum qeth_routing_types type, enum qeth_prot_versions prot) 460 { 461 int rc; 462 struct qeth_ipa_cmd *cmd; 463 struct qeth_cmd_buffer *iob; 464 465 QETH_CARD_TEXT(card, 4, "setroutg"); 466 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETRTG, prot, 467 IPA_DATA_SIZEOF(setrtg)); 468 if (!iob) 469 return -ENOMEM; 470 cmd = __ipa_cmd(iob); 471 cmd->data.setrtg.type = (type); 472 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 473 474 return rc; 475 } 476 477 static int qeth_l3_correct_routing_type(struct qeth_card *card, 478 enum qeth_routing_types *type, enum qeth_prot_versions prot) 479 { 480 if (IS_IQD(card)) { 481 switch (*type) { 482 case NO_ROUTER: 483 case PRIMARY_CONNECTOR: 484 case SECONDARY_CONNECTOR: 485 case MULTICAST_ROUTER: 486 return 0; 487 default: 488 goto out_inval; 489 } 490 } else { 491 switch (*type) { 492 case NO_ROUTER: 493 case PRIMARY_ROUTER: 494 case SECONDARY_ROUTER: 495 return 0; 496 case MULTICAST_ROUTER: 497 if (qeth_is_ipafunc_supported(card, prot, 498 IPA_OSA_MC_ROUTER)) 499 return 0; 500 default: 501 goto out_inval; 502 } 503 } 504 out_inval: 505 *type = NO_ROUTER; 506 return -EINVAL; 507 } 508 509 int qeth_l3_setrouting_v4(struct qeth_card *card) 510 { 511 int rc; 512 513 QETH_CARD_TEXT(card, 3, "setrtg4"); 514 515 rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, 516 QETH_PROT_IPV4); 517 if (rc) 518 return rc; 519 520 rc = qeth_l3_send_setrouting(card, card->options.route4.type, 521 QETH_PROT_IPV4); 522 if (rc) { 523 card->options.route4.type = NO_ROUTER; 524 QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", 525 rc, CARD_DEVID(card)); 526 } 527 return rc; 528 } 529 530 int qeth_l3_setrouting_v6(struct qeth_card *card) 531 { 532 int rc = 0; 533 534 QETH_CARD_TEXT(card, 3, "setrtg6"); 535 536 if (!qeth_is_supported(card, IPA_IPV6)) 537 return 0; 538 rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, 539 QETH_PROT_IPV6); 540 if (rc) 541 return rc; 542 543 rc = qeth_l3_send_setrouting(card, card->options.route6.type, 544 QETH_PROT_IPV6); 545 if (rc) { 546 card->options.route6.type = NO_ROUTER; 547 QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", 548 rc, CARD_DEVID(card)); 549 } 550 return rc; 551 } 552 553 /* 554 * IP address takeover related functions 555 */ 556 557 /** 558 * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. 559 * 560 * Caller must hold ip_lock. 561 */ 562 void qeth_l3_update_ipato(struct qeth_card *card) 563 { 564 struct qeth_ipaddr *addr; 565 unsigned int i; 566 567 hash_for_each(card->ip_htable, i, addr, hnode) { 568 if (addr->type != QETH_IP_TYPE_NORMAL) 569 continue; 570 addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr); 571 } 572 } 573 574 static void qeth_l3_clear_ipato_list(struct qeth_card *card) 575 { 576 struct qeth_ipato_entry *ipatoe, *tmp; 577 578 mutex_lock(&card->ip_lock); 579 580 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 581 list_del(&ipatoe->entry); 582 kfree(ipatoe); 583 } 584 585 qeth_l3_update_ipato(card); 586 mutex_unlock(&card->ip_lock); 587 } 588 589 int qeth_l3_add_ipato_entry(struct qeth_card *card, 590 struct qeth_ipato_entry *new) 591 { 592 struct qeth_ipato_entry *ipatoe; 593 int rc = 0; 594 595 QETH_CARD_TEXT(card, 2, "addipato"); 596 597 mutex_lock(&card->ip_lock); 598 599 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 600 if (ipatoe->proto != new->proto) 601 continue; 602 if (!memcmp(ipatoe->addr, new->addr, 603 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && 604 (ipatoe->mask_bits == new->mask_bits)) { 605 rc = -EEXIST; 606 break; 607 } 608 } 609 610 if (!rc) { 611 list_add_tail(&new->entry, &card->ipato.entries); 612 qeth_l3_update_ipato(card); 613 } 614 615 mutex_unlock(&card->ip_lock); 616 617 return rc; 618 } 619 620 int qeth_l3_del_ipato_entry(struct qeth_card *card, 621 enum qeth_prot_versions proto, u8 *addr, 622 int mask_bits) 623 { 624 struct qeth_ipato_entry *ipatoe, *tmp; 625 int rc = -ENOENT; 626 627 QETH_CARD_TEXT(card, 2, "delipato"); 628 629 mutex_lock(&card->ip_lock); 630 631 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 632 if (ipatoe->proto != proto) 633 continue; 634 if (!memcmp(ipatoe->addr, addr, 635 (proto == QETH_PROT_IPV4)? 4:16) && 636 (ipatoe->mask_bits == mask_bits)) { 637 list_del(&ipatoe->entry); 638 qeth_l3_update_ipato(card); 639 kfree(ipatoe); 640 rc = 0; 641 } 642 } 643 644 mutex_unlock(&card->ip_lock); 645 return rc; 646 } 647 648 int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, 649 enum qeth_ip_types type, 650 enum qeth_prot_versions proto) 651 { 652 struct qeth_ipaddr addr; 653 654 qeth_l3_init_ipaddr(&addr, type, proto); 655 if (proto == QETH_PROT_IPV4) 656 memcpy(&addr.u.a4.addr, ip, 4); 657 else 658 memcpy(&addr.u.a6.addr, ip, 16); 659 660 return qeth_l3_modify_ip(card, &addr, add); 661 } 662 663 int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) 664 { 665 struct qeth_ipaddr addr; 666 unsigned int i; 667 668 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); 669 addr.u.a6.addr.s6_addr[0] = 0xfe; 670 addr.u.a6.addr.s6_addr[1] = 0x80; 671 for (i = 0; i < 8; i++) 672 addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i]; 673 674 return qeth_l3_modify_ip(card, &addr, add); 675 } 676 677 static int qeth_l3_register_addr_entry(struct qeth_card *card, 678 struct qeth_ipaddr *addr) 679 { 680 char buf[50]; 681 int rc = 0; 682 int cnt = 3; 683 684 if (card->options.sniffer) 685 return 0; 686 687 if (addr->proto == QETH_PROT_IPV4) { 688 QETH_CARD_TEXT(card, 2, "setaddr4"); 689 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); 690 } else if (addr->proto == QETH_PROT_IPV6) { 691 QETH_CARD_TEXT(card, 2, "setaddr6"); 692 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); 693 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); 694 } else { 695 QETH_CARD_TEXT(card, 2, "setaddr?"); 696 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); 697 } 698 do { 699 if (addr->is_multicast) 700 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM); 701 else 702 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP); 703 if (rc) 704 QETH_CARD_TEXT(card, 2, "failed"); 705 } while ((--cnt > 0) && rc); 706 if (rc) { 707 QETH_CARD_TEXT(card, 2, "FAILED"); 708 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); 709 dev_warn(&card->gdev->dev, 710 "Registering IP address %s failed\n", buf); 711 } 712 return rc; 713 } 714 715 static int qeth_l3_deregister_addr_entry(struct qeth_card *card, 716 struct qeth_ipaddr *addr) 717 { 718 int rc = 0; 719 720 if (card->options.sniffer) 721 return 0; 722 723 if (addr->proto == QETH_PROT_IPV4) { 724 QETH_CARD_TEXT(card, 2, "deladdr4"); 725 QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); 726 } else if (addr->proto == QETH_PROT_IPV6) { 727 QETH_CARD_TEXT(card, 2, "deladdr6"); 728 QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); 729 QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); 730 } else { 731 QETH_CARD_TEXT(card, 2, "deladdr?"); 732 QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); 733 } 734 if (addr->is_multicast) 735 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); 736 else 737 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP); 738 if (rc) 739 QETH_CARD_TEXT(card, 2, "failed"); 740 741 return rc; 742 } 743 744 static int qeth_l3_setadapter_parms(struct qeth_card *card) 745 { 746 int rc = 0; 747 748 QETH_CARD_TEXT(card, 2, "setadprm"); 749 750 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { 751 rc = qeth_setadpparms_change_macaddr(card); 752 if (rc) 753 dev_warn(&card->gdev->dev, "Reading the adapter MAC" 754 " address failed\n"); 755 } 756 757 return rc; 758 } 759 760 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) 761 { 762 int rc; 763 764 QETH_CARD_TEXT(card, 3, "ipaarp"); 765 766 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 767 dev_info(&card->gdev->dev, 768 "ARP processing not supported on %s!\n", 769 QETH_CARD_IFNAME(card)); 770 return 0; 771 } 772 rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, 773 IPA_CMD_ASS_START, NULL); 774 if (rc) { 775 dev_warn(&card->gdev->dev, 776 "Starting ARP processing support for %s failed\n", 777 QETH_CARD_IFNAME(card)); 778 } 779 return rc; 780 } 781 782 static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) 783 { 784 int rc; 785 786 QETH_CARD_TEXT(card, 3, "stsrcmac"); 787 788 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { 789 dev_info(&card->gdev->dev, 790 "Inbound source MAC-address not supported on %s\n", 791 QETH_CARD_IFNAME(card)); 792 return -EOPNOTSUPP; 793 } 794 795 rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC, 796 IPA_CMD_ASS_START, NULL); 797 if (rc) 798 dev_warn(&card->gdev->dev, 799 "Starting source MAC-address support for %s failed\n", 800 QETH_CARD_IFNAME(card)); 801 return rc; 802 } 803 804 static int qeth_l3_start_ipa_vlan(struct qeth_card *card) 805 { 806 int rc = 0; 807 808 QETH_CARD_TEXT(card, 3, "strtvlan"); 809 810 if (!qeth_is_supported(card, IPA_FULL_VLAN)) { 811 dev_info(&card->gdev->dev, 812 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); 813 return -EOPNOTSUPP; 814 } 815 816 rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO, 817 IPA_CMD_ASS_START, NULL); 818 if (rc) { 819 dev_warn(&card->gdev->dev, 820 "Starting VLAN support for %s failed\n", 821 QETH_CARD_IFNAME(card)); 822 } else { 823 dev_info(&card->gdev->dev, "VLAN enabled\n"); 824 } 825 return rc; 826 } 827 828 static int qeth_l3_start_ipa_multicast(struct qeth_card *card) 829 { 830 int rc; 831 832 QETH_CARD_TEXT(card, 3, "stmcast"); 833 834 if (!qeth_is_supported(card, IPA_MULTICASTING)) { 835 dev_info(&card->gdev->dev, 836 "Multicast not supported on %s\n", 837 QETH_CARD_IFNAME(card)); 838 return -EOPNOTSUPP; 839 } 840 841 rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING, 842 IPA_CMD_ASS_START, NULL); 843 if (rc) { 844 dev_warn(&card->gdev->dev, 845 "Starting multicast support for %s failed\n", 846 QETH_CARD_IFNAME(card)); 847 } else { 848 dev_info(&card->gdev->dev, "Multicast enabled\n"); 849 card->dev->flags |= IFF_MULTICAST; 850 } 851 return rc; 852 } 853 854 static int qeth_l3_softsetup_ipv6(struct qeth_card *card) 855 { 856 u32 ipv6_data = 3; 857 int rc; 858 859 QETH_CARD_TEXT(card, 3, "softipv6"); 860 861 if (IS_IQD(card)) 862 goto out; 863 864 rc = qeth_send_simple_setassparms(card, IPA_IPV6, IPA_CMD_ASS_START, 865 &ipv6_data); 866 if (rc) { 867 dev_err(&card->gdev->dev, 868 "Activating IPv6 support for %s failed\n", 869 QETH_CARD_IFNAME(card)); 870 return rc; 871 } 872 rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START, 873 NULL); 874 if (rc) { 875 dev_err(&card->gdev->dev, 876 "Activating IPv6 support for %s failed\n", 877 QETH_CARD_IFNAME(card)); 878 return rc; 879 } 880 rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU, 881 IPA_CMD_ASS_START, NULL); 882 if (rc) { 883 dev_warn(&card->gdev->dev, 884 "Enabling the passthrough mode for %s failed\n", 885 QETH_CARD_IFNAME(card)); 886 return rc; 887 } 888 out: 889 dev_info(&card->gdev->dev, "IPV6 enabled\n"); 890 return 0; 891 } 892 893 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) 894 { 895 QETH_CARD_TEXT(card, 3, "strtipv6"); 896 897 if (!qeth_is_supported(card, IPA_IPV6)) { 898 dev_info(&card->gdev->dev, 899 "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); 900 return 0; 901 } 902 return qeth_l3_softsetup_ipv6(card); 903 } 904 905 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) 906 { 907 u32 filter_data = 1; 908 int rc; 909 910 QETH_CARD_TEXT(card, 3, "stbrdcst"); 911 card->info.broadcast_capable = 0; 912 if (!qeth_is_supported(card, IPA_FILTERING)) { 913 dev_info(&card->gdev->dev, 914 "Broadcast not supported on %s\n", 915 QETH_CARD_IFNAME(card)); 916 rc = -EOPNOTSUPP; 917 goto out; 918 } 919 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 920 IPA_CMD_ASS_START, NULL); 921 if (rc) { 922 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " 923 "%s failed\n", QETH_CARD_IFNAME(card)); 924 goto out; 925 } 926 927 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 928 IPA_CMD_ASS_CONFIGURE, &filter_data); 929 if (rc) { 930 dev_warn(&card->gdev->dev, 931 "Setting up broadcast filtering for %s failed\n", 932 QETH_CARD_IFNAME(card)); 933 goto out; 934 } 935 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; 936 dev_info(&card->gdev->dev, "Broadcast enabled\n"); 937 rc = qeth_send_simple_setassparms(card, IPA_FILTERING, 938 IPA_CMD_ASS_ENABLE, &filter_data); 939 if (rc) { 940 dev_warn(&card->gdev->dev, "Setting up broadcast echo " 941 "filtering for %s failed\n", QETH_CARD_IFNAME(card)); 942 goto out; 943 } 944 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; 945 out: 946 if (card->info.broadcast_capable) 947 card->dev->flags |= IFF_BROADCAST; 948 else 949 card->dev->flags &= ~IFF_BROADCAST; 950 return rc; 951 } 952 953 static int qeth_l3_start_ipassists(struct qeth_card *card) 954 { 955 QETH_CARD_TEXT(card, 3, "strtipas"); 956 957 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 958 qeth_l3_start_ipa_source_mac(card); /* go on*/ 959 qeth_l3_start_ipa_vlan(card); /* go on*/ 960 qeth_l3_start_ipa_multicast(card); /* go on*/ 961 qeth_l3_start_ipa_ipv6(card); /* go on*/ 962 qeth_l3_start_ipa_broadcast(card); /* go on*/ 963 return 0; 964 } 965 966 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, 967 struct qeth_reply *reply, unsigned long data) 968 { 969 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 970 971 if (cmd->hdr.return_code) 972 return -EIO; 973 974 ether_addr_copy(card->dev->dev_addr, 975 cmd->data.create_destroy_addr.unique_id); 976 return 0; 977 } 978 979 static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) 980 { 981 int rc = 0; 982 struct qeth_cmd_buffer *iob; 983 struct qeth_ipa_cmd *cmd; 984 985 QETH_CARD_TEXT(card, 2, "hsrmac"); 986 987 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6, 988 IPA_DATA_SIZEOF(create_destroy_addr)); 989 if (!iob) 990 return -ENOMEM; 991 cmd = __ipa_cmd(iob); 992 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 993 card->info.unique_id; 994 995 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, 996 NULL); 997 return rc; 998 } 999 1000 static int qeth_l3_get_unique_id_cb(struct qeth_card *card, 1001 struct qeth_reply *reply, unsigned long data) 1002 { 1003 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 1004 1005 if (cmd->hdr.return_code == 0) { 1006 card->info.unique_id = *((__u16 *) 1007 &cmd->data.create_destroy_addr.unique_id[6]); 1008 return 0; 1009 } 1010 1011 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1012 UNIQUE_ID_NOT_BY_CARD; 1013 dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n"); 1014 return -EIO; 1015 } 1016 1017 static int qeth_l3_get_unique_id(struct qeth_card *card) 1018 { 1019 int rc = 0; 1020 struct qeth_cmd_buffer *iob; 1021 struct qeth_ipa_cmd *cmd; 1022 1023 QETH_CARD_TEXT(card, 2, "guniqeid"); 1024 1025 if (!qeth_is_supported(card, IPA_IPV6)) { 1026 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1027 UNIQUE_ID_NOT_BY_CARD; 1028 return 0; 1029 } 1030 1031 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6, 1032 IPA_DATA_SIZEOF(create_destroy_addr)); 1033 if (!iob) 1034 return -ENOMEM; 1035 cmd = __ipa_cmd(iob); 1036 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1037 card->info.unique_id; 1038 1039 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL); 1040 return rc; 1041 } 1042 1043 static int 1044 qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, 1045 unsigned long data) 1046 { 1047 struct qeth_ipa_cmd *cmd; 1048 __u16 rc; 1049 1050 QETH_CARD_TEXT(card, 2, "diastrcb"); 1051 1052 cmd = (struct qeth_ipa_cmd *)data; 1053 rc = cmd->hdr.return_code; 1054 if (rc) 1055 QETH_CARD_TEXT_(card, 2, "dxter%x", rc); 1056 switch (cmd->data.diagass.action) { 1057 case QETH_DIAGS_CMD_TRACE_QUERY: 1058 break; 1059 case QETH_DIAGS_CMD_TRACE_DISABLE: 1060 switch (rc) { 1061 case 0: 1062 case IPA_RC_INVALID_SUBCMD: 1063 card->info.promisc_mode = SET_PROMISC_MODE_OFF; 1064 dev_info(&card->gdev->dev, "The HiperSockets network " 1065 "traffic analyzer is deactivated\n"); 1066 break; 1067 default: 1068 break; 1069 } 1070 break; 1071 case QETH_DIAGS_CMD_TRACE_ENABLE: 1072 switch (rc) { 1073 case 0: 1074 card->info.promisc_mode = SET_PROMISC_MODE_ON; 1075 dev_info(&card->gdev->dev, "The HiperSockets network " 1076 "traffic analyzer is activated\n"); 1077 break; 1078 case IPA_RC_HARDWARE_AUTH_ERROR: 1079 dev_warn(&card->gdev->dev, "The device is not " 1080 "authorized to run as a HiperSockets network " 1081 "traffic analyzer\n"); 1082 break; 1083 case IPA_RC_TRACE_ALREADY_ACTIVE: 1084 dev_warn(&card->gdev->dev, "A HiperSockets " 1085 "network traffic analyzer is already " 1086 "active in the HiperSockets LAN\n"); 1087 break; 1088 default: 1089 break; 1090 } 1091 break; 1092 default: 1093 QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n", 1094 cmd->data.diagass.action, CARD_DEVID(card)); 1095 } 1096 1097 return rc ? -EIO : 0; 1098 } 1099 1100 static int 1101 qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) 1102 { 1103 struct qeth_cmd_buffer *iob; 1104 struct qeth_ipa_cmd *cmd; 1105 1106 QETH_CARD_TEXT(card, 2, "diagtrac"); 1107 1108 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRACE, 0); 1109 if (!iob) 1110 return -ENOMEM; 1111 cmd = __ipa_cmd(iob); 1112 cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET; 1113 cmd->data.diagass.action = diags_cmd; 1114 return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); 1115 } 1116 1117 static void 1118 qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) 1119 { 1120 struct ip_mc_list *im4; 1121 struct qeth_ipaddr *tmp, *ipm; 1122 1123 QETH_CARD_TEXT(card, 4, "addmc"); 1124 1125 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1126 if (!tmp) 1127 return; 1128 1129 for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; 1130 im4 = rcu_dereference(im4->next_rcu)) { 1131 tmp->u.a4.addr = im4->multiaddr; 1132 tmp->is_multicast = 1; 1133 1134 ipm = qeth_l3_find_addr_by_ip(card, tmp); 1135 if (ipm) { 1136 /* for mcast, by-IP match means full match */ 1137 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 1138 } else { 1139 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1140 if (!ipm) 1141 continue; 1142 1143 ipm->u.a4.addr = im4->multiaddr; 1144 ipm->is_multicast = 1; 1145 ipm->disp_flag = QETH_DISP_ADDR_ADD; 1146 hash_add(card->ip_mc_htable, 1147 &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); 1148 } 1149 } 1150 1151 kfree(tmp); 1152 } 1153 1154 /* called with rcu_read_lock */ 1155 static void qeth_l3_add_vlan_mc(struct qeth_card *card) 1156 { 1157 struct in_device *in_dev; 1158 u16 vid; 1159 1160 QETH_CARD_TEXT(card, 4, "addmcvl"); 1161 1162 if (!qeth_is_supported(card, IPA_FULL_VLAN)) 1163 return; 1164 1165 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { 1166 struct net_device *netdev; 1167 1168 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), 1169 vid); 1170 if (netdev == NULL || 1171 !(netdev->flags & IFF_UP)) 1172 continue; 1173 in_dev = __in_dev_get_rcu(netdev); 1174 if (!in_dev) 1175 continue; 1176 qeth_l3_add_mc_to_hash(card, in_dev); 1177 } 1178 } 1179 1180 static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) 1181 { 1182 struct in_device *in4_dev; 1183 1184 QETH_CARD_TEXT(card, 4, "chkmcv4"); 1185 1186 rcu_read_lock(); 1187 in4_dev = __in_dev_get_rcu(card->dev); 1188 if (in4_dev == NULL) 1189 goto unlock; 1190 qeth_l3_add_mc_to_hash(card, in4_dev); 1191 qeth_l3_add_vlan_mc(card); 1192 unlock: 1193 rcu_read_unlock(); 1194 } 1195 1196 static void qeth_l3_add_mc6_to_hash(struct qeth_card *card, 1197 struct inet6_dev *in6_dev) 1198 { 1199 struct qeth_ipaddr *ipm; 1200 struct ifmcaddr6 *im6; 1201 struct qeth_ipaddr *tmp; 1202 1203 QETH_CARD_TEXT(card, 4, "addmc6"); 1204 1205 tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1206 if (!tmp) 1207 return; 1208 1209 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { 1210 tmp->u.a6.addr = im6->mca_addr; 1211 tmp->is_multicast = 1; 1212 1213 ipm = qeth_l3_find_addr_by_ip(card, tmp); 1214 if (ipm) { 1215 /* for mcast, by-IP match means full match */ 1216 ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 1217 continue; 1218 } 1219 1220 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1221 if (!ipm) 1222 continue; 1223 1224 ipm->u.a6.addr = im6->mca_addr; 1225 ipm->is_multicast = 1; 1226 ipm->disp_flag = QETH_DISP_ADDR_ADD; 1227 hash_add(card->ip_mc_htable, 1228 &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); 1229 1230 } 1231 kfree(tmp); 1232 } 1233 1234 /* called with rcu_read_lock */ 1235 static void qeth_l3_add_vlan_mc6(struct qeth_card *card) 1236 { 1237 struct inet6_dev *in_dev; 1238 u16 vid; 1239 1240 QETH_CARD_TEXT(card, 4, "admc6vl"); 1241 1242 if (!qeth_is_supported(card, IPA_FULL_VLAN)) 1243 return; 1244 1245 for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { 1246 struct net_device *netdev; 1247 1248 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), 1249 vid); 1250 if (netdev == NULL || 1251 !(netdev->flags & IFF_UP)) 1252 continue; 1253 in_dev = in6_dev_get(netdev); 1254 if (!in_dev) 1255 continue; 1256 read_lock_bh(&in_dev->lock); 1257 qeth_l3_add_mc6_to_hash(card, in_dev); 1258 read_unlock_bh(&in_dev->lock); 1259 in6_dev_put(in_dev); 1260 } 1261 } 1262 1263 static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) 1264 { 1265 struct inet6_dev *in6_dev; 1266 1267 QETH_CARD_TEXT(card, 4, "chkmcv6"); 1268 1269 if (!qeth_is_supported(card, IPA_IPV6)) 1270 return ; 1271 in6_dev = in6_dev_get(card->dev); 1272 if (!in6_dev) 1273 return; 1274 1275 rcu_read_lock(); 1276 read_lock_bh(&in6_dev->lock); 1277 qeth_l3_add_mc6_to_hash(card, in6_dev); 1278 qeth_l3_add_vlan_mc6(card); 1279 read_unlock_bh(&in6_dev->lock); 1280 rcu_read_unlock(); 1281 in6_dev_put(in6_dev); 1282 } 1283 1284 static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, 1285 __be16 proto, u16 vid) 1286 { 1287 struct qeth_card *card = dev->ml_priv; 1288 1289 set_bit(vid, card->active_vlans); 1290 return 0; 1291 } 1292 1293 static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, 1294 __be16 proto, u16 vid) 1295 { 1296 struct qeth_card *card = dev->ml_priv; 1297 1298 QETH_CARD_TEXT_(card, 4, "kid:%d", vid); 1299 1300 clear_bit(vid, card->active_vlans); 1301 qeth_l3_set_rx_mode(dev); 1302 return 0; 1303 } 1304 1305 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 1306 struct qeth_hdr *hdr) 1307 { 1308 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; 1309 struct net_device *dev = skb->dev; 1310 1311 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { 1312 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, 1313 "FAKELL", skb->len); 1314 return; 1315 } 1316 1317 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { 1318 u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : 1319 ETH_P_IP; 1320 unsigned char tg_addr[ETH_ALEN]; 1321 1322 skb_reset_network_header(skb); 1323 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) { 1324 case QETH_CAST_MULTICAST: 1325 if (prot == ETH_P_IP) 1326 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 1327 else 1328 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 1329 QETH_CARD_STAT_INC(card, rx_multicast); 1330 break; 1331 case QETH_CAST_BROADCAST: 1332 ether_addr_copy(tg_addr, card->dev->broadcast); 1333 QETH_CARD_STAT_INC(card, rx_multicast); 1334 break; 1335 default: 1336 if (card->options.sniffer) 1337 skb->pkt_type = PACKET_OTHERHOST; 1338 ether_addr_copy(tg_addr, card->dev->dev_addr); 1339 } 1340 1341 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 1342 card->dev->header_ops->create(skb, card->dev, prot, 1343 tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac, 1344 skb->len); 1345 else 1346 card->dev->header_ops->create(skb, card->dev, prot, 1347 tg_addr, "FAKELL", skb->len); 1348 } 1349 1350 /* copy VLAN tag from hdr into skb */ 1351 if (!card->options.sniffer && 1352 (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 1353 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 1354 u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 1355 hdr->hdr.l3.vlan_id : 1356 hdr->hdr.l3.next_hop.rx.vlan_id; 1357 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 1358 } 1359 1360 qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags); 1361 } 1362 1363 static int qeth_l3_process_inbound_buffer(struct qeth_card *card, 1364 int budget, int *done) 1365 { 1366 int work_done = 0; 1367 struct sk_buff *skb; 1368 struct qeth_hdr *hdr; 1369 unsigned int len; 1370 1371 *done = 0; 1372 WARN_ON_ONCE(!budget); 1373 while (budget) { 1374 skb = qeth_core_get_next_skb(card, 1375 &card->qdio.in_q->bufs[card->rx.b_index], 1376 &card->rx.b_element, &card->rx.e_offset, &hdr); 1377 if (!skb) { 1378 *done = 1; 1379 break; 1380 } 1381 switch (hdr->hdr.l3.id) { 1382 case QETH_HEADER_TYPE_LAYER3: 1383 qeth_l3_rebuild_skb(card, skb, hdr); 1384 /* fall through */ 1385 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ 1386 skb->protocol = eth_type_trans(skb, skb->dev); 1387 len = skb->len; 1388 napi_gro_receive(&card->napi, skb); 1389 break; 1390 default: 1391 dev_kfree_skb_any(skb); 1392 QETH_CARD_TEXT(card, 3, "inbunkno"); 1393 QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); 1394 continue; 1395 } 1396 work_done++; 1397 budget--; 1398 QETH_CARD_STAT_INC(card, rx_packets); 1399 QETH_CARD_STAT_ADD(card, rx_bytes, len); 1400 } 1401 return work_done; 1402 } 1403 1404 static void qeth_l3_stop_card(struct qeth_card *card) 1405 { 1406 QETH_CARD_TEXT(card, 2, "stopcard"); 1407 1408 qeth_set_allowed_threads(card, 0, 1); 1409 1410 cancel_work_sync(&card->rx_mode_work); 1411 qeth_l3_drain_rx_mode_cache(card); 1412 1413 if (card->options.sniffer && 1414 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) 1415 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); 1416 1417 if (card->state == CARD_STATE_SOFTSETUP) { 1418 qeth_l3_clear_ip_htable(card, 1); 1419 qeth_clear_ipacmd_list(card); 1420 card->state = CARD_STATE_HARDSETUP; 1421 } 1422 if (card->state == CARD_STATE_HARDSETUP) { 1423 qeth_qdio_clear_card(card, 0); 1424 qeth_drain_output_queues(card); 1425 qeth_clear_working_pool_list(card); 1426 card->state = CARD_STATE_DOWN; 1427 } 1428 1429 flush_workqueue(card->event_wq); 1430 } 1431 1432 static void qeth_l3_set_promisc_mode(struct qeth_card *card) 1433 { 1434 bool enable = card->dev->flags & IFF_PROMISC; 1435 1436 if (card->info.promisc_mode == enable) 1437 return; 1438 1439 if (IS_VM_NIC(card)) { /* Guestlan trace */ 1440 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 1441 qeth_setadp_promisc_mode(card, enable); 1442 } else if (card->options.sniffer && /* HiperSockets trace */ 1443 qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 1444 if (enable) { 1445 QETH_CARD_TEXT(card, 3, "+promisc"); 1446 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); 1447 } else { 1448 QETH_CARD_TEXT(card, 3, "-promisc"); 1449 qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); 1450 } 1451 } 1452 } 1453 1454 static void qeth_l3_rx_mode_work(struct work_struct *work) 1455 { 1456 struct qeth_card *card = container_of(work, struct qeth_card, 1457 rx_mode_work); 1458 struct qeth_ipaddr *addr; 1459 struct hlist_node *tmp; 1460 int i, rc; 1461 1462 QETH_CARD_TEXT(card, 3, "setmulti"); 1463 1464 if (!card->options.sniffer) { 1465 qeth_l3_add_multicast_ipv4(card); 1466 qeth_l3_add_multicast_ipv6(card); 1467 1468 hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { 1469 switch (addr->disp_flag) { 1470 case QETH_DISP_ADDR_DELETE: 1471 rc = qeth_l3_deregister_addr_entry(card, addr); 1472 if (!rc || rc == -ENOENT) { 1473 hash_del(&addr->hnode); 1474 kfree(addr); 1475 } 1476 break; 1477 case QETH_DISP_ADDR_ADD: 1478 rc = qeth_l3_register_addr_entry(card, addr); 1479 if (rc && rc != -ENETDOWN) { 1480 hash_del(&addr->hnode); 1481 kfree(addr); 1482 break; 1483 } 1484 addr->ref_counter = 1; 1485 /* fall through */ 1486 default: 1487 /* for next call to set_rx_mode(): */ 1488 addr->disp_flag = QETH_DISP_ADDR_DELETE; 1489 } 1490 } 1491 } 1492 1493 qeth_l3_set_promisc_mode(card); 1494 } 1495 1496 static int qeth_l3_arp_makerc(u16 rc) 1497 { 1498 switch (rc) { 1499 case IPA_RC_SUCCESS: 1500 return 0; 1501 case QETH_IPA_ARP_RC_NOTSUPP: 1502 case QETH_IPA_ARP_RC_Q_NOTSUPP: 1503 return -EOPNOTSUPP; 1504 case QETH_IPA_ARP_RC_OUT_OF_RANGE: 1505 return -EINVAL; 1506 case QETH_IPA_ARP_RC_Q_NO_DATA: 1507 return -ENOENT; 1508 default: 1509 return -EIO; 1510 } 1511 } 1512 1513 static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply, 1514 unsigned long data) 1515 { 1516 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 1517 1518 qeth_setassparms_cb(card, reply, data); 1519 return qeth_l3_arp_makerc(cmd->hdr.return_code); 1520 } 1521 1522 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) 1523 { 1524 struct qeth_cmd_buffer *iob; 1525 int rc; 1526 1527 QETH_CARD_TEXT(card, 3, "arpstnoe"); 1528 1529 /* 1530 * currently GuestLAN only supports the ARP assist function 1531 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; 1532 * thus we say EOPNOTSUPP for this ARP function 1533 */ 1534 if (IS_VM_NIC(card)) 1535 return -EOPNOTSUPP; 1536 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1537 return -EOPNOTSUPP; 1538 } 1539 1540 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1541 IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 1542 SETASS_DATA_SIZEOF(flags_32bit), 1543 QETH_PROT_IPV4); 1544 if (!iob) 1545 return -ENOMEM; 1546 1547 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries; 1548 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); 1549 if (rc) 1550 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n", 1551 CARD_DEVID(card), rc); 1552 return rc; 1553 } 1554 1555 static __u32 get_arp_entry_size(struct qeth_card *card, 1556 struct qeth_arp_query_data *qdata, 1557 struct qeth_arp_entrytype *type, __u8 strip_entries) 1558 { 1559 __u32 rc; 1560 __u8 is_hsi; 1561 1562 is_hsi = qdata->reply_bits == 5; 1563 if (type->ip == QETHARP_IP_ADDR_V4) { 1564 QETH_CARD_TEXT(card, 4, "arpev4"); 1565 if (strip_entries) { 1566 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) : 1567 sizeof(struct qeth_arp_qi_entry7_short); 1568 } else { 1569 rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) : 1570 sizeof(struct qeth_arp_qi_entry7); 1571 } 1572 } else if (type->ip == QETHARP_IP_ADDR_V6) { 1573 QETH_CARD_TEXT(card, 4, "arpev6"); 1574 if (strip_entries) { 1575 rc = is_hsi ? 1576 sizeof(struct qeth_arp_qi_entry5_short_ipv6) : 1577 sizeof(struct qeth_arp_qi_entry7_short_ipv6); 1578 } else { 1579 rc = is_hsi ? 1580 sizeof(struct qeth_arp_qi_entry5_ipv6) : 1581 sizeof(struct qeth_arp_qi_entry7_ipv6); 1582 } 1583 } else { 1584 QETH_CARD_TEXT(card, 4, "arpinv"); 1585 rc = 0; 1586 } 1587 1588 return rc; 1589 } 1590 1591 static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot) 1592 { 1593 return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) || 1594 (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6); 1595 } 1596 1597 static int qeth_l3_arp_query_cb(struct qeth_card *card, 1598 struct qeth_reply *reply, unsigned long data) 1599 { 1600 struct qeth_ipa_cmd *cmd; 1601 struct qeth_arp_query_data *qdata; 1602 struct qeth_arp_query_info *qinfo; 1603 int e; 1604 int entrybytes_done; 1605 int stripped_bytes; 1606 __u8 do_strip_entries; 1607 1608 QETH_CARD_TEXT(card, 3, "arpquecb"); 1609 1610 qinfo = (struct qeth_arp_query_info *) reply->param; 1611 cmd = (struct qeth_ipa_cmd *) data; 1612 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version); 1613 if (cmd->hdr.return_code) { 1614 QETH_CARD_TEXT(card, 4, "arpcberr"); 1615 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); 1616 return qeth_l3_arp_makerc(cmd->hdr.return_code); 1617 } 1618 if (cmd->data.setassparms.hdr.return_code) { 1619 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 1620 QETH_CARD_TEXT(card, 4, "setaperr"); 1621 QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); 1622 return qeth_l3_arp_makerc(cmd->hdr.return_code); 1623 } 1624 qdata = &cmd->data.setassparms.data.query_arp; 1625 QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); 1626 1627 do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0; 1628 stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0; 1629 entrybytes_done = 0; 1630 for (e = 0; e < qdata->no_entries; ++e) { 1631 char *cur_entry; 1632 __u32 esize; 1633 struct qeth_arp_entrytype *etype; 1634 1635 cur_entry = &qdata->data + entrybytes_done; 1636 etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type; 1637 if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) { 1638 QETH_CARD_TEXT(card, 4, "pmis"); 1639 QETH_CARD_TEXT_(card, 4, "%i", etype->ip); 1640 break; 1641 } 1642 esize = get_arp_entry_size(card, qdata, etype, 1643 do_strip_entries); 1644 QETH_CARD_TEXT_(card, 5, "esz%i", esize); 1645 if (!esize) 1646 break; 1647 1648 if ((qinfo->udata_len - qinfo->udata_offset) < esize) { 1649 QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC); 1650 memset(qinfo->udata, 0, 4); 1651 return -ENOSPC; 1652 } 1653 1654 memcpy(qinfo->udata + qinfo->udata_offset, 1655 &qdata->data + entrybytes_done + stripped_bytes, 1656 esize); 1657 entrybytes_done += esize + stripped_bytes; 1658 qinfo->udata_offset += esize; 1659 ++qinfo->no_entries; 1660 } 1661 /* check if all replies received ... */ 1662 if (cmd->data.setassparms.hdr.seq_no < 1663 cmd->data.setassparms.hdr.number_of_replies) 1664 return 1; 1665 QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries); 1666 memcpy(qinfo->udata, &qinfo->no_entries, 4); 1667 /* keep STRIP_ENTRIES flag so the user program can distinguish 1668 * stripped entries from normal ones */ 1669 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 1670 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; 1671 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); 1672 QETH_CARD_TEXT_(card, 4, "rc%i", 0); 1673 return 0; 1674 } 1675 1676 static int qeth_l3_query_arp_cache_info(struct qeth_card *card, 1677 enum qeth_prot_versions prot, 1678 struct qeth_arp_query_info *qinfo) 1679 { 1680 struct qeth_cmd_buffer *iob; 1681 struct qeth_ipa_cmd *cmd; 1682 int rc; 1683 1684 QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); 1685 1686 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1687 IPA_CMD_ASS_ARP_QUERY_INFO, 1688 SETASS_DATA_SIZEOF(query_arp), prot); 1689 if (!iob) 1690 return -ENOMEM; 1691 cmd = __ipa_cmd(iob); 1692 cmd->data.setassparms.data.query_arp.request_bits = 0x000F; 1693 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo); 1694 if (rc) 1695 QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n", 1696 CARD_DEVID(card), rc); 1697 return rc; 1698 } 1699 1700 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) 1701 { 1702 struct qeth_arp_query_info qinfo = {0, }; 1703 int rc; 1704 1705 QETH_CARD_TEXT(card, 3, "arpquery"); 1706 1707 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ 1708 IPA_ARP_PROCESSING)) { 1709 QETH_CARD_TEXT(card, 3, "arpqnsup"); 1710 rc = -EOPNOTSUPP; 1711 goto out; 1712 } 1713 /* get size of userspace buffer and mask_bits -> 6 bytes */ 1714 if (copy_from_user(&qinfo, udata, 6)) { 1715 rc = -EFAULT; 1716 goto out; 1717 } 1718 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 1719 if (!qinfo.udata) { 1720 rc = -ENOMEM; 1721 goto out; 1722 } 1723 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; 1724 rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo); 1725 if (rc) { 1726 if (copy_to_user(udata, qinfo.udata, 4)) 1727 rc = -EFAULT; 1728 goto free_and_out; 1729 } 1730 if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { 1731 /* fails in case of GuestLAN QDIO mode */ 1732 qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo); 1733 } 1734 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) { 1735 QETH_CARD_TEXT(card, 4, "qactf"); 1736 rc = -EFAULT; 1737 goto free_and_out; 1738 } 1739 QETH_CARD_TEXT(card, 4, "qacts"); 1740 1741 free_and_out: 1742 kfree(qinfo.udata); 1743 out: 1744 return rc; 1745 } 1746 1747 static int qeth_l3_arp_modify_entry(struct qeth_card *card, 1748 struct qeth_arp_cache_entry *entry, 1749 enum qeth_arp_process_subcmds arp_cmd) 1750 { 1751 struct qeth_arp_cache_entry *cmd_entry; 1752 struct qeth_cmd_buffer *iob; 1753 int rc; 1754 1755 if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY) 1756 QETH_CARD_TEXT(card, 3, "arpadd"); 1757 else 1758 QETH_CARD_TEXT(card, 3, "arpdel"); 1759 1760 /* 1761 * currently GuestLAN only supports the ARP assist function 1762 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; 1763 * thus we say EOPNOTSUPP for this ARP function 1764 */ 1765 if (IS_VM_NIC(card)) 1766 return -EOPNOTSUPP; 1767 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1768 return -EOPNOTSUPP; 1769 } 1770 1771 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd, 1772 SETASS_DATA_SIZEOF(arp_entry), 1773 QETH_PROT_IPV4); 1774 if (!iob) 1775 return -ENOMEM; 1776 1777 cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry; 1778 ether_addr_copy(cmd_entry->macaddr, entry->macaddr); 1779 memcpy(cmd_entry->ipaddr, entry->ipaddr, 4); 1780 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); 1781 if (rc) 1782 QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n", 1783 arp_cmd, CARD_DEVID(card), rc); 1784 return rc; 1785 } 1786 1787 static int qeth_l3_arp_flush_cache(struct qeth_card *card) 1788 { 1789 struct qeth_cmd_buffer *iob; 1790 int rc; 1791 1792 QETH_CARD_TEXT(card, 3, "arpflush"); 1793 1794 /* 1795 * currently GuestLAN only supports the ARP assist function 1796 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; 1797 * thus we say EOPNOTSUPP for this ARP function 1798 */ 1799 if (IS_VM_NIC(card) || IS_IQD(card)) 1800 return -EOPNOTSUPP; 1801 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1802 return -EOPNOTSUPP; 1803 } 1804 1805 iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 1806 IPA_CMD_ASS_ARP_FLUSH_CACHE, 0, 1807 QETH_PROT_IPV4); 1808 if (!iob) 1809 return -ENOMEM; 1810 1811 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); 1812 if (rc) 1813 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n", 1814 CARD_DEVID(card), rc); 1815 return rc; 1816 } 1817 1818 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1819 { 1820 struct qeth_card *card = dev->ml_priv; 1821 struct qeth_arp_cache_entry arp_entry; 1822 enum qeth_arp_process_subcmds arp_cmd; 1823 int rc = 0; 1824 1825 switch (cmd) { 1826 case SIOC_QETH_ARP_SET_NO_ENTRIES: 1827 if (!capable(CAP_NET_ADMIN)) { 1828 rc = -EPERM; 1829 break; 1830 } 1831 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue); 1832 break; 1833 case SIOC_QETH_ARP_QUERY_INFO: 1834 if (!capable(CAP_NET_ADMIN)) { 1835 rc = -EPERM; 1836 break; 1837 } 1838 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); 1839 break; 1840 case SIOC_QETH_ARP_ADD_ENTRY: 1841 case SIOC_QETH_ARP_REMOVE_ENTRY: 1842 if (!capable(CAP_NET_ADMIN)) 1843 return -EPERM; 1844 if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry))) 1845 return -EFAULT; 1846 1847 arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ? 1848 IPA_CMD_ASS_ARP_ADD_ENTRY : 1849 IPA_CMD_ASS_ARP_REMOVE_ENTRY; 1850 return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd); 1851 case SIOC_QETH_ARP_FLUSH_CACHE: 1852 if (!capable(CAP_NET_ADMIN)) { 1853 rc = -EPERM; 1854 break; 1855 } 1856 rc = qeth_l3_arp_flush_cache(card); 1857 break; 1858 default: 1859 rc = -EOPNOTSUPP; 1860 } 1861 return rc; 1862 } 1863 1864 static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst, 1865 int ipv) 1866 { 1867 struct neighbour *n = NULL; 1868 1869 if (dst) 1870 n = dst_neigh_lookup_skb(dst, skb); 1871 1872 if (n) { 1873 int cast_type = n->type; 1874 1875 neigh_release(n); 1876 if ((cast_type == RTN_BROADCAST) || 1877 (cast_type == RTN_MULTICAST) || 1878 (cast_type == RTN_ANYCAST)) 1879 return cast_type; 1880 return RTN_UNICAST; 1881 } 1882 1883 /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ 1884 switch (ipv) { 1885 case 4: 1886 if (ipv4_is_lbcast(ip_hdr(skb)->daddr)) 1887 return RTN_BROADCAST; 1888 return ipv4_is_multicast(ip_hdr(skb)->daddr) ? 1889 RTN_MULTICAST : RTN_UNICAST; 1890 case 6: 1891 return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? 1892 RTN_MULTICAST : RTN_UNICAST; 1893 default: 1894 /* ... and MAC address */ 1895 return qeth_get_ether_cast_type(skb); 1896 } 1897 } 1898 1899 static int qeth_l3_get_cast_type(struct sk_buff *skb) 1900 { 1901 int ipv = qeth_get_ip_version(skb); 1902 struct dst_entry *dst; 1903 int cast_type; 1904 1905 rcu_read_lock(); 1906 dst = qeth_dst_check_rcu(skb, ipv); 1907 cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv); 1908 rcu_read_unlock(); 1909 1910 return cast_type; 1911 } 1912 1913 static u8 qeth_l3_cast_type_to_flag(int cast_type) 1914 { 1915 if (cast_type == RTN_MULTICAST) 1916 return QETH_CAST_MULTICAST; 1917 if (cast_type == RTN_ANYCAST) 1918 return QETH_CAST_ANYCAST; 1919 if (cast_type == RTN_BROADCAST) 1920 return QETH_CAST_BROADCAST; 1921 return QETH_CAST_UNICAST; 1922 } 1923 1924 static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, 1925 struct qeth_hdr *hdr, struct sk_buff *skb, 1926 int ipv, unsigned int data_len) 1927 { 1928 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 1929 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 1930 struct qeth_card *card = queue->card; 1931 struct dst_entry *dst; 1932 int cast_type; 1933 1934 hdr->hdr.l3.length = data_len; 1935 1936 if (skb_is_gso(skb)) { 1937 hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO; 1938 } else { 1939 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; 1940 1941 if (skb->protocol == htons(ETH_P_AF_IUCV)) { 1942 l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; 1943 l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80); 1944 memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2], 1945 iucv_trans_hdr(skb)->destUserID, 8); 1946 return; 1947 } 1948 1949 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1950 qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); 1951 /* some HW requires combined L3+L4 csum offload: */ 1952 if (ipv == 4) 1953 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; 1954 } 1955 } 1956 1957 if (ipv == 4 || IS_IQD(card)) { 1958 /* NETIF_F_HW_VLAN_CTAG_TX */ 1959 if (skb_vlan_tag_present(skb)) { 1960 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME; 1961 hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); 1962 } 1963 } else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) { 1964 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG; 1965 hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI); 1966 } 1967 1968 rcu_read_lock(); 1969 dst = qeth_dst_check_rcu(skb, ipv); 1970 1971 if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ) 1972 cast_type = RTN_UNICAST; 1973 else 1974 cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv); 1975 l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type); 1976 1977 if (ipv == 4) { 1978 struct rtable *rt = (struct rtable *) dst; 1979 1980 *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ? 1981 rt_nexthop(rt, ip_hdr(skb)->daddr) : 1982 ip_hdr(skb)->daddr; 1983 } else if (ipv == 6) { 1984 struct rt6_info *rt = (struct rt6_info *) dst; 1985 1986 if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) 1987 l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway; 1988 else 1989 l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr; 1990 1991 hdr->hdr.l3.flags |= QETH_HDR_IPV6; 1992 if (!IS_IQD(card)) 1993 hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU; 1994 } else { 1995 /* OSA only: */ 1996 l3_hdr->flags |= QETH_HDR_PASSTHRU; 1997 } 1998 rcu_read_unlock(); 1999 } 2000 2001 static void qeth_l3_fixup_headers(struct sk_buff *skb) 2002 { 2003 struct iphdr *iph = ip_hdr(skb); 2004 2005 /* this is safe, IPv6 traffic takes a different path */ 2006 if (skb->ip_summed == CHECKSUM_PARTIAL) 2007 iph->check = 0; 2008 if (skb_is_gso(skb)) { 2009 iph->tot_len = 0; 2010 tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr, 2011 iph->daddr, 0); 2012 } 2013 } 2014 2015 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, 2016 struct qeth_qdio_out_q *queue, int ipv) 2017 { 2018 unsigned int hw_hdr_len; 2019 int rc; 2020 2021 /* re-use the L2 header area for the HW header: */ 2022 hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) : 2023 sizeof(struct qeth_hdr); 2024 rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); 2025 if (rc) 2026 return rc; 2027 skb_pull(skb, ETH_HLEN); 2028 2029 qeth_l3_fixup_headers(skb); 2030 return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header); 2031 } 2032 2033 static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, 2034 struct net_device *dev) 2035 { 2036 struct qeth_card *card = dev->ml_priv; 2037 u16 txq = skb_get_queue_mapping(skb); 2038 int ipv = qeth_get_ip_version(skb); 2039 struct qeth_qdio_out_q *queue; 2040 int rc; 2041 2042 if (!skb_is_gso(skb)) 2043 qdisc_skb_cb(skb)->pkt_len = skb->len; 2044 if (IS_IQD(card)) { 2045 queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)]; 2046 2047 if (card->options.sniffer) 2048 goto tx_drop; 2049 if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || 2050 (card->options.cq == QETH_CQ_ENABLED && 2051 skb->protocol != htons(ETH_P_AF_IUCV))) 2052 goto tx_drop; 2053 } else { 2054 queue = card->qdio.out_qs[txq]; 2055 } 2056 2057 if (!(dev->flags & IFF_BROADCAST) && 2058 qeth_l3_get_cast_type(skb) == RTN_BROADCAST) 2059 goto tx_drop; 2060 2061 if (ipv == 4 || IS_IQD(card)) 2062 rc = qeth_l3_xmit(card, skb, queue, ipv); 2063 else 2064 rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header); 2065 2066 if (!rc) 2067 return NETDEV_TX_OK; 2068 2069 tx_drop: 2070 QETH_TXQ_STAT_INC(queue, tx_dropped); 2071 kfree_skb(skb); 2072 return NETDEV_TX_OK; 2073 } 2074 2075 static void qeth_l3_set_rx_mode(struct net_device *dev) 2076 { 2077 struct qeth_card *card = dev->ml_priv; 2078 2079 schedule_work(&card->rx_mode_work); 2080 } 2081 2082 /* 2083 * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting 2084 * NOARP on the netdevice is no option because it also turns off neighbor 2085 * solicitation. For IPv4 we install a neighbor_setup function. We don't want 2086 * arp resolution but we want the hard header (packet socket will work 2087 * e.g. tcpdump) 2088 */ 2089 static int qeth_l3_neigh_setup_noarp(struct neighbour *n) 2090 { 2091 n->nud_state = NUD_NOARP; 2092 memcpy(n->ha, "FAKELL", 6); 2093 n->output = n->ops->connected_output; 2094 return 0; 2095 } 2096 2097 static int 2098 qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np) 2099 { 2100 if (np->tbl->family == AF_INET) 2101 np->neigh_setup = qeth_l3_neigh_setup_noarp; 2102 2103 return 0; 2104 } 2105 2106 static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, 2107 struct net_device *dev, 2108 netdev_features_t features) 2109 { 2110 if (qeth_get_ip_version(skb) != 4) 2111 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 2112 return qeth_features_check(skb, dev, features); 2113 } 2114 2115 static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, 2116 struct net_device *sb_dev) 2117 { 2118 return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb), 2119 sb_dev); 2120 } 2121 2122 static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb, 2123 struct net_device *sb_dev) 2124 { 2125 struct qeth_card *card = dev->ml_priv; 2126 2127 return qeth_get_priority_queue(card, skb); 2128 } 2129 2130 static const struct net_device_ops qeth_l3_netdev_ops = { 2131 .ndo_open = qeth_open, 2132 .ndo_stop = qeth_stop, 2133 .ndo_get_stats64 = qeth_get_stats64, 2134 .ndo_start_xmit = qeth_l3_hard_start_xmit, 2135 .ndo_select_queue = qeth_l3_iqd_select_queue, 2136 .ndo_validate_addr = eth_validate_addr, 2137 .ndo_set_rx_mode = qeth_l3_set_rx_mode, 2138 .ndo_do_ioctl = qeth_do_ioctl, 2139 .ndo_fix_features = qeth_fix_features, 2140 .ndo_set_features = qeth_set_features, 2141 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 2142 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 2143 .ndo_tx_timeout = qeth_tx_timeout, 2144 }; 2145 2146 static const struct net_device_ops qeth_l3_osa_netdev_ops = { 2147 .ndo_open = qeth_open, 2148 .ndo_stop = qeth_stop, 2149 .ndo_get_stats64 = qeth_get_stats64, 2150 .ndo_start_xmit = qeth_l3_hard_start_xmit, 2151 .ndo_features_check = qeth_l3_osa_features_check, 2152 .ndo_select_queue = qeth_l3_osa_select_queue, 2153 .ndo_validate_addr = eth_validate_addr, 2154 .ndo_set_rx_mode = qeth_l3_set_rx_mode, 2155 .ndo_do_ioctl = qeth_do_ioctl, 2156 .ndo_fix_features = qeth_fix_features, 2157 .ndo_set_features = qeth_set_features, 2158 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 2159 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 2160 .ndo_tx_timeout = qeth_tx_timeout, 2161 .ndo_neigh_setup = qeth_l3_neigh_setup, 2162 }; 2163 2164 static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) 2165 { 2166 unsigned int headroom; 2167 int rc; 2168 2169 if (IS_OSD(card) || IS_OSX(card)) { 2170 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 2171 (card->info.link_type == QETH_LINK_TYPE_HSTR)) { 2172 pr_info("qeth_l3: ignoring TR device\n"); 2173 return -ENODEV; 2174 } 2175 2176 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; 2177 2178 /*IPv6 address autoconfiguration stuff*/ 2179 qeth_l3_get_unique_id(card); 2180 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 2181 card->dev->dev_id = card->info.unique_id & 0xffff; 2182 2183 if (!IS_VM_NIC(card)) { 2184 card->dev->features |= NETIF_F_SG; 2185 card->dev->hw_features |= NETIF_F_TSO | 2186 NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2187 card->dev->vlan_features |= NETIF_F_TSO | 2188 NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2189 } 2190 2191 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) { 2192 card->dev->hw_features |= NETIF_F_IPV6_CSUM; 2193 card->dev->vlan_features |= NETIF_F_IPV6_CSUM; 2194 } 2195 if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) { 2196 card->dev->hw_features |= NETIF_F_TSO6; 2197 card->dev->vlan_features |= NETIF_F_TSO6; 2198 } 2199 2200 /* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */ 2201 if (card->dev->hw_features & NETIF_F_TSO6) 2202 headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN; 2203 else if (card->dev->hw_features & NETIF_F_TSO) 2204 headroom = sizeof(struct qeth_hdr_tso); 2205 else 2206 headroom = sizeof(struct qeth_hdr) + VLAN_HLEN; 2207 } else if (IS_IQD(card)) { 2208 card->dev->flags |= IFF_NOARP; 2209 card->dev->netdev_ops = &qeth_l3_netdev_ops; 2210 headroom = sizeof(struct qeth_hdr) - ETH_HLEN; 2211 2212 rc = qeth_l3_iqd_read_initial_mac(card); 2213 if (rc) 2214 goto out; 2215 } else 2216 return -ENODEV; 2217 2218 card->dev->needed_headroom = headroom; 2219 card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | 2220 NETIF_F_HW_VLAN_CTAG_RX | 2221 NETIF_F_HW_VLAN_CTAG_FILTER; 2222 2223 netif_keep_dst(card->dev); 2224 if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) 2225 netif_set_gso_max_size(card->dev, 2226 PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); 2227 2228 netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); 2229 rc = register_netdev(card->dev); 2230 if (!rc && carrier_ok) 2231 netif_carrier_on(card->dev); 2232 2233 out: 2234 if (rc) 2235 card->dev->netdev_ops = NULL; 2236 return rc; 2237 } 2238 2239 static const struct device_type qeth_l3_devtype = { 2240 .name = "qeth_layer3", 2241 .groups = qeth_l3_attr_groups, 2242 }; 2243 2244 static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 2245 { 2246 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 2247 int rc; 2248 2249 hash_init(card->ip_htable); 2250 mutex_init(&card->ip_lock); 2251 card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0, 2252 dev_name(&gdev->dev)); 2253 if (!card->cmd_wq) 2254 return -ENOMEM; 2255 2256 if (gdev->dev.type == &qeth_generic_devtype) { 2257 rc = qeth_l3_create_device_attributes(&gdev->dev); 2258 if (rc) { 2259 destroy_workqueue(card->cmd_wq); 2260 return rc; 2261 } 2262 } 2263 2264 hash_init(card->ip_mc_htable); 2265 INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work); 2266 return 0; 2267 } 2268 2269 static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) 2270 { 2271 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 2272 2273 if (cgdev->dev.type == &qeth_generic_devtype) 2274 qeth_l3_remove_device_attributes(&cgdev->dev); 2275 2276 qeth_set_allowed_threads(card, 0, 1); 2277 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 2278 2279 if (cgdev->state == CCWGROUP_ONLINE) 2280 qeth_l3_set_offline(cgdev); 2281 2282 cancel_work_sync(&card->close_dev_work); 2283 if (qeth_netdev_is_registered(card->dev)) 2284 unregister_netdev(card->dev); 2285 2286 flush_workqueue(card->cmd_wq); 2287 destroy_workqueue(card->cmd_wq); 2288 qeth_l3_clear_ip_htable(card, 0); 2289 qeth_l3_clear_ipato_list(card); 2290 } 2291 2292 static int qeth_l3_set_online(struct ccwgroup_device *gdev) 2293 { 2294 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 2295 struct net_device *dev = card->dev; 2296 int rc = 0; 2297 bool carrier_ok; 2298 2299 mutex_lock(&card->discipline_mutex); 2300 mutex_lock(&card->conf_mutex); 2301 QETH_CARD_TEXT(card, 2, "setonlin"); 2302 2303 rc = qeth_core_hardsetup_card(card, &carrier_ok); 2304 if (rc) { 2305 QETH_CARD_TEXT_(card, 2, "2err%04x", rc); 2306 rc = -ENODEV; 2307 goto out_remove; 2308 } 2309 2310 card->state = CARD_STATE_HARDSETUP; 2311 qeth_print_status_message(card); 2312 2313 /* softsetup */ 2314 QETH_CARD_TEXT(card, 2, "softsetp"); 2315 2316 rc = qeth_l3_setadapter_parms(card); 2317 if (rc) 2318 QETH_CARD_TEXT_(card, 2, "2err%04x", rc); 2319 if (!card->options.sniffer) { 2320 rc = qeth_l3_start_ipassists(card); 2321 if (rc) { 2322 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 2323 goto out_remove; 2324 } 2325 rc = qeth_l3_setrouting_v4(card); 2326 if (rc) 2327 QETH_CARD_TEXT_(card, 2, "4err%04x", rc); 2328 rc = qeth_l3_setrouting_v6(card); 2329 if (rc) 2330 QETH_CARD_TEXT_(card, 2, "5err%04x", rc); 2331 } 2332 2333 rc = qeth_init_qdio_queues(card); 2334 if (rc) { 2335 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 2336 rc = -ENODEV; 2337 goto out_remove; 2338 } 2339 card->state = CARD_STATE_SOFTSETUP; 2340 2341 qeth_set_allowed_threads(card, 0xffffffff, 0); 2342 qeth_l3_recover_ip(card); 2343 2344 if (!qeth_netdev_is_registered(dev)) { 2345 rc = qeth_l3_setup_netdev(card, carrier_ok); 2346 if (rc) 2347 goto out_remove; 2348 } else { 2349 rtnl_lock(); 2350 if (carrier_ok) 2351 netif_carrier_on(dev); 2352 else 2353 netif_carrier_off(dev); 2354 2355 netif_device_attach(dev); 2356 qeth_enable_hw_features(dev); 2357 2358 if (card->info.open_when_online) { 2359 card->info.open_when_online = 0; 2360 dev_open(dev, NULL); 2361 } 2362 rtnl_unlock(); 2363 } 2364 qeth_trace_features(card); 2365 /* let user_space know that device is online */ 2366 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 2367 mutex_unlock(&card->conf_mutex); 2368 mutex_unlock(&card->discipline_mutex); 2369 return 0; 2370 out_remove: 2371 qeth_l3_stop_card(card); 2372 ccw_device_set_offline(CARD_DDEV(card)); 2373 ccw_device_set_offline(CARD_WDEV(card)); 2374 ccw_device_set_offline(CARD_RDEV(card)); 2375 qdio_free(CARD_DDEV(card)); 2376 2377 mutex_unlock(&card->conf_mutex); 2378 mutex_unlock(&card->discipline_mutex); 2379 return rc; 2380 } 2381 2382 static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, 2383 int recovery_mode) 2384 { 2385 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 2386 int rc = 0, rc2 = 0, rc3 = 0; 2387 2388 mutex_lock(&card->discipline_mutex); 2389 mutex_lock(&card->conf_mutex); 2390 QETH_CARD_TEXT(card, 3, "setoffl"); 2391 2392 if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { 2393 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 2394 card->info.hwtrap = 1; 2395 } 2396 2397 rtnl_lock(); 2398 card->info.open_when_online = card->dev->flags & IFF_UP; 2399 dev_close(card->dev); 2400 netif_device_detach(card->dev); 2401 netif_carrier_off(card->dev); 2402 rtnl_unlock(); 2403 2404 qeth_l3_stop_card(card); 2405 if (card->options.cq == QETH_CQ_ENABLED) { 2406 rtnl_lock(); 2407 call_netdevice_notifiers(NETDEV_REBOOT, card->dev); 2408 rtnl_unlock(); 2409 } 2410 rc = ccw_device_set_offline(CARD_DDEV(card)); 2411 rc2 = ccw_device_set_offline(CARD_WDEV(card)); 2412 rc3 = ccw_device_set_offline(CARD_RDEV(card)); 2413 if (!rc) 2414 rc = (rc2) ? rc2 : rc3; 2415 if (rc) 2416 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2417 qdio_free(CARD_DDEV(card)); 2418 2419 /* let user_space know that device is offline */ 2420 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); 2421 mutex_unlock(&card->conf_mutex); 2422 mutex_unlock(&card->discipline_mutex); 2423 return 0; 2424 } 2425 2426 static int qeth_l3_set_offline(struct ccwgroup_device *cgdev) 2427 { 2428 return __qeth_l3_set_offline(cgdev, 0); 2429 } 2430 2431 static int qeth_l3_recover(void *ptr) 2432 { 2433 struct qeth_card *card; 2434 int rc = 0; 2435 2436 card = (struct qeth_card *) ptr; 2437 QETH_CARD_TEXT(card, 2, "recover1"); 2438 QETH_CARD_HEX(card, 2, &card, sizeof(void *)); 2439 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 2440 return 0; 2441 QETH_CARD_TEXT(card, 2, "recover2"); 2442 dev_warn(&card->gdev->dev, 2443 "A recovery process has been started for the device\n"); 2444 __qeth_l3_set_offline(card->gdev, 1); 2445 rc = qeth_l3_set_online(card->gdev); 2446 if (!rc) 2447 dev_info(&card->gdev->dev, 2448 "Device successfully recovered!\n"); 2449 else { 2450 ccwgroup_set_offline(card->gdev); 2451 dev_warn(&card->gdev->dev, "The qeth device driver " 2452 "failed to recover an error on the device\n"); 2453 } 2454 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 2455 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 2456 return 0; 2457 } 2458 2459 /* Returns zero if the command is successfully "consumed" */ 2460 static int qeth_l3_control_event(struct qeth_card *card, 2461 struct qeth_ipa_cmd *cmd) 2462 { 2463 return 1; 2464 } 2465 2466 struct qeth_discipline qeth_l3_discipline = { 2467 .devtype = &qeth_l3_devtype, 2468 .process_rx_buffer = qeth_l3_process_inbound_buffer, 2469 .recover = qeth_l3_recover, 2470 .setup = qeth_l3_probe_device, 2471 .remove = qeth_l3_remove_device, 2472 .set_online = qeth_l3_set_online, 2473 .set_offline = qeth_l3_set_offline, 2474 .do_ioctl = qeth_l3_do_ioctl, 2475 .control_event_handler = qeth_l3_control_event, 2476 }; 2477 EXPORT_SYMBOL_GPL(qeth_l3_discipline); 2478 2479 static int qeth_l3_handle_ip_event(struct qeth_card *card, 2480 struct qeth_ipaddr *addr, 2481 unsigned long event) 2482 { 2483 switch (event) { 2484 case NETDEV_UP: 2485 qeth_l3_modify_ip(card, addr, true); 2486 return NOTIFY_OK; 2487 case NETDEV_DOWN: 2488 qeth_l3_modify_ip(card, addr, false); 2489 return NOTIFY_OK; 2490 default: 2491 return NOTIFY_DONE; 2492 } 2493 } 2494 2495 struct qeth_l3_ip_event_work { 2496 struct work_struct work; 2497 struct qeth_card *card; 2498 struct qeth_ipaddr addr; 2499 }; 2500 2501 #define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work) 2502 2503 static void qeth_l3_add_ip_worker(struct work_struct *work) 2504 { 2505 struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); 2506 2507 qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true); 2508 kfree(work); 2509 } 2510 2511 static void qeth_l3_delete_ip_worker(struct work_struct *work) 2512 { 2513 struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); 2514 2515 qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false); 2516 kfree(work); 2517 } 2518 2519 static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) 2520 { 2521 if (is_vlan_dev(dev)) 2522 dev = vlan_dev_real_dev(dev); 2523 if (dev->netdev_ops == &qeth_l3_osa_netdev_ops || 2524 dev->netdev_ops == &qeth_l3_netdev_ops) 2525 return (struct qeth_card *) dev->ml_priv; 2526 return NULL; 2527 } 2528 2529 static int qeth_l3_ip_event(struct notifier_block *this, 2530 unsigned long event, void *ptr) 2531 { 2532 2533 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 2534 struct net_device *dev = ifa->ifa_dev->dev; 2535 struct qeth_ipaddr addr; 2536 struct qeth_card *card; 2537 2538 if (dev_net(dev) != &init_net) 2539 return NOTIFY_DONE; 2540 2541 card = qeth_l3_get_card_from_dev(dev); 2542 if (!card) 2543 return NOTIFY_DONE; 2544 QETH_CARD_TEXT(card, 3, "ipevent"); 2545 2546 qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4); 2547 addr.u.a4.addr = ifa->ifa_address; 2548 addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask); 2549 2550 return qeth_l3_handle_ip_event(card, &addr, event); 2551 } 2552 2553 static struct notifier_block qeth_l3_ip_notifier = { 2554 qeth_l3_ip_event, 2555 NULL, 2556 }; 2557 2558 static int qeth_l3_ip6_event(struct notifier_block *this, 2559 unsigned long event, void *ptr) 2560 { 2561 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; 2562 struct net_device *dev = ifa->idev->dev; 2563 struct qeth_l3_ip_event_work *ip_work; 2564 struct qeth_card *card; 2565 2566 if (event != NETDEV_UP && event != NETDEV_DOWN) 2567 return NOTIFY_DONE; 2568 2569 card = qeth_l3_get_card_from_dev(dev); 2570 if (!card) 2571 return NOTIFY_DONE; 2572 QETH_CARD_TEXT(card, 3, "ip6event"); 2573 if (!qeth_is_supported(card, IPA_IPV6)) 2574 return NOTIFY_DONE; 2575 2576 ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC); 2577 if (!ip_work) 2578 return NOTIFY_DONE; 2579 2580 if (event == NETDEV_UP) 2581 INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker); 2582 else 2583 INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker); 2584 2585 ip_work->card = card; 2586 qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL, 2587 QETH_PROT_IPV6); 2588 ip_work->addr.u.a6.addr = ifa->addr; 2589 ip_work->addr.u.a6.pfxlen = ifa->prefix_len; 2590 2591 queue_work(card->cmd_wq, &ip_work->work); 2592 return NOTIFY_OK; 2593 } 2594 2595 static struct notifier_block qeth_l3_ip6_notifier = { 2596 qeth_l3_ip6_event, 2597 NULL, 2598 }; 2599 2600 static int qeth_l3_register_notifiers(void) 2601 { 2602 int rc; 2603 2604 QETH_DBF_TEXT(SETUP, 5, "regnotif"); 2605 rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); 2606 if (rc) 2607 return rc; 2608 rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier); 2609 if (rc) { 2610 unregister_inetaddr_notifier(&qeth_l3_ip_notifier); 2611 return rc; 2612 } 2613 return 0; 2614 } 2615 2616 static void qeth_l3_unregister_notifiers(void) 2617 { 2618 QETH_DBF_TEXT(SETUP, 5, "unregnot"); 2619 WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); 2620 WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); 2621 } 2622 2623 static int __init qeth_l3_init(void) 2624 { 2625 pr_info("register layer 3 discipline\n"); 2626 return qeth_l3_register_notifiers(); 2627 } 2628 2629 static void __exit qeth_l3_exit(void) 2630 { 2631 qeth_l3_unregister_notifiers(); 2632 pr_info("unregister layer 3 discipline\n"); 2633 } 2634 2635 module_init(qeth_l3_init); 2636 module_exit(qeth_l3_exit); 2637 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); 2638 MODULE_DESCRIPTION("qeth layer 3 discipline"); 2639 MODULE_LICENSE("GPL"); 2640