/*
 * drivers/s390/net/qeth_l3_main.c
 *
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>

#include <net/ip.h>
#include <net/arp.h>
#include <net/ip6_checksum.h>

#include "qeth_l3.h"

static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_recover(void *);
static int qeth_l3_stop(struct net_device *);
static void qeth_l3_set_multicast_list(struct net_device *);
static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *);
static int qeth_l3_register_addr_entry(struct qeth_card *,
		struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
		struct qeth_ipaddr *);
static int __qeth_l3_set_online(struct ccwgroup_device *, int);
static int __qeth_l3_set_offline(struct ccwgroup_device *, int);


static int qeth_l3_isxdigit(char *buf)
{
	while (*buf) {
		if (!isxdigit(*buf++))
			return 0;
	}
	return 1;
}

void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
{
	sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
}

int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
{
	int count = 0, rc = 0;
	int in[4];
	char c;

	rc = sscanf(buf, "%u.%u.%u.%u%c",
		    &in[0], &in[1], &in[2], &in[3], &c);
	if (rc != 4 && (rc != 5 || c != '\n'))
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
		addr[count] = in[count];
	}
	return 0;
}

void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
{
	sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
		     ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
		     addr[0], addr[1], addr[2], addr[3],
		     addr[4], addr[5], addr[6], addr[7],
		     addr[8], addr[9], addr[10], addr[11],
		     addr[12], addr[13], addr[14], addr[15]);
}

int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
{
	const char *end, *end_tmp, *start;
	__u16 *in;
	char num[5];
	int num2, cnt, out, found, save_cnt;
	unsigned short in_tmp[8] = {0, };

	cnt = out = found = save_cnt = num2 = 0;
	end = start = buf;
	in = (__u16 *) addr;
	memset(in, 0, 16);
	while (*end) {
		end = strchr(start, ':');
		if (end == NULL) {
			end = buf + strlen(buf);
			end_tmp = strchr(start, '\n');
			if (end_tmp != NULL)
				end = end_tmp;
			out = 1;
		}
		if ((end - start)) {
			memset(num, 0, 5);
			if ((end - start) > 4)
				return -EINVAL;
			memcpy(num, start, end - start);
			if (!qeth_l3_isxdigit(num))
				return -EINVAL;
			sscanf(start, "%x", &num2);
			if (found)
				in_tmp[save_cnt++] = num2;
			else
				in[cnt++] = num2;
			if (out)
				break;
		} else {
			if (found)
				return -EINVAL;
			found = 1;
		}
		start = ++end;
	}
	if (cnt + save_cnt > 8)
		return -EINVAL;
	cnt = 7;
	while (save_cnt)
		in[cnt--] = in_tmp[--save_cnt];
	return 0;
}

void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
				char *buf)
{
	if (proto == QETH_PROT_IPV4)
		qeth_l3_ipaddr4_to_string(addr, buf);
	else if (proto == QETH_PROT_IPV6)
		qeth_l3_ipaddr6_to_string(addr, buf);
}

int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
				__u8 *addr)
{
	if (proto == QETH_PROT_IPV4)
		return qeth_l3_string_to_ipaddr4(buf, addr);
	else if (proto == QETH_PROT_IPV6)
		return qeth_l3_string_to_ipaddr6(buf, addr);
	else
		return -EINVAL;
}

static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
	int i, j;
	u8 octet;

	for (i = 0; i < len; ++i) {
		octet = addr[i];
		for (j = 7; j >= 0; --j) {
			bits[i*8 + j] = octet & 1;
			octet >>= 1;
		}
	}
}

static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
						struct qeth_ipaddr *addr)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr_bits[128] = {0, };
	u8 ipatoe_bits[128] = {0, };
	int rc = 0;

	if (!card->ipato.enabled)
		return 0;

	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
				  (addr->proto == QETH_PROT_IPV4)? 4:16);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (addr->proto != ipatoe->proto)
			continue;
		qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
					  (ipatoe->proto == QETH_PROT_IPV4) ?
					  4 : 16);
		if (addr->proto == QETH_PROT_IPV4)
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(32, ipatoe->mask_bits));
		else
			rc = !memcmp(addr_bits, ipatoe_bits,
				     min(128, ipatoe->mask_bits));
		if (rc)
			break;
	}
	/* invert? */
	if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4)
		rc = !rc;
	else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6)
		rc = !rc;

	return rc;
}

/*
 * Add IP to be added to todo list. If there is already an "add todo"
 * in this list we just increment the reference count.
 * Returns 0 if we just incremented reference count.
 */
static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
					struct qeth_ipaddr *addr, int add)
{
	struct qeth_ipaddr *tmp, *t;
	int found = 0;

	list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
		if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
		    (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
			return 0;
		if ((tmp->proto == QETH_PROT_IPV4) &&
		    (addr->proto == QETH_PROT_IPV4) &&
		    (tmp->type == addr->type) &&
		    (tmp->is_multicast == addr->is_multicast) &&
		    (tmp->u.a4.addr == addr->u.a4.addr) &&
		    (tmp->u.a4.mask == addr->u.a4.mask)) {
			found = 1;
			break;
		}
		if ((tmp->proto == QETH_PROT_IPV6) &&
		    (addr->proto == QETH_PROT_IPV6) &&
		    (tmp->type == addr->type) &&
		    (tmp->is_multicast == addr->is_multicast) &&
		    (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
			    sizeof(struct in6_addr)) == 0)) {
			found = 1;
			break;
		}
	}
	if (found) {
		if (addr->users != 0)
			tmp->users += addr->users;
		else
			tmp->users += add ? 1 : -1;
		if (tmp->users == 0) {
			list_del(&tmp->entry);
			kfree(tmp);
		}
		return 0;
	} else {
		if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
			list_add(&addr->entry, card->ip_tbd_list);
		else {
			if (addr->users == 0)
				addr->users += add ? 1 : -1;
			if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
			    qeth_l3_is_addr_covered_by_ipato(card, addr)) {
				QETH_DBF_TEXT(TRACE, 2, "tkovaddr");
				addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
			}
			list_add_tail(&addr->entry, card->ip_tbd_list);
		}
		return 1;
	}
}

static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 4, "delip");

	if (addr->proto == QETH_PROT_IPV4)
		QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
	else {
		QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
		QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
	}
	spin_lock_irqsave(&card->ip_lock, flags);
	rc = __qeth_l3_insert_ip_todo(card, addr, 0);
	spin_unlock_irqrestore(&card->ip_lock, flags);
	return rc;
}

static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
{
	unsigned long flags;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 4, "addip");
	if (addr->proto == QETH_PROT_IPV4)
		QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
	else {
		QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
		QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
	}
	spin_lock_irqsave(&card->ip_lock, flags);
	rc = __qeth_l3_insert_ip_todo(card, addr, 1);
	spin_unlock_irqrestore(&card->ip_lock, flags);
	return rc;
}


static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
				enum qeth_prot_versions prot)
{
	struct qeth_ipaddr *addr;

	addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
	if (addr == NULL) {
		return NULL;
	}
	addr->type = QETH_IP_TYPE_NORMAL;
	addr->proto = prot;
	return addr;
}

static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
{
	struct qeth_ipaddr *iptodo;
	unsigned long flags;

	QETH_DBF_TEXT(TRACE, 4, "delmc");
	iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
	if (!iptodo) {
		QETH_DBF_TEXT(TRACE, 2, "dmcnomem");
		return;
	}
	iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
	spin_lock_irqsave(&card->ip_lock, flags);
	if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
		kfree(iptodo);
	spin_unlock_irqrestore(&card->ip_lock, flags);
}

/*
 * Add/remove address to/from card's ip list, i.e. try to add or remove
 * reference to/from an IP address that is already registered on the card.
 * Returns:
 *	 0 address was on card and its reference count has been adjusted,
 *	   but is still > 0, so nothing has to be done
 *	   also returns 0 if the address was not on the card and the todo
 *	   was to delete the address -> there is also nothing to be done
 *	 1 address was not on card and the todo is to add it to the card's ip
 *	   list
 *	-1 address was on card and its reference count has been decremented
 *	   to <= 0 by the todo -> address must be removed from card
 */
static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
		struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
{
	struct qeth_ipaddr *addr;
	int found = 0;

	list_for_each_entry(addr, &card->ip_list, entry) {
		if ((addr->proto == QETH_PROT_IPV4) &&
		    (todo->proto == QETH_PROT_IPV4) &&
		    (addr->type == todo->type) &&
		    (addr->u.a4.addr == todo->u.a4.addr) &&
		    (addr->u.a4.mask == todo->u.a4.mask)) {
			found = 1;
			break;
		}
		if ((addr->proto == QETH_PROT_IPV6) &&
		    (todo->proto == QETH_PROT_IPV6) &&
		    (addr->type == todo->type) &&
		    (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
		    (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
			    sizeof(struct in6_addr)) == 0)) {
			found = 1;
			break;
		}
	}
	if (found) {
		addr->users += todo->users;
		if (addr->users <= 0) {
			*__addr = addr;
			return -1;
		} else {
			/* for VIPA and RXIP limit refcount to 1 */
			if (addr->type != QETH_IP_TYPE_NORMAL)
				addr->users = 1;
			return 0;
		}
	}
	if (todo->users > 0) {
		/* for VIPA and RXIP limit refcount to 1 */
		if (todo->type != QETH_IP_TYPE_NORMAL)
			todo->users = 1;
		return 1;
	} else
		return 0;
}

static void __qeth_l3_delete_all_mc(struct qeth_card *card,
					unsigned long *flags)
{
	struct list_head fail_list;
	struct qeth_ipaddr *addr, *tmp;
	int rc;

	INIT_LIST_HEAD(&fail_list);
again:
	list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
		if (addr->is_multicast) {
			list_del(&addr->entry);
			spin_unlock_irqrestore(&card->ip_lock, *flags);
			rc = qeth_l3_deregister_addr_entry(card, addr);
			spin_lock_irqsave(&card->ip_lock, *flags);
			if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND))
				kfree(addr);
			else
				list_add_tail(&addr->entry, &fail_list);
			goto again;
		}
	}
	list_splice(&fail_list, &card->ip_list);
}

static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
{
	struct list_head *tbd_list;
	struct qeth_ipaddr *todo, *addr;
	unsigned long flags;
	int rc;

	QETH_DBF_TEXT(TRACE, 2, "sdiplist");
	QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));

	spin_lock_irqsave(&card->ip_lock, flags);
	tbd_list = card->ip_tbd_list;
	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
	if (!card->ip_tbd_list) {
		QETH_DBF_TEXT(TRACE, 0, "silnomem");
		card->ip_tbd_list = tbd_list;
		spin_unlock_irqrestore(&card->ip_lock, flags);
		return;
	} else
		INIT_LIST_HEAD(card->ip_tbd_list);

	while (!list_empty(tbd_list)) {
		todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
		list_del(&todo->entry);
		if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
			__qeth_l3_delete_all_mc(card, &flags);
			kfree(todo);
			continue;
		}
		rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
		if (rc == 0) {
			/* nothing to be done; only adjusted refcount */
			kfree(todo);
		} else if (rc == 1) {
			/* new entry to be added to on-card list */
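			/* registering the address sends a command to the
			 * card, so ip_lock is released across the call and
			 * re-acquired before the list is touched again */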
spin_unlock_irqrestore(&card->ip_lock, flags); 460 rc = qeth_l3_register_addr_entry(card, todo); 461 spin_lock_irqsave(&card->ip_lock, flags); 462 if (!rc || (rc == IPA_RC_LAN_OFFLINE)) 463 list_add_tail(&todo->entry, &card->ip_list); 464 else 465 kfree(todo); 466 } else if (rc == -1) { 467 /* on-card entry to be removed */ 468 list_del_init(&addr->entry); 469 spin_unlock_irqrestore(&card->ip_lock, flags); 470 rc = qeth_l3_deregister_addr_entry(card, addr); 471 spin_lock_irqsave(&card->ip_lock, flags); 472 if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED)) 473 kfree(addr); 474 else 475 list_add_tail(&addr->entry, &card->ip_list); 476 kfree(todo); 477 } 478 } 479 spin_unlock_irqrestore(&card->ip_lock, flags); 480 kfree(tbd_list); 481 } 482 483 static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, 484 int recover) 485 { 486 struct qeth_ipaddr *addr, *tmp; 487 unsigned long flags; 488 489 QETH_DBF_TEXT(TRACE, 4, "clearip"); 490 spin_lock_irqsave(&card->ip_lock, flags); 491 /* clear todo list */ 492 list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) { 493 list_del(&addr->entry); 494 kfree(addr); 495 } 496 497 while (!list_empty(&card->ip_list)) { 498 addr = list_entry(card->ip_list.next, 499 struct qeth_ipaddr, entry); 500 list_del_init(&addr->entry); 501 if (clean) { 502 spin_unlock_irqrestore(&card->ip_lock, flags); 503 qeth_l3_deregister_addr_entry(card, addr); 504 spin_lock_irqsave(&card->ip_lock, flags); 505 } 506 if (!recover || addr->is_multicast) { 507 kfree(addr); 508 continue; 509 } 510 list_add_tail(&addr->entry, card->ip_tbd_list); 511 } 512 spin_unlock_irqrestore(&card->ip_lock, flags); 513 } 514 515 static int qeth_l3_address_exists_in_list(struct list_head *list, 516 struct qeth_ipaddr *addr, int same_type) 517 { 518 struct qeth_ipaddr *tmp; 519 520 list_for_each_entry(tmp, list, entry) { 521 if ((tmp->proto == QETH_PROT_IPV4) && 522 (addr->proto == QETH_PROT_IPV4) && 523 ((same_type && (tmp->type == addr->type)) || 524 (!same_type && (tmp->type != addr->type))) && 525 (tmp->u.a4.addr == addr->u.a4.addr)) 526 return 1; 527 528 if ((tmp->proto == QETH_PROT_IPV6) && 529 (addr->proto == QETH_PROT_IPV6) && 530 ((same_type && (tmp->type == addr->type)) || 531 (!same_type && (tmp->type != addr->type))) && 532 (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr, 533 sizeof(struct in6_addr)) == 0)) 534 return 1; 535 536 } 537 return 0; 538 } 539 540 static int qeth_l3_send_setdelmc(struct qeth_card *card, 541 struct qeth_ipaddr *addr, int ipacmd) 542 { 543 int rc; 544 struct qeth_cmd_buffer *iob; 545 struct qeth_ipa_cmd *cmd; 546 547 QETH_DBF_TEXT(TRACE, 4, "setdelmc"); 548 549 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 550 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 551 memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); 552 if (addr->proto == QETH_PROT_IPV6) 553 memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr, 554 sizeof(struct in6_addr)); 555 else 556 memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4); 557 558 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 559 560 return rc; 561 } 562 563 static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len) 564 { 565 int i, j; 566 for (i = 0; i < 16; i++) { 567 j = (len) - (i * 8); 568 if (j >= 8) 569 netmask[i] = 0xff; 570 else if (j > 0) 571 netmask[i] = (u8)(0xFF00 >> j); 572 else 573 netmask[i] = 0; 574 } 575 } 576 577 static int qeth_l3_send_setdelip(struct qeth_card *card, 578 struct qeth_ipaddr *addr, int ipacmd, unsigned int flags) 579 { 580 int rc; 581 struct 
qeth_cmd_buffer *iob; 582 struct qeth_ipa_cmd *cmd; 583 __u8 netmask[16]; 584 585 QETH_DBF_TEXT(TRACE, 4, "setdelip"); 586 QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags); 587 588 iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); 589 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 590 if (addr->proto == QETH_PROT_IPV6) { 591 memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, 592 sizeof(struct in6_addr)); 593 qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen); 594 memcpy(cmd->data.setdelip6.mask, netmask, 595 sizeof(struct in6_addr)); 596 cmd->data.setdelip6.flags = flags; 597 } else { 598 memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4); 599 memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4); 600 cmd->data.setdelip4.flags = flags; 601 } 602 603 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 604 605 return rc; 606 } 607 608 static int qeth_l3_send_setrouting(struct qeth_card *card, 609 enum qeth_routing_types type, enum qeth_prot_versions prot) 610 { 611 int rc; 612 struct qeth_ipa_cmd *cmd; 613 struct qeth_cmd_buffer *iob; 614 615 QETH_DBF_TEXT(TRACE, 4, "setroutg"); 616 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); 617 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 618 cmd->data.setrtg.type = (type); 619 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 620 621 return rc; 622 } 623 624 static void qeth_l3_correct_routing_type(struct qeth_card *card, 625 enum qeth_routing_types *type, enum qeth_prot_versions prot) 626 { 627 if (card->info.type == QETH_CARD_TYPE_IQD) { 628 switch (*type) { 629 case NO_ROUTER: 630 case PRIMARY_CONNECTOR: 631 case SECONDARY_CONNECTOR: 632 case MULTICAST_ROUTER: 633 return; 634 default: 635 goto out_inval; 636 } 637 } else { 638 switch (*type) { 639 case NO_ROUTER: 640 case PRIMARY_ROUTER: 641 case SECONDARY_ROUTER: 642 return; 643 case MULTICAST_ROUTER: 644 if (qeth_is_ipafunc_supported(card, prot, 645 IPA_OSA_MC_ROUTER)) 646 return; 647 default: 648 goto out_inval; 649 } 650 } 651 out_inval: 652 *type = NO_ROUTER; 653 } 654 655 int qeth_l3_setrouting_v4(struct qeth_card *card) 656 { 657 int rc; 658 659 QETH_DBF_TEXT(TRACE, 3, "setrtg4"); 660 661 qeth_l3_correct_routing_type(card, &card->options.route4.type, 662 QETH_PROT_IPV4); 663 664 rc = qeth_l3_send_setrouting(card, card->options.route4.type, 665 QETH_PROT_IPV4); 666 if (rc) { 667 card->options.route4.type = NO_ROUTER; 668 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" 669 " on %s. Type set to 'no router'.\n", rc, 670 QETH_CARD_IFNAME(card)); 671 } 672 return rc; 673 } 674 675 int qeth_l3_setrouting_v6(struct qeth_card *card) 676 { 677 int rc = 0; 678 679 QETH_DBF_TEXT(TRACE, 3, "setrtg6"); 680 #ifdef CONFIG_QETH_IPV6 681 682 if (!qeth_is_supported(card, IPA_IPV6)) 683 return 0; 684 qeth_l3_correct_routing_type(card, &card->options.route6.type, 685 QETH_PROT_IPV6); 686 687 rc = qeth_l3_send_setrouting(card, card->options.route6.type, 688 QETH_PROT_IPV6); 689 if (rc) { 690 card->options.route6.type = NO_ROUTER; 691 QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" 692 " on %s. 
Type set to 'no router'.\n", rc, 693 QETH_CARD_IFNAME(card)); 694 } 695 #endif 696 return rc; 697 } 698 699 /* 700 * IP address takeover related functions 701 */ 702 static void qeth_l3_clear_ipato_list(struct qeth_card *card) 703 { 704 705 struct qeth_ipato_entry *ipatoe, *tmp; 706 unsigned long flags; 707 708 spin_lock_irqsave(&card->ip_lock, flags); 709 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 710 list_del(&ipatoe->entry); 711 kfree(ipatoe); 712 } 713 spin_unlock_irqrestore(&card->ip_lock, flags); 714 } 715 716 int qeth_l3_add_ipato_entry(struct qeth_card *card, 717 struct qeth_ipato_entry *new) 718 { 719 struct qeth_ipato_entry *ipatoe; 720 unsigned long flags; 721 int rc = 0; 722 723 QETH_DBF_TEXT(TRACE, 2, "addipato"); 724 spin_lock_irqsave(&card->ip_lock, flags); 725 list_for_each_entry(ipatoe, &card->ipato.entries, entry) { 726 if (ipatoe->proto != new->proto) 727 continue; 728 if (!memcmp(ipatoe->addr, new->addr, 729 (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && 730 (ipatoe->mask_bits == new->mask_bits)) { 731 rc = -EEXIST; 732 break; 733 } 734 } 735 if (!rc) 736 list_add_tail(&new->entry, &card->ipato.entries); 737 738 spin_unlock_irqrestore(&card->ip_lock, flags); 739 return rc; 740 } 741 742 void qeth_l3_del_ipato_entry(struct qeth_card *card, 743 enum qeth_prot_versions proto, u8 *addr, int mask_bits) 744 { 745 struct qeth_ipato_entry *ipatoe, *tmp; 746 unsigned long flags; 747 748 QETH_DBF_TEXT(TRACE, 2, "delipato"); 749 spin_lock_irqsave(&card->ip_lock, flags); 750 list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { 751 if (ipatoe->proto != proto) 752 continue; 753 if (!memcmp(ipatoe->addr, addr, 754 (proto == QETH_PROT_IPV4)? 4:16) && 755 (ipatoe->mask_bits == mask_bits)) { 756 list_del(&ipatoe->entry); 757 kfree(ipatoe); 758 } 759 } 760 spin_unlock_irqrestore(&card->ip_lock, flags); 761 } 762 763 /* 764 * VIPA related functions 765 */ 766 int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, 767 const u8 *addr) 768 { 769 struct qeth_ipaddr *ipaddr; 770 unsigned long flags; 771 int rc = 0; 772 773 ipaddr = qeth_l3_get_addr_buffer(proto); 774 if (ipaddr) { 775 if (proto == QETH_PROT_IPV4) { 776 QETH_DBF_TEXT(TRACE, 2, "addvipa4"); 777 memcpy(&ipaddr->u.a4.addr, addr, 4); 778 ipaddr->u.a4.mask = 0; 779 } else if (proto == QETH_PROT_IPV6) { 780 QETH_DBF_TEXT(TRACE, 2, "addvipa6"); 781 memcpy(&ipaddr->u.a6.addr, addr, 16); 782 ipaddr->u.a6.pfxlen = 0; 783 } 784 ipaddr->type = QETH_IP_TYPE_VIPA; 785 ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG; 786 ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG; 787 } else 788 return -ENOMEM; 789 spin_lock_irqsave(&card->ip_lock, flags); 790 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) || 791 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0)) 792 rc = -EEXIST; 793 spin_unlock_irqrestore(&card->ip_lock, flags); 794 if (rc) { 795 return rc; 796 } 797 if (!qeth_l3_add_ip(card, ipaddr)) 798 kfree(ipaddr); 799 qeth_l3_set_ip_addr_list(card); 800 return rc; 801 } 802 803 void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, 804 const u8 *addr) 805 { 806 struct qeth_ipaddr *ipaddr; 807 808 ipaddr = qeth_l3_get_addr_buffer(proto); 809 if (ipaddr) { 810 if (proto == QETH_PROT_IPV4) { 811 QETH_DBF_TEXT(TRACE, 2, "delvipa4"); 812 memcpy(&ipaddr->u.a4.addr, addr, 4); 813 ipaddr->u.a4.mask = 0; 814 } else if (proto == QETH_PROT_IPV6) { 815 QETH_DBF_TEXT(TRACE, 2, "delvipa6"); 816 memcpy(&ipaddr->u.a6.addr, addr, 16); 817 ipaddr->u.a6.pfxlen = 0; 
818 } 819 ipaddr->type = QETH_IP_TYPE_VIPA; 820 } else 821 return; 822 if (!qeth_l3_delete_ip(card, ipaddr)) 823 kfree(ipaddr); 824 qeth_l3_set_ip_addr_list(card); 825 } 826 827 /* 828 * proxy ARP related functions 829 */ 830 int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, 831 const u8 *addr) 832 { 833 struct qeth_ipaddr *ipaddr; 834 unsigned long flags; 835 int rc = 0; 836 837 ipaddr = qeth_l3_get_addr_buffer(proto); 838 if (ipaddr) { 839 if (proto == QETH_PROT_IPV4) { 840 QETH_DBF_TEXT(TRACE, 2, "addrxip4"); 841 memcpy(&ipaddr->u.a4.addr, addr, 4); 842 ipaddr->u.a4.mask = 0; 843 } else if (proto == QETH_PROT_IPV6) { 844 QETH_DBF_TEXT(TRACE, 2, "addrxip6"); 845 memcpy(&ipaddr->u.a6.addr, addr, 16); 846 ipaddr->u.a6.pfxlen = 0; 847 } 848 ipaddr->type = QETH_IP_TYPE_RXIP; 849 ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG; 850 ipaddr->del_flags = 0; 851 } else 852 return -ENOMEM; 853 spin_lock_irqsave(&card->ip_lock, flags); 854 if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) || 855 qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0)) 856 rc = -EEXIST; 857 spin_unlock_irqrestore(&card->ip_lock, flags); 858 if (rc) { 859 return rc; 860 } 861 if (!qeth_l3_add_ip(card, ipaddr)) 862 kfree(ipaddr); 863 qeth_l3_set_ip_addr_list(card); 864 return 0; 865 } 866 867 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, 868 const u8 *addr) 869 { 870 struct qeth_ipaddr *ipaddr; 871 872 ipaddr = qeth_l3_get_addr_buffer(proto); 873 if (ipaddr) { 874 if (proto == QETH_PROT_IPV4) { 875 QETH_DBF_TEXT(TRACE, 2, "addrxip4"); 876 memcpy(&ipaddr->u.a4.addr, addr, 4); 877 ipaddr->u.a4.mask = 0; 878 } else if (proto == QETH_PROT_IPV6) { 879 QETH_DBF_TEXT(TRACE, 2, "addrxip6"); 880 memcpy(&ipaddr->u.a6.addr, addr, 16); 881 ipaddr->u.a6.pfxlen = 0; 882 } 883 ipaddr->type = QETH_IP_TYPE_RXIP; 884 } else 885 return; 886 if (!qeth_l3_delete_ip(card, ipaddr)) 887 kfree(ipaddr); 888 qeth_l3_set_ip_addr_list(card); 889 } 890 891 static int qeth_l3_register_addr_entry(struct qeth_card *card, 892 struct qeth_ipaddr *addr) 893 { 894 char buf[50]; 895 int rc = 0; 896 int cnt = 3; 897 898 if (addr->proto == QETH_PROT_IPV4) { 899 QETH_DBF_TEXT(TRACE, 2, "setaddr4"); 900 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); 901 } else if (addr->proto == QETH_PROT_IPV6) { 902 QETH_DBF_TEXT(TRACE, 2, "setaddr6"); 903 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); 904 QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8); 905 } else { 906 QETH_DBF_TEXT(TRACE, 2, "setaddr?"); 907 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); 908 } 909 do { 910 if (addr->is_multicast) 911 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM); 912 else 913 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP, 914 addr->set_flags); 915 if (rc) 916 QETH_DBF_TEXT(TRACE, 2, "failed"); 917 } while ((--cnt > 0) && rc); 918 if (rc) { 919 QETH_DBF_TEXT(TRACE, 2, "FAILED"); 920 qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); 921 dev_warn(&card->gdev->dev, 922 "Registering IP address %s failed\n", buf); 923 } 924 return rc; 925 } 926 927 static int qeth_l3_deregister_addr_entry(struct qeth_card *card, 928 struct qeth_ipaddr *addr) 929 { 930 int rc = 0; 931 932 if (addr->proto == QETH_PROT_IPV4) { 933 QETH_DBF_TEXT(TRACE, 2, "deladdr4"); 934 QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int)); 935 } else if (addr->proto == QETH_PROT_IPV6) { 936 QETH_DBF_TEXT(TRACE, 2, "deladdr6"); 937 QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8); 938 QETH_DBF_HEX(TRACE, 3, 
((char *)&addr->u.a6.addr) + 8, 8); 939 } else { 940 QETH_DBF_TEXT(TRACE, 2, "deladdr?"); 941 QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr)); 942 } 943 if (addr->is_multicast) 944 rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); 945 else 946 rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP, 947 addr->del_flags); 948 if (rc) 949 QETH_DBF_TEXT(TRACE, 2, "failed"); 950 951 return rc; 952 } 953 954 static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) 955 { 956 if (cast_type == RTN_MULTICAST) 957 return QETH_CAST_MULTICAST; 958 if (cast_type == RTN_BROADCAST) 959 return QETH_CAST_BROADCAST; 960 return QETH_CAST_UNICAST; 961 } 962 963 static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) 964 { 965 u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6; 966 if (cast_type == RTN_MULTICAST) 967 return ct | QETH_CAST_MULTICAST; 968 if (cast_type == RTN_ANYCAST) 969 return ct | QETH_CAST_ANYCAST; 970 if (cast_type == RTN_BROADCAST) 971 return ct | QETH_CAST_BROADCAST; 972 return ct | QETH_CAST_UNICAST; 973 } 974 975 static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command, 976 __u32 mode) 977 { 978 int rc; 979 struct qeth_cmd_buffer *iob; 980 struct qeth_ipa_cmd *cmd; 981 982 QETH_DBF_TEXT(TRACE, 4, "adpmode"); 983 984 iob = qeth_get_adapter_cmd(card, command, 985 sizeof(struct qeth_ipacmd_setadpparms)); 986 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 987 cmd->data.setadapterparms.data.mode = mode; 988 rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb, 989 NULL); 990 return rc; 991 } 992 993 static int qeth_l3_setadapter_hstr(struct qeth_card *card) 994 { 995 int rc; 996 997 QETH_DBF_TEXT(TRACE, 4, "adphstr"); 998 999 if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) { 1000 rc = qeth_l3_send_setadp_mode(card, 1001 IPA_SETADP_SET_BROADCAST_MODE, 1002 card->options.broadcast_mode); 1003 if (rc) 1004 QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on " 1005 "device %s: x%x\n", 1006 CARD_BUS_ID(card), rc); 1007 rc = qeth_l3_send_setadp_mode(card, 1008 IPA_SETADP_ALTER_MAC_ADDRESS, 1009 card->options.macaddr_mode); 1010 if (rc) 1011 QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on " 1012 "device %s: x%x\n", CARD_BUS_ID(card), rc); 1013 return rc; 1014 } 1015 if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL) 1016 QETH_DBF_MESSAGE(2, "set adapter parameters not available " 1017 "to set broadcast mode, using ALLRINGS " 1018 "on device %s:\n", CARD_BUS_ID(card)); 1019 if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL) 1020 QETH_DBF_MESSAGE(2, "set adapter parameters not available " 1021 "to set macaddr mode, using NONCANONICAL " 1022 "on device %s:\n", CARD_BUS_ID(card)); 1023 return 0; 1024 } 1025 1026 static int qeth_l3_setadapter_parms(struct qeth_card *card) 1027 { 1028 int rc; 1029 1030 QETH_DBF_TEXT(SETUP, 2, "setadprm"); 1031 1032 if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 1033 dev_info(&card->gdev->dev, 1034 "set adapter parameters not supported.\n"); 1035 QETH_DBF_TEXT(SETUP, 2, " notsupp"); 1036 return 0; 1037 } 1038 rc = qeth_query_setadapterparms(card); 1039 if (rc) { 1040 QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: " 1041 "0x%x\n", dev_name(&card->gdev->dev), rc); 1042 return rc; 1043 } 1044 if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { 1045 rc = qeth_setadpparms_change_macaddr(card); 1046 if (rc) 1047 dev_warn(&card->gdev->dev, "Reading the adapter MAC" 1048 " address failed\n"); 1049 } 1050 1051 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || 1052 
(card->info.link_type == QETH_LINK_TYPE_LANE_TR)) 1053 rc = qeth_l3_setadapter_hstr(card); 1054 1055 return rc; 1056 } 1057 1058 static int qeth_l3_default_setassparms_cb(struct qeth_card *card, 1059 struct qeth_reply *reply, unsigned long data) 1060 { 1061 struct qeth_ipa_cmd *cmd; 1062 1063 QETH_DBF_TEXT(TRACE, 4, "defadpcb"); 1064 1065 cmd = (struct qeth_ipa_cmd *) data; 1066 if (cmd->hdr.return_code == 0) { 1067 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 1068 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 1069 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 1070 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 1071 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 1072 } 1073 if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && 1074 cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { 1075 card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; 1076 QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask); 1077 } 1078 return 0; 1079 } 1080 1081 static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( 1082 struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code, 1083 __u16 len, enum qeth_prot_versions prot) 1084 { 1085 struct qeth_cmd_buffer *iob; 1086 struct qeth_ipa_cmd *cmd; 1087 1088 QETH_DBF_TEXT(TRACE, 4, "getasscm"); 1089 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); 1090 1091 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1092 cmd->data.setassparms.hdr.assist_no = ipa_func; 1093 cmd->data.setassparms.hdr.length = 8 + len; 1094 cmd->data.setassparms.hdr.command_code = cmd_code; 1095 cmd->data.setassparms.hdr.return_code = 0; 1096 cmd->data.setassparms.hdr.seq_no = 0; 1097 1098 return iob; 1099 } 1100 1101 static int qeth_l3_send_setassparms(struct qeth_card *card, 1102 struct qeth_cmd_buffer *iob, __u16 len, long data, 1103 int (*reply_cb)(struct qeth_card *, struct qeth_reply *, 1104 unsigned long), 1105 void *reply_param) 1106 { 1107 int rc; 1108 struct qeth_ipa_cmd *cmd; 1109 1110 QETH_DBF_TEXT(TRACE, 4, "sendassp"); 1111 1112 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1113 if (len <= sizeof(__u32)) 1114 cmd->data.setassparms.data.flags_32bit = (__u32) data; 1115 else /* (len > sizeof(__u32)) */ 1116 memcpy(&cmd->data.setassparms.data, (void *) data, len); 1117 1118 rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param); 1119 return rc; 1120 } 1121 1122 #ifdef CONFIG_QETH_IPV6 1123 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, 1124 enum qeth_ipa_funcs ipa_func, __u16 cmd_code) 1125 { 1126 int rc; 1127 struct qeth_cmd_buffer *iob; 1128 1129 QETH_DBF_TEXT(TRACE, 4, "simassp6"); 1130 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 1131 0, QETH_PROT_IPV6); 1132 rc = qeth_l3_send_setassparms(card, iob, 0, 0, 1133 qeth_l3_default_setassparms_cb, NULL); 1134 return rc; 1135 } 1136 #endif 1137 1138 static int qeth_l3_send_simple_setassparms(struct qeth_card *card, 1139 enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data) 1140 { 1141 int rc; 1142 int length = 0; 1143 struct qeth_cmd_buffer *iob; 1144 1145 QETH_DBF_TEXT(TRACE, 4, "simassp4"); 1146 if (data) 1147 length = sizeof(__u32); 1148 iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 1149 length, QETH_PROT_IPV4); 1150 rc = qeth_l3_send_setassparms(card, iob, length, data, 1151 qeth_l3_default_setassparms_cb, NULL); 1152 return rc; 1153 } 1154 1155 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) 1156 { 1157 int rc; 1158 1159 
QETH_DBF_TEXT(TRACE, 3, "ipaarp"); 1160 1161 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 1162 dev_info(&card->gdev->dev, 1163 "ARP processing not supported on %s!\n", 1164 QETH_CARD_IFNAME(card)); 1165 return 0; 1166 } 1167 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, 1168 IPA_CMD_ASS_START, 0); 1169 if (rc) { 1170 dev_warn(&card->gdev->dev, 1171 "Starting ARP processing support for %s failed\n", 1172 QETH_CARD_IFNAME(card)); 1173 } 1174 return rc; 1175 } 1176 1177 static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) 1178 { 1179 int rc; 1180 1181 QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); 1182 1183 if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { 1184 dev_info(&card->gdev->dev, 1185 "Hardware IP fragmentation not supported on %s\n", 1186 QETH_CARD_IFNAME(card)); 1187 return -EOPNOTSUPP; 1188 } 1189 1190 rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, 1191 IPA_CMD_ASS_START, 0); 1192 if (rc) { 1193 dev_warn(&card->gdev->dev, 1194 "Starting IP fragmentation support for %s failed\n", 1195 QETH_CARD_IFNAME(card)); 1196 } else 1197 dev_info(&card->gdev->dev, 1198 "Hardware IP fragmentation enabled \n"); 1199 return rc; 1200 } 1201 1202 static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) 1203 { 1204 int rc; 1205 1206 QETH_DBF_TEXT(TRACE, 3, "stsrcmac"); 1207 1208 if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { 1209 dev_info(&card->gdev->dev, 1210 "Inbound source MAC-address not supported on %s\n", 1211 QETH_CARD_IFNAME(card)); 1212 return -EOPNOTSUPP; 1213 } 1214 1215 rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC, 1216 IPA_CMD_ASS_START, 0); 1217 if (rc) 1218 dev_warn(&card->gdev->dev, 1219 "Starting source MAC-address support for %s failed\n", 1220 QETH_CARD_IFNAME(card)); 1221 return rc; 1222 } 1223 1224 static int qeth_l3_start_ipa_vlan(struct qeth_card *card) 1225 { 1226 int rc = 0; 1227 1228 QETH_DBF_TEXT(TRACE, 3, "strtvlan"); 1229 1230 if (!qeth_is_supported(card, IPA_FULL_VLAN)) { 1231 dev_info(&card->gdev->dev, 1232 "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); 1233 return -EOPNOTSUPP; 1234 } 1235 1236 rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO, 1237 IPA_CMD_ASS_START, 0); 1238 if (rc) { 1239 dev_warn(&card->gdev->dev, 1240 "Starting VLAN support for %s failed\n", 1241 QETH_CARD_IFNAME(card)); 1242 } else { 1243 dev_info(&card->gdev->dev, "VLAN enabled\n"); 1244 } 1245 return rc; 1246 } 1247 1248 static int qeth_l3_start_ipa_multicast(struct qeth_card *card) 1249 { 1250 int rc; 1251 1252 QETH_DBF_TEXT(TRACE, 3, "stmcast"); 1253 1254 if (!qeth_is_supported(card, IPA_MULTICASTING)) { 1255 dev_info(&card->gdev->dev, 1256 "Multicast not supported on %s\n", 1257 QETH_CARD_IFNAME(card)); 1258 return -EOPNOTSUPP; 1259 } 1260 1261 rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING, 1262 IPA_CMD_ASS_START, 0); 1263 if (rc) { 1264 dev_warn(&card->gdev->dev, 1265 "Starting multicast support for %s failed\n", 1266 QETH_CARD_IFNAME(card)); 1267 } else { 1268 dev_info(&card->gdev->dev, "Multicast enabled\n"); 1269 card->dev->flags |= IFF_MULTICAST; 1270 } 1271 return rc; 1272 } 1273 1274 static int qeth_l3_query_ipassists_cb(struct qeth_card *card, 1275 struct qeth_reply *reply, unsigned long data) 1276 { 1277 struct qeth_ipa_cmd *cmd; 1278 1279 QETH_DBF_TEXT(SETUP, 2, "qipasscb"); 1280 1281 cmd = (struct qeth_ipa_cmd *) data; 1282 if (cmd->hdr.prot_version == QETH_PROT_IPV4) { 1283 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; 1284 card->options.ipa4.enabled_funcs = 
cmd->hdr.ipa_enabled; 1285 } else { 1286 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; 1287 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 1288 } 1289 QETH_DBF_TEXT(SETUP, 2, "suppenbl"); 1290 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported); 1291 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled); 1292 return 0; 1293 } 1294 1295 static int qeth_l3_query_ipassists(struct qeth_card *card, 1296 enum qeth_prot_versions prot) 1297 { 1298 int rc; 1299 struct qeth_cmd_buffer *iob; 1300 1301 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); 1302 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); 1303 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL); 1304 return rc; 1305 } 1306 1307 #ifdef CONFIG_QETH_IPV6 1308 static int qeth_l3_softsetup_ipv6(struct qeth_card *card) 1309 { 1310 int rc; 1311 1312 QETH_DBF_TEXT(TRACE, 3, "softipv6"); 1313 1314 if (card->info.type == QETH_CARD_TYPE_IQD) 1315 goto out; 1316 1317 rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); 1318 if (rc) { 1319 dev_err(&card->gdev->dev, 1320 "Activating IPv6 support for %s failed\n", 1321 QETH_CARD_IFNAME(card)); 1322 return rc; 1323 } 1324 rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6, 1325 IPA_CMD_ASS_START, 3); 1326 if (rc) { 1327 dev_err(&card->gdev->dev, 1328 "Activating IPv6 support for %s failed\n", 1329 QETH_CARD_IFNAME(card)); 1330 return rc; 1331 } 1332 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, 1333 IPA_CMD_ASS_START); 1334 if (rc) { 1335 dev_err(&card->gdev->dev, 1336 "Activating IPv6 support for %s failed\n", 1337 QETH_CARD_IFNAME(card)); 1338 return rc; 1339 } 1340 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, 1341 IPA_CMD_ASS_START); 1342 if (rc) { 1343 dev_warn(&card->gdev->dev, 1344 "Enabling the passthrough mode for %s failed\n", 1345 QETH_CARD_IFNAME(card)); 1346 return rc; 1347 } 1348 out: 1349 dev_info(&card->gdev->dev, "IPV6 enabled\n"); 1350 return 0; 1351 } 1352 #endif 1353 1354 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) 1355 { 1356 int rc = 0; 1357 1358 QETH_DBF_TEXT(TRACE, 3, "strtipv6"); 1359 1360 if (!qeth_is_supported(card, IPA_IPV6)) { 1361 dev_info(&card->gdev->dev, 1362 "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); 1363 return 0; 1364 } 1365 #ifdef CONFIG_QETH_IPV6 1366 rc = qeth_l3_softsetup_ipv6(card); 1367 #endif 1368 return rc ; 1369 } 1370 1371 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) 1372 { 1373 int rc; 1374 1375 QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); 1376 card->info.broadcast_capable = 0; 1377 if (!qeth_is_supported(card, IPA_FILTERING)) { 1378 dev_info(&card->gdev->dev, 1379 "Broadcast not supported on %s\n", 1380 QETH_CARD_IFNAME(card)); 1381 rc = -EOPNOTSUPP; 1382 goto out; 1383 } 1384 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, 1385 IPA_CMD_ASS_START, 0); 1386 if (rc) { 1387 dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " 1388 "%s failed\n", QETH_CARD_IFNAME(card)); 1389 goto out; 1390 } 1391 1392 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, 1393 IPA_CMD_ASS_CONFIGURE, 1); 1394 if (rc) { 1395 dev_warn(&card->gdev->dev, 1396 "Setting up broadcast filtering for %s failed\n", 1397 QETH_CARD_IFNAME(card)); 1398 goto out; 1399 } 1400 card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; 1401 dev_info(&card->gdev->dev, "Broadcast enabled\n"); 1402 rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, 1403 IPA_CMD_ASS_ENABLE, 1); 1404 if (rc) { 1405 dev_warn(&card->gdev->dev, "Setting up broadcast echo " 1406 
"filtering for %s failed\n", QETH_CARD_IFNAME(card)); 1407 goto out; 1408 } 1409 card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; 1410 out: 1411 if (card->info.broadcast_capable) 1412 card->dev->flags |= IFF_BROADCAST; 1413 else 1414 card->dev->flags &= ~IFF_BROADCAST; 1415 return rc; 1416 } 1417 1418 static int qeth_l3_send_checksum_command(struct qeth_card *card) 1419 { 1420 int rc; 1421 1422 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, 1423 IPA_CMD_ASS_START, 0); 1424 if (rc) { 1425 dev_warn(&card->gdev->dev, "Starting HW checksumming for %s " 1426 "failed, using SW checksumming\n", 1427 QETH_CARD_IFNAME(card)); 1428 return rc; 1429 } 1430 rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, 1431 IPA_CMD_ASS_ENABLE, 1432 card->info.csum_mask); 1433 if (rc) { 1434 dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s " 1435 "failed, using SW checksumming\n", 1436 QETH_CARD_IFNAME(card)); 1437 return rc; 1438 } 1439 return 0; 1440 } 1441 1442 static int qeth_l3_start_ipa_checksum(struct qeth_card *card) 1443 { 1444 int rc = 0; 1445 1446 QETH_DBF_TEXT(TRACE, 3, "strtcsum"); 1447 1448 if (card->options.checksum_type == NO_CHECKSUMMING) { 1449 dev_info(&card->gdev->dev, 1450 "Using no checksumming on %s.\n", 1451 QETH_CARD_IFNAME(card)); 1452 return 0; 1453 } 1454 if (card->options.checksum_type == SW_CHECKSUMMING) { 1455 dev_info(&card->gdev->dev, 1456 "Using SW checksumming on %s.\n", 1457 QETH_CARD_IFNAME(card)); 1458 return 0; 1459 } 1460 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { 1461 dev_info(&card->gdev->dev, 1462 "Inbound HW Checksumming not " 1463 "supported on %s,\ncontinuing " 1464 "using Inbound SW Checksumming\n", 1465 QETH_CARD_IFNAME(card)); 1466 card->options.checksum_type = SW_CHECKSUMMING; 1467 return 0; 1468 } 1469 rc = qeth_l3_send_checksum_command(card); 1470 if (!rc) 1471 dev_info(&card->gdev->dev, 1472 "HW Checksumming (inbound) enabled\n"); 1473 1474 return rc; 1475 } 1476 1477 static int qeth_l3_start_ipa_tso(struct qeth_card *card) 1478 { 1479 int rc; 1480 1481 QETH_DBF_TEXT(TRACE, 3, "sttso"); 1482 1483 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { 1484 dev_info(&card->gdev->dev, 1485 "Outbound TSO not supported on %s\n", 1486 QETH_CARD_IFNAME(card)); 1487 rc = -EOPNOTSUPP; 1488 } else { 1489 rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO, 1490 IPA_CMD_ASS_START, 0); 1491 if (rc) 1492 dev_warn(&card->gdev->dev, "Starting outbound TCP " 1493 "segmentation offload for %s failed\n", 1494 QETH_CARD_IFNAME(card)); 1495 else 1496 dev_info(&card->gdev->dev, 1497 "Outbound TSO enabled\n"); 1498 } 1499 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { 1500 card->options.large_send = QETH_LARGE_SEND_NO; 1501 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); 1502 } 1503 return rc; 1504 } 1505 1506 static int qeth_l3_start_ipassists(struct qeth_card *card) 1507 { 1508 QETH_DBF_TEXT(TRACE, 3, "strtipas"); 1509 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 1510 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ 1511 qeth_l3_start_ipa_source_mac(card); /* go on*/ 1512 qeth_l3_start_ipa_vlan(card); /* go on*/ 1513 qeth_l3_start_ipa_multicast(card); /* go on*/ 1514 qeth_l3_start_ipa_ipv6(card); /* go on*/ 1515 qeth_l3_start_ipa_broadcast(card); /* go on*/ 1516 qeth_l3_start_ipa_checksum(card); /* go on*/ 1517 qeth_l3_start_ipa_tso(card); /* go on*/ 1518 return 0; 1519 } 1520 1521 static int qeth_l3_put_unique_id(struct qeth_card *card) 1522 { 1523 1524 int rc = 0; 1525 struct 
qeth_cmd_buffer *iob; 1526 struct qeth_ipa_cmd *cmd; 1527 1528 QETH_DBF_TEXT(TRACE, 2, "puniqeid"); 1529 1530 if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == 1531 UNIQUE_ID_NOT_BY_CARD) 1532 return -1; 1533 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR, 1534 QETH_PROT_IPV6); 1535 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1536 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1537 card->info.unique_id; 1538 memcpy(&cmd->data.create_destroy_addr.unique_id[0], 1539 card->dev->dev_addr, OSA_ADDR_LEN); 1540 rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); 1541 return rc; 1542 } 1543 1544 static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, 1545 struct qeth_reply *reply, unsigned long data) 1546 { 1547 struct qeth_ipa_cmd *cmd; 1548 1549 cmd = (struct qeth_ipa_cmd *) data; 1550 if (cmd->hdr.return_code == 0) 1551 memcpy(card->dev->dev_addr, 1552 cmd->data.create_destroy_addr.unique_id, ETH_ALEN); 1553 else 1554 random_ether_addr(card->dev->dev_addr); 1555 1556 return 0; 1557 } 1558 1559 static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) 1560 { 1561 int rc = 0; 1562 struct qeth_cmd_buffer *iob; 1563 struct qeth_ipa_cmd *cmd; 1564 1565 QETH_DBF_TEXT(SETUP, 2, "hsrmac"); 1566 1567 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1568 QETH_PROT_IPV6); 1569 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1570 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1571 card->info.unique_id; 1572 1573 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, 1574 NULL); 1575 return rc; 1576 } 1577 1578 static int qeth_l3_get_unique_id_cb(struct qeth_card *card, 1579 struct qeth_reply *reply, unsigned long data) 1580 { 1581 struct qeth_ipa_cmd *cmd; 1582 1583 cmd = (struct qeth_ipa_cmd *) data; 1584 if (cmd->hdr.return_code == 0) 1585 card->info.unique_id = *((__u16 *) 1586 &cmd->data.create_destroy_addr.unique_id[6]); 1587 else { 1588 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1589 UNIQUE_ID_NOT_BY_CARD; 1590 dev_warn(&card->gdev->dev, "The network adapter failed to " 1591 "generate a unique ID\n"); 1592 } 1593 return 0; 1594 } 1595 1596 static int qeth_l3_get_unique_id(struct qeth_card *card) 1597 { 1598 int rc = 0; 1599 struct qeth_cmd_buffer *iob; 1600 struct qeth_ipa_cmd *cmd; 1601 1602 QETH_DBF_TEXT(SETUP, 2, "guniqeid"); 1603 1604 if (!qeth_is_supported(card, IPA_IPV6)) { 1605 card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | 1606 UNIQUE_ID_NOT_BY_CARD; 1607 return 0; 1608 } 1609 1610 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, 1611 QETH_PROT_IPV6); 1612 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 1613 *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = 1614 card->info.unique_id; 1615 1616 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL); 1617 return rc; 1618 } 1619 1620 static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac, 1621 struct net_device *dev) 1622 { 1623 if (dev->type == ARPHRD_IEEE802_TR) 1624 ip_tr_mc_map(ipm, mac); 1625 else 1626 ip_eth_mc_map(ipm, mac); 1627 } 1628 1629 static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev) 1630 { 1631 struct qeth_ipaddr *ipm; 1632 struct ip_mc_list *im4; 1633 char buf[MAX_ADDR_LEN]; 1634 1635 QETH_DBF_TEXT(TRACE, 4, "addmc"); 1636 for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { 1637 qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); 1638 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1639 if (!ipm) 1640 continue; 1641 ipm->u.a4.addr 
= im4->multiaddr; 1642 memcpy(ipm->mac, buf, OSA_ADDR_LEN); 1643 ipm->is_multicast = 1; 1644 if (!qeth_l3_add_ip(card, ipm)) 1645 kfree(ipm); 1646 } 1647 } 1648 1649 static void qeth_l3_add_vlan_mc(struct qeth_card *card) 1650 { 1651 struct in_device *in_dev; 1652 struct vlan_group *vg; 1653 int i; 1654 1655 QETH_DBF_TEXT(TRACE, 4, "addmcvl"); 1656 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) 1657 return; 1658 1659 vg = card->vlangrp; 1660 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 1661 struct net_device *netdev = vlan_group_get_device(vg, i); 1662 if (netdev == NULL || 1663 !(netdev->flags & IFF_UP)) 1664 continue; 1665 in_dev = in_dev_get(netdev); 1666 if (!in_dev) 1667 continue; 1668 read_lock(&in_dev->mc_list_lock); 1669 qeth_l3_add_mc(card, in_dev); 1670 read_unlock(&in_dev->mc_list_lock); 1671 in_dev_put(in_dev); 1672 } 1673 } 1674 1675 static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) 1676 { 1677 struct in_device *in4_dev; 1678 1679 QETH_DBF_TEXT(TRACE, 4, "chkmcv4"); 1680 in4_dev = in_dev_get(card->dev); 1681 if (in4_dev == NULL) 1682 return; 1683 read_lock(&in4_dev->mc_list_lock); 1684 qeth_l3_add_mc(card, in4_dev); 1685 qeth_l3_add_vlan_mc(card); 1686 read_unlock(&in4_dev->mc_list_lock); 1687 in_dev_put(in4_dev); 1688 } 1689 1690 #ifdef CONFIG_QETH_IPV6 1691 static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) 1692 { 1693 struct qeth_ipaddr *ipm; 1694 struct ifmcaddr6 *im6; 1695 char buf[MAX_ADDR_LEN]; 1696 1697 QETH_DBF_TEXT(TRACE, 4, "addmc6"); 1698 for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { 1699 ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); 1700 ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1701 if (!ipm) 1702 continue; 1703 ipm->is_multicast = 1; 1704 memcpy(ipm->mac, buf, OSA_ADDR_LEN); 1705 memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr, 1706 sizeof(struct in6_addr)); 1707 if (!qeth_l3_add_ip(card, ipm)) 1708 kfree(ipm); 1709 } 1710 } 1711 1712 static void qeth_l3_add_vlan_mc6(struct qeth_card *card) 1713 { 1714 struct inet6_dev *in_dev; 1715 struct vlan_group *vg; 1716 int i; 1717 1718 QETH_DBF_TEXT(TRACE, 4, "admc6vl"); 1719 if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL)) 1720 return; 1721 1722 vg = card->vlangrp; 1723 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 1724 struct net_device *netdev = vlan_group_get_device(vg, i); 1725 if (netdev == NULL || 1726 !(netdev->flags & IFF_UP)) 1727 continue; 1728 in_dev = in6_dev_get(netdev); 1729 if (!in_dev) 1730 continue; 1731 read_lock_bh(&in_dev->lock); 1732 qeth_l3_add_mc6(card, in_dev); 1733 read_unlock_bh(&in_dev->lock); 1734 in6_dev_put(in_dev); 1735 } 1736 } 1737 1738 static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) 1739 { 1740 struct inet6_dev *in6_dev; 1741 1742 QETH_DBF_TEXT(TRACE, 4, "chkmcv6"); 1743 if (!qeth_is_supported(card, IPA_IPV6)) 1744 return ; 1745 in6_dev = in6_dev_get(card->dev); 1746 if (in6_dev == NULL) 1747 return; 1748 read_lock_bh(&in6_dev->lock); 1749 qeth_l3_add_mc6(card, in6_dev); 1750 qeth_l3_add_vlan_mc6(card); 1751 read_unlock_bh(&in6_dev->lock); 1752 in6_dev_put(in6_dev); 1753 } 1754 #endif /* CONFIG_QETH_IPV6 */ 1755 1756 static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, 1757 unsigned short vid) 1758 { 1759 struct in_device *in_dev; 1760 struct in_ifaddr *ifa; 1761 struct qeth_ipaddr *addr; 1762 1763 QETH_DBF_TEXT(TRACE, 4, "frvaddr4"); 1764 1765 in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid)); 1766 if (!in_dev) 1767 return; 1768 for 
(ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 1769 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 1770 if (addr) { 1771 addr->u.a4.addr = ifa->ifa_address; 1772 addr->u.a4.mask = ifa->ifa_mask; 1773 addr->type = QETH_IP_TYPE_NORMAL; 1774 if (!qeth_l3_delete_ip(card, addr)) 1775 kfree(addr); 1776 } 1777 } 1778 in_dev_put(in_dev); 1779 } 1780 1781 static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, 1782 unsigned short vid) 1783 { 1784 #ifdef CONFIG_QETH_IPV6 1785 struct inet6_dev *in6_dev; 1786 struct inet6_ifaddr *ifa; 1787 struct qeth_ipaddr *addr; 1788 1789 QETH_DBF_TEXT(TRACE, 4, "frvaddr6"); 1790 1791 in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); 1792 if (!in6_dev) 1793 return; 1794 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) { 1795 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1796 if (addr) { 1797 memcpy(&addr->u.a6.addr, &ifa->addr, 1798 sizeof(struct in6_addr)); 1799 addr->u.a6.pfxlen = ifa->prefix_len; 1800 addr->type = QETH_IP_TYPE_NORMAL; 1801 if (!qeth_l3_delete_ip(card, addr)) 1802 kfree(addr); 1803 } 1804 } 1805 in6_dev_put(in6_dev); 1806 #endif /* CONFIG_QETH_IPV6 */ 1807 } 1808 1809 static void qeth_l3_free_vlan_addresses(struct qeth_card *card, 1810 unsigned short vid) 1811 { 1812 if (!card->vlangrp) 1813 return; 1814 qeth_l3_free_vlan_addresses4(card, vid); 1815 qeth_l3_free_vlan_addresses6(card, vid); 1816 } 1817 1818 static void qeth_l3_vlan_rx_register(struct net_device *dev, 1819 struct vlan_group *grp) 1820 { 1821 struct qeth_card *card = dev->ml_priv; 1822 unsigned long flags; 1823 1824 QETH_DBF_TEXT(TRACE, 4, "vlanreg"); 1825 spin_lock_irqsave(&card->vlanlock, flags); 1826 card->vlangrp = grp; 1827 spin_unlock_irqrestore(&card->vlanlock, flags); 1828 } 1829 1830 static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 1831 { 1832 return; 1833 } 1834 1835 static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 1836 { 1837 struct qeth_card *card = dev->ml_priv; 1838 unsigned long flags; 1839 1840 QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); 1841 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { 1842 QETH_DBF_TEXT(TRACE, 3, "kidREC"); 1843 return; 1844 } 1845 spin_lock_irqsave(&card->vlanlock, flags); 1846 /* unregister IP addresses of vlan device */ 1847 qeth_l3_free_vlan_addresses(card, vid); 1848 vlan_group_set_device(card->vlangrp, vid, NULL); 1849 spin_unlock_irqrestore(&card->vlanlock, flags); 1850 qeth_l3_set_multicast_list(card->dev); 1851 } 1852 1853 static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, 1854 struct sk_buff *skb, struct qeth_hdr *hdr) 1855 { 1856 unsigned short vlan_id = 0; 1857 __be16 prot; 1858 struct iphdr *ip_hdr; 1859 unsigned char tg_addr[MAX_ADDR_LEN]; 1860 1861 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { 1862 prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : 1863 ETH_P_IP); 1864 switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) { 1865 case QETH_CAST_MULTICAST: 1866 switch (prot) { 1867 #ifdef CONFIG_QETH_IPV6 1868 case __constant_htons(ETH_P_IPV6): 1869 ndisc_mc_map((struct in6_addr *) 1870 skb->data + 24, 1871 tg_addr, card->dev, 0); 1872 break; 1873 #endif 1874 case __constant_htons(ETH_P_IP): 1875 ip_hdr = (struct iphdr *)skb->data; 1876 (card->dev->type == ARPHRD_IEEE802_TR) ? 
1877 ip_tr_mc_map(ip_hdr->daddr, tg_addr): 1878 ip_eth_mc_map(ip_hdr->daddr, tg_addr); 1879 break; 1880 default: 1881 memcpy(tg_addr, card->dev->broadcast, 1882 card->dev->addr_len); 1883 } 1884 card->stats.multicast++; 1885 skb->pkt_type = PACKET_MULTICAST; 1886 break; 1887 case QETH_CAST_BROADCAST: 1888 memcpy(tg_addr, card->dev->broadcast, 1889 card->dev->addr_len); 1890 card->stats.multicast++; 1891 skb->pkt_type = PACKET_BROADCAST; 1892 break; 1893 case QETH_CAST_UNICAST: 1894 case QETH_CAST_ANYCAST: 1895 case QETH_CAST_NOCAST: 1896 default: 1897 skb->pkt_type = PACKET_HOST; 1898 memcpy(tg_addr, card->dev->dev_addr, 1899 card->dev->addr_len); 1900 } 1901 if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 1902 card->dev->header_ops->create(skb, card->dev, prot, 1903 tg_addr, &hdr->hdr.l3.dest_addr[2], 1904 card->dev->addr_len); 1905 else 1906 card->dev->header_ops->create(skb, card->dev, prot, 1907 tg_addr, "FAKELL", card->dev->addr_len); 1908 } 1909 1910 #ifdef CONFIG_TR 1911 if (card->dev->type == ARPHRD_IEEE802_TR) 1912 skb->protocol = tr_type_trans(skb, card->dev); 1913 else 1914 #endif 1915 skb->protocol = eth_type_trans(skb, card->dev); 1916 1917 if (hdr->hdr.l3.ext_flags & 1918 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { 1919 vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)? 1920 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]); 1921 } 1922 1923 switch (card->options.checksum_type) { 1924 case SW_CHECKSUMMING: 1925 skb->ip_summed = CHECKSUM_NONE; 1926 break; 1927 case NO_CHECKSUMMING: 1928 skb->ip_summed = CHECKSUM_UNNECESSARY; 1929 break; 1930 case HW_CHECKSUMMING: 1931 if ((hdr->hdr.l3.ext_flags & 1932 (QETH_HDR_EXT_CSUM_HDR_REQ | 1933 QETH_HDR_EXT_CSUM_TRANSP_REQ)) == 1934 (QETH_HDR_EXT_CSUM_HDR_REQ | 1935 QETH_HDR_EXT_CSUM_TRANSP_REQ)) 1936 skb->ip_summed = CHECKSUM_UNNECESSARY; 1937 else 1938 skb->ip_summed = CHECKSUM_NONE; 1939 } 1940 1941 return vlan_id; 1942 } 1943 1944 static void qeth_l3_process_inbound_buffer(struct qeth_card *card, 1945 struct qeth_qdio_buffer *buf, int index) 1946 { 1947 struct qdio_buffer_element *element; 1948 struct sk_buff *skb; 1949 struct qeth_hdr *hdr; 1950 int offset; 1951 __u16 vlan_tag = 0; 1952 unsigned int len; 1953 1954 /* get first element of current buffer */ 1955 element = (struct qdio_buffer_element *)&buf->buffer->element[0]; 1956 offset = 0; 1957 if (card->options.performance_stats) 1958 card->perf_stats.bufs_rec++; 1959 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element, 1960 &offset, &hdr))) { 1961 skb->dev = card->dev; 1962 /* is device UP ? 
*/ 1963 if (!(card->dev->flags & IFF_UP)) { 1964 dev_kfree_skb_any(skb); 1965 continue; 1966 } 1967 1968 switch (hdr->hdr.l3.id) { 1969 case QETH_HEADER_TYPE_LAYER3: 1970 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr); 1971 len = skb->len; 1972 if (vlan_tag) 1973 if (card->vlangrp) 1974 vlan_hwaccel_rx(skb, card->vlangrp, 1975 vlan_tag); 1976 else { 1977 dev_kfree_skb_any(skb); 1978 continue; 1979 } 1980 else 1981 netif_rx(skb); 1982 break; 1983 default: 1984 dev_kfree_skb_any(skb); 1985 QETH_DBF_TEXT(TRACE, 3, "inbunkno"); 1986 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 1987 continue; 1988 } 1989 1990 card->stats.rx_packets++; 1991 card->stats.rx_bytes += len; 1992 } 1993 } 1994 1995 static int qeth_l3_verify_vlan_dev(struct net_device *dev, 1996 struct qeth_card *card) 1997 { 1998 int rc = 0; 1999 struct vlan_group *vg; 2000 int i; 2001 2002 vg = card->vlangrp; 2003 if (!vg) 2004 return rc; 2005 2006 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 2007 if (vlan_group_get_device(vg, i) == dev) { 2008 rc = QETH_VLAN_CARD; 2009 break; 2010 } 2011 } 2012 2013 if (rc && !(vlan_dev_real_dev(dev)->ml_priv == (void *)card)) 2014 return 0; 2015 2016 return rc; 2017 } 2018 2019 static int qeth_l3_verify_dev(struct net_device *dev) 2020 { 2021 struct qeth_card *card; 2022 unsigned long flags; 2023 int rc = 0; 2024 2025 read_lock_irqsave(&qeth_core_card_list.rwlock, flags); 2026 list_for_each_entry(card, &qeth_core_card_list.list, list) { 2027 if (card->dev == dev) { 2028 rc = QETH_REAL_CARD; 2029 break; 2030 } 2031 rc = qeth_l3_verify_vlan_dev(dev, card); 2032 if (rc) 2033 break; 2034 } 2035 read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); 2036 2037 return rc; 2038 } 2039 2040 static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) 2041 { 2042 struct qeth_card *card = NULL; 2043 int rc; 2044 2045 rc = qeth_l3_verify_dev(dev); 2046 if (rc == QETH_REAL_CARD) 2047 card = dev->ml_priv; 2048 else if (rc == QETH_VLAN_CARD) 2049 card = vlan_dev_real_dev(dev)->ml_priv; 2050 if (card && card->options.layer2) 2051 card = NULL; 2052 QETH_DBF_TEXT_(TRACE, 4, "%d", rc); 2053 return card ; 2054 } 2055 2056 static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) 2057 { 2058 int rc = 0; 2059 2060 QETH_DBF_TEXT(SETUP, 2, "stopcard"); 2061 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 2062 2063 qeth_set_allowed_threads(card, 0, 1); 2064 if (card->read.state == CH_STATE_UP && 2065 card->write.state == CH_STATE_UP && 2066 (card->state == CARD_STATE_UP)) { 2067 if (recovery_mode) 2068 qeth_l3_stop(card->dev); 2069 else { 2070 if (card->dev) { 2071 rtnl_lock(); 2072 dev_close(card->dev); 2073 rtnl_unlock(); 2074 } 2075 } 2076 if (!card->use_hard_stop) { 2077 rc = qeth_send_stoplan(card); 2078 if (rc) 2079 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2080 } 2081 card->state = CARD_STATE_SOFTSETUP; 2082 } 2083 if (card->state == CARD_STATE_SOFTSETUP) { 2084 qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1); 2085 qeth_clear_ipacmd_list(card); 2086 card->state = CARD_STATE_HARDSETUP; 2087 } 2088 if (card->state == CARD_STATE_HARDSETUP) { 2089 if (!card->use_hard_stop && 2090 (card->info.type != QETH_CARD_TYPE_IQD)) { 2091 rc = qeth_l3_put_unique_id(card); 2092 if (rc) 2093 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 2094 } 2095 qeth_qdio_clear_card(card, 0); 2096 qeth_clear_qdio_buffers(card); 2097 qeth_clear_working_pool_list(card); 2098 card->state = CARD_STATE_DOWN; 2099 } 2100 if (card->state == CARD_STATE_DOWN) { 2101 qeth_clear_cmd_buffers(&card->read); 2102 
qeth_clear_cmd_buffers(&card->write); 2103 } 2104 card->use_hard_stop = 0; 2105 return rc; 2106 } 2107 2108 static void qeth_l3_set_multicast_list(struct net_device *dev) 2109 { 2110 struct qeth_card *card = dev->ml_priv; 2111 2112 QETH_DBF_TEXT(TRACE, 3, "setmulti"); 2113 if (qeth_threads_running(card, QETH_RECOVER_THREAD) && 2114 (card->state != CARD_STATE_UP)) 2115 return; 2116 qeth_l3_delete_mc_addresses(card); 2117 qeth_l3_add_multicast_ipv4(card); 2118 #ifdef CONFIG_QETH_IPV6 2119 qeth_l3_add_multicast_ipv6(card); 2120 #endif 2121 qeth_l3_set_ip_addr_list(card); 2122 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 2123 return; 2124 qeth_setadp_promisc_mode(card); 2125 } 2126 2127 static const char *qeth_l3_arp_get_error_cause(int *rc) 2128 { 2129 switch (*rc) { 2130 case QETH_IPA_ARP_RC_FAILED: 2131 *rc = -EIO; 2132 return "operation failed"; 2133 case QETH_IPA_ARP_RC_NOTSUPP: 2134 *rc = -EOPNOTSUPP; 2135 return "operation not supported"; 2136 case QETH_IPA_ARP_RC_OUT_OF_RANGE: 2137 *rc = -EINVAL; 2138 return "argument out of range"; 2139 case QETH_IPA_ARP_RC_Q_NOTSUPP: 2140 *rc = -EOPNOTSUPP; 2141 return "query operation not supported"; 2142 case QETH_IPA_ARP_RC_Q_NO_DATA: 2143 *rc = -ENOENT; 2144 return "no query data available"; 2145 default: 2146 return "unknown error"; 2147 } 2148 } 2149 2150 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) 2151 { 2152 int tmp; 2153 int rc; 2154 2155 QETH_DBF_TEXT(TRACE, 3, "arpstnoe"); 2156 2157 /* 2158 * currently GuestLAN only supports the ARP assist function 2159 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; 2160 * thus we say EOPNOTSUPP for this ARP function 2161 */ 2162 if (card->info.guestlan) 2163 return -EOPNOTSUPP; 2164 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 2165 return -EOPNOTSUPP; 2166 } 2167 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, 2168 IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 2169 no_entries); 2170 if (rc) { 2171 tmp = rc; 2172 QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on " 2173 "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card), 2174 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2175 } 2176 return rc; 2177 } 2178 2179 static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, 2180 struct qeth_arp_query_data *qdata, int entry_size, 2181 int uentry_size) 2182 { 2183 char *entry_ptr; 2184 char *uentry_ptr; 2185 int i; 2186 2187 entry_ptr = (char *)&qdata->data; 2188 uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset); 2189 for (i = 0; i < qdata->no_entries; ++i) { 2190 /* strip off 32 bytes "media specific information" */ 2191 memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32); 2192 entry_ptr += entry_size; 2193 uentry_ptr += uentry_size; 2194 } 2195 } 2196 2197 static int qeth_l3_arp_query_cb(struct qeth_card *card, 2198 struct qeth_reply *reply, unsigned long data) 2199 { 2200 struct qeth_ipa_cmd *cmd; 2201 struct qeth_arp_query_data *qdata; 2202 struct qeth_arp_query_info *qinfo; 2203 int entry_size; 2204 int uentry_size; 2205 int i; 2206 2207 QETH_DBF_TEXT(TRACE, 4, "arpquecb"); 2208 2209 qinfo = (struct qeth_arp_query_info *) reply->param; 2210 cmd = (struct qeth_ipa_cmd *) data; 2211 if (cmd->hdr.return_code) { 2212 QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code); 2213 return 0; 2214 } 2215 if (cmd->data.setassparms.hdr.return_code) { 2216 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 2217 QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code); 2218 return 0; 2219 } 2220 
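/*
 * Each invocation of this callback appends one batch of ARP entries to
 * the user buffer: qdata->reply_bits selects the entry layout below
 * (5 -> qeth_arp_qi_entry5, otherwise qeth_arp_qi_entry7), and the
 * QETH_QARP_STRIP_ENTRIES mask bit requests the short variants with the
 * 32-byte media-specific prefix removed before copying to userspace.
 */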
qdata = &cmd->data.setassparms.data.query_arp; 2221 switch (qdata->reply_bits) { 2222 case 5: 2223 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5); 2224 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 2225 uentry_size = sizeof(struct qeth_arp_qi_entry5_short); 2226 break; 2227 case 7: 2228 /* fall through to default */ 2229 default: 2230 /* tr is the same as eth -> entry7 */ 2231 uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7); 2232 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 2233 uentry_size = sizeof(struct qeth_arp_qi_entry7_short); 2234 break; 2235 } 2236 /* check if there is enough room in userspace */ 2237 if ((qinfo->udata_len - qinfo->udata_offset) < 2238 qdata->no_entries * uentry_size){ 2239 QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); 2240 cmd->hdr.return_code = -ENOMEM; 2241 goto out_error; 2242 } 2243 QETH_DBF_TEXT_(TRACE, 4, "anore%i", 2244 cmd->data.setassparms.hdr.number_of_replies); 2245 QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); 2246 QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries); 2247 2248 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { 2249 /* strip off "media specific information" */ 2250 qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size, 2251 uentry_size); 2252 } else 2253 /*copy entries to user buffer*/ 2254 memcpy(qinfo->udata + qinfo->udata_offset, 2255 (char *)&qdata->data, qdata->no_entries*uentry_size); 2256 2257 qinfo->no_entries += qdata->no_entries; 2258 qinfo->udata_offset += (qdata->no_entries*uentry_size); 2259 /* check if all replies received ... */ 2260 if (cmd->data.setassparms.hdr.seq_no < 2261 cmd->data.setassparms.hdr.number_of_replies) 2262 return 1; 2263 memcpy(qinfo->udata, &qinfo->no_entries, 4); 2264 /* keep STRIP_ENTRIES flag so the user program can distinguish 2265 * stripped entries from normal ones */ 2266 if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) 2267 qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; 2268 memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); 2269 return 0; 2270 out_error: 2271 i = 0; 2272 memcpy(qinfo->udata, &i, 4); 2273 return 0; 2274 } 2275 2276 static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card, 2277 struct qeth_cmd_buffer *iob, int len, 2278 int (*reply_cb)(struct qeth_card *, struct qeth_reply *, 2279 unsigned long), 2280 void *reply_param) 2281 { 2282 QETH_DBF_TEXT(TRACE, 4, "sendarp"); 2283 2284 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 2285 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 2286 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2287 return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, 2288 reply_cb, reply_param); 2289 } 2290 2291 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) 2292 { 2293 struct qeth_cmd_buffer *iob; 2294 struct qeth_arp_query_info qinfo = {0, }; 2295 int tmp; 2296 int rc; 2297 2298 QETH_DBF_TEXT(TRACE, 3, "arpquery"); 2299 2300 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ 2301 IPA_ARP_PROCESSING)) { 2302 return -EOPNOTSUPP; 2303 } 2304 /* get size of userspace buffer and mask_bits -> 6 bytes */ 2305 if (copy_from_user(&qinfo, udata, 6)) 2306 return -EFAULT; 2307 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 2308 if (!qinfo.udata) 2309 return -ENOMEM; 2310 qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; 2311 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 2312 IPA_CMD_ASS_ARP_QUERY_INFO, 2313 sizeof(int), QETH_PROT_IPV4); 2314 2315 rc = qeth_l3_send_ipa_arp_cmd(card, iob, 2316 
QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN, 2317 qeth_l3_arp_query_cb, (void *)&qinfo); 2318 if (rc) { 2319 tmp = rc; 2320 QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s " 2321 "(0x%x/%d)\n", QETH_CARD_IFNAME(card), 2322 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2323 if (copy_to_user(udata, qinfo.udata, 4)) 2324 rc = -EFAULT; 2325 } else { 2326 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 2327 rc = -EFAULT; 2328 } 2329 kfree(qinfo.udata); 2330 return rc; 2331 } 2332 2333 static int qeth_l3_arp_add_entry(struct qeth_card *card, 2334 struct qeth_arp_cache_entry *entry) 2335 { 2336 struct qeth_cmd_buffer *iob; 2337 char buf[16]; 2338 int tmp; 2339 int rc; 2340 2341 QETH_DBF_TEXT(TRACE, 3, "arpadent"); 2342 2343 /* 2344 * currently GuestLAN only supports the ARP assist function 2345 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; 2346 * thus we say EOPNOTSUPP for this ARP function 2347 */ 2348 if (card->info.guestlan) 2349 return -EOPNOTSUPP; 2350 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 2351 return -EOPNOTSUPP; 2352 } 2353 2354 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 2355 IPA_CMD_ASS_ARP_ADD_ENTRY, 2356 sizeof(struct qeth_arp_cache_entry), 2357 QETH_PROT_IPV4); 2358 rc = qeth_l3_send_setassparms(card, iob, 2359 sizeof(struct qeth_arp_cache_entry), 2360 (unsigned long) entry, 2361 qeth_l3_default_setassparms_cb, NULL); 2362 if (rc) { 2363 tmp = rc; 2364 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); 2365 QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s " 2366 "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), 2367 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2368 } 2369 return rc; 2370 } 2371 2372 static int qeth_l3_arp_remove_entry(struct qeth_card *card, 2373 struct qeth_arp_cache_entry *entry) 2374 { 2375 struct qeth_cmd_buffer *iob; 2376 char buf[16] = {0, }; 2377 int tmp; 2378 int rc; 2379 2380 QETH_DBF_TEXT(TRACE, 3, "arprment"); 2381 2382 /* 2383 * currently GuestLAN only supports the ARP assist function 2384 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY; 2385 * thus we say EOPNOTSUPP for this ARP function 2386 */ 2387 if (card->info.guestlan) 2388 return -EOPNOTSUPP; 2389 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 2390 return -EOPNOTSUPP; 2391 } 2392 memcpy(buf, entry, 12); 2393 iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING, 2394 IPA_CMD_ASS_ARP_REMOVE_ENTRY, 2395 12, 2396 QETH_PROT_IPV4); 2397 rc = qeth_l3_send_setassparms(card, iob, 2398 12, (unsigned long)buf, 2399 qeth_l3_default_setassparms_cb, NULL); 2400 if (rc) { 2401 tmp = rc; 2402 memset(buf, 0, 16); 2403 qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); 2404 QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s" 2405 " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), 2406 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2407 } 2408 return rc; 2409 } 2410 2411 static int qeth_l3_arp_flush_cache(struct qeth_card *card) 2412 { 2413 int rc; 2414 int tmp; 2415 2416 QETH_DBF_TEXT(TRACE, 3, "arpflush"); 2417 2418 /* 2419 * currently GuestLAN only supports the ARP assist function 2420 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; 2421 * thus we say EOPNOTSUPP for this ARP function 2422 */ 2423 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) 2424 return -EOPNOTSUPP; 2425 if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { 2426 return -EOPNOTSUPP; 2427 } 2428 rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, 2429 
IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); 2430 if (rc) { 2431 tmp = rc; 2432 QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s " 2433 "(0x%x/%d)\n", QETH_CARD_IFNAME(card), 2434 qeth_l3_arp_get_error_cause(&rc), tmp, tmp); 2435 } 2436 return rc; 2437 } 2438 2439 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2440 { 2441 struct qeth_card *card = dev->ml_priv; 2442 struct qeth_arp_cache_entry arp_entry; 2443 struct mii_ioctl_data *mii_data; 2444 int rc = 0; 2445 2446 if (!card) 2447 return -ENODEV; 2448 2449 if ((card->state != CARD_STATE_UP) && 2450 (card->state != CARD_STATE_SOFTSETUP)) 2451 return -ENODEV; 2452 2453 switch (cmd) { 2454 case SIOC_QETH_ARP_SET_NO_ENTRIES: 2455 if (!capable(CAP_NET_ADMIN)) { 2456 rc = -EPERM; 2457 break; 2458 } 2459 rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue); 2460 break; 2461 case SIOC_QETH_ARP_QUERY_INFO: 2462 if (!capable(CAP_NET_ADMIN)) { 2463 rc = -EPERM; 2464 break; 2465 } 2466 rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); 2467 break; 2468 case SIOC_QETH_ARP_ADD_ENTRY: 2469 if (!capable(CAP_NET_ADMIN)) { 2470 rc = -EPERM; 2471 break; 2472 } 2473 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, 2474 sizeof(struct qeth_arp_cache_entry))) 2475 rc = -EFAULT; 2476 else 2477 rc = qeth_l3_arp_add_entry(card, &arp_entry); 2478 break; 2479 case SIOC_QETH_ARP_REMOVE_ENTRY: 2480 if (!capable(CAP_NET_ADMIN)) { 2481 rc = -EPERM; 2482 break; 2483 } 2484 if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, 2485 sizeof(struct qeth_arp_cache_entry))) 2486 rc = -EFAULT; 2487 else 2488 rc = qeth_l3_arp_remove_entry(card, &arp_entry); 2489 break; 2490 case SIOC_QETH_ARP_FLUSH_CACHE: 2491 if (!capable(CAP_NET_ADMIN)) { 2492 rc = -EPERM; 2493 break; 2494 } 2495 rc = qeth_l3_arp_flush_cache(card); 2496 break; 2497 case SIOC_QETH_ADP_SET_SNMP_CONTROL: 2498 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); 2499 break; 2500 case SIOC_QETH_GET_CARD_TYPE: 2501 if ((card->info.type == QETH_CARD_TYPE_OSAE) && 2502 !card->info.guestlan) 2503 return 1; 2504 return 0; 2505 break; 2506 case SIOCGMIIPHY: 2507 mii_data = if_mii(rq); 2508 mii_data->phy_id = 0; 2509 break; 2510 case SIOCGMIIREG: 2511 mii_data = if_mii(rq); 2512 if (mii_data->phy_id != 0) 2513 rc = -EINVAL; 2514 else 2515 mii_data->val_out = qeth_mdio_read(dev, 2516 mii_data->phy_id, 2517 mii_data->reg_num); 2518 break; 2519 default: 2520 rc = -EOPNOTSUPP; 2521 } 2522 if (rc) 2523 QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc); 2524 return rc; 2525 } 2526 2527 int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) 2528 { 2529 int cast_type = RTN_UNSPEC; 2530 2531 if (skb_dst(skb) && skb_dst(skb)->neighbour) { 2532 cast_type = skb_dst(skb)->neighbour->type; 2533 if ((cast_type == RTN_BROADCAST) || 2534 (cast_type == RTN_MULTICAST) || 2535 (cast_type == RTN_ANYCAST)) 2536 return cast_type; 2537 else 2538 return RTN_UNSPEC; 2539 } 2540 /* try something else */ 2541 if (skb->protocol == ETH_P_IPV6) 2542 return (skb_network_header(skb)[24] == 0xff) ? 2543 RTN_MULTICAST : 0; 2544 else if (skb->protocol == ETH_P_IP) 2545 return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ? 2546 RTN_MULTICAST : 0; 2547 /* ... */ 2548 if (!memcmp(skb->data, skb->dev->broadcast, 6)) 2549 return RTN_BROADCAST; 2550 else { 2551 u16 hdr_mac; 2552 2553 hdr_mac = *((u16 *)skb->data); 2554 /* tr multicast? 
*/
2555 switch (card->info.link_type) {
2556 case QETH_LINK_TYPE_HSTR:
2557 case QETH_LINK_TYPE_LANE_TR:
2558 if ((hdr_mac == QETH_TR_MAC_NC) ||
2559 (hdr_mac == QETH_TR_MAC_C))
2560 return RTN_MULTICAST;
2561 break;
2562 /* eth or other multicast? */
2563 default:
2564 if ((hdr_mac == QETH_ETH_MAC_V4) ||
2565 (hdr_mac == QETH_ETH_MAC_V6))
2566 return RTN_MULTICAST;
2567 }
2568 }
2569 return cast_type;
2570 }
2571
2572 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2573 struct sk_buff *skb, int ipv, int cast_type)
2574 {
2575 memset(hdr, 0, sizeof(struct qeth_hdr));
2576 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2577 hdr->hdr.l3.ext_flags = 0;
2578
2579 /*
2580 * The VLAN tag is saved here, before this location is overwritten with
2581 * the next hop IP: v6 uses passthrough, v4 sets the tag in the QDIO header.
2582 */
2583 if (card->vlangrp && vlan_tx_tag_present(skb)) {
2584 if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
2585 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
2586 else
2587 hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
2588 hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
2589 }
2590
2591 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
2592 if (ipv == 4) {
2593 /* IPv4 */
2594 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
2595 memset(hdr->hdr.l3.dest_addr, 0, 12);
2596 if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
2597 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2598 *((u32 *) skb_dst(skb)->neighbour->primary_key);
2599 } else {
2600 /* fill in destination address used in ip header */
2601 *((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
2602 ip_hdr(skb)->daddr;
2603 }
2604 } else if (ipv == 6) {
2605 /* IPv6 */
2606 hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
2607 if (card->info.type == QETH_CARD_TYPE_IQD)
2608 hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
2609 if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
2610 memcpy(hdr->hdr.l3.dest_addr,
2611 skb_dst(skb)->neighbour->primary_key, 16);
2612 } else {
2613 /* fill in destination address used in ip header */
2614 memcpy(hdr->hdr.l3.dest_addr,
2615 &ipv6_hdr(skb)->daddr, 16);
2616 }
2617 } else {
2618 /* passthrough */
2619 if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
2620 !memcmp(skb->data + sizeof(struct qeth_hdr) +
2621 sizeof(__u16), skb->dev->broadcast, 6)) {
2622 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
2623 QETH_HDR_PASSTHRU;
2624 } else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
2625 skb->dev->broadcast, 6)) {
2626 /* broadcast? */
2627 hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
2628 QETH_HDR_PASSTHRU;
2629 } else {
2630 hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
2631 QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU : 2632 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU; 2633 } 2634 } 2635 } 2636 2637 static void qeth_tso_fill_header(struct qeth_card *card, 2638 struct qeth_hdr *qhdr, struct sk_buff *skb) 2639 { 2640 struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr; 2641 struct tcphdr *tcph = tcp_hdr(skb); 2642 struct iphdr *iph = ip_hdr(skb); 2643 struct ipv6hdr *ip6h = ipv6_hdr(skb); 2644 2645 /*fix header to TSO values ...*/ 2646 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; 2647 /*set values which are fix for the first approach ...*/ 2648 hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); 2649 hdr->ext.imb_hdr_no = 1; 2650 hdr->ext.hdr_type = 1; 2651 hdr->ext.hdr_version = 1; 2652 hdr->ext.hdr_len = 28; 2653 /*insert non-fix values */ 2654 hdr->ext.mss = skb_shinfo(skb)->gso_size; 2655 hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4); 2656 hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - 2657 sizeof(struct qeth_hdr_tso)); 2658 tcph->check = 0; 2659 if (skb->protocol == ETH_P_IPV6) { 2660 ip6h->payload_len = 0; 2661 tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 2662 0, IPPROTO_TCP, 0); 2663 } else { 2664 /*OSA want us to set these values ...*/ 2665 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 2666 0, IPPROTO_TCP, 0); 2667 iph->tot_len = 0; 2668 iph->check = 0; 2669 } 2670 } 2671 2672 static void qeth_tx_csum(struct sk_buff *skb) 2673 { 2674 __wsum csum; 2675 int offset; 2676 2677 skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb)); 2678 offset = skb->csum_start - skb_headroom(skb); 2679 BUG_ON(offset >= skb_headlen(skb)); 2680 csum = skb_checksum(skb, offset, skb->len - offset, 0); 2681 2682 offset += skb->csum_offset; 2683 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); 2684 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2685 } 2686 2687 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 2688 { 2689 int rc; 2690 u16 *tag; 2691 struct qeth_hdr *hdr = NULL; 2692 int elements_needed = 0; 2693 int elems; 2694 struct qeth_card *card = dev->ml_priv; 2695 struct sk_buff *new_skb = NULL; 2696 int ipv = qeth_get_ip_version(skb); 2697 int cast_type = qeth_l3_get_cast_type(card, skb); 2698 struct qeth_qdio_out_q *queue = card->qdio.out_qs 2699 [qeth_get_priority_queue(card, skb, ipv, cast_type)]; 2700 int tx_bytes = skb->len; 2701 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 2702 int data_offset = -1; 2703 int nr_frags; 2704 2705 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2706 (skb->protocol != htons(ETH_P_IPV6)) && 2707 (skb->protocol != htons(ETH_P_IP))) 2708 goto tx_drop; 2709 2710 if ((card->state != CARD_STATE_UP) || !card->lan_online) { 2711 card->stats.tx_carrier_errors++; 2712 goto tx_drop; 2713 } 2714 2715 if ((cast_type == RTN_BROADCAST) && 2716 (card->info.broadcast_capable == 0)) 2717 goto tx_drop; 2718 2719 if (card->options.performance_stats) { 2720 card->perf_stats.outbound_cnt++; 2721 card->perf_stats.outbound_start_time = qeth_get_micros(); 2722 } 2723 2724 if (skb_is_gso(skb)) 2725 large_send = card->options.large_send; 2726 else 2727 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2728 qeth_tx_csum(skb); 2729 if (card->options.performance_stats) 2730 card->perf_stats.tx_csum++; 2731 } 2732 2733 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && 2734 (skb_shinfo(skb)->nr_frags == 0)) { 2735 new_skb = skb; 2736 data_offset = ETH_HLEN; 2737 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 2738 if (!hdr) 
2739 goto tx_drop; 2740 elements_needed++; 2741 } else { 2742 /* create a clone with writeable headroom */ 2743 new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) 2744 + VLAN_HLEN); 2745 if (!new_skb) 2746 goto tx_drop; 2747 } 2748 2749 if (card->info.type == QETH_CARD_TYPE_IQD) { 2750 if (data_offset < 0) 2751 skb_pull(new_skb, ETH_HLEN); 2752 } else { 2753 if (new_skb->protocol == htons(ETH_P_IP)) { 2754 if (card->dev->type == ARPHRD_IEEE802_TR) 2755 skb_pull(new_skb, TR_HLEN); 2756 else 2757 skb_pull(new_skb, ETH_HLEN); 2758 } 2759 2760 if (new_skb->protocol == ETH_P_IPV6 && card->vlangrp && 2761 vlan_tx_tag_present(new_skb)) { 2762 skb_push(new_skb, VLAN_HLEN); 2763 skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); 2764 skb_copy_to_linear_data_offset(new_skb, 4, 2765 new_skb->data + 8, 4); 2766 skb_copy_to_linear_data_offset(new_skb, 8, 2767 new_skb->data + 12, 4); 2768 tag = (u16 *)(new_skb->data + 12); 2769 *tag = __constant_htons(ETH_P_8021Q); 2770 *(tag + 1) = htons(vlan_tx_tag_get(new_skb)); 2771 new_skb->vlan_tci = 0; 2772 } 2773 } 2774 2775 netif_stop_queue(dev); 2776 2777 /* fix hardware limitation: as long as we do not have sbal 2778 * chaining we can not send long frag lists 2779 */ 2780 if ((large_send == QETH_LARGE_SEND_TSO) && 2781 ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) { 2782 if (skb_linearize(new_skb)) 2783 goto tx_drop; 2784 } 2785 2786 if ((large_send == QETH_LARGE_SEND_TSO) && 2787 (cast_type == RTN_UNSPEC)) { 2788 hdr = (struct qeth_hdr *)skb_push(new_skb, 2789 sizeof(struct qeth_hdr_tso)); 2790 memset(hdr, 0, sizeof(struct qeth_hdr_tso)); 2791 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); 2792 qeth_tso_fill_header(card, hdr, new_skb); 2793 elements_needed++; 2794 } else { 2795 if (data_offset < 0) { 2796 hdr = (struct qeth_hdr *)skb_push(new_skb, 2797 sizeof(struct qeth_hdr)); 2798 qeth_l3_fill_header(card, hdr, new_skb, ipv, 2799 cast_type); 2800 } else { 2801 qeth_l3_fill_header(card, hdr, new_skb, ipv, 2802 cast_type); 2803 hdr->hdr.l3.length = new_skb->len - data_offset; 2804 } 2805 } 2806 2807 elems = qeth_get_elements_no(card, (void *)hdr, new_skb, 2808 elements_needed); 2809 if (!elems) { 2810 if (data_offset >= 0) 2811 kmem_cache_free(qeth_core_header_cache, hdr); 2812 goto tx_drop; 2813 } 2814 elements_needed += elems; 2815 nr_frags = skb_shinfo(new_skb)->nr_frags; 2816 2817 if (card->info.type != QETH_CARD_TYPE_IQD) 2818 rc = qeth_do_send_packet(card, queue, new_skb, hdr, 2819 elements_needed); 2820 else 2821 rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, 2822 elements_needed, data_offset, 0); 2823 2824 if (!rc) { 2825 card->stats.tx_packets++; 2826 card->stats.tx_bytes += tx_bytes; 2827 if (new_skb != skb) 2828 dev_kfree_skb_any(skb); 2829 if (card->options.performance_stats) { 2830 if (large_send != QETH_LARGE_SEND_NO) { 2831 card->perf_stats.large_send_bytes += tx_bytes; 2832 card->perf_stats.large_send_cnt++; 2833 } 2834 if (nr_frags) { 2835 card->perf_stats.sg_skbs_sent++; 2836 /* nr_frags + skb->data */ 2837 card->perf_stats.sg_frags_sent += nr_frags + 1; 2838 } 2839 } 2840 rc = NETDEV_TX_OK; 2841 } else { 2842 if (data_offset >= 0) 2843 kmem_cache_free(qeth_core_header_cache, hdr); 2844 2845 if (rc == -EBUSY) { 2846 if (new_skb != skb) 2847 dev_kfree_skb_any(new_skb); 2848 return NETDEV_TX_BUSY; 2849 } else 2850 goto tx_drop; 2851 } 2852 2853 netif_wake_queue(dev); 2854 if (card->options.performance_stats) 2855 card->perf_stats.outbound_time += qeth_get_micros() - 2856 
card->perf_stats.outbound_start_time; 2857 return rc; 2858 2859 tx_drop: 2860 card->stats.tx_dropped++; 2861 card->stats.tx_errors++; 2862 if ((new_skb != skb) && new_skb) 2863 dev_kfree_skb_any(new_skb); 2864 dev_kfree_skb_any(skb); 2865 netif_wake_queue(dev); 2866 return NETDEV_TX_OK; 2867 } 2868 2869 static int qeth_l3_open(struct net_device *dev) 2870 { 2871 struct qeth_card *card = dev->ml_priv; 2872 2873 QETH_DBF_TEXT(TRACE, 4, "qethopen"); 2874 if (card->state != CARD_STATE_SOFTSETUP) 2875 return -ENODEV; 2876 card->data.state = CH_STATE_UP; 2877 card->state = CARD_STATE_UP; 2878 netif_start_queue(dev); 2879 2880 if (!card->lan_online && netif_carrier_ok(dev)) 2881 netif_carrier_off(dev); 2882 return 0; 2883 } 2884 2885 static int qeth_l3_stop(struct net_device *dev) 2886 { 2887 struct qeth_card *card = dev->ml_priv; 2888 2889 QETH_DBF_TEXT(TRACE, 4, "qethstop"); 2890 netif_tx_disable(dev); 2891 if (card->state == CARD_STATE_UP) 2892 card->state = CARD_STATE_SOFTSETUP; 2893 return 0; 2894 } 2895 2896 static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev) 2897 { 2898 struct qeth_card *card = dev->ml_priv; 2899 2900 return (card->options.checksum_type == HW_CHECKSUMMING); 2901 } 2902 2903 static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) 2904 { 2905 struct qeth_card *card = dev->ml_priv; 2906 enum qeth_card_states old_state; 2907 enum qeth_checksum_types csum_type; 2908 2909 if ((card->state != CARD_STATE_UP) && 2910 (card->state != CARD_STATE_DOWN)) 2911 return -EPERM; 2912 2913 if (data) 2914 csum_type = HW_CHECKSUMMING; 2915 else 2916 csum_type = SW_CHECKSUMMING; 2917 2918 if (card->options.checksum_type != csum_type) { 2919 old_state = card->state; 2920 if (card->state == CARD_STATE_UP) 2921 __qeth_l3_set_offline(card->gdev, 1); 2922 card->options.checksum_type = csum_type; 2923 if (old_state == CARD_STATE_UP) 2924 __qeth_l3_set_online(card->gdev, 1); 2925 } 2926 return 0; 2927 } 2928 2929 static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) 2930 { 2931 struct qeth_card *card = dev->ml_priv; 2932 2933 if (data) { 2934 if (card->options.large_send == QETH_LARGE_SEND_NO) { 2935 if (card->info.type == QETH_CARD_TYPE_IQD) 2936 return -EPERM; 2937 else 2938 card->options.large_send = QETH_LARGE_SEND_TSO; 2939 dev->features |= NETIF_F_TSO; 2940 } 2941 } else { 2942 dev->features &= ~NETIF_F_TSO; 2943 card->options.large_send = QETH_LARGE_SEND_NO; 2944 } 2945 return 0; 2946 } 2947 2948 static const struct ethtool_ops qeth_l3_ethtool_ops = { 2949 .get_link = ethtool_op_get_link, 2950 .get_tx_csum = ethtool_op_get_tx_csum, 2951 .set_tx_csum = ethtool_op_set_tx_hw_csum, 2952 .get_rx_csum = qeth_l3_ethtool_get_rx_csum, 2953 .set_rx_csum = qeth_l3_ethtool_set_rx_csum, 2954 .get_sg = ethtool_op_get_sg, 2955 .set_sg = ethtool_op_set_sg, 2956 .get_tso = ethtool_op_get_tso, 2957 .set_tso = qeth_l3_ethtool_set_tso, 2958 .get_strings = qeth_core_get_strings, 2959 .get_ethtool_stats = qeth_core_get_ethtool_stats, 2960 .get_stats_count = qeth_core_get_stats_count, 2961 .get_drvinfo = qeth_core_get_drvinfo, 2962 .get_settings = qeth_core_ethtool_get_settings, 2963 }; 2964 2965 /* 2966 * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting 2967 * NOARP on the netdevice is no option because it also turns off neighbor 2968 * solicitation. For IPv4 we install a neighbor_setup function. We don't want 2969 * arp resolution but we want the hard header (packet socket will work 2970 * e.g. 
tcpdump) 2971 */ 2972 static int qeth_l3_neigh_setup_noarp(struct neighbour *n) 2973 { 2974 n->nud_state = NUD_NOARP; 2975 memcpy(n->ha, "FAKELL", 6); 2976 n->output = n->ops->connected_output; 2977 return 0; 2978 } 2979 2980 static int 2981 qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np) 2982 { 2983 if (np->tbl->family == AF_INET) 2984 np->neigh_setup = qeth_l3_neigh_setup_noarp; 2985 2986 return 0; 2987 } 2988 2989 static const struct net_device_ops qeth_l3_netdev_ops = { 2990 .ndo_open = qeth_l3_open, 2991 .ndo_stop = qeth_l3_stop, 2992 .ndo_get_stats = qeth_get_stats, 2993 .ndo_start_xmit = qeth_l3_hard_start_xmit, 2994 .ndo_validate_addr = eth_validate_addr, 2995 .ndo_set_multicast_list = qeth_l3_set_multicast_list, 2996 .ndo_do_ioctl = qeth_l3_do_ioctl, 2997 .ndo_change_mtu = qeth_change_mtu, 2998 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, 2999 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3000 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3001 .ndo_tx_timeout = qeth_tx_timeout, 3002 }; 3003 3004 static const struct net_device_ops qeth_l3_osa_netdev_ops = { 3005 .ndo_open = qeth_l3_open, 3006 .ndo_stop = qeth_l3_stop, 3007 .ndo_get_stats = qeth_get_stats, 3008 .ndo_start_xmit = qeth_l3_hard_start_xmit, 3009 .ndo_validate_addr = eth_validate_addr, 3010 .ndo_set_multicast_list = qeth_l3_set_multicast_list, 3011 .ndo_do_ioctl = qeth_l3_do_ioctl, 3012 .ndo_change_mtu = qeth_change_mtu, 3013 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, 3014 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3015 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3016 .ndo_tx_timeout = qeth_tx_timeout, 3017 .ndo_neigh_setup = qeth_l3_neigh_setup, 3018 }; 3019 3020 static int qeth_l3_setup_netdev(struct qeth_card *card) 3021 { 3022 if (card->info.type == QETH_CARD_TYPE_OSAE) { 3023 if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || 3024 (card->info.link_type == QETH_LINK_TYPE_HSTR)) { 3025 #ifdef CONFIG_TR 3026 card->dev = alloc_trdev(0); 3027 #endif 3028 if (!card->dev) 3029 return -ENODEV; 3030 card->dev->netdev_ops = &qeth_l3_netdev_ops; 3031 } else { 3032 card->dev = alloc_etherdev(0); 3033 if (!card->dev) 3034 return -ENODEV; 3035 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; 3036 3037 /*IPv6 address autoconfiguration stuff*/ 3038 qeth_l3_get_unique_id(card); 3039 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 3040 card->dev->dev_id = card->info.unique_id & 3041 0xffff; 3042 } 3043 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 3044 card->dev = alloc_netdev(0, "hsi%d", ether_setup); 3045 if (!card->dev) 3046 return -ENODEV; 3047 card->dev->flags |= IFF_NOARP; 3048 card->dev->netdev_ops = &qeth_l3_netdev_ops; 3049 qeth_l3_iqd_read_initial_mac(card); 3050 } else 3051 return -ENODEV; 3052 3053 card->dev->ml_priv = card; 3054 card->dev->watchdog_timeo = QETH_TX_TIMEOUT; 3055 card->dev->mtu = card->info.initial_mtu; 3056 SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops); 3057 card->dev->features |= NETIF_F_HW_VLAN_TX | 3058 NETIF_F_HW_VLAN_RX | 3059 NETIF_F_HW_VLAN_FILTER; 3060 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 3061 3062 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3063 return register_netdev(card->dev); 3064 } 3065 3066 static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, 3067 unsigned int qdio_err, unsigned int queue, int first_element, 3068 int count, unsigned long card_ptr) 3069 { 3070 struct net_device *net_dev; 3071 struct qeth_card *card; 3072 struct qeth_qdio_buffer *buffer; 3073 int index; 3074 int i; 3075 3076 card = 
(struct qeth_card *) card_ptr; 3077 net_dev = card->dev; 3078 if (card->options.performance_stats) { 3079 card->perf_stats.inbound_cnt++; 3080 card->perf_stats.inbound_start_time = qeth_get_micros(); 3081 } 3082 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { 3083 QETH_DBF_TEXT(TRACE, 1, "qdinchk"); 3084 QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card)); 3085 QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", 3086 first_element, count); 3087 QETH_DBF_TEXT_(TRACE, 1, "%04X", queue); 3088 qeth_schedule_recovery(card); 3089 return; 3090 } 3091 for (i = first_element; i < (first_element + count); ++i) { 3092 index = i % QDIO_MAX_BUFFERS_PER_Q; 3093 buffer = &card->qdio.in_q->bufs[index]; 3094 if (!(qdio_err && 3095 qeth_check_qdio_errors(buffer->buffer, 3096 qdio_err, "qinerr"))) 3097 qeth_l3_process_inbound_buffer(card, buffer, index); 3098 /* clear buffer and give back to hardware */ 3099 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 3100 qeth_queue_input_buffer(card, index); 3101 } 3102 if (card->options.performance_stats) 3103 card->perf_stats.inbound_time += qeth_get_micros() - 3104 card->perf_stats.inbound_start_time; 3105 } 3106 3107 static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 3108 { 3109 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3110 3111 qeth_l3_create_device_attributes(&gdev->dev); 3112 card->options.layer2 = 0; 3113 card->discipline.input_handler = (qdio_handler_t *) 3114 qeth_l3_qdio_input_handler; 3115 card->discipline.output_handler = (qdio_handler_t *) 3116 qeth_qdio_output_handler; 3117 card->discipline.recover = qeth_l3_recover; 3118 return 0; 3119 } 3120 3121 static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) 3122 { 3123 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 3124 3125 qeth_set_allowed_threads(card, 0, 1); 3126 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 3127 3128 if (cgdev->state == CCWGROUP_ONLINE) { 3129 card->use_hard_stop = 1; 3130 qeth_l3_set_offline(cgdev); 3131 } 3132 3133 if (card->dev) { 3134 unregister_netdev(card->dev); 3135 card->dev = NULL; 3136 } 3137 3138 qeth_l3_remove_device_attributes(&cgdev->dev); 3139 qeth_l3_clear_ip_list(card, 0, 0); 3140 qeth_l3_clear_ipato_list(card); 3141 return; 3142 } 3143 3144 static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) 3145 { 3146 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3147 int rc = 0; 3148 enum qeth_card_states recover_flag; 3149 3150 BUG_ON(!card); 3151 QETH_DBF_TEXT(SETUP, 2, "setonlin"); 3152 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 3153 3154 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 3155 3156 recover_flag = card->state; 3157 rc = ccw_device_set_online(CARD_RDEV(card)); 3158 if (rc) { 3159 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3160 return -EIO; 3161 } 3162 rc = ccw_device_set_online(CARD_WDEV(card)); 3163 if (rc) { 3164 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3165 return -EIO; 3166 } 3167 rc = ccw_device_set_online(CARD_DDEV(card)); 3168 if (rc) { 3169 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3170 return -EIO; 3171 } 3172 3173 rc = qeth_core_hardsetup_card(card); 3174 if (rc) { 3175 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3176 goto out_remove; 3177 } 3178 3179 qeth_l3_query_ipassists(card, QETH_PROT_IPV4); 3180 3181 if (!card->dev && qeth_l3_setup_netdev(card)) 3182 goto out_remove; 3183 3184 card->state = CARD_STATE_HARDSETUP; 3185 qeth_print_status_message(card); 3186 3187 /* softsetup */ 3188 QETH_DBF_TEXT(SETUP, 2, "softsetp"); 3189 3190 rc = 
qeth_send_startlan(card); 3191 if (rc) { 3192 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3193 if (rc == 0xe080) { 3194 dev_warn(&card->gdev->dev, 3195 "The LAN is offline\n"); 3196 card->lan_online = 0; 3197 return 0; 3198 } 3199 goto out_remove; 3200 } else 3201 card->lan_online = 1; 3202 qeth_set_large_send(card, card->options.large_send); 3203 3204 rc = qeth_l3_setadapter_parms(card); 3205 if (rc) 3206 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3207 rc = qeth_l3_start_ipassists(card); 3208 if (rc) 3209 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 3210 rc = qeth_l3_setrouting_v4(card); 3211 if (rc) 3212 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 3213 rc = qeth_l3_setrouting_v6(card); 3214 if (rc) 3215 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 3216 netif_tx_disable(card->dev); 3217 3218 rc = qeth_init_qdio_queues(card); 3219 if (rc) { 3220 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 3221 goto out_remove; 3222 } 3223 card->state = CARD_STATE_SOFTSETUP; 3224 netif_carrier_on(card->dev); 3225 3226 qeth_set_allowed_threads(card, 0xffffffff, 0); 3227 qeth_l3_set_ip_addr_list(card); 3228 if (recover_flag == CARD_STATE_RECOVER) { 3229 if (recovery_mode) 3230 qeth_l3_open(card->dev); 3231 else { 3232 rtnl_lock(); 3233 dev_open(card->dev); 3234 rtnl_unlock(); 3235 } 3236 qeth_l3_set_multicast_list(card->dev); 3237 } 3238 /* let user_space know that device is online */ 3239 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 3240 return 0; 3241 out_remove: 3242 card->use_hard_stop = 1; 3243 qeth_l3_stop_card(card, 0); 3244 ccw_device_set_offline(CARD_DDEV(card)); 3245 ccw_device_set_offline(CARD_WDEV(card)); 3246 ccw_device_set_offline(CARD_RDEV(card)); 3247 if (recover_flag == CARD_STATE_RECOVER) 3248 card->state = CARD_STATE_RECOVER; 3249 else 3250 card->state = CARD_STATE_DOWN; 3251 return -ENODEV; 3252 } 3253 3254 static int qeth_l3_set_online(struct ccwgroup_device *gdev) 3255 { 3256 return __qeth_l3_set_online(gdev, 0); 3257 } 3258 3259 static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, 3260 int recovery_mode) 3261 { 3262 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 3263 int rc = 0, rc2 = 0, rc3 = 0; 3264 enum qeth_card_states recover_flag; 3265 3266 QETH_DBF_TEXT(SETUP, 3, "setoffl"); 3267 QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); 3268 3269 if (card->dev && netif_carrier_ok(card->dev)) 3270 netif_carrier_off(card->dev); 3271 recover_flag = card->state; 3272 qeth_l3_stop_card(card, recovery_mode); 3273 rc = ccw_device_set_offline(CARD_DDEV(card)); 3274 rc2 = ccw_device_set_offline(CARD_WDEV(card)); 3275 rc3 = ccw_device_set_offline(CARD_RDEV(card)); 3276 if (!rc) 3277 rc = (rc2) ? 
rc2 : rc3; 3278 if (rc) 3279 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3280 if (recover_flag == CARD_STATE_UP) 3281 card->state = CARD_STATE_RECOVER; 3282 /* let user_space know that device is offline */ 3283 kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); 3284 return 0; 3285 } 3286 3287 static int qeth_l3_set_offline(struct ccwgroup_device *cgdev) 3288 { 3289 return __qeth_l3_set_offline(cgdev, 0); 3290 } 3291 3292 static int qeth_l3_recover(void *ptr) 3293 { 3294 struct qeth_card *card; 3295 int rc = 0; 3296 3297 card = (struct qeth_card *) ptr; 3298 QETH_DBF_TEXT(TRACE, 2, "recover1"); 3299 QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *)); 3300 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 3301 return 0; 3302 QETH_DBF_TEXT(TRACE, 2, "recover2"); 3303 dev_warn(&card->gdev->dev, 3304 "A recovery process has been started for the device\n"); 3305 card->use_hard_stop = 1; 3306 __qeth_l3_set_offline(card->gdev, 1); 3307 rc = __qeth_l3_set_online(card->gdev, 1); 3308 /* don't run another scheduled recovery */ 3309 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 3310 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 3311 if (!rc) 3312 dev_info(&card->gdev->dev, 3313 "Device successfully recovered!\n"); 3314 else { 3315 rtnl_lock(); 3316 dev_close(card->dev); 3317 rtnl_unlock(); 3318 dev_warn(&card->gdev->dev, "The qeth device driver " 3319 "failed to recover an error on the device\n"); 3320 } 3321 return 0; 3322 } 3323 3324 static void qeth_l3_shutdown(struct ccwgroup_device *gdev) 3325 { 3326 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3327 qeth_l3_clear_ip_list(card, 0, 0); 3328 qeth_qdio_clear_card(card, 0); 3329 qeth_clear_qdio_buffers(card); 3330 } 3331 3332 static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) 3333 { 3334 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3335 3336 if (card->dev) 3337 netif_device_detach(card->dev); 3338 qeth_set_allowed_threads(card, 0, 1); 3339 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 3340 if (gdev->state == CCWGROUP_OFFLINE) 3341 return 0; 3342 if (card->state == CARD_STATE_UP) { 3343 card->use_hard_stop = 1; 3344 __qeth_l3_set_offline(card->gdev, 1); 3345 } else 3346 __qeth_l3_set_offline(card->gdev, 0); 3347 return 0; 3348 } 3349 3350 static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) 3351 { 3352 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3353 int rc = 0; 3354 3355 if (gdev->state == CCWGROUP_OFFLINE) 3356 goto out; 3357 3358 if (card->state == CARD_STATE_RECOVER) { 3359 rc = __qeth_l3_set_online(card->gdev, 1); 3360 if (rc) { 3361 if (card->dev) { 3362 rtnl_lock(); 3363 dev_close(card->dev); 3364 rtnl_unlock(); 3365 } 3366 } 3367 } else 3368 rc = __qeth_l3_set_online(card->gdev, 0); 3369 out: 3370 qeth_set_allowed_threads(card, 0xffffffff, 0); 3371 if (card->dev) 3372 netif_device_attach(card->dev); 3373 if (rc) 3374 dev_warn(&card->gdev->dev, "The qeth device driver " 3375 "failed to recover an error on the device\n"); 3376 return rc; 3377 } 3378 3379 struct ccwgroup_driver qeth_l3_ccwgroup_driver = { 3380 .probe = qeth_l3_probe_device, 3381 .remove = qeth_l3_remove_device, 3382 .set_online = qeth_l3_set_online, 3383 .set_offline = qeth_l3_set_offline, 3384 .shutdown = qeth_l3_shutdown, 3385 .freeze = qeth_l3_pm_suspend, 3386 .thaw = qeth_l3_pm_resume, 3387 .restore = qeth_l3_pm_resume, 3388 }; 3389 EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver); 3390 3391 static int qeth_l3_ip_event(struct notifier_block *this, 3392 unsigned long event, void *ptr) 3393 { 3394 struct 
in_ifaddr *ifa = (struct in_ifaddr *)ptr; 3395 struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev; 3396 struct qeth_ipaddr *addr; 3397 struct qeth_card *card; 3398 3399 if (dev_net(dev) != &init_net) 3400 return NOTIFY_DONE; 3401 3402 QETH_DBF_TEXT(TRACE, 3, "ipevent"); 3403 card = qeth_l3_get_card_from_dev(dev); 3404 if (!card) 3405 return NOTIFY_DONE; 3406 3407 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 3408 if (addr != NULL) { 3409 addr->u.a4.addr = ifa->ifa_address; 3410 addr->u.a4.mask = ifa->ifa_mask; 3411 addr->type = QETH_IP_TYPE_NORMAL; 3412 } else 3413 goto out; 3414 3415 switch (event) { 3416 case NETDEV_UP: 3417 if (!qeth_l3_add_ip(card, addr)) 3418 kfree(addr); 3419 break; 3420 case NETDEV_DOWN: 3421 if (!qeth_l3_delete_ip(card, addr)) 3422 kfree(addr); 3423 break; 3424 default: 3425 break; 3426 } 3427 qeth_l3_set_ip_addr_list(card); 3428 out: 3429 return NOTIFY_DONE; 3430 } 3431 3432 static struct notifier_block qeth_l3_ip_notifier = { 3433 qeth_l3_ip_event, 3434 NULL, 3435 }; 3436 3437 #ifdef CONFIG_QETH_IPV6 3438 /** 3439 * IPv6 event handler 3440 */ 3441 static int qeth_l3_ip6_event(struct notifier_block *this, 3442 unsigned long event, void *ptr) 3443 { 3444 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; 3445 struct net_device *dev = (struct net_device *)ifa->idev->dev; 3446 struct qeth_ipaddr *addr; 3447 struct qeth_card *card; 3448 3449 QETH_DBF_TEXT(TRACE, 3, "ip6event"); 3450 3451 card = qeth_l3_get_card_from_dev(dev); 3452 if (!card) 3453 return NOTIFY_DONE; 3454 if (!qeth_is_supported(card, IPA_IPV6)) 3455 return NOTIFY_DONE; 3456 3457 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 3458 if (addr != NULL) { 3459 memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr)); 3460 addr->u.a6.pfxlen = ifa->prefix_len; 3461 addr->type = QETH_IP_TYPE_NORMAL; 3462 } else 3463 goto out; 3464 3465 switch (event) { 3466 case NETDEV_UP: 3467 if (!qeth_l3_add_ip(card, addr)) 3468 kfree(addr); 3469 break; 3470 case NETDEV_DOWN: 3471 if (!qeth_l3_delete_ip(card, addr)) 3472 kfree(addr); 3473 break; 3474 default: 3475 break; 3476 } 3477 qeth_l3_set_ip_addr_list(card); 3478 out: 3479 return NOTIFY_DONE; 3480 } 3481 3482 static struct notifier_block qeth_l3_ip6_notifier = { 3483 qeth_l3_ip6_event, 3484 NULL, 3485 }; 3486 #endif 3487 3488 static int qeth_l3_register_notifiers(void) 3489 { 3490 int rc; 3491 3492 QETH_DBF_TEXT(TRACE, 5, "regnotif"); 3493 rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); 3494 if (rc) 3495 return rc; 3496 #ifdef CONFIG_QETH_IPV6 3497 rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier); 3498 if (rc) { 3499 unregister_inetaddr_notifier(&qeth_l3_ip_notifier); 3500 return rc; 3501 } 3502 #else 3503 pr_warning("There is no IPv6 support for the layer 3 discipline\n"); 3504 #endif 3505 return 0; 3506 } 3507 3508 static void qeth_l3_unregister_notifiers(void) 3509 { 3510 3511 QETH_DBF_TEXT(TRACE, 5, "unregnot"); 3512 BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); 3513 #ifdef CONFIG_QETH_IPV6 3514 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); 3515 #endif /* QETH_IPV6 */ 3516 } 3517 3518 static int __init qeth_l3_init(void) 3519 { 3520 int rc = 0; 3521 3522 pr_info("register layer 3 discipline\n"); 3523 rc = qeth_l3_register_notifiers(); 3524 return rc; 3525 } 3526 3527 static void __exit qeth_l3_exit(void) 3528 { 3529 qeth_l3_unregister_notifiers(); 3530 pr_info("unregister layer 3 discipline\n"); 3531 } 3532 3533 module_init(qeth_l3_init); 3534 module_exit(qeth_l3_exit); 3535 MODULE_AUTHOR("Frank 
Blaschka <frank.blaschka@de.ibm.com>"); 3536 MODULE_DESCRIPTION("qeth layer 3 discipline"); 3537 MODULE_LICENSE("GPL"); 3538
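/*
 * Hypothetical userspace sketch (not part of the driver, added for
 * illustration only): one way the SIOC_QETH_ARP_QUERY_INFO ioctl handled
 * by qeth_l3_do_ioctl() could be driven.  The buffer layout is inferred
 * from qeth_l3_arp_query() -- the first 4 bytes carry the buffer length,
 * the next 2 the mask bits, and on success the first 4 bytes of the
 * returned buffer hold the number of entries.  The interface name, header
 * requirements and exact layout are assumptions, not a documented ABI;
 * the caller needs CAP_NET_ADMIN.
 *
 *	int sk = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *	unsigned char buf[16384];
 *	__u32 len = sizeof(buf);
 *	__u16 mask = 0;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	memcpy(buf, &len, 4);
 *	memcpy(buf + 4, &mask, 2);
 *	ifr.ifr_data = (char *)buf;
 *	if (ioctl(sk, SIOC_QETH_ARP_QUERY_INFO, &ifr) == 0)
 *		memcpy(&len, buf, 4);	(len now holds the entry count)
 */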