/* viohs.c: LDOM Virtual I/O handshake helper layer.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/ldc.h>
#include <asm/vio.h>

int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
        int err, limit = 1000;

        err = -EINVAL;
        while (limit-- > 0) {
                err = ldc_write(vio->lp, data, len);
                if (!err || (err != -EAGAIN))
                        break;
                udelay(1);
        }

        return err;
}
EXPORT_SYMBOL(vio_ldc_send);

static int send_ctrl(struct vio_driver_state *vio,
                     struct vio_msg_tag *tag, int len)
{
        tag->sid = vio_send_sid(vio);
        return vio_ldc_send(vio, tag, len);
}

static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
        tag->type = type;
        tag->stype = stype;
        tag->stype_env = stype_env;
}

static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
        struct vio_ver_info pkt;

        vio->_local_sid = (u32) sched_clock();

        memset(&pkt, 0, sizeof(pkt));
        init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
        pkt.major = major;
        pkt.minor = minor;
        pkt.dev_class = vio->dev_class;

        viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
               major, minor, vio->dev_class);

        return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int start_handshake(struct vio_driver_state *vio)
{
        int err;

        viodbg(HS, "START HANDSHAKE\n");

        vio->hs_state = VIO_HS_INVALID;

        err = send_version(vio,
                           vio->ver_table[0].major,
                           vio->ver_table[0].minor);
        if (err < 0)
                return err;

        return 0;
}

static void flush_rx_dring(struct vio_driver_state *vio)
{
        struct vio_dring_state *dr;
        u64 ident;

        BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));

        dr = &vio->drings[VIO_DRIVER_RX_RING];
        ident = dr->ident;

        BUG_ON(!vio->desc_buf);
        kfree(vio->desc_buf);
        vio->desc_buf = NULL;

        memset(dr, 0, sizeof(*dr));
        dr->ident = ident;
}

void vio_link_state_change(struct vio_driver_state *vio, int event)
{
        if (event == LDC_EVENT_UP) {
                vio->hs_state = VIO_HS_INVALID;

                switch (vio->dev_class) {
                case VDEV_NETWORK:
                case VDEV_NETWORK_SWITCH:
                        vio->dr_state = (VIO_DR_STATE_TXREQ |
                                         VIO_DR_STATE_RXREQ);
                        break;

                case VDEV_DISK:
                        vio->dr_state = VIO_DR_STATE_TXREQ;
                        break;
                case VDEV_DISK_SERVER:
                        vio->dr_state = VIO_DR_STATE_RXREQ;
                        break;
                }
                start_handshake(vio);
        } else if (event == LDC_EVENT_RESET) {
                vio->hs_state = VIO_HS_INVALID;

                if (vio->dr_state & VIO_DR_STATE_RXREG)
                        flush_rx_dring(vio);

                vio->dr_state = 0x00;
                memset(&vio->ver, 0, sizeof(vio->ver));

                ldc_disconnect(vio->lp);
        }
}
EXPORT_SYMBOL(vio_link_state_change);
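
/* Overview of the flow below, as implemented by the process_*() handlers:
 * the handshake kicked off by start_handshake() walks through version
 * negotiation (VER_INFO), attribute exchange (ATTR_INFO), descriptor ring
 * registration (DRING_REG) and finally RDX.  handshake_failure() is the
 * common bail-out path: it forgets any ring registration state and returns
 * -ECONNRESET so the caller can reset the connection.
 */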
static int handshake_failure(struct vio_driver_state *vio)
{
        struct vio_dring_state *dr;

        /* XXX Put policy here... Perhaps start a timer to fire
         * XXX in 100 ms, which will bring the link up and retry
         * XXX the handshake.
         */

        viodbg(HS, "HANDSHAKE FAILURE\n");

        vio->dr_state &= ~(VIO_DR_STATE_TXREG |
                           VIO_DR_STATE_RXREG);

        dr = &vio->drings[VIO_DRIVER_RX_RING];
        memset(dr, 0, sizeof(*dr));

        kfree(vio->desc_buf);
        vio->desc_buf = NULL;
        vio->desc_buf_len = 0;

        vio->hs_state = VIO_HS_INVALID;

        return -ECONNRESET;
}

static int process_unknown(struct vio_driver_state *vio, void *arg)
{
        struct vio_msg_tag *pkt = arg;

        viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
               pkt->type, pkt->stype, pkt->stype_env, pkt->sid);

        printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
               vio->vdev->channel_id);

        ldc_disconnect(vio->lp);

        return -ECONNRESET;
}

static int send_dreg(struct vio_driver_state *vio)
{
        struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
        union {
                struct vio_dring_register pkt;
                char all[sizeof(struct vio_dring_register) +
                         (sizeof(struct ldc_trans_cookie) *
                          dr->ncookies)];
        } u;
        int i;

        memset(&u, 0, sizeof(u));
        init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
        u.pkt.dring_ident = 0;
        u.pkt.num_descr = dr->num_entries;
        u.pkt.descr_size = dr->entry_size;
        u.pkt.options = VIO_TX_DRING;
        u.pkt.num_cookies = dr->ncookies;

        viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
               "ncookies[%u]\n",
               u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
               u.pkt.num_cookies);

        for (i = 0; i < dr->ncookies; i++) {
                u.pkt.cookies[i] = dr->cookies[i];

                viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
                       i,
                       (unsigned long long) u.pkt.cookies[i].cookie_addr,
                       (unsigned long long) u.pkt.cookies[i].cookie_size);
        }

        return send_ctrl(vio, &u.pkt.tag, sizeof(u));
}

static int send_rdx(struct vio_driver_state *vio)
{
        struct vio_rdx pkt;

        memset(&pkt, 0, sizeof(pkt));

        init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);

        viodbg(HS, "SEND RDX INFO\n");

        return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int send_attr(struct vio_driver_state *vio)
{
        return vio->ops->send_attr(vio);
}

static struct vio_version *find_by_major(struct vio_driver_state *vio,
                                         u16 major)
{
        struct vio_version *ret = NULL;
        int i;

        for (i = 0; i < vio->ver_table_entries; i++) {
                struct vio_version *v = &vio->ver_table[i];
                if (v->major <= major) {
                        ret = v;
                        break;
                }
        }
        return ret;
}
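
/* Version negotiation.  find_by_major() returns the first table entry whose
 * major is <= the peer's offer, which picks the best candidate when the
 * ver_table is sorted newest-first (an assumption about how callers build
 * the table, not something enforced here).  process_ver_info() then ACKs
 * with the common major and the smaller of the two minors, NACKs with our
 * best alternative on a major mismatch, or NACKs with 0.0 when nothing is
 * compatible.  For example, with a local table of { 1.6 } an offer of 1.4
 * is ACKed as 1.4, while an offer of 2.0 is NACKed with 1.6 so the peer
 * can retry.
 */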
static int process_ver_info(struct vio_driver_state *vio,
                            struct vio_ver_info *pkt)
{
        struct vio_version *vap;
        int err;

        viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
               pkt->major, pkt->minor, pkt->dev_class);

        if (vio->hs_state != VIO_HS_INVALID) {
                /* XXX Perhaps invoke start_handshake? XXX */
                memset(&vio->ver, 0, sizeof(vio->ver));
                vio->hs_state = VIO_HS_INVALID;
        }

        vap = find_by_major(vio, pkt->major);

        vio->_peer_sid = pkt->tag.sid;

        if (!vap) {
                pkt->tag.stype = VIO_SUBTYPE_NACK;
                pkt->major = 0;
                pkt->minor = 0;
                viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
                err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
        } else if (vap->major != pkt->major) {
                pkt->tag.stype = VIO_SUBTYPE_NACK;
                pkt->major = vap->major;
                pkt->minor = vap->minor;
                viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
                       pkt->major, pkt->minor);
                err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
        } else {
                struct vio_version ver = {
                        .major = pkt->major,
                        .minor = pkt->minor,
                };
                if (ver.minor > vap->minor)
                        ver.minor = vap->minor;
                pkt->minor = ver.minor;
                pkt->tag.stype = VIO_SUBTYPE_ACK;
                viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
                       pkt->major, pkt->minor);
                err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
                if (err > 0) {
                        vio->ver = ver;
                        vio->hs_state = VIO_HS_GOTVERS;
                }
        }
        if (err < 0)
                return handshake_failure(vio);

        return 0;
}

static int process_ver_ack(struct vio_driver_state *vio,
                           struct vio_ver_info *pkt)
{
        viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
               pkt->major, pkt->minor, pkt->dev_class);

        if (vio->hs_state & VIO_HS_GOTVERS) {
                if (vio->ver.major != pkt->major ||
                    vio->ver.minor != pkt->minor) {
                        pkt->tag.stype = VIO_SUBTYPE_NACK;
                        (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
                        return handshake_failure(vio);
                }
        } else {
                vio->ver.major = pkt->major;
                vio->ver.minor = pkt->minor;
                vio->hs_state = VIO_HS_GOTVERS;
        }

        switch (vio->dev_class) {
        case VDEV_NETWORK:
        case VDEV_DISK:
                if (send_attr(vio) < 0)
                        return handshake_failure(vio);
                break;

        default:
                break;
        }

        return 0;
}

static int process_ver_nack(struct vio_driver_state *vio,
                            struct vio_ver_info *pkt)
{
        struct vio_version *nver;

        viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
               pkt->major, pkt->minor, pkt->dev_class);

        if (pkt->major == 0 && pkt->minor == 0)
                return handshake_failure(vio);
        nver = find_by_major(vio, pkt->major);
        if (!nver)
                return handshake_failure(vio);

        if (send_version(vio, nver->major, nver->minor) < 0)
                return handshake_failure(vio);

        return 0;
}

static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
        switch (pkt->tag.stype) {
        case VIO_SUBTYPE_INFO:
                return process_ver_info(vio, pkt);

        case VIO_SUBTYPE_ACK:
                return process_ver_ack(vio, pkt);

        case VIO_SUBTYPE_NACK:
                return process_ver_nack(vio, pkt);

        default:
                return handshake_failure(vio);
        }
}

static int process_attr(struct vio_driver_state *vio, void *pkt)
{
        int err;

        if (!(vio->hs_state & VIO_HS_GOTVERS))
                return handshake_failure(vio);

        err = vio->ops->handle_attr(vio, pkt);
        if (err < 0) {
                return handshake_failure(vio);
        } else {
                vio->hs_state |= VIO_HS_GOT_ATTR;

                if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
                    !(vio->hs_state & VIO_HS_SENT_DREG)) {
                        if (send_dreg(vio) < 0)
                                return handshake_failure(vio);

                        vio->hs_state |= VIO_HS_SENT_DREG;
                }
        }
        return 0;
}
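
/* Descriptor ring registration.  vio_link_state_change() decides, per
 * device class, which rings this end registers with the peer (TXREQ) and
 * which it expects the peer to register with us (RXREQ).  The TX ring is
 * advertised via send_dreg() once the attribute exchange is done; the
 * peer's DRING_REG INFO for our RX ring is handled by process_dreg_info()
 * below.
 */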
static int all_drings_registered(struct vio_driver_state *vio)
{
        int need_rx, need_tx;

        need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
        need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);

        if (need_rx &&
            !(vio->dr_state & VIO_DR_STATE_RXREG))
                return 0;

        if (need_tx &&
            !(vio->dr_state & VIO_DR_STATE_TXREG))
                return 0;

        return 1;
}

static int process_dreg_info(struct vio_driver_state *vio,
                             struct vio_dring_register *pkt)
{
        struct vio_dring_state *dr;
        int i, len;

        viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
               "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
               (unsigned long long) pkt->dring_ident,
               pkt->num_descr, pkt->descr_size, pkt->options,
               pkt->num_cookies);

        if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
                goto send_nack;

        if (vio->dr_state & VIO_DR_STATE_RXREG)
                goto send_nack;

        BUG_ON(vio->desc_buf);

        vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
        if (!vio->desc_buf)
                goto send_nack;

        vio->desc_buf_len = pkt->descr_size;

        dr = &vio->drings[VIO_DRIVER_RX_RING];

        dr->num_entries = pkt->num_descr;
        dr->entry_size = pkt->descr_size;
        dr->ncookies = pkt->num_cookies;
        for (i = 0; i < dr->ncookies; i++) {
                dr->cookies[i] = pkt->cookies[i];

                viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
                       i,
                       (unsigned long long)
                       pkt->cookies[i].cookie_addr,
                       (unsigned long long)
                       pkt->cookies[i].cookie_size);
        }

        pkt->tag.stype = VIO_SUBTYPE_ACK;
        pkt->dring_ident = ++dr->ident;

        viodbg(HS, "SEND DRING_REG ACK ident[%llx]\n",
               (unsigned long long) pkt->dring_ident);

        len = (sizeof(*pkt) +
               (dr->ncookies * sizeof(struct ldc_trans_cookie)));
        if (send_ctrl(vio, &pkt->tag, len) < 0)
                goto send_nack;

        vio->dr_state |= VIO_DR_STATE_RXREG;

        return 0;

send_nack:
        pkt->tag.stype = VIO_SUBTYPE_NACK;
        viodbg(HS, "SEND DRING_REG NACK\n");
        (void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));

        return handshake_failure(vio);
}

static int process_dreg_ack(struct vio_driver_state *vio,
                            struct vio_dring_register *pkt)
{
        struct vio_dring_state *dr;

        viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
               "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
               (unsigned long long) pkt->dring_ident,
               pkt->num_descr, pkt->descr_size, pkt->options,
               pkt->num_cookies);

        dr = &vio->drings[VIO_DRIVER_TX_RING];

        if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
                return handshake_failure(vio);

        dr->ident = pkt->dring_ident;
        vio->dr_state |= VIO_DR_STATE_TXREG;

        if (all_drings_registered(vio)) {
                if (send_rdx(vio) < 0)
                        return handshake_failure(vio);
                vio->hs_state = VIO_HS_SENT_RDX;
        }
        return 0;
}

static int process_dreg_nack(struct vio_driver_state *vio,
                             struct vio_dring_register *pkt)
{
        viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
               "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
               (unsigned long long) pkt->dring_ident,
               pkt->num_descr, pkt->descr_size, pkt->options,
               pkt->num_cookies);

        return handshake_failure(vio);
}

static int process_dreg(struct vio_driver_state *vio,
                        struct vio_dring_register *pkt)
{
        if (!(vio->hs_state & VIO_HS_GOTVERS))
                return handshake_failure(vio);

        switch (pkt->tag.stype) {
        case VIO_SUBTYPE_INFO:
                return process_dreg_info(vio, pkt);

        case VIO_SUBTYPE_ACK:
                return process_dreg_ack(vio, pkt);

        case VIO_SUBTYPE_NACK:
                return process_dreg_nack(vio, pkt);

        default:
                return handshake_failure(vio);
        }
}

static int process_dunreg(struct vio_driver_state *vio,
                          struct vio_dring_unregister *pkt)
{
        struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];

        viodbg(HS, "GOT DRING_UNREG\n");

        if (pkt->dring_ident != dr->ident)
                return 0;

        vio->dr_state &= ~VIO_DR_STATE_RXREG;

        memset(dr, 0, sizeof(*dr));

        kfree(vio->desc_buf);
        vio->desc_buf = NULL;
        vio->desc_buf_len = 0;

        return 0;
}
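
/* The RDX exchange closes the handshake.  Once every required ring is
 * registered (all_drings_registered()), the side that got its DRING_REG
 * ACKed sends RDX; the receiver ACKs it in process_rdx_info().  When the
 * accumulated hs_state bits satisfy VIO_HS_COMPLETE (from asm/vio.h),
 * vio_control_pkt_engine() invokes the driver's handshake_complete()
 * callback.
 */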
static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
        viodbg(HS, "GOT RDX INFO\n");

        pkt->tag.stype = VIO_SUBTYPE_ACK;
        viodbg(HS, "SEND RDX ACK\n");
        if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
                return handshake_failure(vio);

        vio->hs_state |= VIO_HS_SENT_RDX_ACK;
        return 0;
}

static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
        viodbg(HS, "GOT RDX ACK\n");

        if (!(vio->hs_state & VIO_HS_SENT_RDX))
                return handshake_failure(vio);

        vio->hs_state |= VIO_HS_GOT_RDX_ACK;
        return 0;
}

static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
        viodbg(HS, "GOT RDX NACK\n");

        return handshake_failure(vio);
}

static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
        if (!all_drings_registered(vio))
                handshake_failure(vio);

        switch (pkt->tag.stype) {
        case VIO_SUBTYPE_INFO:
                return process_rdx_info(vio, pkt);

        case VIO_SUBTYPE_ACK:
                return process_rdx_ack(vio, pkt);

        case VIO_SUBTYPE_NACK:
                return process_rdx_nack(vio, pkt);

        default:
                return handshake_failure(vio);
        }
}

int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
        struct vio_msg_tag *tag = pkt;
        u8 prev_state = vio->hs_state;
        int err;

        switch (tag->stype_env) {
        case VIO_VER_INFO:
                err = process_ver(vio, pkt);
                break;

        case VIO_ATTR_INFO:
                err = process_attr(vio, pkt);
                break;

        case VIO_DRING_REG:
                err = process_dreg(vio, pkt);
                break;

        case VIO_DRING_UNREG:
                err = process_dunreg(vio, pkt);
                break;

        case VIO_RDX:
                err = process_rdx(vio, pkt);
                break;

        default:
                err = process_unknown(vio, pkt);
                break;
        }
        if (!err &&
            vio->hs_state != prev_state &&
            (vio->hs_state & VIO_HS_COMPLETE))
                vio->ops->handshake_complete(vio);

        return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);
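
/* Drivers built on this layer (sunvnet and sunvdc are the existing
 * consumers) typically call vio_control_pkt_engine() from their LDC event
 * handler for any packet whose tag.type is VIO_TYPE_CTRL after reading it
 * with ldc_read(); data packets are handled by the driver itself.
 */
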
void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);

/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer.  So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
        u32 sid;

        /* Always let VERSION+INFO packets through unchecked, they
         * define the new SID.
         */
        if (tp->type == VIO_TYPE_CTRL &&
            tp->stype == VIO_SUBTYPE_INFO &&
            tp->stype_env == VIO_VER_INFO)
                return 0;

        /* Ok, now figure out which SID to use.  */
        switch (vio->dev_class) {
        case VDEV_NETWORK:
        case VDEV_NETWORK_SWITCH:
        case VDEV_DISK_SERVER:
        default:
                sid = vio->_peer_sid;
                break;

        case VDEV_DISK:
                sid = vio->_local_sid;
                break;
        }

        if (sid == tp->sid)
                return 0;
        viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
               tp->sid, vio->_peer_sid, vio->_local_sid);
        return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);

u32 vio_send_sid(struct vio_driver_state *vio)
{
        switch (vio->dev_class) {
        case VDEV_NETWORK:
        case VDEV_NETWORK_SWITCH:
        case VDEV_DISK:
        default:
                return vio->_local_sid;

        case VDEV_DISK_SERVER:
                return vio->_peer_sid;
        }
}
EXPORT_SYMBOL(vio_send_sid);

int vio_ldc_alloc(struct vio_driver_state *vio,
                  struct ldc_channel_config *base_cfg,
                  void *event_arg)
{
        struct ldc_channel_config cfg = *base_cfg;
        struct ldc_channel *lp;

        cfg.tx_irq = vio->vdev->tx_irq;
        cfg.rx_irq = vio->vdev->rx_irq;

        lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
        if (IS_ERR(lp))
                return PTR_ERR(lp);

        vio->lp = lp;

        return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);

void vio_ldc_free(struct vio_driver_state *vio)
{
        ldc_free(vio->lp);
        vio->lp = NULL;

        kfree(vio->desc_buf);
        vio->desc_buf = NULL;
        vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);

void vio_port_up(struct vio_driver_state *vio)
{
        unsigned long flags;
        int err, state;

        spin_lock_irqsave(&vio->lock, flags);

        state = ldc_state(vio->lp);

        err = 0;
        if (state == LDC_STATE_INIT) {
                err = ldc_bind(vio->lp, vio->name);
                if (err)
                        printk(KERN_WARNING "%s: Port %lu bind failed, "
                               "err=%d\n",
                               vio->name, vio->vdev->channel_id, err);
        }

        if (!err) {
                err = ldc_connect(vio->lp);
                if (err)
                        printk(KERN_WARNING "%s: Port %lu connect failed, "
                               "err=%d\n",
                               vio->name, vio->vdev->channel_id, err);
        }
        if (err) {
                unsigned long expires = jiffies + HZ;

                expires = round_jiffies(expires);
                mod_timer(&vio->timer, expires);
        }

        spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);

static void vio_port_timer(unsigned long _arg)
{
        struct vio_driver_state *vio = (struct vio_driver_state *) _arg;

        vio_port_up(vio);
}

int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
                    u8 dev_class, struct vio_version *ver_table,
                    int ver_table_size, struct vio_driver_ops *ops,
                    char *name)
{
        switch (dev_class) {
        case VDEV_NETWORK:
        case VDEV_NETWORK_SWITCH:
        case VDEV_DISK:
        case VDEV_DISK_SERVER:
                break;

        default:
                return -EINVAL;
        }

        if (!ops->send_attr ||
            !ops->handle_attr ||
            !ops->handshake_complete)
                return -EINVAL;

        if (!ver_table || ver_table_size < 0)
                return -EINVAL;

        if (!name)
                return -EINVAL;

        spin_lock_init(&vio->lock);

        vio->name = name;

        vio->dev_class = dev_class;
        vio->vdev = vdev;

        vio->ver_table = ver_table;
        vio->ver_table_entries = ver_table_size;

        vio->ops = ops;

        setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);

        return 0;
}
EXPORT_SYMBOL(vio_driver_init);
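
/* Typical usage, sketched from the exported entry points above (the
 * driver-side names here are hypothetical, for illustration only):
 *
 *	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
 *			      my_versions, ARRAY_SIZE(my_versions),
 *			      &my_vio_ops, "my_driver");
 *	if (!err)
 *		err = vio_ldc_alloc(&port->vio, &my_ldc_cfg, port);
 *	if (!err)
 *		vio_port_up(&port->vio);
 *
 * vio_link_state_change() and vio_control_pkt_engine() are then called
 * from the driver's LDC event handler, and vio_ldc_free() at teardown.
 */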