/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#define DRV_MODULE_NAME		"sunvdc"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE	256

#define WAITING_FOR_LINK_UP	0x01
#define WAITING_FOR_TX_SPACE	0x02
#define WAITING_FOR_GEN_CMD	0x04
#define WAITING_FOR_ANY		-1

struct vdc_req_entry {
	struct request		*req;
};

struct vdc_port {
	struct vio_driver_state	vio;

	struct gendisk		*disk;

	struct vdc_completion	*cmp;

	u64			req_id;
	u64			seq;
	struct vdc_req_entry	rq_arr[VDC_TX_RING_SIZE];

	unsigned long		ring_cookies;

	u64			max_xfer_size;
	u32			vdisk_block_size;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64			operations;
	u32			vdisk_size;
	u8			vdisk_type;

	char			disk_name[32];

	struct vio_disk_geom	geom;
	struct vio_disk_vtoc	label;
};

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}

/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 0 },
};

#define VDCBLK_NAME	"vdisk"
static int vdc_major;
#define PARTITION_SHIFT	3

static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}

static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct vdc_port *port = disk->private_data;

	geo->heads = (u8) port->geom.num_hd;
	geo->sectors = (u8) port->geom.num_sec;
	geo->cylinders = port->geom.num_cyl;

	return 0;
}

static struct block_device_operations vdc_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= vdc_getgeo,
};

static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
	if (vio->cmp &&
	    (waiting_for == -1 ||
	     vio->cmp->waiting_for == waiting_for)) {
		vio->cmp->err = err;
		complete(&vio->cmp->com);
		vio->cmp = NULL;
	}
}

static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
}

static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

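/* Attribute-exchange step of the VIO handshake.  vdc_send_attr()
 * advertises our block size and maximum transfer size in dring mode;
 * vdc_handle_attr() validates the server's ACK and caches the disk
 * type, size and supported-operations mask in the vdc_port.
 */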
static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}

static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%lx] disk_size[%lu] disk_type[%x] "
	       "xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased "
			       "%u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_size = pkt->vdisk_size;
		port->vdisk_type = pkt->vdisk_type;
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;
		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}

static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
{
	if (end_that_request_first(req, uptodate, num_sectors))
		return;
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}

static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	vdc_end_request(req, !desc->status, desc->size >> 9);

	if (blk_queue_stopped(port->disk->queue))
		blk_start_queue(port->disk->queue);
}

static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))
		return 0;

	vdc_end_one(port, dr, pkt->start_idx);

	return 0;
}

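/* Inbound LDC message path.  vdc_event() takes vio->lock and drains the
 * channel one packet at a time: link state changes go to the generic
 * VIO layer, DATA ACKs retire ring descriptors via vdc_ack(), and CTRL
 * packets feed the handshake engine.  NACK handling is still a stub.
 */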
static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}

static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
	spin_unlock_irqrestore(&vio->lock, flags);
}

static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= dr->prod,
		.end_idx		= dr->prod,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

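/* Build and issue one block request: map the request's pages into LDC
 * cookies, fill in the next free TX descriptor, then kick the server
 * with a DRING_DATA packet.  The queue lock (vio->lock) is held by the
 * block layer when this is called from do_vdc_request().
 */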
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[port->ring_cookies];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
		blk_stop_queue(port->disk->queue);
		err = -ENOMEM;
		goto out;
	}

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	desc->offset = (req->sector << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
	}
out:

	return err;
}

static void do_vdc_request(struct request_queue *q)
{
	while (1) {
		struct request *req = elv_next_request(q);

		if (!req)
			break;

		blkdev_dequeue_request(req);
		if (__send_request(req) < 0)
			vdc_end_request(req, 0, req->hard_nr_sectors);
	}
}

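/* Synchronous command path for non-block-I/O operations (VTOC,
 * geometry, write-cache state, etc.).  The caller's buffer is bounced
 * through an 8-byte-aligned kernel copy, a single descriptor is
 * queued, and we sleep on a completion that fires when the server ACKs
 * the descriptor (see vdc_end_special()).
 */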
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	unsigned long flags;
	int op_len, err;
	void *req_buf;

	if (!(((u64)1 << ((u64)op - 1)) & port->operations))
		return -EOPNOTSUPP;

	switch (op) {
	case VD_OP_BREAD:
	case VD_OP_BWRITE:
	default:
		return -EINVAL;

	case VD_OP_FLUSH:
		op_len = 0;
		map_perm = 0;
		break;

	case VD_OP_GET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_SCSICMD:
		op_len = 16;
		map_perm = LDC_MAP_RW;
		break;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_GET_EFI:
	case VD_OP_SET_EFI:
		return -EOPNOTSUPP;
	}

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	op_len = (op_len + 7) & ~7;
	req_buf = kzalloc(op_len, GFP_KERNEL);
	if (!req_buf)
		return -ENOMEM;

	if (len > op_len)
		len = op_len;

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* XXX If we want to use this code generically we have to
	 * XXX handle TX ring exhaustion etc.
	 */
	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
			     map_perm);
	if (err < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		kfree(req_buf);
		return err;
	}

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	desc->slice = 0;
	desc->status = ~0;
	desc->offset = 0;
	desc->size = op_len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err >= 0) {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);
		err = comp.err;
	} else {
		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);
	}

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

	kfree(req_buf);

	return err;
}

static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;
}

static void vdc_free_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}
}

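/* Bring a virtual disk online: wait for the channel handshake to
 * complete, fetch the disk label and geometry from the server, then
 * set up the request queue and gendisk and register the disk.
 */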
static int probe_disk(struct vdc_port *port)
{
	struct vio_completion comp;
	struct request_queue *q;
	struct gendisk *g;
	int err;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);

	wait_for_completion(&comp.com);
	if (comp.err)
		return comp.err;

	err = generic_request(port, VD_OP_GET_VTOC,
			      &port->label, sizeof(port->label));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
		return err;
	}

	err = generic_request(port, VD_OP_GET_DISKGEOM,
			      &port->geom, sizeof(port->geom));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
		       "error %d\n", err);
		return err;
	}

	port->vdisk_size = ((u64)port->geom.num_cyl *
			    (u64)port->geom.num_hd *
			    (u64)port->geom.num_sec);

	q = blk_init_queue(do_vdc_request, &port->vio.lock);
	if (!q) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return -ENOMEM;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

	blk_queue_max_hw_segments(q, port->ring_cookies);
	blk_queue_max_phys_segments(q, port->ring_cookies);
	blk_queue_max_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;
	g->driverfs_dev = &port->vio.vdev->dev;

	set_capacity(g, port->vdisk_size);

	printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
	       g->disk_name,
	       port->vdisk_size, (port->vdisk_size >> (20 - 9)));

	add_disk(g);

	return 0;
}

static struct ldc_channel_config vdc_ldc_cfg = {
	.event		= vdc_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vdc_vio_ops = {
	.send_attr		= vdc_send_attr,
	.handle_attr		= vdc_handle_attr,
	.handshake_complete	= vdc_handshake_complete,
};

static void print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

static int __devinit vdc_port_probe(struct vio_dev *vdev,
				    const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vdc_port *port;
	int err;

	print_version();

	hp = mdesc_grab();

	err = -ENODEV;
	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%lu] too large.\n",
		       vdev->dev_no);
		goto err_out_release_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
		goto err_out_release_mdesc;
	}

	if (vdev->dev_no >= 26)
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c%c",
			 'a' + ((int)vdev->dev_no / 26) - 1,
			 'a' + ((int)vdev->dev_no % 26));
	else
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));

	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
			      vdc_versions, ARRAY_SIZE(vdc_versions),
			      &vdc_vio_ops, port->disk_name);
	if (err)
		goto err_out_free_port;

	port->vdisk_block_size = 512;
	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
	port->ring_cookies = ((port->max_xfer_size *
			       port->vdisk_block_size) / PAGE_SIZE) + 2;

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vdc_alloc_tx_ring(port);
	if (err)
		goto err_out_free_ldc;

	err = probe_disk(port);
	if (err)
		goto err_out_free_tx_ring;

	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);

	return 0;

err_out_free_tx_ring:
	vdc_free_tx_ring(port);

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_release_mdesc:
	mdesc_release(hp);
	return err;
}

static int vdc_port_remove(struct vio_dev *vdev)
{
	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		del_timer_sync(&port->vio.timer);

		vdc_free_tx_ring(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

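/* VIO device match table and driver glue: every "vdc-port" node in the
 * machine description is probed as one virtual disk client port.
 */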
static struct vio_device_id vdc_port_match[] = {
	{
		.type = "vdc-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);

static struct vio_driver vdc_port_driver = {
	.id_table	= vdc_port_match,
	.probe		= vdc_port_probe,
	.remove		= vdc_port_remove,
	.driver		= {
		.name	= "vdc_port",
		.owner	= THIS_MODULE,
	}
};

static int __init vdc_init(void)
{
	int err;

	err = register_blkdev(0, VDCBLK_NAME);
	if (err < 0)
		goto out_err;

	vdc_major = err;

	err = vio_register_driver(&vdc_port_driver);
	if (err)
		goto out_unregister_blkdev;

	return 0;

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	vdc_major = 0;

out_err:
	return err;
}

static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
}

module_init(vdc_init);
module_exit(vdc_exit);