/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages
 * by making a hypervisor call and passing in the 16 bytes.  The
 * hypervisor puts the message in the next 16 byte space in round-robin
 * fashion, turns on the high order bit of the message (the valid bit),
 * and generates an interrupt to the receiver (if interrupts are turned
 * on.)  The receiver just turns off the valid bit when it has copied
 * out the message.
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers.  When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * Note that some of the underlying infrastructure is different between
 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
 * the older iSeries hypervisor models.  To support both, some low level
 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
 * The Makefile should pick one, not two, not zero, of these.
 *
 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
 * interfaces.  It would be really nice to abstract this above an RDMA
 * layer.
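 *
 * A rough sketch of that CRQ exchange, using the struct viosrp_crq
 * field names this driver uses (valid, format, IU_length, IU_data_ptr);
 * the authoritative layout lives in the viosrp header, not here:
 *
 *    client                          server
 *    ------                          ------
 *    build SRP IU, DMA-map it
 *    fill a 16 byte CRQ entry:
 *      valid = 0x80, format,
 *      IU_length, IU_data_ptr
 *    ibmvscsi_send_crq()  -------->  DMA the IU in, process it,
 *                                    DMA the SRP response back,
 *    ibmvscsi_handle_crq() <-------  send a response CRQ entry
 *    (receiver clears the valid
 *     bit once it copies out)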
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include "ibmvscsi.h"

/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 5;
static int max_requests = 50;

#define IBMVSCSI_VERSION "1.5.8"

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");

/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool:	event_pool to be initialized
 * @size:	Number of events in pool
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
				 int size, struct ibmvscsi_host_data *hostdata)
{
	int i;

	pool->size = size;
	pool->next = 0;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage =
	    dma_alloc_coherent(hostdata->dev,
			       pool->size * sizeof(*pool->iu_storage),
			       &pool->iu_token, 0);
	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i) {
		struct srp_event_struct *evt = &pool->events[i];
		memset(&evt->crq, 0x00, sizeof(evt->crq));
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		evt->crq.IU_length = sizeof(*evt->xfer_iu);
		evt->crq.IU_data_ptr = pool->iu_token +
			sizeof(*evt->xfer_iu) * i;
		evt->xfer_iu = pool->iu_storage + i;
		evt->hostdata = hostdata;
		evt->ext_list = NULL;
		evt->ext_list_token = 0;
	}

	return 0;
}
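/*
 * For reference, the invariant initialize_event_pool() sets up (a sketch
 * of the layout above, not an additional data structure): event i
 * transfers its IU through iu_storage[i], and its CRQ entry carries the
 * matching DMA token, i.e.
 *
 *	pool->events[i].xfer_iu         == &pool->iu_storage[i]
 *	pool->events[i].crq.IU_data_ptr == pool->iu_token +
 *					   i * sizeof(union viosrp_iu)
 *
 * so the server can DMA responses back without any per-request mapping.
 */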
/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool:	event_pool to be released
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 */
static void release_event_pool(struct event_pool *pool,
			       struct ibmvscsi_host_data *hostdata)
{
	int i, in_use = 0;
	for (i = 0; i < pool->size; ++i) {
		if (atomic_read(&pool->events[i].free) != 1)
			++in_use;
		if (pool->events[i].ext_list) {
			dma_free_coherent(hostdata->dev,
				  SG_ALL * sizeof(struct srp_direct_buf),
				  pool->events[i].ext_list,
				  pool->events[i].ext_list_token);
		}
	}
	if (in_use)
		printk(KERN_WARNING
		       "ibmvscsi: releasing event pool with %d "
		       "events still in use?\n", in_use);
	kfree(pool->events);
	dma_free_coherent(hostdata->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
}

/**
 * valid_event_struct: - Determines if event is valid.
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	int index = evt - pool->events;
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

/**
 * free_event_struct: - Changes status of event to "free"
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be modified
 */
static void free_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	if (!valid_event_struct(pool, evt)) {
		printk(KERN_ERR
		       "ibmvscsi: Freeing invalid event_struct %p "
		       "(not in pool %p)\n", evt, pool->events);
		return;
	}
	if (atomic_inc_return(&evt->free) != 1) {
		printk(KERN_ERR
		       "ibmvscsi: Freeing event_struct %p "
		       "which is not in use!\n", evt);
		return;
	}
}

/**
 * get_event_struct: - Gets the next free event in pool
 * @pool:	event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here, we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
	int i;
	int poolsize = pool->size;
	int offset = pool->next;

	for (i = 0; i < poolsize; i++) {
		offset = (offset + 1) % poolsize;
		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
			pool->next = offset;
			return &pool->events[offset];
		}
	}

	printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
	return NULL;
}
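/*
 * Typical pool usage, as the rest of this file does it (a sketch only;
 * done_fn stands in for handle_cmd_rsp, sync_completion, etc., and
 * callers hold host_lock, per the comment on get_event_struct()):
 *
 *	evt = get_event_struct(&hostdata->pool);
 *	if (!evt)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	init_event_struct(evt, done_fn, format, timeout);
 *	... fill evt->iu, send via ibmvscsi_send_srp_event() ...
 *
 * free_event_struct() is then called from the completion path, so the
 * atomic free flag is the only state shared with the response side.
 */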
/**
 * init_event_struct: Initialize fields in an event struct that are always
 *                    required.
 * @evt_struct:	The event
 * @done:	Routine to call when the event is responded to
 * @format:	SRP or MAD format
 * @timeout:	timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
			      void (*done) (struct srp_event_struct *),
			      u8 format,
			      int timeout)
{
	evt_struct->cmnd = NULL;
	evt_struct->cmnd_done = NULL;
	evt_struct->sync_srp = NULL;
	evt_struct->crq.format = format;
	evt_struct->crq.timeout = timeout;
	evt_struct->done = done;
}

/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */

/**
 * set_srp_direction: Set the fields in the srp related to data
 *     direction and number of buffers based on the direction in
 *     the scsi_cmnd and the number of buffers
 */
static void set_srp_direction(struct scsi_cmnd *cmd,
			      struct srp_cmd *srp_cmd,
			      int numbuf)
{
	u8 fmt;

	if (numbuf == 0)
		return;

	if (numbuf == 1)
		fmt = SRP_DATA_DESC_DIRECT;
	else {
		fmt = SRP_DATA_DESC_INDIRECT;
		numbuf = min(numbuf, MAX_INDIRECT_BUFS);

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			srp_cmd->data_out_desc_cnt = numbuf;
		else
			srp_cmd->data_in_desc_cnt = numbuf;
	}

	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		srp_cmd->buf_fmt = fmt << 4;
	else
		srp_cmd->buf_fmt = fmt;
}

static void unmap_sg_list(int num_entries,
			  struct device *dev,
			  struct srp_direct_buf *md)
{
	int i;

	for (i = 0; i < num_entries; ++i)
		dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
}

/**
 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
 * @cmd:	srp_cmd whose additional_data member will be unmapped
 * @evt_struct:	the event owning any external indirect table
 * @dev:	device for which the memory is mapped
 */
static void unmap_cmd_data(struct srp_cmd *cmd,
			   struct srp_event_struct *evt_struct,
			   struct device *dev)
{
	u8 out_fmt, in_fmt;

	out_fmt = cmd->buf_fmt >> 4;
	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
		return;
	else if (out_fmt == SRP_DATA_DESC_DIRECT ||
		 in_fmt == SRP_DATA_DESC_DIRECT) {
		struct srp_direct_buf *data =
			(struct srp_direct_buf *) cmd->add_data;
		dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
	} else {
		struct srp_indirect_buf *indirect =
			(struct srp_indirect_buf *) cmd->add_data;
		int num_mapped = indirect->table_desc.len /
			sizeof(struct srp_direct_buf);

		if (num_mapped <= MAX_INDIRECT_BUFS) {
			unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
			return;
		}

		unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
	}
}

static u64 map_sg_list(int num_entries,
		       struct scatterlist *sg,
		       struct srp_direct_buf *md)
{
	int i;
	u64 total_length = 0;

	for (i = 0; i < num_entries; ++i) {
		struct srp_direct_buf *descr = md + i;
		struct scatterlist *sg_entry = &sg[i];
		descr->va = sg_dma_address(sg_entry);
		descr->len = sg_dma_len(sg_entry);
		descr->key = 0;
		total_length += sg_dma_len(sg_entry);
	}
	return total_length;
}
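/*
 * Note on the buf_fmt encoding used above and below (this mirrors what
 * set_srp_direction() writes and unmap_cmd_data() decodes; the format
 * values come from the SRP header).  Conceptually:
 *
 *	srp_cmd->buf_fmt = (out_fmt << 4) | in_fmt;
 *
 * i.e. the data-out descriptor format lives in the high nibble and the
 * data-in format in the low nibble, each one of SRP_NO_DATA_DESC,
 * SRP_DATA_DESC_DIRECT or SRP_DATA_DESC_INDIRECT.  A command here only
 * moves data one way, so set_srp_direction() fills just one nibble.
 */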
/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd:	Scsi_Cmnd with the scatterlist
 * @evt_struct:	srp_event_struct that owns the external indirect table
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_sg_data(struct scsi_cmnd *cmd,
		       struct srp_event_struct *evt_struct,
		       struct srp_cmd *srp_cmd, struct device *dev)
{
	int sg_mapped;
	u64 total_length = 0;
	struct scatterlist *sg = cmd->request_buffer;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;
	struct srp_indirect_buf *indirect =
		(struct srp_indirect_buf *) data;

	sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);

	if (sg_mapped == 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		data->va = sg_dma_address(&sg[0]);
		data->len = sg_dma_len(&sg[0]);
		data->key = 0;
		return 1;
	}

	if (sg_mapped > SG_ALL) {
		printk(KERN_ERR
		       "ibmvscsi: More than %d mapped sg entries, got %d\n",
		       SG_ALL, sg_mapped);
		return 0;
	}

	indirect->table_desc.va = 0;
	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
	indirect->table_desc.key = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(sg_mapped, sg,
					   &indirect->desc_list[0]);
		indirect->len = total_length;
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = (struct srp_direct_buf *)
			dma_alloc_coherent(dev,
					   SG_ALL * sizeof(struct srp_direct_buf),
					   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			printk(KERN_ERR
			       "ibmvscsi: Can't allocate memory for indirect table\n");
			return 0;
		}
	}

	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);

	indirect->len = total_length;
	indirect->table_desc.va = evt_struct->ext_list_token;
	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));

	return 1;
}

/**
 * map_single_data: - Maps memory and initializes memory descriptor fields
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_single_data(struct scsi_cmnd *cmd,
			   struct srp_cmd *srp_cmd, struct device *dev)
{
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;

	data->va =
		dma_map_single(dev, cmd->request_buffer,
			       cmd->request_bufflen,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(data->va)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer for command!\n");
		return 0;
	}
	data->len = cmd->request_bufflen;
	data->key = 0;

	set_srp_direction(cmd, srp_cmd, 1);

	return 1;
}
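/*
 * Summary of the descriptor formats map_sg_data() chooses (a restatement
 * of the logic above, not additional policy):
 *
 *	0 mapped entries             -> no data descriptor at all
 *	1 entry                      -> one srp_direct_buf in add_data
 *	2..MAX_INDIRECT_BUFS entries -> srp_indirect_buf with desc_list
 *	                                held inline in the IU
 *	more, up to SG_ALL           -> desc_list in the DMA-allocated
 *	                                evt->ext_list, pointed to by
 *	                                table_desc
 */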
/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd:	struct scsi_cmnd with the memory to be mapped
 * @evt_struct:	srp_event_struct that owns the external indirect table
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	dma device for which to map dma memory
 *
 * Called by ibmvscsi_queuecommand() when converting scsi cmds to srp cmds.
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
				struct srp_event_struct *evt_struct,
				struct srp_cmd *srp_cmd, struct device *dev)
{
	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
	case DMA_TO_DEVICE:
		break;
	case DMA_NONE:
		return 1;
	case DMA_BIDIRECTIONAL:
		printk(KERN_ERR
		       "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
		return 0;
	default:
		printk(KERN_ERR
		       "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
		       cmd->sc_data_direction);
		return 0;
	}

	if (!cmd->request_buffer)
		return 1;
	if (cmd->use_sg)
		return map_sg_data(cmd, evt_struct, srp_cmd, dev);
	return map_single_data(cmd, srp_cmd, dev);
}

/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct:	evt_struct to be sent
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
				   struct ibmvscsi_host_data *hostdata)
{
	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
	int request_status;
	int rc;

	/* If we have exhausted our request limit, just fail this request.
	 * Note that there are rare cases involving driver generated requests
	 * (such as task management requests) that the mid layer may think we
	 * can handle more requests (can_queue) when we actually can't
	 */
	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
		request_status =
			atomic_dec_if_positive(&hostdata->request_limit);
		/* If request limit was -1 when we started, it is now even
		 * less than that
		 */
		if (request_status < -1)
			goto send_error;
		/* Otherwise, if we have run out of requests */
		else if (request_status < 0)
			goto send_busy;
	}

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

	/* Add this to the sent list.  We need to do this
	 * before we actually send
	 * in case it comes back REALLY fast
	 */
	list_add_tail(&evt_struct->list, &hostdata->sent);

	if ((rc =
	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
		list_del(&evt_struct->list);

		printk(KERN_ERR "ibmvscsi: send error %d\n", rc);
		goto send_error;
	}

	return 0;

 send_busy:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	free_event_struct(&hostdata->pool, evt_struct);
	return SCSI_MLQUEUE_HOST_BUSY;

 send_error:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	if (evt_struct->cmnd != NULL) {
		evt_struct->cmnd->result = DID_ERROR << 16;
		evt_struct->cmnd_done(evt_struct->cmnd);
	} else if (evt_struct->done)
		evt_struct->done(evt_struct);

	free_event_struct(&hostdata->pool, evt_struct);
	return 0;
}
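/*
 * How responses find their request (a sketch of the correlation scheme
 * used above and in ibmvscsi_handle_crq() below): the srp_event_struct
 * pointer itself is stored in the SRP tag field before sending,
 *
 *	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
 *
 * and the server echoes it back in crq->IU_data_ptr.  The handler casts
 * it back to a pointer, then uses valid_event_struct() and the atomic
 * free flag to reject corrupt or duplicate correlation tokens.
 */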
/**
 * handle_cmd_rsp: - Handle responses from commands
 * @evt_struct:	srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	if (unlikely(rsp->opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: bad SRP RSP type %d\n",
			       rsp->opcode);
	}

	if (cmnd) {
		cmnd->result = rsp->status;
		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
			memcpy(cmnd->sense_buffer,
			       rsp->data,
			       rsp->sense_data_len);
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
			cmnd->resid = rsp->data_out_res_cnt;
		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
			cmnd->resid = rsp->data_in_res_cnt;
	}

	if (evt_struct->cmnd_done)
		evt_struct->cmnd_done(cmnd);
}

/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev:	struct scsi_device
 */
static inline u16 lun_from_dev(struct scsi_device *dev)
{
	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}

/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmnd:	struct scsi_cmnd to be executed
 * @done:	Callback function to be called when cmnd is completed
 */
static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
				 void (*done) (struct scsi_cmnd *))
{
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct srp_indirect_buf *indirect;
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
	u16 lun = lun_from_dev(cmnd->device);
	u8 out_fmt, in_fmt;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Set up the actual SRP IU */
	srp_cmd = &evt_struct->iu.srp.cmd;
	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
	srp_cmd->opcode = SRP_CMD;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
	srp_cmd->lun = ((u64) lun) << 48;

	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
		printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	init_event_struct(evt_struct,
			  handle_cmd_rsp,
			  VIOSRP_SRP_FORMAT,
			  cmnd->timeout_per_command/HZ);

	evt_struct->cmnd = cmnd;
	evt_struct->cmnd_done = done;

	/* Fix up dma address of the buffer itself */
	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
	out_fmt = srp_cmd->buf_fmt >> 4;
	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
	    indirect->table_desc.va == 0) {
		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
			offsetof(struct srp_cmd, add_data) +
			offsetof(struct srp_indirect_buf, desc_list);
	}

	return ibmvscsi_send_srp_event(evt_struct, hostdata);
}
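/*
 * A note on the fixup above: map_sg_data() leaves table_desc.va == 0 as
 * a sentinel meaning "the desc_list is inline in the IU".  Only
 * ibmvscsi_queuecommand() knows the IU's own DMA address (the event's
 * crq.IU_data_ptr), so it patches the table descriptor to point at the
 * inline list:
 *
 *	table_desc.va = IU_data_ptr
 *		      + offsetof(struct srp_cmd, add_data)
 *		      + offsetof(struct srp_indirect_buf, desc_list);
 *
 * External tables (evt->ext_list) already carry their own DMA token.
 */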
/* ------------------------------------------------------------
 * Routines for driver initialization
 */
/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info.  Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	dma_unmap_single(hostdata->dev,
			 evt_struct->iu.mad.adapter_info.buffer,
			 evt_struct->iu.mad.adapter_info.common.length,
			 DMA_BIDIRECTIONAL);

	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
		printk(KERN_ERR "ibmvscsi: error %d getting adapter info\n",
		       evt_struct->xfer_iu->mad.adapter_info.common.status);
	} else {
		printk(KERN_INFO "ibmvscsi: host srp version: %s, "
		       "host partition %s (%d), OS %d, max io %u\n",
		       hostdata->madapter_info.srp_version,
		       hostdata->madapter_info.partition_name,
		       hostdata->madapter_info.partition_number,
		       hostdata->madapter_info.os_type,
		       hostdata->madapter_info.port_max_txu[0]);

		if (hostdata->madapter_info.port_max_txu[0])
			hostdata->host->max_sectors =
				hostdata->madapter_info.port_max_txu[0] >> 9;

		if (hostdata->madapter_info.os_type == 3 &&
		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
			printk(KERN_INFO "ibmvscsi: host (Ver. %s) doesn't "
			       "support large transfers\n",
			       hostdata->madapter_info.srp_version);
			printk(KERN_INFO
			       "ibmvscsi: limiting scatterlists to %d\n",
			       MAX_INDIRECT_BUFS);
			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
		}
	}
}

/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 *      and stores the result so it can be retrieved with
 *      sysfs.  We COULD consider causing a failure if the
 *      returned SRP version doesn't match ours.
 * @hostdata:	ibmvscsi_host_data of host
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_adapter_info *req;
	struct srp_event_struct *evt_struct;
	dma_addr_t addr;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
		       "for ADAPTER_INFO_REQ!\n");
		return;
	}

	init_event_struct(evt_struct,
			  adapter_info_rsp,
			  VIOSRP_MAD_FORMAT,
			  init_timeout * HZ);

	req = &evt_struct->iu.mad.adapter_info;
	memset(req, 0x00, sizeof(*req));

	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
	req->common.length = sizeof(hostdata->madapter_info);
	req->buffer = addr = dma_map_single(hostdata->dev,
					    &hostdata->madapter_info,
					    sizeof(hostdata->madapter_info),
					    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(req->buffer)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer "
		       "for adapter_info!\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return;
	}

	if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
		printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
		dma_unmap_single(hostdata->dev,
				 addr,
				 sizeof(hostdata->madapter_info),
				 DMA_BIDIRECTIONAL);
	}
}
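/*
 * The request_limit lifecycle, for orientation (this summarizes code in
 * send_srp_login(), login_rsp(), ibmvscsi_send_srp_event() and
 * ibmvscsi_handle_crq(); it adds no new behavior):
 *
 *	-1  connection dead (login failed or transport error)
 *	 0  connection quiesced (CRQ closed, requests blocked)
 *	 1  set just before SRP_LOGIN_REQ; the one credit the login uses
 *	 n  after SRP_LOGIN_RSP: the server's req_lim_delta, capped at
 *	    max_requests - 2; each SRP send decrements it and each SRP
 *	    response adds back the req_lim_delta the server returns
 */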
/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct:	srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login.  Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
	case SRP_LOGIN_RSP:	/* it worked! */
		break;
	case SRP_LOGIN_REJ:	/* refused! */
		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
		       evt_struct->xfer_iu->srp.login_rej.reason);
		/* Login failed.  */
		atomic_set(&hostdata->request_limit, -1);
		return;
	default:
		printk(KERN_ERR
		       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
		       evt_struct->xfer_iu->srp.login_rsp.opcode);
		/* Login failed.  */
		atomic_set(&hostdata->request_limit, -1);
		return;
	}

	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");

	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
	    (max_requests - 2))
		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
			max_requests - 2;

	/* Now we know what the real request-limit is */
	atomic_set(&hostdata->request_limit,
		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);

	hostdata->host->can_queue =
		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;

	if (hostdata->host->can_queue < 1) {
		printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
		return;
	}

	/* If we had any pending I/Os, kick them */
	scsi_unblock_requests(hostdata->host);

	send_mad_adapter_info(hostdata);
	return;
}

/**
 * send_srp_login: - Sends the srp login
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	unsigned long flags;
	struct srp_login_req *login;
	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR
		       "ibmvscsi: couldn't allocate an event for login req!\n");
		return FAILED;
	}

	init_event_struct(evt_struct,
			  login_rsp,
			  VIOSRP_SRP_FORMAT,
			  init_timeout * HZ);

	login = &evt_struct->iu.srp.login_req;
	memset(login, 0x00, sizeof(struct srp_login_req));
	login->opcode = SRP_LOGIN_REQ;
	login->req_it_iu_len = sizeof(union srp_iu);
	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	/* Start out with a request limit of 1, since this is negotiated in
	 * the login request we are just sending
	 */
	atomic_set(&hostdata->request_limit, 1);

	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	printk(KERN_INFO "ibmvscsi: sent SRP login\n");
	return rc;
}
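/*
 * Synchronous requests (the error handlers and host-config code below)
 * all follow the same pattern, sketched here with evt standing for any
 * event struct obtained from the pool:
 *
 *	init_event_struct(evt, sync_completion, format, timeout);
 *	evt->sync_srp = &srp_rsp;	// response is copied here
 *	init_completion(&evt->comp);
 *	rc = ibmvscsi_send_srp_event(evt, hostdata);
 *	if (rc == 0)
 *		wait_for_completion(&evt->comp);
 *
 * sync_completion() below copies the response and completes evt->comp;
 * after that the evt_struct is freed and must not be touched again.
 */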
/**
 * sync_completion: Signal that a synchronous command has completed.
 * Note that after returning from this call, the evt_struct is freed;
 * the caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */
static void sync_completion(struct srp_event_struct *evt_struct)
{
	/* copy the response back */
	if (evt_struct->sync_srp)
		*evt_struct->sync_srp = *evt_struct->xfer_iu;

	complete(&evt_struct->comp);
}

/**
 * ibmvscsi_eh_abort_handler: Abort a command... from scsi host template
 * send this over to the server and wait synchronously for the response
 */
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)cmd->device->host->hostdata;
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *found_evt;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);

	/* First, find this command in our sent list so we can figure
	 * out the correct tag
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (!found_evt) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		return FAILED;
	}

	evt = get_event_struct(&hostdata->pool);
	if (evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
		return FAILED;
	}

	init_event_struct(evt,
			  sync_completion,
			  VIOSRP_SRP_FORMAT,
			  init_timeout * HZ);

	tsk_mgmt = &evt->iu.srp.tsk_mgmt;

	/* Set up an abort SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
	tsk_mgmt->task_tag = (u64) found_evt;

	printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
	       tsk_mgmt->lun, tsk_mgmt->task_tag);

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	if (rsp_rc != 0) {
		printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
		return FAILED;
	}

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: abort bad SRP RSP type %d\n",
			       srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: abort code %d for task tag 0x%lx\n",
			       rsp_rc,
			       tsk_mgmt->task_tag);
		return FAILED;
	}

	/* Because we dropped the spinlock above, it's possible
	 * the event is no longer in our list.  Make sure it didn't
	 * complete while we were aborting
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (found_evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_INFO
		       "ibmvscsi: aborted task tag 0x%lx completed\n",
		       tsk_mgmt->task_tag);
		return SUCCESS;
	}

	printk(KERN_INFO
	       "ibmvscsi: successfully aborted task tag 0x%lx\n",
	       tsk_mgmt->task_tag);

	cmd->result = (DID_ABORT << 16);
	list_del(&found_evt->list);
	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
		       found_evt->hostdata->dev);
	free_event_struct(&found_evt->hostdata->pool, found_evt);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	atomic_inc(&hostdata->request_limit);
	return SUCCESS;
}
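/*
 * Task-management responses (for the abort above and the LUN reset
 * below) are decoded the same way: if SRP_RSP_FLAG_RSPVALID is set, the
 * response data carries the response code, otherwise the status field
 * is used, i.e.
 *
 *	rsp_rc = (rsp.flags & SRP_RSP_FLAG_RSPVALID)
 *		? *((int *)rsp.data) : rsp.status;
 *
 * and zero means the server accepted the task-management function.
 */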
/**
 * ibmvscsi_eh_device_reset_handler: Reset a single LUN... from scsi host
 * template send this over to the server and wait synchronously for the
 * response
 */
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)cmd->device->host->hostdata;
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *pos;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	evt = get_event_struct(&hostdata->pool);
	if (evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
		return FAILED;
	}

	init_event_struct(evt,
			  sync_completion,
			  VIOSRP_SRP_FORMAT,
			  init_timeout * HZ);

	tsk_mgmt = &evt->iu.srp.tsk_mgmt;

	/* Set up a lun reset SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;

	printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
	       tsk_mgmt->lun);

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	if (rsp_rc != 0) {
		printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
		return FAILED;
	}

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: reset bad SRP RSP type %d\n",
			       srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: reset code %d for task tag 0x%lx\n",
			       rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* We need to find all commands for this LUN that have not yet been
	 * responded to, and fail them with DID_RESET
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
			tmp_evt->cmnd->result = (DID_RESET << 16);
			list_del(&tmp_evt->list);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
				       tmp_evt->hostdata->dev);
			free_event_struct(&tmp_evt->hostdata->pool,
					  tmp_evt);
			atomic_inc(&hostdata->request_limit);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
			else if (tmp_evt->done)
				tmp_evt->done(tmp_evt);
		}
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	return SUCCESS;
}
/**
 * purge_requests: Our virtual adapter just shut down.  Purge any sent
 * requests.
 * @hostdata:	the adapter
 * @error_code:	host byte result to fail the purged commands with
 */
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
	struct srp_event_struct *tmp_evt, *pos;
	unsigned long flags;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		list_del(&tmp_evt->list);
		if (tmp_evt->cmnd) {
			tmp_evt->cmnd->result = (error_code << 16);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
				       tmp_evt,
				       tmp_evt->hostdata->dev);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
		} else {
			if (tmp_evt->done)
				tmp_evt->done(tmp_evt);
		}
		free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}
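/*
 * For orientation, the CRQ entries ibmvscsi_handle_crq() below can see,
 * keyed by the valid byte (a summary of the switch that follows, not a
 * separate protocol definition):
 *
 *	0xC0  initialization:  format 0x01 = partner's init message
 *	                       (respond with 0xC002... and log in);
 *	                       format 0x02 = response to our init message
 *	0xFF  transport event: format 0x06 = partner adapter can be
 *	                       re-enabled; anything else is treated as a
 *	                       failure and triggers a CRQ reset
 *	0x80  payload:         a response whose IU_data_ptr is the
 *	                       correlation token of one of our requests
 */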
/**
 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @hostdata:	ibmvscsi_host_data of host
 */
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
			 struct ibmvscsi_host_data *hostdata)
{
	unsigned long flags;
	struct srp_event_struct *evt_struct =
		(struct srp_event_struct *)crq->IU_data_ptr;
	switch (crq->valid) {
	case 0xC0:		/* initialization */
		switch (crq->format) {
		case 0x01:	/* Initialization message */
			printk(KERN_INFO "ibmvscsi: partner initialized\n");
			/* Send back a response */
			if (ibmvscsi_send_crq(hostdata,
					      0xC002000000000000LL, 0) == 0) {
				/* Now login */
				send_srp_login(hostdata);
			} else {
				printk(KERN_ERR
				       "ibmvscsi: Unable to send init rsp\n");
			}

			break;
		case 0x02:	/* Initialization response */
			printk(KERN_INFO
			       "ibmvscsi: partner initialization complete\n");

			/* Now login */
			send_srp_login(hostdata);
			break;
		default:
			printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
		}
		return;
	case 0xFF:	/* Hypervisor telling us the connection is closed */
		scsi_block_requests(hostdata->host);
		atomic_set(&hostdata->request_limit, 0);
		if (crq->format == 0x06) {
			/* We need to re-setup the interpartition connection */
			printk(KERN_INFO
			       "ibmvscsi: Re-enabling adapter!\n");
			purge_requests(hostdata, DID_REQUEUE);
			if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
							 hostdata)) ||
			    (ibmvscsi_send_crq(hostdata,
					       0xC001000000000000LL, 0))) {
				atomic_set(&hostdata->request_limit, -1);
				printk(KERN_ERR
				       "ibmvscsi: error after enable\n");
			}
		} else {
			printk(KERN_INFO
			       "ibmvscsi: Virtual adapter failed rc %d!\n",
			       crq->format);

			purge_requests(hostdata, DID_ERROR);
			if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
						      hostdata)) ||
			    (ibmvscsi_send_crq(hostdata,
					       0xC001000000000000LL, 0))) {
				atomic_set(&hostdata->request_limit, -1);
				printk(KERN_ERR
				       "ibmvscsi: error after reset\n");
			}
		}
		scsi_unblock_requests(hostdata->host);
		return;
	case 0x80:		/* real payload */
		break;
	default:
		printk(KERN_ERR
		       "ibmvscsi: got an invalid message type 0x%02x\n",
		       crq->valid);
		return;
	}

	/* The only kind of payload CRQs we should get are responses to
	 * things we send.  Make sure this response is to something we
	 * actually sent
	 */
	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
		printk(KERN_ERR
		       "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
		       (void *)crq->IU_data_ptr);
		return;
	}

	if (atomic_read(&evt_struct->free)) {
		printk(KERN_ERR
		       "ibmvscsi: received duplicate correlation_token 0x%p!\n",
		       (void *)crq->IU_data_ptr);
		return;
	}

	if (crq->format == VIOSRP_SRP_FORMAT)
		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
			   &hostdata->request_limit);

	if (evt_struct->done)
		evt_struct->done(evt_struct);
	else
		printk(KERN_ERR
		       "ibmvscsi: returned done() is NULL; not running it!\n");

	/*
	 * Lock the host_lock before messing with these structures, since we
	 * are running in a task context
	 */
	spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
	list_del(&evt_struct->list);
	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
	spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_do_host_config: Send the command to the server to get host
 * configuration data.  The data is opaque to us.
 */
static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
				   unsigned char *buffer, int length)
{
	struct viosrp_host_config *host_config;
	struct srp_event_struct *evt_struct;
	dma_addr_t addr;
	int rc;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR
		       "ibmvscsi: couldn't allocate event for HOST_CONFIG!\n");
		return -1;
	}

	init_event_struct(evt_struct,
			  sync_completion,
			  VIOSRP_MAD_FORMAT,
			  init_timeout * HZ);

	host_config = &evt_struct->iu.mad.host_config;

	/* Set up a HOST_CONFIG MAD request */
	memset(host_config, 0x00, sizeof(*host_config));
	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
	host_config->common.length = length;
	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
						    length,
						    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(host_config->buffer)) {
		printk(KERN_ERR
		       "ibmvscsi: dma_mapping error getting host config\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return -1;
	}

	init_completion(&evt_struct->comp);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
	if (rc == 0)
		wait_for_completion(&evt_struct->comp);
	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);

	return rc;
}
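/*
 * The attributes below surface the madapter_info fields (and the opaque
 * host config buffer) under the scsi_host class device, so they can
 * typically be read as, e.g. (the host number is illustrative):
 *
 *	cat /sys/class/scsi_host/host0/srp_version
 *	cat /sys/class/scsi_host/host0/config
 */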
/* ------------------------------------------------------------
 * sysfs attributes
 */
static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.srp_version);
	return len;
}

static struct class_device_attribute ibmvscsi_host_srp_version = {
	.attr = {
		 .name = "srp_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_srp_version,
};

static ssize_t show_host_partition_name(struct class_device *class_dev,
					char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.partition_name);
	return len;
}

static struct class_device_attribute ibmvscsi_host_partition_name = {
	.attr = {
		 .name = "partition_name",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct class_device *class_dev,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.partition_number);
	return len;
}

static struct class_device_attribute ibmvscsi_host_partition_number = {
	.attr = {
		 .name = "partition_number",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_number,
};

static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.mad_version);
	return len;
}

static struct class_device_attribute ibmvscsi_host_mad_version = {
	.attr = {
		 .name = "mad_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_mad_version,
};

static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
	return len;
}

static struct class_device_attribute ibmvscsi_host_os_type = {
	.attr = {
		 .name = "os_type",
		 .mode = S_IRUGO,
		 },
	.show = show_host_os_type,
};

static ssize_t show_host_config(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;

	/* returns null-terminated host config data */
	if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
		return strlen(buf);
	else
		return 0;
}

static struct class_device_attribute ibmvscsi_host_config = {
	.attr = {
		 .name = "config",
		 .mode = S_IRUGO,
		 },
	.show = show_host_config,
};

static struct class_device_attribute *ibmvscsi_attrs[] = {
	&ibmvscsi_host_srp_version,
	&ibmvscsi_host_partition_name,
	&ibmvscsi_host_partition_number,
	&ibmvscsi_host_mad_version,
	&ibmvscsi_host_os_type,
	&ibmvscsi_host_config,
	NULL
};
/* ------------------------------------------------------------
 * SCSI driver registration
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
	.proc_name = "ibmvscsi",
	.queuecommand = ibmvscsi_queuecommand,
	.eh_abort_handler = ibmvscsi_eh_abort_handler,
	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
	.cmd_per_lun = 16,
	.can_queue = 1,		/* Updated after SRP_LOGIN */
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ibmvscsi_attrs,
};

/**
 * Called by bus code for each adapter
 */
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvscsi_host_data *hostdata;
	struct Scsi_Host *host;
	struct device *dev = &vdev->dev;
	unsigned long wait_switch = 0;
	int rc;

	vdev->dev.driver_data = NULL;

	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
	if (!host) {
		printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
		goto scsi_host_alloc_failed;
	}

	hostdata = (struct ibmvscsi_host_data *)host->hostdata;
	memset(hostdata, 0x00, sizeof(*hostdata));
	INIT_LIST_HEAD(&hostdata->sent);
	hostdata->host = host;
	hostdata->dev = dev;
	atomic_set(&hostdata->request_limit, -1);
	hostdata->host->max_sectors = 32 * 8;	/* default max I/O 32 pages */

	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
	if (rc != 0 && rc != H_RESOURCE) {
		printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
		goto init_crq_failed;
	}
	if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
		printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
		goto init_pool_failed;
	}

	host->max_lun = 8;
	host->max_id = max_id;
	host->max_channel = max_channel;

	if (scsi_add_host(hostdata->host, hostdata->dev))
		goto add_host_failed;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we don't
	 * want to scan
	 */
	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
	    || rc == H_RESOURCE) {
		/*
		 * Wait around max init_timeout secs for the adapter to finish
		 * initializing. When we are done initializing, we will have a
		 * valid request_limit.  We don't want Linux scanning before
		 * we are ready.
		 */
		for (wait_switch = jiffies + (init_timeout * HZ);
		     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {

			msleep(10);
		}

		/* if we now have a valid request_limit, initiate a scan */
		if (atomic_read(&hostdata->request_limit) > 0)
			scsi_scan_host(host);
	}

	vdev->dev.driver_data = hostdata;
	return 0;

      add_host_failed:
	release_event_pool(&hostdata->pool, hostdata);
      init_pool_failed:
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
      init_crq_failed:
	scsi_host_put(host);
      scsi_host_alloc_failed:
	return -1;
}

static int ibmvscsi_remove(struct vio_dev *vdev)
{
	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
	release_event_pool(&hostdata->pool, hostdata);
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
				   max_requests);

	scsi_remove_host(hostdata->host);
	scsi_host_put(hostdata->host);

	return 0;
}
1590 */ 1591 static struct vio_device_id ibmvscsi_device_table[] __devinitdata = { 1592 {"vscsi", "IBM,v-scsi"}, 1593 { "", "" } 1594 }; 1595 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); 1596 1597 static struct vio_driver ibmvscsi_driver = { 1598 .id_table = ibmvscsi_device_table, 1599 .probe = ibmvscsi_probe, 1600 .remove = ibmvscsi_remove, 1601 .driver = { 1602 .name = "ibmvscsi", 1603 .owner = THIS_MODULE, 1604 } 1605 }; 1606 1607 int __init ibmvscsi_module_init(void) 1608 { 1609 return vio_register_driver(&ibmvscsi_driver); 1610 } 1611 1612 void __exit ibmvscsi_module_exit(void) 1613 { 1614 vio_unregister_driver(&ibmvscsi_driver); 1615 } 1616 1617 module_init(ibmvscsi_module_init); 1618 module_exit(ibmvscsi_module_exit); 1619