/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages
 * by making a hypervisor call and passing in the 16 bytes.  The hypervisor
 * puts the message in the next 16 byte space in round-robin fashion,
 * turns on the high order bit of the message (the valid bit), and
 * generates an interrupt to the receiver (if interrupts are turned on.)
 * The receiver just turns off the valid bit when they have copied out
 * the message.
 *
 * The VSCSI client builds a SCSI Remote Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers.  When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * Note that some of the underlying infrastructure is different between
 * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
 * the older iSeries hypervisor models.  To support both, some low level
 * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
 * The Makefile should pick one, not two, not zero, of these.
 *
 * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
 * interfaces.  It would be really nice to abstract this above an RDMA
 * layer.
 */
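/*
 * For illustration only: a rough sketch of the 16-byte CRQ entry
 * described above, with the valid byte first and the DMA address of the
 * SRP IU as the payload.  The authoritative layout is struct viosrp_crq
 * in viosrp.h; the field names and widths here are an approximation,
 * not a definition.
 *
 *	struct example_crq_entry {           // hypothetical name
 *		u8  valid;        // 0x80 = valid; receiver clears it
 *		u8  format;       // e.g. VIOSRP_SRP_FORMAT
 *		u8  reserved;
 *		u8  status;
 *		u16 timeout;      // in seconds
 *		u16 IU_length;    // length of the SRP IU
 *		u64 IU_data_ptr;  // DMA address (or correlation token)
 *	};
 */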
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include "ibmvscsi.h"

/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 5;
static int max_requests = 50;

#define IBMVSCSI_VERSION "1.5.8"

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
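/*
 * Load-time example (the values are hypothetical, not recommendations):
 *
 *	modprobe ibmvscsi max_requests=100 init_timeout=10
 *
 * The parameters are also writable through sysfs (S_IWUSR above), but
 * values like max_id/max_channel are copied into the Scsi_Host at probe
 * time, so changes only affect adapters probed afterwards.
 */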
/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool: event_pool to be initialized
 * @size: Number of events in pool
 * @hostdata: ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
				 int size, struct ibmvscsi_host_data *hostdata)
{
	int i;

	pool->size = size;
	pool->next = 0;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage =
	    dma_alloc_coherent(hostdata->dev,
			       pool->size * sizeof(*pool->iu_storage),
			       &pool->iu_token, 0);
	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i) {
		struct srp_event_struct *evt = &pool->events[i];
		memset(&evt->crq, 0x00, sizeof(evt->crq));
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		evt->crq.IU_length = sizeof(*evt->xfer_iu);
		evt->crq.IU_data_ptr = pool->iu_token +
			sizeof(*evt->xfer_iu) * i;
		evt->xfer_iu = pool->iu_storage + i;
		evt->hostdata = hostdata;
		evt->ext_list = NULL;
		evt->ext_list_token = 0;
	}

	return 0;
}

/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool: event_pool to be released
 * @hostdata: ibmvscsi_host_data who owns the event pool
 */
static void release_event_pool(struct event_pool *pool,
			       struct ibmvscsi_host_data *hostdata)
{
	int i, in_use = 0;
	for (i = 0; i < pool->size; ++i) {
		if (atomic_read(&pool->events[i].free) != 1)
			++in_use;
		if (pool->events[i].ext_list) {
			dma_free_coherent(hostdata->dev,
				  SG_ALL * sizeof(struct srp_direct_buf),
				  pool->events[i].ext_list,
				  pool->events[i].ext_list_token);
		}
	}
	if (in_use)
		printk(KERN_WARNING
		       "ibmvscsi: releasing event pool with %d "
		       "events still in use?\n", in_use);
	kfree(pool->events);
	dma_free_coherent(hostdata->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
}

/**
 * valid_event_struct: - Determines if event is valid.
 * @pool: event_pool that contains the event
 * @evt: srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	int index = evt - pool->events;
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

/**
 * free_event_struct: - Changes status of event to "free"
 * @pool: event_pool that contains the event
 * @evt: srp_event_struct to be modified
 */
static void free_event_struct(struct event_pool *pool,
			      struct srp_event_struct *evt)
{
	if (!valid_event_struct(pool, evt)) {
		printk(KERN_ERR
		       "ibmvscsi: Freeing invalid event_struct %p "
		       "(not in pool %p)\n", evt, pool->events);
		return;
	}
	if (atomic_inc_return(&evt->free) != 1) {
		printk(KERN_ERR
		       "ibmvscsi: Freeing event_struct %p "
		       "which is not in use!\n", evt);
		return;
	}
}

/**
 * get_event_struct: - Gets the next free event in pool
 * @pool: event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here, we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
	int i;
	int poolsize = pool->size;
	int offset = pool->next;

	for (i = 0; i < poolsize; i++) {
		offset = (offset + 1) % poolsize;
		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
			pool->next = offset;
			return &pool->events[offset];
		}
	}

	printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
	return NULL;
}
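/*
 * Typical calling pattern for the pool, sketched from the users below
 * (the caller is assumed to hold host_lock, per the comment above):
 *
 *	struct srp_event_struct *evt = get_event_struct(&hostdata->pool);
 *	if (!evt)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	init_event_struct(evt, done_fn, format, timeout);
 *	... fill in evt->iu ...
 *	rc = ibmvscsi_send_srp_event(evt, hostdata);
 *
 * On a send failure the send path itself unmaps the data and calls
 * free_event_struct(), so the caller must not free evt again.
 */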
/**
 * init_event_struct: Initialize fields in an event struct that are always
 * required.
 * @evt_struct: The event
 * @done: Routine to call when the event is responded to
 * @format: SRP or MAD format
 * @timeout: timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
			      void (*done) (struct srp_event_struct *),
			      u8 format,
			      int timeout)
{
	evt_struct->cmnd = NULL;
	evt_struct->cmnd_done = NULL;
	evt_struct->sync_srp = NULL;
	evt_struct->crq.format = format;
	evt_struct->crq.timeout = timeout;
	evt_struct->done = done;
}

/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */

/**
 * set_srp_direction: Set the fields in the srp related to data
 *     direction and number of buffers based on the direction in
 *     the scsi_cmnd and the number of buffers
 */
static void set_srp_direction(struct scsi_cmnd *cmd,
			      struct srp_cmd *srp_cmd,
			      int numbuf)
{
	u8 fmt;

	if (numbuf == 0)
		return;

	if (numbuf == 1)
		fmt = SRP_DATA_DESC_DIRECT;
	else {
		fmt = SRP_DATA_DESC_INDIRECT;
		numbuf = min(numbuf, MAX_INDIRECT_BUFS);

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			srp_cmd->data_out_desc_cnt = numbuf;
		else
			srp_cmd->data_in_desc_cnt = numbuf;
	}

	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		srp_cmd->buf_fmt = fmt << 4;
	else
		srp_cmd->buf_fmt = fmt;
}

static void unmap_sg_list(int num_entries,
			  struct device *dev,
			  struct srp_direct_buf *md)
{
	int i;

	for (i = 0; i < num_entries; ++i)
		dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
}

/**
 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
 * @cmd: srp_cmd whose additional_data member will be unmapped
 * @dev: device for which the memory is mapped
 */
static void unmap_cmd_data(struct srp_cmd *cmd,
			   struct srp_event_struct *evt_struct,
			   struct device *dev)
{
	u8 out_fmt, in_fmt;

	out_fmt = cmd->buf_fmt >> 4;
	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
		return;
	else if (out_fmt == SRP_DATA_DESC_DIRECT ||
		 in_fmt == SRP_DATA_DESC_DIRECT) {
		struct srp_direct_buf *data =
			(struct srp_direct_buf *) cmd->add_data;
		dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
	} else {
		struct srp_indirect_buf *indirect =
			(struct srp_indirect_buf *) cmd->add_data;
		int num_mapped = indirect->table_desc.len /
			sizeof(struct srp_direct_buf);

		if (num_mapped <= MAX_INDIRECT_BUFS) {
			unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
			return;
		}

		unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
	}
}

static int map_sg_list(int num_entries,
		       struct scatterlist *sg,
		       struct srp_direct_buf *md)
{
	int i;
	u64 total_length = 0;

	for (i = 0; i < num_entries; ++i) {
		struct srp_direct_buf *descr = md + i;
		struct scatterlist *sg_entry = &sg[i];
		descr->va = sg_dma_address(sg_entry);
		descr->len = sg_dma_len(sg_entry);
		descr->key = 0;
		total_length += sg_dma_len(sg_entry);
	}
	return total_length;
}
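/*
 * Illustration of the buf_fmt packing used by set_srp_direction() and
 * unpacked by unmap_cmd_data() above: the data-out format lives in the
 * high nibble, the data-in format in the low nibble, so e.g.
 *
 *	WRITE via scatterlist:  buf_fmt == SRP_DATA_DESC_INDIRECT << 4
 *	READ via single buffer: buf_fmt == SRP_DATA_DESC_DIRECT
 */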
/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd: Scsi_Cmnd with the scatterlist
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_sg_data(struct scsi_cmnd *cmd,
		       struct srp_event_struct *evt_struct,
		       struct srp_cmd *srp_cmd, struct device *dev)
{
	int sg_mapped;
	u64 total_length = 0;
	struct scatterlist *sg = cmd->request_buffer;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;
	struct srp_indirect_buf *indirect =
		(struct srp_indirect_buf *) data;

	sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);

	if (sg_mapped == 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		data->va = sg_dma_address(&sg[0]);
		data->len = sg_dma_len(&sg[0]);
		data->key = 0;
		return 1;
	}

	if (sg_mapped > SG_ALL) {
		printk(KERN_ERR
		       "ibmvscsi: More than %d mapped sg entries, got %d\n",
		       SG_ALL, sg_mapped);
		return 0;
	}

	indirect->table_desc.va = 0;
	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
	indirect->table_desc.key = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(sg_mapped, sg,
					   &indirect->desc_list[0]);
		indirect->len = total_length;
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = (struct srp_direct_buf *)
			dma_alloc_coherent(dev,
					   SG_ALL * sizeof(struct srp_direct_buf),
					   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			printk(KERN_ERR
			       "ibmvscsi: Can't allocate memory for indirect table\n");
			return 0;
		}
	}

	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);

	indirect->len = total_length;
	indirect->table_desc.va = evt_struct->ext_list_token;
	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));

	return 1;
}
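/*
 * Descriptor placement, summarized from map_sg_data() above: for up to
 * MAX_INDIRECT_BUFS entries the direct descriptors are stored inline in
 * indirect->desc_list[] within the SRP IU itself; beyond that they are
 * written into the DMA-coherent evt_struct->ext_list table, and
 * table_desc.va is pointed at that external table instead.
 */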
/**
 * map_single_data: - Maps memory and initializes memory descriptor fields
 * @cmd: struct scsi_cmnd with the memory to be mapped
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_single_data(struct scsi_cmnd *cmd,
			   struct srp_cmd *srp_cmd, struct device *dev)
{
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;

	data->va =
		dma_map_single(dev, cmd->request_buffer,
			       cmd->request_bufflen,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(data->va)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer for command!\n");
		return 0;
	}
	data->len = cmd->request_bufflen;
	data->key = 0;

	set_srp_direction(cmd, srp_cmd, 1);

	return 1;
}

/**
 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
 * @cmd: struct scsi_cmnd with the memory to be mapped
 * @srp_cmd: srp_cmd that contains the memory descriptor
 * @dev: dma device for which to map dma memory
 *
 * Called by ibmvscsi_queuecommand() when converting scsi cmds to srp cmds.
 * Returns 1 on success.
 */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
				struct srp_event_struct *evt_struct,
				struct srp_cmd *srp_cmd, struct device *dev)
{
	switch (cmd->sc_data_direction) {
	case DMA_FROM_DEVICE:
	case DMA_TO_DEVICE:
		break;
	case DMA_NONE:
		return 1;
	case DMA_BIDIRECTIONAL:
		printk(KERN_ERR
		       "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
		return 0;
	default:
		printk(KERN_ERR
		       "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
		       cmd->sc_data_direction);
		return 0;
	}

	if (!cmd->request_buffer)
		return 1;
	if (cmd->use_sg)
		return map_sg_data(cmd, evt_struct, srp_cmd, dev);
	return map_single_data(cmd, srp_cmd, dev);
}

/* ------------------------------------------------------------
 * Routines for sending and receiving SRPs
 */
/**
 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
 * @evt_struct: evt_struct to be sent
 * @hostdata: ibmvscsi_host_data of host
 *
 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
 * Note that this routine assumes that host_lock is held for synchronization
 */
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
				   struct ibmvscsi_host_data *hostdata)
{
	u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
	int rc;

	/* If we have exhausted our request limit, just fail this request.
	 * Note that there are rare cases involving driver generated requests
	 * (such as task management requests) that the mid layer may think we
	 * can handle more requests (can_queue) when we actually can't
	 */
	if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
	    (atomic_dec_if_positive(&hostdata->request_limit) < 0))
		goto send_error;

	/* Copy the IU into the transfer area */
	*evt_struct->xfer_iu = evt_struct->iu;
	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;

	/* Add this to the sent list.  We need to do this
	 * before we actually send
	 * in case it comes back REALLY fast
	 */
	list_add_tail(&evt_struct->list, &hostdata->sent);

	if ((rc =
	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
		list_del(&evt_struct->list);

		printk(KERN_ERR "ibmvscsi: send error %d\n", rc);
		goto send_error;
	}

	return 0;

 send_error:
	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);

	free_event_struct(&hostdata->pool, evt_struct);
	return SCSI_MLQUEUE_HOST_BUSY;
}
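/*
 * Correlation scheme, restated from the code above: the address of the
 * evt_struct is used as the SRP tag, and the server echoes it back as
 * the CRQ's IU_data_ptr, which ibmvscsi_handle_crq() converts back into
 * the originating event:
 *
 *	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
 *	...
 *	evt_struct = (struct srp_event_struct *)crq->IU_data_ptr;
 */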
/**
 * handle_cmd_rsp: - Handle responses from commands
 * @evt_struct: srp_event_struct to be handled
 *
 * Used as a callback when sending scsi cmds.
 * Gets called by ibmvscsi_handle_crq()
 */
static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
{
	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
	struct scsi_cmnd *cmnd = evt_struct->cmnd;

	if (unlikely(rsp->opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: bad SRP RSP type %d\n",
			       rsp->opcode);
	}

	if (cmnd) {
		cmnd->result = rsp->status;
		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
			memcpy(cmnd->sense_buffer,
			       rsp->data,
			       rsp->sense_data_len);
		unmap_cmd_data(&evt_struct->iu.srp.cmd,
			       evt_struct,
			       evt_struct->hostdata->dev);

		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
			cmnd->resid = rsp->data_out_res_cnt;
		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
			cmnd->resid = rsp->data_in_res_cnt;
	}

	if (evt_struct->cmnd_done)
		evt_struct->cmnd_done(cmnd);
}

/**
 * lun_from_dev: - Returns the lun of the scsi device
 * @dev: struct scsi_device
 */
static inline u16 lun_from_dev(struct scsi_device *dev)
{
	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
}

/**
 * ibmvscsi_queuecommand: - The queuecommand function of the scsi template
 * @cmnd: struct scsi_cmnd to be executed
 * @done: Callback function to be called when cmnd is completed
 */
static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
				 void (*done) (struct scsi_cmnd *))
{
	struct srp_cmd *srp_cmd;
	struct srp_event_struct *evt_struct;
	struct srp_indirect_buf *indirect;
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
	u16 lun = lun_from_dev(cmnd->device);
	u8 out_fmt, in_fmt;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Set up the actual SRP IU */
	srp_cmd = &evt_struct->iu.srp.cmd;
	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
	srp_cmd->opcode = SRP_CMD;
	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
	srp_cmd->lun = ((u64) lun) << 48;

	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
		printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	init_event_struct(evt_struct,
			  handle_cmd_rsp,
			  VIOSRP_SRP_FORMAT,
			  cmnd->timeout_per_command/HZ);

	evt_struct->cmnd = cmnd;
	evt_struct->cmnd_done = done;

	/* Fix up dma address of the buffer itself */
	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
	out_fmt = srp_cmd->buf_fmt >> 4;
	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
	    indirect->table_desc.va == 0) {
		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
			offsetof(struct srp_cmd, add_data) +
			offsetof(struct srp_indirect_buf, desc_list);
	}

	return ibmvscsi_send_srp_event(evt_struct, hostdata);
}
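/*
 * Worked example of the encoding in lun_from_dev() above: the 0x2 in
 * bits 15-14 selects the SAM "logical unit" addressing method, followed
 * by 6 bits of id, 3 bits of channel and 5 bits of lun (matching the
 * note at the top of this file), so channel 1, id 2, lun 3 becomes
 *
 *	(0x2 << 14) | (2 << 8) | (1 << 5) | 3 == 0x8223
 *
 * which ibmvscsi_queuecommand() then shifts into the top 16 bits of the
 * 64-bit SRP LUN field.
 */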
/* ------------------------------------------------------------
 * Routines for driver initialization
 */
/**
 * adapter_info_rsp: - Handle response to MAD adapter info request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending adapter_info.  Gets called
 * by ibmvscsi_handle_crq()
 */
static void adapter_info_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	dma_unmap_single(hostdata->dev,
			 evt_struct->iu.mad.adapter_info.buffer,
			 evt_struct->iu.mad.adapter_info.common.length,
			 DMA_BIDIRECTIONAL);

	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
		printk("ibmvscsi: error %d getting adapter info\n",
		       evt_struct->xfer_iu->mad.adapter_info.common.status);
	} else {
		printk("ibmvscsi: host srp version: %s, "
		       "host partition %s (%d), OS %d, max io %u\n",
		       hostdata->madapter_info.srp_version,
		       hostdata->madapter_info.partition_name,
		       hostdata->madapter_info.partition_number,
		       hostdata->madapter_info.os_type,
		       hostdata->madapter_info.port_max_txu[0]);

		if (hostdata->madapter_info.port_max_txu[0])
			hostdata->host->max_sectors =
				hostdata->madapter_info.port_max_txu[0] >> 9;

		if (hostdata->madapter_info.os_type == 3 &&
		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
			printk("ibmvscsi: host (Ver. %s) doesn't support large "
			       "transfers\n",
			       hostdata->madapter_info.srp_version);
			printk("ibmvscsi: limiting scatterlists to %d\n",
			       MAX_INDIRECT_BUFS);
			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
		}
	}
}

/**
 * send_mad_adapter_info: - Sends the mad adapter info request
 *      and stores the result so it can be retrieved with
 *      sysfs.  We COULD consider causing a failure if the
 *      returned SRP version doesn't match ours.
 * @hostdata: ibmvscsi_host_data of host
 */
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	struct viosrp_adapter_info *req;
	struct srp_event_struct *evt_struct;
	dma_addr_t addr;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
		       "for ADAPTER_INFO_REQ!\n");
		return;
	}

	init_event_struct(evt_struct,
			  adapter_info_rsp,
			  VIOSRP_MAD_FORMAT,
			  init_timeout * HZ);

	req = &evt_struct->iu.mad.adapter_info;
	memset(req, 0x00, sizeof(*req));

	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
	req->common.length = sizeof(hostdata->madapter_info);
	req->buffer = addr = dma_map_single(hostdata->dev,
					    &hostdata->madapter_info,
					    sizeof(hostdata->madapter_info),
					    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(req->buffer)) {
		printk(KERN_ERR
		       "ibmvscsi: Unable to map request_buffer "
		       "for adapter_info!\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return;
	}

	if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
		printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
		dma_unmap_single(hostdata->dev,
				 addr,
				 sizeof(hostdata->madapter_info),
				 DMA_BIDIRECTIONAL);
	}
}
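/*
 * Unit note for the max_sectors update in adapter_info_rsp() above:
 * port_max_txu[0] is a byte count, while Scsi_Host.max_sectors is in
 * 512-byte sectors, hence the ">> 9" (e.g. a 1 MB maximum transfer
 * becomes 2048 sectors).
 */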
/**
 * login_rsp: - Handle response to SRP login request
 * @evt_struct: srp_event_struct with the response
 *
 * Used as a "done" callback when sending srp_login.  Gets called
 * by ibmvscsi_handle_crq()
 */
static void login_rsp(struct srp_event_struct *evt_struct)
{
	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
	case SRP_LOGIN_RSP:	/* it worked! */
		break;
	case SRP_LOGIN_REJ:	/* refused! */
		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
		       evt_struct->xfer_iu->srp.login_rej.reason);
		/* Login failed.  */
		atomic_set(&hostdata->request_limit, -1);
		return;
	default:
		printk(KERN_ERR
		       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
		       evt_struct->xfer_iu->srp.login_rsp.opcode);
		/* Login failed.  */
		atomic_set(&hostdata->request_limit, -1);
		return;
	}

	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");

	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
	    (max_requests - 2))
		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
			max_requests - 2;

	/* Now we know what the real request-limit is */
	atomic_set(&hostdata->request_limit,
		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);

	hostdata->host->can_queue =
		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;

	if (hostdata->host->can_queue < 1) {
		printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
		return;
	}

	/* If we had any pending I/Os, kick them */
	scsi_unblock_requests(hostdata->host);

	send_mad_adapter_info(hostdata);
	return;
}

/**
 * send_srp_login: - Sends the srp login
 * @hostdata: ibmvscsi_host_data of host
 *
 * Returns zero if successful.
 */
static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
	int rc;
	unsigned long flags;
	struct srp_login_req *login;
	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR
		       "ibmvscsi: couldn't allocate an event for login req!\n");
		return FAILED;
	}

	init_event_struct(evt_struct,
			  login_rsp,
			  VIOSRP_SRP_FORMAT,
			  init_timeout * HZ);

	login = &evt_struct->iu.srp.login_req;
	memset(login, 0x00, sizeof(struct srp_login_req));
	login->opcode = SRP_LOGIN_REQ;
	login->req_it_iu_len = sizeof(union srp_iu);
	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	/* Start out with a request limit of 1, since this is negotiated in
	 * the login request we are just sending
	 */
	atomic_set(&hostdata->request_limit, 1);

	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	printk("ibmvscsi: sent SRP login\n");
	return rc;
}
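/*
 * Request-limit accounting, summarized from the code above:
 * send_srp_login() seeds request_limit with 1 (just enough to cover the
 * login itself); login_rsp() then sets it to the server's req_lim_delta
 * (capped at max_requests - 2).  After that, every SRP-format send
 * decrements it, and every SRP response credits back that response's
 * req_lim_delta in ibmvscsi_handle_crq().
 */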
/**
 * sync_completion: Signal that a synchronous command has completed
 * Note that after returning from this call, the evt_struct is freed.
 * The caller waiting on this completion shouldn't touch the evt_struct
 * again.
 */
static void sync_completion(struct srp_event_struct *evt_struct)
{
	/* copy the response back */
	if (evt_struct->sync_srp)
		*evt_struct->sync_srp = *evt_struct->xfer_iu;

	complete(&evt_struct->comp);
}
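/*
 * The synchronous pattern used by the error handlers below, sketched
 * from their caller side (host_lock is held around the send and dropped
 * before waiting):
 *
 *	evt->sync_srp = &srp_rsp;
 *	init_completion(&evt->comp);
 *	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
 *	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 *	if (rsp_rc == 0)
 *		wait_for_completion(&evt->comp);
 */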
/**
 * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
 * send this over to the server and wait synchronously for the response
 */
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)cmd->device->host->hostdata;
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *found_evt;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);

	/* First, find this command in our sent list so we can figure
	 * out the correct tag
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (!found_evt) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		return FAILED;
	}

	evt = get_event_struct(&hostdata->pool);
	if (evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
		return FAILED;
	}

	init_event_struct(evt,
			  sync_completion,
			  VIOSRP_SRP_FORMAT,
			  init_timeout * HZ);

	tsk_mgmt = &evt->iu.srp.tsk_mgmt;

	/* Set up an abort SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
	tsk_mgmt->task_tag = (u64) found_evt;

	printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
	       tsk_mgmt->lun, tsk_mgmt->task_tag);

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	if (rsp_rc != 0) {
		printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
		return FAILED;
	}

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: abort bad SRP RSP type %d\n",
			       srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: abort code %d for task tag 0x%lx\n",
			       rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* Because we dropped the spinlock above, it's possible
	 * the event is no longer in our list.  Make sure it didn't
	 * complete while we were aborting
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	found_evt = NULL;
	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
		if (tmp_evt->cmnd == cmd) {
			found_evt = tmp_evt;
			break;
		}
	}

	if (found_evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_INFO
		       "ibmvscsi: aborted task tag 0x%lx completed\n",
		       tsk_mgmt->task_tag);
		return SUCCESS;
	}

	printk(KERN_INFO
	       "ibmvscsi: successfully aborted task tag 0x%lx\n",
	       tsk_mgmt->task_tag);

	cmd->result = (DID_ABORT << 16);
	list_del(&found_evt->list);
	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
		       found_evt->hostdata->dev);
	free_event_struct(&found_evt->hostdata->pool, found_evt);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	atomic_inc(&hostdata->request_limit);
	return SUCCESS;
}
/**
 * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
 * template send this over to the server and wait synchronously for the
 * response
 */
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)cmd->device->host->hostdata;

	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_event_struct *evt;
	struct srp_event_struct *tmp_evt, *pos;
	union viosrp_iu srp_rsp;
	int rsp_rc;
	unsigned long flags;
	u16 lun = lun_from_dev(cmd->device);

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	evt = get_event_struct(&hostdata->pool);
	if (evt == NULL) {
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
		return FAILED;
	}

	init_event_struct(evt,
			  sync_completion,
			  VIOSRP_SRP_FORMAT,
			  init_timeout * HZ);

	tsk_mgmt = &evt->iu.srp.tsk_mgmt;

	/* Set up a lun reset SRP command */
	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = ((u64) lun) << 48;
	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;

	printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
	       tsk_mgmt->lun);

	evt->sync_srp = &srp_rsp;
	init_completion(&evt->comp);
	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	if (rsp_rc != 0) {
		printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
		return FAILED;
	}

	wait_for_completion(&evt->comp);

	/* make sure we got a good response */
	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: reset bad SRP RSP type %d\n",
			       srp_rsp.srp.rsp.opcode);
		return FAILED;
	}

	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
	else
		rsp_rc = srp_rsp.srp.rsp.status;

	if (rsp_rc) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "ibmvscsi: reset code %d for task tag 0x%lx\n",
			       rsp_rc, tsk_mgmt->task_tag);
		return FAILED;
	}

	/* We need to find all commands for this LUN that have not yet been
	 * responded to, and fail them with DID_RESET
	 */
	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
			if (tmp_evt->cmnd)
				tmp_evt->cmnd->result = (DID_RESET << 16);
			list_del(&tmp_evt->list);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
				       tmp_evt->hostdata->dev);
			free_event_struct(&tmp_evt->hostdata->pool,
					  tmp_evt);
			atomic_inc(&hostdata->request_limit);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
			else if (tmp_evt->done)
				tmp_evt->done(tmp_evt);
		}
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
	return SUCCESS;
}
/**
 * purge_requests: Our virtual adapter just shut down.  Purge any sent
 * requests.
 * @hostdata: the adapter
 * @error_code: host byte to fail the outstanding requests with
 */
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
	struct srp_event_struct *tmp_evt, *pos;
	unsigned long flags;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
		list_del(&tmp_evt->list);
		if (tmp_evt->cmnd) {
			tmp_evt->cmnd->result = (error_code << 16);
			unmap_cmd_data(&tmp_evt->iu.srp.cmd,
				       tmp_evt,
				       tmp_evt->hostdata->dev);
			if (tmp_evt->cmnd_done)
				tmp_evt->cmnd_done(tmp_evt->cmnd);
		} else {
			if (tmp_evt->done) {
				tmp_evt->done(tmp_evt);
			}
		}
		free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
	}
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}
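/*
 * CRQ traffic handled below, summarized from the dispatch in
 * ibmvscsi_handle_crq(): the valid byte selects the class of message.
 *
 *	0xC0  initialization: format 0x01 is the partner's "initialize"
 *	      (answered with 0xC002... before logging in); format 0x02
 *	      is the "initialization response" to our own 0xC001...
 *	0xFF  the hypervisor telling us the connection closed: format
 *	      0x06 is recoverable (re-enable the queue and re-send the
 *	      init message); anything else is treated as an adapter
 *	      failure and the CRQ is reset.
 *	0x80  a real payload: a response correlated back to a sent
 *	      evt_struct via IU_data_ptr.
 */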
/**
 * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
 * @crq: Command/Response queue
 * @hostdata: ibmvscsi_host_data of host
 */
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
			 struct ibmvscsi_host_data *hostdata)
{
	unsigned long flags;
	struct srp_event_struct *evt_struct =
		(struct srp_event_struct *)crq->IU_data_ptr;
	switch (crq->valid) {
	case 0xC0:		/* initialization */
		switch (crq->format) {
		case 0x01:	/* Initialization message */
			printk(KERN_INFO "ibmvscsi: partner initialized\n");
			/* Send back a response */
			if (ibmvscsi_send_crq(hostdata,
					      0xC002000000000000LL, 0) == 0) {
				/* Now login */
				send_srp_login(hostdata);
			} else {
				printk(KERN_ERR
				       "ibmvscsi: Unable to send init rsp\n");
			}

			break;
		case 0x02:	/* Initialization response */
			printk(KERN_INFO
			       "ibmvscsi: partner initialization complete\n");

			/* Now login */
			send_srp_login(hostdata);
			break;
		default:
			printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
		}
		return;
	case 0xFF:	/* Hypervisor telling us the connection is closed */
		scsi_block_requests(hostdata->host);
		if (crq->format == 0x06) {
			/* We need to re-setup the interpartition connection */
			printk(KERN_INFO
			       "ibmvscsi: Re-enabling adapter!\n");
			atomic_set(&hostdata->request_limit, -1);
			purge_requests(hostdata, DID_REQUEUE);
			if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
							hostdata) == 0)
				if (ibmvscsi_send_crq(hostdata,
						      0xC001000000000000LL, 0))
					printk(KERN_ERR
					       "ibmvscsi: transmit error after"
					       " enable\n");
		} else {
			printk(KERN_INFO
			       "ibmvscsi: Virtual adapter failed rc %d!\n",
			       crq->format);

			atomic_set(&hostdata->request_limit, -1);
			purge_requests(hostdata, DID_ERROR);
			ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
		}
		scsi_unblock_requests(hostdata->host);
		return;
	case 0x80:		/* real payload */
		break;
	default:
		printk(KERN_ERR
		       "ibmvscsi: got an invalid message type 0x%02x\n",
		       crq->valid);
		return;
	}

	/* The only kind of payload CRQs we should get are responses to
	 * things we send.  Make sure this response is to something we
	 * actually sent
	 */
	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
		printk(KERN_ERR
		       "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
		       (void *)crq->IU_data_ptr);
		return;
	}

	if (atomic_read(&evt_struct->free)) {
		printk(KERN_ERR
		       "ibmvscsi: received duplicate correlation_token 0x%p!\n",
		       (void *)crq->IU_data_ptr);
		return;
	}

	if (crq->format == VIOSRP_SRP_FORMAT)
		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
			   &hostdata->request_limit);

	if (evt_struct->done)
		evt_struct->done(evt_struct);
	else
		printk(KERN_ERR
		       "ibmvscsi: returned done() is NULL; not running it!\n");

	/*
	 * Lock the host_lock before messing with these structures, since we
	 * are running in a task context
	 */
	spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
	list_del(&evt_struct->list);
	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
	spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
}

/**
 * ibmvscsi_do_host_config: Send the command to the server to get host
 * configuration data.  The data is opaque to us.
 */
static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
				   unsigned char *buffer, int length)
{
	struct viosrp_host_config *host_config;
	struct srp_event_struct *evt_struct;
	dma_addr_t addr;
	int rc;

	evt_struct = get_event_struct(&hostdata->pool);
	if (!evt_struct) {
		printk(KERN_ERR
		       "ibmvscsi: couldn't allocate event for HOST_CONFIG!\n");
		return -1;
	}

	init_event_struct(evt_struct,
			  sync_completion,
			  VIOSRP_MAD_FORMAT,
			  init_timeout * HZ);

	host_config = &evt_struct->iu.mad.host_config;

	/* Set up a HOST_CONFIG MAD */
	memset(host_config, 0x00, sizeof(*host_config));
	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
	host_config->common.length = length;
	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
						    length,
						    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(host_config->buffer)) {
		printk(KERN_ERR
		       "ibmvscsi: dma_mapping error getting host config\n");
		free_event_struct(&hostdata->pool, evt_struct);
		return -1;
	}

	init_completion(&evt_struct->comp);
	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
	if (rc == 0)
		wait_for_completion(&evt_struct->comp);
	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);

	return rc;
}
/* ------------------------------------------------------------
 * sysfs attributes
 */
static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.srp_version);
	return len;
}

static struct class_device_attribute ibmvscsi_host_srp_version = {
	.attr = {
		 .name = "srp_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_srp_version,
};

static ssize_t show_host_partition_name(struct class_device *class_dev,
					char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%s\n",
		       hostdata->madapter_info.partition_name);
	return len;
}

static struct class_device_attribute ibmvscsi_host_partition_name = {
	.attr = {
		 .name = "partition_name",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct class_device *class_dev,
					  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.partition_number);
	return len;
}

static struct class_device_attribute ibmvscsi_host_partition_number = {
	.attr = {
		 .name = "partition_number",
		 .mode = S_IRUGO,
		 },
	.show = show_host_partition_number,
};

static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       hostdata->madapter_info.mad_version);
	return len;
}

static struct class_device_attribute ibmvscsi_host_mad_version = {
	.attr = {
		 .name = "mad_version",
		 .mode = S_IRUGO,
		 },
	.show = show_host_mad_version,
};

static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;
	int len;

	len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
	return len;
}

static struct class_device_attribute ibmvscsi_host_os_type = {
	.attr = {
		 .name = "os_type",
		 .mode = S_IRUGO,
		 },
	.show = show_host_os_type,
};

static ssize_t show_host_config(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ibmvscsi_host_data *hostdata =
		(struct ibmvscsi_host_data *)shost->hostdata;

	/* returns null-terminated host config data */
	if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
		return strlen(buf);
	else
		return 0;
}

static struct class_device_attribute ibmvscsi_host_config = {
	.attr = {
		 .name = "config",
		 .mode = S_IRUGO,
		 },
	.show = show_host_config,
};

static struct class_device_attribute *ibmvscsi_attrs[] = {
	&ibmvscsi_host_srp_version,
	&ibmvscsi_host_partition_name,
	&ibmvscsi_host_partition_number,
	&ibmvscsi_host_mad_version,
	&ibmvscsi_host_os_type,
	&ibmvscsi_host_config,
	NULL
};
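/*
 * These attributes appear under the host's scsi_host class directory;
 * the host number is assigned at probe time, so the path below is
 * illustrative only:
 *
 *	# cat /sys/class/scsi_host/host0/srp_version
 *	# cat /sys/class/scsi_host/host0/partition_name
 */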
/* ------------------------------------------------------------
 * SCSI driver registration
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
	.proc_name = "ibmvscsi",
	.queuecommand = ibmvscsi_queuecommand,
	.eh_abort_handler = ibmvscsi_eh_abort_handler,
	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
	.cmd_per_lun = 16,
	.can_queue = 1,		/* Updated after SRP_LOGIN */
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ibmvscsi_attrs,
};

/**
 * Called by bus code for each adapter
 */
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvscsi_host_data *hostdata;
	struct Scsi_Host *host;
	struct device *dev = &vdev->dev;
	unsigned long wait_switch = 0;

	vdev->dev.driver_data = NULL;

	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
	if (!host) {
		printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
		goto scsi_host_alloc_failed;
	}

	hostdata = (struct ibmvscsi_host_data *)host->hostdata;
	memset(hostdata, 0x00, sizeof(*hostdata));
	INIT_LIST_HEAD(&hostdata->sent);
	hostdata->host = host;
	hostdata->dev = dev;
	atomic_set(&hostdata->request_limit, -1);
	hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */

	if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
				    max_requests) != 0) {
		printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
		goto init_crq_failed;
	}
	if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
		printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
		goto init_pool_failed;
	}

	host->max_lun = 8;
	host->max_id = max_id;
	host->max_channel = max_channel;

	if (scsi_add_host(hostdata->host, hostdata->dev))
		goto add_host_failed;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we don't
	 * want to scan
	 */
	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) {
		/*
		 * Wait around max init_timeout secs for the adapter to finish
		 * initializing.  When we are done initializing, we will have a
		 * valid request_limit.  We don't want Linux scanning before
		 * we are ready.
		 */
		for (wait_switch = jiffies + (init_timeout * HZ);
		     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;) {

			msleep(10);
		}

		/* if we now have a valid request_limit, initiate a scan */
		if (atomic_read(&hostdata->request_limit) > 0)
			scsi_scan_host(host);
	}

	vdev->dev.driver_data = hostdata;
	return 0;

      add_host_failed:
	release_event_pool(&hostdata->pool, hostdata);
      init_pool_failed:
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_requests);
      init_crq_failed:
	scsi_host_put(host);
      scsi_host_alloc_failed:
	return -1;
}

static int ibmvscsi_remove(struct vio_dev *vdev)
{
	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
	release_event_pool(&hostdata->pool, hostdata);
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
				   max_requests);

	scsi_remove_host(hostdata->host);
	scsi_host_put(hostdata->host);

	return 0;
}
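/*
 * Probe-time handshake, summarized from the code above: probe sends the
 * CRQ "initialize" message (0xC001...), then polls request_limit for up
 * to init_timeout seconds.  request_limit typically only reaches 2 or
 * more once the partner has answered and SRP login has completed, so
 * the midlayer is not allowed to scan before the transport is usable.
 */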
/**
 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
 * support.
 */
static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
	{"vscsi", "IBM,v-scsi"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);

static struct vio_driver ibmvscsi_driver = {
	.id_table = ibmvscsi_device_table,
	.probe = ibmvscsi_probe,
	.remove = ibmvscsi_remove,
	.driver = {
		.name = "ibmvscsi",
		.owner = THIS_MODULE,
	}
};

int __init ibmvscsi_module_init(void)
{
	return vio_register_driver(&ibmvscsi_driver);
}

void __exit ibmvscsi_module_exit(void)
{
	vio_unregister_driver(&ibmvscsi_driver);
}

module_init(ibmvscsi_module_init);
module_exit(ibmvscsi_module_exit);