/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "aacraid.h"

#define AAC_DEBUG_PREAMBLE	KERN_INFO
#define AAC_DEBUG_POSTAMBLE

/**
 * ioctl_send_fib	-	send a FIB from userspace
 * @dev:	adapter being processed
 * @arg:	arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.
 */
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
	struct hw_fib * kfib;
	struct fib *fibptr;
	struct hw_fib * hw_fib = NULL;
	dma_addr_t hw_fib_pa = 0;
	unsigned size;
	int retval;

	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib;
	/*
	 * First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 * Since we copy based on the fib header size, make sure that we
	 * will not overrun the buffer when we copy the memory. Return
	 * an error if we would.
	 */
	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}
		/* Highjack the hw_fib */
		hw_fib = fibptr->hw_fib;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
		if (!kfib) {
			/* Allocation failed: put the original fib back before bailing */
			fibptr->hw_fib = hw_fib;
			fibptr->hw_fib_pa = hw_fib_pa;
			hw_fib = NULL;
			retval = -ENOMEM;
			goto cleanup;
		}
		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to
		 * allow cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval) {
			goto cleanup;
		}
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 * Make sure that the size returned by the adapter (which includes
	 * the header) is less than or equal to the size of a fib, so we
	 * don't corrupt application data. Then copy that size to the user
	 * buffer. (Don't try to add the header information again, since it
	 * was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib = hw_fib;
	}
	if (retval != -EINTR)
		aac_fib_free(fibptr);
	return retval;
}
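
/*
 * Note for FSACTL_SENDFIB/FSACTL_SEND_LARGE_FIB users (a summary of the
 * checks above, not an additional restriction): the buffer handed in must
 * begin with a struct aac_fibhdr; the driver copies in
 * max(header.Size + sizeof(struct aac_fibhdr), header.SenderSize) bytes,
 * and that value may not exceed 2048 bytes even when it is larger than the
 * adapter's max_fib_size.  The same number of bytes is copied back to user
 * space on completion.
 */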

/**
 * open_getadapter_fib	-	open an adapter fib context
 * @dev:	adapter to use
 * @arg:	ioctl argument
 *
 * This routine allocates and registers a new AdapterFibContext for the
 * caller and copies its opaque handle back to user space.
 */

static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 * Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 * Initialize the mutex used to wait for the next AIF.
		 */
		init_MUTEX_LOCKED(&fibctx->wait_sem);
		fibctx->wait = 0;
		/*
		 * Initialize the fibs and set the count of fibs on
		 * the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 * Now add this context onto the adapter's
		 * AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 * next_getadapter_fib	-	get the next fib
 * @dev:	adapter to use
 * @arg:	ioctl argument
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */

static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	spin_lock_irqsave(&dev->fib_lock, flags);
	/*
	 * If there are no fibs to send back, then either wait or return
	 * -EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		struct list_head * entry;
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev, dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (down_interruptible(&fibctx->wait_sem) < 0) {
				status = -EINTR;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}
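
/*
 * Illustrative user-space sketch (not compiled into the driver) of the
 * AdapterFibContext lifecycle implemented above.  The device node name,
 * the user-space headers and the exact member types of struct fib_ioctl
 * and struct hw_fib are assumptions taken from the driver's exported
 * ioctl definitions; treat this as a sketch, not a reference client.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
/* FSACTL_* numbers, struct fib_ioctl and struct hw_fib come from the
 * driver's user-visible header. */

static int drain_one_aif(const char *node)
{
	struct fib_ioctl f;
	struct hw_fib buf;
	unsigned int ctx;
	int fd = open(node, O_RDWR);	/* e.g. "/dev/aac0" (assumed name) */

	if (fd < 0)
		return -1;
	/* Register an AdapterFibContext; the opaque handle comes back in ctx */
	if (ioctl(fd, FSACTL_OPEN_GET_ADAPTER_FIB, &ctx) == 0) {
		f.fibctx = ctx;
		f.wait = 1;			/* sleep until an AIF arrives */
		f.fib = (char *)&buf;		/* one struct hw_fib is copied here */
		if (ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f) == 0)
			printf("received one AIF fib\n");
		/* The handle itself is the ioctl argument on close */
		ioctl(fd, FSACTL_CLOSE_GET_ADAPTER_FIB, (unsigned long)ctx);
	}
	close(fd);
	return 0;
}
#endif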

int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
	struct fib *fib;

	/*
	 * First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head * entry;
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib);
		kfree(fib);
	}
	/*
	 * Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 * Invalidate context
	 */
	fibctx->type = 0;
	/*
	 * Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 * close_getadapter_fib	-	close down user fib context
 * @dev:	adapter
 * @arg:	ioctl arguments
 *
 * This routine will close down the fibctx passed in from the user.
 */

static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head * entry;

	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */

	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(unsigned long)arg) {
			/* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}

/**
 * check_revision	-	report driver version to user space
 * @dev:	adapter
 * @arg:	ioctl arguments
 *
 * This routine returns the driver version.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */

static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	response.compat = 1;
	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
	response.version = cpu_to_le32(version);
#	if (defined(AAC_DRIVER_BUILD))
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
#	else
	response.build = cpu_to_le32(9999);
#	endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}
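
/*
 * Worked example of the version packing above.  The real aac_driver_version
 * string is defined elsewhere in the driver; "1.1-5" is used here purely
 * for illustration:
 *
 *	major "1":  (1 << 24) | 0x00000400	-> 0x01000400
 *	minor "1":  + (1 << 16)			-> 0x01010400
 *	patch "5":  + 5				-> 0x01010405
 *
 * i.e. the major number lives in bits 31-24, the minor in bits 23-16, and
 * the patch level is added into the low 16 bits on top of the constant
 * 0x0400.
 */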

/**
 * aac_send_raw_srb	-	send a raw SRB from userspace
 * @dev:	adapter
 * @arg:	ioctl arguments
 *
 * This routine passes a user supplied SCSI Request Block straight to the
 * adapter, copying the user scatter/gather list through kernel buffers.
 */

static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
{
	struct fib* srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	struct aac_srb_reply* reply;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[32];
	void *sg_list[32];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize = 0;
	int i;

	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 * Allocate and initialize a Fib then setup a BlockWrite command
	 */
	if (!(srbfib = aac_fib_alloc(dev))) {
		return -ENOMEM;
	}
	aac_fib_init(srbfib);

	srbcmd = (struct aac_srb*) fib_data(srbfib);

	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
	if (!user_srbcmd) {
		dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
		rcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(user_srbcmd, user_srb, fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	user_reply = arg + fibsize;

	flags = user_srbcmd->flags; /* from user in cpu order */
	// Fix up srb for endian and force some values

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
	srbcmd->channel  = cpu_to_le32(user_srbcmd->channel);
	srbcmd->id	 = cpu_to_le32(user_srbcmd->id);
	srbcmd->lun      = cpu_to_le32(user_srbcmd->lun);
	srbcmd->timeout  = cpu_to_le32(user_srbcmd->timeout);
	srbcmd->flags    = cpu_to_le32(flags);
	srbcmd->retry_limit = 0; // Obsolete parameter
	srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
	memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));

	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
		  user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	if (dev->dac_support == 1) {
		struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
		struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
		struct user_sgmap* usg;
		byte_count = 0;

		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		actual_fibsize = sizeof(struct aac_srb) -
			sizeof(struct sgentry) +
			((upsg->count & 0xff) *
			 sizeof(struct sgentry));
		if (actual_fibsize != fibsize) { // User made a mistake - should not continue
			dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n"));
			rcode = -EINVAL;
			goto cleanup;
		}
		usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
		  + sizeof(struct sgmap), GFP_KERNEL);
		if (!usg) {
			dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
			rcode = -ENOMEM;
			goto cleanup;
		}
		memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
		  + sizeof(struct sgmap));
		actual_fibsize = sizeof(struct aac_srb) -
			sizeof(struct sgentry) + ((usg->count & 0xff) *
			 sizeof(struct sgentry64));
		if ((data_dir == DMA_NONE) && upsg->count) {
			kfree (usg);
			dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
			rcode = -EINVAL;
			goto cleanup;
		}

		for (i = 0; i < usg->count; i++) {
			u64 addr;
			void* p;
			/* Does this really need to be GFP_DMA? */
			p = kmalloc(usg->sg[i].count, GFP_KERNEL|__GFP_DMA);
			if (!p) {
				kfree (usg);
				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
				  usg->sg[i].count, i, usg->count));
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_user[i] = (void __user *)(long)usg->sg[i].addr;
			sg_list[i] = p; // save so we can clean up later
			sg_indx = i;

			if (flags & SRB_DataOut) {
				if (copy_from_user(p, sg_user[i], upsg->sg[i].count)) {
					kfree (usg);
					dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);

			psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
			psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
			psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
			byte_count += usg->sg[i].count;
		}
		kfree (usg);

		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	} else {
		struct user_sgmap* upsg = &user_srbcmd->sg;
		struct sgmap* psg = &srbcmd->sg;
		byte_count = 0;

		actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
		if (actual_fibsize != fibsize) { // User made a mistake - should not continue
			dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
			  "Raw SRB command calculated fibsize=%d "
			  "user_srbcmd->sg.count=%d aac_srb=%d sgentry=%d "
			  "issued fibsize=%d\n",
			  actual_fibsize, user_srbcmd->sg.count,
			  (int)sizeof(struct aac_srb), (int)sizeof(struct sgentry),
			  fibsize));
			rcode = -EINVAL;
			goto cleanup;
		}
		if ((data_dir == DMA_NONE) && upsg->count) {
			dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
			rcode = -EINVAL;
			goto cleanup;
		}
		for (i = 0; i < upsg->count; i++) {
			dma_addr_t addr;
			void* p;
			p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
			if (!p) {
				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
				  upsg->sg[i].count, i, upsg->count));
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_user[i] = (void __user *)(long)upsg->sg[i].addr;
			sg_list[i] = p; // save so we can clean up later
			sg_indx = i;

			if (flags & SRB_DataOut) {
				if (copy_from_user(p, sg_user[i],
						upsg->sg[i].count)) {
					dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			addr = pci_map_single(dev->pdev, p,
				upsg->sg[i].count, data_dir);

			psg->sg[i].addr = cpu_to_le32(addr);
			psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
			byte_count += upsg->sg[i].count;
		}
		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	}
	if (status == -EINTR) {
		rcode = -EINTR;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for (i = 0 ; i <= sg_indx; i++) {
			byte_count = le32_to_cpu((dev->dac_support == 1)
			  ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
			  : srbcmd->sg.sg[i].count);
			if (copy_to_user(sg_user[i], sg_list[i], byte_count)) {
				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;
			}
		}
	}

	reply = (struct aac_srb_reply *) fib_data(srbfib);
	if (copy_to_user(user_reply, reply, sizeof(struct aac_srb_reply))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

cleanup:
	kfree(user_srbcmd);
	for (i = 0; i <= sg_indx; i++) {
		kfree(sg_list[i]);
	}
	if (rcode != -EINTR) {
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}

	return rcode;
}
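
/*
 * Summary of the FSACTL_SEND_RAW_SRB calling contract enforced above
 * (restating the code, not adding to it): the caller needs CAP_SYS_ADMIN;
 * the count field of the user SRB carries the total size of the SRB block
 * including its scatter/gather list, and must equal the size the driver
 * recomputes from sg.count (for a single-entry 32 bit sg list that is just
 * sizeof(struct aac_srb)); at most 32 sg entries are accepted; SRB_DataIn
 * and SRB_DataOut in flags select the DMA direction and an sg list with no
 * direction is rejected; the struct aac_srb_reply is written back at
 * arg + fibsize when the command completes.
 */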

struct aac_pci_info {
	u32 bus;
	u32 slot;
};


static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}
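
/*
 * Illustrative user-space sketch (not compiled into the driver) of the
 * FSACTL_GET_PCI_INFO call handled above.  The device node name and the
 * availability of the FSACTL_* definitions in user space are assumptions;
 * struct aac_pci_info is the two-word structure defined above.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

static void print_controller_location(const char *node)
{
	struct aac_pci_info info;
	int fd = open(node, O_RDWR);	/* e.g. "/dev/aac0" (assumed name) */

	if (fd < 0)
		return;
	if (ioctl(fd, FSACTL_GET_PCI_INFO, &info) == 0)
		printf("controller at PCI bus %u, slot %u\n", info.bus, info.slot);
	close(fd);
}
#endif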

int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
	int status;

	/*
	 * HBA gets first crack
	 */

	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		return status;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	default:
		status = -ENOTTY;
		break;
	}
	return status;
}