/*
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2005 Douglas Gilbert
 *
 *  Modified  19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Devfs support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 */

static int sg_version_num = 30533;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.33"

/*
 *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *        (otherwise the macros compile to empty statements).
 *
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/moduleparam.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/cdev.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>

#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#include "scsi_logging.h"

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20050908";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif

#define SG_ALLOW_DIO_DEF 0
#define SG_ALLOW_DIO_CODE	/* compile out by commenting this define */

#define SG_MAX_DEVS 32768

/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
 * Then when using 32 bit integers x * m may overflow during the calculation.
 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
 * calculates the same, but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
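
/*
 * Worked example (added; illustrative values, not from this driver): with
 * HZ=250 and USER_HZ=100, converting x=20000000 user ticks directly as
 * x * 250 / 100 needs the intermediate product 5000000000, which exceeds
 * a 32 bit int. MULDIV(20000000, 250, 100) instead computes
 *	((20000000 % 100) * 250) / 100 + (20000000 / 100) * 250
 *	  = 0 + 200000 * 250 = 50000000
 * which fits, and no intermediate value here exceeds 32 bits.
 */
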
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

#define SG_SECTOR_SZ 512
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)

#define SG_DEV_ARR_LUMP 32	/* amount to over allocate sg_dev_arr by */

static int sg_add(struct class_device *, struct class_interface *);
static void sg_remove(struct class_device *, struct class_interface *);

static Scsi_Request *dummy_cmdp;	/* only used for sizeof */

static DEFINE_RWLOCK(sg_dev_arr_lock);	/* Also used to lock
					   file descriptor list for device */

static struct class_interface sg_interface = {
	.add	= sg_add,
	.remove	= sg_remove,
};

typedef struct sg_scatter_hold {	/* holding area for scsi scatter gather info */
	unsigned short k_use_sg;	/* Count of kernel scatter-gather pieces */
	unsigned short sglist_len;	/* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
	void *buffer;		/* Data buffer or scatter list (k_use_sg>0) */
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode;	/* first byte of command */
} Sg_scatter_hold;

struct sg_device;		/* forward declarations */
struct sg_fd;

typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	Scsi_Request *my_cmdp;	/* != 0 when request with lower levels */
	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[sizeof (dummy_cmdp->sr_sense_buffer)];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	volatile char done;	/* 0->before bh, 1->before read, 2->read */
} Sg_request;

typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct sg_fd *nextfp;	/* NULL when last opened fd on this device */
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat. element */
	Sg_request *headrp;	/* head of request slist, NULL->empty */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
	char low_dma;		/* as in parent but possibly overridden to 1 */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */
} Sg_fd;

typedef struct sg_device {	/* holds the state of each scsi generic device */
	struct scsi_device *device;
	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	Sg_fd *headfp;		/* first open fd belonging to this device */
	volatile char detached;	/* 0->attached, 1->detached pending removal */
	volatile char exclude;	/* opened for exclusive access */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;
	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
} Sg_device;
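
/*
 * Ownership sketch of the structures above (added summary, not from the
 * original source): each Sg_device tracks one SCSI device and heads a
 * singly-linked list of Sg_fd (one per open file descriptor, via nextfp);
 * each Sg_fd embeds req_arr[SG_MAX_QUEUE], from which in-flight Sg_request
 * entries are chained via nextrp. sg_dev_arr_lock guards the per-device
 * fd list, while each fd's rq_list_lock guards its request list.
 */
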
static int sg_fasync(int fd, struct file *filp, int mode);
static void sg_cmd_done(Scsi_Cmnd * SCpnt);	/* tasklet or soft irq callback */
static int sg_start_req(Sg_request * srp);
static void sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
			 int tablesize);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
			   Sg_request * srp);
static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
			    int blocking, int read_only, Sg_request ** o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
			   unsigned char *cmnd, int timeout, int blocking);
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
		      int wr_xf, int *countp, unsigned char __user **up);
static int sg_write_xfer(Sg_request * srp);
static int sg_read_xfer(Sg_request * srp);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static char *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
static void sg_page_free(char *buff, int size);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static int sg_allow_access(unsigned char opcode, char dev_type);
static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
static Sg_device *sg_get_dev(int dev);
static inline unsigned char *sg_scatg2virt(const struct scatterlist *sclp);
#ifdef CONFIG_SCSI_PROC_FS
static int sg_last_dev(void);
#endif

static Sg_device **sg_dev_arr = NULL;
static int sg_dev_max;
static int sg_nr_dev;

#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)

static int
sg_open(struct inode *inode, struct file *filp)
{
	int dev = iminor(inode);
	int flags = filp->f_flags;
	Sg_device *sdp;
	Sg_fd *sfp;
	int res;
	int retval;

	nonseekable_open(inode, filp);
	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
	sdp = sg_get_dev(dev);
	if ((!sdp) || (!sdp->device))
		return -ENXIO;
	if (sdp->detached)
		return -ENODEV;

	/* This driver's module count bumped by fops_get in <linux/fs.h> */
	/* Prevent the device driver from vanishing while we sleep */
	retval = scsi_device_get(sdp->device);
	if (retval)
		return retval;

	if (!((flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device))) {
		retval = -ENXIO;
		/* we are in error recovery for this device */
		goto error_out;
	}

	if (flags & O_EXCL) {
		if (O_RDONLY == (flags & O_ACCMODE)) {
			retval = -EPERM;	/* Can't lock it with read only access */
			goto error_out;
		}
		if (sdp->headfp && (flags & O_NONBLOCK)) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait,
			((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
		if (flags & O_NONBLOCK) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
					   res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	}
	if (sdp->detached) {
		retval = -ENODEV;
		goto error_out;
	}
	if (!sdp->headfp) {	/* no existing opens on this device */
		sdp->sgdebug = 0;
		sdp->sg_tablesize = sdp->device->host->sg_tablesize;
	}
	if ((sfp = sg_add_sfp(sdp, dev)))
		filp->private_data = sfp;
	else {
		if (flags & O_EXCL)
			sdp->exclude = 0;	/* undo if error */
		retval = -ENOMEM;
		goto error_out;
	}
	return 0;

error_out:
	scsi_device_put(sdp->device);
	return retval;
}
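
/*
 * Illustrative user-space view of the open() semantics above (added sketch,
 * not part of the driver; the device path is an example):
 *
 *	int fd = open("/dev/sg0", O_RDWR | O_EXCL | O_NONBLOCK);
 *
 * fails immediately with EBUSY while any other descriptor is open on the
 * device; without O_NONBLOCK the caller sleeps on o_excl_wait instead.
 * Combining O_EXCL with O_RDONLY is rejected with EPERM.
 */
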
/* Following function was formerly called 'sg_close' */
static int
sg_release(struct inode *inode, struct file *filp)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
	sg_fasync(-1, filp, 0);	/* remove filp from async notification list */
	if (0 == sg_remove_sfp(sdp, sfp)) {	/* Returns 1 when sdp gone */
		if (!sdp->detached) {
			scsi_device_put(sdp->device);
		}
		sdp->exclude = 0;
		wake_up_interruptible(&sdp->o_excl_wait);
	}
	return 0;
}

static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int req_pack_id = -1;
	sg_io_hdr_t *hp;
	struct sg_header *old_hdr = NULL;
	int retval = 0;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (!old_hdr)
			return -ENOMEM;
		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		if (old_hdr->reply_len < 0) {
			if (count >= SZ_SG_IO_HDR) {
				sg_io_hdr_t *new_hdr;
				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
				if (!new_hdr) {
					retval = -ENOMEM;
					goto free_old_hdr;
				}
				retval = __copy_from_user
				    (new_hdr, buf, SZ_SG_IO_HDR);
				req_pack_id = new_hdr->pack_id;
				kfree(new_hdr);
				if (retval) {
					retval = -EFAULT;
					goto free_old_hdr;
				}
			}
		} else
			req_pack_id = old_hdr->pack_id;
	}
	srp = sg_get_rq_mark(sfp, req_pack_id);
	if (!srp) {		/* now wait on packet to arrive */
		if (sdp->detached) {
			retval = -ENODEV;
			goto free_old_hdr;
		}
		if (filp->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto free_old_hdr;
		}
		while (1) {
			retval = 0;	/* following macro beats race condition */
			__wait_event_interruptible(sfp->read_wait,
				(sdp->detached ||
				 (srp = sg_get_rq_mark(sfp, req_pack_id))),
				retval);
			if (sdp->detached) {
				retval = -ENODEV;
				goto free_old_hdr;
			}
			if (0 == retval)
				break;

			/* -ERESTARTSYS as signal hit process */
			goto free_old_hdr;
		}
	}
	if (srp->header.interface_id != '\0') {
		retval = sg_new_read(sfp, buf, count, srp);
		goto free_old_hdr;
	}

	hp = &srp->header;
	if (old_hdr == NULL) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (! old_hdr) {
			retval = -ENOMEM;
			goto free_old_hdr;
		}
	}
	memset(old_hdr, 0, SZ_SG_HEADER);
	old_hdr->reply_len = (int) hp->timeout;
	old_hdr->pack_len = old_hdr->reply_len;	/* old, strange behaviour */
	old_hdr->pack_id = hp->pack_id;
	old_hdr->twelve_byte =
	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
	old_hdr->target_status = hp->masked_status;
	old_hdr->host_status = hp->host_status;
	old_hdr->driver_status = hp->driver_status;
	if ((CHECK_CONDITION & hp->masked_status) ||
	    (DRIVER_SENSE & hp->driver_status))
		memcpy(old_hdr->sense_buffer, srp->sense_b,
		       sizeof (old_hdr->sense_buffer));
	switch (hp->host_status) {
	/* This setup of 'result' is for backward compatibility and is best
	   ignored by the user who should use target, host + driver status */
	case DID_OK:
	case DID_PASSTHROUGH:
	case DID_SOFT_ERROR:
		old_hdr->result = 0;
		break;
	case DID_NO_CONNECT:
	case DID_BUS_BUSY:
	case DID_TIME_OUT:
		old_hdr->result = EBUSY;
		break;
	case DID_BAD_TARGET:
	case DID_ABORT:
	case DID_PARITY:
	case DID_RESET:
	case DID_BAD_INTR:
		old_hdr->result = EIO;
		break;
	case DID_ERROR:
		old_hdr->result = (srp->sense_b[0] == 0 &&
				   hp->masked_status == GOOD) ? 0 : EIO;
		break;
	default:
		old_hdr->result = EIO;
		break;
	}

	/* Now copy the result back to the user buffer. */
	if (count >= SZ_SG_HEADER) {
		if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		buf += SZ_SG_HEADER;
		if (count > old_hdr->reply_len)
			count = old_hdr->reply_len;
		if (count > SZ_SG_HEADER) {
			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
				retval = -EFAULT;
				goto free_old_hdr;
			}
		}
	} else
		count = (old_hdr->result == 0) ? 0 : -EIO;
	sg_finish_rem_req(srp);
	retval = count;
free_old_hdr:
	kfree(old_hdr);
	return retval;
}

static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	int err = 0;
	int len;

	if (count < SZ_SG_IO_HDR) {
		err = -EINVAL;
		goto err_out;
	}
	hp->sb_len_wr = 0;
	if ((hp->mx_sb_len > 0) && hp->sbp) {
		if ((CHECK_CONDITION & hp->masked_status) ||
		    (DRIVER_SENSE & hp->driver_status)) {
			int sb_len = sizeof (dummy_cmdp->sr_sense_buffer);
			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
			len = (len > sb_len) ? sb_len : len;
			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
				err = -EFAULT;
				goto err_out;
			}
			hp->sb_len_wr = len;
		}
	}
	if (hp->masked_status || hp->host_status || hp->driver_status)
		hp->info |= SG_INFO_CHECK;
	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
		err = -EFAULT;
		goto err_out;
	}
	err = sg_read_xfer(srp);
err_out:
	sg_finish_rem_req(srp);
	return (0 == err) ? count : err;
}

static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
	int mxsize, cmd_size, k;
	int input_size, blocking;
	unsigned char opcode;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	struct sg_header old_hdr;
	sg_io_hdr_t *hp;
	unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));
	if (sdp->detached)
		return -ENODEV;
	if (!((filp->f_flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device)))
		return -ENXIO;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	if (count < SZ_SG_HEADER)
		return -EIO;
	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
		return -EFAULT;
	blocking = !(filp->f_flags & O_NONBLOCK);
	if (old_hdr.reply_len < 0)
		return sg_new_write(sfp, buf, count, blocking, 0, NULL);
	if (count < (SZ_SG_HEADER + 6))
		return -EIO;	/* The minimum scsi command length is 6 bytes. */

	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
		return -EDOM;
	}
	buf += SZ_SG_HEADER;
	__get_user(opcode, buf);
	if (sfp->next_cmd_len > 0) {
		if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
			SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
			sfp->next_cmd_len = 0;
			sg_remove_request(sfp, srp);
			return -EIO;
		}
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
	} else {
		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
			cmd_size = 12;
	}
	SCSI_LOG_TIMEOUT(4, printk(
		"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
	/* Determine buffer size. */
	input_size = count - cmd_size;
	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
	mxsize -= SZ_SG_HEADER;
	input_size -= SZ_SG_HEADER;
	if (input_size < 0) {
		sg_remove_request(sfp, srp);
		return -EIO;	/* User did not pass enough bytes for this command. */
	}
	hp = &srp->header;
	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
	hp->cmd_len = (unsigned char) cmd_size;
	hp->iovec_count = 0;
	hp->mx_sb_len = 0;
	if (input_size > 0)
		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
	else
		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
	hp->dxfer_len = mxsize;
	hp->dxferp = (char __user *)buf + cmd_size;
	hp->sbp = NULL;
	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
	hp->flags = input_size;	/* structure abuse ... */
	hp->pack_id = old_hdr.pack_id;
	hp->usr_ptr = NULL;
	if (__copy_from_user(cmnd, buf, cmd_size))
		return -EFAULT;
	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
	 * is a non-zero input_size, so emit a warning.
	 */
	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
			       "guessing data in;\n" KERN_WARNING " "
			       "program %s not setting count and/or reply_len properly\n",
			       old_hdr.reply_len - (int)SZ_SG_HEADER,
			       input_size, (unsigned int) cmnd[0],
			       current->comm);
	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
	return (k < 0) ? k : count;
}
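
/*
 * Interface note (added summary): the write() path above tunnels two ABIs.
 * A leading struct sg_header with reply_len >= 0 selects the original
 * interface; a negative reply_len hands the buffer to sg_new_write(),
 * which expects a v3 sg_io_hdr_t whose interface_id is 'S'. Illustrative
 * user-space fragment (not from this driver):
 *
 *	sg_io_hdr_t hdr;
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.interface_id = 'S';
 *	... fill in cmdp/cmd_len/dxferp/dxfer_len ...
 *	write(fd, &hdr, sizeof(hdr));	followed later by read()
 */
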
static ssize_t
sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
	     int blocking, int read_only, Sg_request ** o_srp)
{
	int k;
	Sg_request *srp;
	sg_io_hdr_t *hp;
	unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];
	int timeout;
	unsigned long ul_timeout;

	if (count < SZ_SG_IO_HDR)
		return -EINVAL;
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */

	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
		return -EDOM;
	}
	hp = &srp->header;
	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (hp->interface_id != 'S') {
		sg_remove_request(sfp, srp);
		return -ENOSYS;
	}
	if (hp->flags & SG_FLAG_MMAP_IO) {
		if (hp->dxfer_len > sfp->reserve.bufflen) {
			sg_remove_request(sfp, srp);
			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
		}
		if (hp->flags & SG_FLAG_DIRECT_IO) {
			sg_remove_request(sfp, srp);
			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
		}
		if (sg_res_in_use(sfp)) {
			sg_remove_request(sfp, srp);
			return -EBUSY;	/* reserve buffer already being used */
		}
	}
	ul_timeout = msecs_to_jiffies(srp->header.timeout);
	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
		sg_remove_request(sfp, srp);
		return -EMSGSIZE;
	}
	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	}
	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (read_only &&
	    (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
		sg_remove_request(sfp, srp);
		return -EPERM;
	}
	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
	if (k < 0)
		return k;
	if (o_srp)
		*o_srp = srp;
	return count;
}
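
/*
 * Request lifecycle (added summary): sg_write()/sg_new_write() allocate an
 * Sg_request via sg_add_request(), then sg_common_write() below attaches
 * data buffers (sg_start_req), hands the command to the mid level with
 * scsi_do_req() and returns; completion arrives asynchronously through
 * sg_cmd_done(), after which the response is collected by read(), SG_IO
 * or sg_poll()/SIGPOLL notification.
 */
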
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
		unsigned char *cmnd, int timeout, int blocking)
{
	int k;
	Scsi_Request *SRpnt;
	Sg_device *sdp = sfp->parentdp;
	sg_io_hdr_t *hp = &srp->header;
	request_queue_t *q;

	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
	hp->status = 0;
	hp->masked_status = 0;
	hp->msg_status = 0;
	hp->info = 0;
	hp->host_status = 0;
	hp->driver_status = 0;
	hp->resid = 0;
	SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
				   (int) cmnd[0], (int) hp->cmd_len));

	if ((k = sg_start_req(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
		sg_finish_rem_req(srp);
		return k;	/* probably out of space --> ENOMEM */
	}
	if ((k = sg_write_xfer(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
		sg_finish_rem_req(srp);
		return k;
	}
	if (sdp->detached) {
		sg_finish_rem_req(srp);
		return -ENODEV;
	}
	SRpnt = scsi_allocate_request(sdp->device, GFP_ATOMIC);
	if (SRpnt == NULL) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: no mem\n"));
		sg_finish_rem_req(srp);
		return -ENOMEM;
	}

	srp->my_cmdp = SRpnt;
	q = SRpnt->sr_device->request_queue;
	SRpnt->sr_request->rq_disk = sdp->disk;
	SRpnt->sr_sense_buffer[0] = 0;
	SRpnt->sr_cmd_len = hp->cmd_len;
	SRpnt->sr_use_sg = srp->data.k_use_sg;
	SRpnt->sr_sglist_len = srp->data.sglist_len;
	SRpnt->sr_bufflen = srp->data.bufflen;
	SRpnt->sr_underflow = 0;
	SRpnt->sr_buffer = srp->data.buffer;
	switch (hp->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		SRpnt->sr_data_direction = DMA_FROM_DEVICE;
		break;
	case SG_DXFER_TO_DEV:
		SRpnt->sr_data_direction = DMA_TO_DEVICE;
		break;
	case SG_DXFER_UNKNOWN:
		SRpnt->sr_data_direction = DMA_BIDIRECTIONAL;
		break;
	default:
		SRpnt->sr_data_direction = DMA_NONE;
		break;
	}
	SRpnt->upper_private_data = srp;
	srp->data.k_use_sg = 0;
	srp->data.sglist_len = 0;
	srp->data.bufflen = 0;
	srp->data.buffer = NULL;
	hp->duration = jiffies_to_msecs(jiffies);
	/* Now send everything off to the mid-level. The next time we hear
	   about this packet is when sg_cmd_done() is called (i.e. a
	   callback). */
	scsi_do_req(SRpnt, (void *) cmnd,
		    (void *) SRpnt->sr_buffer, hp->dxfer_len,
		    sg_cmd_done, timeout, SG_DEFAULT_RETRIES);
	/* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
	return 0;
}

static int
sg_srp_done(Sg_request *srp, Sg_fd *sfp)
{
	unsigned long iflags;
	int done;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	done = srp->done;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return done;
}

static int
sg_ioctl(struct inode *inode, struct file *filp,
	 unsigned int cmd_in, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int __user *ip = p;
	int result, val, read_only;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
				   sdp->disk->disk_name, (int) cmd_in));
	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

	switch (cmd_in) {
	case SG_IO:
		{
			int blocking = 1;	/* ignore O_NONBLOCK flag */

			if (sdp->detached)
				return -ENODEV;
			if (!scsi_block_when_processing_errors(sdp->device))
				return -ENXIO;
			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
				return -EFAULT;
			result =
			    sg_new_write(sfp, p, SZ_SG_IO_HDR,
					 blocking, read_only, &srp);
			if (result < 0)
				return result;
			srp->sg_io_owned = 1;
			while (1) {
				result = 0;	/* following macro to beat race condition */
				__wait_event_interruptible(sfp->read_wait,
					(sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
					result);
				if (sdp->detached)
					return -ENODEV;
				if (sfp->closed)
					return 0;	/* request packet dropped already */
				if (0 == result)
					break;
				srp->orphan = 1;
				return result;	/* -ERESTARTSYS because signal hit process */
			}
			write_lock_irqsave(&sfp->rq_list_lock, iflags);
			srp->done = 2;
			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
			return (result < 0) ? result : 0;
		}
	case SG_SET_TIMEOUT:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EIO;
		if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
			val = MULDIV (INT_MAX, USER_HZ, HZ);
		sfp->timeout_user = val;
		sfp->timeout = MULDIV (val, HZ, USER_HZ);

		return 0;
	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
		/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val) {
			sfp->low_dma = 1;
			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
				val = (int) sfp->reserve.bufflen;
				sg_remove_scat(&sfp->reserve);
				sg_build_reserve(sfp, val);
			}
		} else {
			if (sdp->detached)
				return -ENODEV;
			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
		}
		return 0;
	case SG_GET_LOW_DMA:
		return put_user((int) sfp->low_dma, ip);
	case SG_GET_SCSI_ID:
		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
			return -EFAULT;
		else {
			sg_scsi_id_t __user *sg_idp = p;

			if (sdp->detached)
				return -ENODEV;
			__put_user((int) sdp->device->host->host_no,
				   &sg_idp->host_no);
			__put_user((int) sdp->device->channel,
				   &sg_idp->channel);
			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
			__put_user((int) sdp->device->lun, &sg_idp->lun);
			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
			__put_user((short) sdp->device->host->cmd_per_lun,
				   &sg_idp->h_cmd_per_lun);
			__put_user((short) sdp->device->queue_depth,
				   &sg_idp->d_queue_depth);
			__put_user(0, &sg_idp->unused[0]);
			__put_user(0, &sg_idp->unused[1]);
			return 0;
		}
	case SG_SET_FORCE_PACK_ID:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->force_packid = val ? 1 : 0;
		return 0;
	case SG_GET_PACK_ID:
		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
			return -EFAULT;
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned)) {
				read_unlock_irqrestore(&sfp->rq_list_lock,
						       iflags);
				__put_user(srp->header.pack_id, ip);
				return 0;
			}
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		__put_user(-1, ip);
		return 0;
	case SG_GET_NUM_WAITING:
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned))
				++val;
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		return put_user(val, ip);
	case SG_GET_SG_TABLESIZE:
		return put_user(sdp->sg_tablesize, ip);
	case SG_SET_RESERVED_SIZE:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EINVAL;
		if (val != sfp->reserve.bufflen) {
			if (sg_res_in_use(sfp) || sfp->mmap_called)
				return -EBUSY;
			sg_remove_scat(&sfp->reserve);
			sg_build_reserve(sfp, val);
		}
		return 0;
	case SG_GET_RESERVED_SIZE:
		val = (int) sfp->reserve.bufflen;
		return put_user(val, ip);
	case SG_SET_COMMAND_Q:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->cmd_q = val ? 1 : 0;
		return 0;
	case SG_GET_COMMAND_Q:
		return put_user((int) sfp->cmd_q, ip);
	case SG_SET_KEEP_ORPHAN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->keep_orphan = val;
		return 0;
	case SG_GET_KEEP_ORPHAN:
		return put_user((int) sfp->keep_orphan, ip);
	case SG_NEXT_CMD_LEN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->next_cmd_len = (val > 0) ? val : 0;
		return 0;
	case SG_GET_VERSION_NUM:
		return put_user(sg_version_num, ip);
	case SG_GET_ACCESS_COUNT:
		/* faked - we don't have a real access count anymore */
		val = (sdp->device ? 1 : 0);
		return put_user(val, ip);
	case SG_GET_REQUEST_TABLE:
		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
			return -EFAULT;
		else {
			sg_req_info_t *rinfo;
			unsigned int ms;

			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
					GFP_KERNEL);
			if (!rinfo)
				return -ENOMEM;
			read_lock_irqsave(&sfp->rq_list_lock, iflags);
			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
			     ++val, srp = srp ? srp->nextrp : srp) {
				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
				if (srp) {
					rinfo[val].req_state = srp->done + 1;
					rinfo[val].problem =
					    srp->header.masked_status &
					    srp->header.host_status &
					    srp->header.driver_status;
					if (srp->done)
						rinfo[val].duration =
						    srp->header.duration;
					else {
						ms = jiffies_to_msecs(jiffies);
						rinfo[val].duration =
						    (ms > srp->header.duration) ?
						    (ms - srp->header.duration) : 0;
					}
					rinfo[val].orphan = srp->orphan;
					rinfo[val].sg_io_owned =
					    srp->sg_io_owned;
					rinfo[val].pack_id =
					    srp->header.pack_id;
					rinfo[val].usr_ptr =
					    srp->header.usr_ptr;
				}
			}
			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = __copy_to_user(p, rinfo,
						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
			result = result ? -EFAULT : 0;
			kfree(rinfo);
			return result;
		}
	case SG_EMULATED_HOST:
		if (sdp->detached)
			return -ENODEV;
		return put_user(sdp->device->host->hostt->emulated, ip);
	case SG_SCSI_RESET:
		if (sdp->detached)
			return -ENODEV;
		if (filp->f_flags & O_NONBLOCK) {
			if (scsi_host_in_recovery(sdp->device->host))
				return -EBUSY;
		} else if (!scsi_block_when_processing_errors(sdp->device))
			return -EBUSY;
		result = get_user(val, ip);
		if (result)
			return result;
		if (SG_SCSI_RESET_NOTHING == val)
			return 0;
		switch (val) {
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
			break;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
			break;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
			break;
		default:
			return -EINVAL;
		}
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return (scsi_reset_provider(sdp->device, val) ==
			SUCCESS) ? 0 : -EIO;
	case SCSI_IOCTL_SEND_COMMAND:
		if (sdp->detached)
			return -ENODEV;
		if (read_only) {
			unsigned char opcode = WRITE_6;
			Scsi_Ioctl_Command __user *siocp = p;

			if (copy_from_user(&opcode, siocp->data, 1))
				return -EFAULT;
			if (!sg_allow_access(opcode, sdp->device->type))
				return -EPERM;
		}
		return scsi_ioctl_send_command(sdp->device, p);
	case SG_SET_DEBUG:
		result = get_user(val, ip);
		if (result)
			return result;
		sdp->sgdebug = (char) val;
		return 0;
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_TRANSFORM:
		if (sdp->detached)
			return -ENODEV;
		return scsi_ioctl(sdp->device, cmd_in, p);
	default:
		if (read_only)
			return -EPERM;	/* don't know so take safe approach */
		return scsi_ioctl(sdp->device, cmd_in, p);
	}
}
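
/*
 * Illustrative synchronous SG_IO call against the handler above (added
 * user-space sketch, not part of the driver; the INQUIRY command and the
 * buffer sizes are example values):
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	INQUIRY
 *	unsigned char resp[96], sense[32];
 *	sg_io_hdr_t hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.interface_id = 'S';
 *	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *	hdr.cmdp = cdb;  hdr.cmd_len = sizeof(cdb);
 *	hdr.dxferp = resp;  hdr.dxfer_len = sizeof(resp);
 *	hdr.sbp = sense;  hdr.mx_sb_len = sizeof(sense);
 *	hdr.timeout = 20000;	milliseconds
 *	if (ioctl(fd, SG_IO, &hdr) < 0) ...
 *
 * SG_IO always blocks until completion (O_NONBLOCK is ignored) and a
 * signal leaves the request behind as an "orphan" (see above).
 */
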
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	struct scsi_device *sdev;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;

	sdev = sdp->device;
	if (sdev->host->hostt->compat_ioctl) {
		int ret;

		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);

		return ret;
	}

	return -ENOIOCTLCMD;
}
#endif

static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
	unsigned int res = 0;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int count = 0;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
	    || sfp->closed)
		return POLLERR;
	poll_wait(filp, &sfp->read_wait, wait);
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
		++count;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (sdp->detached)
		res |= POLLHUP;
	else if (!sfp->cmd_q) {
		if (0 == count)
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));
	return res;
}

static int
sg_fasync(int fd, struct file *filp, int mode)
{
	int retval;
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
				   sdp->disk->disk_name, mode));

	retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
	return (retval < 0) ? retval : 0;
}

static inline unsigned char *
sg_scatg2virt(const struct scatterlist *sclp)
{
	return (sclp && sclp->page) ?
	    (unsigned char *) page_address(sclp->page) + sclp->offset : NULL;
}

/* When startFinish==1 increments page counts for pages other than the
   first of scatter gather elements obtained from __get_free_pages().
   When startFinish==0 decrements ... */
static void
sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
{
	void *page_ptr;
	struct page *page;
	int k, m;

	SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
				   startFinish, rsv_schp->k_use_sg));
	/* N.B. correction _not_ applied to base page of each allocation */
	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
		struct scatterlist *sclp = rsv_schp->buffer;

		for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
			for (m = PAGE_SIZE; m < sclp->length; m += PAGE_SIZE) {
				page_ptr = sg_scatg2virt(sclp) + m;
				page = virt_to_page(page_ptr);
				if (startFinish)
					get_page(page);
				else {
					if (page_count(page) > 0)
						__put_page(page);
				}
			}
		}
	} else {		/* reserve buffer is just a single allocation */
		for (m = PAGE_SIZE; m < rsv_schp->bufflen; m += PAGE_SIZE) {
			page_ptr = (unsigned char *) rsv_schp->buffer + m;
			page = virt_to_page(page_ptr);
			if (startFinish)
				get_page(page);
			else {
				if (page_count(page) > 0)
					__put_page(page);
			}
		}
	}
}
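
/*
 * User-space sketch of the mmap()/SG_FLAG_MMAP_IO path served by
 * sg_vma_nopage() and sg_mmap() below (added illustration; the length
 * must not exceed the fd's reserve buffer):
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 0);
 *	hdr.flags = SG_FLAG_MMAP_IO;	then ioctl(fd, SG_IO, &hdr)
 *
 * Data is then exchanged through the mapped reserve buffer instead of
 * being copied to and from dxferp.
 */
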
static struct page *
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
{
	Sg_fd *sfp;
	struct page *page = NOPAGE_SIGBUS;
	void *page_ptr = NULL;
	unsigned long offset;
	Sg_scatter_hold *rsv_schp;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return page;
	rsv_schp = &sfp->reserve;
	offset = addr - vma->vm_start;
	if (offset >= rsv_schp->bufflen)
		return page;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
		int k;
		unsigned long sa = vma->vm_start;
		unsigned long len;
		struct scatterlist *sclp = rsv_schp->buffer;

		for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
		     ++k, ++sclp) {
			len = vma->vm_end - sa;
			len = (len < sclp->length) ? len : sclp->length;
			if (offset < len) {
				page_ptr = sg_scatg2virt(sclp) + offset;
				page = virt_to_page(page_ptr);
				get_page(page);	/* increment page count */
				break;
			}
			sa += len;
			offset -= len;
		}
	} else {		/* reserve buffer is just a single allocation */
		page_ptr = (unsigned char *) rsv_schp->buffer + offset;
		page = virt_to_page(page_ptr);
		get_page(page);	/* increment page count */
	}
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}

static struct vm_operations_struct sg_mmap_vm_ops = {
	.nopage = sg_vma_nopage,
};

static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	Sg_fd *sfp;
	unsigned long req_sz;
	Sg_scatter_hold *rsv_schp;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
		return -ENXIO;
	req_sz = vma->vm_end - vma->vm_start;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
	if (vma->vm_pgoff)
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */

	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
		int k;
		unsigned long sa = vma->vm_start;
		unsigned long len;
		struct scatterlist *sclp = rsv_schp->buffer;

		for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
		     ++k, ++sclp) {
			if (0 != sclp->offset)
				return -EFAULT;	/* non page aligned memory ?? */
			len = vma->vm_end - sa;
			len = (len < sclp->length) ? len : sclp->length;
			sa += len;
		}
	} else {		/* reserve buffer is just a single allocation */
		if ((unsigned long) rsv_schp->buffer & (PAGE_SIZE - 1))
			return -EFAULT;	/* non page aligned memory ?? */
	}
	if (0 == sfp->mmap_called) {
		sg_rb_correct4mmap(rsv_schp, 1);	/* do only once per fd lifetime */
		sfp->mmap_called = 1;
	}
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
	return 0;
}

/* This function is a "bottom half" handler that is called by the
 * mid level when a command is completed (or has failed). */
static void
sg_cmd_done(Scsi_Cmnd * SCpnt)
{
	Scsi_Request *SRpnt = NULL;
	Sg_device *sdp = NULL;
	Sg_fd *sfp;
	Sg_request *srp = NULL;
	unsigned long iflags;
	unsigned int ms;

	if (SCpnt && (SRpnt = SCpnt->sc_request))
		srp = (Sg_request *) SRpnt->upper_private_data;
	if (NULL == srp) {
		printk(KERN_ERR "sg_cmd_done: NULL request\n");
		if (SRpnt)
			scsi_release_request(SRpnt);
		return;
	}
	sfp = srp->parentfp;
	if (sfp)
		sdp = sfp->parentdp;
	if ((NULL == sdp) || sdp->detached) {
		printk(KERN_INFO "sg_cmd_done: device detached\n");
		scsi_release_request(SRpnt);
		return;
	}

	/* First transfer ownership of data buffers to sg_device object. */
	srp->data.k_use_sg = SRpnt->sr_use_sg;
	srp->data.sglist_len = SRpnt->sr_sglist_len;
	srp->data.bufflen = SRpnt->sr_bufflen;
	srp->data.buffer = SRpnt->sr_buffer;
	/* now clear out request structure */
	SRpnt->sr_use_sg = 0;
	SRpnt->sr_sglist_len = 0;
	SRpnt->sr_bufflen = 0;
	SRpnt->sr_buffer = NULL;
	SRpnt->sr_underflow = 0;
	SRpnt->sr_request->rq_disk = NULL;	/* "sg" _disowns_ request blk */

	srp->my_cmdp = NULL;

	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
		sdp->disk->disk_name, srp->header.pack_id, (int) SRpnt->sr_result));
	srp->header.resid = SCpnt->resid;
	ms = jiffies_to_msecs(jiffies);
	srp->header.duration = (ms > srp->header.duration) ?
				(ms - srp->header.duration) : 0;
	if (0 != SRpnt->sr_result) {
		struct scsi_sense_hdr sshdr;

		memcpy(srp->sense_b, SRpnt->sr_sense_buffer,
		       sizeof (srp->sense_b));
		srp->header.status = 0xff & SRpnt->sr_result;
		srp->header.masked_status = status_byte(SRpnt->sr_result);
		srp->header.msg_status = msg_byte(SRpnt->sr_result);
		srp->header.host_status = host_byte(SRpnt->sr_result);
		srp->header.driver_status = driver_byte(SRpnt->sr_result);
		if ((sdp->sgdebug > 0) &&
		    ((CHECK_CONDITION == srp->header.masked_status) ||
		     (COMMAND_TERMINATED == srp->header.masked_status)))
			scsi_print_req_sense("sg_cmd_done", SRpnt);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(SRpnt->sr_result) != 0
		    && scsi_command_normalize_sense(SCpnt, &sshdr)
		    && !scsi_sense_is_deferred(&sshdr)
		    && sshdr.sense_key == UNIT_ATTENTION
		    && sdp->device->removable) {
			/* Detected possible disc change. Set the bit - this */
			/* may be used if there are filesystems using this device */
			sdp->device->changed = 1;
		}
	}
	/* Rely on write phase to clean out srp status values, so no "else" */

	scsi_release_request(SRpnt);
	SRpnt = NULL;
	if (sfp->closed) {	/* whoops this fd already released, cleanup */
		SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
		sg_finish_rem_req(srp);
		srp = NULL;
		if (NULL == sfp->headrp) {
			SCSI_LOG_TIMEOUT(1, printk("sg...bh: already closed, final cleanup\n"));
			if (0 == sg_remove_sfp(sdp, sfp)) {	/* device still present */
				scsi_device_put(sdp->device);
			}
			sfp = NULL;
		}
	} else if (srp && srp->orphan) {
		if (sfp->keep_orphan)
			srp->sg_io_owned = 0;
		else {
			sg_finish_rem_req(srp);
			srp = NULL;
		}
	}
	if (sfp && srp) {
		/* Now wake up any sg_read() that is waiting for this packet. */
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
		write_lock_irqsave(&sfp->rq_list_lock, iflags);
		srp->done = 1;
		wake_up_interruptible(&sfp->read_wait);
		write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	}
}
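
/*
 * Completion flow (added summary): sg_cmd_done() runs in interrupt/softirq
 * context. It hands the data buffers back to the Sg_request, records the
 * status bytes, then marks the request done under rq_list_lock, wakes any
 * sleeper on read_wait and raises SIGPOLL/POLL_IN, which is exactly what
 * blocking sg_read(), the SG_IO wait loop and sg_poll() are watching for.
 */
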
static struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.read = sg_read,
	.write = sg_write,
	.poll = sg_poll,
	.ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sg_compat_ioctl,
#endif
	.open = sg_open,
	.mmap = sg_mmap,
	.release = sg_release,
	.fasync = sg_fasync,
};

static struct class *sg_sysfs_class;

static int sg_sysfs_valid = 0;

static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
	Sg_device *sdp;
	unsigned long iflags;
	void *old_sg_dev_arr = NULL;
	int k, error;

	sdp = kmalloc(sizeof(Sg_device), GFP_KERNEL);
	if (!sdp) {
		printk(KERN_WARNING "kmalloc Sg_device failure\n");
		return -ENOMEM;
	}

	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	if (unlikely(sg_nr_dev >= sg_dev_max)) {	/* try to resize */
		Sg_device **tmp_da;
		int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);

		tmp_da = kmalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
		if (unlikely(!tmp_da))
			goto expand_failed;

		write_lock_irqsave(&sg_dev_arr_lock, iflags);
		memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
		memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
		old_sg_dev_arr = sg_dev_arr;
		sg_dev_arr = tmp_da;
		sg_dev_max = tmp_dev_max;
	}

	for (k = 0; k < sg_dev_max; k++)
		if (!sg_dev_arr[k])
			break;
	if (unlikely(k >= SG_MAX_DEVS))
		goto overflow;

	memset(sdp, 0, sizeof(*sdp));
	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
	sprintf(disk->disk_name, "sg%d", k);
	disk->first_minor = k;
	sdp->disk = disk;
	sdp->device = scsidp;
	init_waitqueue_head(&sdp->o_excl_wait);
	sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0;

	sg_nr_dev++;
	sg_dev_arr[k] = sdp;
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	error = k;

out:
	if (error < 0)
		kfree(sdp);
	kfree(old_sg_dev_arr);
	return error;

expand_failed:
	printk(KERN_WARNING "sg_alloc: device array cannot be resized\n");
	error = -ENOMEM;
	goto out;

overflow:
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	sdev_printk(KERN_WARNING, scsidp,
		    "Unable to attach sg device type=%d, minor "
		    "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
	error = -ENODEV;
	goto out;
}

static int
sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
	struct gendisk *disk;
	Sg_device *sdp = NULL;
	struct cdev * cdev = NULL;
	int error, k;

	disk = alloc_disk(1);
	if (!disk) {
		printk(KERN_WARNING "alloc_disk failed\n");
		return -ENOMEM;
	}
	disk->major = SCSI_GENERIC_MAJOR;

	error = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_WARNING "cdev_alloc failed\n");
		goto out;
	}
	cdev->owner = THIS_MODULE;
	cdev->ops = &sg_fops;

	error = sg_alloc(disk, scsidp);
	if (error < 0) {
		printk(KERN_WARNING "sg_alloc failed\n");
		goto out;
	}
	k = error;
	sdp = sg_dev_arr[k];

	devfs_mk_cdev(MKDEV(SCSI_GENERIC_MAJOR, k),
		      S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
		      "%s/generic", scsidp->devfs_name);
	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
	if (error) {
		devfs_remove("%s/generic", scsidp->devfs_name);
		goto out;
	}
	sdp->cdev = cdev;
	if (sg_sysfs_valid) {
		struct class_device * sg_class_member;

		sg_class_member = class_device_create(sg_sysfs_class, NULL,
				MKDEV(SCSI_GENERIC_MAJOR, k),
				cl_dev->dev, "%s",
				disk->disk_name);
		if (IS_ERR(sg_class_member))
			printk(KERN_WARNING "sg_add: "
			       "class_device_create failed\n");
		class_set_devdata(sg_class_member, sdp);
		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					  &sg_class_member->kobj, "generic");
		if (error)
			printk(KERN_ERR "sg_add: unable to make symlink "
			       "'generic' back to sg%d\n", k);
	} else
		printk(KERN_WARNING "sg_add: sg_sys INvalid\n");

	sdev_printk(KERN_NOTICE, scsidp,
		    "Attached scsi generic sg%d type %d\n", k, scsidp->type);

	return 0;

out:
	put_disk(disk);
	if (cdev)
		cdev_del(cdev);
	return error;
}

static void
sg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
	Sg_device *sdp = NULL;
	unsigned long iflags;
	Sg_fd *sfp;
	Sg_fd *tsfp;
	Sg_request *srp;
	Sg_request *tsrp;
	int k, delay;

	if (NULL == sg_dev_arr)
		return;
	delay = 0;
	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	for (k = 0; k < sg_dev_max; k++) {
		sdp = sg_dev_arr[k];
		if ((NULL == sdp) || (sdp->device != scsidp))
			continue;	/* dirty but lowers nesting */
		if (sdp->headfp) {
			sdp->detached = 1;
			for (sfp = sdp->headfp; sfp; sfp = tsfp) {
				tsfp = sfp->nextfp;
				for (srp = sfp->headrp; srp; srp = tsrp) {
					tsrp = srp->nextrp;
					if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
						sg_finish_rem_req(srp);
				}
				if (sfp->closed) {
					scsi_device_put(sdp->device);
					__sg_remove_sfp(sdp, sfp);
				} else {
					delay = 1;
					wake_up_interruptible(&sfp->read_wait);
					kill_fasync(&sfp->async_qp, SIGPOLL,
						    POLL_HUP);
				}
			}
			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k));
			if (NULL == sdp->headfp) {
				sg_dev_arr[k] = NULL;
			}
		} else {	/* nothing active, simple case */
			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
			sg_dev_arr[k] = NULL;
		}
		sg_nr_dev--;
		break;
	}
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);

	if (sdp) {
		sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
		class_device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, k));
		cdev_del(sdp->cdev);
		sdp->cdev = NULL;
		devfs_remove("%s/generic", scsidp->devfs_name);
		put_disk(sdp->disk);
		sdp->disk = NULL;
		if (NULL == sdp->headfp)
			kfree((char *) sdp);
	}

	if (delay)
		msleep(10);	/* dirty detach so delay device destruction */
}

/* Set 'perm' (4th argument) to 0 to disable module_param's definition
 * of sysfs parameters (which module_param doesn't yet support).
 * Sysfs parameters defined explicitly below.
 */
module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);

MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
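
/*
 * Example usage of the parameters above (added; illustrative values):
 *
 *	modprobe sg def_reserved_size=131072 allow_dio=1
 *
 * allow_dio is also writeable at runtime through sysfs since it is
 * declared with S_IWUSR.
 */
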
static int __init
init_sg(void)
{
	int rc;

	if (def_reserved_size >= 0)
		sg_big_buff = def_reserved_size;

	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				    SG_MAX_DEVS, "sg");
	if (rc)
		return rc;
	sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
	if (IS_ERR(sg_sysfs_class)) {
		rc = PTR_ERR(sg_sysfs_class);
		goto err_out;
	}
	sg_sysfs_valid = 1;
	rc = scsi_register_interface(&sg_interface);
	if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
		sg_proc_init();
#endif				/* CONFIG_SCSI_PROC_FS */
		return 0;
	}
	class_destroy(sg_sysfs_class);
err_out:
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
	return rc;
}

static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	sg_proc_cleanup();
#endif				/* CONFIG_SCSI_PROC_FS */
	scsi_unregister_interface(&sg_interface);
	class_destroy(sg_sysfs_class);
	sg_sysfs_valid = 0;
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				 SG_MAX_DEVS);
	kfree((char *)sg_dev_arr);
	sg_dev_arr = NULL;
	sg_dev_max = 0;
}

static int
sg_start_req(Sg_request * srp)
{
	int res;
	Sg_fd *sfp = srp->parentfp;
	sg_io_hdr_t *hp = &srp->header;
	int dxfer_len = (int) hp->dxfer_len;
	int dxfer_dir = hp->dxfer_direction;
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
		return 0;
	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
	    (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
	    (!sfp->parentdp->device->host->unchecked_isa_dma)) {
		res = sg_build_direct(srp, sfp, dxfer_len);
		if (res <= 0)	/* -ve -> error, 0 -> done, 1 -> try indirect */
			return res;
	}
	if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
		sg_link_reserve(sfp, srp, dxfer_len);
	else {
		res = sg_build_indirect(req_schp, sfp, dxfer_len);
		if (res) {
			sg_remove_scat(req_schp);
			return res;
		}
	}
	return 0;
}

static void
sg_finish_rem_req(Sg_request * srp)
{
	Sg_fd *sfp = srp->parentfp;
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
	if (srp->res_used)
		sg_unlink_reserve(sfp, srp);
	else
		sg_remove_scat(req_schp);
	sg_remove_request(sfp, srp);
}

static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
	int ret_sz;
	int elem_sz = sizeof (struct scatterlist);
	int sg_bufflen = tablesize * elem_sz;
	int mx_sc_elems = tablesize;

	schp->buffer = sg_page_malloc(sg_bufflen, sfp->low_dma, &ret_sz);
	if (!schp->buffer)
		return -ENOMEM;
	else if (ret_sz != sg_bufflen) {
		sg_bufflen = ret_sz;
		mx_sc_elems = sg_bufflen / elem_sz;
	}
	schp->sglist_len = sg_bufflen;
	memset(schp->buffer, 0, sg_bufflen);
	return mx_sc_elems;	/* number of scat_gath elements allocated */
}
/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
static int
sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
{
#ifdef SG_ALLOW_DIO_CODE
	sg_io_hdr_t *hp = &srp->header;
	Sg_scatter_hold *schp = &srp->data;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	struct scatterlist *sgl;
	int mx_sc_elems, res;
	struct scsi_device *sdev = sfp->parentdp->device;

	if (((unsigned long)hp->dxferp &
	     queue_dma_alignment(sdev->request_queue)) != 0)
		return 1;
	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems <= 0) {
		return 1;
	}
	sgl = (struct scatterlist *)schp->buffer;
	res = st_map_user_pages(sgl, mx_sc_elems, (unsigned long)hp->dxferp,
				dxfer_len,
				(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0,
				ULONG_MAX);
	if (res <= 0)
		return 1;
	schp->k_use_sg = res;
	schp->dio_in_use = 1;
	hp->info |= SG_INFO_DIRECT_IO;
	return 0;
#else
	return 1;
#endif
}
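/*
 * Illustrative user-space fragment (assumptions: sg_fd is an open
 * file descriptor on an sg device, buf is page aligned, and
 * /proc/scsi/sg/allow_dio has been set to 1). The driver silently
 * falls back to indirect IO whenever one of the preconditions tested
 * above fails:
 *
 *     sg_io_hdr_t io_hdr;
 *
 *     memset(&io_hdr, 0, sizeof(io_hdr));
 *     io_hdr.interface_id = 'S';
 *     io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *     io_hdr.flags = SG_FLAG_DIRECT_IO;
 *     io_hdr.dxferp = buf;
 *     io_hdr.dxfer_len = 4096;
 *     // ... also set cmdp, cmd_len, sbp, mx_sb_len, timeout ...
 *     if (0 == ioctl(sg_fd, SG_IO, &io_hdr) &&
 *         (io_hdr.info & SG_INFO_DIRECT_IO))
 *             ;	// direct IO was actually used
 */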
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
	int ret_sz;
	int blk_size = buff_size;
	unsigned char *p = NULL;

	if ((blk_size < 0) || (!sfp))
		return -EFAULT;
	if (0 == blk_size)
		++blk_size;	/* don't know why */
	/* round request up to next highest SG_SECTOR_SZ byte boundary */
	blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
				   buff_size, blk_size));
	if (blk_size <= SG_SCATTER_SZ) {
		p = sg_page_malloc(blk_size, sfp->low_dma, &ret_sz);
		if (!p)
			return -ENOMEM;
		if (blk_size == ret_sz) {	/* got it on the first attempt */
			schp->k_use_sg = 0;
			schp->buffer = p;
			schp->bufflen = blk_size;
			schp->b_malloc_len = blk_size;
			return 0;
		}
	} else {
		p = sg_page_malloc(SG_SCATTER_SZ, sfp->low_dma, &ret_sz);
		if (!p)
			return -ENOMEM;
	}
	/* Want some local declarations, so start new block ... */
	{	/* let's try and build a scatter gather list */
		struct scatterlist *sclp;
		int k, rem_sz, num;
		int mx_sc_elems;
		int sg_tablesize = sfp->parentdp->sg_tablesize;
		int first = 1;

		/* N.B. ret_sz carried into this block ... */
		mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
		if (mx_sc_elems < 0)
			return mx_sc_elems;	/* most likely -ENOMEM */

		for (k = 0, sclp = schp->buffer, rem_sz = blk_size;
		     (rem_sz > 0) && (k < mx_sc_elems);
		     ++k, rem_sz -= ret_sz, ++sclp) {
			if (first)
				first = 0;
			else {
				num = (rem_sz > SG_SCATTER_SZ) ?
				      SG_SCATTER_SZ : rem_sz;
				p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
				if (!p)
					break;
			}
			sg_set_buf(sclp, p, ret_sz);

			SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, a=0x%p, len=%d\n",
						   k, sg_scatg2virt(sclp), ret_sz));
		}	/* end of for loop */
		schp->k_use_sg = k;
		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
		schp->bufflen = blk_size;
		if (rem_sz > 0)	/* must have failed */
			return -ENOMEM;
	}
	return 0;
}
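/*
 * Example of the SG_SECTOR_SZ rounding above (SG_SECTOR_SZ is 512,
 * so SG_SECTOR_MSK is 511): a 1300 byte request becomes
 *
 *     blk_size = (1300 + 511) & ~511 = 1536
 *
 * i.e. three whole 512 byte sectors.
 */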
static int
sg_write_xfer(Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	Sg_scatter_hold *schp = &srp->data;
	int num_xfer = 0;
	int j, k, onum, usglen, ksglen, res;
	int iovec_count = (int) hp->iovec_count;
	int dxfer_dir = hp->dxfer_direction;
	unsigned char *p;
	unsigned char __user *up;
	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;

	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
	    (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
		num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
		if (schp->bufflen < num_xfer)
			num_xfer = schp->bufflen;
	}
	if ((num_xfer <= 0) || (schp->dio_in_use) ||
	    (new_interface
	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
		return 0;

	SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
				   num_xfer, iovec_count, schp->k_use_sg));
	if (iovec_count) {
		onum = iovec_count;
		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
			return -EFAULT;
	} else
		onum = 1;

	if (0 == schp->k_use_sg) {	/* kernel has single buffer */
		for (j = 0, p = schp->buffer; j < onum; ++j) {
			res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
			if (res)
				return res;
			usglen = (num_xfer > usglen) ? usglen : num_xfer;
			if (__copy_from_user(p, up, usglen))
				return -EFAULT;
			p += usglen;
			num_xfer -= usglen;
			if (num_xfer <= 0)
				return 0;
		}
	} else {		/* kernel using scatter gather list */
		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;

		ksglen = (int) sclp->length;
		p = sg_scatg2virt(sclp);
		for (j = 0, k = 0; j < onum; ++j) {
			res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
			if (res)
				return res;

			for (; p; ++sclp, ksglen = (int) sclp->length,
			     p = sg_scatg2virt(sclp)) {
				if (usglen <= 0)
					break;
				if (ksglen > usglen) {
					if (usglen >= num_xfer) {
						if (__copy_from_user(p, up, num_xfer))
							return -EFAULT;
						return 0;
					}
					if (__copy_from_user(p, up, usglen))
						return -EFAULT;
					p += usglen;
					ksglen -= usglen;
					break;
				} else {
					if (ksglen >= num_xfer) {
						if (__copy_from_user(p, up, num_xfer))
							return -EFAULT;
						return 0;
					}
					if (__copy_from_user(p, up, ksglen))
						return -EFAULT;
					up += ksglen;
					usglen -= ksglen;
				}
				++k;
				if (k >= schp->k_use_sg)
					return 0;
			}
		}
	}
	return 0;
}

static int
sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
	   int wr_xf, int *countp, unsigned char __user **up)
{
	int num_xfer = (int) hp->dxfer_len;
	unsigned char __user *p = hp->dxferp;
	int count;

	if (0 == sg_num) {
		if (wr_xf && ('\0' == hp->interface_id))
			count = (int) hp->flags;	/* holds "old" input_size */
		else
			count = num_xfer;
	} else {
		sg_iovec_t iovec;

		if (__copy_from_user(&iovec, p + ind * SZ_SG_IOVEC, SZ_SG_IOVEC))
			return -EFAULT;
		p = iovec.iov_base;
		count = (int) iovec.iov_len;
	}
	if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
		return -EFAULT;
	if (up)
		*up = p;
	if (countp)
		*countp = count;
	return 0;
}
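/*
 * Illustrative caller setup for the iovec path handled above (user
 * space, hypothetical buffers buf_a and buf_b): with iovec_count
 * non-zero, dxferp points at an array of sg_iovec_t rather than at
 * the data itself:
 *
 *     sg_iovec_t iov[2] = {
 *             {buf_a, 512},
 *             {buf_b, 1024},
 *     };
 *
 *     io_hdr.iovec_count = 2;
 *     io_hdr.dxferp = iov;
 *     io_hdr.dxfer_len = 512 + 1024;
 */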
static void
sg_remove_scat(Sg_scatter_hold * schp)
{
	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
	if (schp->buffer && (schp->sglist_len > 0)) {
		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;

		if (schp->dio_in_use) {
#ifdef SG_ALLOW_DIO_CODE
			st_unmap_user_pages(sclp, schp->k_use_sg, TRUE);
#endif
		} else {
			int k;

			for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp);
			     ++k, ++sclp) {
				SCSI_LOG_TIMEOUT(5, printk(
				    "sg_remove_scat: k=%d, a=0x%p, len=%d\n",
				    k, sg_scatg2virt(sclp), sclp->length));
				sg_page_free(sg_scatg2virt(sclp), sclp->length);
				sclp->page = NULL;
				sclp->offset = 0;
				sclp->length = 0;
			}
		}
		sg_page_free(schp->buffer, schp->sglist_len);
	} else if (schp->buffer)
		sg_page_free(schp->buffer, schp->b_malloc_len);
	memset(schp, 0, sizeof (*schp));
}

static int
sg_read_xfer(Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	Sg_scatter_hold *schp = &srp->data;
	int num_xfer = 0;
	int j, k, onum, usglen, ksglen, res;
	int iovec_count = (int) hp->iovec_count;
	int dxfer_dir = hp->dxfer_direction;
	unsigned char *p;
	unsigned char __user *up;
	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;

	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
	    || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
		num_xfer = hp->dxfer_len;
		if (schp->bufflen < num_xfer)
			num_xfer = schp->bufflen;
	}
	if ((num_xfer <= 0) || (schp->dio_in_use) ||
	    (new_interface
	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
		return 0;

	SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
				   num_xfer, iovec_count, schp->k_use_sg));
	if (iovec_count) {
		onum = iovec_count;
		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
			return -EFAULT;
	} else
		onum = 1;

	if (0 == schp->k_use_sg) {	/* kernel has single buffer */
		for (j = 0, p = schp->buffer; j < onum; ++j) {
			res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
			if (res)
				return res;
			usglen = (num_xfer > usglen) ? usglen : num_xfer;
			if (__copy_to_user(up, p, usglen))
				return -EFAULT;
			p += usglen;
			num_xfer -= usglen;
			if (num_xfer <= 0)
				return 0;
		}
	} else {		/* kernel using scatter gather list */
		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;

		ksglen = (int) sclp->length;
		p = sg_scatg2virt(sclp);
		for (j = 0, k = 0; j < onum; ++j) {
			res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
			if (res)
				return res;

			for (; p; ++sclp, ksglen = (int) sclp->length,
			     p = sg_scatg2virt(sclp)) {
				if (usglen <= 0)
					break;
				if (ksglen > usglen) {
					if (usglen >= num_xfer) {
						if (__copy_to_user(up, p, num_xfer))
							return -EFAULT;
						return 0;
					}
					if (__copy_to_user(up, p, usglen))
						return -EFAULT;
					p += usglen;
					ksglen -= usglen;
					break;
				} else {
					if (ksglen >= num_xfer) {
						if (__copy_to_user(up, p, num_xfer))
							return -EFAULT;
						return 0;
					}
					if (__copy_to_user(up, p, ksglen))
						return -EFAULT;
					up += ksglen;
					usglen -= ksglen;
				}
				++k;
				if (k >= schp->k_use_sg)
					return 0;
			}
		}
	}
	return 0;
}
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
	Sg_scatter_hold *schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
				   num_read_xfer));
	if ((!outp) || (num_read_xfer <= 0))
		return 0;
	if (schp->k_use_sg > 0) {
		int k, num;
		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;

		for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp);
		     ++k, ++sclp) {
			num = (int) sclp->length;
			if (num > num_read_xfer) {
				if (__copy_to_user(outp, sg_scatg2virt(sclp),
						   num_read_xfer))
					return -EFAULT;
				break;
			} else {
				if (__copy_to_user(outp, sg_scatg2virt(sclp),
						   num))
					return -EFAULT;
				num_read_xfer -= num;
				if (num_read_xfer <= 0)
					break;
				outp += num;
			}
		}
	} else {
		if (__copy_to_user(outp, schp->buffer, num_read_xfer))
			return -EFAULT;
	}
	return 0;
}

static void
sg_build_reserve(Sg_fd * sfp, int req_size)
{
	Sg_scatter_hold *schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
	do {
		if (req_size < PAGE_SIZE)
			req_size = PAGE_SIZE;
		if (0 == sg_build_indirect(schp, sfp, req_size))
			return;
		else
			sg_remove_scat(schp);
		req_size >>= 1;	/* divide by 2 */
	} while (req_size > (PAGE_SIZE / 2));
}
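/*
 * Example of the fallback halving above: if a 131072 byte reserve
 * allocation fails, the loop retries with 65536, 32768, ... down to
 * PAGE_SIZE itself, and gives up once req_size has been halved to
 * PAGE_SIZE/2 (so with 4 KiB pages the smallest attempt is 4096).
 */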
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;

	srp->res_used = 1;
	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
	size = (size + 1) & (~1);	/* round to even for aha1542 */
	if (rsv_schp->k_use_sg > 0) {
		int k, num;
		int rem = size;
		struct scatterlist *sclp =
		    (struct scatterlist *) rsv_schp->buffer;

		for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
			num = (int) sclp->length;
			if (rem <= num) {
				if (0 == k) {
					req_schp->k_use_sg = 0;
					req_schp->buffer = sg_scatg2virt(sclp);
				} else {
					sfp->save_scat_len = num;
					sclp->length = (unsigned) rem;
					req_schp->k_use_sg = k + 1;
					req_schp->sglist_len =
					    rsv_schp->sglist_len;
					req_schp->buffer = rsv_schp->buffer;
				}
				req_schp->bufflen = size;
				req_schp->b_malloc_len = rsv_schp->b_malloc_len;
				break;
			} else
				rem -= num;
		}
		if (k >= rsv_schp->k_use_sg)
			SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
	} else {
		req_schp->k_use_sg = 0;
		req_schp->bufflen = size;
		req_schp->buffer = rsv_schp->buffer;
		req_schp->b_malloc_len = rsv_schp->b_malloc_len;
	}
}

static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
				   (int) req_schp->k_use_sg));
	if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
		struct scatterlist *sclp =
		    (struct scatterlist *) rsv_schp->buffer;

		if (sfp->save_scat_len > 0)
			(sclp + (req_schp->k_use_sg - 1))->length =
			    (unsigned) sfp->save_scat_len;
		else
			SCSI_LOG_TIMEOUT(1, printk("sg_unlink_reserve: BAD save_scat_len\n"));
	}
	req_schp->k_use_sg = 0;
	req_schp->bufflen = 0;
	req_schp->buffer = NULL;
	req_schp->sglist_len = 0;
	sfp->save_scat_len = 0;
	srp->res_used = 0;
}

static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
	Sg_request *resp;
	unsigned long iflags;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
		/* look for requests that are ready + not SG_IO owned */
		if ((1 == resp->done) && (!resp->sg_io_owned) &&
		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
			resp->done = 2;	/* guard against other readers */
			break;
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}
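/*
 * Illustrative use of the pack_id matching above (user space,
 * sketch): queue several commands with distinct header.pack_id
 * values via write(), then
 *
 *     int one = 1;
 *
 *     ioctl(sg_fd, SG_SET_FORCE_PACK_ID, &one);
 *     io_hdr.pack_id = 7;
 *     read(sg_fd, &io_hdr, sizeof(io_hdr));	// completes request 7
 *
 * With force_packid left clear (the default) the read side passes
 * pack_id -1 and the oldest completed request is returned instead.
 */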
#ifdef CONFIG_SCSI_PROC_FS
static Sg_request *
sg_get_nth_request(Sg_fd * sfp, int nth)
{
	Sg_request *resp;
	unsigned long iflags;
	int k;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (k = 0, resp = sfp->headrp; resp && (k < nth);
	     ++k, resp = resp->nextrp) ;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}
#endif

/* always adds to end of list */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
	int k;
	unsigned long iflags;
	Sg_request *resp;
	Sg_request *rp = sfp->req_arr;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	resp = sfp->headrp;
	if (!resp) {
		memset(rp, 0, sizeof (Sg_request));
		rp->parentfp = sfp;
		resp = rp;
		sfp->headrp = resp;
	} else {
		if (0 == sfp->cmd_q)
			resp = NULL;	/* command queuing disallowed */
		else {
			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
				if (!rp->parentfp)
					break;
			}
			if (k < SG_MAX_QUEUE) {
				memset(rp, 0, sizeof (Sg_request));
				rp->parentfp = sfp;
				while (resp->nextrp)
					resp = resp->nextrp;
				resp->nextrp = rp;
				resp = rp;
			} else
				resp = NULL;
		}
	}
	if (resp) {
		resp->nextrp = NULL;
		resp->header.duration = jiffies_to_msecs(jiffies);
		resp->my_cmdp = NULL;
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}

/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
	Sg_request *prev_rp;
	Sg_request *rp;
	unsigned long iflags;
	int res = 0;

	if ((!sfp) || (!srp) || (!sfp->headrp))
		return res;
	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	if (srp->my_cmdp)
		srp->my_cmdp->upper_private_data = NULL;
	prev_rp = sfp->headrp;
	if (srp == prev_rp) {
		sfp->headrp = prev_rp->nextrp;
		prev_rp->parentfp = NULL;
		res = 1;
	} else {
		while ((rp = prev_rp->nextrp)) {
			if (srp == rp) {
				prev_rp->nextrp = rp->nextrp;
				rp->parentfp = NULL;
				res = 1;
				break;
			}
			prev_rp = rp;
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return res;
}

#ifdef CONFIG_SCSI_PROC_FS
static Sg_fd *
sg_get_nth_sfp(Sg_device * sdp, int nth)
{
	Sg_fd *resp;
	unsigned long iflags;
	int k;

	read_lock_irqsave(&sg_dev_arr_lock, iflags);
	for (k = 0, resp = sdp->headfp; resp && (k < nth);
	     ++k, resp = resp->nextfp) ;
	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	return resp;
}
#endif

static Sg_fd *
sg_add_sfp(Sg_device * sdp, int dev)
{
	Sg_fd *sfp;
	unsigned long iflags;

	sfp = (Sg_fd *) sg_page_malloc(sizeof (Sg_fd), 0, NULL);
	if (!sfp)
		return NULL;
	memset(sfp, 0, sizeof (Sg_fd));
	init_waitqueue_head(&sfp->read_wait);
	rwlock_init(&sfp->rq_list_lock);

	sfp->timeout = SG_DEFAULT_TIMEOUT;
	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
	    sdp->device->host->unchecked_isa_dma : 1;
	sfp->cmd_q = SG_DEF_COMMAND_Q;
	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
	sfp->parentdp = sdp;
	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	if (!sdp->headfp)
		sdp->headfp = sfp;
	else {			/* add to tail of existing list */
		Sg_fd *pfp = sdp->headfp;

		while (pfp->nextfp)
			pfp = pfp->nextfp;
		pfp->nextfp = sfp;
	}
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
	sg_build_reserve(sfp, sg_big_buff);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
				   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
	return sfp;
}

static void
__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
	Sg_fd *fp;
	Sg_fd *prev_fp;

	prev_fp = sdp->headfp;
	if (sfp == prev_fp)
		sdp->headfp = prev_fp->nextfp;
	else {
		while ((fp = prev_fp->nextfp)) {
			if (sfp == fp) {
				prev_fp->nextfp = fp->nextfp;
				break;
			}
			prev_fp = fp;
		}
	}
	if (sfp->reserve.bufflen > 0) {
		SCSI_LOG_TIMEOUT(6,
		    printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
			   (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
		if (sfp->mmap_called)
			sg_rb_correct4mmap(&sfp->reserve, 0);	/* undo correction */
		sg_remove_scat(&sfp->reserve);
	}
	sfp->parentdp = NULL;
	SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
	sg_page_free((char *) sfp, sizeof (Sg_fd));
}

/* Returns 0 in normal case, 1 when detached and sdp object removed */
static int
sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
	Sg_request *srp;
	Sg_request *tsrp;
	int dirty = 0;
	int res = 0;

	for (srp = sfp->headrp; srp; srp = tsrp) {
		tsrp = srp->nextrp;
		if (sg_srp_done(srp, sfp))
			sg_finish_rem_req(srp);
		else
			++dirty;
	}
	if (0 == dirty) {
		unsigned long iflags;

		write_lock_irqsave(&sg_dev_arr_lock, iflags);
		__sg_remove_sfp(sdp, sfp);
		if (sdp->detached && (NULL == sdp->headfp)) {
			int k, maxd;

			maxd = sg_dev_max;
			for (k = 0; k < maxd; ++k) {
				if (sdp == sg_dev_arr[k])
					break;
			}
			if (k < maxd)
				sg_dev_arr[k] = NULL;
			kfree((char *) sdp);
			res = 1;
		}
		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	} else {
		/* MOD_INC's to inhibit unloading sg and associated adapter driver */
		/* only bump the access_count if we actually succeeded in
		 * throwing another counter on the host module */
		scsi_device_get(sdp->device);	/* XXX: retval ignored? */
		sfp->closed = 1;	/* flag dirty state on this fd */
		SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
					   dirty));
	}
	return res;
}

static int
sg_res_in_use(Sg_fd * sfp)
{
	const Sg_request *srp;
	unsigned long iflags;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp)
		if (srp->res_used)
			break;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return srp ? 1 : 0;
}

/* If retSzp==NULL want exact size or fail */
static char *
sg_page_malloc(int rqSz, int lowDma, int *retSzp)
{
	char *resp = NULL;
	gfp_t page_mask;
	int order, a_size;
	int resSz = rqSz;

	if (rqSz <= 0)
		return resp;

	if (lowDma)
		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
	else
		page_mask = GFP_ATOMIC | __GFP_NOWARN;

	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
	     order++, a_size <<= 1) ;
	resp = (char *) __get_free_pages(page_mask, order);
	while ((!resp) && order && retSzp) {
		--order;
		a_size >>= 1;	/* divide by 2, until PAGE_SIZE */
		resp = (char *) __get_free_pages(page_mask, order);	/* try half */
		resSz = a_size;
	}
	if (resp) {
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			memset(resp, 0, resSz);
		if (retSzp)
			*retSzp = resSz;
	}
	return resp;
}

static void
sg_page_free(char *buff, int size)
{
	int order, a_size;

	if (!buff)
		return;
	for (order = 0, a_size = PAGE_SIZE; a_size < size;
	     order++, a_size <<= 1) ;
	free_pages((unsigned long) buff, order);
}
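/*
 * Worked example of the order computation above, assuming
 * PAGE_SIZE = 4096: for rqSz = 70000 the loop walks a_size through
 * 4096, 8192, 16384, 32768, 65536, 131072, stopping with order = 5,
 * so a 128 KiB block is requested; on failure (and with a non-NULL
 * retSzp) the while loop retries with successively halved orders.
 */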
#ifndef MAINTENANCE_IN_CMD
#define MAINTENANCE_IN_CMD 0xa3
#endif

static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
	INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
	READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
	SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
};

static int
sg_allow_access(unsigned char opcode, char dev_type)
{
	int k;

	if (TYPE_SCANNER == dev_type)	/* TYPE_ROM maybe burner */
		return 1;
	for (k = 0; k < sizeof (allow_ops); ++k) {
		if (opcode == allow_ops[k])
			return 1;
	}
	return 0;
}
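/*
 * Example of the table's effect: on a file descriptor opened
 * O_RDONLY, an INQUIRY or TEST_UNIT_READY CDB is passed through,
 * while a WRITE(10) (opcode 0x2a, absent from allow_ops[]) makes the
 * write()/SG_IO paths that consult sg_allow_access() fail with
 * EPERM.
 */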
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_last_dev(void)
{
	int k;
	unsigned long iflags;

	read_lock_irqsave(&sg_dev_arr_lock, iflags);
	for (k = sg_dev_max - 1; k >= 0; --k)
		if (sg_dev_arr[k] && sg_dev_arr[k]->device)
			break;
	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	return k + 1;		/* origin 1 */
}
#endif

static Sg_device *
sg_get_dev(int dev)
{
	Sg_device *sdp = NULL;
	unsigned long iflags;

	if (sg_dev_arr && (dev >= 0)) {
		read_lock_irqsave(&sg_dev_arr_lock, iflags);
		if (dev < sg_dev_max)
			sdp = sg_dev_arr[dev];
		read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	}
	return sdp;
}

#ifdef CONFIG_SCSI_PROC_FS

static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
				  size_t count, loff_t *off);
static struct file_operations adio_fops = {
	/* .owner, .read and .llseek added in sg_proc_init() */
	.open = sg_proc_single_open_adio,
	.write = sg_proc_write_adio,
	.release = single_release,
};

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off);
static struct file_operations dressz_fops = {
	.open = sg_proc_single_open_dressz,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static struct file_operations version_fops = {
	.open = sg_proc_single_open_version,
	.release = single_release,
};

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static struct file_operations devhdr_fops = {
	.open = sg_proc_single_open_devhdr,
	.release = single_release,
};

static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static struct file_operations dev_fops = {
	.open = sg_proc_open_dev,
	.release = seq_release,
};
static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = sg_proc_seq_show_dev,
};

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static struct file_operations devstrs_fops = {
	.open = sg_proc_open_devstrs,
	.release = seq_release,
};
static struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = sg_proc_seq_show_devstrs,
};

static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static struct file_operations debug_fops = {
	.open = sg_proc_open_debug,
	.release = seq_release,
};
static struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = sg_proc_seq_show_debug,
};
struct sg_proc_leaf {
	const char *name;
	struct file_operations *fops;
};

static struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};

static int
sg_proc_init(void)
{
	int k, mask;
	int num_leaves =
	    sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
	struct proc_dir_entry *pdep;
	struct sg_proc_leaf *leaf;

	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
	if (!sg_proc_sgp)
		return 1;
	for (k = 0; k < num_leaves; ++k) {
		leaf = &sg_proc_leaf_arr[k];
		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
		pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
		if (pdep) {
			leaf->fops->owner = THIS_MODULE;
			leaf->fops->read = seq_read;
			leaf->fops->llseek = seq_lseek;
			pdep->proc_fops = leaf->fops;
		}
	}
	return 0;
}

static void
sg_proc_cleanup(void)
{
	int k;
	int num_leaves =
	    sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);

	if (!sg_proc_sgp)
		return;
	for (k = 0; k < num_leaves; ++k)
		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
	remove_proc_entry(sg_proc_sg_dirname, NULL);
}

static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *((int *)s->private));
	return 0;
}

static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}

static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
		   size_t count, loff_t *off)
{
	int num;
	char buff[11];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
		return -EFAULT;
	buff[num] = '\0';
	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
	return count;
}

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}

static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	int num;
	unsigned long k = ULONG_MAX;
	char buff[11];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
		return -EFAULT;
	buff[num] = '\0';
	k = simple_strtoul(buff, NULL, 10);
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
		sg_big_buff = k;
		return count;
	}
	return -ERANGE;
}
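/*
 * Illustrative shell usage of the two writable files above (both
 * require CAP_SYS_ADMIN and CAP_SYS_RAWIO):
 *
 *     echo 1      > /proc/scsi/sg/allow_dio
 *     echo 131072 > /proc/scsi/sg/def_reserved_size
 *     cat /proc/scsi/sg/def_reserved_size
 *
 * Writes above 1048576 to def_reserved_size fail with ERANGE, as
 * enforced above.
 */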
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
	return 0;
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
		   "online\n");
	return 0;
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}

struct sg_proc_deviter {
	loff_t index;
	size_t max;
};

static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter *it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;
	if (!it)
		return NULL;

	if (NULL == sg_dev_arr)
		return NULL;
	it->index = *pos;
	it->max = sg_last_dev();
	if (it->index >= it->max)
		return NULL;
	return it;
}

static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter *it = s->private;

	*pos = ++it->index;
	return (it->index < it->max) ? it : NULL;
}

static void dev_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);
}

static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter *it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;

	sdp = it ? sg_get_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			   scsidp->host->host_no, scsidp->channel,
			   scsidp->id, scsidp->lun, (int) scsidp->type,
			   1,
			   (int) scsidp->queue_depth,
			   (int) scsidp->device_busy,
			   (int) scsi_device_online(scsidp));
	else
		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	return 0;
}
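/*
 * Example line produced by the show routine above for an attached
 * device (tab separated, columns as named by the device_hdr file):
 *
 *     0	0	2	0	5	1	32	0	1
 *
 * i.e. host 0, channel 0, id 2, lun 0, type 5, an "opens" column
 * hardwired to 1 above, queue depth 32, not busy, online. A row of
 * -1 values marks a hole in the device array.
 */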
static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter *it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;

	sdp = it ? sg_get_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
			   scsidp->vendor, scsidp->model, scsidp->rev);
	else
		seq_printf(s, "<no active device>\n");
	return 0;
}

static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
	int k, m, new_interface, blen, usg;
	Sg_request *srp;
	Sg_fd *fp;
	const sg_io_hdr_t *hp;
	const char *cp;
	unsigned int ms;

	for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
			   "(res)sgat=%d low_dma=%d\n", k + 1,
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
			   (int) fp->low_dma);
		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan, (int) fp->closed);
		for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
			hp = &srp->header;
			new_interface = (hp->interface_id == '\0') ? 0 : 1;
			if (srp->res_used) {
				if (new_interface &&
				    (SG_FLAG_MMAP_IO & hp->flags))
					cp = "     mmap>> ";
				else
					cp = "     rb>> ";
			} else {
				if (SG_INFO_DIRECT_IO_MASK & hp->info)
					cp = "     dio>> ";
				else
					cp = "     ";
			}
			seq_printf(s, "%s", cp);	/* "%s" guards against '%' in cp */
			blen = srp->my_cmdp ?
			       srp->my_cmdp->sr_bufflen : srp->data.bufflen;
			usg = srp->my_cmdp ?
			      srp->my_cmdp->sr_use_sg : srp->data.k_use_sg;
			seq_printf(s, "%s", srp->done ?
				   ((1 == srp->done) ? "rcv:" : "fin:")
				   : (srp->my_cmdp ? "act:" : "prior:"));
			seq_printf(s, " id=%d blen=%d",
				   srp->header.pack_id, blen);
			if (srp->done)
				seq_printf(s, " dur=%d", hp->duration);
			else {
				ms = jiffies_to_msecs(jiffies);
				seq_printf(s, " t_o/elap=%d/%d",
					   (new_interface ? hp->timeout :
					    jiffies_to_msecs(fp->timeout)),
					   (ms > hp->duration ? ms - hp->duration : 0));
			}
			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
				   (int) srp->data.cmd_opcode);
		}
		if (0 == m)
			seq_printf(s, "     No requests active\n");
	}
}

static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}
static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
	struct sg_proc_deviter *it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;

	if (it && (0 == it->index)) {
		seq_printf(s, "dev_max(currently)=%d max_active_device=%d "
			   "(origin 1)\n", sg_dev_max, (int)it->max);
		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
	}
	sdp = it ? sg_get_dev(it->index) : NULL;
	if (sdp) {
		struct scsi_device *scsidp = sdp->device;

		if (NULL == scsidp) {
			seq_printf(s, "device %d detached ??\n",
				   (int)it->index);
			return 0;
		}

		if (sg_get_nth_sfp(sdp, 0)) {
			seq_printf(s, " >>> device=%s ",
				   sdp->disk->disk_name);
			if (sdp->detached)
				seq_printf(s, "detached pending close ");
			else
				seq_printf(s, "scsi%d chan=%d id=%d lun=%d em=%d",
					   scsidp->host->host_no,
					   scsidp->channel, scsidp->id,
					   scsidp->lun,
					   scsidp->host->hostt->emulated);
			seq_printf(s, " sg_tablesize=%d excl=%d\n",
				   sdp->sg_tablesize, sdp->exclude);
		}
		sg_proc_debug_helper(s, sdp);
	}
	return 0;
}

#endif				/* CONFIG_SCSI_PROC_FS */

module_init(init_sg);
module_exit(exit_sg);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);