xref: /linux/drivers/scsi/sg.c (revision 2624f124b3b5d550ab2fbef7ee3bc0e1fed09722)
1 /*
2  *  History:
3  *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
4  *           to allow user process control of SCSI devices.
5  *  Development Sponsored by Killy Corp. NY NY
6  *
7  * Original driver (sg.c):
8  *        Copyright (C) 1992 Lawrence Foard
9  * Version 2 and 3 extensions to driver:
10  *        Copyright (C) 1998 - 2005 Douglas Gilbert
11  *
12  *  Modified  19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Devfs support
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2, or (at your option)
17  * any later version.
18  *
19  */
20 
21 static int sg_version_num = 30533;	/* 2 digits for each component */
22 #define SG_VERSION_STR "3.5.33"
23 
24 /*
25  *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
26  *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
27  *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
28  *        (otherwise the macros compile to empty statements).
29  *
30  */
31 #include <linux/config.h>
32 #include <linux/module.h>
33 
34 #include <linux/fs.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/string.h>
38 #include <linux/mm.h>
39 #include <linux/errno.h>
40 #include <linux/mtio.h>
41 #include <linux/ioctl.h>
42 #include <linux/fcntl.h>
43 #include <linux/init.h>
44 #include <linux/poll.h>
45 #include <linux/smp_lock.h>
46 #include <linux/moduleparam.h>
47 #include <linux/devfs_fs_kernel.h>
48 #include <linux/cdev.h>
49 #include <linux/seq_file.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 
53 #include "scsi.h"
54 #include <scsi/scsi_dbg.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsi_driver.h>
57 #include <scsi/scsi_ioctl.h>
58 #include <scsi/sg.h>
59 
60 #include "scsi_logging.h"
61 
62 #ifdef CONFIG_SCSI_PROC_FS
63 #include <linux/proc_fs.h>
64 static char *sg_version_date = "20050901";
65 
66 static int sg_proc_init(void);
67 static void sg_proc_cleanup(void);
68 #endif
69 
70 #ifndef LINUX_VERSION_CODE
71 #include <linux/version.h>
72 #endif				/* LINUX_VERSION_CODE */
73 
74 #define SG_ALLOW_DIO_DEF 0
75 #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
76 
77 #define SG_MAX_DEVS 32768
78 
79 /*
 * Suppose you want to calculate muldiv(x,m,d) = int(x * m / d).
 * With 32-bit integers the product x * m may overflow during the calculation.
 * Replacing it with muldiv(x,m,d) = ((x % d) * m) / d + int(x / d) * m
 * computes the same value but avoids the overflow, provided both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
87  */
88 #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
89 
90 #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
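/*
 * Worked example of MULDIV (illustrative; assumes HZ=1000 and USER_HZ=100,
 * so SG_DEFAULT_TIMEOUT_USER = 60*USER_HZ = 6000 ticks):
 *   MULDIV(6000, 1000, 100) = ((6000 % 100) * 1000) / 100 + (6000 / 100) * 1000
 *                           = 0 + 60 * 1000
 *                           = 60000 jiffies, i.e. 60 seconds worth of HZ ticks.
 * For large x, such as the MULDIV(INT_MAX, USER_HZ, HZ) clamp in sg_ioctl(),
 * the naive x * m would overflow 32 bits; the split form stays in range.
 */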
91 
92 int sg_big_buff = SG_DEF_RESERVED_SIZE;
93 /* N.B. This variable is readable and writeable via
94    /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
95    of this size (or less if there is not enough memory) will be reserved
96    for use by this file descriptor. [Deprecated usage: this variable is also
97    readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
98    the kernel (i.e. it is not a module).] */
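/*
 * Illustrative usage from user space (a sketch, not driver code):
 *	cat /proc/scsi/sg/def_reserved_size
 *	echo 131072 > /proc/scsi/sg/def_reserved_size
 * The new default applies to file descriptors opened afterwards; 131072 is
 * just an example value.
 */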
99 static int def_reserved_size = -1;	/* picks up init parameter */
100 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
101 
102 #define SG_SECTOR_SZ 512
103 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
104 
105 #define SG_DEV_ARR_LUMP 32	/* amount to over allocate sg_dev_arr by */
106 
107 static int sg_add(struct class_device *);
108 static void sg_remove(struct class_device *);
109 
110 static Scsi_Request *dummy_cmdp;	/* only used for sizeof */
111 
static DEFINE_RWLOCK(sg_dev_arr_lock);	/* Also used to lock
					   file descriptor list for device */
114 
115 static struct class_interface sg_interface = {
116 	.add		= sg_add,
117 	.remove		= sg_remove,
118 };
119 
120 typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
121 	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
122 	unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
123 	unsigned bufflen;	/* Size of (aggregate) data buffer */
124 	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
125 	void *buffer;		/* Data buffer or scatter list (k_use_sg>0) */
126 	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
127 	unsigned char cmd_opcode; /* first byte of command */
128 } Sg_scatter_hold;
129 
130 struct sg_device;		/* forward declarations */
131 struct sg_fd;
132 
133 typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	Scsi_Request *my_cmdp;	/* non-NULL while request is with the lower levels */
135 	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
136 	struct sg_fd *parentfp;	/* NULL -> not in use */
137 	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
138 	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
139 	unsigned char sense_b[sizeof (dummy_cmdp->sr_sense_buffer)];
140 	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
141 	char orphan;		/* 1 -> drop on sight, 0 -> normal */
142 	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
143 	volatile char done;	/* 0->before bh, 1->before read, 2->read */
144 } Sg_request;
145 
146 typedef struct sg_fd {		/* holds the state of a file descriptor */
147 	struct sg_fd *nextfp;	/* NULL when last opened fd on this device */
148 	struct sg_device *parentdp;	/* owning device */
149 	wait_queue_head_t read_wait;	/* queue read until command done */
150 	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
151 	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
152 	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
153 	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
154 	unsigned save_scat_len;	/* original length of trunc. scat. element */
155 	Sg_request *headrp;	/* head of request slist, NULL->empty */
156 	struct fasync_struct *async_qp;	/* used by asynchronous notification */
157 	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
158 	char low_dma;		/* as in parent but possibly overridden to 1 */
159 	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
160 	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
161 	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
162 	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
163 	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
164 	char mmap_called;	/* 0 -> mmap() never called on this fd */
165 } Sg_fd;
166 
167 typedef struct sg_device { /* holds the state of each scsi generic device */
168 	struct scsi_device *device;
169 	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
170 	int sg_tablesize;	/* adapter's max scatter-gather table size */
171 	Sg_fd *headfp;		/* first open fd belonging to this device */
172 	volatile char detached;	/* 0->attached, 1->detached pending removal */
173 	volatile char exclude;	/* opened for exclusive access */
174 	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
175 	struct gendisk *disk;
176 	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
177 } Sg_device;
178 
179 static int sg_fasync(int fd, struct file *filp, int mode);
180 static void sg_cmd_done(Scsi_Cmnd * SCpnt);	/* tasklet or soft irq callback */
181 static int sg_start_req(Sg_request * srp);
182 static void sg_finish_rem_req(Sg_request * srp);
183 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
184 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
185 			 int tablesize);
186 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
187 			   Sg_request * srp);
188 static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
189 			    int blocking, int read_only, Sg_request ** o_srp);
190 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
191 			   unsigned char *cmnd, int timeout, int blocking);
192 static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
193 		      int wr_xf, int *countp, unsigned char __user **up);
194 static int sg_write_xfer(Sg_request * srp);
195 static int sg_read_xfer(Sg_request * srp);
196 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
197 static void sg_remove_scat(Sg_scatter_hold * schp);
198 static void sg_build_reserve(Sg_fd * sfp, int req_size);
199 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
200 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
201 static char *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
202 static void sg_page_free(char *buff, int size);
203 static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
204 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
205 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
206 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
207 static Sg_request *sg_add_request(Sg_fd * sfp);
208 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
209 static int sg_res_in_use(Sg_fd * sfp);
210 static int sg_allow_access(unsigned char opcode, char dev_type);
211 static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
212 static Sg_device *sg_get_dev(int dev);
213 static inline unsigned char *sg_scatg2virt(const struct scatterlist *sclp);
214 #ifdef CONFIG_SCSI_PROC_FS
215 static int sg_last_dev(void);
216 #endif
217 
218 static Sg_device **sg_dev_arr = NULL;
219 static int sg_dev_max;
220 static int sg_nr_dev;
221 
222 #define SZ_SG_HEADER sizeof(struct sg_header)
223 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
224 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
225 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
226 
227 static int
228 sg_open(struct inode *inode, struct file *filp)
229 {
230 	int dev = iminor(inode);
231 	int flags = filp->f_flags;
232 	Sg_device *sdp;
233 	Sg_fd *sfp;
234 	int res;
235 	int retval;
236 
237 	nonseekable_open(inode, filp);
238 	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
239 	sdp = sg_get_dev(dev);
240 	if ((!sdp) || (!sdp->device))
241 		return -ENXIO;
242 	if (sdp->detached)
243 		return -ENODEV;
244 
245 	/* This driver's module count bumped by fops_get in <linux/fs.h> */
246 	/* Prevent the device driver from vanishing while we sleep */
247 	retval = scsi_device_get(sdp->device);
248 	if (retval)
249 		return retval;
250 
251 	if (!((flags & O_NONBLOCK) ||
252 	      scsi_block_when_processing_errors(sdp->device))) {
253 		retval = -ENXIO;
254 		/* we are in error recovery for this device */
255 		goto error_out;
256 	}
257 
258 	if (flags & O_EXCL) {
259 		if (O_RDONLY == (flags & O_ACCMODE)) {
260 			retval = -EPERM; /* Can't lock it with read only access */
261 			goto error_out;
262 		}
263 		if (sdp->headfp && (flags & O_NONBLOCK)) {
264 			retval = -EBUSY;
265 			goto error_out;
266 		}
267 		res = 0;
268 		__wait_event_interruptible(sdp->o_excl_wait,
269 			((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
270 		if (res) {
271 			retval = res;	/* -ERESTARTSYS because signal hit process */
272 			goto error_out;
273 		}
274 	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
275 		if (flags & O_NONBLOCK) {
276 			retval = -EBUSY;
277 			goto error_out;
278 		}
279 		res = 0;
280 		__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
281 					   res);
282 		if (res) {
283 			retval = res;	/* -ERESTARTSYS because signal hit process */
284 			goto error_out;
285 		}
286 	}
287 	if (sdp->detached) {
288 		retval = -ENODEV;
289 		goto error_out;
290 	}
291 	if (!sdp->headfp) {	/* no existing opens on this device */
292 		sdp->sgdebug = 0;
293 		sdp->sg_tablesize = sdp->device->host->sg_tablesize;
294 	}
295 	if ((sfp = sg_add_sfp(sdp, dev)))
296 		filp->private_data = sfp;
297 	else {
298 		if (flags & O_EXCL)
299 			sdp->exclude = 0;	/* undo if error */
300 		retval = -ENOMEM;
301 		goto error_out;
302 	}
303 	return 0;
304 
305       error_out:
306 	scsi_device_put(sdp->device);
307 	return retval;
308 }
309 
310 /* Following function was formerly called 'sg_close' */
311 static int
312 sg_release(struct inode *inode, struct file *filp)
313 {
314 	Sg_device *sdp;
315 	Sg_fd *sfp;
316 
317 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
318 		return -ENXIO;
319 	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
320 	sg_fasync(-1, filp, 0);	/* remove filp from async notification list */
321 	if (0 == sg_remove_sfp(sdp, sfp)) {	/* Returns 1 when sdp gone */
322 		if (!sdp->detached) {
323 			scsi_device_put(sdp->device);
324 		}
325 		sdp->exclude = 0;
326 		wake_up_interruptible(&sdp->o_excl_wait);
327 	}
328 	return 0;
329 }
330 
331 static ssize_t
332 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
333 {
334 	Sg_device *sdp;
335 	Sg_fd *sfp;
336 	Sg_request *srp;
337 	int req_pack_id = -1;
338 	sg_io_hdr_t *hp;
339 	struct sg_header *old_hdr = NULL;
340 	int retval = 0;
341 
342 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
343 		return -ENXIO;
344 	SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
345 				   sdp->disk->disk_name, (int) count));
346 	if (!access_ok(VERIFY_WRITE, buf, count))
347 		return -EFAULT;
348 	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
349 		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
350 		if (!old_hdr)
351 			return -ENOMEM;
352 		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
353 			retval = -EFAULT;
354 			goto free_old_hdr;
355 		}
356 		if (old_hdr->reply_len < 0) {
357 			if (count >= SZ_SG_IO_HDR) {
358 				sg_io_hdr_t *new_hdr;
359 				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
360 				if (!new_hdr) {
361 					retval = -ENOMEM;
362 					goto free_old_hdr;
363 				}
				retval = __copy_from_user(new_hdr, buf,
							  SZ_SG_IO_HDR);
366 				req_pack_id = new_hdr->pack_id;
367 				kfree(new_hdr);
368 				if (retval) {
369 					retval = -EFAULT;
370 					goto free_old_hdr;
371 				}
372 			}
373 		} else
374 			req_pack_id = old_hdr->pack_id;
375 	}
376 	srp = sg_get_rq_mark(sfp, req_pack_id);
377 	if (!srp) {		/* now wait on packet to arrive */
378 		if (sdp->detached) {
379 			retval = -ENODEV;
380 			goto free_old_hdr;
381 		}
382 		if (filp->f_flags & O_NONBLOCK) {
383 			retval = -EAGAIN;
384 			goto free_old_hdr;
385 		}
386 		while (1) {
			retval = 0; /* macro below re-tests the condition, avoiding a missed wakeup */
388 			__wait_event_interruptible(sfp->read_wait,
389 				(sdp->detached ||
390 				(srp = sg_get_rq_mark(sfp, req_pack_id))),
391 				retval);
392 			if (sdp->detached) {
393 				retval = -ENODEV;
394 				goto free_old_hdr;
395 			}
396 			if (0 == retval)
397 				break;
398 
399 			/* -ERESTARTSYS as signal hit process */
400 			goto free_old_hdr;
401 		}
402 	}
403 	if (srp->header.interface_id != '\0') {
404 		retval = sg_new_read(sfp, buf, count, srp);
405 		goto free_old_hdr;
406 	}
407 
408 	hp = &srp->header;
409 	if (old_hdr == NULL) {
410 		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
411 		if (! old_hdr) {
412 			retval = -ENOMEM;
413 			goto free_old_hdr;
414 		}
415 	}
416 	memset(old_hdr, 0, SZ_SG_HEADER);
417 	old_hdr->reply_len = (int) hp->timeout;
418 	old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
419 	old_hdr->pack_id = hp->pack_id;
420 	old_hdr->twelve_byte =
421 	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
422 	old_hdr->target_status = hp->masked_status;
423 	old_hdr->host_status = hp->host_status;
424 	old_hdr->driver_status = hp->driver_status;
425 	if ((CHECK_CONDITION & hp->masked_status) ||
426 	    (DRIVER_SENSE & hp->driver_status))
427 		memcpy(old_hdr->sense_buffer, srp->sense_b,
428 		       sizeof (old_hdr->sense_buffer));
429 	switch (hp->host_status) {
430 	/* This setup of 'result' is for backward compatibility and is best
431 	   ignored by the user who should use target, host + driver status */
432 	case DID_OK:
433 	case DID_PASSTHROUGH:
434 	case DID_SOFT_ERROR:
435 		old_hdr->result = 0;
436 		break;
437 	case DID_NO_CONNECT:
438 	case DID_BUS_BUSY:
439 	case DID_TIME_OUT:
440 		old_hdr->result = EBUSY;
441 		break;
442 	case DID_BAD_TARGET:
443 	case DID_ABORT:
444 	case DID_PARITY:
445 	case DID_RESET:
446 	case DID_BAD_INTR:
447 		old_hdr->result = EIO;
448 		break;
449 	case DID_ERROR:
450 		old_hdr->result = (srp->sense_b[0] == 0 &&
451 				  hp->masked_status == GOOD) ? 0 : EIO;
452 		break;
453 	default:
454 		old_hdr->result = EIO;
455 		break;
456 	}
457 
458 	/* Now copy the result back to the user buffer.  */
459 	if (count >= SZ_SG_HEADER) {
460 		if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
461 			retval = -EFAULT;
462 			goto free_old_hdr;
463 		}
464 		buf += SZ_SG_HEADER;
465 		if (count > old_hdr->reply_len)
466 			count = old_hdr->reply_len;
467 		if (count > SZ_SG_HEADER) {
468 			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
469 				retval = -EFAULT;
470 				goto free_old_hdr;
471 			}
472 		}
473 	} else
474 		count = (old_hdr->result == 0) ? 0 : -EIO;
475 	sg_finish_rem_req(srp);
476 	retval = count;
free_old_hdr:
	kfree(old_hdr);		/* kfree(NULL) is a harmless no-op */
480 	return retval;
481 }
482 
483 static ssize_t
484 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
485 {
486 	sg_io_hdr_t *hp = &srp->header;
487 	int err = 0;
488 	int len;
489 
490 	if (count < SZ_SG_IO_HDR) {
491 		err = -EINVAL;
492 		goto err_out;
493 	}
494 	hp->sb_len_wr = 0;
495 	if ((hp->mx_sb_len > 0) && hp->sbp) {
496 		if ((CHECK_CONDITION & hp->masked_status) ||
497 		    (DRIVER_SENSE & hp->driver_status)) {
498 			int sb_len = sizeof (dummy_cmdp->sr_sense_buffer);
499 			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
500 			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
501 			len = (len > sb_len) ? sb_len : len;
502 			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
503 				err = -EFAULT;
504 				goto err_out;
505 			}
506 			hp->sb_len_wr = len;
507 		}
508 	}
509 	if (hp->masked_status || hp->host_status || hp->driver_status)
510 		hp->info |= SG_INFO_CHECK;
511 	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
512 		err = -EFAULT;
513 		goto err_out;
514 	}
515 	err = sg_read_xfer(srp);
516       err_out:
517 	sg_finish_rem_req(srp);
518 	return (0 == err) ? count : err;
519 }
520 
521 static ssize_t
522 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
523 {
524 	int mxsize, cmd_size, k;
525 	int input_size, blocking;
526 	unsigned char opcode;
527 	Sg_device *sdp;
528 	Sg_fd *sfp;
529 	Sg_request *srp;
530 	struct sg_header old_hdr;
531 	sg_io_hdr_t *hp;
532 	unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];
533 
534 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
535 		return -ENXIO;
536 	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
537 				   sdp->disk->disk_name, (int) count));
538 	if (sdp->detached)
539 		return -ENODEV;
540 	if (!((filp->f_flags & O_NONBLOCK) ||
541 	      scsi_block_when_processing_errors(sdp->device)))
542 		return -ENXIO;
543 
544 	if (!access_ok(VERIFY_READ, buf, count))
545 		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
546 	if (count < SZ_SG_HEADER)
547 		return -EIO;
548 	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
549 		return -EFAULT;
550 	blocking = !(filp->f_flags & O_NONBLOCK);
551 	if (old_hdr.reply_len < 0)
552 		return sg_new_write(sfp, buf, count, blocking, 0, NULL);
553 	if (count < (SZ_SG_HEADER + 6))
554 		return -EIO;	/* The minimum scsi command length is 6 bytes. */
555 
556 	if (!(srp = sg_add_request(sfp))) {
557 		SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
558 		return -EDOM;
559 	}
560 	buf += SZ_SG_HEADER;
561 	__get_user(opcode, buf);
562 	if (sfp->next_cmd_len > 0) {
563 		if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
564 			SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
565 			sfp->next_cmd_len = 0;
566 			sg_remove_request(sfp, srp);
567 			return -EIO;
568 		}
569 		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
571 	} else {
572 		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
573 		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
574 			cmd_size = 12;
575 	}
576 	SCSI_LOG_TIMEOUT(4, printk(
577 		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
578 /* Determine buffer size.  */
579 	input_size = count - cmd_size;
580 	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
581 	mxsize -= SZ_SG_HEADER;
582 	input_size -= SZ_SG_HEADER;
583 	if (input_size < 0) {
584 		sg_remove_request(sfp, srp);
585 		return -EIO;	/* User did not pass enough bytes for this command. */
586 	}
587 	hp = &srp->header;
588 	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
589 	hp->cmd_len = (unsigned char) cmd_size;
590 	hp->iovec_count = 0;
591 	hp->mx_sb_len = 0;
592 	if (input_size > 0)
593 		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
594 		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
595 	else
596 		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
597 	hp->dxfer_len = mxsize;
598 	hp->dxferp = (char __user *)buf + cmd_size;
599 	hp->sbp = NULL;
600 	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
601 	hp->flags = input_size;	/* structure abuse ... */
602 	hp->pack_id = old_hdr.pack_id;
603 	hp->usr_ptr = NULL;
	if (__copy_from_user(cmnd, buf, cmd_size)) {
		sg_remove_request(sfp, srp);	/* don't leak the queued request */
		return -EFAULT;
	}
606 	/*
607 	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
609 	 * is a non-zero input_size, so emit a warning.
610 	 */
611 	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
612 		if (printk_ratelimit())
613 			printk(KERN_WARNING
614 			       "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
615 			       "guessing data in;\n" KERN_WARNING "   "
616 			       "program %s not setting count and/or reply_len properly\n",
617 			       old_hdr.reply_len - (int)SZ_SG_HEADER,
618 			       input_size, (unsigned int) cmnd[0],
619 			       current->comm);
620 	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
621 	return (k < 0) ? k : count;
622 }
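/*
 * Sketch of the old-interface write() layout handled above (illustrative;
 * the 6-byte command and the data length are assumptions):
 *
 *	struct sg_header hdr;		// hdr.reply_len >= 0 selects this path
 *	unsigned char cdb[6];		// SCSI command bytes follow the header
 *	// optional data-out bytes follow the command
 *	write(fd, buf, sizeof(hdr) + sizeof(cdb) + data_len);
 *
 * A negative hdr.reply_len routes the call to sg_new_write() instead.
 */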
623 
624 static ssize_t
625 sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
626 	     int blocking, int read_only, Sg_request ** o_srp)
627 {
628 	int k;
629 	Sg_request *srp;
630 	sg_io_hdr_t *hp;
631 	unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];
632 	int timeout;
633 	unsigned long ul_timeout;
634 
635 	if (count < SZ_SG_IO_HDR)
636 		return -EINVAL;
637 	if (!access_ok(VERIFY_READ, buf, count))
638 		return -EFAULT; /* protects following copy_from_user()s + get_user()s */
639 
640 	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
641 	if (!(srp = sg_add_request(sfp))) {
642 		SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
643 		return -EDOM;
644 	}
645 	hp = &srp->header;
646 	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
647 		sg_remove_request(sfp, srp);
648 		return -EFAULT;
649 	}
650 	if (hp->interface_id != 'S') {
651 		sg_remove_request(sfp, srp);
652 		return -ENOSYS;
653 	}
654 	if (hp->flags & SG_FLAG_MMAP_IO) {
655 		if (hp->dxfer_len > sfp->reserve.bufflen) {
656 			sg_remove_request(sfp, srp);
657 			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
658 		}
659 		if (hp->flags & SG_FLAG_DIRECT_IO) {
660 			sg_remove_request(sfp, srp);
661 			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
662 		}
663 		if (sg_res_in_use(sfp)) {
664 			sg_remove_request(sfp, srp);
665 			return -EBUSY;	/* reserve buffer already being used */
666 		}
667 	}
668 	ul_timeout = msecs_to_jiffies(srp->header.timeout);
669 	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
670 	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
671 		sg_remove_request(sfp, srp);
672 		return -EMSGSIZE;
673 	}
674 	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
675 		sg_remove_request(sfp, srp);
676 		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
677 	}
678 	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
679 		sg_remove_request(sfp, srp);
680 		return -EFAULT;
681 	}
682 	if (read_only &&
683 	    (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
684 		sg_remove_request(sfp, srp);
685 		return -EPERM;
686 	}
687 	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
688 	if (k < 0)
689 		return k;
690 	if (o_srp)
691 		*o_srp = srp;
692 	return count;
693 }
694 
695 static int
696 sg_common_write(Sg_fd * sfp, Sg_request * srp,
697 		unsigned char *cmnd, int timeout, int blocking)
698 {
699 	int k;
700 	Scsi_Request *SRpnt;
701 	Sg_device *sdp = sfp->parentdp;
702 	sg_io_hdr_t *hp = &srp->header;
703 	request_queue_t *q;
704 
705 	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
706 	hp->status = 0;
707 	hp->masked_status = 0;
708 	hp->msg_status = 0;
709 	hp->info = 0;
710 	hp->host_status = 0;
711 	hp->driver_status = 0;
712 	hp->resid = 0;
713 	SCSI_LOG_TIMEOUT(4, printk("sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
714 			  (int) cmnd[0], (int) hp->cmd_len));
715 
716 	if ((k = sg_start_req(srp))) {
717 		SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
718 		sg_finish_rem_req(srp);
719 		return k;	/* probably out of space --> ENOMEM */
720 	}
721 	if ((k = sg_write_xfer(srp))) {
722 		SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
723 		sg_finish_rem_req(srp);
724 		return k;
725 	}
726 	if (sdp->detached) {
727 		sg_finish_rem_req(srp);
728 		return -ENODEV;
729 	}
730 	SRpnt = scsi_allocate_request(sdp->device, GFP_ATOMIC);
731 	if (SRpnt == NULL) {
732 		SCSI_LOG_TIMEOUT(1, printk("sg_write: no mem\n"));
733 		sg_finish_rem_req(srp);
734 		return -ENOMEM;
735 	}
736 
737 	srp->my_cmdp = SRpnt;
738 	q = SRpnt->sr_device->request_queue;
739 	SRpnt->sr_request->rq_disk = sdp->disk;
740 	SRpnt->sr_sense_buffer[0] = 0;
741 	SRpnt->sr_cmd_len = hp->cmd_len;
742 	SRpnt->sr_use_sg = srp->data.k_use_sg;
743 	SRpnt->sr_sglist_len = srp->data.sglist_len;
744 	SRpnt->sr_bufflen = srp->data.bufflen;
745 	SRpnt->sr_underflow = 0;
746 	SRpnt->sr_buffer = srp->data.buffer;
747 	switch (hp->dxfer_direction) {
748 	case SG_DXFER_TO_FROM_DEV:
749 	case SG_DXFER_FROM_DEV:
750 		SRpnt->sr_data_direction = DMA_FROM_DEVICE;
751 		break;
752 	case SG_DXFER_TO_DEV:
753 		SRpnt->sr_data_direction = DMA_TO_DEVICE;
754 		break;
755 	case SG_DXFER_UNKNOWN:
756 		SRpnt->sr_data_direction = DMA_BIDIRECTIONAL;
757 		break;
758 	default:
759 		SRpnt->sr_data_direction = DMA_NONE;
760 		break;
761 	}
762 	SRpnt->upper_private_data = srp;
763 	srp->data.k_use_sg = 0;
764 	srp->data.sglist_len = 0;
765 	srp->data.bufflen = 0;
766 	srp->data.buffer = NULL;
767 	hp->duration = jiffies_to_msecs(jiffies);
/* Now send everything off to the mid-level. The next time we hear about this
   packet is when sg_cmd_done() is called (i.e. a callback). */
770 	scsi_do_req(SRpnt, (void *) cmnd,
771 		    (void *) SRpnt->sr_buffer, hp->dxfer_len,
772 		    sg_cmd_done, timeout, SG_DEFAULT_RETRIES);
773 	/* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
774 	return 0;
775 }
776 
777 static int
778 sg_srp_done(Sg_request *srp, Sg_fd *sfp)
779 {
780 	unsigned long iflags;
781 	int done;
782 
783 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
784 	done = srp->done;
785 	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
786 	return done;
787 }
788 
789 static int
790 sg_ioctl(struct inode *inode, struct file *filp,
791 	 unsigned int cmd_in, unsigned long arg)
792 {
793 	void __user *p = (void __user *)arg;
794 	int __user *ip = p;
795 	int result, val, read_only;
796 	Sg_device *sdp;
797 	Sg_fd *sfp;
798 	Sg_request *srp;
799 	unsigned long iflags;
800 
801 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
802 		return -ENXIO;
803 	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
804 				   sdp->disk->disk_name, (int) cmd_in));
805 	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
806 
807 	switch (cmd_in) {
808 	case SG_IO:
809 		{
810 			int blocking = 1;	/* ignore O_NONBLOCK flag */
811 
812 			if (sdp->detached)
813 				return -ENODEV;
814 			if (!scsi_block_when_processing_errors(sdp->device))
815 				return -ENXIO;
816 			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
817 				return -EFAULT;
818 			result =
819 			    sg_new_write(sfp, p, SZ_SG_IO_HDR,
820 					 blocking, read_only, &srp);
821 			if (result < 0)
822 				return result;
823 			srp->sg_io_owned = 1;
824 			while (1) {
				result = 0;	/* macro below re-tests the condition, avoiding a missed wakeup */
826 				__wait_event_interruptible(sfp->read_wait,
827 					(sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
828 							   result);
829 				if (sdp->detached)
830 					return -ENODEV;
831 				if (sfp->closed)
832 					return 0;	/* request packet dropped already */
833 				if (0 == result)
834 					break;
835 				srp->orphan = 1;
836 				return result;	/* -ERESTARTSYS because signal hit process */
837 			}
838 			write_lock_irqsave(&sfp->rq_list_lock, iflags);
839 			srp->done = 2;
840 			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
841 			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
842 			return (result < 0) ? result : 0;
843 		}
844 	case SG_SET_TIMEOUT:
845 		result = get_user(val, ip);
846 		if (result)
847 			return result;
848 		if (val < 0)
849 			return -EIO;
		if (val >= MULDIV(INT_MAX, USER_HZ, HZ))
			val = MULDIV(INT_MAX, USER_HZ, HZ);
		sfp->timeout_user = val;
		sfp->timeout = MULDIV(val, HZ, USER_HZ);
854 
855 		return 0;
856 	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
857 				/* strange ..., for backward compatibility */
858 		return sfp->timeout_user;
859 	case SG_SET_FORCE_LOW_DMA:
860 		result = get_user(val, ip);
861 		if (result)
862 			return result;
		if (val) {
			char prev_low_dma = sfp->low_dma;

			sfp->low_dma = 1;
			/* rebuild reserve buffer if the restriction was just enabled */
			if ((0 == prev_low_dma) && (0 == sg_res_in_use(sfp))) {
866 				val = (int) sfp->reserve.bufflen;
867 				sg_remove_scat(&sfp->reserve);
868 				sg_build_reserve(sfp, val);
869 			}
870 		} else {
871 			if (sdp->detached)
872 				return -ENODEV;
873 			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
874 		}
875 		return 0;
876 	case SG_GET_LOW_DMA:
877 		return put_user((int) sfp->low_dma, ip);
878 	case SG_GET_SCSI_ID:
879 		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
880 			return -EFAULT;
881 		else {
882 			sg_scsi_id_t __user *sg_idp = p;
883 
884 			if (sdp->detached)
885 				return -ENODEV;
886 			__put_user((int) sdp->device->host->host_no,
887 				   &sg_idp->host_no);
888 			__put_user((int) sdp->device->channel,
889 				   &sg_idp->channel);
890 			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
891 			__put_user((int) sdp->device->lun, &sg_idp->lun);
892 			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
893 			__put_user((short) sdp->device->host->cmd_per_lun,
894 				   &sg_idp->h_cmd_per_lun);
895 			__put_user((short) sdp->device->queue_depth,
896 				   &sg_idp->d_queue_depth);
897 			__put_user(0, &sg_idp->unused[0]);
898 			__put_user(0, &sg_idp->unused[1]);
899 			return 0;
900 		}
901 	case SG_SET_FORCE_PACK_ID:
902 		result = get_user(val, ip);
903 		if (result)
904 			return result;
905 		sfp->force_packid = val ? 1 : 0;
906 		return 0;
907 	case SG_GET_PACK_ID:
908 		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
909 			return -EFAULT;
910 		read_lock_irqsave(&sfp->rq_list_lock, iflags);
911 		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
912 			if ((1 == srp->done) && (!srp->sg_io_owned)) {
913 				read_unlock_irqrestore(&sfp->rq_list_lock,
914 						       iflags);
915 				__put_user(srp->header.pack_id, ip);
916 				return 0;
917 			}
918 		}
919 		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
920 		__put_user(-1, ip);
921 		return 0;
922 	case SG_GET_NUM_WAITING:
923 		read_lock_irqsave(&sfp->rq_list_lock, iflags);
924 		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
925 			if ((1 == srp->done) && (!srp->sg_io_owned))
926 				++val;
927 		}
928 		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
929 		return put_user(val, ip);
930 	case SG_GET_SG_TABLESIZE:
931 		return put_user(sdp->sg_tablesize, ip);
932 	case SG_SET_RESERVED_SIZE:
933 		result = get_user(val, ip);
934 		if (result)
935 			return result;
		if (val < 0)
			return -EINVAL;
938 		if (val != sfp->reserve.bufflen) {
939 			if (sg_res_in_use(sfp) || sfp->mmap_called)
940 				return -EBUSY;
941 			sg_remove_scat(&sfp->reserve);
942 			sg_build_reserve(sfp, val);
943 		}
944 		return 0;
945 	case SG_GET_RESERVED_SIZE:
946 		val = (int) sfp->reserve.bufflen;
947 		return put_user(val, ip);
948 	case SG_SET_COMMAND_Q:
949 		result = get_user(val, ip);
950 		if (result)
951 			return result;
952 		sfp->cmd_q = val ? 1 : 0;
953 		return 0;
954 	case SG_GET_COMMAND_Q:
955 		return put_user((int) sfp->cmd_q, ip);
956 	case SG_SET_KEEP_ORPHAN:
957 		result = get_user(val, ip);
958 		if (result)
959 			return result;
960 		sfp->keep_orphan = val;
961 		return 0;
962 	case SG_GET_KEEP_ORPHAN:
963 		return put_user((int) sfp->keep_orphan, ip);
964 	case SG_NEXT_CMD_LEN:
965 		result = get_user(val, ip);
966 		if (result)
967 			return result;
968 		sfp->next_cmd_len = (val > 0) ? val : 0;
969 		return 0;
970 	case SG_GET_VERSION_NUM:
971 		return put_user(sg_version_num, ip);
972 	case SG_GET_ACCESS_COUNT:
973 		/* faked - we don't have a real access count anymore */
974 		val = (sdp->device ? 1 : 0);
975 		return put_user(val, ip);
976 	case SG_GET_REQUEST_TABLE:
977 		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
978 			return -EFAULT;
979 		else {
980 			sg_req_info_t *rinfo;
981 			unsigned int ms;
982 
983 			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
984 								GFP_KERNEL);
985 			if (!rinfo)
986 				return -ENOMEM;
987 			read_lock_irqsave(&sfp->rq_list_lock, iflags);
988 			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
989 			     ++val, srp = srp ? srp->nextrp : srp) {
990 				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
991 				if (srp) {
992 					rinfo[val].req_state = srp->done + 1;
993 					rinfo[val].problem =
994 					    srp->header.masked_status &
995 					    srp->header.host_status &
996 					    srp->header.driver_status;
997 					if (srp->done)
998 						rinfo[val].duration =
999 							srp->header.duration;
1000 					else {
1001 						ms = jiffies_to_msecs(jiffies);
1002 						rinfo[val].duration =
1003 						    (ms > srp->header.duration) ?
1004 						    (ms - srp->header.duration) : 0;
1005 					}
1006 					rinfo[val].orphan = srp->orphan;
1007 					rinfo[val].sg_io_owned =
1008 							srp->sg_io_owned;
1009 					rinfo[val].pack_id =
1010 							srp->header.pack_id;
1011 					rinfo[val].usr_ptr =
1012 							srp->header.usr_ptr;
1013 				}
1014 			}
1015 			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1016 			result = __copy_to_user(p, rinfo,
1017 						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
1018 			result = result ? -EFAULT : 0;
1019 			kfree(rinfo);
1020 			return result;
1021 		}
1022 	case SG_EMULATED_HOST:
1023 		if (sdp->detached)
1024 			return -ENODEV;
1025 		return put_user(sdp->device->host->hostt->emulated, ip);
1026 	case SG_SCSI_RESET:
1027 		if (sdp->detached)
1028 			return -ENODEV;
1029 		if (filp->f_flags & O_NONBLOCK) {
1030 			if (sdp->device->host->shost_state == SHOST_RECOVERY)
1031 				return -EBUSY;
1032 		} else if (!scsi_block_when_processing_errors(sdp->device))
1033 			return -EBUSY;
1034 		result = get_user(val, ip);
1035 		if (result)
1036 			return result;
1037 		if (SG_SCSI_RESET_NOTHING == val)
1038 			return 0;
1039 		switch (val) {
1040 		case SG_SCSI_RESET_DEVICE:
1041 			val = SCSI_TRY_RESET_DEVICE;
1042 			break;
1043 		case SG_SCSI_RESET_BUS:
1044 			val = SCSI_TRY_RESET_BUS;
1045 			break;
1046 		case SG_SCSI_RESET_HOST:
1047 			val = SCSI_TRY_RESET_HOST;
1048 			break;
1049 		default:
1050 			return -EINVAL;
1051 		}
1052 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1053 			return -EACCES;
1054 		return (scsi_reset_provider(sdp->device, val) ==
1055 			SUCCESS) ? 0 : -EIO;
1056 	case SCSI_IOCTL_SEND_COMMAND:
1057 		if (sdp->detached)
1058 			return -ENODEV;
1059 		if (read_only) {
1060 			unsigned char opcode = WRITE_6;
1061 			Scsi_Ioctl_Command __user *siocp = p;
1062 
1063 			if (copy_from_user(&opcode, siocp->data, 1))
1064 				return -EFAULT;
1065 			if (!sg_allow_access(opcode, sdp->device->type))
1066 				return -EPERM;
1067 		}
1068 		return scsi_ioctl_send_command(sdp->device, p);
1069 	case SG_SET_DEBUG:
1070 		result = get_user(val, ip);
1071 		if (result)
1072 			return result;
1073 		sdp->sgdebug = (char) val;
1074 		return 0;
1075 	case SCSI_IOCTL_GET_IDLUN:
1076 	case SCSI_IOCTL_GET_BUS_NUMBER:
1077 	case SCSI_IOCTL_PROBE_HOST:
1078 	case SG_GET_TRANSFORM:
1079 		if (sdp->detached)
1080 			return -ENODEV;
1081 		return scsi_ioctl(sdp->device, cmd_in, p);
1082 	default:
1083 		if (read_only)
1084 			return -EPERM;	/* don't know so take safe approach */
1085 		return scsi_ioctl(sdp->device, cmd_in, p);
1086 	}
1087 }
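/*
 * Illustrative user-space sketch of the SG_IO path handled above (not part
 * of the driver; the device path, command and sizes are assumptions):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int fd = open("/dev/sg0", O_RDWR);
 *	unsigned char cdb[6] = { 0 };		// TEST UNIT READY
 *	unsigned char sense[32];
 *	sg_io_hdr_t hdr = { 0 };
 *
 *	hdr.interface_id = 'S';			// mandatory, checked in sg_new_write()
 *	hdr.cmd_len = sizeof(cdb);
 *	hdr.cmdp = cdb;
 *	hdr.dxfer_direction = SG_DXFER_NONE;
 *	hdr.mx_sb_len = sizeof(sense);
 *	hdr.sbp = sense;
 *	hdr.timeout = 5000;			// milliseconds (see msecs_to_jiffies above)
 *	if (ioctl(fd, SG_IO, &hdr) < 0)
 *		perror("SG_IO");
 */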
1088 
1089 #ifdef CONFIG_COMPAT
1090 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1091 {
1092 	Sg_device *sdp;
1093 	Sg_fd *sfp;
1094 	struct scsi_device *sdev;
1095 
1096 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1097 		return -ENXIO;
1098 
1099 	sdev = sdp->device;
1100 	if (sdev->host->hostt->compat_ioctl) {
1101 		int ret;
1102 
1103 		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
1104 
1105 		return ret;
1106 	}
1107 
1108 	return -ENOIOCTLCMD;
1109 }
1110 #endif
1111 
1112 static unsigned int
1113 sg_poll(struct file *filp, poll_table * wait)
1114 {
1115 	unsigned int res = 0;
1116 	Sg_device *sdp;
1117 	Sg_fd *sfp;
1118 	Sg_request *srp;
1119 	int count = 0;
1120 	unsigned long iflags;
1121 
1122 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
1123 	    || sfp->closed)
1124 		return POLLERR;
1125 	poll_wait(filp, &sfp->read_wait, wait);
1126 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
1127 	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1128 		/* if any read waiting, flag it */
1129 		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
1130 			res = POLLIN | POLLRDNORM;
1131 		++count;
1132 	}
1133 	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1134 
1135 	if (sdp->detached)
1136 		res |= POLLHUP;
1137 	else if (!sfp->cmd_q) {
1138 		if (0 == count)
1139 			res |= POLLOUT | POLLWRNORM;
1140 	} else if (count < SG_MAX_QUEUE)
1141 		res |= POLLOUT | POLLWRNORM;
1142 	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
1143 				   sdp->disk->disk_name, (int) res));
1144 	return res;
1145 }
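/*
 * User-space sketch of the semantics above (illustrative): POLLIN reports a
 * completed reply waiting to be read, POLLOUT room to write another command.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, reply_buf, reply_len);	// collect the finished request
 */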
1146 
1147 static int
1148 sg_fasync(int fd, struct file *filp, int mode)
1149 {
1150 	int retval;
1151 	Sg_device *sdp;
1152 	Sg_fd *sfp;
1153 
1154 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1155 		return -ENXIO;
1156 	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
1157 				   sdp->disk->disk_name, mode));
1158 
1159 	retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
1160 	return (retval < 0) ? retval : 0;
1161 }
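/*
 * User-space enabling sequence (a sketch; error checking omitted):
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * sg_cmd_done() then delivers SIGPOLL (POLL_IN) when a command completes.
 */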
1162 
1163 static inline unsigned char *
1164 sg_scatg2virt(const struct scatterlist *sclp)
1165 {
1166 	return (sclp && sclp->page) ?
1167 	    (unsigned char *) page_address(sclp->page) + sclp->offset : NULL;
1168 }
1169 
/* When startFinish==1, increments the page count of every page other than
   the first in each scatter-gather element obtained from __get_free_pages().
   When startFinish==0, decrements those counts again. */
1173 static void
1174 sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
1175 {
1176 	void *page_ptr;
1177 	struct page *page;
1178 	int k, m;
1179 
1180 	SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
1181 				   startFinish, rsv_schp->k_use_sg));
1182 	/* N.B. correction _not_ applied to base page of each allocation */
1183 	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
1184 		struct scatterlist *sclp = rsv_schp->buffer;
1185 
1186 		for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
1187 			for (m = PAGE_SIZE; m < sclp->length; m += PAGE_SIZE) {
1188 				page_ptr = sg_scatg2virt(sclp) + m;
1189 				page = virt_to_page(page_ptr);
1190 				if (startFinish)
1191 					get_page(page);
1192 				else {
1193 					if (page_count(page) > 0)
1194 						__put_page(page);
1195 				}
1196 			}
1197 		}
1198 	} else {		/* reserve buffer is just a single allocation */
1199 		for (m = PAGE_SIZE; m < rsv_schp->bufflen; m += PAGE_SIZE) {
1200 			page_ptr = (unsigned char *) rsv_schp->buffer + m;
1201 			page = virt_to_page(page_ptr);
1202 			if (startFinish)
1203 				get_page(page);
1204 			else {
1205 				if (page_count(page) > 0)
1206 					__put_page(page);
1207 			}
1208 		}
1209 	}
1210 }
1211 
1212 static struct page *
1213 sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1214 {
1215 	Sg_fd *sfp;
1216 	struct page *page = NOPAGE_SIGBUS;
1217 	void *page_ptr = NULL;
1218 	unsigned long offset;
1219 	Sg_scatter_hold *rsv_schp;
1220 
1221 	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1222 		return page;
1223 	rsv_schp = &sfp->reserve;
1224 	offset = addr - vma->vm_start;
1225 	if (offset >= rsv_schp->bufflen)
1226 		return page;
1227 	SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
1228 				   offset, rsv_schp->k_use_sg));
1229 	if (rsv_schp->k_use_sg) {	/* reserve buffer is a scatter gather list */
1230 		int k;
1231 		unsigned long sa = vma->vm_start;
1232 		unsigned long len;
1233 		struct scatterlist *sclp = rsv_schp->buffer;
1234 
1235 		for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1236 		     ++k, ++sclp) {
1237 			len = vma->vm_end - sa;
1238 			len = (len < sclp->length) ? len : sclp->length;
1239 			if (offset < len) {
1240 				page_ptr = sg_scatg2virt(sclp) + offset;
1241 				page = virt_to_page(page_ptr);
1242 				get_page(page);	/* increment page count */
1243 				break;
1244 			}
1245 			sa += len;
1246 			offset -= len;
1247 		}
1248 	} else {		/* reserve buffer is just a single allocation */
1249 		page_ptr = (unsigned char *) rsv_schp->buffer + offset;
1250 		page = virt_to_page(page_ptr);
1251 		get_page(page);	/* increment page count */
1252 	}
1253 	if (type)
1254 		*type = VM_FAULT_MINOR;
1255 	return page;
1256 }
1257 
1258 static struct vm_operations_struct sg_mmap_vm_ops = {
1259 	.nopage = sg_vma_nopage,
1260 };
1261 
1262 static int
1263 sg_mmap(struct file *filp, struct vm_area_struct *vma)
1264 {
1265 	Sg_fd *sfp;
1266 	unsigned long req_sz;
1267 	Sg_scatter_hold *rsv_schp;
1268 
1269 	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1270 		return -ENXIO;
1271 	req_sz = vma->vm_end - vma->vm_start;
1272 	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
1273 				   (void *) vma->vm_start, (int) req_sz));
1274 	if (vma->vm_pgoff)
1275 		return -EINVAL;	/* want no offset */
1276 	rsv_schp = &sfp->reserve;
1277 	if (req_sz > rsv_schp->bufflen)
1278 		return -ENOMEM;	/* cannot map more than reserved buffer */
1279 
1280 	if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
1281 		int k;
1282 		unsigned long sa = vma->vm_start;
1283 		unsigned long len;
1284 		struct scatterlist *sclp = rsv_schp->buffer;
1285 
1286 		for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1287 		     ++k, ++sclp) {
1288 			if (0 != sclp->offset)
1289 				return -EFAULT;	/* non page aligned memory ?? */
1290 			len = vma->vm_end - sa;
1291 			len = (len < sclp->length) ? len : sclp->length;
1292 			sa += len;
1293 		}
1294 	} else {	/* reserve buffer is just a single allocation */
1295 		if ((unsigned long) rsv_schp->buffer & (PAGE_SIZE - 1))
1296 			return -EFAULT;	/* non page aligned memory ?? */
1297 	}
1298 	if (0 == sfp->mmap_called) {
1299 		sg_rb_correct4mmap(rsv_schp, 1);	/* do only once per fd lifetime */
1300 		sfp->mmap_called = 1;
1301 	}
1302 	vma->vm_flags |= (VM_RESERVED | VM_IO);
1303 	vma->vm_private_data = sfp;
1304 	vma->vm_ops = &sg_mmap_vm_ops;
1305 	return 0;
1306 }
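/*
 * Illustrative pairing with SG_FLAG_MMAP_IO (a sketch; len is an assumption
 * and must not exceed the reserve buffer size checked above):
 *
 *	size_t len = 65536;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * A subsequent SG_IO with hdr.flags |= SG_FLAG_MMAP_IO and hdr.dxfer_len
 * <= len then places the data in p without a separate copy to user space.
 */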
1307 
1308 /* This function is a "bottom half" handler that is called by the
1309  * mid level when a command is completed (or has failed). */
1310 static void
1311 sg_cmd_done(Scsi_Cmnd * SCpnt)
1312 {
1313 	Scsi_Request *SRpnt = NULL;
1314 	Sg_device *sdp = NULL;
1315 	Sg_fd *sfp;
1316 	Sg_request *srp = NULL;
1317 	unsigned long iflags;
1318 	unsigned int ms;
1319 
1320 	if (SCpnt && (SRpnt = SCpnt->sc_request))
1321 		srp = (Sg_request *) SRpnt->upper_private_data;
1322 	if (NULL == srp) {
1323 		printk(KERN_ERR "sg_cmd_done: NULL request\n");
1324 		if (SRpnt)
1325 			scsi_release_request(SRpnt);
1326 		return;
1327 	}
1328 	sfp = srp->parentfp;
1329 	if (sfp)
1330 		sdp = sfp->parentdp;
1331 	if ((NULL == sdp) || sdp->detached) {
1332 		printk(KERN_INFO "sg_cmd_done: device detached\n");
1333 		scsi_release_request(SRpnt);
1334 		return;
1335 	}
1336 
1337 	/* First transfer ownership of data buffers to sg_device object. */
1338 	srp->data.k_use_sg = SRpnt->sr_use_sg;
1339 	srp->data.sglist_len = SRpnt->sr_sglist_len;
1340 	srp->data.bufflen = SRpnt->sr_bufflen;
1341 	srp->data.buffer = SRpnt->sr_buffer;
1342 	/* now clear out request structure */
1343 	SRpnt->sr_use_sg = 0;
1344 	SRpnt->sr_sglist_len = 0;
1345 	SRpnt->sr_bufflen = 0;
1346 	SRpnt->sr_buffer = NULL;
1347 	SRpnt->sr_underflow = 0;
1348 	SRpnt->sr_request->rq_disk = NULL; /* "sg" _disowns_ request blk */
1349 
1350 	srp->my_cmdp = NULL;
1351 
1352 	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1353 		sdp->disk->disk_name, srp->header.pack_id, (int) SRpnt->sr_result));
1354 	srp->header.resid = SCpnt->resid;
1355 	ms = jiffies_to_msecs(jiffies);
1356 	srp->header.duration = (ms > srp->header.duration) ?
1357 				(ms - srp->header.duration) : 0;
1358 	if (0 != SRpnt->sr_result) {
1359 		struct scsi_sense_hdr sshdr;
1360 
1361 		memcpy(srp->sense_b, SRpnt->sr_sense_buffer,
1362 		       sizeof (srp->sense_b));
1363 		srp->header.status = 0xff & SRpnt->sr_result;
1364 		srp->header.masked_status = status_byte(SRpnt->sr_result);
1365 		srp->header.msg_status = msg_byte(SRpnt->sr_result);
1366 		srp->header.host_status = host_byte(SRpnt->sr_result);
1367 		srp->header.driver_status = driver_byte(SRpnt->sr_result);
1368 		if ((sdp->sgdebug > 0) &&
1369 		    ((CHECK_CONDITION == srp->header.masked_status) ||
1370 		     (COMMAND_TERMINATED == srp->header.masked_status)))
1371 			scsi_print_req_sense("sg_cmd_done", SRpnt);
1372 
1373 		/* Following if statement is a patch supplied by Eric Youngdale */
1374 		if (driver_byte(SRpnt->sr_result) != 0
1375 		    && scsi_command_normalize_sense(SCpnt, &sshdr)
1376 		    && !scsi_sense_is_deferred(&sshdr)
1377 		    && sshdr.sense_key == UNIT_ATTENTION
1378 		    && sdp->device->removable) {
1379 			/* Detected possible disc change. Set the bit - this */
1380 			/* may be used if there are filesystems using this device */
1381 			sdp->device->changed = 1;
1382 		}
1383 	}
1384 	/* Rely on write phase to clean out srp status values, so no "else" */
1385 
1386 	scsi_release_request(SRpnt);
1387 	SRpnt = NULL;
1388 	if (sfp->closed) {	/* whoops this fd already released, cleanup */
1389 		SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
1390 		sg_finish_rem_req(srp);
1391 		srp = NULL;
1392 		if (NULL == sfp->headrp) {
1393 			SCSI_LOG_TIMEOUT(1, printk("sg...bh: already closed, final cleanup\n"));
1394 			if (0 == sg_remove_sfp(sdp, sfp)) {	/* device still present */
1395 				scsi_device_put(sdp->device);
1396 			}
1397 			sfp = NULL;
1398 		}
1399 	} else if (srp && srp->orphan) {
1400 		if (sfp->keep_orphan)
1401 			srp->sg_io_owned = 0;
1402 		else {
1403 			sg_finish_rem_req(srp);
1404 			srp = NULL;
1405 		}
1406 	}
1407 	if (sfp && srp) {
1408 		/* Now wake up any sg_read() that is waiting for this packet. */
1409 		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
1410 		write_lock_irqsave(&sfp->rq_list_lock, iflags);
1411 		srp->done = 1;
1412 		wake_up_interruptible(&sfp->read_wait);
1413 		write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1414 	}
1415 }
1416 
1417 static struct file_operations sg_fops = {
1418 	.owner = THIS_MODULE,
1419 	.read = sg_read,
1420 	.write = sg_write,
1421 	.poll = sg_poll,
1422 	.ioctl = sg_ioctl,
1423 #ifdef CONFIG_COMPAT
1424 	.compat_ioctl = sg_compat_ioctl,
1425 #endif
1426 	.open = sg_open,
1427 	.mmap = sg_mmap,
1428 	.release = sg_release,
1429 	.fasync = sg_fasync,
1430 };
1431 
1432 static struct class *sg_sysfs_class;
1433 
1434 static int sg_sysfs_valid = 0;
1435 
1436 static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1437 {
1438 	Sg_device *sdp;
1439 	unsigned long iflags;
1440 	void *old_sg_dev_arr = NULL;
1441 	int k, error;
1442 
1443 	sdp = kmalloc(sizeof(Sg_device), GFP_KERNEL);
1444 	if (!sdp) {
1445 		printk(KERN_WARNING "kmalloc Sg_device failure\n");
1446 		return -ENOMEM;
1447 	}
1448 
1449 	write_lock_irqsave(&sg_dev_arr_lock, iflags);
1450 	if (unlikely(sg_nr_dev >= sg_dev_max)) {	/* try to resize */
1451 		Sg_device **tmp_da;
1452 		int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
1453 		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1454 
1455 		tmp_da = kmalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
1456 		if (unlikely(!tmp_da))
1457 			goto expand_failed;
1458 
1459 		write_lock_irqsave(&sg_dev_arr_lock, iflags);
1460 		memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
1461 		memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
1462 		old_sg_dev_arr = sg_dev_arr;
1463 		sg_dev_arr = tmp_da;
1464 		sg_dev_max = tmp_dev_max;
1465 	}
1466 
1467 	for (k = 0; k < sg_dev_max; k++)
1468 		if (!sg_dev_arr[k])
1469 			break;
1470 	if (unlikely(k >= SG_MAX_DEVS))
1471 		goto overflow;
1472 
1473 	memset(sdp, 0, sizeof(*sdp));
1474 	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
1475 	sprintf(disk->disk_name, "sg%d", k);
1476 	disk->first_minor = k;
1477 	sdp->disk = disk;
1478 	sdp->device = scsidp;
1479 	init_waitqueue_head(&sdp->o_excl_wait);
1480 	sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0;
1481 
1482 	sg_nr_dev++;
1483 	sg_dev_arr[k] = sdp;
1484 	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1485 	error = k;
1486 
1487  out:
1488 	if (error < 0)
1489 		kfree(sdp);
1490 	kfree(old_sg_dev_arr);
1491 	return error;
1492 
1493  expand_failed:
1494 	printk(KERN_WARNING "sg_alloc: device array cannot be resized\n");
1495 	error = -ENOMEM;
1496 	goto out;
1497 
1498  overflow:
1499 	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1500 	printk(KERN_WARNING
1501 	       "Unable to attach sg device <%d, %d, %d, %d> type=%d, minor "
1502 	       "number exceeds %d\n", scsidp->host->host_no, scsidp->channel,
1503 	       scsidp->id, scsidp->lun, scsidp->type, SG_MAX_DEVS - 1);
1504 	error = -ENODEV;
1505 	goto out;
1506 }
1507 
1508 static int
1509 sg_add(struct class_device *cl_dev)
1510 {
1511 	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1512 	struct gendisk *disk;
1513 	Sg_device *sdp = NULL;
1514 	struct cdev * cdev = NULL;
1515 	int error, k;
1516 
1517 	disk = alloc_disk(1);
1518 	if (!disk) {
1519 		printk(KERN_WARNING "alloc_disk failed\n");
1520 		return -ENOMEM;
1521 	}
1522 	disk->major = SCSI_GENERIC_MAJOR;
1523 
1524 	error = -ENOMEM;
1525 	cdev = cdev_alloc();
1526 	if (!cdev) {
1527 		printk(KERN_WARNING "cdev_alloc failed\n");
1528 		goto out;
1529 	}
1530 	cdev->owner = THIS_MODULE;
1531 	cdev->ops = &sg_fops;
1532 
1533 	error = sg_alloc(disk, scsidp);
1534 	if (error < 0) {
1535 		printk(KERN_WARNING "sg_alloc failed\n");
1536 		goto out;
1537 	}
1538 	k = error;
1539 	sdp = sg_dev_arr[k];
1540 
1541 	devfs_mk_cdev(MKDEV(SCSI_GENERIC_MAJOR, k),
1542 			S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
1543 			"%s/generic", scsidp->devfs_name);
1544 	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
1545 	if (error) {
1546 		devfs_remove("%s/generic", scsidp->devfs_name);
1547 		goto out;
1548 	}
1549 	sdp->cdev = cdev;
1550 	if (sg_sysfs_valid) {
1551 		struct class_device * sg_class_member;
1552 
		sg_class_member = class_device_create(sg_sysfs_class,
				MKDEV(SCSI_GENERIC_MAJOR, k),
				cl_dev->dev, "%s",
				disk->disk_name);
		if (IS_ERR(sg_class_member))
			printk(KERN_WARNING "sg_add: "
				"class_device_create failed\n");
		else {
			/* only touch the class device if it was created */
			class_set_devdata(sg_class_member, sdp);
			error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
						  &sg_class_member->kobj,
						  "generic");
			if (error)
				printk(KERN_ERR "sg_add: unable to make symlink "
						"'generic' back to sg%d\n", k);
		}
1566 	} else
1567 		printk(KERN_WARNING "sg_add: sg_sys INvalid\n");
1568 
1569 	printk(KERN_NOTICE
1570 	       "Attached scsi generic sg%d at scsi%d, channel"
1571 	       " %d, id %d, lun %d,  type %d\n", k,
1572 	       scsidp->host->host_no, scsidp->channel, scsidp->id,
1573 	       scsidp->lun, scsidp->type);
1574 
1575 	return 0;
1576 
1577 out:
1578 	put_disk(disk);
1579 	if (cdev)
1580 		cdev_del(cdev);
1581 	return error;
1582 }
1583 
1584 static void
1585 sg_remove(struct class_device *cl_dev)
1586 {
1587 	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1588 	Sg_device *sdp = NULL;
1589 	unsigned long iflags;
1590 	Sg_fd *sfp;
1591 	Sg_fd *tsfp;
1592 	Sg_request *srp;
1593 	Sg_request *tsrp;
1594 	int k, delay;
1595 
1596 	if (NULL == sg_dev_arr)
1597 		return;
1598 	delay = 0;
1599 	write_lock_irqsave(&sg_dev_arr_lock, iflags);
1600 	for (k = 0; k < sg_dev_max; k++) {
1601 		sdp = sg_dev_arr[k];
1602 		if ((NULL == sdp) || (sdp->device != scsidp))
1603 			continue;	/* dirty but lowers nesting */
1604 		if (sdp->headfp) {
1605 			sdp->detached = 1;
1606 			for (sfp = sdp->headfp; sfp; sfp = tsfp) {
1607 				tsfp = sfp->nextfp;
1608 				for (srp = sfp->headrp; srp; srp = tsrp) {
1609 					tsrp = srp->nextrp;
1610 					if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
1611 						sg_finish_rem_req(srp);
1612 				}
1613 				if (sfp->closed) {
1614 					scsi_device_put(sdp->device);
1615 					__sg_remove_sfp(sdp, sfp);
1616 				} else {
1617 					delay = 1;
1618 					wake_up_interruptible(&sfp->read_wait);
1619 					kill_fasync(&sfp->async_qp, SIGPOLL,
1620 						    POLL_HUP);
1621 				}
1622 			}
1623 			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k));
1624 			if (NULL == sdp->headfp) {
1625 				sg_dev_arr[k] = NULL;
1626 			}
1627 		} else {	/* nothing active, simple case */
1628 			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
1629 			sg_dev_arr[k] = NULL;
1630 		}
1631 		sg_nr_dev--;
1632 		break;
1633 	}
1634 	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1635 
1636 	if (sdp) {
1637 		sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1638 		class_device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, k));
1639 		cdev_del(sdp->cdev);
1640 		sdp->cdev = NULL;
1641 		devfs_remove("%s/generic", scsidp->devfs_name);
1642 		put_disk(sdp->disk);
1643 		sdp->disk = NULL;
1644 		if (NULL == sdp->headfp)
1645 			kfree((char *) sdp);
1646 	}
1647 
1648 	if (delay)
1649 		msleep(10);	/* dirty detach so delay device destruction */
1650 }
1651 
1652 /* Set 'perm' (4th argument) to 0 to disable module_param's definition
1653  * of sysfs parameters (which module_param doesn't yet support).
1654  * Sysfs parameters defined explicitly below.
1655  */
1656 module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
1657 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
1658 
1659 MODULE_AUTHOR("Douglas Gilbert");
1660 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1661 MODULE_LICENSE("GPL");
1662 MODULE_VERSION(SG_VERSION_STR);
1663 
1664 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
1665 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
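/*
 * Example invocation (illustrative values): load the module with a larger
 * per-fd reserve buffer and direct I/O permitted:
 *
 *	modprobe sg def_reserved_size=131072 allow_dio=1
 */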
1666 
1667 static int __init
1668 init_sg(void)
1669 {
1670 	int rc;
1671 
1672 	if (def_reserved_size >= 0)
1673 		sg_big_buff = def_reserved_size;
1674 
1675 	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1676 				    SG_MAX_DEVS, "sg");
1677 	if (rc)
1678 		return rc;
1679 	sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1680 	if (IS_ERR(sg_sysfs_class)) {
1681 		rc = PTR_ERR(sg_sysfs_class);
1682 		goto err_out;
1683 	}
1684 	sg_sysfs_valid = 1;
1685 	rc = scsi_register_interface(&sg_interface);
1686 	if (0 == rc) {
1687 #ifdef CONFIG_SCSI_PROC_FS
1688 		sg_proc_init();
1689 #endif				/* CONFIG_SCSI_PROC_FS */
1690 		return 0;
1691 	}
1692 	class_destroy(sg_sysfs_class);
1693 err_out:
1694 	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
1695 	return rc;
1696 }
1697 
1698 static void __exit
1699 exit_sg(void)
1700 {
1701 #ifdef CONFIG_SCSI_PROC_FS
1702 	sg_proc_cleanup();
1703 #endif				/* CONFIG_SCSI_PROC_FS */
1704 	scsi_unregister_interface(&sg_interface);
1705 	class_destroy(sg_sysfs_class);
1706 	sg_sysfs_valid = 0;
1707 	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1708 				 SG_MAX_DEVS);
1709 	if (sg_dev_arr != NULL) {
1710 		kfree((char *) sg_dev_arr);
1711 		sg_dev_arr = NULL;
1712 	}
1713 	sg_dev_max = 0;
1714 }
1715 
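/*
 * sg_start_req() chooses one of three ways to provide the data buffer for
 * a request: direct I/O straight into the user's pages (if enabled and the
 * transfer is suitably aligned), the per-fd reserve buffer (if free and
 * large enough), or a freshly built indirect scatter-gather buffer.
 */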
1716 static int
1717 sg_start_req(Sg_request * srp)
1718 {
1719 	int res;
1720 	Sg_fd *sfp = srp->parentfp;
1721 	sg_io_hdr_t *hp = &srp->header;
1722 	int dxfer_len = (int) hp->dxfer_len;
1723 	int dxfer_dir = hp->dxfer_direction;
1724 	Sg_scatter_hold *req_schp = &srp->data;
1725 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
1726 
1727 	SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1728 	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1729 		return 0;
1730 	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
1731 	    (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
1732 	    (!sfp->parentdp->device->host->unchecked_isa_dma)) {
1733 		res = sg_build_direct(srp, sfp, dxfer_len);
1734 		if (res <= 0)	/* -ve -> error, 0 -> done, 1 -> try indirect */
1735 			return res;
1736 	}
1737 	if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
1738 		sg_link_reserve(sfp, srp, dxfer_len);
1739 	else {
1740 		res = sg_build_indirect(req_schp, sfp, dxfer_len);
1741 		if (res) {
1742 			sg_remove_scat(req_schp);
1743 			return res;
1744 		}
1745 	}
1746 	return 0;
1747 }
1748 
1749 static void
1750 sg_finish_rem_req(Sg_request * srp)
1751 {
1752 	Sg_fd *sfp = srp->parentfp;
1753 	Sg_scatter_hold *req_schp = &srp->data;
1754 
1755 	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
1756 	if (srp->res_used)
1757 		sg_unlink_reserve(sfp, srp);
1758 	else
1759 		sg_remove_scat(req_schp);
1760 	sg_remove_request(sfp, srp);
1761 }
1762 
1763 static int
1764 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1765 {
1766 	int ret_sz;
1767 	int elem_sz = sizeof (struct scatterlist);
1768 	int sg_bufflen = tablesize * elem_sz;
1769 	int mx_sc_elems = tablesize;
1770 
1771 	schp->buffer = sg_page_malloc(sg_bufflen, sfp->low_dma, &ret_sz);
1772 	if (!schp->buffer)
1773 		return -ENOMEM;
1774 	else if (ret_sz != sg_bufflen) {
1775 		sg_bufflen = ret_sz;
1776 		mx_sc_elems = sg_bufflen / elem_sz;
1777 	}
1778 	schp->sglist_len = sg_bufflen;
1779 	memset(schp->buffer, 0, sg_bufflen);
1780 	return mx_sc_elems;	/* number of scat_gath elements allocated */
1781 }
1782 
1783 #ifdef SG_ALLOW_DIO_CODE
1784 /* vvvvvvvv  following code borrowed from st driver's direct IO vvvvvvvvv */
1785 	/* hopefully this generic code will be moved to a library */
1786 
1787 /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1788    - mapping of all pages is not successful, or
1789    - any page is above max_pfn
1790    (i.e., the call either succeeds completely or fails)
1791 */
1792 static int
1793 st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1794 	          unsigned long uaddr, size_t count, int rw,
1795 	          unsigned long max_pfn)
1796 {
1797 	unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1798 	unsigned long start = uaddr >> PAGE_SHIFT;
1799 	const int nr_pages = end - start;
1800 	int res, i, j;
1801 	struct page **pages;
1802 
1803 	/* reject a range that wraps the address space */
1804 	if ((uaddr + count) < uaddr)
1805 		return -EINVAL;
1806 
1807 	/* Too big */
1808 	if (nr_pages > max_pages)
1809 		return -ENOMEM;
1810 
1811 	/* zero-length transfer: nothing to map */
1812 	if (count == 0)
1813 		return 0;
1814 
1815 	if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1816 		return -ENOMEM;
1817 
1818 	/* Try to fault in all of the necessary pages */
1819 	down_read(&current->mm->mmap_sem);
1820 	/* rw==READ means read from drive, write into memory area */
1821 	res = get_user_pages(
1822 		current,
1823 		current->mm,
1824 		uaddr,
1825 		nr_pages,
1826 		rw == READ,
1827 		0, /* don't force */
1828 		pages,
1829 		NULL);
1830 	up_read(&current->mm->mmap_sem);
1831 
1832 	/* errors, or fewer pages mapped than requested, bail out here */
1833 	if (res < nr_pages)
1834 		goto out_unmap;
1835 
1836 	for (i = 0; i < nr_pages; i++) {
1837 		/* FIXME: flush superfluous for rw==READ,
1838 		 * probably wrong function for rw==WRITE
1839 		 */
1840 		flush_dcache_page(pages[i]);
1841 		if (page_to_pfn(pages[i]) > max_pfn)
1842 			goto out_unlock;
1843 		/* ?? Is locking needed? I don't think so */
1844 		/* if (TestSetPageLocked(pages[i]))
1845 		   goto out_unlock; */
1846 	}
1847 
1848 	/* Populate the scatter/gather list */
1849 	sgl[0].page = pages[0];
1850 	sgl[0].offset = uaddr & ~PAGE_MASK;
1851 	if (nr_pages > 1) {
1852 		sgl[0].length = PAGE_SIZE - sgl[0].offset;
1853 		count -= sgl[0].length;
1854 		for (i = 1; i < nr_pages; i++) {
1855 			sgl[i].offset = 0;
1856 			sgl[i].page = pages[i];
1857 			sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
1858 			count -= PAGE_SIZE;
1859 		}
1860 	} else {
1862 		sgl[0].length = count;
1863 	}
1864 
1865 	kfree(pages);
1866 	return nr_pages;
1867 
1868  out_unlock:
1869 	/* for (j=0; j < i; j++)
1870 	   unlock_page(pages[j]); */
1871 	res = 0;
1872  out_unmap:
1873 	if (res > 0)
1874 		for (j = 0; j < res; j++)
1875 			page_cache_release(pages[j]);
1876 	kfree(pages);
1877 	return res;
1878 }
1879 
1880 
1881 /* And unmap them... */
1882 static int
1883 st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1884 		    int dirtied)
1885 {
1886 	int i;
1887 
1888 	for (i = 0; i < nr_pages; i++) {
1889 		if (dirtied && !PageReserved(sgl[i].page))
1890 			SetPageDirty(sgl[i].page);
1891 		/* unlock_page(sgl[i].page); */
1892 		/* FIXME: cache flush missing for rw==READ
1893 		 * FIXME: call the correct reference counting function
1894 		 */
1895 		page_cache_release(sgl[i].page);
1896 	}
1897 
1898 	return 0;
1899 }
1900 
1901 /* ^^^^^^^^  above code borrowed from st driver's direct IO ^^^^^^^^^ */
1902 #endif
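
/*
 * For reference only: a minimal user-space sketch that exercises the direct
 * I/O path above via SG_IO. It is not part of the driver; the device name,
 * the page-aligned buffer from posix_memalign() and the READ(6) command are
 * illustrative assumptions.
 */
#if 0
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int sg_dio_read_example(const char *dev)
{
	unsigned char cdb[6] = { 0x08, 0, 0, 0, 1, 0 };	/* READ(6): 1 block at LBA 0 */
	unsigned char sense[32];
	sg_io_hdr_t hdr;
	void *buf;
	int fd, ret;

	fd = open(dev, O_RDWR);			/* e.g. "/dev/sg0" */
	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 512)) {	/* satisfy queue_dma_alignment */
		close(fd);
		return -1;
	}
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = 512;			/* assumes 512-byte logical blocks */
	hdr.dxferp = buf;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 20000;			/* milliseconds */
	hdr.flags = SG_FLAG_DIRECT_IO;		/* sg falls back to indirect if denied */
	ret = ioctl(fd, SG_IO, &hdr);
	free(buf);
	close(fd);
	return ret;
}
#endif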
1903 
1904 
1905 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1906 static int
1907 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1908 {
1909 #ifdef SG_ALLOW_DIO_CODE
1910 	sg_io_hdr_t *hp = &srp->header;
1911 	Sg_scatter_hold *schp = &srp->data;
1912 	int sg_tablesize = sfp->parentdp->sg_tablesize;
1913 	struct scatterlist *sgl;
1914 	int mx_sc_elems, res;
1915 	struct scsi_device *sdev = sfp->parentdp->device;
1916 
1917 	if (((unsigned long)hp->dxferp &
1918 			queue_dma_alignment(sdev->request_queue)) != 0)
1919 		return 1;
1920 	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1921 	if (mx_sc_elems <= 0)
1922 		return 1;
1924 	sgl = (struct scatterlist *)schp->buffer;
1925 	res = st_map_user_pages(sgl, mx_sc_elems, (unsigned long)hp->dxferp, dxfer_len,
1926 				(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0, ULONG_MAX);
1927 	if (res <= 0)
1928 		return 1;
1929 	schp->k_use_sg = res;
1930 	schp->dio_in_use = 1;
1931 	hp->info |= SG_INFO_DIRECT_IO;
1932 	return 0;
1933 #else
1934 	return 1;
1935 #endif
1936 }
1937 
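/*
 * sg_build_indirect() first tries a single contiguous allocation for small
 * requests (<= SG_SCATTER_SZ); larger requests get a scatter-gather list of
 * up to sg_tablesize elements of at most SG_SCATTER_SZ bytes each, accepting
 * whatever (possibly smaller) piece sg_page_malloc() hands back per element.
 */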
1938 static int
1939 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1940 {
1941 	int ret_sz;
1942 	int blk_size = buff_size;
1943 	unsigned char *p = NULL;
1944 
1945 	if ((blk_size < 0) || (!sfp))
1946 		return -EFAULT;
1947 	if (0 == blk_size)
1948 		++blk_size;	/* avoid a zero-sized allocation */
1949 /* round request up to next highest SG_SECTOR_SZ byte boundary */
1950 	blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
1951 	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1952 				   buff_size, blk_size));
1953 	if (blk_size <= SG_SCATTER_SZ) {
1954 		p = sg_page_malloc(blk_size, sfp->low_dma, &ret_sz);
1955 		if (!p)
1956 			return -ENOMEM;
1957 		if (blk_size == ret_sz) {	/* got it on the first attempt */
1958 			schp->k_use_sg = 0;
1959 			schp->buffer = p;
1960 			schp->bufflen = blk_size;
1961 			schp->b_malloc_len = blk_size;
1962 			return 0;
1963 		}
1964 	} else {
1965 		p = sg_page_malloc(SG_SCATTER_SZ, sfp->low_dma, &ret_sz);
1966 		if (!p)
1967 			return -ENOMEM;
1968 	}
1969 /* Want some local declarations, so start new block ... */
1970 	{			/* let's try and build a scatter gather list */
1971 		struct scatterlist *sclp;
1972 		int k, rem_sz, num;
1973 		int mx_sc_elems;
1974 		int sg_tablesize = sfp->parentdp->sg_tablesize;
1975 		int first = 1;
1976 
1977 		/* N.B. ret_sz carried into this block ... */
1978 		mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1979 		if (mx_sc_elems < 0)
1980 			return mx_sc_elems;	/* most likely -ENOMEM */
1981 
1982 		for (k = 0, sclp = schp->buffer, rem_sz = blk_size;
1983 		     (rem_sz > 0) && (k < mx_sc_elems);
1984 		     ++k, rem_sz -= ret_sz, ++sclp) {
1985 			if (first)
1986 				first = 0;
1987 			else {
1988 				num = (rem_sz > SG_SCATTER_SZ) ?
1989 				      SG_SCATTER_SZ : rem_sz;
1991 				p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
1992 				if (!p)
1993 					break;
1994 			}
1995 			sclp->page = virt_to_page(p);
1996 			sclp->offset = offset_in_page(p);
1997 			sclp->length = ret_sz;
1998 
1999 			SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, a=0x%p, len=%d\n",
2000 					  k, sg_scatg2virt(sclp), ret_sz));
2001 		}		/* end of for loop */
2002 		schp->k_use_sg = k;
2003 		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
2004 		schp->bufflen = blk_size;
2005 		if (rem_sz > 0)	/* must have failed */
2006 			return -ENOMEM;
2007 	}
2008 	return 0;
2009 }
2010 
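/*
 * sg_write_xfer() copies user data into the kernel buffer before a command
 * is issued. The user iovec(s) and the kernel scatter-gather list rarely
 * share segment boundaries, so the two are walked in lock-step, copying
 * min(user segment, kernel segment) at a time. sg_read_xfer() further down
 * is its mirror image for the device-to-user direction.
 */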
2011 static int
2012 sg_write_xfer(Sg_request * srp)
2013 {
2014 	sg_io_hdr_t *hp = &srp->header;
2015 	Sg_scatter_hold *schp = &srp->data;
2016 	int num_xfer = 0;
2017 	int j, k, onum, usglen, ksglen, res;
2018 	int iovec_count = (int) hp->iovec_count;
2019 	int dxfer_dir = hp->dxfer_direction;
2020 	unsigned char *p;
2021 	unsigned char __user *up;
2022 	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2023 
2024 	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
2025 	    (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2026 		num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
2027 		if (schp->bufflen < num_xfer)
2028 			num_xfer = schp->bufflen;
2029 	}
2030 	if ((num_xfer <= 0) || (schp->dio_in_use) ||
2031 	    (new_interface
2032 	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2033 		return 0;
2034 
2035 	SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2036 			  num_xfer, iovec_count, schp->k_use_sg));
2037 	if (iovec_count) {
2038 		onum = iovec_count;
2039 		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2040 			return -EFAULT;
2041 	} else
2042 		onum = 1;
2043 
2044 	if (0 == schp->k_use_sg) {	/* kernel has single buffer */
2045 		for (j = 0, p = schp->buffer; j < onum; ++j) {
2046 			res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
2047 			if (res)
2048 				return res;
2049 			usglen = (num_xfer > usglen) ? usglen : num_xfer;
2050 			if (__copy_from_user(p, up, usglen))
2051 				return -EFAULT;
2052 			p += usglen;
2053 			num_xfer -= usglen;
2054 			if (num_xfer <= 0)
2055 				return 0;
2056 		}
2057 	} else {		/* kernel using scatter gather list */
2058 		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2059 
2060 		ksglen = (int) sclp->length;
2061 		p = sg_scatg2virt(sclp);
2062 		for (j = 0, k = 0; j < onum; ++j) {
2063 			res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
2064 			if (res)
2065 				return res;
2066 
2067 			for (; p; ++sclp, ksglen = (int) sclp->length,
2068 				  p = sg_scatg2virt(sclp)) {
2069 				if (usglen <= 0)
2070 					break;
2071 				if (ksglen > usglen) {
2072 					if (usglen >= num_xfer) {
2073 						if (__copy_from_user
2074 						    (p, up, num_xfer))
2075 							return -EFAULT;
2076 						return 0;
2077 					}
2078 					if (__copy_from_user(p, up, usglen))
2079 						return -EFAULT;
2080 					p += usglen;
2081 					ksglen -= usglen;
2082 					break;
2083 				} else {
2084 					if (ksglen >= num_xfer) {
2085 						if (__copy_from_user
2086 						    (p, up, num_xfer))
2087 							return -EFAULT;
2088 						return 0;
2089 					}
2090 					if (__copy_from_user(p, up, ksglen))
2091 						return -EFAULT;
2092 					up += ksglen;
2093 					usglen -= ksglen;
2094 				}
2095 				++k;
2096 				if (k >= schp->k_use_sg)
2097 					return 0;
2098 			}
2099 		}
2100 	}
2101 	return 0;
2102 }
2103 
2104 static int
2105 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
2106 	   int wr_xf, int *countp, unsigned char __user **up)
2107 {
2108 	int num_xfer = (int) hp->dxfer_len;
2109 	unsigned char __user *p = hp->dxferp;
2110 	int count;
2111 
2112 	if (0 == sg_num) {
2113 		if (wr_xf && ('\0' == hp->interface_id))
2114 			count = (int) hp->flags;	/* holds "old" input_size */
2115 		else
2116 			count = num_xfer;
2117 	} else {
2118 		sg_iovec_t iovec;
2119 		if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
2120 			return -EFAULT;
2121 		p = iovec.iov_base;
2122 		count = (int) iovec.iov_len;
2123 	}
2124 	if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
2125 		return -EFAULT;
2126 	if (up)
2127 		*up = p;
2128 	if (countp)
2129 		*countp = count;
2130 	return 0;
2131 }
2132 
2133 static void
2134 sg_remove_scat(Sg_scatter_hold * schp)
2135 {
2136 	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2137 	if (schp->buffer && (schp->sglist_len > 0)) {
2138 		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2139 
2140 		if (schp->dio_in_use) {
2141 #ifdef SG_ALLOW_DIO_CODE
2142 			st_unmap_user_pages(sclp, schp->k_use_sg, TRUE);
2143 #endif
2144 		} else {
2145 			int k;
2146 
2147 			for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp);
2148 			     ++k, ++sclp) {
2149 				SCSI_LOG_TIMEOUT(5, printk(
2150 				    "sg_remove_scat: k=%d, a=0x%p, len=%d\n",
2151 				    k, sg_scatg2virt(sclp), sclp->length));
2152 				sg_page_free(sg_scatg2virt(sclp), sclp->length);
2153 				sclp->page = NULL;
2154 				sclp->offset = 0;
2155 				sclp->length = 0;
2156 			}
2157 		}
2158 		sg_page_free(schp->buffer, schp->sglist_len);
2159 	} else if (schp->buffer)
2160 		sg_page_free(schp->buffer, schp->b_malloc_len);
2161 	memset(schp, 0, sizeof (*schp));
2162 }
2163 
2164 static int
2165 sg_read_xfer(Sg_request * srp)
2166 {
2167 	sg_io_hdr_t *hp = &srp->header;
2168 	Sg_scatter_hold *schp = &srp->data;
2169 	int num_xfer = 0;
2170 	int j, k, onum, usglen, ksglen, res;
2171 	int iovec_count = (int) hp->iovec_count;
2172 	int dxfer_dir = hp->dxfer_direction;
2173 	unsigned char *p;
2174 	unsigned char __user *up;
2175 	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2176 
2177 	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2178 	    || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2179 		num_xfer = hp->dxfer_len;
2180 		if (schp->bufflen < num_xfer)
2181 			num_xfer = schp->bufflen;
2182 	}
2183 	if ((num_xfer <= 0) || (schp->dio_in_use) ||
2184 	    (new_interface
2185 	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2186 		return 0;
2187 
2188 	SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2189 			  num_xfer, iovec_count, schp->k_use_sg));
2190 	if (iovec_count) {
2191 		onum = iovec_count;
2192 		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2193 			return -EFAULT;
2194 	} else
2195 		onum = 1;
2196 
2197 	if (0 == schp->k_use_sg) {	/* kernel has single buffer */
2198 		for (j = 0, p = schp->buffer; j < onum; ++j) {
2199 			res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2200 			if (res)
2201 				return res;
2202 			usglen = (num_xfer > usglen) ? usglen : num_xfer;
2203 			if (__copy_to_user(up, p, usglen))
2204 				return -EFAULT;
2205 			p += usglen;
2206 			num_xfer -= usglen;
2207 			if (num_xfer <= 0)
2208 				return 0;
2209 		}
2210 	} else {		/* kernel using scatter gather list */
2211 		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2212 
2213 		ksglen = (int) sclp->length;
2214 		p = sg_scatg2virt(sclp);
2215 		for (j = 0, k = 0; j < onum; ++j) {
2216 			res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2217 			if (res)
2218 				return res;
2219 
2220 			for (; p; ++sclp, ksglen = (int) sclp->length,
2221 				  p = sg_scatg2virt(sclp)) {
2222 				if (usglen <= 0)
2223 					break;
2224 				if (ksglen > usglen) {
2225 					if (usglen >= num_xfer) {
2226 						if (__copy_to_user
2227 						    (up, p, num_xfer))
2228 							return -EFAULT;
2229 						return 0;
2230 					}
2231 					if (__copy_to_user(up, p, usglen))
2232 						return -EFAULT;
2233 					p += usglen;
2234 					ksglen -= usglen;
2235 					break;
2236 				} else {
2237 					if (ksglen >= num_xfer) {
2238 						if (__copy_to_user
2239 						    (up, p, num_xfer))
2240 							return -EFAULT;
2241 						return 0;
2242 					}
2243 					if (__copy_to_user(up, p, ksglen))
2244 						return -EFAULT;
2245 					up += ksglen;
2246 					usglen -= ksglen;
2247 				}
2248 				++k;
2249 				if (k >= schp->k_use_sg)
2250 					return 0;
2251 			}
2252 		}
2253 	}
2254 	return 0;
2255 }
2256 
2257 static int
2258 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2259 {
2260 	Sg_scatter_hold *schp = &srp->data;
2261 
2262 	SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2263 				   num_read_xfer));
2264 	if ((!outp) || (num_read_xfer <= 0))
2265 		return 0;
2266 	if (schp->k_use_sg > 0) {
2267 		int k, num;
2268 		struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2269 
2270 		for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp);
2271 		     ++k, ++sclp) {
2272 			num = (int) sclp->length;
2273 			if (num > num_read_xfer) {
2274 				if (__copy_to_user
2275 				    (outp, sg_scatg2virt(sclp), num_read_xfer))
2276 					return -EFAULT;
2277 				break;
2278 			} else {
2279 				if (__copy_to_user
2280 				    (outp, sg_scatg2virt(sclp), num))
2281 					return -EFAULT;
2282 				num_read_xfer -= num;
2283 				if (num_read_xfer <= 0)
2284 					break;
2285 				outp += num;
2286 			}
2287 		}
2288 	} else {
2289 		if (__copy_to_user(outp, schp->buffer, num_read_xfer))
2290 			return -EFAULT;
2291 	}
2292 	return 0;
2293 }
2294 
2295 static void
2296 sg_build_reserve(Sg_fd * sfp, int req_size)
2297 {
2298 	Sg_scatter_hold *schp = &sfp->reserve;
2299 
2300 	SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
2301 	do {
2302 		if (req_size < PAGE_SIZE)
2303 			req_size = PAGE_SIZE;
2304 		if (0 == sg_build_indirect(schp, sfp, req_size))
2305 			return;
2306 		else
2307 			sg_remove_scat(schp);
2308 		req_size >>= 1;	/* divide by 2 */
2309 	} while (req_size > (PAGE_SIZE / 2));
2310 }
2311 
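/*
 * sg_link_reserve() lends (a prefix of) the reserve buffer to a request.
 * For a scattered reserve it truncates the element in which the requested
 * size ends, remembering the original length in sfp->save_scat_len so that
 * sg_unlink_reserve() can restore it once the request completes.
 */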
2312 static void
2313 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2314 {
2315 	Sg_scatter_hold *req_schp = &srp->data;
2316 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
2317 
2318 	srp->res_used = 1;
2319 	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2320 	size = (size + 1) & (~1);	/* round to even for aha1542 */
2321 	if (rsv_schp->k_use_sg > 0) {
2322 		int k, num;
2323 		int rem = size;
2324 		struct scatterlist *sclp =
2325 		    (struct scatterlist *) rsv_schp->buffer;
2326 
2327 		for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
2328 			num = (int) sclp->length;
2329 			if (rem <= num) {
2330 				if (0 == k) {
2331 					req_schp->k_use_sg = 0;
2332 					req_schp->buffer = sg_scatg2virt(sclp);
2333 				} else {
2334 					sfp->save_scat_len = num;
2335 					sclp->length = (unsigned) rem;
2336 					req_schp->k_use_sg = k + 1;
2337 					req_schp->sglist_len =
2338 					    rsv_schp->sglist_len;
2339 					req_schp->buffer = rsv_schp->buffer;
2340 				}
2341 				req_schp->bufflen = size;
2342 				req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2343 				break;
2344 			} else
2345 				rem -= num;
2346 		}
2347 		if (k >= rsv_schp->k_use_sg)
2348 			SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
2349 	} else {
2350 		req_schp->k_use_sg = 0;
2351 		req_schp->bufflen = size;
2352 		req_schp->buffer = rsv_schp->buffer;
2353 		req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2354 	}
2355 }
2356 
2357 static void
2358 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2359 {
2360 	Sg_scatter_hold *req_schp = &srp->data;
2361 	Sg_scatter_hold *rsv_schp = &sfp->reserve;
2362 
2363 	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2364 				   (int) req_schp->k_use_sg));
2365 	if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2366 		struct scatterlist *sclp =
2367 		    (struct scatterlist *) rsv_schp->buffer;
2368 
2369 		if (sfp->save_scat_len > 0)
2370 			(sclp + (req_schp->k_use_sg - 1))->length =
2371 			    (unsigned) sfp->save_scat_len;
2372 		else
2373 			SCSI_LOG_TIMEOUT(1, printk("sg_unlink_reserve: BAD save_scat_len\n"));
2374 	}
2375 	req_schp->k_use_sg = 0;
2376 	req_schp->bufflen = 0;
2377 	req_schp->buffer = NULL;
2378 	req_schp->sglist_len = 0;
2379 	sfp->save_scat_len = 0;
2380 	srp->res_used = 0;
2381 }
2382 
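/*
 * Request states as tracked in Sg_request.done: 0 while the command is in
 * flight, 1 once completion has arrived and the response is ready to be
 * read, 2 from the moment a reader claims it (set below to guard against
 * other readers).
 */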
2383 static Sg_request *
2384 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2385 {
2386 	Sg_request *resp;
2387 	unsigned long iflags;
2388 
2389 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2390 	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2391 		/* look for requests that are ready + not SG_IO owned */
2392 		if ((1 == resp->done) && (!resp->sg_io_owned) &&
2393 		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2394 			resp->done = 2;	/* guard against other readers */
2395 			break;
2396 		}
2397 	}
2398 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2399 	return resp;
2400 }
2401 
2402 #ifdef CONFIG_SCSI_PROC_FS
2403 static Sg_request *
2404 sg_get_nth_request(Sg_fd * sfp, int nth)
2405 {
2406 	Sg_request *resp;
2407 	unsigned long iflags;
2408 	int k;
2409 
2410 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
2411 	for (k = 0, resp = sfp->headrp; resp && (k < nth);
2412 	     ++k, resp = resp->nextrp) ;
2413 	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2414 	return resp;
2415 }
2416 #endif
2417 
2418 /* always adds to end of list; a free req_arr slot has parentfp == NULL */
2419 static Sg_request *
2420 sg_add_request(Sg_fd * sfp)
2421 {
2422 	int k;
2423 	unsigned long iflags;
2424 	Sg_request *resp;
2425 	Sg_request *rp = sfp->req_arr;
2426 
2427 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2428 	resp = sfp->headrp;
2429 	if (!resp) {
2430 		memset(rp, 0, sizeof (Sg_request));
2431 		rp->parentfp = sfp;
2432 		resp = rp;
2433 		sfp->headrp = resp;
2434 	} else {
2435 		if (0 == sfp->cmd_q)
2436 			resp = NULL;	/* command queuing disallowed */
2437 		else {
2438 			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2439 				if (!rp->parentfp)
2440 					break;
2441 			}
2442 			if (k < SG_MAX_QUEUE) {
2443 				memset(rp, 0, sizeof (Sg_request));
2444 				rp->parentfp = sfp;
2445 				while (resp->nextrp)
2446 					resp = resp->nextrp;
2447 				resp->nextrp = rp;
2448 				resp = rp;
2449 			} else
2450 				resp = NULL;
2451 		}
2452 	}
2453 	if (resp) {
2454 		resp->nextrp = NULL;
2455 		resp->header.duration = jiffies_to_msecs(jiffies);
2456 		resp->my_cmdp = NULL;
2457 	}
2458 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2459 	return resp;
2460 }
2461 
2462 /* Return of 1 for found; 0 for not found */
2463 static int
2464 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2465 {
2466 	Sg_request *prev_rp;
2467 	Sg_request *rp;
2468 	unsigned long iflags;
2469 	int res = 0;
2470 
2471 	if ((!sfp) || (!srp) || (!sfp->headrp))
2472 		return res;
2473 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2474 	if (srp->my_cmdp)
2475 		srp->my_cmdp->upper_private_data = NULL;
2476 	prev_rp = sfp->headrp;
2477 	if (srp == prev_rp) {
2478 		sfp->headrp = prev_rp->nextrp;
2479 		prev_rp->parentfp = NULL;
2480 		res = 1;
2481 	} else {
2482 		while ((rp = prev_rp->nextrp)) {
2483 			if (srp == rp) {
2484 				prev_rp->nextrp = rp->nextrp;
2485 				rp->parentfp = NULL;
2486 				res = 1;
2487 				break;
2488 			}
2489 			prev_rp = rp;
2490 		}
2491 	}
2492 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2493 	return res;
2494 }
2495 
2496 #ifdef CONFIG_SCSI_PROC_FS
2497 static Sg_fd *
2498 sg_get_nth_sfp(Sg_device * sdp, int nth)
2499 {
2500 	Sg_fd *resp;
2501 	unsigned long iflags;
2502 	int k;
2503 
2504 	read_lock_irqsave(&sg_dev_arr_lock, iflags);
2505 	for (k = 0, resp = sdp->headfp; resp && (k < nth);
2506 	     ++k, resp = resp->nextfp) ;
2507 	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2508 	return resp;
2509 }
2510 #endif
2511 
2512 static Sg_fd *
2513 sg_add_sfp(Sg_device * sdp, int dev)
2514 {
2515 	Sg_fd *sfp;
2516 	unsigned long iflags;
2517 
2518 	sfp = (Sg_fd *) sg_page_malloc(sizeof (Sg_fd), 0, NULL);
2519 	if (!sfp)
2520 		return NULL;
2521 	memset(sfp, 0, sizeof (Sg_fd));
2522 	init_waitqueue_head(&sfp->read_wait);
2523 	rwlock_init(&sfp->rq_list_lock);
2524 
2525 	sfp->timeout = SG_DEFAULT_TIMEOUT;
2526 	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2527 	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2528 	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2529 	    sdp->device->host->unchecked_isa_dma : 1;
2530 	sfp->cmd_q = SG_DEF_COMMAND_Q;
2531 	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2532 	sfp->parentdp = sdp;
2533 	write_lock_irqsave(&sg_dev_arr_lock, iflags);
2534 	if (!sdp->headfp)
2535 		sdp->headfp = sfp;
2536 	else {			/* add to tail of existing list */
2537 		Sg_fd *pfp = sdp->headfp;
2538 		while (pfp->nextfp)
2539 			pfp = pfp->nextfp;
2540 		pfp->nextfp = sfp;
2541 	}
2542 	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2543 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2544 	sg_build_reserve(sfp, sg_big_buff);
2545 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp:   bufflen=%d, k_use_sg=%d\n",
2546 			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2547 	return sfp;
2548 }
2549 
2550 static void
2551 __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2552 {
2553 	Sg_fd *fp;
2554 	Sg_fd *prev_fp;
2555 
2556 	prev_fp = sdp->headfp;
2557 	if (sfp == prev_fp)
2558 		sdp->headfp = prev_fp->nextfp;
2559 	else {
2560 		while ((fp = prev_fp->nextfp)) {
2561 			if (sfp == fp) {
2562 				prev_fp->nextfp = fp->nextfp;
2563 				break;
2564 			}
2565 			prev_fp = fp;
2566 		}
2567 	}
2568 	if (sfp->reserve.bufflen > 0) {
2569 		SCSI_LOG_TIMEOUT(6,
2570 			printk("__sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
2571 			(int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
2572 		if (sfp->mmap_called)
2573 			sg_rb_correct4mmap(&sfp->reserve, 0);	/* undo correction */
2574 		sg_remove_scat(&sfp->reserve);
2575 	}
2576 	sfp->parentdp = NULL;
2577 	SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp:    sfp=0x%p\n", sfp));
2578 	sg_page_free((char *) sfp, sizeof (Sg_fd));
2579 }
2580 
2581 /* Returns 0 in normal case, 1 when detached and sdp object removed */
2582 static int
2583 sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2584 {
2585 	Sg_request *srp;
2586 	Sg_request *tsrp;
2587 	int dirty = 0;
2588 	int res = 0;
2589 
2590 	for (srp = sfp->headrp; srp; srp = tsrp) {
2591 		tsrp = srp->nextrp;
2592 		if (sg_srp_done(srp, sfp))
2593 			sg_finish_rem_req(srp);
2594 		else
2595 			++dirty;
2596 	}
2597 	if (0 == dirty) {
2598 		unsigned long iflags;
2599 
2600 		write_lock_irqsave(&sg_dev_arr_lock, iflags);
2601 		__sg_remove_sfp(sdp, sfp);
2602 		if (sdp->detached && (NULL == sdp->headfp)) {
2603 			int k, maxd;
2604 
2605 			maxd = sg_dev_max;
2606 			for (k = 0; k < maxd; ++k) {
2607 				if (sdp == sg_dev_arr[k])
2608 					break;
2609 			}
2610 			if (k < maxd)
2611 				sg_dev_arr[k] = NULL;
2612 			kfree((char *) sdp);
2613 			res = 1;
2614 		}
2615 		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2616 	} else {
2617 		/* take a reference on the scsi_device to inhibit unloading
2618 		 * sg and the associated adapter driver while this dirty fd
2619 		 * is still around */
2620 		scsi_device_get(sdp->device);	/* XXX: retval ignored? */
2621 		sfp->closed = 1;	/* flag dirty state on this fd */
2622 		SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
2623 				  dirty));
2624 	}
2625 	return res;
2626 }
2627 
2628 static int
2629 sg_res_in_use(Sg_fd * sfp)
2630 {
2631 	const Sg_request *srp;
2632 	unsigned long iflags;
2633 
2634 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
2635 	for (srp = sfp->headrp; srp; srp = srp->nextrp)
2636 		if (srp->res_used)
2637 			break;
2638 	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2639 	return srp ? 1 : 0;
2640 }
2641 
2642 /* If retSzp==NULL the caller wants the exact size or failure */
2643 static char *
2644 sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2645 {
2646 	char *resp = NULL;
2647 	int page_mask;
2648 	int order, a_size;
2649 	int resSz = rqSz;
2650 
2651 	if (rqSz <= 0)
2652 		return resp;
2653 
2654 	if (lowDma)
2655 		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
2656 	else
2657 		page_mask = GFP_ATOMIC | __GFP_NOWARN;
2658 
2659 	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2660 	     order++, a_size <<= 1) ;
2661 	resp = (char *) __get_free_pages(page_mask, order);
2662 	while ((!resp) && order && retSzp) {
2663 		--order;
2664 		a_size >>= 1;	/* divide by 2, until PAGE_SIZE */
2665 		resp = (char *) __get_free_pages(page_mask, order);	/* try half */
2666 		resSz = a_size;
2667 	}
2668 	if (resp) {
2669 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2670 			memset(resp, 0, resSz);
2671 		if (retSzp)
2672 			*retSzp = resSz;
2673 	}
2674 	return resp;
2675 }
2676 
2677 static void
2678 sg_page_free(char *buff, int size)
2679 {
2680 	int order, a_size;
2681 
2682 	if (!buff)
2683 		return;
2684 	for (order = 0, a_size = PAGE_SIZE; a_size < size;
2685 	     order++, a_size <<= 1) ;
2686 	free_pages((unsigned long) buff, order);
2687 }
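
/*
 * Both helpers recompute the buddy order from the size, so they must be
 * called with matching sizes. With 4 KiB pages, for example, a 70 KiB
 * request maps to order 5 (128 KiB); on allocation failure sg_page_malloc()
 * retries at half the size down to one page when the caller can accept a
 * shorter buffer (retSzp != NULL).
 */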
2688 
2689 #ifndef MAINTENANCE_IN_CMD
2690 #define MAINTENANCE_IN_CMD 0xa3
2691 #endif
2692 
2693 static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
2694 	INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2695 	READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
2696 	SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
2697 };
2698 
2699 static int
2700 sg_allow_access(unsigned char opcode, char dev_type)
2701 {
2702 	int k;
2703 
2704 	if (TYPE_SCANNER == dev_type)	/* TYPE_ROM maybe burner */
2705 		return 1;
2706 	for (k = 0; k < sizeof (allow_ops); ++k) {
2707 		if (opcode == allow_ops[k])
2708 			return 1;
2709 	}
2710 	return 0;
2711 }
2712 
2713 #ifdef CONFIG_SCSI_PROC_FS
2714 static int
2715 sg_last_dev(void)
2716 {
2717 	int k;
2718 	unsigned long iflags;
2719 
2720 	read_lock_irqsave(&sg_dev_arr_lock, iflags);
2721 	for (k = sg_dev_max - 1; k >= 0; --k)
2722 		if (sg_dev_arr[k] && sg_dev_arr[k]->device)
2723 			break;
2724 	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2725 	return k + 1;		/* origin 1 */
2726 }
2727 #endif
2728 
2729 static Sg_device *
2730 sg_get_dev(int dev)
2731 {
2732 	Sg_device *sdp = NULL;
2733 	unsigned long iflags;
2734 
2735 	if (sg_dev_arr && (dev >= 0)) {
2736 		read_lock_irqsave(&sg_dev_arr_lock, iflags);
2737 		if (dev < sg_dev_max)
2738 			sdp = sg_dev_arr[dev];
2739 		read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2740 	}
2741 	return sdp;
2742 }
2743 
2744 #ifdef CONFIG_SCSI_PROC_FS
2745 
2746 static struct proc_dir_entry *sg_proc_sgp = NULL;
2747 
2748 static char sg_proc_sg_dirname[] = "scsi/sg";
2749 
2750 static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2751 
2752 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2753 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2754 			          size_t count, loff_t *off);
2755 static struct file_operations adio_fops = {
2756 	/* .owner, .read and .llseek added in sg_proc_init() */
2757 	.open = sg_proc_single_open_adio,
2758 	.write = sg_proc_write_adio,
2759 	.release = single_release,
2760 };
2761 
2762 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2763 static ssize_t sg_proc_write_dressz(struct file *filp,
2764 		const char __user *buffer, size_t count, loff_t *off);
2765 static struct file_operations dressz_fops = {
2766 	.open = sg_proc_single_open_dressz,
2767 	.write = sg_proc_write_dressz,
2768 	.release = single_release,
2769 };
2770 
2771 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2772 static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2773 static struct file_operations version_fops = {
2774 	.open = sg_proc_single_open_version,
2775 	.release = single_release,
2776 };
2777 
2778 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2779 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2780 static struct file_operations devhdr_fops = {
2781 	.open = sg_proc_single_open_devhdr,
2782 	.release = single_release,
2783 };
2784 
2785 static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2786 static int sg_proc_open_dev(struct inode *inode, struct file *file);
2787 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2788 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2789 static void dev_seq_stop(struct seq_file *s, void *v);
2790 static struct file_operations dev_fops = {
2791 	.open = sg_proc_open_dev,
2792 	.release = seq_release,
2793 };
2794 static struct seq_operations dev_seq_ops = {
2795 	.start = dev_seq_start,
2796 	.next  = dev_seq_next,
2797 	.stop  = dev_seq_stop,
2798 	.show  = sg_proc_seq_show_dev,
2799 };
2800 
2801 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2802 static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2803 static struct file_operations devstrs_fops = {
2804 	.open = sg_proc_open_devstrs,
2805 	.release = seq_release,
2806 };
2807 static struct seq_operations devstrs_seq_ops = {
2808 	.start = dev_seq_start,
2809 	.next  = dev_seq_next,
2810 	.stop  = dev_seq_stop,
2811 	.show  = sg_proc_seq_show_devstrs,
2812 };
2813 
2814 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2815 static int sg_proc_open_debug(struct inode *inode, struct file *file);
2816 static struct file_operations debug_fops = {
2817 	.open = sg_proc_open_debug,
2818 	.release = seq_release,
2819 };
2820 static struct seq_operations debug_seq_ops = {
2821 	.start = dev_seq_start,
2822 	.next  = dev_seq_next,
2823 	.stop  = dev_seq_stop,
2824 	.show  = sg_proc_seq_show_debug,
2825 };
2826 
2827 
2828 struct sg_proc_leaf {
2829 	const char * name;
2830 	struct file_operations * fops;
2831 };
2832 
2833 static struct sg_proc_leaf sg_proc_leaf_arr[] = {
2834 	{"allow_dio", &adio_fops},
2835 	{"debug", &debug_fops},
2836 	{"def_reserved_size", &dressz_fops},
2837 	{"device_hdr", &devhdr_fops},
2838 	{"devices", &dev_fops},
2839 	{"device_strs", &devstrs_fops},
2840 	{"version", &version_fops}
2841 };
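
/*
 * The table above yields the following tree (assuming procfs on /proc);
 * entries whose fops have a write method are root-writable (S_IWUSR),
 * the rest are read-only:
 *
 *   /proc/scsi/sg/allow_dio           rw
 *   /proc/scsi/sg/debug               r
 *   /proc/scsi/sg/def_reserved_size   rw
 *   /proc/scsi/sg/device_hdr          r
 *   /proc/scsi/sg/devices             r
 *   /proc/scsi/sg/device_strs         r
 *   /proc/scsi/sg/version             r
 */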
2842 
2843 static int
2844 sg_proc_init(void)
2845 {
2846 	int k, mask;
2847 	int num_leaves =
2848 	    sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
2849 	struct proc_dir_entry *pdep;
2850 	struct sg_proc_leaf * leaf;
2851 
2852 	sg_proc_sgp = create_proc_entry(sg_proc_sg_dirname,
2853 					S_IFDIR | S_IRUGO | S_IXUGO, NULL);
2854 	if (!sg_proc_sgp)
2855 		return 1;
2856 	for (k = 0; k < num_leaves; ++k) {
2857 		leaf = &sg_proc_leaf_arr[k];
2858 		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2859 		pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
2860 		if (pdep) {
2861 			leaf->fops->owner = THIS_MODULE;
2862 			leaf->fops->read = seq_read;
2863 			leaf->fops->llseek = seq_lseek;
2864 			pdep->proc_fops = leaf->fops;
2865 		}
2866 	}
2867 	return 0;
2868 }
2869 
2870 static void
2871 sg_proc_cleanup(void)
2872 {
2873 	int k;
2874 	int num_leaves =
2875 	    sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
2876 
2877 	if (!sg_proc_sgp)
2878 		return;
2879 	for (k = 0; k < num_leaves; ++k)
2880 		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2881 	remove_proc_entry(sg_proc_sg_dirname, NULL);
2882 }
2883 
2884 
2885 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2886 {
2887 	seq_printf(s, "%d\n", *((int *)s->private));
2888 	return 0;
2889 }
2890 
2891 static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2892 {
2893 	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2894 }
2895 
2896 static ssize_t
2897 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2898 		   size_t count, loff_t *off)
2899 {
2900 	int num;
2901 	char buff[11];
2902 
2903 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2904 		return -EACCES;
2905 	num = (count < 10) ? count : 10;
2906 	if (copy_from_user(buff, buffer, num))
2907 		return -EFAULT;
2908 	buff[num] = '\0';
2909 	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2910 	return count;
2911 }
2912 
2913 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2914 {
2915 	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2916 }
2917 
2918 static ssize_t
2919 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2920 		     size_t count, loff_t *off)
2921 {
2922 	int num;
2923 	unsigned long k = ULONG_MAX;
2924 	char buff[11];
2925 
2926 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2927 		return -EACCES;
2928 	num = (count < 10) ? count : 10;
2929 	if (copy_from_user(buff, buffer, num))
2930 		return -EFAULT;
2931 	buff[num] = '\0';
2932 	k = simple_strtoul(buff, NULL, 10);
2933 	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
2934 		sg_big_buff = k;
2935 		return count;
2936 	}
2937 	return -ERANGE;
2938 }
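
/*
 * e.g. "echo 131072 > /proc/scsi/sg/def_reserved_size" (needs CAP_SYS_ADMIN
 * and CAP_SYS_RAWIO) raises the reserve-buffer size used for subsequently
 * opened fds; values above 1 MB are rejected with ERANGE.
 */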
2939 
2940 static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2941 {
2942 	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2943 		   sg_version_date);
2944 	return 0;
2945 }
2946 
2947 static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2948 {
2949 	return single_open(file, sg_proc_seq_show_version, NULL);
2950 }
2951 
2952 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2953 {
2954 	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
2955 		   "online\n");
2956 	return 0;
2957 }
2958 
2959 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2960 {
2961 	return single_open(file, sg_proc_seq_show_devhdr, NULL);
2962 }
2963 
2964 struct sg_proc_deviter {
2965 	loff_t	index;
2966 	size_t	max;
2967 };
2968 
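/*
 * Minimal seq_file iterator over the device array: dev_seq_start() allocates
 * the cursor and positions it at *pos, dev_seq_next() advances it and
 * dev_seq_stop() frees it. The same trio backs the devices, device_strs and
 * debug files, which differ only in their show() methods.
 */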
2969 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2970 {
2971 	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2972 
2973 	s->private = it;
2974 	if (!it)
2975 		return NULL;
2976 
2977 	if (NULL == sg_dev_arr)
2978 		return NULL;
2979 	it->index = *pos;
2980 	it->max = sg_last_dev();
2981 	if (it->index >= it->max)
2982 		return NULL;
2983 	return it;
2984 }
2985 
2986 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2987 {
2988 	struct sg_proc_deviter * it = s->private;
2989 
2990 	*pos = ++it->index;
2991 	return (it->index < it->max) ? it : NULL;
2992 }
2993 
2994 static void dev_seq_stop(struct seq_file *s, void *v)
2995 {
2996 	kfree(s->private);
2997 }
2998 
2999 static int sg_proc_open_dev(struct inode *inode, struct file *file)
3000 {
3001 	return seq_open(file, &dev_seq_ops);
3002 }
3003 
3004 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
3005 {
3006 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3007 	Sg_device *sdp;
3008 	struct scsi_device *scsidp;
3009 
3010 	sdp = it ? sg_get_dev(it->index) : NULL;
3011 	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
3012 		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
3013 			      scsidp->host->host_no, scsidp->channel,
3014 			      scsidp->id, scsidp->lun, (int) scsidp->type,
3015 			      1,
3016 			      (int) scsidp->queue_depth,
3017 			      (int) scsidp->device_busy,
3018 			      (int) scsi_device_online(scsidp));
3019 	else
3020 		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
3021 	return 0;
3022 }
3023 
3024 static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
3025 {
3026 	return seq_open(file, &devstrs_seq_ops);
3027 }
3028 
3029 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
3030 {
3031 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3032 	Sg_device *sdp;
3033 	struct scsi_device *scsidp;
3034 
3035 	sdp = it ? sg_get_dev(it->index) : NULL;
3036 	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
3037 		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
3038 			   scsidp->vendor, scsidp->model, scsidp->rev);
3039 	else
3040 		seq_printf(s, "<no active device>\n");
3041 	return 0;
3042 }
3043 
3044 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
3045 {
3046 	int k, m, new_interface, blen, usg;
3047 	Sg_request *srp;
3048 	Sg_fd *fp;
3049 	const sg_io_hdr_t *hp;
3050 	const char * cp;
3051 	unsigned int ms;
3052 
3053 	for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
3054 		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
3055 			   "(res)sgat=%d low_dma=%d\n", k + 1,
3056 			   jiffies_to_msecs(fp->timeout),
3057 			   fp->reserve.bufflen,
3058 			   (int) fp->reserve.k_use_sg,
3059 			   (int) fp->low_dma);
3060 		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
3061 			   (int) fp->cmd_q, (int) fp->force_packid,
3062 			   (int) fp->keep_orphan, (int) fp->closed);
3063 		for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
3064 			hp = &srp->header;
3065 			new_interface = (hp->interface_id == '\0') ? 0 : 1;
3066 			if (srp->res_used) {
3067 				if (new_interface &&
3068 				    (SG_FLAG_MMAP_IO & hp->flags))
3069 					cp = "     mmap>> ";
3070 				else
3071 					cp = "     rb>> ";
3072 			} else {
3073 				if (SG_INFO_DIRECT_IO_MASK & hp->info)
3074 					cp = "     dio>> ";
3075 				else
3076 					cp = "     ";
3077 			}
3078 			seq_printf(s, "%s", cp);
3079 			blen = srp->my_cmdp ?
3080 				srp->my_cmdp->sr_bufflen : srp->data.bufflen;
3081 			usg = srp->my_cmdp ?
3082 				srp->my_cmdp->sr_use_sg : srp->data.k_use_sg;
3083 			seq_printf(s, srp->done ?
3084 				   ((1 == srp->done) ?  "rcv:" : "fin:")
3085 				   : (srp->my_cmdp ? "act:" : "prior:"));
3086 			seq_printf(s, " id=%d blen=%d",
3087 				   srp->header.pack_id, blen);
3088 			if (srp->done)
3089 				seq_printf(s, " dur=%d", hp->duration);
3090 			else {
3091 				ms = jiffies_to_msecs(jiffies);
3092 				seq_printf(s, " t_o/elap=%d/%d",
3093 					(new_interface ? hp->timeout :
3094 						  jiffies_to_msecs(fp->timeout)),
3095 					(ms > hp->duration ? ms - hp->duration : 0));
3096 			}
3097 			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
3098 				   (int) srp->data.cmd_opcode);
3099 		}
3100 		if (0 == m)
3101 			seq_printf(s, "     No requests active\n");
3102 	}
3103 }
3104 
3105 static int sg_proc_open_debug(struct inode *inode, struct file *file)
3106 {
3107 	return seq_open(file, &debug_seq_ops);
3108 }
3109 
3110 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
3111 {
3112 	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3113 	Sg_device *sdp;
3114 
3115 	if (it && (0 == it->index)) {
3116 		seq_printf(s, "dev_max(currently)=%d max_active_device=%d "
3117 			   "(origin 1)\n", sg_dev_max, (int)it->max);
3118 		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
3119 	}
3120 	sdp = it ? sg_get_dev(it->index) : NULL;
3121 	if (sdp) {
3122 		struct scsi_device *scsidp = sdp->device;
3123 
3124 		if (NULL == scsidp) {
3125 			seq_printf(s, "device %d detached ??\n",
3126 				   (int)it->index);
3127 			return 0;
3128 		}
3129 
3130 		if (sg_get_nth_sfp(sdp, 0)) {
3131 			seq_printf(s, " >>> device=%s ",
3132 				sdp->disk->disk_name);
3133 			if (sdp->detached)
3134 				seq_printf(s, "detached pending close ");
3135 			else
3136 				seq_printf
3137 				    (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
3138 				     scsidp->host->host_no,
3139 				     scsidp->channel, scsidp->id,
3140 				     scsidp->lun,
3141 				     scsidp->host->hostt->emulated);
3142 			seq_printf(s, " sg_tablesize=%d excl=%d\n",
3143 				   sdp->sg_tablesize, sdp->exclude);
3144 		}
3145 		sg_proc_debug_helper(s, sdp);
3146 	}
3147 	return 0;
3148 }
3149 
3150 #endif				/* CONFIG_SCSI_PROC_FS */
3151 
3152 module_init(init_sg);
3153 module_exit(exit_sg);
3154 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
3155