xref: /linux/include/scsi/scsi_host.h (revision 913df4453f85f1fe79b35ecf3c9a0c0b707d22a2)
1 #ifndef _SCSI_SCSI_HOST_H
2 #define _SCSI_SCSI_HOST_H
3 
4 #include <linux/device.h>
5 #include <linux/list.h>
6 #include <linux/types.h>
7 #include <linux/workqueue.h>
8 #include <linux/mutex.h>
9 #include <scsi/scsi.h>
10 
11 struct request_queue;
12 struct block_device;
13 struct completion;
14 struct module;
15 struct scsi_cmnd;
16 struct scsi_device;
17 struct scsi_target;
18 struct Scsi_Host;
19 struct scsi_host_cmd_pool;
20 struct scsi_transport_template;
21 struct blk_queue_tags;
22 
23 
24 /*
25  * The various choices mean:
26  * NONE: Self evident.	Host adapter is not capable of scatter-gather.
27  * ALL:	 Means that the host adapter module can do scatter-gather,
28  *	 and that there is no limit to the size of the table to which
29  *	 we scatter/gather data.  The value we set here is the maximum
30  *	 single element sglist.  To use chained sglists, the adapter
31  *	 has to set a value beyond ALL (and correctly use the chain
32 	 *	 handling API).
33  * Anything else:  Indicates the maximum number of chains that can be
34  *	 used in one scatter-gather request.
35  */
36 #define SG_NONE 0
37 #define SG_ALL	SCSI_MAX_SG_SEGMENTS
38 
39 #define MODE_UNKNOWN 0x00
40 #define MODE_INITIATOR 0x01
41 #define MODE_TARGET 0x02
42 
43 #define DISABLE_CLUSTERING 0
44 #define ENABLE_CLUSTERING 1
45 
46 struct scsi_host_template {
47 	struct module *module;
48 	const char *name;
49 
50 	/*
51 	 * Used to initialize old-style drivers.  For new-style drivers
52 	 * just perform all work in your module initialization function.
53 	 *
54 	 * Status:  OBSOLETE
55 	 */
56 	int (* detect)(struct scsi_host_template *);
57 
58 	/*
59 	 * Used as unload callback for hosts with old-style drivers.
60 	 *
61 	 * Status: OBSOLETE
62 	 */
63 	int (* release)(struct Scsi_Host *);
64 
65 	/*
66 	 * The info function will return whatever useful information the
67 	 * developer sees fit.  If not provided, then the name field will
68 	 * be used instead.
69 	 *
70 	 * Status: OPTIONAL
71 	 */
72 	const char *(* info)(struct Scsi_Host *);
73 
74 	/*
75 	 * Ioctl interface
76 	 *
77 	 * Status: OPTIONAL
78 	 */
79 	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
80 
81 
82 #ifdef CONFIG_COMPAT
83 	/*
84 	 * Compat handler. Handle 32bit ABI.
85 	 * When an unknown ioctl is passed, return -ENOIOCTLCMD.
86 	 *
87 	 * Status: OPTIONAL
88 	 */
89 	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
90 #endif
91 
92 	/*
93 	 * The queuecommand function is used to queue up a scsi
94 	 * command block to the LLDD.  When the driver has finished
95 	 * processing the command, the done callback is invoked.
96 	 *
97 	 * If queuecommand returns 0, then the HBA has accepted the
98 	 * command.  The done() function must be called on the command
99 	 * when the driver has finished with it. (you may call done on the
100 	 * command before queuecommand returns, but in this case you
101 	 * *must* return 0 from queuecommand).
102 	 *
103 	 * Queuecommand may also reject the command, in which case it may
104 	 * not touch the command and must not call done() for it.
105 	 *
106 	 * There are two possible rejection returns:
107 	 *
108 	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
109 	 *   allow commands to other devices serviced by this host.
110 	 *
111 	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
112 	 *   host temporarily.
113 	 *
114          * For compatibility, any other non-zero return is treated the
115          * same as SCSI_MLQUEUE_HOST_BUSY.
116 	 *
117 	 * NOTE: "temporarily" means either until the next command for
118 	 * this device/host completes, or a period of time determined by
119 	 * I/O pressure in the system if there are no other outstanding
120 	 * commands.
121 	 *
122 	 * STATUS: REQUIRED
123 	 */
124 	int (* queuecommand)(struct scsi_cmnd *,
125 			     void (*done)(struct scsi_cmnd *));
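
	/*
	 * Example (editor's sketch, not part of the original header): a
	 * minimal queuecommand honouring the contract above.  If no slot is
	 * free, the command is rejected untouched with SCSI_MLQUEUE_HOST_BUSY
	 * and done() is not called; otherwise the completion callback is
	 * saved, the I/O is started and 0 is returned, with done(cmd) being
	 * invoked later (e.g. from the interrupt handler).  All mydrv_*
	 * names are hypothetical.
	 *
	 *	static int mydrv_queuecommand(struct scsi_cmnd *cmd,
	 *				      void (*done)(struct scsi_cmnd *))
	 *	{
	 *		struct mydrv_host *mh = shost_priv(cmd->device->host);
	 *
	 *		if (!mydrv_slot_available(mh))
	 *			return SCSI_MLQUEUE_HOST_BUSY;
	 *
	 *		cmd->scsi_done = done;
	 *		mydrv_start_io(mh, cmd);
	 *		return 0;
	 *	}
	 */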
126 
127 	/*
128 	 * The transfer functions are used to queue a scsi command to
129 	 * the LLD. When the driver is finished processing the command,
130 	 * the done callback is invoked.
131 	 *
132 	 * This is called to inform the LLD to transfer
133 	 * scsi_bufflen(cmd) bytes. scsi_sg_count(cmd) specifies the
134 	 * number of scatterlist entries in the command and
135 	 * scsi_sglist(cmd) returns the scatterlist.
136 	 *
137 	 * return values: see queuecommand
138 	 *
139 	 * If the LLD accepts the cmd, it should set the result to an
140 	 * appropriate value on completion, before calling the done function.
141 	 *
142 	 * STATUS: REQUIRED FOR TARGET DRIVERS
143 	 */
144 	/* TODO: rename */
145 	int (* transfer_response)(struct scsi_cmnd *,
146 				  void (*done)(struct scsi_cmnd *));
147 
148 	/*
149 	 * This is an error handling strategy routine.  You don't need to
150 	 * define one of these if you don't want to - there is a default
151 	 * routine that is present that should work in most cases.  For those
152 	 * driver authors that have the inclination and ability to write their
153 	 * own strategy routine, this is where it is specified.  Note - the
154 	 * strategy routine is *ALWAYS* run in the context of the kernel eh
155 	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
156 	 * handler when you execute this, and you are also guaranteed to
157 	 * *NOT* have any other commands being queued while you are in the
158 	 * strategy routine. When you return from this function, operations
159 	 * return to normal.
160 	 *
161 	 * See scsi_error.c scsi_unjam_host for additional comments about
162 	 * what this function should and should not be attempting to do.
163 	 *
164 	 * Status: REQUIRED	(at least one of them)
165 	 */
166 	int (* eh_abort_handler)(struct scsi_cmnd *);
167 	int (* eh_device_reset_handler)(struct scsi_cmnd *);
168 	int (* eh_target_reset_handler)(struct scsi_cmnd *);
169 	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
170 	int (* eh_host_reset_handler)(struct scsi_cmnd *);
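
	/*
	 * Example (editor's sketch): the general shape of one of the eh_*
	 * hooks, here an abort handler.  It runs in the eh thread, so it may
	 * sleep, and reports back with the midlayer's SUCCESS/FAILED codes.
	 * The mydrv_* helpers are hypothetical.
	 *
	 *	static int mydrv_eh_abort_handler(struct scsi_cmnd *cmd)
	 *	{
	 *		struct mydrv_host *mh = shost_priv(cmd->device->host);
	 *
	 *		if (mydrv_abort_io(mh, cmd) == 0)
	 *			return SUCCESS;
	 *		return FAILED;
	 *	}
	 */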
171 
172 	/*
173 	 * Before the mid layer attempts to scan for a new device where none
174 	 * currently exists, it will call this entry in your driver.  Should
175 	 * your driver need to allocate any structs or perform any other init
176 	 * items in order to send commands to a currently unused target/lun
177 	 * combo, then this is where you can perform those allocations.  This
178 	 * is specifically so that drivers won't have to perform any kind of
179 	 * "is this a new device" checks in their queuecommand routine,
180 	 * thereby making the hot path a bit quicker.
181 	 *
182 	 * Return values: 0 on success, non-0 on failure
183 	 *
184 	 * Deallocation:  If we didn't find any devices at this ID, you will
185 	 * get an immediate call to slave_destroy().  If we find something
186 	 * here then you will get a call to slave_configure(), then the
187 	 * device will be used for however long it is kept around, then when
188 	 * the device is removed from the system (or possibly at reboot
189 	 * time), you will then get a call to slave_destroy().  This is
190 	 * assuming you implement slave_configure and slave_destroy.
191 	 * However, if you allocate memory and hang it off the device struct,
192 	 * then you must implement the slave_destroy() routine at a minimum
193 	 * in order to avoid leaking memory each time a device is
194 	 * torn down.
195 	 *
196 	 * Status: OPTIONAL
197 	 */
198 	int (* slave_alloc)(struct scsi_device *);
199 
200 	/*
201 	 * Once the device has responded to an INQUIRY and we know the
202 	 * device is online, we call into the low level driver with the
203 	 * struct scsi_device *.  If the low level device driver implements
204 	 * this function, it *must* perform the task of setting the queue
205 	 * depth on the device.  All other tasks are optional and depend
206 	 * on what the driver supports and various implementation details.
207 	 *
208 	 * Things currently recommended to be handled at this time include:
209 	 *
210 	 * 1.  Setting the device queue depth.  Proper setting of this is
211 	 *     described in the comments for scsi_adjust_queue_depth.
212 	 * 2.  Determining if the device supports the various synchronous
213 	 *     negotiation protocols.  The device struct will already have
214 	 *     responded to INQUIRY and the results of the standard items
215 	 *     will have been shoved into the various device flag bits, e.g.
216 	 *     device->sdtr will be true if the device supports SDTR messages.
217 	 * 3.  Allocating command structs that the device will need.
218 	 * 4.  Setting the default timeout on this device (if needed).
219 	 * 5.  Anything else the low level driver might want to do on a device
220 	 *     specific setup basis...
221 	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
222 	 *     as offline on error so that no access will occur.  If you return
223 	 *     non-0, your slave_destroy routine will never get called for this
224 	 *     device, so don't leave any loose memory hanging around, clean
225 	 *     up after yourself before returning non-0.
226 	 *
227 	 * Status: OPTIONAL
228 	 */
229 	int (* slave_configure)(struct scsi_device *);
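
	/*
	 * Example (editor's sketch): a slave_configure that only performs the
	 * one mandatory task, setting the queue depth through
	 * scsi_adjust_queue_depth() as described above.  MYDRV_QUEUE_DEPTH is
	 * a hypothetical per-driver limit.
	 *
	 *	static int mydrv_slave_configure(struct scsi_device *sdev)
	 *	{
	 *		if (sdev->tagged_supported)
	 *			scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
	 *						MYDRV_QUEUE_DEPTH);
	 *		else
	 *			scsi_adjust_queue_depth(sdev, 0, 1);
	 *		return 0;
	 *	}
	 */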
230 
231 	/*
232 	 * Immediately prior to deallocating the device and after all activity
233 	 * has ceased, the mid layer calls this point so that the low level
234 	 * driver may completely detach itself from the scsi device and vice
235 	 * versa.  The low level driver is responsible for freeing any memory
236 	 * it allocated in the slave_alloc or slave_configure calls.
237 	 *
238 	 * Status: OPTIONAL
239 	 */
240 	void (* slave_destroy)(struct scsi_device *);
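
	/*
	 * Example (editor's sketch): pairing slave_alloc and slave_destroy so
	 * that per-device data hung off sdev->hostdata is always freed again,
	 * as required by the slave_alloc comment above.  struct mydrv_lun is
	 * hypothetical.
	 *
	 *	static int mydrv_slave_alloc(struct scsi_device *sdev)
	 *	{
	 *		sdev->hostdata = kzalloc(sizeof(struct mydrv_lun),
	 *					 GFP_KERNEL);
	 *		return sdev->hostdata ? 0 : -ENOMEM;
	 *	}
	 *
	 *	static void mydrv_slave_destroy(struct scsi_device *sdev)
	 *	{
	 *		kfree(sdev->hostdata);
	 *		sdev->hostdata = NULL;
	 *	}
	 */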
241 
242 	/*
243 	 * Before the mid layer attempts to scan for a new device attached
244 	 * to a target where no target currently exists, it will call this
245 	 * entry in your driver.  Should your driver need to allocate any
246 	 * structs or perform any other init items in order to send commands
247 	 * to a currently unused target, then this is where you can perform
248 	 * those allocations.
249 	 *
250 	 * Return values: 0 on success, non-0 on failure
251 	 *
252 	 * Status: OPTIONAL
253 	 */
254 	int (* target_alloc)(struct scsi_target *);
255 
256 	/*
257 	 * Immediately prior to deallocating the target structure, and
258 	 * after all activity to attached scsi devices has ceased, the
259 	 * midlayer calls this point so that the driver may deallocate
260 	 * and terminate any references to the target.
261 	 *
262 	 * Status: OPTIONAL
263 	 */
264 	void (* target_destroy)(struct scsi_target *);
265 
266 	/*
267 	 * If a host has the ability to discover targets on its own instead
268 	 * of scanning the entire bus, it can fill in this function and
269 	 * call scsi_scan_host().  This function will then be called
270 	 * periodically, with the scsi_host and the elapsed time of the
271 	 * scan in jiffies as arguments, until it returns 1.
272 	 *
273 	 * Status: OPTIONAL
274 	 */
275 	int (* scan_finished)(struct Scsi_Host *, unsigned long);
276 
277 	/*
278 	 * If the host wants to be called before the scan starts, but
279 	 * after the midlayer has set up ready for the scan, it can fill
280 	 * in this function.
281 	 *
282 	 * Status: OPTIONAL
283 	 */
284 	void (* scan_start)(struct Scsi_Host *);
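
	/*
	 * Example (editor's sketch): scan_start kicks off the adapter's own
	 * discovery, while scan_finished reports completion and gives up
	 * after an arbitrary (hypothetical) 10 second limit based on the
	 * elapsed jiffies passed in.  The mydrv_* helpers are hypothetical.
	 *
	 *	static void mydrv_scan_start(struct Scsi_Host *shost)
	 *	{
	 *		mydrv_start_discovery(shost_priv(shost));
	 *	}
	 *
	 *	static int mydrv_scan_finished(struct Scsi_Host *shost,
	 *				       unsigned long elapsed)
	 *	{
	 *		return mydrv_discovery_done(shost_priv(shost)) ||
	 *		       elapsed >= 10 * HZ;
	 *	}
	 */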
285 
286 	/*
287 	 * Fill in this function to allow the queue depth of this host
288 	 * to be changeable (on a per device basis).  Returns either
289 	 * the current queue depth setting (may be different from what
290 	 * was passed in) or an error.  An error should only be
291 	 * returned if the requested depth is legal but the driver was
292 	 * unable to set it.  If the requested depth is illegal, the
293 	 * driver should set and return the closest legal queue depth.
294 	 *
295 	 * Status: OPTIONAL
296 	 */
297 	int (* change_queue_depth)(struct scsi_device *, int);
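
	/*
	 * Example (editor's sketch): a change_queue_depth that clamps the
	 * request to a hypothetical hardware limit, applies it with
	 * scsi_adjust_queue_depth() (keeping the current tag type, via
	 * scsi_get_tag_type() from <scsi/scsi_tcq.h>) and returns the depth
	 * actually set.
	 *
	 *	static int mydrv_change_queue_depth(struct scsi_device *sdev,
	 *					    int depth)
	 *	{
	 *		if (depth > MYDRV_QUEUE_DEPTH)
	 *			depth = MYDRV_QUEUE_DEPTH;
	 *		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
	 *					depth);
	 *		return sdev->queue_depth;
	 *	}
	 */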
298 
299 	/*
300 	 * Fill in this function to allow the changing of tag types
301 	 * (this also allows the enabling/disabling of tag command
302 	 * queueing).  An error should only be returned if something
303 	 * went wrong in the driver while trying to set the tag type.
304 	 * If the driver doesn't support the requested tag type, then
305 	 * it should set the closest type it does support without
306 	 * returning an error.  Returns the actual tag type set.
307 	 *
308 	 * Status: OPTIONAL
309 	 */
310 	int (* change_queue_type)(struct scsi_device *, int);
311 
312 	/*
313 	 * This function determines the BIOS parameters for a given
314 	 * hard disk.  These tend to be numbers that are made up by
315 	 * the host adapter.  Parameters:
316 	 * size, device, list (heads, sectors, cylinders)
317 	 *
318 	 * Status: OPTIONAL
319 	 */
320 	int (* bios_param)(struct scsi_device *, struct block_device *,
321 			sector_t, int []);
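
	/*
	 * Example (editor's sketch): a classic made-up geometry of 64 heads
	 * and 32 sectors per track; the int[] is filled as
	 * { heads, sectors, cylinders } and the cylinder count is derived
	 * from the capacity (capacity / (64 * 32) == capacity >> 11).
	 *
	 *	static int mydrv_bios_param(struct scsi_device *sdev,
	 *				    struct block_device *bdev,
	 *				    sector_t capacity, int geom[])
	 *	{
	 *		geom[0] = 64;
	 *		geom[1] = 32;
	 *		geom[2] = (unsigned long)capacity >> 11;
	 *		return 0;
	 *	}
	 */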
322 
323 	/*
324 	 * Can be used to export driver statistics and other information
325 	 * to the world outside the kernel, i.e. userspace, and it also
326 	 * provides an interface to feed the driver with information.
327 	 *
328 	 * Status: OBSOLETE
329 	 */
330 	int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);
331 
332 	/*
333 	 * This is an optional routine that allows the transport to become
334 	 * involved when a scsi io timer fires. The return value tells the
335 	 * timer routine how to finish the io timeout handling:
336 	 * EH_HANDLED:		I fixed the error, please complete the command
337 	 * EH_RESET_TIMER:	I need more time, reset the timer and
338 	 *			begin counting again
339 	 * EH_NOT_HANDLED:	Begin normal error recovery
340 	 *
341 	 * Status: OPTIONAL
342 	 */
343 	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
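
	/*
	 * Example (editor's sketch): an eh_timed_out that asks for more time
	 * while the (hypothetical) hardware still owns the command and
	 * otherwise lets normal error recovery begin.  The EH_* names above
	 * correspond to the BLK_EH_* enumerators of blk_eh_timer_return.
	 *
	 *	static enum blk_eh_timer_return
	 *	mydrv_eh_timed_out(struct scsi_cmnd *cmd)
	 *	{
	 *		if (mydrv_cmd_in_flight(cmd))
	 *			return BLK_EH_RESET_TIMER;
	 *		return BLK_EH_NOT_HANDLED;
	 *	}
	 */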
344 
345 	/*
346 	 * Name of proc directory
347 	 */
348 	const char *proc_name;
349 
350 	/*
351 	 * Used to store the procfs directory if a driver implements the
352 	 * proc_info method.
353 	 */
354 	struct proc_dir_entry *proc_dir;
355 
356 	/*
357 	 * This determines if we will use a non-interrupt driven
358 	 * or an interrupt driven scheme.  It is set to the maximum number
359 	 * of simultaneous commands a given host adapter will accept.
360 	 */
361 	int can_queue;
362 
363 	/*
364 	 * In many instances, especially where disconnect / reconnect are
365 	 * supported, our host also has an ID on the SCSI bus.  If this is
366 	 * the case, then it must be reserved.  Please set this_id to -1 if
367 	 * your setup is in single initiator mode, and the host lacks an
368 	 * ID.
369 	 */
370 	int this_id;
371 
372 	/*
373 	 * This determines the degree to which the host adapter is capable
374 	 * of scatter-gather.
375 	 */
376 	unsigned short sg_tablesize;
377 
378 	/*
379 	 * Set this if the host adapter has limitations besides the segment count.
380 	 */
381 	unsigned short max_sectors;
382 
383 	/*
384 	 * DMA scatter gather segment boundary limit. A segment crossing this
385 	 * boundary will be split in two.
386 	 */
387 	unsigned long dma_boundary;
388 
389 	/*
390 	 * This specifies "machine infinity" for host templates which don't
391 	 * limit the transfer size.  Note this limit represents an absolute
392 	 * maximum, and may be over the transfer limits allowed for
393 	 * individual devices (e.g. 256 for SCSI-1).
394 	 */
395 #define SCSI_DEFAULT_MAX_SECTORS	1024
396 
397 	/*
398 	 * True if this host adapter can make good use of linked commands.
399 	 * This will allow more than one command to be queued to a given
400 	 * unit on a given host.  Set this to the maximum number of command
401 	 * blocks to be provided for each device.  Set this to 1 for one
402 	 * command block per lun, 2 for two, etc.  Do not set this to 0.
403 	 * You should make sure that the host adapter will do the right thing
404 	 * before you try setting this above 1.
405 	 */
406 	short cmd_per_lun;
407 
408 	/*
409 	 * present contains a counter indicating how many boards of this
410 	 * type were found when we did the scan.
411 	 */
412 	unsigned char present;
413 
414 	/*
415 	 * This specifies the mode that a LLD supports.
416 	 */
417 	unsigned supported_mode:2;
418 
419 	/*
420 	 * True if this host adapter uses unchecked DMA onto an ISA bus.
421 	 */
422 	unsigned unchecked_isa_dma:1;
423 
424 	/*
425 	 * True if this host adapter can make good use of clustering.
426 	 * I originally thought that if the tablesize was large that it
427 	 * was a waste of CPU cycles to prepare a cluster list, but
428 	 * it works out that the Buslogic is faster if you use a smaller
429 	 * number of segments (i.e. use clustering).  I guess it is
430 	 * inefficient.
431 	 */
432 	unsigned use_clustering:1;
433 
434 	/*
435 	 * True for emulated SCSI host adapters (e.g. ATAPI).
436 	 */
437 	unsigned emulated:1;
438 
439 	/*
440 	 * True if the low-level driver performs its own reset-settle delays.
441 	 */
442 	unsigned skip_settle_delay:1;
443 
444 	/*
445 	 * True if we are using ordered write support.
446 	 */
447 	unsigned ordered_tag:1;
448 
449 	/*
450 	 * Countdown for host blocking with no commands outstanding.
451 	 */
452 	unsigned int max_host_blocked;
453 
454 	/*
455 	 * Default value for the blocking.  If the queue is empty,
456 	 * host_blocked counts down in the request_fn until it reaches
457 	 * zero, at which point host operations are restarted.
458 	 *
459 	 * FIXME: This should probably be a value in the template
460 	 */
461 #define SCSI_DEFAULT_HOST_BLOCKED	7
462 
463 	/*
464 	 * Pointer to the sysfs class properties for this host, NULL terminated.
465 	 */
466 	struct device_attribute **shost_attrs;
467 
468 	/*
469 	 * Pointer to the SCSI device properties for this host, NULL terminated.
470 	 */
471 	struct device_attribute **sdev_attrs;
472 
473 	/*
474 	 * List of hosts per template.
475 	 *
476 	 * This is only for use by scsi_module.c for legacy templates.
477 	 * For these access to it is synchronized implicitly by
478 	 * module_init/module_exit.
479 	 */
480 	struct list_head legacy_hosts;
481 
482 	/*
483 	 * Vendor Identifier associated with the host
484 	 *
485 	 * Note: When specifying vendor_id, be sure to read the
486 	 *   Vendor Type and ID formatting requirements specified in
487 	 *   scsi_netlink.h
488 	 */
489 	u64 vendor_id;
490 };
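
/*
 * Example (editor's sketch): how a driver typically ties the hooks above
 * together in a statically defined template.  Only the fields a driver
 * cares about need to be filled in; all mydrv_* symbols and the numeric
 * limits here are hypothetical.
 *
 *	static struct scsi_host_template mydrv_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "mydrv",
 *		.proc_name		= "mydrv",
 *		.queuecommand		= mydrv_queuecommand,
 *		.eh_abort_handler	= mydrv_eh_abort_handler,
 *		.slave_alloc		= mydrv_slave_alloc,
 *		.slave_configure	= mydrv_slave_configure,
 *		.slave_destroy		= mydrv_slave_destroy,
 *		.change_queue_depth	= mydrv_change_queue_depth,
 *		.bios_param		= mydrv_bios_param,
 *		.can_queue		= 64,
 *		.this_id		= -1,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *		.cmd_per_lun		= 4,
 *		.use_clustering		= ENABLE_CLUSTERING,
 *	};
 */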
491 
492 /*
493  * shost state: If you alter this, you also need to alter scsi_sysfs.c
494  * (for the ascii descriptions) and the state model enforcer:
495  * scsi_host_set_state()
496  */
497 enum scsi_host_state {
498 	SHOST_CREATED = 1,
499 	SHOST_RUNNING,
500 	SHOST_CANCEL,
501 	SHOST_DEL,
502 	SHOST_RECOVERY,
503 	SHOST_CANCEL_RECOVERY,
504 	SHOST_DEL_RECOVERY,
505 };
506 
507 struct Scsi_Host {
508 	/*
509 	 * __devices is protected by the host_lock, but you should
510 	 * usually use scsi_device_lookup / shost_for_each_device
511 	 * to access it without having to care about locking yourself.
512 	 * In the rare case of being in irq context you can use
513 	 * their __ prefixed variants with the lock held. NEVER
514 	 * access this list directly from a driver.
515 	 */
516 	struct list_head	__devices;
517 	struct list_head	__targets;
518 
519 	struct scsi_host_cmd_pool *cmd_pool;
520 	spinlock_t		free_list_lock;
521 	struct list_head	free_list; /* backup store of cmd structs */
522 	struct list_head	starved_list;
523 
524 	spinlock_t		default_lock;
525 	spinlock_t		*host_lock;
526 
527 	struct mutex		scan_mutex;/* serialize scanning activity */
528 
529 	struct list_head	eh_cmd_q;
530 	struct task_struct    * ehandler;  /* Error recovery thread. */
531 	struct completion     * eh_action; /* Wait for specific actions on the
532 					      host. */
533 	wait_queue_head_t       host_wait;
534 	struct scsi_host_template *hostt;
535 	struct scsi_transport_template *transportt;
536 
537 	/*
538 	 * Area to keep a shared tag map (if needed, will be
539 	 * NULL if not).
540 	 */
541 	struct blk_queue_tag	*bqt;
542 
543 	/*
544 	 * The following two fields are protected with host_lock;
545 	 * however, eh routines can safely access during eh processing
546 	 * without acquiring the lock.
547 	 */
548 	unsigned int host_busy;		   /* commands actually active on low-level */
549 	unsigned int host_failed;	   /* commands that failed. */
550 	unsigned int host_eh_scheduled;    /* EH scheduled without command */
551 
552 	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
553 	int resetting; /* if set, it means that last_reset is a valid value */
554 	unsigned long last_reset;
555 
556 	/*
557 	 * These three parameters can be used to allow for wide scsi,
558 	 * and for host adapters that support multiple busses.
559 	 * The first two should be set to 1 more than the actual max id
560 	 * or lun (i.e. 8 for normal systems).
561 	 */
562 	unsigned int max_id;
563 	unsigned int max_lun;
564 	unsigned int max_channel;
565 
566 	/*
567 	 * This is a unique identifier that must be assigned so that we
568 	 * have some way of identifying each detected host adapter properly
569 	 * and uniquely.  For hosts that do not support more than one card
570 	 * in the system at one time, this does not need to be set.  It is
571 	 * initialized to 0 in scsi_register.
572 	 */
573 	unsigned int unique_id;
574 
575 	/*
576 	 * The maximum length of SCSI commands that this host can accept.
577 	 * Probably 12 for most host adapters, but could be 16 for others,
578 	 * or 260 if the driver supports variable length CDBs.
579 	 * For drivers that don't set this field, a value of 12 is
580 	 * assumed.
581 	 */
582 	unsigned short max_cmd_len;
583 
584 	int this_id;
585 	int can_queue;
586 	short cmd_per_lun;
587 	short unsigned int sg_tablesize;
588 	short unsigned int max_sectors;
589 	unsigned long dma_boundary;
590 	/*
591 	 * Used to assign serial numbers to the cmds.
592 	 * Protected by the host lock.
593 	 */
594 	unsigned long cmd_serial_number;
595 
596 	unsigned active_mode:2;
597 	unsigned unchecked_isa_dma:1;
598 	unsigned use_clustering:1;
599 	unsigned use_blk_tcq:1;
600 
601 	/*
602 	 * Host has requested that no further requests come through for the
603 	 * time being.
604 	 */
605 	unsigned host_self_blocked:1;
606 
607 	/*
608 	 * Host uses correct SCSI ordering not PC ordering. The bit is
609 	 * set for the minority of drivers whose authors actually read
610 	 * the spec ;).
611 	 */
612 	unsigned reverse_ordering:1;
613 
614 	/*
615 	 * Ordered write support
616 	 */
617 	unsigned ordered_tag:1;
618 
619 	/* Task mgmt function in progress */
620 	unsigned tmf_in_progress:1;
621 
622 	/* Asynchronous scan in progress */
623 	unsigned async_scan:1;
624 
625 	/*
626 	 * Optional work queue to be utilized by the transport
627 	 */
628 	char work_q_name[20];
629 	struct workqueue_struct *work_q;
630 
631 	/*
632 	 * Host has rejected a command because it was busy.
633 	 */
634 	unsigned int host_blocked;
635 
636 	/*
637 	 * Value host_blocked counts down from
638 	 */
639 	unsigned int max_host_blocked;
640 
641 	/* Protection Information */
642 	unsigned int prot_capabilities;
643 	unsigned char prot_guard_type;
644 
645 	/*
646 	 * q used for scsi_tgt msgs, async events or any other requests that
647 	 * need to be processed in userspace
648 	 */
649 	struct request_queue *uspace_req_q;
650 
651 	/* legacy crap */
652 	unsigned long base;
653 	unsigned long io_port;
654 	unsigned char n_io_port;
655 	unsigned char dma_channel;
656 	unsigned int  irq;
657 
658 
659 	enum scsi_host_state shost_state;
660 
661 	/* ldm bits */
662 	struct device		shost_gendev, shost_dev;
663 
664 	/*
665 	 * List of hosts per template.
666 	 *
667 	 * This is only for use by scsi_module.c for legacy templates.
668 	 * For these access to it is synchronized implicitly by
669 	 * module_init/module_exit.
670 	 */
671 	struct list_head sht_legacy_list;
672 
673 	/*
674 	 * Points to the transport data (if any) which is allocated
675 	 * separately
676 	 */
677 	void *shost_data;
678 
679 	/*
680 	 * We should ensure that this is aligned, both for better performance
681 	 * and also because some compilers (m68k) don't automatically force
682 	 * alignment to a long boundary.
683 	 */
684 	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
685 		__attribute__ ((aligned (sizeof(unsigned long))));
686 };
687 
688 #define		class_to_shost(d)	\
689 	container_of(d, struct Scsi_Host, shost_dev)
690 
691 #define shost_printk(prefix, shost, fmt, a...)	\
692 	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
693 
694 static inline void *shost_priv(struct Scsi_Host *shost)
695 {
696 	return (void *)shost->hostdata;
697 }
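
/*
 * Example (editor's sketch): hostdata[] is sized by the second argument of
 * scsi_host_alloc(), so shost_priv() hands back driver-private storage that
 * lives and dies with the Scsi_Host.  struct mydrv_host and mydrv_template
 * are hypothetical; the host is then registered with scsi_add_host(),
 * scanned with scsi_scan_host(), and torn down again with
 * scsi_remove_host()/scsi_host_put().
 *
 *	struct Scsi_Host *shost;
 *	struct mydrv_host *mh;
 *
 *	shost = scsi_host_alloc(&mydrv_template, sizeof(struct mydrv_host));
 *	if (!shost)
 *		return -ENOMEM;
 *	mh = shost_priv(shost);
 *	mh->shost = shost;
 */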
698 
699 int scsi_is_host_device(const struct device *);
700 
701 static inline struct Scsi_Host *dev_to_shost(struct device *dev)
702 {
703 	while (!scsi_is_host_device(dev)) {
704 		if (!dev->parent)
705 			return NULL;
706 		dev = dev->parent;
707 	}
708 	return container_of(dev, struct Scsi_Host, shost_gendev);
709 }
710 
711 static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
712 {
713 	return shost->shost_state == SHOST_RECOVERY ||
714 		shost->shost_state == SHOST_CANCEL_RECOVERY ||
715 		shost->shost_state == SHOST_DEL_RECOVERY ||
716 		shost->tmf_in_progress;
717 }
718 
719 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
720 extern void scsi_flush_work(struct Scsi_Host *);
721 
722 extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
723 extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
724 extern void scsi_scan_host(struct Scsi_Host *);
725 extern void scsi_rescan_device(struct device *);
726 extern void scsi_remove_host(struct Scsi_Host *);
727 extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
728 extern void scsi_host_put(struct Scsi_Host *t);
729 extern struct Scsi_Host *scsi_host_lookup(unsigned short);
730 extern const char *scsi_host_state_name(enum scsi_host_state);
731 
732 extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
733 
734 static inline struct device *scsi_get_device(struct Scsi_Host *shost)
735 {
736         return shost->shost_gendev.parent;
737 }
738 
739 /**
740  * scsi_host_scan_allowed - Is scanning of this host allowed
741  * @shost:	Pointer to Scsi_Host.
742  **/
743 static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
744 {
745 	return shost->shost_state == SHOST_RUNNING;
746 }
747 
748 extern void scsi_unblock_requests(struct Scsi_Host *);
749 extern void scsi_block_requests(struct Scsi_Host *);
750 
751 struct class_container;
752 
753 extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
754 						void (*) (struct request_queue *));
755 /*
756  * These two functions are used to allocate and free a pseudo device
757  * which will connect to the host adapter itself rather than any
758 	 * physical device.  You must deallocate it when you are done with
759 	 * it.  This physical pseudo-device isn't real and won't be available
760  * from any high-level drivers.
761  */
762 extern void scsi_free_host_dev(struct scsi_device *);
763 extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
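
/*
 * Example (editor's sketch): allocating and releasing the pseudo-device
 * described above, e.g. to send commands addressed to the adapter itself.
 *
 *	struct scsi_device *sdev = scsi_get_host_dev(shost);
 *
 *	if (sdev) {
 *		... issue commands through sdev ...
 *		scsi_free_host_dev(sdev);
 *	}
 */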
764 
765 /*
766  * DIF defines the exchange of protection information between
767  * initiator and SBC block device.
768  *
769  * DIX defines the exchange of protection information between OS and
770  * initiator.
771  */
772 enum scsi_host_prot_capabilities {
773 	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
774 	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
775 	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
776 
777 	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
778 	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
779 	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
780 	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
781 };
782 
783 /*
784  * SCSI hosts which support the Data Integrity Extensions must
785  * indicate their capabilities by setting the prot_capabilities using
786  * this call.
787  */
788 static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
789 {
790 	shost->prot_capabilities = mask;
791 }
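
/*
 * Example (editor's sketch): an HBA capable of T10 DIF Type 1 that can also
 * exchange Type 1 protection information with the OS (DIX) would typically
 * announce both before the host is scanned:
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 */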
792 
793 static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
794 {
795 	return shost->prot_capabilities;
796 }
797 
798 static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
799 {
800 	switch (target_type) {
801 	case 1:
802 		if (shost->prot_capabilities & SHOST_DIF_TYPE1_PROTECTION)
803 			return target_type;
804 	case 2:
805 		if (shost->prot_capabilities & SHOST_DIF_TYPE2_PROTECTION)
806 			return target_type;
807 	case 3:
808 		if (shost->prot_capabilities & SHOST_DIF_TYPE3_PROTECTION)
809 			return target_type;
810 	}
811 
812 	return 0;
813 }
814 
815 static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
816 {
817 #if defined(CONFIG_BLK_DEV_INTEGRITY)
818 	switch (target_type) {
819 	case 0: return shost->prot_capabilities & SHOST_DIX_TYPE0_PROTECTION;
820 	case 1: return shost->prot_capabilities & SHOST_DIX_TYPE1_PROTECTION;
821 	case 2: return shost->prot_capabilities & SHOST_DIX_TYPE2_PROTECTION;
822 	case 3: return shost->prot_capabilities & SHOST_DIX_TYPE3_PROTECTION;
823 	}
824 #endif
825 	return 0;
826 }
827 
828 /*
829  * All DIX-capable initiators must support the T10-mandated CRC
830  * checksum.  Controllers can optionally implement the IP checksum
831  * scheme which has much lower impact on system performance.  Note
832  * that the main rationale for the checksum is to match integrity
833 	 * metadata with data.  Detecting bit errors is a job for ECC memory
834  * and buses.
835  */
836 
837 enum scsi_host_guard_type {
838 	SHOST_DIX_GUARD_CRC = 1 << 0,
839 	SHOST_DIX_GUARD_IP  = 1 << 1,
840 };
841 
842 static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
843 {
844 	shost->prot_guard_type = type;
845 }
846 
847 static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
848 {
849 	return shost->prot_guard_type;
850 }
851 
852 /* legacy interfaces */
853 extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
854 extern void scsi_unregister(struct Scsi_Host *);
855 extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
856 
857 #endif /* _SCSI_SCSI_HOST_H */
858