/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;


#define SG_ALL	SG_CHUNK_SIZE

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02
/**
 * enum scsi_timeout_action - How to handle a command that timed out.
 * @SCSI_EH_DONE: The command has already been completed.
 * @SCSI_EH_RESET_TIMER: Reset the timer and continue waiting for completion.
 * @SCSI_EH_NOT_HANDLED: The command has not yet finished. Abort the command.
 */
enum scsi_timeout_action {
	SCSI_EH_DONE,
	SCSI_EH_RESET_TIMER,
	SCSI_EH_NOT_HANDLED,
};

struct scsi_host_template {
	/*
	 * Put fields referenced in IO submission path together in
	 * same cacheline
	 */

	/*
	 * Additional per-command data allocated for the driver.
	 */
	unsigned int cmd_size;

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD. When the driver has finished
	 * processing the command, the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the driver has accepted the
	 * command. It must also push it to the HBA if the scsi_cmnd
	 * flag SCMD_LAST is set, or if the driver does not implement
	 * commit_rqs. The done() function must be called on the command
	 * when the driver has finished with it. (You may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand.)
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
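
	/*
	 * Example: a minimal queuecommand sketch for a hypothetical
	 * "mydrv" LLDD. The mydrv_host structure and the
	 * mydrv_hw_queue_full()/mydrv_hw_submit() helpers are
	 * assumptions for illustration, not part of this API; the
	 * sketch only shows the accept-or-reject contract above:
	 *
	 *	static int mydrv_queuecommand(struct Scsi_Host *shost,
	 *				      struct scsi_cmnd *cmd)
	 *	{
	 *		struct mydrv_host *mh = shost_priv(shost);
	 *
	 *		if (mydrv_hw_queue_full(mh))
	 *			return SCSI_MLQUEUE_HOST_BUSY;
	 *
	 *		if (mydrv_hw_submit(mh, cmd))
	 *			return SCSI_MLQUEUE_DEVICE_BUSY;
	 *
	 *		return 0;
	 *	}
	 */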

	/*
	 * Queue a reserved command (BLK_MQ_REQ_RESERVED). The .queuecommand()
	 * documentation also applies to the .queue_reserved_command() callback.
	 */
	int (*queue_reserved_command)(struct Scsi_Host *, struct scsi_cmnd *);

	/*
	 * The commit_rqs function is used to trigger a hardware
	 * doorbell after some requests have been queued with
	 * queuecommand, when an error is encountered before sending
	 * the request with SCMD_LAST set.
	 *
	 * STATUS: OPTIONAL
	 */
	void (*commit_rqs)(struct Scsi_Host *, u16);

	struct module *module;
	const char *name;

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit. If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(*info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
		     void __user *arg);


#ifdef CONFIG_COMPAT
	/*
	 * Compat handler. Handle 32bit ABI.
	 * When unknown ioctl is passed return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg);
#endif

	int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
	int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);

	/*
	 * This is an error handling strategy routine. You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases. For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified. Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread. Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine. When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED	(at least one of them)
	 */
	int (*eh_abort_handler)(struct scsi_cmnd *);
	int (*eh_device_reset_handler)(struct scsi_cmnd *);
	int (*eh_target_reset_handler)(struct scsi_cmnd *);
	int (*eh_bus_reset_handler)(struct scsi_cmnd *);
	int (*eh_host_reset_handler)(struct scsi_cmnd *);
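
	/*
	 * Example: a skeletal eh_abort_handler (a sketch; mydrv_abort_cmd()
	 * and struct mydrv_host are hypothetical driver pieces). The handler
	 * runs from the EH thread and reports SUCCESS or FAILED, which are
	 * defined in <scsi/scsi.h>:
	 *
	 *	static int mydrv_eh_abort(struct scsi_cmnd *cmd)
	 *	{
	 *		struct mydrv_host *mh = shost_priv(cmd->device->host);
	 *
	 *		return mydrv_abort_cmd(mh, cmd) ? FAILED : SUCCESS;
	 *	}
	 */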

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver. Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations. This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation: If we didn't find any devices at this ID, you will
	 * get an immediate call to sdev_destroy(). If we find something
	 * here then you will get a call to sdev_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to sdev_destroy(). This is
	 * assuming you implement sdev_configure and sdev_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the sdev_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (*sdev_init)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *. If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device. All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1. Setting the device queue depth. Proper setting of this is
	 *    described in the comments for scsi_change_queue_depth.
	 * 2. Determining if the device supports the various synchronous
	 *    negotiation protocols. The device struct will already have
	 *    responded to INQUIRY and the results of the standard items
	 *    will have been shoved into the various device flag bits, eg.
	 *    device->sdtr will be true if the device supports SDTR messages.
	 * 3. Allocating command structs that the device will need.
	 * 4. Setting the default timeout on this device (if needed).
	 * 5. Anything else the low level driver might want to do on a device
	 *    specific setup basis...
	 * 6. Return 0 on success, non-0 on error. The device will be marked
	 *    as offline on error so that no access will occur. If you return
	 *    non-0, your sdev_destroy routine will never get called for this
	 *    device, so don't leave any loose memory hanging around, clean
	 *    up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 */
	int (*sdev_configure)(struct scsi_device *, struct queue_limits *lim);

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa. The low level driver is responsible for freeing any memory
	 * it allocated in the sdev_init or sdev_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (*sdev_destroy)(struct scsi_device *);
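
	/*
	 * Example: a matched sdev_init/sdev_destroy pair (a sketch; the
	 * mydrv_lun structure is a hypothetical per-LUN driver context
	 * hung off sdev->hostdata):
	 *
	 *	static int mydrv_sdev_init(struct scsi_device *sdev)
	 *	{
	 *		sdev->hostdata = kzalloc(sizeof(struct mydrv_lun),
	 *					 GFP_KERNEL);
	 *		return sdev->hostdata ? 0 : -ENOMEM;
	 *	}
	 *
	 *	static void mydrv_sdev_destroy(struct scsi_device *sdev)
	 *	{
	 *		kfree(sdev->hostdata);
	 *		sdev->hostdata = NULL;
	 *	}
	 */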

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver. Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (*target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Note: This callback is called with the host lock held and hence
	 * must not sleep.
	 *
	 * Status: OPTIONAL
	 */
	void (*target_destroy)(struct scsi_target *);

	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host(). This function will then be called
	 * periodically, with the scsi_host and the elapsed time of the
	 * scan in jiffies, until it returns 1 to indicate that the scan
	 * has finished.
	 *
	 * Status: OPTIONAL
	 */
	int (*scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has set up ready for the scan, it can fill
	 * in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (*scan_start)(struct Scsi_Host *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis). Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error. An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it. If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (*change_queue_depth)(struct scsi_device *, int);
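
	/*
	 * Example: most drivers can implement this by clamping the request
	 * to a hardware limit and delegating to scsi_change_queue_depth()
	 * (a sketch; MYDRV_MAX_QD is a hypothetical adapter maximum):
	 *
	 *	static int mydrv_change_queue_depth(struct scsi_device *sdev,
	 *					    int depth)
	 *	{
	 *		if (depth > MYDRV_MAX_QD)
	 *			depth = MYDRV_MAX_QD;
	 *		return scsi_change_queue_depth(sdev, depth);
	 *	}
	 */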

	/*
	 * This function lets the driver expose the queue mapping
	 * to the block layer.
	 *
	 * Status: OPTIONAL
	 */
	void (*map_queues)(struct Scsi_Host *shost);
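
	/*
	 * Example: a driver with a single queue map can fall back to the
	 * block layer's default CPU-to-queue spreading (a sketch):
	 *
	 *	static void mydrv_map_queues(struct Scsi_Host *shost)
	 *	{
	 *		blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
	 *	}
	 */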

	/*
	 * SCSI interface of blk_poll - poll for IO completions.
	 * Only applicable if SCSI LLD exposes multiple h/w queues.
	 *
	 * Return value: Number of completed entries found.
	 *
	 * Status: OPTIONAL
	 */
	int (*mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);

	/*
	 * Check if scatterlists need to be padded for DMA draining.
	 *
	 * Status: OPTIONAL
	 */
	bool (*dma_need_drain)(struct request *rq);

	/*
	 * This function determines the BIOS parameters for a given
	 * hard disk. These tend to be numbers that are made up by
	 * the host adapter. Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (*bios_param)(struct scsi_device *, struct gendisk *,
			  sector_t, int []);
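
	/*
	 * Example: a common made-up geometry of 64 heads and 32 sectors
	 * per track, with cylinders derived from the capacity via
	 * sector_div() (a sketch; many adapters report similar values):
	 *
	 *	static int mydrv_bios_param(struct scsi_device *sdev,
	 *				    struct gendisk *disk,
	 *				    sector_t capacity, int geom[])
	 *	{
	 *		geom[0] = 64;
	 *		geom[1] = 32;
	 *		sector_div(capacity, 64 * 32);
	 *		geom[2] = capacity;
	 *		return 0;
	 *	}
	 */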

	/*
	 * This function is called when one or more partitions on the
	 * device reach beyond the end of the device.
	 *
	 * Status: OPTIONAL
	 */
	void (*unlock_native_capacity)(struct scsi_device *);

	/*
	 * Can be used to export driver statistics and other infos to the
	 * world outside the kernel, i.e. userspace, and it also provides an
	 * interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*show_info)(struct seq_file *, struct Scsi_Host *);
	int (*write_info)(struct Scsi_Host *, char *, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires. The return value tells the
	 * timer routine how to finish the io timeout handling.
	 *
	 * Status: OPTIONAL
	 */
	enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *);
	/*
	 * Optional routine that allows the transport to decide if a cmd
	 * is retryable. Return true if the transport is in a state in
	 * which the cmd should be retried.
	 */
	bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);

	/* This is an optional routine that allows transport to initiate
	 * LLD adapter or firmware reset using sysfs attribute.
	 *
	 * Return values: 0 on success, a negative value on failure.
	 *
	 * Status: OPTIONAL
	 */

	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET 1
#define SCSI_FIRMWARE_RESET 2
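
	/*
	 * Example: dispatching on the reset type (a sketch; the
	 * mydrv_reset_adapter()/mydrv_reset_firmware() helpers are
	 * hypothetical):
	 *
	 *	static int mydrv_host_reset(struct Scsi_Host *shost, int type)
	 *	{
	 *		struct mydrv_host *mh = shost_priv(shost);
	 *
	 *		switch (type) {
	 *		case SCSI_ADAPTER_RESET:
	 *			return mydrv_reset_adapter(mh);
	 *		case SCSI_FIRMWARE_RESET:
	 *			return mydrv_reset_firmware(mh);
	 *		default:
	 *			return -EOPNOTSUPP;
	 *		}
	 *	}
	 */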


	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme. It is set to the maximum number
	 * of simultaneous commands a single hardware queue in the HBA will
	 * accept, excluding internal commands.
	 */
	int can_queue;

	/*
	 * This determines how many commands the HBA will set aside
	 * for internal commands. This number will be added to
	 * @can_queue to calculate the maximum number of simultaneous
	 * commands sent to the host.
	 */
	int nr_reserved_cmds;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus. If this is
	 * the case, then it must be reserved. Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;
	unsigned short sg_prot_tablesize;

	/*
	 * Set this if the host adapter has limitations beside segment count.
	 */
	unsigned int max_sectors;

	/*
	 * Maximum size in bytes of a single segment.
	 */
	unsigned int max_segment_size;

	unsigned int dma_alignment;

	/*
	 * DMA scatter gather segment boundary limit. A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	unsigned long virt_boundary_mask;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size. Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host. Set this to the maximum number of command
	 * blocks to be provided for each device. Set this to 1 for one
	 * command block per lun, 2 for two, etc. Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * Allocate tags starting from last allocated tag.
	 */
	bool tag_alloc_policy_rr : 1;

	/*
	 * Track QUEUE_FULL events and reduce queue depth on demand.
	 */
	unsigned track_queue_depth:1;

	/*
	 * This specifies the mode that a LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/* True if the controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the host uses host-wide tagspace */
	unsigned host_tagset:1;

	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
	unsigned queuecommand_may_block:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking. If the queue is empty,
	 * host_blocked counts down in the request_fn until it reaches
	 * zero, at which point host operations are restarted.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
	 */
	const struct attribute_group **shost_groups;

	/*
	 * Pointer to the SCSI device attribute groups for this host,
	 * NULL terminated.
	 */
	const struct attribute_group **sdev_groups;

	/*
	 * Vendor Identifier associated with the host
	 *
	 * Note: When specifying vendor_id, be sure to read the
	 * Vendor Type and ID formatting requirements specified in
	 * scsi_netlink.h
	 */
	u64 vendor_id;
};
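
/*
 * Example: a minimal template for a hypothetical "mydrv" LLDD (a sketch;
 * the mydrv_* callbacks and mydrv_cmd_priv structure are the same
 * illustrative assumptions used in the examples above, not part of this
 * header):
 *
 *	static const struct scsi_host_template mydrv_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "mydrv",
 *		.proc_name		= "mydrv",
 *		.queuecommand		= mydrv_queuecommand,
 *		.eh_abort_handler	= mydrv_eh_abort,
 *		.can_queue		= 64,
 *		.this_id		= -1,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *		.cmd_per_lun		= 8,
 *		.cmd_size		= sizeof(struct mydrv_cmd_priv),
 *	};
 */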

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 *
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		rc = func_name##_lck(cmd);				\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}
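
/*
 * Example: a legacy driver defines a hypothetical
 * mydrv_queuecommand_lck() that expects to run under the host lock,
 * then generates the unlocked-prototype wrapper with:
 *
 *	static int mydrv_queuecommand_lck(struct scsi_cmnd *cmd)
 *	{
 *		...issue cmd while holding the host lock...
 *	}
 *
 *	static DEF_SCSI_QCMD(mydrv_queuecommand)
 *
 * This expands to a mydrv_queuecommand() that takes shost->host_lock
 * around the _lck variant, matching the queuecommand prototype.
 */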


/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held. NEVER
	 * access this list directly from a driver.
	 */
	struct list_head	__devices;
	struct list_head	__targets;

	struct list_head	starved_list;

	spinlock_t		default_lock;
	spinlock_t		*host_lock;

	struct mutex		scan_mutex;	/* serialize scanning activity */

	struct list_head	eh_abort_list;
	struct list_head	eh_cmd_q;
	struct task_struct	*ehandler;	/* Error recovery thread. */
	struct completion	*eh_action;	/* Wait for specific actions on the
						   host. */
	wait_queue_head_t	host_wait;
	const struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	struct kref		tagset_refcnt;
	struct completion	tagset_freed;
	/* Area to keep a shared tag map */
	struct blk_mq_tag_set	tag_set;

	atomic_t host_blocked;

	unsigned int host_failed;	   /* commands that failed.
					      protected by host_lock */
	unsigned int host_eh_scheduled;    /* EH scheduled without command */

	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;


	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely. For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set. It is
	 * initialized to 0 in scsi_host_alloc.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others,
	 * or 260 if the driver supports variable length cdbs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	int this_id;

	/*
	 * Number of commands this host can handle at the same time.
	 * This excludes reserved commands as specified by nr_reserved_cmds.
	 */
	int can_queue;
	/*
	 * Number of reserved commands to allocate, if any.
	 */
	unsigned int nr_reserved_cmds;

	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned int opt_sectors;
	unsigned int max_segment_size;
	unsigned int dma_alignment;
	unsigned long dma_boundary;
	unsigned long virt_boundary_mask;
	/*
	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
	 *
	 * Note: it is assumed that each hardware queue has a queue depth of
	 * can_queue. In other words, the total queue depth per host
	 * is nr_hw_queues * can_queue. However, when host_tagset is set,
	 * the total queue depth is can_queue.
	 */
	unsigned nr_hw_queues;
	unsigned nr_maps;
	unsigned active_mode:2;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;).
	 */
	unsigned reverse_ordering:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the host uses host-wide tagspace */
	unsigned host_tagset:1;

	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
	unsigned queuecommand_may_block:1;

	/* Host responded with short (<36 bytes) INQUIRY result */
	unsigned short_inquiry:1;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;
	unsigned char prot_guard_type;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int  irq;


	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device		shost_gendev, shost_dev;

	/*
	 * A SCSI device structure used for sending internal commands to the
	 * HBA. There is no corresponding logical unit inside the SCSI device.
	 */
	struct scsi_device *pseudo_sdev;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	/* Delay for runtime autosuspend */
	int rpm_autosuspend_delay;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[]	/* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};

#define		class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}
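
/*
 * Example: the hostdata[] area is sized via the second argument of
 * scsi_host_alloc() and retrieved with shost_priv() (a sketch; the
 * mydrv_host structure is hypothetical):
 *
 *	struct Scsi_Host *shost =
 *		scsi_host_alloc(&mydrv_template, sizeof(struct mydrv_host));
 *	struct mydrv_host *mh = shost_priv(shost);
 */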

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
		shost->shost_state == SHOST_CANCEL_RECOVERY ||
		shost->shost_state == SHOST_DEL_RECOVERY ||
		shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
					       struct device *,
					       struct device *);
#if defined(CONFIG_SCSI_PROC_FS)
struct proc_dir_entry *
scsi_template_proc_dir(const struct scsi_host_template *sht);
#else
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
extern int scsi_resume_device(struct scsi_device *sdev);
extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
					    enum scsi_host_status status);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
					     struct device *dev)
{
	return scsi_add_host_with_dma(host, dev, dev);
}
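
/*
 * Example: the usual probe-time sequence for registering a host (a
 * sketch with error handling trimmed; mydrv_template, mydrv_host and
 * the pdev parent device are assumptions):
 *
 *	shost = scsi_host_alloc(&mydrv_template, sizeof(struct mydrv_host));
 *	if (!shost)
 *		return -ENOMEM;
 *	...initialize shost_priv(shost), shost->max_id, etc...
 *	if (scsi_add_host(shost, &pdev->dev))
 *		goto put_host;
 *	scsi_scan_host(shost);
 *
 * Teardown reverses this: scsi_remove_host(), then scsi_host_put().
 */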

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:	Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING ||
	       shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
extern int scsi_host_block(struct Scsi_Host *shost);
extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);

void scsi_host_busy_iter(struct Scsi_Host *,
			 bool (*fn)(struct scsi_cmnd *, void *), void *priv);

struct class_container;

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
	shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
	static unsigned char cap[] = { 0,
				       SHOST_DIF_TYPE1_PROTECTION,
				       SHOST_DIF_TYPE2_PROTECTION,
				       SHOST_DIF_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
				       SHOST_DIX_TYPE1_PROTECTION,
				       SHOST_DIX_TYPE2_PROTECTION,
				       SHOST_DIX_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type];
#endif
	return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum. Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance. Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data. Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
	SHOST_DIX_GUARD_CRC = 1 << 0,
	SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
	shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
	return shost->prot_guard_type;
}
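
/*
 * Example: a DIF/DIX-capable driver typically advertises its protection
 * capabilities and guard type at probe time, before scsi_scan_host()
 * (a sketch):
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */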

extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */