/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include "ring.h"
#include "../grant_table.h"

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */

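/*
 * Illustrative sketch (not part of this interface): a backend draining the
 * request ring with the hold-off mechanism.  Assumes ring points to a
 * struct blkif_back_ring (generated by DEFINE_RING_TYPES() below) already
 * set up with BACK_RING_INIT(); process_request() is a hypothetical helper,
 * and a real backend also needs the appropriate read memory barrier before
 * consuming newly produced entries.
 *
 *	int more_to_do;
 *
 *	do {
 *		RING_IDX rc = ring->req_cons;
 *		while (rc != ring->sring->req_prod) {
 *			struct blkif_request req = *RING_GET_REQUEST(ring, rc);
 *			ring->req_cons = ++rc;
 *			process_request(&req);
 *		}
 *		// Re-arms sring->req_event and reports whether more requests
 *		// arrived in the meantime, avoiding a lost-notification race.
 *		RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
 *	} while (more_to_do);
 */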
typedef uint16_t blkif_vdev_t;
typedef uint64_t blkif_sector_t;

/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ              0
#define BLKIF_OP_WRITE             1
/*
 * Recognised only if "feature-barrier" is present in backend xenbus info.
 * The "feature-barrier" node contains a boolean indicating whether barrier
 * requests are likely to succeed or fail. Either way, a barrier request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt barrier requests.
 * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
 * create the "feature-barrier" node!
 */
#define BLKIF_OP_WRITE_BARRIER     2
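/*
 * Illustrative sketch (not part of this interface): a Linux frontend might
 * probe the node roughly as follows.  xenbus_gather(), dev and the
 * feature_barrier flag are assumptions of this example, not part of the
 * protocol.
 *
 *	int err, barrier = 0;
 *
 *	err = xenbus_gather(XBT_NIL, dev->otherend,
 *			    "feature-barrier", "%d", &barrier, NULL);
 *	// An absent node is treated like 0: do not attempt barrier requests.
 *	feature_barrier = !err && barrier;
 */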

/*
 * Recognised if "feature-flush-cache" is present in backend xenbus
 * info.  A flush will ask the underlying storage hardware to flush its
 * non-volatile caches as appropriate.  The "feature-flush-cache" node
 * contains a boolean indicating whether flush requests are likely to
 * succeed or fail. Either way, a flush request may fail at any time
 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
 * block-device hardware. The boolean simply indicates whether or not it
 * is worthwhile for the frontend to attempt flushes.  If a backend does
 * not recognise BLKIF_OP_FLUSH_DISKCACHE, it should *not* create the
 * "feature-flush-cache" node!
 */
#define BLKIF_OP_FLUSH_DISKCACHE   3
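/*
 * Illustrative sketch (not part of this interface): a frontend that
 * understands both cache-control operations might choose between them
 * roughly as follows.  xenbus_gather(), dev and flush_op are assumptions
 * of this example.
 *
 *	int flush = 0, barrier = 0;
 *	unsigned int flush_op = 0;	// 0: issue no cache-control requests
 *
 *	if (!xenbus_gather(XBT_NIL, dev->otherend,
 *			   "feature-flush-cache", "%d", &flush, NULL) && flush)
 *		flush_op = BLKIF_OP_FLUSH_DISKCACHE;
 *	else if (!xenbus_gather(XBT_NIL, dev->otherend,
 *				"feature-barrier", "%d", &barrier, NULL) && barrier)
 *		flush_op = BLKIF_OP_WRITE_BARRIER;
 */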

/*
 * Recognised only if "feature-discard" is present in backend xenbus info.
 * The "feature-discard" node contains a boolean indicating whether trim
 * (ATA) or unmap (SCSI) requests - conveniently called discard requests -
 * are likely to succeed or fail. Either way, a discard request may fail
 * at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the
 * underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt discard requests.
 * If a backend does not recognise BLKIF_OP_DISCARD, it should *not*
 * create the "feature-discard" node!
 *
 * A discard operation asks the underlying block device to mark extents
 * for erasure. However, discard does not guarantee that the blocks will
 * be erased from the device - it is just a hint to the device controller
 * that these blocks are no longer in use. What the device controller does
 * with that information is left to the controller.
 * A discard request carries sector_number, the sector index at which to
 * begin discarding, and nr_sectors, the number of sectors to be discarded.
 * The specified sectors should be discarded if the underlying block device
 * supports trim (ATA) or unmap (SCSI) operations, or BLKIF_RSP_EOPNOTSUPP
 * should be returned.
 * More information about trim/unmap operations at:
 * http://t13.org/Documents/UploadedDocuments/docs2008/
 *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
 * http://www.seagate.com/staticfiles/support/disc/manuals/
 *     Interface%20manuals/100293068c.pdf
 */
#define BLKIF_OP_DISCARD           5

/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

struct blkif_request_rw {
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment {
		grant_ref_t gref;        /* reference to I/O buffer frame        */
		/* @first_sect: first sector in frame to transfer (inclusive).   */
		/* @last_sect: last sector in frame to transfer (inclusive).     */
		uint8_t     first_sect, last_sect;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

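/*
 * Illustrative sketch (not part of this interface): filling one segment of
 * a read request for a whole 4 KiB frame, assuming 512-byte sectors (so the
 * frame spans sectors 0-7).  backend_id, pfn and start_sector are
 * assumptions of this example; gnttab_grant_foreign_access() is the Linux
 * grant-table helper, other environments will differ.
 *
 *	struct blkif_request_rw *rw = &req.u.rw;
 *
 *	rw->sector_number = start_sector;	// absolute sector on the vdev
 *	// Grant writable access so the backend can fill the frame with data.
 *	rw->seg[0].gref = gnttab_grant_foreign_access(backend_id, pfn, 0);
 *	rw->seg[0].first_sect = 0;		// transfer the whole frame:
 *	rw->seg[0].last_sect  = 7;		// sectors 0..7 inclusive
 */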
struct blkif_request_discard {
	blkif_sector_t sector_number;
	uint64_t nr_sectors;
};

struct blkif_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	union {
		struct blkif_request_rw rw;
		struct blkif_request_discard discard;
	} u;
};

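/*
 * Illustrative sketch (not part of this interface): building a discard
 * request.  req_id, first_sector and nr are assumptions of this example;
 * ring-slot handling is shown separately after DEFINE_RING_TYPES() below.
 *
 *	struct blkif_request req;
 *
 *	req.operation = BLKIF_OP_DISCARD;
 *	req.nr_segments = 0;		// a discard carries no data segments
 *	req.id = req_id;		// echoed back in the response
 *	req.u.discard.sector_number = first_sector;
 *	req.u.discard.nr_sectors = nr;
 */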
struct blkif_response {
	uint64_t        id;              /* copied from request */
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};

/*
 * STATUS RETURN CODES.
 */
 /* Operation not supported (only happens on barrier, flush or discard
  * requests). */
#define BLKIF_RSP_EOPNOTSUPP  -2
 /* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
 /* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0

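/*
 * Illustrative sketch (not part of this interface): a frontend inspecting a
 * completed response.  feature_flush is a hypothetical frontend flag.
 *
 *	switch (rsp->status) {
 *	case BLKIF_RSP_OKAY:
 *		break;
 *	case BLKIF_RSP_EOPNOTSUPP:
 *		// e.g. a cache flush the backend cannot honour: stop issuing
 *		// that operation and complete the request without it.
 *		if (rsp->operation == BLKIF_OP_FLUSH_DISKCACHE)
 *			feature_flush = 0;
 *		break;
 *	default:			// BLKIF_RSP_ERROR
 *		// fail the I/O with -EIO or equivalent
 *		break;
 *	}
 */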
/*
 * Generate blkif ring structures and types.
 */

DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);

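/*
 * Illustrative sketch (not part of this interface): frontend ring setup and
 * request submission with the generated types and the macros from ring.h.
 * Allocating shared_page, granting it to the backend and binding irq to the
 * event channel are omitted and assumed here.
 *
 *	struct blkif_sring *sring = (struct blkif_sring *)shared_page;
 *	struct blkif_front_ring front;
 *	struct blkif_request *req;
 *	int notify;
 *
 *	SHARED_RING_INIT(sring);
 *	FRONT_RING_INIT(&front, sring, PAGE_SIZE);
 *
 *	req = RING_GET_REQUEST(&front, front.req_prod_pvt);
 *	// ... fill *req as in the sketches above ...
 *	front.req_prod_pvt++;
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 */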
#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4

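/*
 * Illustrative sketch (not part of this interface): the Linux frontend
 * learns these flags from the backend's "info" xenbus node; gd is a
 * hypothetical struct gendisk pointer.
 *
 *	unsigned int binfo = 0;
 *
 *	xenbus_gather(XBT_NIL, dev->otherend, "info", "%u", &binfo, NULL);
 *	if (binfo & VDISK_CDROM)
 *		gd->flags |= GENHD_FL_CD | GENHD_FL_REMOVABLE;
 *	if (binfo & VDISK_READONLY)
 *		set_disk_ro(gd, 1);
 */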
/* Xen-defined major numbers for virtual disks; they look strangely
 * familiar. */
#define XEN_IDE0_MAJOR	3
#define XEN_IDE1_MAJOR	22
#define XEN_SCSI_DISK0_MAJOR	8
#define XEN_SCSI_DISK1_MAJOR	65
#define XEN_SCSI_DISK2_MAJOR	66
#define XEN_SCSI_DISK3_MAJOR	67
#define XEN_SCSI_DISK4_MAJOR	68
#define XEN_SCSI_DISK5_MAJOR	69
#define XEN_SCSI_DISK6_MAJOR	70
#define XEN_SCSI_DISK7_MAJOR	71
#define XEN_SCSI_DISK8_MAJOR	128
#define XEN_SCSI_DISK9_MAJOR	129
#define XEN_SCSI_DISK10_MAJOR	130
#define XEN_SCSI_DISK11_MAJOR	131
#define XEN_SCSI_DISK12_MAJOR	132
#define XEN_SCSI_DISK13_MAJOR	133
#define XEN_SCSI_DISK14_MAJOR	134
#define XEN_SCSI_DISK15_MAJOR	135

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */