/* SPDX-License-Identifier: MIT */
/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include <xen/interface/io/ring.h>
#include <xen/interface/grant_table.h>

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */
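
/*
 * Illustrative sketch only (not part of the interface): a frontend
 * enqueuing a request with the hold-off mechanism, assuming a frontend
 * ring "ring" of the blkif_front_ring_t type generated by
 * DEFINE_RING_TYPES() below, and an already-bound irq "irq":
 *
 *	struct blkif_request *req;
 *	int notify;
 *
 *	req = RING_GET_REQUEST(&ring, ring.req_prod_pvt);
 *	// ... fill in *req ...
 *	ring.req_prod_pvt++;
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 */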

typedef uint16_t blkif_vdev_t;
typedef uint64_t blkif_sector_t;

/*
 * Multiple hardware queues/rings:
 * If supported, the backend will write the key "multi-queue-max-queues" to
 * the directory for that vbd, and set its value to the maximum supported
 * number of queues.
 * Frontends that are aware of this feature and wish to use it can write the
 * key "multi-queue-num-queues" with the number they wish to use, which must be
 * greater than zero, and no more than the value reported by the backend in
 * "multi-queue-max-queues".
 *
 * For frontends requesting just one queue, the usual event-channel and
 * ring-ref keys are written as before, simplifying the backend processing
 * to avoid distinguishing between a frontend that doesn't understand the
 * multi-queue feature, and one that does, but requested only one queue.
 *
 * Frontends requesting two or more queues must not write the toplevel
 * event-channel and ring-ref keys, instead writing those keys under sub-keys
 * having the name "queue-N" where N is the integer ID of the queue/ring to
 * which those keys belong. Queues are indexed from zero.
 * For example, a frontend with two queues must write the following set of
 * queue-related keys:
 *
 * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
 * /local/domain/1/device/vbd/0/queue-0 = ""
 * /local/domain/1/device/vbd/0/queue-0/ring-ref = "<ring-ref#0>"
 * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
 * /local/domain/1/device/vbd/0/queue-1 = ""
 * /local/domain/1/device/vbd/0/queue-1/ring-ref = "<ring-ref#1>"
 * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
 *
 * It is also possible to use multiple queues/rings together with the
 * multi-page ring buffer feature.
 * For example, a frontend that requests two queues/rings, each with a
 * ring buffer of two pages, must write the following set of related keys:
 *
 * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
 * /local/domain/1/device/vbd/0/ring-page-order = "1"
 * /local/domain/1/device/vbd/0/queue-0 = ""
 * /local/domain/1/device/vbd/0/queue-0/ring-ref0 = "<ring-ref#0>"
 * /local/domain/1/device/vbd/0/queue-0/ring-ref1 = "<ring-ref#1>"
 * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
 * /local/domain/1/device/vbd/0/queue-1 = ""
 * /local/domain/1/device/vbd/0/queue-1/ring-ref0 = "<ring-ref#2>"
 * /local/domain/1/device/vbd/0/queue-1/ring-ref1 = "<ring-ref#3>"
 * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
 *
 */
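
/*
 * Illustrative sketch only: a Linux frontend writing the per-queue keys
 * shown above with xenbus_printf(). "dev", "xbt", "nr_queues",
 * "ring_ref[]" and "evtchn[]" are assumed to be set up elsewhere:
 *
 *	char node[32];
 *	unsigned int i;
 *
 *	xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
 *		      "%u", nr_queues);
 *	for (i = 0; i < nr_queues; i++) {
 *		snprintf(node, sizeof(node), "queue-%u/ring-ref", i);
 *		xenbus_printf(xbt, dev->nodename, node, "%u", ring_ref[i]);
 *		snprintf(node, sizeof(node), "queue-%u/event-channel", i);
 *		xenbus_printf(xbt, dev->nodename, node, "%u", evtchn[i]);
 *	}
 */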

/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ              0
#define BLKIF_OP_WRITE             1
/*
 * Recognised only if "feature-barrier" is present in backend xenbus info.
 * The "feature-barrier" node contains a boolean indicating whether barrier
 * requests are likely to succeed or fail. Either way, a barrier request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt barrier requests.
 * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
 * create the "feature-barrier" node!
 */
#define BLKIF_OP_WRITE_BARRIER     2

/*
 * Recognised if "feature-flush-cache" is present in backend xenbus
 * info.  A flush will ask the underlying storage hardware to flush its
 * non-volatile caches as appropriate.  The "feature-flush-cache" node
 * contains a boolean indicating whether flush requests are likely to
 * succeed or fail. Either way, a flush request may fail at any time
 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
 * block-device hardware. The boolean simply indicates whether or not it
 * is worthwhile for the frontend to attempt flushes.  If a backend does
 * not recognise BLKIF_OP_FLUSH_DISKCACHE, it should *not* create the
 * "feature-flush-cache" node!
 */
#define BLKIF_OP_FLUSH_DISKCACHE   3
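
/*
 * Illustrative sketch only: a Linux frontend probing this feature with
 * xenbus_read_unsigned(), assuming "dev" is the frontend's
 * struct xenbus_device:
 *
 *	if (xenbus_read_unsigned(dev->otherend, "feature-flush-cache", 0)) {
 *		// flush requests are worth attempting
 *	}
 */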

/*
 * Recognised only if "feature-discard" is present in backend xenbus info.
 * The "feature-discard" node contains a boolean indicating whether trim
 * (ATA) or unmap (SCSI) requests - conveniently called discard requests -
 * are likely to succeed or fail. Either way, a discard request may fail
 * at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the
 * underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt discard requests.
 * If a backend does not recognise BLKIF_OP_DISCARD, it should *not*
 * create the "feature-discard" node!
 *
 * A discard operation is a request for the underlying block device to mark
 * extents to be erased. However, discard does not guarantee that the blocks
 * will be erased from the device - it is just a hint to the device
 * controller that these blocks are no longer in use. What the device
 * controller does with that information is left to the controller.
 * Discard operations are passed with sector_number as the sector index
 * at which to begin the discard and nr_sectors as the number of sectors
 * to be discarded. The specified sectors should be discarded if the
 * underlying block device supports trim (ATA) or unmap (SCSI) operations,
 * or BLKIF_RSP_EOPNOTSUPP should be returned.
 * More information about trim/unmap operations at:
 * http://t13.org/Documents/UploadedDocuments/docs2008/
 *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
 * http://www.seagate.com/staticfiles/support/disc/manuals/
 *     Interface%20manuals/100293068c.pdf
 * The backend can optionally provide three extra XenBus attributes to
 * further optimize the discard functionality:
 * 'discard-alignment' - Devices that support discard functionality may
 * internally allocate space in units that are bigger than the exported
 * logical block size. The discard-alignment parameter indicates how many bytes
 * the beginning of the partition is offset from the internal allocation unit's
 * natural alignment.
 * 'discard-granularity' - Devices that support discard functionality may
 * internally allocate space using units that are bigger than the logical block
 * size. The discard-granularity parameter indicates the size of the internal
 * allocation unit in bytes if reported by the device. Otherwise the
 * discard-granularity will be set to match the device's physical block size.
 * 'discard-secure' - All copies of the discarded sectors (potentially created
 * by garbage collection) must also be erased. To use this feature, the flag
 * BLKIF_DISCARD_SECURE must be set in struct blkif_request_discard.
 */
#define BLKIF_OP_DISCARD           5
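
/*
 * Illustrative sketch only: filling in a discard request (see struct
 * blkif_request_discard below), assuming "req" points at a free slot on
 * the ring, "id" is a private guest value, and "secure" reflects the
 * backend's "discard-secure" node:
 *
 *	req->operation = BLKIF_OP_DISCARD;
 *	req->u.discard.flag = secure ? BLKIF_DISCARD_SECURE : 0;
 *	req->u.discard.id = id;
 *	req->u.discard.sector_number = start_sector;
 *	req->u.discard.nr_sectors = nr_sectors;
 */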

/*
 * Recognized if "feature-max-indirect-segments" is present in the backend
 * xenbus info. The "feature-max-indirect-segments" node contains the maximum
 * number of segments allowed by the backend per request. If the node is
 * present, the frontend might use blkif_request_indirect structs in order to
 * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
 * maximum number of indirect segments is fixed by the backend, but the
 * frontend can issue requests with any number of indirect segments as long as
 * it's less than the number provided by the backend. The indirect_grefs field
 * in blkif_request_indirect should be filled by the frontend with the
 * grant references of the pages that are holding the indirect segments.
 * These pages are filled with an array of blkif_request_segment that hold the
 * information about the segments. The number of indirect pages to use is
 * determined by the number of segments an indirect request contains. Every
 * indirect page can contain a maximum of
 * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
 * calculate the number of indirect pages to use we have to do
 * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
 *
 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
 * create the "feature-max-indirect-segments" node!
 */
#define BLKIF_OP_INDIRECT          6
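
/*
 * Illustrative sketch only: computing the number of indirect pages for a
 * request with "nr_segments" segments, per the ceil() formula above.
 * GRANTS_PER_INDIRECT_FRAME is a name chosen here for illustration:
 *
 *	#define GRANTS_PER_INDIRECT_FRAME \
 *		(PAGE_SIZE / sizeof(struct blkif_request_segment))
 *
 *	nr_indirect_pages = DIV_ROUND_UP(nr_segments,
 *					 GRANTS_PER_INDIRECT_FRAME);
 */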

/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8

struct blkif_request_segment {
	grant_ref_t gref;        /* reference to I/O buffer frame        */
	/* @first_sect: first sector in frame to transfer (inclusive).   */
	/* @last_sect: last sector in frame to transfer (inclusive).     */
	uint8_t     first_sect, last_sect;
};
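
/*
 * Illustrative sketch only: sectors here are 512-byte units, so with
 * 4KiB pages a segment covering a whole granted frame is encoded as:
 *
 *	seg->gref = gref;	// grant reference of the data page
 *	seg->first_sect = 0;
 *	seg->last_sect = 7;	// (PAGE_SIZE >> 9) - 1; inclusive
 */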

struct blkif_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t       _pad1;	     /* offsetof(blkif_request,u.rw.id) == 8 */
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero.        */
#define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0          */
	blkif_vdev_t   _pad1;        /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;
	uint64_t       nr_sectors;
	uint8_t        _pad3;
} __attribute__((__packed__));

struct blkif_request_other {
	uint8_t      _pad1;
	blkif_vdev_t _pad2;        /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t     _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
#endif
	uint64_t     id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
#ifndef CONFIG_X86_32
	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
#endif
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifndef CONFIG_X86_32
	uint32_t      _pad3;         /* make it 64 byte aligned */
#else
	uint64_t      _pad3;         /* make it 64 byte aligned */
#endif
} __attribute__((__packed__));

struct blkif_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_request_rw rw;
		struct blkif_request_discard discard;
		struct blkif_request_other other;
		struct blkif_request_indirect indirect;
	} u;
} __attribute__((__packed__));

struct blkif_response {
	uint64_t        id;              /* copied from request */
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};

/*
 * STATUS RETURN CODES.
 */
 /* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP  -2
 /* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
 /* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0
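
/*
 * Illustrative sketch only: a frontend consuming responses with the
 * hold-off mechanism, assuming a blkif_front_ring_t "ring":
 *
 *	RING_IDX i, rp;
 *	int more;
 *
 *	do {
 *		rp = ring.sring->rsp_prod;
 *		rmb();	// read responses only after reading rsp_prod
 *		for (i = ring.rsp_cons; i != rp; i++) {
 *			struct blkif_response *rsp =
 *				RING_GET_RESPONSE(&ring, i);
 *			if (rsp->status != BLKIF_RSP_OKAY)
 *				; // handle BLKIF_RSP_ERROR/_EOPNOTSUPP
 *		}
 *		ring.rsp_cons = i;
 *		RING_FINAL_CHECK_FOR_RESPONSES(&ring, more);
 *	} while (more);
 */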

/*
 * Generate blkif ring structures and types.
 */

DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
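
/*
 * Illustrative sketch only: single-page ring setup on the frontend using
 * the types generated above, before granting "sring" to the backend:
 *
 *	struct blkif_sring *sring;
 *	struct blkif_front_ring ring;
 *
 *	sring = (struct blkif_sring *)get_zeroed_page(GFP_NOIO);
 *	SHARED_RING_INIT(sring);
 *	FRONT_RING_INIT(&ring, sring, PAGE_SIZE);
 */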

#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4

/* Xen-defined major numbers for virtual disks; they look strangely
 * familiar. */
#define XEN_IDE0_MAJOR	3
#define XEN_IDE1_MAJOR	22
#define XEN_SCSI_DISK0_MAJOR	8
#define XEN_SCSI_DISK1_MAJOR	65
#define XEN_SCSI_DISK2_MAJOR	66
#define XEN_SCSI_DISK3_MAJOR	67
#define XEN_SCSI_DISK4_MAJOR	68
#define XEN_SCSI_DISK5_MAJOR	69
#define XEN_SCSI_DISK6_MAJOR	70
#define XEN_SCSI_DISK7_MAJOR	71
#define XEN_SCSI_DISK8_MAJOR	128
#define XEN_SCSI_DISK9_MAJOR	129
#define XEN_SCSI_DISK10_MAJOR	130
#define XEN_SCSI_DISK11_MAJOR	131
#define XEN_SCSI_DISK12_MAJOR	132
#define XEN_SCSI_DISK13_MAJOR	133
#define XEN_SCSI_DISK14_MAJOR	134
#define XEN_SCSI_DISK15_MAJOR	135

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */