xref: /linux/drivers/block/xen-blkback/blkback.c (revision 95e9fd10f06cb5642028b6b851e32b8c8afb4571)
/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);
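
/*
 * Usage note (illustrative, not part of the original source): 'reqs' is a
 * load-time parameter (module_param_named() above exposes it under the name
 * "reqs"), while 'log_stats' can be flipped at run time through the sysfs
 * path named in the comment above, e.g.:
 *
 *   echo 1 > /sys/module/blkback/parameters/log_stats
 *
 * The exact module directory name under /sys/module depends on how the
 * driver is built, so treat the path here as an assumption.
 */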

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif	*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
};
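
/*
 * Life cycle summary (added for clarity; the behaviour is implemented by the
 * functions below): alloc_req() takes a pending_req off the pending_free
 * list, dispatch_rw_block_io() fills it in and maps the grant references,
 * every completing bio drops pendcnt in __end_block_io_op(), and once pendcnt
 * reaches zero the grants are unmapped, a response is queued via
 * make_response(), and free_req() returns the structure to the pool.
 */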

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	struct pending_req	*pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	/* The list of all pages that are available. */
	struct page		**pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index in the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (req - blkbk->pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
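
/*
 * Worked example (added for illustration): with
 * BLKIF_MAX_SEGMENTS_PER_REQUEST == 11, segment 5 of the third pending_req
 * (req - blkbk->pending_reqs == 2) gives vaddr_pagenr() == 2 * 11 + 5 == 27,
 * so its page is pending_pages[27] and its grant handle is
 * pending_grant_handles[27].
 */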


static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free one.
 */
static void free_req(struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}
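
/*
 * Example of the checks above (added for illustration): the
 * 'end < req->sector_number' test catches 64-bit wrap-around, and the
 * 'end > vbd_sz(vbd)' test catches requests that run past the device.  A
 * request starting 4 sectors before the end of the vbd with nr_sects == 8
 * fails the second test and is refused with -EACCES before any I/O is issued.
 */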

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3d  |  rd %4d  |  wr %4d  |  f %4d"
		 "  |  ds %4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

struct seg_buf {
	unsigned long buf;
	unsigned int nsec;
};
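/*
 * Note added for clarity: as used below, 'buf' holds the segment's bus
 * address with the byte offset of the first sector OR-ed into the low bits
 * (first_sect << 9), and 'nsec' is the segment length in 512-byte sectors
 * (last_sect - first_sect + 1).
 */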
/*
 * Unmap the grant references, and also remove the M2P overrides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		pages[invcount] = virt_to_page(vaddr(req, i));
		invcount++;
	}

	ret = gnttab_unmap_refs(unmap, pages, invcount, false);
	BUG_ON(ret);
}

static int xen_blkbk_map(struct blkif_request *req,
			 struct pending_req *pending_req,
			 struct seg_buf seg[])
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i;
	int nseg = req->u.rw.nr_segments;
	int ret = 0;

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set
	 * up map[..] with the PFN of the page in our domain and the
	 * corresponding grant reference for each page.
	 */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		flags = GNTMAP_host_map;
		if (pending_req->operation != BLKIF_OP_READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref,
				  pending_req->blkif->domid);
	}

	ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
	BUG_ON(ret);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		seg[i].buf  = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}
	return ret;
}
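
/*
 * Illustrative example (not in the original source): for a segment with
 * first_sect == 3, the assignment above stores
 * map[i].dev_bus_addr | (3 << 9).  dispatch_rw_block_io() later recovers the
 * in-page byte offset with 'seg[i].buf & ~PAGE_MASK', i.e. 3 * 512 == 1536,
 * when it calls bio_add_page().
 */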

static int dispatch_discard_io(struct xen_blkif *blkif,
				struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;

	blkif->st_ds_req++;

	xen_blkif_get(blkif);
	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);

	if (err == -EOPNOTSUPP) {
		pr_debug(DRV_PFX "discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		/* The initial refcount is one, plus the one taken at the
		 * start of the xen_blkif_schedule thread. */
		if (atomic_read(&blkif->refcnt) <= 2)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}
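
/*
 * Clarifying note (added): every in-flight request holds a reference on the
 * blkif (taken in dispatch_rw_block_io() and dropped in __end_block_io_op()),
 * so waiting for refcnt to fall to 2 above means waiting until all
 * outstanding I/O has completed; __end_block_io_op() signals drain_complete
 * while 'drain' is set.
 */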

/*
 * Completion callback on the bios. Called as bio->bi_end_io().
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bios have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
			if (atomic_read(&pending_req->blkif->drain))
				complete(&pending_req->blkif->drain_complete);
		}
		free_req(pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}



/*
 * Copy a 'struct blkif_request' from the ring buffer (it tells us which
 * sectors we want, how many of them, the grant references, etc.) and
 * translate it into block-API terms so it can be handed to the proper
 * block device.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();
		if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
			free_req(pending_req);
			if (dispatch_discard_io(blkif, &req))
				break;
		} else if (dispatch_rw_block_io(blkif, &req, pending_req))
			break;

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
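
/*
 * Note added for clarity: RING_FINAL_CHECK_FOR_REQUESTS() (a shared-ring
 * macro from the Xen ring interface) re-arms the ring's request event and
 * then re-checks for requests, so a request the frontend publishes between
 * __do_block_io_op() seeing an empty ring and the scheduler thread going back
 * to sleep is not lost; the exact mechanics live in the ring header, not here.
 */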
/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio' (or
 * several) and call submit_bio() to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;

	switch (req->operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through */
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bios yet. */
		goto fail_response;
	}

	preq.dev           = req->u.rw.handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
				 blkif->domid);
			goto fail_response;
		}
	}

	/* Wait on all outstanding I/Os and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map(req, pending_req, seg))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	/*
	 * Set pendcnt to the number of bios up front so that completions can
	 * start decrementing it right away, without per-bio atomic_inc calls.
	 */
	atomic_set(&pending_req->pendcnt, nbio);

	/* Get a reference count for the disk queue and start sending I/O */
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(pending_req);
 fail_response:
	/* Haven't submitted any bios yet. */
	make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}
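
/*
 * Illustrative walk-through (added): the segment loop above tries
 * bio_add_page() on the current bio and allocates a fresh bio (sized for the
 * remaining nseg - i segments) whenever that fails or no bio exists yet.  A
 * request whose segments are all accepted by the first bio therefore ends up
 * with nbio == 1 and pendcnt == 1, while a flush with no data segments takes
 * the '!bio' path and submits a single empty WRITE_FLUSH bio.
 */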



/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
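
/*
 * Note added for clarity: the response is written at rsp_prod_pvt in
 * whichever ring layout the frontend negotiated (native, 32-bit or 64-bit
 * x86), then RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() publishes it and reports,
 * via 'notify', whether the frontend asked to be woken, in which case its
 * event channel is kicked with notify_remote_via_irq().
 */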

static int __init xen_blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kzalloc(sizeof(blkbk->pending_reqs[0]) *
					xen_blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < xen_blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
		kfree(blkbk->pending_pages);
	}
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");