xref: /linux/fs/direct-io.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
1 /*
2  * fs/direct-io.c
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  *
6  * O_DIRECT
7  *
8  * 04Jul2002	akpm@zip.com.au
9  *		Initial version
10  * 11Sep2002	janetinc@us.ibm.com
11  * 		added readv/writev support.
12  * 29Oct2002	akpm@zip.com.au
13  *		rewrote bio_add_page() support.
14  * 30Oct2002	pbadari@us.ibm.com
15  *		added support for non-aligned IO.
16  * 06Nov2002	pbadari@us.ibm.com
17  *		added asynchronous IO support.
18  * 21Jul2003	nathans@sgi.com
19  *		added IO completion notifier.
20  */
21 
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
29 #include <linux/pagemap.h>
30 #include <linux/bio.h>
31 #include <linux/wait.h>
32 #include <linux/err.h>
33 #include <linux/blkdev.h>
34 #include <linux/buffer_head.h>
35 #include <linux/rwsem.h>
36 #include <linux/uio.h>
37 #include <asm/atomic.h>
38 
39 /*
40  * How many user pages to map in one call to get_user_pages().  This determines
41  * the size of a structure on the stack.
42  */
43 #define DIO_PAGES	64
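
/*
 * Sizing note (illustrative, not part of the original file): DIO_PAGES
 * controls the pages[] array in struct dio below, so 64 entries cost
 * 64 * sizeof(struct page *) bytes: 256 bytes with 4-byte pointers,
 * 512 bytes with 8-byte pointers.
 */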
44 
45 /*
46  * This code generally works in units of "dio_blocks".  A dio_block is
47  * somewhere between the hard sector size and the filesystem block size.  It
48  * is determined on a per-invocation basis.  When talking to the filesystem
49  * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
50  * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
51  * to dio_block quantities by shifting left by blkfactor.
52  *
53  * If blkfactor is zero then the user's request was aligned to the filesystem's
54  * blocksize.
55  *
56  * lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
57  * This determines whether we need to do the fancy locking which prevents
58  * direct-IO from being able to read uninitialised disk blocks.  If it is zero
59  * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING, i_mutex
60  * is not held for the entire direct write (it is taken briefly, initially,
61  * during a direct read, but it is never held for the duration of a direct-IO).
62  */
63 
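/*
 * Worked example (illustrative, not part of the original file): with
 * 512-byte dio_blocks (blkbits == 9) on a filesystem whose blocksize is
 * 4096 bytes (i_blkbits == 12), blkfactor == 12 - 9 == 3.  Then
 *
 *	fs_block  == dio_block >> 3	(8 dio_blocks per fs block)
 *	dio_block == fs_block << 3
 */
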
64 struct dio {
65 	/* BIO submission state */
66 	struct bio *bio;		/* bio under assembly */
67 	struct inode *inode;
68 	int rw;
69 	loff_t i_size;			/* i_size when submitted */
70 	int lock_type;			/* doesn't change */
71 	unsigned blkbits;		/* doesn't change */
72 	unsigned blkfactor;		/* When we're using an alignment which
73 					   is finer than the filesystem's soft
74 					   blocksize, this specifies how much
75 					   finer.  blkfactor=2 means 1/4-block
76 					   alignment.  Does not change */
77 	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
78 					   been performed at the start of a
79 					   write */
80 	int pages_in_io;		/* approximate total IO pages */
81 	size_t	size;			/* total request size (doesn't change)*/
82 	sector_t block_in_file;		/* Current offset into the underlying
83 					   file in dio_block units. */
84 	unsigned blocks_available;	/* At block_in_file.  changes */
85 	sector_t final_block_in_request;/* doesn't change */
86 	unsigned first_block_in_page;	/* doesn't change, used only once */
87 	int boundary;			/* prev block is at a boundary */
88 	int reap_counter;		/* rate limit reaping */
89 	get_block_t *get_block;		/* block mapping function */
90 	dio_iodone_t *end_io;		/* IO completion function */
91 	sector_t final_block_in_bio;	/* current final block in bio + 1 */
92 	sector_t next_block_for_io;	/* next block to be put under IO,
93 					   in dio_blocks units */
94 	struct buffer_head map_bh;	/* last get_block() result */
95 
96 	/*
97 	 * Deferred addition of a page to the dio.  These variables are
98 	 * private to dio_send_cur_page(), submit_page_section() and
99 	 * dio_bio_add_page().
100 	 */
101 	struct page *cur_page;		/* The page */
102 	unsigned cur_page_offset;	/* Offset into it, in bytes */
103 	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
104 	sector_t cur_page_block;	/* Where it starts */
105 
106 	/*
107 	 * Page fetching state. These variables belong to dio_refill_pages().
108 	 */
109 	int curr_page;			/* changes */
110 	int total_pages;		/* doesn't change */
111 	unsigned long curr_user_address;/* changes */
112 
113 	/*
114 	 * Page queue.  These variables belong to dio_refill_pages() and
115 	 * dio_get_page().
116 	 */
117 	struct page *pages[DIO_PAGES];	/* page buffer */
118 	unsigned head;			/* next page to process */
119 	unsigned tail;			/* last valid page + 1 */
120 	int page_errors;		/* errno from get_user_pages() */
121 
122 	/* BIO completion state */
123 	spinlock_t bio_lock;		/* protects BIO fields below */
124 	int bio_count;			/* nr bios to be completed */
125 	int bios_in_flight;		/* nr bios in flight */
126 	struct bio *bio_list;		/* singly linked via bi_private */
127 	struct task_struct *waiter;	/* waiting task (NULL if none) */
128 
129 	/* AIO related stuff */
130 	struct kiocb *iocb;		/* kiocb */
131 	int is_async;			/* is IO async ? */
132 	int io_error;			/* IO error in completion path */
133 	ssize_t result;                 /* IO result */
134 };
135 
136 /*
137  * How many pages are in the queue?
138  */
139 static inline unsigned dio_pages_present(struct dio *dio)
140 {
141 	return dio->tail - dio->head;
142 }
143 
144 /*
145  * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
146  */
147 static int dio_refill_pages(struct dio *dio)
148 {
149 	int ret;
150 	int nr_pages;
151 
152 	nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
153 	down_read(&current->mm->mmap_sem);
154 	ret = get_user_pages(
155 		current,			/* Task for fault accounting */
156 		current->mm,			/* whose pages? */
157 		dio->curr_user_address,		/* Where from? */
158 		nr_pages,			/* How many pages? */
159 		dio->rw == READ,		/* Write to memory? */
160 		0,				/* force (?) */
161 		&dio->pages[0],
162 		NULL);				/* vmas */
163 	up_read(&current->mm->mmap_sem);
164 
165 	if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
166 		struct page *page = ZERO_PAGE(dio->curr_user_address);
167 		/*
168 		 * A memory fault, but the filesystem has some outstanding
169 		 * mapped blocks.  We need to use those blocks up to avoid
170 		 * leaking stale data in the file.
171 		 */
172 		if (dio->page_errors == 0)
173 			dio->page_errors = ret;
174 		page_cache_get(page);
175 		dio->pages[0] = page;
176 		dio->head = 0;
177 		dio->tail = 1;
178 		ret = 0;
179 		goto out;
180 	}
181 
182 	if (ret >= 0) {
183 		dio->curr_user_address += ret * PAGE_SIZE;
184 		dio->curr_page += ret;
185 		dio->head = 0;
186 		dio->tail = ret;
187 		ret = 0;
188 	}
189 out:
190 	return ret;
191 }
192 
193 /*
194  * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
195  * buffered inside the dio so that we can call get_user_pages() against a
196  * decent number of pages less frequently.  This provides nicer use of
197  * the L1 cache.
198  */
199 static struct page *dio_get_page(struct dio *dio)
200 {
201 	if (dio_pages_present(dio) == 0) {
202 		int ret;
203 
204 		ret = dio_refill_pages(dio);
205 		if (ret)
206 			return ERR_PTR(ret);
207 		BUG_ON(dio_pages_present(dio) == 0);
208 	}
209 	return dio->pages[dio->head++];
210 }
211 
212 /*
213  * Called when all DIO BIO I/O has been completed - let the filesystem
214  * know, if it registered an interest earlier via get_block.  Pass the
215  * private field of the map buffer_head so that filesystems can use it
216  * to hold additional state between get_block calls and dio_complete.
217  */
218 static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
219 {
220 	if (dio->end_io && dio->result)
221 		dio->end_io(dio->iocb, offset, bytes, dio->map_bh.b_private);
222 	if (dio->lock_type == DIO_LOCKING)
223 		/* lockdep: non-owner release */
224 		up_read_non_owner(&dio->inode->i_alloc_sem);
225 }
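
/*
 * Hedged sketch (not part of the original file): a filesystem wanting
 * the completion callback above might supply a dio_iodone_t like this.
 * example_io_state and example_finish_unwritten() are hypothetical
 * stand-ins for per-IO state stashed in map_bh.b_private by the fs's
 * get_block().
 */
#if 0
static void example_end_io(struct kiocb *iocb, loff_t offset,
		ssize_t bytes, void *private)
{
	struct example_io_state *state = private;

	/* e.g. convert unwritten extents now that the data is on disk */
	example_finish_unwritten(state, offset, bytes);
	kfree(state);
}
#endif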
226 
227 /*
228  * Called when a BIO has been processed.  If the count goes to zero then IO is
229  * complete and we can signal this to the AIO layer.
230  */
231 static void finished_one_bio(struct dio *dio)
232 {
233 	unsigned long flags;
234 
235 	spin_lock_irqsave(&dio->bio_lock, flags);
236 	if (dio->bio_count == 1) {
237 		if (dio->is_async) {
238 			ssize_t transferred;
239 			loff_t offset;
240 
241 			/*
242 			 * Last reference to the dio is going away.
243 			 * Drop spinlock and complete the DIO.
244 			 */
245 			spin_unlock_irqrestore(&dio->bio_lock, flags);
246 
247 			/* Check for short read case */
248 			transferred = dio->result;
249 			offset = dio->iocb->ki_pos;
250 
251 			if ((dio->rw == READ) &&
252 			    ((offset + transferred) > dio->i_size))
253 				transferred = dio->i_size - offset;
254 
255 			/* check for error in completion path */
256 			if (dio->io_error)
257 				transferred = dio->io_error;
258 
259 			dio_complete(dio, offset, transferred);
260 
261 			/* Complete AIO later if falling back to buffered i/o */
262 			if (dio->result == dio->size ||
263 				((dio->rw == READ) && dio->result)) {
264 				aio_complete(dio->iocb, transferred, 0);
265 				kfree(dio);
266 				return;
267 			} else {
268 				/*
269 				 * Falling back to buffered
270 				 */
271 				spin_lock_irqsave(&dio->bio_lock, flags);
272 				dio->bio_count--;
273 				if (dio->waiter)
274 					wake_up_process(dio->waiter);
275 				spin_unlock_irqrestore(&dio->bio_lock, flags);
276 				return;
277 			}
278 		}
279 	}
280 	dio->bio_count--;
281 	spin_unlock_irqrestore(&dio->bio_lock, flags);
282 }
283 
284 static int dio_bio_complete(struct dio *dio, struct bio *bio);
285 /*
286  * Asynchronous IO callback.
287  */
288 static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
289 {
290 	struct dio *dio = bio->bi_private;
291 
292 	if (bio->bi_size)
293 		return 1;
294 
295 	/* cleanup the bio */
296 	dio_bio_complete(dio, bio);
297 	return 0;
298 }
299 
300 /*
301  * The BIO completion handler simply queues the BIO up for the process-context
302  * handler.
303  *
304  * During I/O bi_private points at the dio.  After I/O, bi_private is used to
305  * implement a singly-linked list of completed BIOs, at dio->bio_list.
306  */
307 static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
308 {
309 	struct dio *dio = bio->bi_private;
310 	unsigned long flags;
311 
312 	if (bio->bi_size)
313 		return 1;
314 
315 	spin_lock_irqsave(&dio->bio_lock, flags);
316 	bio->bi_private = dio->bio_list;
317 	dio->bio_list = bio;
318 	dio->bios_in_flight--;
319 	if (dio->waiter && dio->bios_in_flight == 0)
320 		wake_up_process(dio->waiter);
321 	spin_unlock_irqrestore(&dio->bio_lock, flags);
322 	return 0;
323 }
324 
325 static int
326 dio_bio_alloc(struct dio *dio, struct block_device *bdev,
327 		sector_t first_sector, int nr_vecs)
328 {
329 	struct bio *bio;
330 
331 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
332 	if (bio == NULL)
333 		return -ENOMEM;
334 
335 	bio->bi_bdev = bdev;
336 	bio->bi_sector = first_sector;
337 	if (dio->is_async)
338 		bio->bi_end_io = dio_bio_end_aio;
339 	else
340 		bio->bi_end_io = dio_bio_end_io;
341 
342 	dio->bio = bio;
343 	return 0;
344 }
345 
346 /*
347  * In the AIO read case we speculatively dirty the pages before starting IO.
348  * During IO completion, any of these pages which happen to have been written
349  * back will be redirtied by bio_check_pages_dirty().
350  */
351 static void dio_bio_submit(struct dio *dio)
352 {
353 	struct bio *bio = dio->bio;
354 	unsigned long flags;
355 
356 	bio->bi_private = dio;
357 	spin_lock_irqsave(&dio->bio_lock, flags);
358 	dio->bio_count++;
359 	dio->bios_in_flight++;
360 	spin_unlock_irqrestore(&dio->bio_lock, flags);
361 	if (dio->is_async && dio->rw == READ)
362 		bio_set_pages_dirty(bio);
363 	submit_bio(dio->rw, bio);
364 
365 	dio->bio = NULL;
366 	dio->boundary = 0;
367 }
368 
369 /*
370  * Release any resources in case of a failure
371  */
372 static void dio_cleanup(struct dio *dio)
373 {
374 	while (dio_pages_present(dio))
375 		page_cache_release(dio_get_page(dio));
376 }
377 
378 /*
379  * Wait for the next BIO to complete.  Remove it and return it.
380  */
381 static struct bio *dio_await_one(struct dio *dio)
382 {
383 	unsigned long flags;
384 	struct bio *bio;
385 
386 	spin_lock_irqsave(&dio->bio_lock, flags);
387 	while (dio->bio_list == NULL) {
388 		set_current_state(TASK_UNINTERRUPTIBLE);
389 		if (dio->bio_list == NULL) {
390 			dio->waiter = current;
391 			spin_unlock_irqrestore(&dio->bio_lock, flags);
392 			blk_run_address_space(dio->inode->i_mapping);
393 			io_schedule();
394 			spin_lock_irqsave(&dio->bio_lock, flags);
395 			dio->waiter = NULL;
396 		}
397 		set_current_state(TASK_RUNNING);
398 	}
399 	bio = dio->bio_list;
400 	dio->bio_list = bio->bi_private;
401 	spin_unlock_irqrestore(&dio->bio_lock, flags);
402 	return bio;
403 }
404 
405 /*
406  * Process one completed BIO.  No locks are held.
407  */
408 static int dio_bio_complete(struct dio *dio, struct bio *bio)
409 {
410 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
411 	struct bio_vec *bvec = bio->bi_io_vec;
412 	int page_no;
413 
414 	if (!uptodate)
415 		dio->io_error = -EIO;
416 
417 	if (dio->is_async && dio->rw == READ) {
418 		bio_check_pages_dirty(bio);	/* transfers ownership */
419 	} else {
420 		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
421 			struct page *page = bvec[page_no].bv_page;
422 
423 			if (dio->rw == READ && !PageCompound(page))
424 				set_page_dirty_lock(page);
425 			page_cache_release(page);
426 		}
427 		bio_put(bio);
428 	}
429 	finished_one_bio(dio);
430 	return uptodate ? 0 : -EIO;
431 }
432 
433 /*
434  * Wait on and process all in-flight BIOs.
435  */
436 static int dio_await_completion(struct dio *dio)
437 {
438 	int ret = 0;
439 
440 	if (dio->bio)
441 		dio_bio_submit(dio);
442 
443 	/*
444 	 * The bio_lock is not held for the read of bio_count.
445 	 * This is ok since it is dio_bio_complete() that changes
446 	 * bio_count.
447 	 */
448 	while (dio->bio_count) {
449 		struct bio *bio = dio_await_one(dio);
450 		int ret2;
451 
452 		ret2 = dio_bio_complete(dio, bio);
453 		if (ret == 0)
454 			ret = ret2;
455 	}
456 	return ret;
457 }
458 
459 /*
460  * A really large O_DIRECT read or write can generate a lot of BIOs.  So
461  * to keep the memory consumption sane we periodically reap any completed BIOs
462  * during the BIO generation phase.
463  *
464  * This also helps to limit the peak amount of pinned userspace memory.
465  */
466 static int dio_bio_reap(struct dio *dio)
467 {
468 	int ret = 0;
469 
470 	if (dio->reap_counter++ >= 64) {
471 		while (dio->bio_list) {
472 			unsigned long flags;
473 			struct bio *bio;
474 			int ret2;
475 
476 			spin_lock_irqsave(&dio->bio_lock, flags);
477 			bio = dio->bio_list;
478 			dio->bio_list = bio->bi_private;
479 			spin_unlock_irqrestore(&dio->bio_lock, flags);
480 			ret2 = dio_bio_complete(dio, bio);
481 			if (ret == 0)
482 				ret = ret2;
483 		}
484 		dio->reap_counter = 0;
485 	}
486 	return ret;
487 }
488 
489 /*
490  * Call into the fs to map some more disk blocks.  We record the current number
491  * of available blocks at dio->blocks_available.  These are in units of the
492  * fs blocksize, (1 << inode->i_blkbits).
493  *
494  * The fs is allowed to map lots of blocks at once.  If it wants to do that,
495  * it uses the passed inode-relative block number as the file offset, as usual.
496  *
497  * get_block() is passed the number of i_blkbits-sized blocks which direct_io
498  * has remaining to do.  The fs should not map more than this number of blocks.
499  *
500  * If the fs has mapped a lot of blocks, it should populate bh->b_size to
501  * indicate how much contiguous disk space has been made available at
502  * bh->b_blocknr.
503  *
504  * If *any* of the mapped blocks are new, then the fs must set buffer_new().
505  * This isn't very efficient...
506  *
507  * In the case of filesystem holes: the fs may return an arbitrarily-large
508  * hole by returning an appropriate value in b_size and by clearing
509  * buffer_mapped().  However the direct-io code will only process holes one
510  * block at a time - it will repeatedly call get_block() as it walks the hole.
511  */
512 static int get_more_blocks(struct dio *dio)
513 {
514 	int ret;
515 	struct buffer_head *map_bh = &dio->map_bh;
516 	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
517 	unsigned long fs_count;	/* Number of filesystem-sized blocks */
518 	unsigned long dio_count;/* Number of dio_block-sized blocks */
519 	unsigned long blkmask;
520 	int create;
521 
522 	/*
523 	 * If there was a memory error and we've overwritten all the
524 	 * mapped blocks then we can now return that memory error
525 	 */
526 	ret = dio->page_errors;
527 	if (ret == 0) {
528 		BUG_ON(dio->block_in_file >= dio->final_block_in_request);
529 		fs_startblk = dio->block_in_file >> dio->blkfactor;
530 		dio_count = dio->final_block_in_request - dio->block_in_file;
531 		fs_count = dio_count >> dio->blkfactor;
532 		blkmask = (1 << dio->blkfactor) - 1;
533 		if (dio_count & blkmask)
534 			fs_count++;
535 
536 		map_bh->b_state = 0;
537 		map_bh->b_size = fs_count << dio->inode->i_blkbits;
538 
539 		create = dio->rw & WRITE;
540 		if (dio->lock_type == DIO_LOCKING) {
541 			if (dio->block_in_file < (i_size_read(dio->inode) >>
542 							dio->blkbits))
543 				create = 0;
544 		} else if (dio->lock_type == DIO_NO_LOCKING) {
545 			create = 0;
546 		}
547 
548 		/*
549 		 * For writes inside i_size we forbid block creations: only
550 		 * overwrites are permitted.  We fall back to buffered writes
551 		 * at a higher level for inside-i_size block-instantiating
552 		 * writes.
553 		 */
554 		ret = (*dio->get_block)(dio->inode, fs_startblk,
555 						map_bh, create);
556 	}
557 	return ret;
558 }
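
/*
 * Hedged sketch (not from the original file) of a get_block() which
 * honours the contract described above.  example_extent_lookup() is a
 * hypothetical stand-in for the filesystem's extent/allocation code; it
 * is assumed to return the number of contiguous blocks mapped (0 for a
 * hole) and to say whether any of them were newly allocated.
 */
#if 0
static int example_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
	unsigned long mapped;
	sector_t phys;
	int new;

	mapped = example_extent_lookup(inode, iblock, max_blocks,
					&phys, &new, create);
	if (!mapped)
		return 0;	/* hole: bh stays unmapped */
	map_bh(bh_result, inode->i_sb, phys);
	bh_result->b_size = mapped << inode->i_blkbits;
	if (new)
		set_buffer_new(bh_result);  /* any new block => buffer_new */
	return 0;
}
#endif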
559 
560 /*
561  * There is no bio.  Make one now.
562  */
563 static int dio_new_bio(struct dio *dio, sector_t start_sector)
564 {
565 	sector_t sector;
566 	int ret, nr_pages;
567 
568 	ret = dio_bio_reap(dio);
569 	if (ret)
570 		goto out;
571 	sector = start_sector << (dio->blkbits - 9);
572 	nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
573 	BUG_ON(nr_pages <= 0);
574 	ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
575 	dio->boundary = 0;
576 out:
577 	return ret;
578 }
579 
580 /*
581  * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
582  * that was successful then update final_block_in_bio and take a ref against
583  * the just-added page.
584  *
585  * Return zero on success.  Non-zero means the caller needs to start a new BIO.
586  */
587 static int dio_bio_add_page(struct dio *dio)
588 {
589 	int ret;
590 
591 	ret = bio_add_page(dio->bio, dio->cur_page,
592 			dio->cur_page_len, dio->cur_page_offset);
593 	if (ret == dio->cur_page_len) {
594 		/*
595 		 * Decrement the count only if we are done with this page
596 		 */
597 		if ((dio->cur_page_len + dio->cur_page_offset) == PAGE_SIZE)
598 			dio->pages_in_io--;
599 		page_cache_get(dio->cur_page);
600 		dio->final_block_in_bio = dio->cur_page_block +
601 			(dio->cur_page_len >> dio->blkbits);
602 		ret = 0;
603 	} else {
604 		ret = 1;
605 	}
606 	return ret;
607 }
608 
609 /*
610  * Put cur_page under IO.  The section of cur_page which is described by
611  * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
612  * starts on-disk at cur_page_block.
613  *
614  * We take a ref against the page here (on behalf of its presence in the bio).
615  *
616  * The caller of this function is responsible for removing cur_page from the
617  * dio, and for dropping the refcount which came from that presence.
618  */
619 static int dio_send_cur_page(struct dio *dio)
620 {
621 	int ret = 0;
622 
623 	if (dio->bio) {
624 		/*
625 		 * See whether this new request is contiguous with the old
626 		 */
627 		if (dio->final_block_in_bio != dio->cur_page_block)
628 			dio_bio_submit(dio);
629 		/*
630 		 * Submit now if the underlying fs is about to perform a
631 		 * metadata read
632 		 */
633 		if (dio->boundary)
634 			dio_bio_submit(dio);
635 	}
636 
637 	if (dio->bio == NULL) {
638 		ret = dio_new_bio(dio, dio->cur_page_block);
639 		if (ret)
640 			goto out;
641 	}
642 
643 	if (dio_bio_add_page(dio) != 0) {
644 		dio_bio_submit(dio);
645 		ret = dio_new_bio(dio, dio->cur_page_block);
646 		if (ret == 0) {
647 			ret = dio_bio_add_page(dio);
648 			BUG_ON(ret != 0);
649 		}
650 	}
651 out:
652 	return ret;
653 }
654 
655 /*
656  * An autonomous function to put a chunk of a page under deferred IO.
657  *
658  * The caller doesn't actually know (or care) whether this piece of page is in
659  * a BIO, or is under IO or whatever.  We just take care of all possible
660  * situations here.  The separation between the logic of do_direct_IO() and
661  * that of submit_page_section() is important for clarity.  Please don't break it.
662  *
663  * The chunk of page starts on-disk at blocknr.
664  *
665  * We perform deferred IO, by recording the last-submitted page inside our
666  * private part of the dio structure.  If possible, we just expand the IO
667  * across that page here.
668  *
669  * If that doesn't work out then we put the old page into the bio and add this
670  * page to the dio instead.
671  */
672 static int
673 submit_page_section(struct dio *dio, struct page *page,
674 		unsigned offset, unsigned len, sector_t blocknr)
675 {
676 	int ret = 0;
677 
678 	/*
679 	 * Can we just grow the current page's presence in the dio?
680 	 */
681 	if (	(dio->cur_page == page) &&
682 		(dio->cur_page_offset + dio->cur_page_len == offset) &&
683 		(dio->cur_page_block +
684 			(dio->cur_page_len >> dio->blkbits) == blocknr)) {
685 		dio->cur_page_len += len;
686 
687 		/*
688 		 * If dio->boundary then we want to schedule the IO now to
689 		 * avoid metadata seeks.
690 		 */
691 		if (dio->boundary) {
692 			ret = dio_send_cur_page(dio);
693 			page_cache_release(dio->cur_page);
694 			dio->cur_page = NULL;
695 		}
696 		goto out;
697 	}
698 
699 	/*
700 	 * If there's a deferred page already there then send it.
701 	 */
702 	if (dio->cur_page) {
703 		ret = dio_send_cur_page(dio);
704 		page_cache_release(dio->cur_page);
705 		dio->cur_page = NULL;
706 		if (ret)
707 			goto out;
708 	}
709 
710 	page_cache_get(page);		/* It is in dio */
711 	dio->cur_page = page;
712 	dio->cur_page_offset = offset;
713 	dio->cur_page_len = len;
714 	dio->cur_page_block = blocknr;
715 out:
716 	return ret;
717 }
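
/*
 * Illustrative example (not from the original source): a 4096-byte user
 * page written in 512-byte dio_blocks arrives here as eight successive
 * (page, offset, 512, blocknr) chunks.  If the disk blocks are
 * contiguous, the test above keeps growing cur_page_len from 512 up to
 * 4096 and the whole page later enters a BIO in one piece via
 * dio_send_cur_page().
 */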
718 
719 /*
720  * Clean any dirty buffers in the blockdev mapping which alias newly-created
721  * file blocks.  Only called for S_ISREG files - blockdevs do not set
722  * buffer_new.
723  */
724 static void clean_blockdev_aliases(struct dio *dio)
725 {
726 	unsigned i;
727 	unsigned nblocks;
728 
729 	nblocks = dio->map_bh.b_size >> dio->inode->i_blkbits;
730 
731 	for (i = 0; i < nblocks; i++) {
732 		unmap_underlying_metadata(dio->map_bh.b_bdev,
733 					dio->map_bh.b_blocknr + i);
734 	}
735 }
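
/*
 * Example scenario (illustrative): if a block was earlier read or
 * written through the blockdev's own pagecache (say by an fsck or dump
 * tool) and the filesystem now allocates that block to this file, the
 * stale blockdev buffer must be unmapped so that a later writeback of
 * it cannot clobber the data we are about to write directly.
 */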
736 
737 /*
738  * If we are not writing the entire block and get_block() allocated
739  * the block for us, we need to fill in the unused portion of the
740  * block with zeros.  This happens only if the user buffer, file offset,
741  * or IO length is not a multiple of the filesystem block size.
742  *
743  * `end' is zero if we're doing the start of the IO, 1 at the end of the
744  * IO.
745  */
746 static void dio_zero_block(struct dio *dio, int end)
747 {
748 	unsigned dio_blocks_per_fs_block;
749 	unsigned this_chunk_blocks;	/* In dio_blocks */
750 	unsigned this_chunk_bytes;
751 	struct page *page;
752 
753 	dio->start_zero_done = 1;
754 	if (!dio->blkfactor || !buffer_new(&dio->map_bh))
755 		return;
756 
757 	dio_blocks_per_fs_block = 1 << dio->blkfactor;
758 	this_chunk_blocks = dio->block_in_file & (dio_blocks_per_fs_block - 1);
759 
760 	if (!this_chunk_blocks)
761 		return;
762 
763 	/*
764 	 * We need to zero out part of an fs block.  It is either at the
765 	 * beginning or the end of the fs block.
766 	 */
767 	if (end)
768 		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;
769 
770 	this_chunk_bytes = this_chunk_blocks << dio->blkbits;
771 
772 	page = ZERO_PAGE(dio->curr_user_address);
773 	if (submit_page_section(dio, page, 0, this_chunk_bytes,
774 				dio->next_block_for_io))
775 		return;
776 
777 	dio->next_block_for_io += this_chunk_blocks;
778 }
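
/*
 * Worked example (illustrative): with blkfactor == 3 (512-byte
 * dio_blocks inside a 4096-byte fs block), a write into a newly
 * allocated fs block starting at dio_block 5 of that block has
 * this_chunk_blocks == 5 at the start (end == 0), so dio_blocks 0-4 are
 * zeroed; a write whose final_block_in_request falls at dio_block 5
 * zeroes the remaining 8 - 5 == 3 dio_blocks at the end (end == 1).
 */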
779 
780 /*
781  * Walk the user pages, and the file, mapping blocks to disk and generating
782  * a sequence of (page,offset,len,block) mappings.  These mappings are injected
783  * into submit_page_section(), which takes care of the next stage of submission
784  *
785  * Direct IO against a blockdev is different from direct IO against a file,
786  * because we can happily perform page-sized but 512-byte-aligned IOs.  It is
787  * important that blockdev IO be able to have fine alignment and large sizes.
788  *
789  * So what we do is to permit the ->get_block function to populate bh.b_size
790  * with the size of IO which is permitted at this offset and this i_blkbits.
791  *
792  * For best results, the blockdev should be set up with 512-byte i_blkbits and
793  * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
794  * fine alignment but still allows this function to work in PAGE_SIZE units.
795  */
796 static int do_direct_IO(struct dio *dio)
797 {
798 	const unsigned blkbits = dio->blkbits;
799 	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
800 	struct page *page;
801 	unsigned block_in_page;
802 	struct buffer_head *map_bh = &dio->map_bh;
803 	int ret = 0;
804 
805 	/* The I/O can start at any block offset within the first page */
806 	block_in_page = dio->first_block_in_page;
807 
808 	while (dio->block_in_file < dio->final_block_in_request) {
809 		page = dio_get_page(dio);
810 		if (IS_ERR(page)) {
811 			ret = PTR_ERR(page);
812 			goto out;
813 		}
814 
815 		while (block_in_page < blocks_per_page) {
816 			unsigned offset_in_page = block_in_page << blkbits;
817 			unsigned this_chunk_bytes;	/* # of bytes mapped */
818 			unsigned this_chunk_blocks;	/* # of blocks */
819 			unsigned u;
820 
821 			if (dio->blocks_available == 0) {
822 				/*
823 				 * Need to go and map some more disk
824 				 */
825 				unsigned long blkmask;
826 				unsigned long dio_remainder;
827 
828 				ret = get_more_blocks(dio);
829 				if (ret) {
830 					page_cache_release(page);
831 					goto out;
832 				}
833 				if (!buffer_mapped(map_bh))
834 					goto do_holes;
835 
836 				dio->blocks_available =
837 						map_bh->b_size >> dio->blkbits;
838 				dio->next_block_for_io =
839 					map_bh->b_blocknr << dio->blkfactor;
840 				if (buffer_new(map_bh))
841 					clean_blockdev_aliases(dio);
842 
843 				if (!dio->blkfactor)
844 					goto do_holes;
845 
846 				blkmask = (1 << dio->blkfactor) - 1;
847 				dio_remainder = (dio->block_in_file & blkmask);
848 
849 				/*
850 				 * If we are at the start of IO and that IO
851 				 * starts partway into a fs-block,
852 				 * dio_remainder will be non-zero.  If the IO
853 				 * is a read then we can simply advance the IO
854 				 * cursor to the first block which is to be
855 				 * read.  But if the IO is a write and the
856 				 * block was newly allocated we cannot do that;
857 				 * the start of the fs block must be zeroed out
858 				 * on-disk
859 				 */
860 				if (!buffer_new(map_bh))
861 					dio->next_block_for_io += dio_remainder;
862 				dio->blocks_available -= dio_remainder;
863 			}
864 do_holes:
865 			/* Handle holes */
866 			if (!buffer_mapped(map_bh)) {
867 				char *kaddr;
868 				loff_t i_size_aligned;
869 
870 				/* AKPM: eargh, -ENOTBLK is a hack */
871 				if (dio->rw & WRITE) {
872 					page_cache_release(page);
873 					return -ENOTBLK;
874 				}
875 
876 				/*
877 				 * Be sure to account for a partial block as the
878 				 * last block in the file
879 				 */
880 				i_size_aligned = ALIGN(i_size_read(dio->inode),
881 							1 << blkbits);
882 				if (dio->block_in_file >=
883 						i_size_aligned >> blkbits) {
884 					/* We hit eof */
885 					page_cache_release(page);
886 					goto out;
887 				}
888 				kaddr = kmap_atomic(page, KM_USER0);
889 				memset(kaddr + (block_in_page << blkbits),
890 						0, 1 << blkbits);
891 				flush_dcache_page(page);
892 				kunmap_atomic(kaddr, KM_USER0);
893 				dio->block_in_file++;
894 				block_in_page++;
895 				goto next_block;
896 			}
897 
898 			/*
899 			 * If we're performing IO which has an alignment which
900 			 * is finer than the underlying fs, go check to see if
901 			 * we must zero out the start of this block.
902 			 */
903 			if (unlikely(dio->blkfactor && !dio->start_zero_done))
904 				dio_zero_block(dio, 0);
905 
906 			/*
907 			 * Work out, in this_chunk_blocks, how much disk we
908 			 * can add to this page
909 			 */
910 			this_chunk_blocks = dio->blocks_available;
911 			u = (PAGE_SIZE - offset_in_page) >> blkbits;
912 			if (this_chunk_blocks > u)
913 				this_chunk_blocks = u;
914 			u = dio->final_block_in_request - dio->block_in_file;
915 			if (this_chunk_blocks > u)
916 				this_chunk_blocks = u;
917 			this_chunk_bytes = this_chunk_blocks << blkbits;
918 			BUG_ON(this_chunk_bytes == 0);
919 
920 			dio->boundary = buffer_boundary(map_bh);
921 			ret = submit_page_section(dio, page, offset_in_page,
922 				this_chunk_bytes, dio->next_block_for_io);
923 			if (ret) {
924 				page_cache_release(page);
925 				goto out;
926 			}
927 			dio->next_block_for_io += this_chunk_blocks;
928 
929 			dio->block_in_file += this_chunk_blocks;
930 			block_in_page += this_chunk_blocks;
931 			dio->blocks_available -= this_chunk_blocks;
932 next_block:
933 			BUG_ON(dio->block_in_file > dio->final_block_in_request);
934 			if (dio->block_in_file == dio->final_block_in_request)
935 				break;
936 		}
937 
938 		/* Drop the ref which was taken in get_user_pages() */
939 		page_cache_release(page);
940 		block_in_page = 0;
941 	}
942 out:
943 	return ret;
944 }
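
/*
 * Sketch (illustrative, not from this file): a blockdev get_block()
 * following the scheme above is essentially an identity mapping which
 * advertises a large b_size; compare blkdev_get_blocks() in
 * fs/block_dev.c.  Bounds checking against the device size is omitted.
 */
#if 0
static int example_blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	bh->b_size = PAGE_SIZE;		/* permit page-at-a-time IO */
	set_buffer_mapped(bh);
	return 0;
}
#endif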
945 
946 /*
947  * Releases both i_mutex and i_alloc_sem
948  */
949 static ssize_t
950 direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
951 	const struct iovec *iov, loff_t offset, unsigned long nr_segs,
952 	unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
953 	struct dio *dio)
954 {
955 	unsigned long user_addr;
956 	int seg;
957 	ssize_t ret = 0;
958 	ssize_t ret2;
959 	size_t bytes;
960 
961 	dio->bio = NULL;
962 	dio->inode = inode;
963 	dio->rw = rw;
964 	dio->blkbits = blkbits;
965 	dio->blkfactor = inode->i_blkbits - blkbits;
966 	dio->start_zero_done = 0;
967 	dio->size = 0;
968 	dio->block_in_file = offset >> blkbits;
969 	dio->blocks_available = 0;
970 	dio->cur_page = NULL;
971 
972 	dio->boundary = 0;
973 	dio->reap_counter = 0;
974 	dio->get_block = get_block;
975 	dio->end_io = end_io;
976 	dio->map_bh.b_private = NULL;
977 	dio->final_block_in_bio = -1;
978 	dio->next_block_for_io = -1;
979 
980 	dio->page_errors = 0;
981 	dio->io_error = 0;
982 	dio->result = 0;
983 	dio->iocb = iocb;
984 	dio->i_size = i_size_read(inode);
985 
986 	/*
987 	 * BIO completion state.
988 	 *
989 	 * ->bio_count starts out at one, and we decrement it to zero after all
990 	 * BIOs are submitted.  This is to avoid the situation where a really fast
991 	 * (or synchronous) device could take the count to zero while we're
992 	 * still submitting BIOs.
993 	 */
994 	dio->bio_count = 1;
995 	dio->bios_in_flight = 0;
996 	spin_lock_init(&dio->bio_lock);
997 	dio->bio_list = NULL;
998 	dio->waiter = NULL;
999 
1000 	/*
1001 	 * In case of non-aligned buffers, we may need 2 more
1002 	 * pages, since we need to zero out the first and last blocks.
1003 	 */
1004 	if (unlikely(dio->blkfactor))
1005 		dio->pages_in_io = 2;
1006 	else
1007 		dio->pages_in_io = 0;
1008 
1009 	for (seg = 0; seg < nr_segs; seg++) {
1010 		user_addr = (unsigned long)iov[seg].iov_base;
1011 		dio->pages_in_io +=
1012 			((user_addr + iov[seg].iov_len + PAGE_SIZE - 1) /
1013 				PAGE_SIZE - user_addr / PAGE_SIZE);
1014 	}
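
	/*
	 * Example (illustrative): a 10000-byte segment at user address
	 * 3000 contributes (3000 + 10000 + 4095)/4096 - 3000/4096
	 * == 4 - 0 == 4 pages with PAGE_SIZE == 4096.
	 */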
1015 
1016 	for (seg = 0; seg < nr_segs; seg++) {
1017 		user_addr = (unsigned long)iov[seg].iov_base;
1018 		dio->size += bytes = iov[seg].iov_len;
1019 
1020 		/* Index into the first page of the first block */
1021 		dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
1022 		dio->final_block_in_request = dio->block_in_file +
1023 						(bytes >> blkbits);
1024 		/* Page fetching state */
1025 		dio->head = 0;
1026 		dio->tail = 0;
1027 		dio->curr_page = 0;
1028 
1029 		dio->total_pages = 0;
1030 		if (user_addr & (PAGE_SIZE-1)) {
1031 			dio->total_pages++;
1032 			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
1033 		}
1034 		dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
1035 		dio->curr_user_address = user_addr;
1036 
1037 		ret = do_direct_IO(dio);
1038 
1039 		dio->result += iov[seg].iov_len -
1040 			((dio->final_block_in_request - dio->block_in_file) <<
1041 					blkbits);
1042 
1043 		if (ret) {
1044 			dio_cleanup(dio);
1045 			break;
1046 		}
1047 	} /* end iovec loop */
1048 
1049 	if (ret == -ENOTBLK && (rw & WRITE)) {
1050 		/*
1051 		 * The remaining part of the request will be
1052 		 * handled by buffered I/O when we return
1053 		 */
1054 		ret = 0;
1055 	}
1056 	/*
1057 	 * There may be some unwritten disk at the end of a part-written
1058 	 * fs-block-sized block.  Go zero that now.
1059 	 */
1060 	dio_zero_block(dio, 1);
1061 
1062 	if (dio->cur_page) {
1063 		ret2 = dio_send_cur_page(dio);
1064 		if (ret == 0)
1065 			ret = ret2;
1066 		page_cache_release(dio->cur_page);
1067 		dio->cur_page = NULL;
1068 	}
1069 	if (dio->bio)
1070 		dio_bio_submit(dio);
1071 
1072 	/*
1073 	 * It is possible that we return a short IO due to end of file,
1074 	 * in which case we need to release all the pages we got hold of.
1075 	 */
1076 	dio_cleanup(dio);
1077 
1078 	/*
1079 	 * All block lookups have been performed. For READ requests
1080 	 * we can let i_mutex go now that it has achieved its purpose
1081 	 * of protecting us from looking up uninitialized blocks.
1082 	 */
1083 	if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
1084 		mutex_unlock(&dio->inode->i_mutex);
1085 
1086 	/*
1087 	 * OK, all BIOs are submitted, so we can decrement bio_count to truly
1088 	 * reflect the number of to-be-processed BIOs.
1089 	 */
1090 	if (dio->is_async) {
1091 		int should_wait = 0;
1092 
1093 		if (dio->result < dio->size && (rw & WRITE)) {
1094 			dio->waiter = current;
1095 			should_wait = 1;
1096 		}
1097 		if (ret == 0)
1098 			ret = dio->result;
1099 		finished_one_bio(dio);		/* This can free the dio */
1100 		blk_run_address_space(inode->i_mapping);
1101 		if (should_wait) {
1102 			unsigned long flags;
1103 			/*
1104 			 * Wait for already issued I/O to drain out and
1105 			 * release its references to user-space pages
1106 			 * before returning to fall back on buffered I/O
1107 			 */
1108 
1109 			spin_lock_irqsave(&dio->bio_lock, flags);
1110 			set_current_state(TASK_UNINTERRUPTIBLE);
1111 			while (dio->bio_count) {
1112 				spin_unlock_irqrestore(&dio->bio_lock, flags);
1113 				io_schedule();
1114 				spin_lock_irqsave(&dio->bio_lock, flags);
1115 				set_current_state(TASK_UNINTERRUPTIBLE);
1116 			}
1117 			spin_unlock_irqrestore(&dio->bio_lock, flags);
1118 			set_current_state(TASK_RUNNING);
1119 			kfree(dio);
1120 		}
1121 	} else {
1122 		ssize_t transferred = 0;
1123 
1124 		finished_one_bio(dio);
1125 		ret2 = dio_await_completion(dio);
1126 		if (ret == 0)
1127 			ret = ret2;
1128 		if (ret == 0)
1129 			ret = dio->page_errors;
1130 		if (dio->result) {
1131 			loff_t i_size = i_size_read(inode);
1132 
1133 			transferred = dio->result;
1134 			/*
1135 			 * Adjust the return value if the read crossed a
1136 			 * non-block-aligned EOF.
1137 			 */
1138 			if (rw == READ && (offset + transferred > i_size))
1139 				transferred = i_size - offset;
1140 		}
1141 		dio_complete(dio, offset, transferred);
1142 		if (ret == 0)
1143 			ret = transferred;
1144 
1145 		/* We could have also come here on an AIO file extend */
1146 		if (!is_sync_kiocb(iocb) && (rw & WRITE) &&
1147 		    ret >= 0 && dio->result == dio->size)
1148 			/*
1149 			 * For AIO writes where we have completed the
1150 			 * i/o, we have to mark the aio complete.
1151 			 */
1152 			aio_complete(iocb, ret, 0);
1153 		kfree(dio);
1154 	}
1155 	return ret;
1156 }
1157 
1158 /*
1159  * This is a library function for use by filesystem drivers.
1160  * The locking rules are governed by the dio_lock_type parameter.
1161  *
1162  * DIO_NO_LOCKING (no locking, for raw block device access)
1163  * For writes, i_mutex is not held on entry; it is never taken.
1164  *
1165  * DIO_LOCKING (simple locking for regular files)
1166  * For writes we are called under i_mutex and return with i_mutex held, even
1167  * though it is internally dropped.
1168  * For reads, i_mutex is not held on entry, but it is taken and dropped before
1169  * returning.
1170  *
1171  * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
1172  *	uninitialised data, allowing parallel direct readers and writers)
1173  * For writes we are called without i_mutex, return without it, never touch it.
1174  * For reads we are called under i_mutex and return with i_mutex held, even
1175  * though it may be internally dropped.
1176  *
1177  * Additional i_alloc_sem locking requirements described inline below.
1178  */
1179 ssize_t
1180 __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1181 	struct block_device *bdev, const struct iovec *iov, loff_t offset,
1182 	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
1183 	int dio_lock_type)
1184 {
1185 	int seg;
1186 	size_t size;
1187 	unsigned long addr;
1188 	unsigned blkbits = inode->i_blkbits;
1189 	unsigned bdev_blkbits = 0;
1190 	unsigned blocksize_mask = (1 << blkbits) - 1;
1191 	ssize_t retval = -EINVAL;
1192 	loff_t end = offset;
1193 	struct dio *dio;
1194 	int release_i_mutex = 0;
1195 	int acquire_i_mutex = 0;
1196 
1197 	if (rw & WRITE)
1198 		rw = WRITE_SYNC;
1199 
1200 	if (bdev)
1201 		bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
1202 
1203 	if (offset & blocksize_mask) {
1204 		if (bdev)
1205 			 blkbits = bdev_blkbits;
1206 		blocksize_mask = (1 << blkbits) - 1;
1207 		if (offset & blocksize_mask)
1208 			goto out;
1209 	}
1210 
1211 	/* Check the memory alignment.  Blocks cannot straddle pages */
1212 	for (seg = 0; seg < nr_segs; seg++) {
1213 		addr = (unsigned long)iov[seg].iov_base;
1214 		size = iov[seg].iov_len;
1215 		end += size;
1216 		if ((addr & blocksize_mask) || (size & blocksize_mask))  {
1217 			if (bdev)
1218 				 blkbits = bdev_blkbits;
1219 			blocksize_mask = (1 << blkbits) - 1;
1220 			if ((addr & blocksize_mask) || (size & blocksize_mask))
1221 				goto out;
1222 		}
1223 	}
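
	/*
	 * Example (illustrative): with 4096-byte fs blocks on a device
	 * with 512-byte sectors, an offset of 512 fails the i_blkbits
	 * mask above but passes once blkbits drops to bdev_blkbits == 9;
	 * direct_io_worker() then runs with blkfactor == 3 and performs
	 * the sub-fs-block zeroing in dio_zero_block().
	 */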
1224 
1225 	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1226 	retval = -ENOMEM;
1227 	if (!dio)
1228 		goto out;
1229 
1230 	/*
1231 	 * For block device access DIO_NO_LOCKING is used,
1232 	 *	neither readers nor writers do any locking at all
1233 	 * For regular files using DIO_LOCKING,
1234 	 *	readers need to grab i_mutex and i_alloc_sem
1235 	 *	writers need to grab i_alloc_sem only (i_mutex is already held)
1236 	 * For regular files using DIO_OWN_LOCKING,
1237 	 *	neither readers nor writers take any locks here
1238 	 */
1239 	dio->lock_type = dio_lock_type;
1240 	if (dio_lock_type != DIO_NO_LOCKING) {
1241 		/* watch out for a 0 len io from a tricksy fs */
1242 		if (rw == READ && end > offset) {
1243 			struct address_space *mapping;
1244 
1245 			mapping = iocb->ki_filp->f_mapping;
1246 			if (dio_lock_type != DIO_OWN_LOCKING) {
1247 				mutex_lock(&inode->i_mutex);
1248 				release_i_mutex = 1;
1249 			}
1250 
1251 			retval = filemap_write_and_wait_range(mapping, offset,
1252 							      end - 1);
1253 			if (retval) {
1254 				kfree(dio);
1255 				goto out;
1256 			}
1257 
1258 			if (dio_lock_type == DIO_OWN_LOCKING) {
1259 				mutex_unlock(&inode->i_mutex);
1260 				acquire_i_mutex = 1;
1261 			}
1262 		}
1263 
1264 		if (dio_lock_type == DIO_LOCKING)
1265 			/* lockdep: not the owner will release it */
1266 			down_read_non_owner(&inode->i_alloc_sem);
1267 	}
1268 
1269 	/*
1270 	 * For file-extending writes, updating i_size before data
1271 	 * writeouts complete can expose uninitialized blocks.  So
1272 	 * even for AIO, we need to wait for i/o to complete before
1273 	 * returning in this case.
1274 	 */
1275 	dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
1276 		(end > i_size_read(inode)));
1277 
1278 	retval = direct_io_worker(rw, iocb, inode, iov, offset,
1279 				nr_segs, blkbits, get_block, end_io, dio);
1280 
1281 	if (rw == READ && dio_lock_type == DIO_LOCKING)
1282 		release_i_mutex = 0;
1283 
1284 out:
1285 	if (release_i_mutex)
1286 		mutex_unlock(&inode->i_mutex);
1287 	else if (acquire_i_mutex)
1288 		mutex_lock(&inode->i_mutex);
1289 	return retval;
1290 }
1291 EXPORT_SYMBOL(__blockdev_direct_IO);
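
/*
 * Usage sketch (illustrative, not part of this file): a filesystem's
 * ->direct_IO address_space operation typically reaches this code via
 * the blockdev_direct_IO() wrapper in <linux/fs.h>, which passes
 * DIO_LOCKING.  example_get_block is the hypothetical mapping function
 * sketched earlier.
 */
#if 0
static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				iov, offset, nr_segs, example_get_block,
				NULL);
}
#endif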
1292