1 /*
2  *  linux/fs/nfs/blocklayout/blocklayout.c
3  *
4  *  Module for the NFSv4.1 pNFS block layout driver.
5  *
6  *  Copyright (c) 2006 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Andy Adamson <andros@citi.umich.edu>
10  *  Fred Isaman <iisaman@umich.edu>
11  *
12  * permission is granted to use, copy, create derivative works and
13  * redistribute this software and such derivative works for any purpose,
14  * so long as the name of the university of michigan is not used in
15  * any advertising or publicity pertaining to the use or distribution
16  * of this software without specific, written prior authorization.  if
17  * the above copyright notice or any other identification of the
18  * university of michigan is included in any copy of any portion of
19  * this software, then the disclaimer below must also be included.
20  *
21  * this software is provided as is, without representation from the
22  * university of michigan as to its fitness for any purpose, and without
23  * warranty by the university of michigan of any kind, either express
24  * or implied, including without limitation the implied warranties of
25  * merchantability and fitness for a particular purpose.  the regents
26  * of the university of michigan shall not be liable for any damages,
27  * including special, indirect, incidental, or consequential damages,
28  * with respect to any claim arising out or in connection with the use
29  * of the software, even if it has been or is hereafter advised of the
30  * possibility of such damages.
31  */
32 
33 #include <linux/module.h>
34 #include <linux/init.h>
35 #include <linux/mount.h>
36 #include <linux/namei.h>
37 #include <linux/bio.h>		/* struct bio */
38 #include <linux/buffer_head.h>	/* various write calls */
39 #include <linux/prefetch.h>
40 
41 #include "blocklayout.h"
42 
43 #define NFSDBG_FACILITY	NFSDBG_PNFS_LD
44 
45 MODULE_LICENSE("GPL");
46 MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
47 MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
48 
49 struct dentry *bl_device_pipe;
50 wait_queue_head_t bl_wq;
51 
52 static void print_page(struct page *page)
53 {
54 	dprintk("PRINTPAGE page %p\n", page);
55 	dprintk("	PagePrivate %d\n", PagePrivate(page));
56 	dprintk("	PageUptodate %d\n", PageUptodate(page));
57 	dprintk("	PageError %d\n", PageError(page));
58 	dprintk("	PageDirty %d\n", PageDirty(page));
59 	dprintk("	PageReferenced %d\n", PageReferenced(page));
60 	dprintk("	PageLocked %d\n", PageLocked(page));
61 	dprintk("	PageWriteback %d\n", PageWriteback(page));
62 	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
63 	dprintk("\n");
64 }
65 
66 /* Given the block extent (be) covering isect, determine whether the page
67  * data needs to be initialized.
68  */
69 static int is_hole(struct pnfs_block_extent *be, sector_t isect)
70 {
71 	if (be->be_state == PNFS_BLOCK_NONE_DATA)
72 		return 1;
73 	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
74 		return 0;
75 	else
76 		return !bl_is_sector_init(be->be_inval, isect);
77 }
78 
79 /* Given the block extent (be) covering isect, determine whether the page
80  * data can be written to disk.
81  */
82 static int is_writable(struct pnfs_block_extent *be, sector_t isect)
83 {
84 	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
85 		be->be_state == PNFS_BLOCK_INVALID_DATA);
86 }
87 
88 /* The data we are handed might be spread across several bios.  We need
89  * to track when the last one is finished.
90  */
91 struct parallel_io {
92 	struct kref refcnt;
93 	void (*pnfs_callback) (void *data, int num_se);
94 	void *data;
95 	int bse_count;
96 };
97 
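/*
 * Reference counting: alloc_parallel() starts the refcount at 1 (held by the
 * caller of bl_read_pagelist/bl_write_pagelist); bl_submit_bio() takes an
 * extra reference for every bio it submits, and each bio completion as well
 * as the original caller drop theirs via put_parallel().  Once the last
 * reference is gone, destroy_parallel() fires pnfs_callback with the
 * accumulated bse_count.
 */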
98 static inline struct parallel_io *alloc_parallel(void *data)
99 {
100 	struct parallel_io *rv;
101 
102 	rv  = kmalloc(sizeof(*rv), GFP_NOFS);
103 	if (rv) {
104 		rv->data = data;
105 		kref_init(&rv->refcnt);
106 		rv->bse_count = 0;
107 	}
108 	return rv;
109 }
110 
111 static inline void get_parallel(struct parallel_io *p)
112 {
113 	kref_get(&p->refcnt);
114 }
115 
116 static void destroy_parallel(struct kref *kref)
117 {
118 	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);
119 
120 	dprintk("%s enter\n", __func__);
121 	p->pnfs_callback(p->data, p->bse_count);
122 	kfree(p);
123 }
124 
125 static inline void put_parallel(struct parallel_io *p)
126 {
127 	kref_put(&p->refcnt, destroy_parallel);
128 }
129 
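/*
 * Submit the current bio, if any, taking a parallel_io reference that the
 * bio's completion handler will drop.  Always returns NULL so callers can
 * simply reset their bio pointer.
 */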
130 static struct bio *
131 bl_submit_bio(int rw, struct bio *bio)
132 {
133 	if (bio) {
134 		get_parallel(bio->bi_private);
135 		dprintk("%s submitting %s bio %u@%llu\n", __func__,
136 			rw == READ ? "read" : "write",
137 			bio->bi_size, (unsigned long long)bio->bi_sector);
138 		submit_bio(rw, bio);
139 	}
140 	return NULL;
141 }
142 
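/*
 * Allocate a bio big enough for up to npg pages.  If allocation fails while
 * we are a memory-reclaim task (PF_MEMALLOC), retry with progressively
 * smaller bios.  The starting sector is isect translated from the extent's
 * file offset (be_f_offset) to its offset on the volume (be_v_offset).
 */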
143 static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
144 				     struct pnfs_block_extent *be,
145 				     void (*end_io)(struct bio *, int err),
146 				     struct parallel_io *par)
147 {
148 	struct bio *bio;
149 
150 	npg = min(npg, BIO_MAX_PAGES);
151 	bio = bio_alloc(GFP_NOIO, npg);
152 	if (!bio && (current->flags & PF_MEMALLOC)) {
153 		while (!bio && (npg /= 2))
154 			bio = bio_alloc(GFP_NOIO, npg);
155 	}
156 
157 	if (bio) {
158 		bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
159 		bio->bi_bdev = be->be_mdev;
160 		bio->bi_end_io = end_io;
161 		bio->bi_private = par;
162 	}
163 	return bio;
164 }
165 
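/*
 * Append a whole page to the bio, allocating a new bio first if necessary.
 * If the page does not fit (the bio is full or the block layer refuses it),
 * submit what we have and retry with a fresh bio.
 */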
166 static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
167 				      sector_t isect, struct page *page,
168 				      struct pnfs_block_extent *be,
169 				      void (*end_io)(struct bio *, int err),
170 				      struct parallel_io *par)
171 {
172 retry:
173 	if (!bio) {
174 		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
175 		if (!bio)
176 			return ERR_PTR(-ENOMEM);
177 	}
178 	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
179 		bio = bl_submit_bio(rw, bio);
180 		goto retry;
181 	}
182 	return bio;
183 }
184 
185 /* This is basically copied from mpage_end_io_read */
186 static void bl_end_io_read(struct bio *bio, int err)
187 {
188 	struct parallel_io *par = bio->bi_private;
189 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
190 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
191 	struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;
192 
193 	do {
194 		struct page *page = bvec->bv_page;
195 
196 		if (--bvec >= bio->bi_io_vec)
197 			prefetchw(&bvec->bv_page->flags);
198 		if (uptodate)
199 			SetPageUptodate(page);
200 	} while (bvec >= bio->bi_io_vec);
201 	if (!uptodate) {
202 		if (!rdata->pnfs_error)
203 			rdata->pnfs_error = -EIO;
204 		pnfs_set_lo_fail(rdata->lseg);
205 	}
206 	bio_put(bio);
207 	put_parallel(par);
208 }
209 
210 static void bl_read_cleanup(struct work_struct *work)
211 {
212 	struct rpc_task *task;
213 	struct nfs_read_data *rdata;
214 	dprintk("%s enter\n", __func__);
215 	task = container_of(work, struct rpc_task, u.tk_work);
216 	rdata = container_of(task, struct nfs_read_data, task);
217 	pnfs_ld_read_done(rdata);
218 }
219 
220 static void
221 bl_end_par_io_read(void *data, int unused)
222 {
223 	struct nfs_read_data *rdata = data;
224 
225 	rdata->task.tk_status = rdata->pnfs_error;
226 	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
227 	schedule_work(&rdata->task.u.tk_work);
228 }
229 
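/*
 * Read rdata->args.pages straight from the block device(s) named by the
 * layout.  Pages covered by a hole with no copy-on-write source are zeroed
 * in place; every other page is added to a bio aimed at the extent (or
 * cow_read extent) that maps it.  We fall back to the MDS only if the
 * parallel_io tracker cannot be allocated; after that point errors are
 * reported through pnfs_error.
 */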
230 static enum pnfs_try_status
231 bl_read_pagelist(struct nfs_read_data *rdata)
232 {
233 	int i, hole;
234 	struct bio *bio = NULL;
235 	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
236 	sector_t isect, extent_length = 0;
237 	struct parallel_io *par;
238 	loff_t f_offset = rdata->args.offset;
239 	size_t count = rdata->args.count;
240 	struct page **pages = rdata->args.pages;
241 	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
242 
243 	dprintk("%s enter nr_pages %u offset %lld count %Zd\n", __func__,
244 	       rdata->npages, f_offset, count);
245 
246 	par = alloc_parallel(rdata);
247 	if (!par)
248 		goto use_mds;
249 	par->pnfs_callback = bl_end_par_io_read;
250 	/* At this point, we can no longer jump to use_mds */
251 
252 	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
253 	/* Code assumes extents are page-aligned */
254 	for (i = pg_index; i < rdata->npages; i++) {
255 		if (!extent_length) {
256 			/* We've used up the previous extent */
257 			bl_put_extent(be);
258 			bl_put_extent(cow_read);
259 			bio = bl_submit_bio(READ, bio);
260 			/* Get the next one */
261 			be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
262 					     isect, &cow_read);
263 			if (!be) {
264 				rdata->pnfs_error = -EIO;
265 				goto out;
266 			}
267 			extent_length = be->be_length -
268 				(isect - be->be_f_offset);
269 			if (cow_read) {
270 				sector_t cow_length = cow_read->be_length -
271 					(isect - cow_read->be_f_offset);
272 				extent_length = min(extent_length, cow_length);
273 			}
274 		}
275 		hole = is_hole(be, isect);
276 		if (hole && !cow_read) {
277 			bio = bl_submit_bio(READ, bio);
278 			/* Fill hole w/ zeroes w/o accessing device */
279 			dprintk("%s Zeroing page for hole\n", __func__);
280 			zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
281 			print_page(pages[i]);
282 			SetPageUptodate(pages[i]);
283 		} else {
284 			struct pnfs_block_extent *be_read;
285 
286 			be_read = (hole && cow_read) ? cow_read : be;
287 			bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
288 						 isect, pages[i], be_read,
289 						 bl_end_io_read, par);
290 			if (IS_ERR(bio)) {
291 				rdata->pnfs_error = PTR_ERR(bio);
292 				bio = NULL;
293 				goto out;
294 			}
295 		}
296 		isect += PAGE_CACHE_SECTORS;
297 		extent_length -= PAGE_CACHE_SECTORS;
298 	}
299 	if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
300 		rdata->res.eof = 1;
301 		rdata->res.count = rdata->inode->i_size - f_offset;
302 	} else {
303 		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
304 	}
305 out:
306 	bl_put_extent(be);
307 	bl_put_extent(cow_read);
308 	bl_submit_bio(READ, bio);
309 	put_parallel(par);
310 	return PNFS_ATTEMPTED;
311 
312  use_mds:
313 	dprintk("Giving up and using normal NFS\n");
314 	return PNFS_NOT_ATTEMPTED;
315 }
316 
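/*
 * Walk the extents backing the page-aligned range [offset, offset + count).
 * For each INVALID extent, consume one of the preallocated short extents and
 * mark the written sectors so they are reported in LAYOUTCOMMIT.
 */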
317 static void mark_extents_written(struct pnfs_block_layout *bl,
318 				 __u64 offset, __u32 count)
319 {
320 	sector_t isect, end;
321 	struct pnfs_block_extent *be;
322 	struct pnfs_block_short_extent *se;
323 
324 	dprintk("%s(%llu, %u)\n", __func__, offset, count);
325 	if (count == 0)
326 		return;
327 	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
328 	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
329 	end >>= SECTOR_SHIFT;
330 	while (isect < end) {
331 		sector_t len;
332 		be = bl_find_get_extent(bl, isect, NULL);
333 		BUG_ON(!be); /* FIXME */
334 		len = min(end, be->be_f_offset + be->be_length) - isect;
335 		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
336 			se = bl_pop_one_short_extent(be->be_inval);
337 			BUG_ON(!se);
338 			bl_mark_for_commit(be, isect, len, se);
339 		}
340 		isect += len;
341 		bl_put_extent(be);
342 	}
343 }
344 
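/*
 * Completion for bios built from zeroing pages (bl_find_get_zeroing_page):
 * each page came from the page cache with writeback set, so end writeback
 * and drop the page reference here rather than in the generic write path.
 */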
345 static void bl_end_io_write_zero(struct bio *bio, int err)
346 {
347 	struct parallel_io *par = bio->bi_private;
348 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
349 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
350 	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
351 
352 	do {
353 		struct page *page = bvec->bv_page;
354 
355 		if (--bvec >= bio->bi_io_vec)
356 			prefetchw(&bvec->bv_page->flags);
357 		/* This is the zeroing page we added */
358 		end_page_writeback(page);
359 		page_cache_release(page);
360 	} while (bvec >= bio->bi_io_vec);
361 
362 	if (unlikely(!uptodate)) {
363 		if (!wdata->pnfs_error)
364 			wdata->pnfs_error = -EIO;
365 		pnfs_set_lo_fail(wdata->lseg);
366 	}
367 	bio_put(bio);
368 	put_parallel(par);
369 }
370 
371 static void bl_end_io_write(struct bio *bio, int err)
372 {
373 	struct parallel_io *par = bio->bi_private;
374 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
375 	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
376 
377 	if (!uptodate) {
378 		if (!wdata->pnfs_error)
379 			wdata->pnfs_error = -EIO;
380 		pnfs_set_lo_fail(wdata->lseg);
381 	}
382 	bio_put(bio);
383 	put_parallel(par);
384 }
385 
386 /* Function scheduled for call during bl_end_par_io_write;
387  * it marks sectors as written and extends the commitlist.
388  */
389 static void bl_write_cleanup(struct work_struct *work)
390 {
391 	struct rpc_task *task;
392 	struct nfs_write_data *wdata;
393 	dprintk("%s enter\n", __func__);
394 	task = container_of(work, struct rpc_task, u.tk_work);
395 	wdata = container_of(task, struct nfs_write_data, task);
396 	if (likely(!wdata->pnfs_error)) {
397 		/* Marks for LAYOUTCOMMIT */
398 		mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
399 				     wdata->args.offset, wdata->args.count);
400 	}
401 	pnfs_ld_write_done(wdata);
402 }
403 
404 /* Called when last of bios associated with a bl_write_pagelist call finishes */
405 static void bl_end_par_io_write(void *data, int num_se)
406 {
407 	struct nfs_write_data *wdata = data;
408 
409 	if (unlikely(wdata->pnfs_error)) {
410 		bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
411 					num_se);
412 	}
413 
414 	wdata->task.tk_status = wdata->pnfs_error;
415 	wdata->verf.committed = NFS_FILE_SYNC;
416 	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
417 	schedule_work(&wdata->task.u.tk_work);
418 }
419 
420 /* FIXME STUB - mark intersection of layout and page as bad, so it is not
421  * used again.
422  */
423 static void mark_bad_read(void)
424 {
425 	return;
426 }
427 
428 /*
429  * map_block:  map a requested I/O block (isect) into an offset in the LVM
430  * block_device
431  */
432 static void
433 map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
434 {
435 	dprintk("%s enter be=%p\n", __func__, be);
436 
437 	set_buffer_mapped(bh);
438 	bh->b_bdev = be->be_mdev;
439 	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
440 	    (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);
441 
442 	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
443 		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
444 		bh->b_size);
445 	return;
446 }
447 
448 /* Given an unmapped page, either zero it or read it in for COW.  The page
449  * is locked by the caller.
450  */
451 static int
452 init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
453 {
454 	struct buffer_head *bh = NULL;
455 	int ret = 0;
456 	sector_t isect;
457 
458 	dprintk("%s enter, %p\n", __func__, page);
459 	BUG_ON(PageUptodate(page));
460 	if (!cow_read) {
461 		zero_user_segment(page, 0, PAGE_SIZE);
462 		SetPageUptodate(page);
463 		goto cleanup;
464 	}
465 
466 	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
467 	if (!bh) {
468 		ret = -ENOMEM;
469 		goto cleanup;
470 	}
471 
472 	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
473 	map_block(bh, isect, cow_read);
474 	if (!bh_uptodate_or_lock(bh))
475 		ret = bh_submit_read(bh);
476 	if (ret)
477 		goto cleanup;
478 	SetPageUptodate(page);
479 
480 cleanup:
481 	bl_put_extent(cow_read);
482 	if (bh)
483 		free_buffer_head(bh);
484 	if (ret) {
485 		/* Need to mark layout with bad read...should now
486 		 * just use nfs4 for reads and writes.
487 		 */
488 		mark_bad_read();
489 	}
490 	return ret;
491 }
492 
493 /* Find or create a zeroing page marked as being under writeback.
494  * Return ERR_PTR on error, NULL to indicate that this page should be
495  * skipped, or the page itself to indicate that it should be written out.
496  */
497 static struct page *
498 bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
499 			struct pnfs_block_extent *cow_read)
500 {
501 	struct page *page;
502 	int locked = 0;
503 	page = find_get_page(inode->i_mapping, index);
504 	if (page)
505 		goto check_page;
506 
507 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
508 	if (unlikely(!page)) {
509 		dprintk("%s oom\n", __func__);
510 		return ERR_PTR(-ENOMEM);
511 	}
512 	locked = 1;
513 
514 check_page:
515 	/* PageDirty: someone else will write this page out
516 	 * PageWriteback: someone else is writing this page out
517 	 * PageUptodate: it has been read in before
518 	 */
519 	if (PageDirty(page) || PageWriteback(page)) {
520 		print_page(page);
521 		if (locked)
522 			unlock_page(page);
523 		page_cache_release(page);
524 		return NULL;
525 	}
526 
527 	if (!locked) {
528 		lock_page(page);
529 		locked = 1;
530 		goto check_page;
531 	}
532 	if (!PageUptodate(page)) {
533 		/* New page: read it in or zero it */
534 		init_page_for_write(page, cow_read);
535 	}
536 	set_page_writeback(page);
537 	unlock_page(page);
538 
539 	return page;
540 }
541 
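/*
 * Write wdata->args.pages through the block layout in three passes: zero the
 * not-yet-initialized pages of the first INVALID block that precede the
 * write, write the requested pages themselves, then zero the trailing pages
 * of the last INVALID block.  Setup failures fall back to the MDS; once bios
 * are being built, errors are reported through pnfs_error instead.
 */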
542 static enum pnfs_try_status
543 bl_write_pagelist(struct nfs_write_data *wdata, int sync)
544 {
545 	int i, ret, npg_zero, pg_index, last = 0;
546 	struct bio *bio = NULL;
547 	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
548 	sector_t isect, last_isect = 0, extent_length = 0;
549 	struct parallel_io *par;
550 	loff_t offset = wdata->args.offset;
551 	size_t count = wdata->args.count;
552 	struct page **pages = wdata->args.pages;
553 	struct page *page;
554 	pgoff_t index;
555 	u64 temp;
556 	int npg_per_block =
557 	    NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
558 
559 	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
560 	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
561 	 * We want to write each of them; if there is an error, we set
562 	 * pnfs_error so the write is redone using normal NFS.
563 	 */
564 	par = alloc_parallel(wdata);
565 	if (!par)
566 		goto out_mds;
567 	par->pnfs_callback = bl_end_par_io_write;
568 	/* At this point, have to be more careful with error handling */
569 
570 	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
571 	be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
572 	if (!be || !is_writable(be, isect)) {
573 		dprintk("%s no matching extents!\n", __func__);
574 		goto out_mds;
575 	}
576 
577 	/* First page inside INVALID extent */
578 	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
579 		if (likely(!bl_push_one_short_extent(be->be_inval)))
580 			par->bse_count++;
581 		else
582 			goto out_mds;
583 		temp = offset >> PAGE_CACHE_SHIFT;
584 		npg_zero = do_div(temp, npg_per_block);
585 		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
586 				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
587 		extent_length = be->be_length - (isect - be->be_f_offset);
588 
589 fill_invalid_ext:
590 		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
591 		for (; npg_zero > 0; npg_zero--) {
592 			if (bl_is_sector_init(be->be_inval, isect)) {
593 				dprintk("isect %llu already init\n",
594 					(unsigned long long)isect);
595 				goto next_page;
596 			}
597 			/* page ref released in bl_end_io_write_zero */
598 			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
599 			dprintk("%s zero %dth page: index %lu isect %llu\n",
600 				__func__, npg_zero, index,
601 				(unsigned long long)isect);
602 			page = bl_find_get_zeroing_page(wdata->inode, index,
603 							cow_read);
604 			if (unlikely(IS_ERR(page))) {
605 				wdata->pnfs_error = PTR_ERR(page);
606 				goto out;
607 			} else if (page == NULL)
608 				goto next_page;
609 
610 			ret = bl_mark_sectors_init(be->be_inval, isect,
611 						       PAGE_CACHE_SECTORS);
612 			if (unlikely(ret)) {
613 				dprintk("%s bl_mark_sectors_init fail %d\n",
614 					__func__, ret);
615 				end_page_writeback(page);
616 				page_cache_release(page);
617 				wdata->pnfs_error = ret;
618 				goto out;
619 			}
620 			if (likely(!bl_push_one_short_extent(be->be_inval)))
621 				par->bse_count++;
622 			else {
623 				end_page_writeback(page);
624 				page_cache_release(page);
625 				wdata->pnfs_error = -ENOMEM;
626 				goto out;
627 			}
628 			/* FIXME: This should be done in bi_end_io */
629 			mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
630 					     page->index << PAGE_CACHE_SHIFT,
631 					     PAGE_CACHE_SIZE);
632 
633 			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
634 						 isect, page, be,
635 						 bl_end_io_write_zero, par);
636 			if (IS_ERR(bio)) {
637 				wdata->pnfs_error = PTR_ERR(bio);
638 				bio = NULL;
639 				goto out;
640 			}
641 next_page:
642 			isect += PAGE_CACHE_SECTORS;
643 			extent_length -= PAGE_CACHE_SECTORS;
644 		}
645 		if (last)
646 			goto write_done;
647 	}
648 	bio = bl_submit_bio(WRITE, bio);
649 
650 	/* Middle pages */
651 	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
652 	for (i = pg_index; i < wdata->npages; i++) {
653 		if (!extent_length) {
654 			/* We've used up the previous extent */
655 			bl_put_extent(be);
656 			bio = bl_submit_bio(WRITE, bio);
657 			/* Get the next one */
658 			be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
659 					     isect, NULL);
660 			if (!be || !is_writable(be, isect)) {
661 				wdata->pnfs_error = -EINVAL;
662 				goto out;
663 			}
664 			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
665 				if (likely(!bl_push_one_short_extent(
666 								be->be_inval)))
667 					par->bse_count++;
668 				else {
669 					wdata->pnfs_error = -ENOMEM;
670 					goto out;
671 				}
672 			}
673 			extent_length = be->be_length -
674 			    (isect - be->be_f_offset);
675 		}
676 		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
677 			ret = bl_mark_sectors_init(be->be_inval, isect,
678 						       PAGE_CACHE_SECTORS);
679 			if (unlikely(ret)) {
680 				dprintk("%s bl_mark_sectors_init fail %d\n",
681 					__func__, ret);
682 				wdata->pnfs_error = ret;
683 				goto out;
684 			}
685 		}
686 		bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
687 					 isect, pages[i], be,
688 					 bl_end_io_write, par);
689 		if (IS_ERR(bio)) {
690 			wdata->pnfs_error = PTR_ERR(bio);
691 			bio = NULL;
692 			goto out;
693 		}
694 		isect += PAGE_CACHE_SECTORS;
695 		last_isect = isect;
696 		extent_length -= PAGE_CACHE_SECTORS;
697 	}
698 
699 	/* Last page inside INVALID extent */
700 	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
701 		bio = bl_submit_bio(WRITE, bio);
702 		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
703 		npg_zero = npg_per_block - do_div(temp, npg_per_block);
704 		if (npg_zero < npg_per_block) {
705 			last = 1;
706 			goto fill_invalid_ext;
707 		}
708 	}
709 
710 write_done:
711 	wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
712 	if (count < wdata->res.count) {
713 		wdata->res.count = count;
714 	}
715 out:
716 	bl_put_extent(be);
717 	bl_submit_bio(WRITE, bio);
718 	put_parallel(par);
719 	return PNFS_ATTEMPTED;
720 out_mds:
721 	bl_put_extent(be);
722 	kfree(par);
723 	return PNFS_NOT_ATTEMPTED;
724 }
725 
726 /* FIXME - range ignored */
727 static void
728 release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
729 {
730 	int i;
731 	struct pnfs_block_extent *be;
732 
733 	spin_lock(&bl->bl_ext_lock);
734 	for (i = 0; i < EXTENT_LISTS; i++) {
735 		while (!list_empty(&bl->bl_extents[i])) {
736 			be = list_first_entry(&bl->bl_extents[i],
737 					      struct pnfs_block_extent,
738 					      be_node);
739 			list_del(&be->be_node);
740 			bl_put_extent(be);
741 		}
742 	}
743 	spin_unlock(&bl->bl_ext_lock);
744 }
745 
746 static void
747 release_inval_marks(struct pnfs_inval_markings *marks)
748 {
749 	struct pnfs_inval_tracking *pos, *temp;
750 	struct pnfs_block_short_extent *se, *stemp;
751 
752 	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
753 		list_del(&pos->it_link);
754 		kfree(pos);
755 	}
756 
757 	list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
758 		list_del(&se->bse_node);
759 		kfree(se);
760 	}
761 	return;
762 }
763 
764 static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
765 {
766 	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
767 
768 	dprintk("%s enter\n", __func__);
769 	release_extents(bl, NULL);
770 	release_inval_marks(&bl->bl_inval);
771 	kfree(bl);
772 }
773 
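/*
 * Allocate and initialize the per-inode block layout header; bl_blocksize
 * and the invalid-sector tracking are derived from the server-advertised
 * pnfs_blksize.
 */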
774 static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
775 						   gfp_t gfp_flags)
776 {
777 	struct pnfs_block_layout *bl;
778 
779 	dprintk("%s enter\n", __func__);
780 	bl = kzalloc(sizeof(*bl), gfp_flags);
781 	if (!bl)
782 		return NULL;
783 	spin_lock_init(&bl->bl_ext_lock);
784 	INIT_LIST_HEAD(&bl->bl_extents[0]);
785 	INIT_LIST_HEAD(&bl->bl_extents[1]);
786 	INIT_LIST_HEAD(&bl->bl_commit);
787 	INIT_LIST_HEAD(&bl->bl_committing);
788 	bl->bl_count = 0;
789 	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
790 	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
791 	return &bl->bl_layout;
792 }
793 
794 static void bl_free_lseg(struct pnfs_layout_segment *lseg)
795 {
796 	dprintk("%s enter\n", __func__);
797 	kfree(lseg);
798 }
799 
800 /* We pretty much ignore lseg, and store all data layout-wide, so that
801  * we can correctly merge.
802  */
803 static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
804 						 struct nfs4_layoutget_res *lgr,
805 						 gfp_t gfp_flags)
806 {
807 	struct pnfs_layout_segment *lseg;
808 	int status;
809 
810 	dprintk("%s enter\n", __func__);
811 	lseg = kzalloc(sizeof(*lseg), gfp_flags);
812 	if (!lseg)
813 		return ERR_PTR(-ENOMEM);
814 	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
815 	if (status) {
816 		/* We don't want to call the full-blown bl_free_lseg,
817 		 * since on error extents were not touched.
818 		 */
819 		kfree(lseg);
820 		return ERR_PTR(status);
821 	}
822 	return lseg;
823 }
824 
825 static void
826 bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
827 		       const struct nfs4_layoutcommit_args *arg)
828 {
829 	dprintk("%s enter\n", __func__);
830 	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
831 }
832 
833 static void
834 bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
835 {
836 	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;
837 
838 	dprintk("%s enter\n", __func__);
839 	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
840 }
841 
842 static void free_blk_mountid(struct block_mount_id *mid)
843 {
844 	if (mid) {
845 		struct pnfs_block_dev *dev, *tmp;
846 
847 		/* No need to take bm_lock as we are the last user freeing bm_devlist */
848 		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
849 			list_del(&dev->bm_node);
850 			bl_free_block_dev(dev);
851 		}
852 		kfree(mid);
853 	}
854 }
855 
856 /* This is mostly copied from the filelayout's get_device_info function.
857  * It seems much of this should be at the generic pnfs level.
858  */
859 static struct pnfs_block_dev *
860 nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
861 			struct nfs4_deviceid *d_id)
862 {
863 	struct pnfs_device *dev;
864 	struct pnfs_block_dev *rv;
865 	u32 max_resp_sz;
866 	int max_pages;
867 	struct page **pages = NULL;
868 	int i, rc;
869 
870 	/*
871 	 * Use the session max response size as the basis for setting
872 	 * GETDEVICEINFO's maxcount
873 	 */
874 	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
875 	max_pages = max_resp_sz >> PAGE_SHIFT;
876 	dprintk("%s max_resp_sz %u max_pages %d\n",
877 		__func__, max_resp_sz, max_pages);
878 
879 	dev = kmalloc(sizeof(*dev), GFP_NOFS);
880 	if (!dev) {
881 		dprintk("%s kmalloc failed\n", __func__);
882 		return ERR_PTR(-ENOMEM);
883 	}
884 
885 	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
886 	if (pages == NULL) {
887 		kfree(dev);
888 		return ERR_PTR(-ENOMEM);
889 	}
890 	for (i = 0; i < max_pages; i++) {
891 		pages[i] = alloc_page(GFP_NOFS);
892 		if (!pages[i]) {
893 			rv = ERR_PTR(-ENOMEM);
894 			goto out_free;
895 		}
896 	}
897 
898 	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
899 	dev->layout_type = LAYOUT_BLOCK_VOLUME;
900 	dev->pages = pages;
901 	dev->pgbase = 0;
902 	dev->pglen = PAGE_SIZE * max_pages;
903 	dev->mincount = 0;
904 
905 	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
906 	rc = nfs4_proc_getdeviceinfo(server, dev);
907 	dprintk("%s getdevice info returns %d\n", __func__, rc);
908 	if (rc) {
909 		rv = ERR_PTR(rc);
910 		goto out_free;
911 	}
912 
913 	rv = nfs4_blk_decode_device(server, dev);
914  out_free:
915 	for (i = 0; i < max_pages; i++)
916 		__free_page(pages[i]);
917 	kfree(pages);
918 	kfree(dev);
919 	return rv;
920 }
921 
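/*
 * Mount-time setup: insist on a server-provided pnfs_blksize, then loop over
 * GETDEVICELIST and fetch/decode each device with GETDEVICEINFO, collecting
 * the results on the per-server block_mount_id device list.
 */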
922 static int
923 bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
924 {
925 	struct block_mount_id *b_mt_id = NULL;
926 	struct pnfs_devicelist *dlist = NULL;
927 	struct pnfs_block_dev *bdev;
928 	LIST_HEAD(block_disklist);
929 	int status, i;
930 
931 	dprintk("%s enter\n", __func__);
932 
933 	if (server->pnfs_blksize == 0) {
934 		dprintk("%s Server did not return blksize\n", __func__);
935 		return -EINVAL;
936 	}
937 	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
938 	if (!b_mt_id) {
939 		status = -ENOMEM;
940 		goto out_error;
941 	}
942 	/* Initialize nfs4 block layout mount id */
943 	spin_lock_init(&b_mt_id->bm_lock);
944 	INIT_LIST_HEAD(&b_mt_id->bm_devlist);
945 
946 	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
947 	if (!dlist) {
948 		status = -ENOMEM;
949 		goto out_error;
950 	}
951 	dlist->eof = 0;
952 	while (!dlist->eof) {
953 		status = nfs4_proc_getdevicelist(server, fh, dlist);
954 		if (status)
955 			goto out_error;
956 		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
957 			__func__, dlist->num_devs, dlist->eof);
958 		for (i = 0; i < dlist->num_devs; i++) {
959 			bdev = nfs4_blk_get_deviceinfo(server, fh,
960 						       &dlist->dev_id[i]);
961 			if (IS_ERR(bdev)) {
962 				status = PTR_ERR(bdev);
963 				goto out_error;
964 			}
965 			spin_lock(&b_mt_id->bm_lock);
966 			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
967 			spin_unlock(&b_mt_id->bm_lock);
968 		}
969 	}
970 	dprintk("%s SUCCESS\n", __func__);
971 	server->pnfs_ld_data = b_mt_id;
972 
973  out_return:
974 	kfree(dlist);
975 	return status;
976 
977  out_error:
978 	free_blk_mountid(b_mt_id);
979 	goto out_return;
980 }
981 
982 static int
983 bl_clear_layoutdriver(struct nfs_server *server)
984 {
985 	struct block_mount_id *b_mt_id = server->pnfs_ld_data;
986 
987 	dprintk("%s enter\n", __func__);
988 	free_blk_mountid(b_mt_id);
989 	dprintk("%s RETURNS\n", __func__);
990 	return 0;
991 }
992 
993 static const struct nfs_pageio_ops bl_pg_read_ops = {
994 	.pg_init = pnfs_generic_pg_init_read,
995 	.pg_test = pnfs_generic_pg_test,
996 	.pg_doio = pnfs_generic_pg_readpages,
997 };
998 
999 static const struct nfs_pageio_ops bl_pg_write_ops = {
1000 	.pg_init = pnfs_generic_pg_init_write,
1001 	.pg_test = pnfs_generic_pg_test,
1002 	.pg_doio = pnfs_generic_pg_writepages,
1003 };
1004 
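/*
 * The operations vector that plugs the block layout into the generic pNFS
 * client; registered and unregistered by the module init/exit code below.
 */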
1005 static struct pnfs_layoutdriver_type blocklayout_type = {
1006 	.id				= LAYOUT_BLOCK_VOLUME,
1007 	.name				= "LAYOUT_BLOCK_VOLUME",
1008 	.read_pagelist			= bl_read_pagelist,
1009 	.write_pagelist			= bl_write_pagelist,
1010 	.alloc_layout_hdr		= bl_alloc_layout_hdr,
1011 	.free_layout_hdr		= bl_free_layout_hdr,
1012 	.alloc_lseg			= bl_alloc_lseg,
1013 	.free_lseg			= bl_free_lseg,
1014 	.encode_layoutcommit		= bl_encode_layoutcommit,
1015 	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
1016 	.set_layoutdriver		= bl_set_layoutdriver,
1017 	.clear_layoutdriver		= bl_clear_layoutdriver,
1018 	.pg_read_ops			= &bl_pg_read_ops,
1019 	.pg_write_ops			= &bl_pg_write_ops,
1020 };
1021 
1022 static const struct rpc_pipe_ops bl_upcall_ops = {
1023 	.upcall		= rpc_pipe_generic_upcall,
1024 	.downcall	= bl_pipe_downcall,
1025 	.destroy_msg	= bl_pipe_destroy_msg,
1026 };
1027 
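/*
 * Module init: register the layout driver with generic pNFS and create the
 * rpc_pipefs pipe ("blocklayout", bl_device_pipe) whose upcall/downcall
 * traffic is handled by bl_upcall_ops.
 */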
1028 static int __init nfs4blocklayout_init(void)
1029 {
1030 	struct vfsmount *mnt;
1031 	struct path path;
1032 	int ret;
1033 
1034 	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);
1035 
1036 	ret = pnfs_register_layoutdriver(&blocklayout_type);
1037 	if (ret)
1038 		goto out;
1039 
1040 	init_waitqueue_head(&bl_wq);
1041 
1042 	mnt = rpc_get_mount();
1043 	if (IS_ERR(mnt)) {
1044 		ret = PTR_ERR(mnt);
1045 		goto out_remove;
1046 	}
1047 
1048 	ret = vfs_path_lookup(mnt->mnt_root,
1049 			      mnt,
1050 			      NFS_PIPE_DIRNAME, 0, &path);
1051 	if (ret)
1052 		goto out_putrpc;
1053 
1054 	bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
1055 				    &bl_upcall_ops, 0);
1056 	path_put(&path);
1057 	if (IS_ERR(bl_device_pipe)) {
1058 		ret = PTR_ERR(bl_device_pipe);
1059 		goto out_putrpc;
1060 	}
1061 out:
1062 	return ret;
1063 
1064 out_putrpc:
1065 	rpc_put_mount();
1066 out_remove:
1067 	pnfs_unregister_layoutdriver(&blocklayout_type);
1068 	return ret;
1069 }
1070 
1071 static void __exit nfs4blocklayout_exit(void)
1072 {
1073 	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
1074 	       __func__);
1075 
1076 	pnfs_unregister_layoutdriver(&blocklayout_type);
1077 	rpc_unlink(bl_device_pipe);
1078 	rpc_put_mount();
1079 }
1080 
1081 MODULE_ALIAS("nfs-layouttype4-3");
1082 
1083 module_init(nfs4blocklayout_init);
1084 module_exit(nfs4blocklayout_exit);
1085