/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

struct dentry *bl_device_pipe;
wait_queue_head_t bl_wq;

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	struct rpc_call_ops call_ops;
	void (*pnfs_callback) (void *data);
	void *data;
};
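
/*
 * Lifecycle (per the code below): alloc_parallel() starts the refcount
 * at 1; bl_submit_bio() takes one extra reference per submitted bio,
 * which is dropped again in each bio's end_io.  The pagelist functions
 * drop the initial reference themselves, so pnfs_callback runs exactly
 * once, after both submission and all bio completions are finished.
 */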

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

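/*
 * Submit the current bio, if any, and return NULL so callers can write
 * "bio = bl_submit_bio(rw, bio);" to flush and reset their bio pointer
 * in one step.  The get_parallel() reference taken here pairs with the
 * put_parallel() in the bio's end_io handler.
 */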
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write",
			bio->bi_size, (unsigned long long)bio->bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio)
		return NULL;

	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
	bio->bi_bdev = be->be_mdev;
	bio->bi_end_io = end_io;
	bio->bi_private = par;
	return bio;
}

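/*
 * Add one page to the bio, allocating a new bio first if needed.  When
 * the current bio is full (bio_add_page() cannot take a whole page),
 * submit it and retry with a fresh one.
 */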
static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;

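	/* Walk the bvec array back to front, as mpage_end_io_read does;
	 * prefetchw() warms the next page's flags word before
	 * SetPageUptodate() touches it.
	 */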
	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate)
			SetPageUptodate(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!rdata->pnfs_error)
			rdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(rdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);
	pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_read_data *rdata = data;

	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}

/* We don't want the normal .rpc_call_done callback used, so we replace it
 * with this stub.
 */
static void bl_rpc_do_nothing(struct rpc_task *task, void *calldata)
{
	return;
}

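/*
 * Read path: walk the request page by page, switching extents as each
 * is used up, and batch contiguous pages into bios.  Falling back to
 * the MDS (PNFS_NOT_ATTEMPTED) is only possible before par is wired up;
 * after that, errors are reported through pnfs_error and completion
 * goes through bl_end_par_io_read().
 */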
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t count = rdata->args.count;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;

	dprintk("%s enter nr_pages %u offset %lld count %Zu\n", __func__,
	       rdata->npages, f_offset, count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->call_ops = *rdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
					     isect, &cow_read);
			if (!be) {
				rdata->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}
		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par);
			if (IS_ERR(bio)) {
				rdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += PAGE_CACHE_SECTORS;
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = rdata->inode->i_size - f_offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

 use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

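/*
 * Round the byte range out to page boundaries and convert it to
 * 512-byte sectors, then mark every INVALID extent in the range for
 * commit so it is reported in the next LAYOUTCOMMIT.
 */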
static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;
		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA)
			bl_mark_for_commit(be, isect, len); /* What if this fails? */
		isect += len;
		bl_put_extent(be);
	}
}

static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		/* This is the zeroing page we added */
		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Write-side counterpart of bl_end_io_read; no per-page bvec walk is needed */
static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;

	if (!uptodate) {
		if (!wdata->pnfs_error)
			wdata->pnfs_error = -EIO;
		pnfs_set_lo_fail(wdata->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_write_data *wdata;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_write_data, task);
	if (!wdata->pnfs_error) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when the last of the bios associated with a bl_write_pagelist
 * call finishes.
 */
static void bl_end_par_io_write(void *data)
{
	struct nfs_write_data *wdata = data;

	wdata->task.tk_status = 0;
	wdata->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}

/* FIXME STUB - mark the intersection of layout and page as bad, so it is
 * not used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
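/* The file-relative sector is rebased into the volume (isect -
 * be_f_offset + be_v_offset), then scaled from 512-byte sectors to the
 * device's block size via the i_blkbits shift.
 */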
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
	    (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zu\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}

/* Given an unmapped page, zero it (or read it in for COW); the page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	bl_put_extent(cow_read);
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

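/*
 * Write path.  The server's block size can exceed the page size, so for
 * an INVALID extent the untouched pages of the surrounding block(s)
 * must be initialized (zeroed, or read in when a COW extent backs them)
 * before the extent may be committed: the fill_invalid_ext loop handles
 * the pages before the write, and is re-entered with "last" set for the
 * pages after it.
 */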
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
	/* At this point, wdata->pages is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error, set pnfs_error
	 * so the write is redone through normal NFS.
	 */
	par = alloc_parallel(wdata);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->call_ops = *wdata->mds_ops;
	par->call_ops.rpc_call_done = bl_rpc_do_nothing;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		wdata->pnfs_error = -EINVAL;
		goto out;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

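	/* First pass: npg_zero counts the uninitialized pages of the block
	 * that precede the write; when re-entered with "last" set, it counts
	 * the pages that follow it.  Pages already initialized, dirty, or
	 * under writeback are skipped below.
	 */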
fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (; npg_zero > 0; npg_zero--) {
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page = find_or_create_page(wdata->inode->i_mapping,
						   index, GFP_NOFS);
			if (!page) {
				dprintk("%s oom\n", __func__);
				wdata->pnfs_error = -ENOMEM;
				goto out;
			}

			/* PageDirty: Other will write this out
			 * PageWriteback: Other is writing this out
			 * PageUptodate: It was read before
			 * sector_initialized: already written out
			 */
			if (PageDirty(page) || PageWriteback(page)) {
				print_page(page);
				unlock_page(page);
				page_cache_release(page);
				goto next_page;
			}
			if (!PageUptodate(page)) {
				/* New page, readin or zero it */
				init_page_for_write(page, cow_read);
			}
			set_page_writeback(page);
			unlock_page(page);

			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS,
						       NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				wdata->pnfs_error = ret;
				goto out;
			}
			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				wdata->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
					     isect, NULL);
			if (!be || !is_writable(be, isect)) {
				wdata->pnfs_error = -EINVAL;
				goto out;
			}
			extent_length = be->be_length -
			    (isect - be->be_f_offset);
		}
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS,
						       NULL);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				wdata->pnfs_error = ret;
				goto out;
			}
		}
		bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par);
		if (IS_ERR(bio)) {
			wdata->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = (last_isect << SECTOR_SHIFT) - offset;
	if (count < wdata->res.count)
		wdata->res.count = count;
out:
	bl_put_extent(be);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout-wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev;
		spin_lock(&mid->bm_lock);
		while (!list_empty(&mid->bm_devlist)) {
			dev = list_first_entry(&mid->bm_devlist,
					       struct pnfs_block_dev,
					       bm_node);
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		spin_unlock(&mid->bm_lock);
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout's get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
 out_free:
	/* pages[] may be only partially populated if alloc_page() failed */
	for (i = 0; i < max_pages; i++)
		if (pages[i])
			__free_page(pages[i]);
	kfree(pages);
	kfree(dev);
	return rv;
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
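	/* The server may return the device list in installments; keep
	 * calling GETDEVICELIST until it sets eof.
	 */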
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

 out_return:
	kfree(dlist);
	return status;

 out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = pnfs_generic_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = pnfs_generic_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};

static int __init nfs4blocklayout_init(void)
{
	struct vfsmount *mnt;
	struct path path;
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	init_waitqueue_head(&bl_wq);

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		ret = PTR_ERR(mnt);
		goto out_remove;
	}

	ret = vfs_path_lookup(mnt->mnt_root,
			      mnt,
			      NFS_PIPE_DIRNAME, 0, &path);
	if (ret)
		goto out_putrpc;
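
	/* Create the rpc_pipefs upcall pipe; the userspace block layout
	 * mapping daemon (blkmapd) listens on it to resolve volume
	 * signatures into local block devices.
	 */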
	bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
				    &bl_upcall_ops, 0);
	path_put(&path);
	if (IS_ERR(bl_device_pipe)) {
		ret = PTR_ERR(bl_device_pipe);
		goto out_putrpc;
	}
out:
	return ret;

out_putrpc:
	rpc_put_mount();
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	pnfs_unregister_layoutdriver(&blocklayout_type);
	rpc_unlink(bl_device_pipe);
	rpc_put_mount();
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);