xref: /linux/fs/btrfs/volumes.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/blkdev.h>
23 #include <linux/random.h>
24 #include <linux/iocontext.h>
25 #include <linux/capability.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include <asm/div64.h>
29 #include "compat.h"
30 #include "ctree.h"
31 #include "extent_map.h"
32 #include "disk-io.h"
33 #include "transaction.h"
34 #include "print-tree.h"
35 #include "volumes.h"
36 #include "async-thread.h"
37 #include "check-integrity.h"
38 #include "rcu-string.h"
39 
40 static int init_first_rw_device(struct btrfs_trans_handle *trans,
41 				struct btrfs_root *root,
42 				struct btrfs_device *device);
43 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
44 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
45 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
46 
47 static DEFINE_MUTEX(uuid_mutex);
48 static LIST_HEAD(fs_uuids);
49 
50 static void lock_chunks(struct btrfs_root *root)
51 {
52 	mutex_lock(&root->fs_info->chunk_mutex);
53 }
54 
55 static void unlock_chunks(struct btrfs_root *root)
56 {
57 	mutex_unlock(&root->fs_info->chunk_mutex);
58 }
59 
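/*
 * free the in-memory representation of a device set: every btrfs_device
 * hanging off the list is unlinked and freed, then the btrfs_fs_devices
 * struct itself.  The set must already be closed (note the WARN_ON).
 */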
60 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
61 {
62 	struct btrfs_device *device;
63 	WARN_ON(fs_devices->opened);
64 	while (!list_empty(&fs_devices->devices)) {
65 		device = list_entry(fs_devices->devices.next,
66 				    struct btrfs_device, dev_list);
67 		list_del(&device->dev_list);
68 		rcu_string_free(device->name);
69 		kfree(device);
70 	}
71 	kfree(fs_devices);
72 }
73 
74 void btrfs_cleanup_fs_uuids(void)
75 {
76 	struct btrfs_fs_devices *fs_devices;
77 
78 	while (!list_empty(&fs_uuids)) {
79 		fs_devices = list_entry(fs_uuids.next,
80 					struct btrfs_fs_devices, list);
81 		list_del(&fs_devices->list);
82 		free_fs_devices(fs_devices);
83 	}
84 }
85 
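/*
 * find a device on @head by devid, and by uuid as well when one is
 * given.  Returns NULL if no such device is on the list.
 */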
86 static noinline struct btrfs_device *__find_device(struct list_head *head,
87 						   u64 devid, u8 *uuid)
88 {
89 	struct btrfs_device *dev;
90 
91 	list_for_each_entry(dev, head, dev_list) {
92 		if (dev->devid == devid &&
93 		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
94 			return dev;
95 		}
96 	}
97 	return NULL;
98 }
99 
100 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
101 {
102 	struct btrfs_fs_devices *fs_devices;
103 
104 	list_for_each_entry(fs_devices, &fs_uuids, list) {
105 		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
106 			return fs_devices;
107 	}
108 	return NULL;
109 }
110 
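/*
 * splice a chain of bios we could not finish back onto the front of the
 * pending list, preserving their original FIFO order.  @head and @tail
 * are the first and last bio of the chain being requeued.
 */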
111 static void requeue_list(struct btrfs_pending_bios *pending_bios,
112 			struct bio *head, struct bio *tail)
113 {
114 
115 	struct bio *old_head;
116 
117 	old_head = pending_bios->head;
118 	pending_bios->head = head;
119 	if (pending_bios->tail)
120 		tail->bi_next = old_head;
121 	else
122 		pending_bios->tail = tail;
123 }
124 
125 /*
126  * we try to collect pending bios for a device so we don't get a large
127  * number of procs sending bios down to the same device.  This greatly
128  * improves the scheduler's ability to collect and merge the bios.
129  *
130  * But, it also turns into a long list of bios to process and that is sure
131  * to eventually make the worker thread block.  The solution here is to
132  * make some progress and then put this work struct back at the end of
133  * the list if the block device is congested.  This way, multiple devices
134  * can make progress from a single worker thread.
135  */
136 static noinline void run_scheduled_bios(struct btrfs_device *device)
137 {
138 	struct bio *pending;
139 	struct backing_dev_info *bdi;
140 	struct btrfs_fs_info *fs_info;
141 	struct btrfs_pending_bios *pending_bios;
142 	struct bio *tail;
143 	struct bio *cur;
144 	int again = 0;
145 	unsigned long num_run;
146 	unsigned long batch_run = 0;
147 	unsigned long limit;
148 	unsigned long last_waited = 0;
149 	int force_reg = 0;
150 	int sync_pending = 0;
151 	struct blk_plug plug;
152 
153 	/*
154 	 * this function runs all the bios we've collected for
155 	 * a particular device.  We don't want to wander off to
156 	 * another device without first sending all of these down.
157  * So, set up a plug here and finish it off before we return.
158 	 */
159 	blk_start_plug(&plug);
160 
161 	bdi = blk_get_backing_dev_info(device->bdev);
162 	fs_info = device->dev_root->fs_info;
163 	limit = btrfs_async_submit_limit(fs_info);
164 	limit = limit * 2 / 3;
165 
166 loop:
167 	spin_lock(&device->io_lock);
168 
169 loop_lock:
170 	num_run = 0;
171 
172 	/* take all the bios off the list at once and process them
173 	 * later on (without the lock held).  But, remember the
174 	 * tail and other pointers so the bios can be properly reinserted
175 	 * into the list if we hit congestion
176 	 */
177 	if (!force_reg && device->pending_sync_bios.head) {
178 		pending_bios = &device->pending_sync_bios;
179 		force_reg = 1;
180 	} else {
181 		pending_bios = &device->pending_bios;
182 		force_reg = 0;
183 	}
184 
185 	pending = pending_bios->head;
186 	tail = pending_bios->tail;
187 	WARN_ON(pending && !tail);
188 
189 	/*
190 	 * if pending was null this time around, no bios need processing
191 	 * at all and we can stop.  Otherwise it'll loop back up again
192 	 * and do an additional check so no bios are missed.
193 	 *
194 	 * device->running_pending is used to synchronize with the
195 	 * schedule_bio code.
196 	 */
197 	if (device->pending_sync_bios.head == NULL &&
198 	    device->pending_bios.head == NULL) {
199 		again = 0;
200 		device->running_pending = 0;
201 	} else {
202 		again = 1;
203 		device->running_pending = 1;
204 	}
205 
206 	pending_bios->head = NULL;
207 	pending_bios->tail = NULL;
208 
209 	spin_unlock(&device->io_lock);
210 
211 	while (pending) {
212 
213 		rmb();
214 		/* we want to work on both lists, but do more bios on the
215 		 * sync list than the regular list
216 		 */
217 		if ((num_run > 32 &&
218 		    pending_bios != &device->pending_sync_bios &&
219 		    device->pending_sync_bios.head) ||
220 		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
221 		    device->pending_bios.head)) {
222 			spin_lock(&device->io_lock);
223 			requeue_list(pending_bios, pending, tail);
224 			goto loop_lock;
225 		}
226 
227 		cur = pending;
228 		pending = pending->bi_next;
229 		cur->bi_next = NULL;
230 		atomic_dec(&fs_info->nr_async_bios);
231 
232 		if (atomic_read(&fs_info->nr_async_bios) < limit &&
233 		    waitqueue_active(&fs_info->async_submit_wait))
234 			wake_up(&fs_info->async_submit_wait);
235 
236 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
237 
238 		/*
239 		 * if we're doing the sync list, record that our
240 		 * plug has some sync requests on it
241 		 *
242 		 * If we're doing the regular list and there are
243 		 * sync requests sitting around, unplug before
244 		 * we add more
245 		 */
246 		if (pending_bios == &device->pending_sync_bios) {
247 			sync_pending = 1;
248 		} else if (sync_pending) {
249 			blk_finish_plug(&plug);
250 			blk_start_plug(&plug);
251 			sync_pending = 0;
252 		}
253 
254 		btrfsic_submit_bio(cur->bi_rw, cur);
255 		num_run++;
256 		batch_run++;
257 		if (need_resched())
258 			cond_resched();
259 
260 		/*
261 		 * we made progress, there is more work to do and the bdi
262 		 * is now congested.  Back off and let other work structs
263 		 * run instead
264 		 */
265 		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
266 		    fs_info->fs_devices->open_devices > 1) {
267 			struct io_context *ioc;
268 
269 			ioc = current->io_context;
270 
271 			/*
272 			 * the main goal here is that we don't want to
273 			 * block if we're going to be able to submit
274 			 * more requests without blocking.
275 			 *
276 			 * This code does two great things, it pokes into
277 			 * the elevator code from a filesystem _and_
278 			 * it makes assumptions about how batching works.
279 			 */
280 			if (ioc && ioc->nr_batch_requests > 0 &&
281 			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
282 			    (last_waited == 0 ||
283 			     ioc->last_waited == last_waited)) {
284 				/*
285 				 * we want to go through our batch of
286 				 * requests and stop.  So, we copy out
287 				 * the ioc->last_waited time and test
288 				 * against it before looping
289 				 */
290 				last_waited = ioc->last_waited;
291 				if (need_resched())
292 					cond_resched();
293 				continue;
294 			}
295 			spin_lock(&device->io_lock);
296 			requeue_list(pending_bios, pending, tail);
297 			device->running_pending = 1;
298 
299 			spin_unlock(&device->io_lock);
300 			btrfs_requeue_work(&device->work);
301 			goto done;
302 		}
303 		/* unplug every 64 requests just for good measure */
304 		if (batch_run % 64 == 0) {
305 			blk_finish_plug(&plug);
306 			blk_start_plug(&plug);
307 			sync_pending = 0;
308 		}
309 	}
310 
311 	cond_resched();
312 	if (again)
313 		goto loop;
314 
315 	spin_lock(&device->io_lock);
316 	if (device->pending_bios.head || device->pending_sync_bios.head)
317 		goto loop_lock;
318 	spin_unlock(&device->io_lock);
319 
320 done:
321 	blk_finish_plug(&plug);
322 }
323 
324 static void pending_bios_fn(struct btrfs_work *work)
325 {
326 	struct btrfs_device *device;
327 
328 	device = container_of(work, struct btrfs_device, work);
329 	run_scheduled_bios(device);
330 }
331 
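/*
 * record a scanned device in the in-memory list for its filesystem,
 * creating the btrfs_fs_devices entry the first time a given fsid is
 * seen.  A brand new device can't be added while the set is open
 * (-EBUSY); rescanning a known devid just refreshes the stored path.
 */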
332 static noinline int device_list_add(const char *path,
333 			   struct btrfs_super_block *disk_super,
334 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
335 {
336 	struct btrfs_device *device;
337 	struct btrfs_fs_devices *fs_devices;
338 	struct rcu_string *name;
339 	u64 found_transid = btrfs_super_generation(disk_super);
340 
341 	fs_devices = find_fsid(disk_super->fsid);
342 	if (!fs_devices) {
343 		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
344 		if (!fs_devices)
345 			return -ENOMEM;
346 		INIT_LIST_HEAD(&fs_devices->devices);
347 		INIT_LIST_HEAD(&fs_devices->alloc_list);
348 		list_add(&fs_devices->list, &fs_uuids);
349 		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
350 		fs_devices->latest_devid = devid;
351 		fs_devices->latest_trans = found_transid;
352 		mutex_init(&fs_devices->device_list_mutex);
353 		device = NULL;
354 	} else {
355 		device = __find_device(&fs_devices->devices, devid,
356 				       disk_super->dev_item.uuid);
357 	}
358 	if (!device) {
359 		if (fs_devices->opened)
360 			return -EBUSY;
361 
362 		device = kzalloc(sizeof(*device), GFP_NOFS);
363 		if (!device) {
364 			/* we can safely leave the fs_devices entry around */
365 			return -ENOMEM;
366 		}
367 		device->devid = devid;
368 		device->dev_stats_valid = 0;
369 		device->work.func = pending_bios_fn;
370 		memcpy(device->uuid, disk_super->dev_item.uuid,
371 		       BTRFS_UUID_SIZE);
372 		spin_lock_init(&device->io_lock);
373 
374 		name = rcu_string_strdup(path, GFP_NOFS);
375 		if (!name) {
376 			kfree(device);
377 			return -ENOMEM;
378 		}
379 		rcu_assign_pointer(device->name, name);
380 		INIT_LIST_HEAD(&device->dev_alloc_list);
381 
382 		/* init readahead state */
383 		spin_lock_init(&device->reada_lock);
384 		device->reada_curr_zone = NULL;
385 		atomic_set(&device->reada_in_flight, 0);
386 		device->reada_next = 0;
387 		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
388 		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
389 
390 		mutex_lock(&fs_devices->device_list_mutex);
391 		list_add_rcu(&device->dev_list, &fs_devices->devices);
392 		mutex_unlock(&fs_devices->device_list_mutex);
393 
394 		device->fs_devices = fs_devices;
395 		fs_devices->num_devices++;
396 	} else if (!device->name || strcmp(device->name->str, path)) {
397 		name = rcu_string_strdup(path, GFP_NOFS);
398 		if (!name)
399 			return -ENOMEM;
400 		rcu_string_free(device->name);
401 		rcu_assign_pointer(device->name, name);
402 		if (device->missing) {
403 			fs_devices->missing_devices--;
404 			device->missing = 0;
405 		}
406 	}
407 
408 	if (found_transid > fs_devices->latest_trans) {
409 		fs_devices->latest_devid = devid;
410 		fs_devices->latest_trans = found_transid;
411 	}
412 	*fs_devices_ret = fs_devices;
413 	return 0;
414 }
415 
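/*
 * make a private copy of a btrfs_fs_devices and of every device on its
 * list; the copies start with no open bdev.  Used by
 * btrfs_prepare_sprout() below, which must keep the original list
 * intact for the seed filesystem.
 */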
416 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
417 {
418 	struct btrfs_fs_devices *fs_devices;
419 	struct btrfs_device *device;
420 	struct btrfs_device *orig_dev;
421 
422 	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
423 	if (!fs_devices)
424 		return ERR_PTR(-ENOMEM);
425 
426 	INIT_LIST_HEAD(&fs_devices->devices);
427 	INIT_LIST_HEAD(&fs_devices->alloc_list);
428 	INIT_LIST_HEAD(&fs_devices->list);
429 	mutex_init(&fs_devices->device_list_mutex);
430 	fs_devices->latest_devid = orig->latest_devid;
431 	fs_devices->latest_trans = orig->latest_trans;
432 	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
433 
434 	/* We hold the volume lock, so it is safe to get the devices. */
435 	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
436 		struct rcu_string *name;
437 
438 		device = kzalloc(sizeof(*device), GFP_NOFS);
439 		if (!device)
440 			goto error;
441 
442 		/*
443 		 * This is ok to do without the rcu read lock held because we hold the
444 		 * uuid mutex so nothing we touch in here is going to disappear.
445 		 */
446 		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
447 		if (!name) {
448 			kfree(device);
449 			goto error;
450 		}
451 		rcu_assign_pointer(device->name, name);
452 
453 		device->devid = orig_dev->devid;
454 		device->work.func = pending_bios_fn;
455 		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
456 		spin_lock_init(&device->io_lock);
457 		INIT_LIST_HEAD(&device->dev_list);
458 		INIT_LIST_HEAD(&device->dev_alloc_list);
459 
460 		list_add(&device->dev_list, &fs_devices->devices);
461 		device->fs_devices = fs_devices;
462 		fs_devices->num_devices++;
463 	}
464 	return fs_devices;
465 error:
466 	free_fs_devices(fs_devices);
467 	return ERR_PTR(-ENOMEM);
468 }
469 
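/*
 * drop every device that was scanned but turned out not to be part of
 * the filesystem metadata, and remember the bdev with the highest
 * generation as the latest one.  Walks the seed device sets too.
 */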
470 void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
471 {
472 	struct btrfs_device *device, *next;
473 
474 	struct block_device *latest_bdev = NULL;
475 	u64 latest_devid = 0;
476 	u64 latest_transid = 0;
477 
478 	mutex_lock(&uuid_mutex);
479 again:
480 	/* This is the initialized path, so it is safe to release the devices. */
481 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
482 		if (device->in_fs_metadata) {
483 			if (!latest_transid ||
484 			    device->generation > latest_transid) {
485 				latest_devid = device->devid;
486 				latest_transid = device->generation;
487 				latest_bdev = device->bdev;
488 			}
489 			continue;
490 		}
491 
492 		if (device->bdev) {
493 			blkdev_put(device->bdev, device->mode);
494 			device->bdev = NULL;
495 			fs_devices->open_devices--;
496 		}
497 		if (device->writeable) {
498 			list_del_init(&device->dev_alloc_list);
499 			device->writeable = 0;
500 			fs_devices->rw_devices--;
501 		}
502 		list_del_init(&device->dev_list);
503 		fs_devices->num_devices--;
504 		rcu_string_free(device->name);
505 		kfree(device);
506 	}
507 
508 	if (fs_devices->seed) {
509 		fs_devices = fs_devices->seed;
510 		goto again;
511 	}
512 
513 	fs_devices->latest_bdev = latest_bdev;
514 	fs_devices->latest_devid = latest_devid;
515 	fs_devices->latest_trans = latest_transid;
516 
517 	mutex_unlock(&uuid_mutex);
518 }
519 
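/*
 * device teardown happens in two stages: free_device() runs as an RCU
 * callback once readers are done with the list entry, but blkdev_put()
 * may sleep, so the real work is bounced to a workqueue via
 * __free_device().
 */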
520 static void __free_device(struct work_struct *work)
521 {
522 	struct btrfs_device *device;
523 
524 	device = container_of(work, struct btrfs_device, rcu_work);
525 
526 	if (device->bdev)
527 		blkdev_put(device->bdev, device->mode);
528 
529 	rcu_string_free(device->name);
530 	kfree(device);
531 }
532 
533 static void free_device(struct rcu_head *head)
534 {
535 	struct btrfs_device *device;
536 
537 	device = container_of(head, struct btrfs_device, rcu);
538 
539 	INIT_WORK(&device->rcu_work, __free_device);
540 	schedule_work(&device->rcu_work);
541 }
542 
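/*
 * close all devices in the set once the last opener is gone.  Each
 * device is replaced on the RCU-protected list by a copy with no open
 * bdev, so lockless readers never see a half-closed device; the old
 * struct is freed after a grace period.
 */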
543 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
544 {
545 	struct btrfs_device *device;
546 
547 	if (--fs_devices->opened > 0)
548 		return 0;
549 
550 	mutex_lock(&fs_devices->device_list_mutex);
551 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
552 		struct btrfs_device *new_device;
553 		struct rcu_string *name;
554 
555 		if (device->bdev)
556 			fs_devices->open_devices--;
557 
558 		if (device->writeable) {
559 			list_del_init(&device->dev_alloc_list);
560 			fs_devices->rw_devices--;
561 		}
562 
563 		if (device->can_discard)
564 			fs_devices->num_can_discard--;
565 
566 		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
567 		BUG_ON(!new_device); /* -ENOMEM */
568 		memcpy(new_device, device, sizeof(*new_device));
569 
570 		/* Safe because we are under uuid_mutex */
571 		name = rcu_string_strdup(device->name->str, GFP_NOFS);
572 		BUG_ON(device->name && !name); /* -ENOMEM */
573 		rcu_assign_pointer(new_device->name, name);
574 		new_device->bdev = NULL;
575 		new_device->writeable = 0;
576 		new_device->in_fs_metadata = 0;
577 		new_device->can_discard = 0;
578 		list_replace_rcu(&device->dev_list, &new_device->dev_list);
579 
580 		call_rcu(&device->rcu, free_device);
581 	}
582 	mutex_unlock(&fs_devices->device_list_mutex);
583 
584 	WARN_ON(fs_devices->open_devices);
585 	WARN_ON(fs_devices->rw_devices);
586 	fs_devices->opened = 0;
587 	fs_devices->seeding = 0;
588 
589 	return 0;
590 }
591 
592 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
593 {
594 	struct btrfs_fs_devices *seed_devices = NULL;
595 	int ret;
596 
597 	mutex_lock(&uuid_mutex);
598 	ret = __btrfs_close_devices(fs_devices);
599 	if (!fs_devices->opened) {
600 		seed_devices = fs_devices->seed;
601 		fs_devices->seed = NULL;
602 	}
603 	mutex_unlock(&uuid_mutex);
604 
605 	while (seed_devices) {
606 		fs_devices = seed_devices;
607 		seed_devices = fs_devices->seed;
608 		__btrfs_close_devices(fs_devices);
609 		free_fs_devices(fs_devices);
610 	}
611 	return ret;
612 }
613 
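/*
 * open every device in the set, verify that the superblock on disk
 * still matches the devid/uuid we scanned earlier, and track the device
 * with the highest generation as latest_bdev.  Fails with -EINVAL only
 * if nothing could be opened at all.
 */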
614 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
615 				fmode_t flags, void *holder)
616 {
617 	struct request_queue *q;
618 	struct block_device *bdev;
619 	struct list_head *head = &fs_devices->devices;
620 	struct btrfs_device *device;
621 	struct block_device *latest_bdev = NULL;
622 	struct buffer_head *bh;
623 	struct btrfs_super_block *disk_super;
624 	u64 latest_devid = 0;
625 	u64 latest_transid = 0;
626 	u64 devid;
627 	int seeding = 1;
628 	int ret = 0;
629 
630 	flags |= FMODE_EXCL;
631 
632 	list_for_each_entry(device, head, dev_list) {
633 		if (device->bdev)
634 			continue;
635 		if (!device->name)
636 			continue;
637 
638 		bdev = blkdev_get_by_path(device->name->str, flags, holder);
639 		if (IS_ERR(bdev)) {
640 			printk(KERN_INFO "open %s failed\n", device->name->str);
641 			goto error;
642 		}
643 		filemap_write_and_wait(bdev->bd_inode->i_mapping);
644 		invalidate_bdev(bdev);
645 		set_blocksize(bdev, 4096);
646 
647 		bh = btrfs_read_dev_super(bdev);
648 		if (!bh)
649 			goto error_close;
650 
651 		disk_super = (struct btrfs_super_block *)bh->b_data;
652 		devid = btrfs_stack_device_id(&disk_super->dev_item);
653 		if (devid != device->devid)
654 			goto error_brelse;
655 
656 		if (memcmp(device->uuid, disk_super->dev_item.uuid,
657 			   BTRFS_UUID_SIZE))
658 			goto error_brelse;
659 
660 		device->generation = btrfs_super_generation(disk_super);
661 		if (!latest_transid || device->generation > latest_transid) {
662 			latest_devid = devid;
663 			latest_transid = device->generation;
664 			latest_bdev = bdev;
665 		}
666 
667 		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
668 			device->writeable = 0;
669 		} else {
670 			device->writeable = !bdev_read_only(bdev);
671 			seeding = 0;
672 		}
673 
674 		q = bdev_get_queue(bdev);
675 		if (blk_queue_discard(q)) {
676 			device->can_discard = 1;
677 			fs_devices->num_can_discard++;
678 		}
679 
680 		device->bdev = bdev;
681 		device->in_fs_metadata = 0;
682 		device->mode = flags;
683 
684 		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
685 			fs_devices->rotating = 1;
686 
687 		fs_devices->open_devices++;
688 		if (device->writeable) {
689 			fs_devices->rw_devices++;
690 			list_add(&device->dev_alloc_list,
691 				 &fs_devices->alloc_list);
692 		}
693 		brelse(bh);
694 		continue;
695 
696 error_brelse:
697 		brelse(bh);
698 error_close:
699 		blkdev_put(bdev, flags);
700 error:
701 		continue;
702 	}
703 	if (fs_devices->open_devices == 0) {
704 		ret = -EINVAL;
705 		goto out;
706 	}
707 	fs_devices->seeding = seeding;
708 	fs_devices->opened = 1;
709 	fs_devices->latest_bdev = latest_bdev;
710 	fs_devices->latest_devid = latest_devid;
711 	fs_devices->latest_trans = latest_transid;
712 	fs_devices->total_rw_bytes = 0;
713 out:
714 	return ret;
715 }
716 
717 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
718 		       fmode_t flags, void *holder)
719 {
720 	int ret;
721 
722 	mutex_lock(&uuid_mutex);
723 	if (fs_devices->opened) {
724 		fs_devices->opened++;
725 		ret = 0;
726 	} else {
727 		ret = __btrfs_open_devices(fs_devices, flags, holder);
728 	}
729 	mutex_unlock(&uuid_mutex);
730 	return ret;
731 }
732 
733 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
734 			  struct btrfs_fs_devices **fs_devices_ret)
735 {
736 	struct btrfs_super_block *disk_super;
737 	struct block_device *bdev;
738 	struct buffer_head *bh;
739 	int ret;
740 	u64 devid;
741 	u64 transid;
742 
743 	flags |= FMODE_EXCL;
744 	bdev = blkdev_get_by_path(path, flags, holder);
745 
746 	if (IS_ERR(bdev)) {
747 		ret = PTR_ERR(bdev);
748 		goto error;
749 	}
750 
751 	mutex_lock(&uuid_mutex);
752 	ret = set_blocksize(bdev, 4096);
753 	if (ret)
754 		goto error_close;
755 	bh = btrfs_read_dev_super(bdev);
756 	if (!bh) {
757 		ret = -EINVAL;
758 		goto error_close;
759 	}
760 	disk_super = (struct btrfs_super_block *)bh->b_data;
761 	devid = btrfs_stack_device_id(&disk_super->dev_item);
762 	transid = btrfs_super_generation(disk_super);
763 	if (disk_super->label[0])
764 		printk(KERN_INFO "device label %s ", disk_super->label);
765 	else
766 		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
767 	printk(KERN_CONT "devid %llu transid %llu %s\n",
768 	       (unsigned long long)devid, (unsigned long long)transid, path);
769 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
770 
771 	brelse(bh);
772 error_close:
773 	mutex_unlock(&uuid_mutex);
774 	blkdev_put(bdev, flags);
775 error:
776 	return ret;
777 }
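
/*
 * rough sketch of how the scan/open pair is typically used from the
 * mount path (simplified, error handling trimmed; names match the
 * functions above):
 *
 *	struct btrfs_fs_devices *fs_devices;
 *	int ret;
 *
 *	ret = btrfs_scan_one_device(path, mode, holder, &fs_devices);
 *	if (!ret)
 *		ret = btrfs_open_devices(fs_devices, mode, holder);
 */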
778 
779 /* helper to account for the used device space in the range [start, end] */
780 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
781 				   u64 end, u64 *length)
782 {
783 	struct btrfs_key key;
784 	struct btrfs_root *root = device->dev_root;
785 	struct btrfs_dev_extent *dev_extent;
786 	struct btrfs_path *path;
787 	u64 extent_end;
788 	int ret;
789 	int slot;
790 	struct extent_buffer *l;
791 
792 	*length = 0;
793 
794 	if (start >= device->total_bytes)
795 		return 0;
796 
797 	path = btrfs_alloc_path();
798 	if (!path)
799 		return -ENOMEM;
800 	path->reada = 2;
801 
802 	key.objectid = device->devid;
803 	key.offset = start;
804 	key.type = BTRFS_DEV_EXTENT_KEY;
805 
806 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
807 	if (ret < 0)
808 		goto out;
809 	if (ret > 0) {
810 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
811 		if (ret < 0)
812 			goto out;
813 	}
814 
815 	while (1) {
816 		l = path->nodes[0];
817 		slot = path->slots[0];
818 		if (slot >= btrfs_header_nritems(l)) {
819 			ret = btrfs_next_leaf(root, path);
820 			if (ret == 0)
821 				continue;
822 			if (ret < 0)
823 				goto out;
824 
825 			break;
826 		}
827 		btrfs_item_key_to_cpu(l, &key, slot);
828 
829 		if (key.objectid < device->devid)
830 			goto next;
831 
832 		if (key.objectid > device->devid)
833 			break;
834 
835 		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
836 			goto next;
837 
838 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
839 		extent_end = key.offset + btrfs_dev_extent_length(l,
840 								  dev_extent);
841 		if (key.offset <= start && extent_end > end) {
842 			*length = end - start + 1;
843 			break;
844 		} else if (key.offset <= start && extent_end > start)
845 			*length += extent_end - start;
846 		else if (key.offset > start && extent_end <= end)
847 			*length += extent_end - key.offset;
848 		else if (key.offset > start && key.offset <= end) {
849 			*length += end - key.offset + 1;
850 			break;
851 		} else if (key.offset > end)
852 			break;
853 
854 next:
855 		path->slots[0]++;
856 	}
857 	ret = 0;
858 out:
859 	btrfs_free_path(path);
860 	return ret;
861 }
862 
863 /*
864  * find_free_dev_extent - find free space in the specified device
865  * @device:	the device in which we search for the free space
866  * @num_bytes:	the size of the free space that we need
867  * @start:	store the start of the free space
868  * @len:	the size of the free space that we find, or the size of the max
869  * 		free space if we don't find suitable free space
870  *
871  * This uses a pretty simple search; the expectation is that it is
872  * called very infrequently and that a given device has a small number
873  * of extents.
874  *
875  * @start is used to store the start of the free space if we find one. But
876  * if we don't find suitable free space, it is used to store the start
877  * position of the max free space.
878  *
879  * @len is used to store the size of the free space that we find.
880  * But if we don't find suitable free space, it is used to store the size of
881  * the max free space.
882  */
883 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
884 			 u64 *start, u64 *len)
885 {
886 	struct btrfs_key key;
887 	struct btrfs_root *root = device->dev_root;
888 	struct btrfs_dev_extent *dev_extent;
889 	struct btrfs_path *path;
890 	u64 hole_size;
891 	u64 max_hole_start;
892 	u64 max_hole_size;
893 	u64 extent_end;
894 	u64 search_start;
895 	u64 search_end = device->total_bytes;
896 	int ret;
897 	int slot;
898 	struct extent_buffer *l;
899 
900 	/* FIXME use last free of some kind */
901 
902 	/* we don't want to overwrite the superblock on the drive,
903 	 * so we make sure to start at an offset of at least 1MB
904 	 */
905 	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
906 
907 	max_hole_start = search_start;
908 	max_hole_size = 0;
909 	hole_size = 0;
910 
911 	if (search_start >= search_end) {
912 		ret = -ENOSPC;
913 		goto error;
914 	}
915 
916 	path = btrfs_alloc_path();
917 	if (!path) {
918 		ret = -ENOMEM;
919 		goto error;
920 	}
921 	path->reada = 2;
922 
923 	key.objectid = device->devid;
924 	key.offset = search_start;
925 	key.type = BTRFS_DEV_EXTENT_KEY;
926 
927 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
928 	if (ret < 0)
929 		goto out;
930 	if (ret > 0) {
931 		ret = btrfs_previous_item(root, path, key.objectid, key.type);
932 		if (ret < 0)
933 			goto out;
934 	}
935 
936 	while (1) {
937 		l = path->nodes[0];
938 		slot = path->slots[0];
939 		if (slot >= btrfs_header_nritems(l)) {
940 			ret = btrfs_next_leaf(root, path);
941 			if (ret == 0)
942 				continue;
943 			if (ret < 0)
944 				goto out;
945 
946 			break;
947 		}
948 		btrfs_item_key_to_cpu(l, &key, slot);
949 
950 		if (key.objectid < device->devid)
951 			goto next;
952 
953 		if (key.objectid > device->devid)
954 			break;
955 
956 		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
957 			goto next;
958 
959 		if (key.offset > search_start) {
960 			hole_size = key.offset - search_start;
961 
962 			if (hole_size > max_hole_size) {
963 				max_hole_start = search_start;
964 				max_hole_size = hole_size;
965 			}
966 
967 			/*
968 			 * If this free space is greater than what we need,
969 			 * it must be the max free space that we have found
970 			 * until now, so max_hole_start must point to the start
971 			 * of this free space and the length of this free space
972 			 * is stored in max_hole_size. Thus, we return
973 			 * max_hole_start and max_hole_size and go back to the
974 			 * caller.
975 			 */
976 			if (hole_size >= num_bytes) {
977 				ret = 0;
978 				goto out;
979 			}
980 		}
981 
982 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
983 		extent_end = key.offset + btrfs_dev_extent_length(l,
984 								  dev_extent);
985 		if (extent_end > search_start)
986 			search_start = extent_end;
987 next:
988 		path->slots[0]++;
989 		cond_resched();
990 	}
991 
992 	/*
993 	 * At this point, search_start should be the end of
994 	 * allocated dev extents, and when shrinking the device,
995 	 * search_end may be smaller than search_start.
996 	 */
997 	if (search_end > search_start)
998 		hole_size = search_end - search_start;
999 
1000 	if (hole_size > max_hole_size) {
1001 		max_hole_start = search_start;
1002 		max_hole_size = hole_size;
1003 	}
1004 
1005 	/* See above. */
1006 	if (hole_size < num_bytes)
1007 		ret = -ENOSPC;
1008 	else
1009 		ret = 0;
1010 
1011 out:
1012 	btrfs_free_path(path);
1013 error:
1014 	*start = max_hole_start;
1015 	if (len)
1016 		*len = max_hole_size;
1017 	return ret;
1018 }
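
/*
 * illustrative call only (not taken from the kernel): ask for 64M of
 * free space on @device; on -ENOSPC, @start/@len still describe the
 * largest hole that was found:
 *
 *	u64 start, len;
 *	int ret;
 *
 *	ret = find_free_dev_extent(device, 64ULL * 1024 * 1024,
 *				   &start, &len);
 *	if (ret == -ENOSPC)
 *		printk(KERN_INFO "largest hole: %llu bytes at %llu\n",
 *		       (unsigned long long)len,
 *		       (unsigned long long)start);
 */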
1019 
1020 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1021 			  struct btrfs_device *device,
1022 			  u64 start)
1023 {
1024 	int ret;
1025 	struct btrfs_path *path;
1026 	struct btrfs_root *root = device->dev_root;
1027 	struct btrfs_key key;
1028 	struct btrfs_key found_key;
1029 	struct extent_buffer *leaf = NULL;
1030 	struct btrfs_dev_extent *extent = NULL;
1031 
1032 	path = btrfs_alloc_path();
1033 	if (!path)
1034 		return -ENOMEM;
1035 
1036 	key.objectid = device->devid;
1037 	key.offset = start;
1038 	key.type = BTRFS_DEV_EXTENT_KEY;
1039 again:
1040 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1041 	if (ret > 0) {
1042 		ret = btrfs_previous_item(root, path, key.objectid,
1043 					  BTRFS_DEV_EXTENT_KEY);
1044 		if (ret)
1045 			goto out;
1046 		leaf = path->nodes[0];
1047 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1048 		extent = btrfs_item_ptr(leaf, path->slots[0],
1049 					struct btrfs_dev_extent);
1050 		BUG_ON(found_key.offset > start || found_key.offset +
1051 		       btrfs_dev_extent_length(leaf, extent) < start);
1052 		key = found_key;
1053 		btrfs_release_path(path);
1054 		goto again;
1055 	} else if (ret == 0) {
1056 		leaf = path->nodes[0];
1057 		extent = btrfs_item_ptr(leaf, path->slots[0],
1058 					struct btrfs_dev_extent);
1059 	} else {
1060 		btrfs_error(root->fs_info, ret, "Slot search failed");
1061 		goto out;
1062 	}
1063 
1064 	if (device->bytes_used > 0) {
1065 		u64 len = btrfs_dev_extent_length(leaf, extent);
1066 		device->bytes_used -= len;
1067 		spin_lock(&root->fs_info->free_chunk_lock);
1068 		root->fs_info->free_chunk_space += len;
1069 		spin_unlock(&root->fs_info->free_chunk_lock);
1070 	}
1071 	ret = btrfs_del_item(trans, root, path);
1072 	if (ret) {
1073 		btrfs_error(root->fs_info, ret,
1074 			    "Failed to remove dev extent item");
1075 	}
1076 out:
1077 	btrfs_free_path(path);
1078 	return ret;
1079 }
1080 
1081 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1082 			   struct btrfs_device *device,
1083 			   u64 chunk_tree, u64 chunk_objectid,
1084 			   u64 chunk_offset, u64 start, u64 num_bytes)
1085 {
1086 	int ret;
1087 	struct btrfs_path *path;
1088 	struct btrfs_root *root = device->dev_root;
1089 	struct btrfs_dev_extent *extent;
1090 	struct extent_buffer *leaf;
1091 	struct btrfs_key key;
1092 
1093 	WARN_ON(!device->in_fs_metadata);
1094 	path = btrfs_alloc_path();
1095 	if (!path)
1096 		return -ENOMEM;
1097 
1098 	key.objectid = device->devid;
1099 	key.offset = start;
1100 	key.type = BTRFS_DEV_EXTENT_KEY;
1101 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1102 				      sizeof(*extent));
1103 	if (ret)
1104 		goto out;
1105 
1106 	leaf = path->nodes[0];
1107 	extent = btrfs_item_ptr(leaf, path->slots[0],
1108 				struct btrfs_dev_extent);
1109 	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1110 	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1111 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1112 
1113 	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
1114 		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1115 		    BTRFS_UUID_SIZE);
1116 
1117 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1118 	btrfs_mark_buffer_dirty(leaf);
1119 out:
1120 	btrfs_free_path(path);
1121 	return ret;
1122 }
1123 
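/*
 * find the offset just past the highest existing chunk for @objectid,
 * which is where the next chunk can be placed; *offset is 0 when no
 * chunk exists yet.
 */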
1124 static noinline int find_next_chunk(struct btrfs_root *root,
1125 				    u64 objectid, u64 *offset)
1126 {
1127 	struct btrfs_path *path;
1128 	int ret;
1129 	struct btrfs_key key;
1130 	struct btrfs_chunk *chunk;
1131 	struct btrfs_key found_key;
1132 
1133 	path = btrfs_alloc_path();
1134 	if (!path)
1135 		return -ENOMEM;
1136 
1137 	key.objectid = objectid;
1138 	key.offset = (u64)-1;
1139 	key.type = BTRFS_CHUNK_ITEM_KEY;
1140 
1141 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1142 	if (ret < 0)
1143 		goto error;
1144 
1145 	BUG_ON(ret == 0); /* Corruption */
1146 
1147 	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1148 	if (ret) {
1149 		*offset = 0;
1150 	} else {
1151 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1152 				      path->slots[0]);
1153 		if (found_key.objectid != objectid)
1154 			*offset = 0;
1155 		else {
1156 			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
1157 					       struct btrfs_chunk);
1158 			*offset = found_key.offset +
1159 				btrfs_chunk_length(path->nodes[0], chunk);
1160 		}
1161 	}
1162 	ret = 0;
1163 error:
1164 	btrfs_free_path(path);
1165 	return ret;
1166 }
1167 
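/* the next free devid is simply one past the highest devid in use */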
1168 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1169 {
1170 	int ret;
1171 	struct btrfs_key key;
1172 	struct btrfs_key found_key;
1173 	struct btrfs_path *path;
1174 
1175 	root = root->fs_info->chunk_root;
1176 
1177 	path = btrfs_alloc_path();
1178 	if (!path)
1179 		return -ENOMEM;
1180 
1181 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1182 	key.type = BTRFS_DEV_ITEM_KEY;
1183 	key.offset = (u64)-1;
1184 
1185 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1186 	if (ret < 0)
1187 		goto error;
1188 
1189 	BUG_ON(ret == 0); /* Corruption */
1190 
1191 	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1192 				  BTRFS_DEV_ITEM_KEY);
1193 	if (ret) {
1194 		*objectid = 1;
1195 	} else {
1196 		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1197 				      path->slots[0]);
1198 		*objectid = found_key.offset + 1;
1199 	}
1200 	ret = 0;
1201 error:
1202 	btrfs_free_path(path);
1203 	return ret;
1204 }
1205 
1206 /*
1207  * the device information is stored in the chunk root.
1208  * the btrfs_device struct should be fully filled in.
1209  */
1210 int btrfs_add_device(struct btrfs_trans_handle *trans,
1211 		     struct btrfs_root *root,
1212 		     struct btrfs_device *device)
1213 {
1214 	int ret;
1215 	struct btrfs_path *path;
1216 	struct btrfs_dev_item *dev_item;
1217 	struct extent_buffer *leaf;
1218 	struct btrfs_key key;
1219 	unsigned long ptr;
1220 
1221 	root = root->fs_info->chunk_root;
1222 
1223 	path = btrfs_alloc_path();
1224 	if (!path)
1225 		return -ENOMEM;
1226 
1227 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1228 	key.type = BTRFS_DEV_ITEM_KEY;
1229 	key.offset = device->devid;
1230 
1231 	ret = btrfs_insert_empty_item(trans, root, path, &key,
1232 				      sizeof(*dev_item));
1233 	if (ret)
1234 		goto out;
1235 
1236 	leaf = path->nodes[0];
1237 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1238 
1239 	btrfs_set_device_id(leaf, dev_item, device->devid);
1240 	btrfs_set_device_generation(leaf, dev_item, 0);
1241 	btrfs_set_device_type(leaf, dev_item, device->type);
1242 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1243 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1244 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1245 	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1246 	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1247 	btrfs_set_device_group(leaf, dev_item, 0);
1248 	btrfs_set_device_seek_speed(leaf, dev_item, 0);
1249 	btrfs_set_device_bandwidth(leaf, dev_item, 0);
1250 	btrfs_set_device_start_offset(leaf, dev_item, 0);
1251 
1252 	ptr = (unsigned long)btrfs_device_uuid(dev_item);
1253 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1254 	ptr = (unsigned long)btrfs_device_fsid(dev_item);
1255 	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1256 	btrfs_mark_buffer_dirty(leaf);
1257 
1258 	ret = 0;
1259 out:
1260 	btrfs_free_path(path);
1261 	return ret;
1262 }
1263 
1264 static int btrfs_rm_dev_item(struct btrfs_root *root,
1265 			     struct btrfs_device *device)
1266 {
1267 	int ret;
1268 	struct btrfs_path *path;
1269 	struct btrfs_key key;
1270 	struct btrfs_trans_handle *trans;
1271 
1272 	root = root->fs_info->chunk_root;
1273 
1274 	path = btrfs_alloc_path();
1275 	if (!path)
1276 		return -ENOMEM;
1277 
1278 	trans = btrfs_start_transaction(root, 0);
1279 	if (IS_ERR(trans)) {
1280 		btrfs_free_path(path);
1281 		return PTR_ERR(trans);
1282 	}
1283 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1284 	key.type = BTRFS_DEV_ITEM_KEY;
1285 	key.offset = device->devid;
1286 	lock_chunks(root);
1287 
1288 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1289 	if (ret < 0)
1290 		goto out;
1291 
1292 	if (ret > 0) {
1293 		ret = -ENOENT;
1294 		goto out;
1295 	}
1296 
1297 	ret = btrfs_del_item(trans, root, path);
1298 	if (ret)
1299 		goto out;
1300 out:
1301 	btrfs_free_path(path);
1302 	unlock_chunks(root);
1303 	btrfs_commit_transaction(trans, root);
1304 	return ret;
1305 }
1306 
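/*
 * remove a device from a mounted filesystem (the "btrfs device delete"
 * path).  @device_path may also be the literal string "missing" to pick
 * a device that is in the metadata but has no usable bdev.
 */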
1307 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1308 {
1309 	struct btrfs_device *device;
1310 	struct btrfs_device *next_device;
1311 	struct block_device *bdev;
1312 	struct buffer_head *bh = NULL;
1313 	struct btrfs_super_block *disk_super;
1314 	struct btrfs_fs_devices *cur_devices;
1315 	u64 all_avail;
1316 	u64 devid;
1317 	u64 num_devices;
1318 	u8 *dev_uuid;
1319 	int ret = 0;
1320 	bool clear_super = false;
1321 
1322 	mutex_lock(&uuid_mutex);
1323 
1324 	all_avail = root->fs_info->avail_data_alloc_bits |
1325 		root->fs_info->avail_system_alloc_bits |
1326 		root->fs_info->avail_metadata_alloc_bits;
1327 
1328 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1329 	    root->fs_info->fs_devices->num_devices <= 4) {
1330 		printk(KERN_ERR "btrfs: unable to go below four devices "
1331 		       "on raid10\n");
1332 		ret = -EINVAL;
1333 		goto out;
1334 	}
1335 
1336 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1337 	    root->fs_info->fs_devices->num_devices <= 2) {
1338 		printk(KERN_ERR "btrfs: unable to go below two "
1339 		       "devices on raid1\n");
1340 		ret = -EINVAL;
1341 		goto out;
1342 	}
1343 
1344 	if (strcmp(device_path, "missing") == 0) {
1345 		struct list_head *devices;
1346 		struct btrfs_device *tmp;
1347 
1348 		device = NULL;
1349 		devices = &root->fs_info->fs_devices->devices;
1350 		/*
1351 		 * It is safe to read the devices since the volume_mutex
1352 		 * is held.
1353 		 */
1354 		list_for_each_entry(tmp, devices, dev_list) {
1355 			if (tmp->in_fs_metadata && !tmp->bdev) {
1356 				device = tmp;
1357 				break;
1358 			}
1359 		}
1360 		bdev = NULL;
1361 		bh = NULL;
1362 		disk_super = NULL;
1363 		if (!device) {
1364 			printk(KERN_ERR "btrfs: no missing devices found to "
1365 			       "remove\n");
1366 			goto out;
1367 		}
1368 	} else {
1369 		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
1370 					  root->fs_info->bdev_holder);
1371 		if (IS_ERR(bdev)) {
1372 			ret = PTR_ERR(bdev);
1373 			goto out;
1374 		}
1375 
1376 		set_blocksize(bdev, 4096);
1377 		invalidate_bdev(bdev);
1378 		bh = btrfs_read_dev_super(bdev);
1379 		if (!bh) {
1380 			ret = -EINVAL;
1381 			goto error_close;
1382 		}
1383 		disk_super = (struct btrfs_super_block *)bh->b_data;
1384 		devid = btrfs_stack_device_id(&disk_super->dev_item);
1385 		dev_uuid = disk_super->dev_item.uuid;
1386 		device = btrfs_find_device(root, devid, dev_uuid,
1387 					   disk_super->fsid);
1388 		if (!device) {
1389 			ret = -ENOENT;
1390 			goto error_brelse;
1391 		}
1392 	}
1393 
1394 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1395 		printk(KERN_ERR "btrfs: unable to remove the only writeable "
1396 		       "device\n");
1397 		ret = -EINVAL;
1398 		goto error_brelse;
1399 	}
1400 
1401 	if (device->writeable) {
1402 		lock_chunks(root);
1403 		list_del_init(&device->dev_alloc_list);
1404 		unlock_chunks(root);
1405 		root->fs_info->fs_devices->rw_devices--;
1406 		clear_super = true;
1407 	}
1408 
1409 	ret = btrfs_shrink_device(device, 0);
1410 	if (ret)
1411 		goto error_undo;
1412 
1413 	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1414 	if (ret)
1415 		goto error_undo;
1416 
1417 	spin_lock(&root->fs_info->free_chunk_lock);
1418 	root->fs_info->free_chunk_space -= device->total_bytes -
1419 		device->bytes_used;
1420 	spin_unlock(&root->fs_info->free_chunk_lock);
1421 
1422 	device->in_fs_metadata = 0;
1423 	btrfs_scrub_cancel_dev(root, device);
1424 
1425 	/*
1426 	 * the device list mutex makes sure that we don't change
1427 	 * the device list while someone else is writing out all
1428 	 * the device supers.
1429 	 */
1430 
1431 	cur_devices = device->fs_devices;
1432 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1433 	list_del_rcu(&device->dev_list);
1434 
1435 	device->fs_devices->num_devices--;
1436 
1437 	if (device->missing)
1438 		root->fs_info->fs_devices->missing_devices--;
1439 
1440 	next_device = list_entry(root->fs_info->fs_devices->devices.next,
1441 				 struct btrfs_device, dev_list);
1442 	if (device->bdev == root->fs_info->sb->s_bdev)
1443 		root->fs_info->sb->s_bdev = next_device->bdev;
1444 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1445 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1446 
1447 	if (device->bdev)
1448 		device->fs_devices->open_devices--;
1449 
1450 	call_rcu(&device->rcu, free_device);
1451 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1452 
1453 	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1454 	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1455 
1456 	if (cur_devices->open_devices == 0) {
1457 		struct btrfs_fs_devices *fs_devices;
1458 		fs_devices = root->fs_info->fs_devices;
1459 		while (fs_devices) {
1460 			if (fs_devices->seed == cur_devices)
1461 				break;
1462 			fs_devices = fs_devices->seed;
1463 		}
1464 		fs_devices->seed = cur_devices->seed;
1465 		cur_devices->seed = NULL;
1466 		lock_chunks(root);
1467 		__btrfs_close_devices(cur_devices);
1468 		unlock_chunks(root);
1469 		free_fs_devices(cur_devices);
1470 	}
1471 
1472 	/*
1473 	 * at this point, the device is zero sized.  We want to
1474 	 * remove it from the devices list and zero out the old super
1475 	 */
1476 	if (clear_super) {
1477 		/* make sure this device isn't detected as part of
1478 		 * the FS anymore
1479 		 */
1480 		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1481 		set_buffer_dirty(bh);
1482 		sync_dirty_buffer(bh);
1483 	}
1484 
1485 	ret = 0;
1486 
1487 error_brelse:
1488 	brelse(bh);
1489 error_close:
1490 	if (bdev)
1491 		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1492 out:
1493 	mutex_unlock(&uuid_mutex);
1494 	return ret;
1495 error_undo:
1496 	if (device->writeable) {
1497 		lock_chunks(root);
1498 		list_add(&device->dev_alloc_list,
1499 			 &root->fs_info->fs_devices->alloc_list);
1500 		unlock_chunks(root);
1501 		root->fs_info->fs_devices->rw_devices++;
1502 	}
1503 	goto error_brelse;
1504 }
1505 
1506 /*
1507  * does all the dirty work required for changing the file system's UUID.
1508  */
1509 static int btrfs_prepare_sprout(struct btrfs_root *root)
1510 {
1511 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1512 	struct btrfs_fs_devices *old_devices;
1513 	struct btrfs_fs_devices *seed_devices;
1514 	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1515 	struct btrfs_device *device;
1516 	u64 super_flags;
1517 
1518 	BUG_ON(!mutex_is_locked(&uuid_mutex));
1519 	if (!fs_devices->seeding)
1520 		return -EINVAL;
1521 
1522 	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1523 	if (!seed_devices)
1524 		return -ENOMEM;
1525 
1526 	old_devices = clone_fs_devices(fs_devices);
1527 	if (IS_ERR(old_devices)) {
1528 		kfree(seed_devices);
1529 		return PTR_ERR(old_devices);
1530 	}
1531 
1532 	list_add(&old_devices->list, &fs_uuids);
1533 
1534 	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1535 	seed_devices->opened = 1;
1536 	INIT_LIST_HEAD(&seed_devices->devices);
1537 	INIT_LIST_HEAD(&seed_devices->alloc_list);
1538 	mutex_init(&seed_devices->device_list_mutex);
1539 
1540 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1541 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1542 			      synchronize_rcu);
1543 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1544 
1545 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1546 	list_for_each_entry(device, &seed_devices->devices, dev_list) {
1547 		device->fs_devices = seed_devices;
1548 	}
1549 
1550 	fs_devices->seeding = 0;
1551 	fs_devices->num_devices = 0;
1552 	fs_devices->open_devices = 0;
1553 	fs_devices->seed = seed_devices;
1554 
1555 	generate_random_uuid(fs_devices->fsid);
1556 	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1557 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1558 	super_flags = btrfs_super_flags(disk_super) &
1559 		      ~BTRFS_SUPER_FLAG_SEEDING;
1560 	btrfs_set_super_flags(disk_super, super_flags);
1561 
1562 	return 0;
1563 }
1564 
1565 /*
1566  * store the expected generation for seed devices in device items.
1567  */
1568 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1569 			       struct btrfs_root *root)
1570 {
1571 	struct btrfs_path *path;
1572 	struct extent_buffer *leaf;
1573 	struct btrfs_dev_item *dev_item;
1574 	struct btrfs_device *device;
1575 	struct btrfs_key key;
1576 	u8 fs_uuid[BTRFS_UUID_SIZE];
1577 	u8 dev_uuid[BTRFS_UUID_SIZE];
1578 	u64 devid;
1579 	int ret;
1580 
1581 	path = btrfs_alloc_path();
1582 	if (!path)
1583 		return -ENOMEM;
1584 
1585 	root = root->fs_info->chunk_root;
1586 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1587 	key.offset = 0;
1588 	key.type = BTRFS_DEV_ITEM_KEY;
1589 
1590 	while (1) {
1591 		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1592 		if (ret < 0)
1593 			goto error;
1594 
1595 		leaf = path->nodes[0];
1596 next_slot:
1597 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1598 			ret = btrfs_next_leaf(root, path);
1599 			if (ret > 0)
1600 				break;
1601 			if (ret < 0)
1602 				goto error;
1603 			leaf = path->nodes[0];
1604 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1605 			btrfs_release_path(path);
1606 			continue;
1607 		}
1608 
1609 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1610 		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1611 		    key.type != BTRFS_DEV_ITEM_KEY)
1612 			break;
1613 
1614 		dev_item = btrfs_item_ptr(leaf, path->slots[0],
1615 					  struct btrfs_dev_item);
1616 		devid = btrfs_device_id(leaf, dev_item);
1617 		read_extent_buffer(leaf, dev_uuid,
1618 				   (unsigned long)btrfs_device_uuid(dev_item),
1619 				   BTRFS_UUID_SIZE);
1620 		read_extent_buffer(leaf, fs_uuid,
1621 				   (unsigned long)btrfs_device_fsid(dev_item),
1622 				   BTRFS_UUID_SIZE);
1623 		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1624 		BUG_ON(!device); /* Logic error */
1625 
1626 		if (device->fs_devices->seeding) {
1627 			btrfs_set_device_generation(leaf, dev_item,
1628 						    device->generation);
1629 			btrfs_mark_buffer_dirty(leaf);
1630 		}
1631 
1632 		path->slots[0]++;
1633 		goto next_slot;
1634 	}
1635 	ret = 0;
1636 error:
1637 	btrfs_free_path(path);
1638 	return ret;
1639 }
1640 
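/*
 * add a new device to a mounted filesystem (the "btrfs device add"
 * path).  If the filesystem is a seed, this also sprouts: the existing
 * devices become a read-only seed set and the new device carries a
 * freshly generated fsid.
 */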
1641 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1642 {
1643 	struct request_queue *q;
1644 	struct btrfs_trans_handle *trans;
1645 	struct btrfs_device *device;
1646 	struct block_device *bdev;
1647 	struct list_head *devices;
1648 	struct super_block *sb = root->fs_info->sb;
1649 	struct rcu_string *name;
1650 	u64 total_bytes;
1651 	int seeding_dev = 0;
1652 	int ret = 0;
1653 
1654 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1655 		return -EROFS;
1656 
1657 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1658 				  root->fs_info->bdev_holder);
1659 	if (IS_ERR(bdev))
1660 		return PTR_ERR(bdev);
1661 
1662 	if (root->fs_info->fs_devices->seeding) {
1663 		seeding_dev = 1;
1664 		down_write(&sb->s_umount);
1665 		mutex_lock(&uuid_mutex);
1666 	}
1667 
1668 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
1669 
1670 	devices = &root->fs_info->fs_devices->devices;
1671 	/*
1672 	 * we have the volume lock, so we don't need the extra
1673 	 * device list mutex while reading the list here.
1674 	 */
1675 	list_for_each_entry(device, devices, dev_list) {
1676 		if (device->bdev == bdev) {
1677 			ret = -EEXIST;
1678 			goto error;
1679 		}
1680 	}
1681 
1682 	device = kzalloc(sizeof(*device), GFP_NOFS);
1683 	if (!device) {
1684 		/* we can safely leave the fs_devices entry around */
1685 		ret = -ENOMEM;
1686 		goto error;
1687 	}
1688 
1689 	name = rcu_string_strdup(device_path, GFP_NOFS);
1690 	if (!name) {
1691 		kfree(device);
1692 		ret = -ENOMEM;
1693 		goto error;
1694 	}
1695 	rcu_assign_pointer(device->name, name);
1696 
1697 	ret = find_next_devid(root, &device->devid);
1698 	if (ret) {
1699 		rcu_string_free(device->name);
1700 		kfree(device);
1701 		goto error;
1702 	}
1703 
1704 	trans = btrfs_start_transaction(root, 0);
1705 	if (IS_ERR(trans)) {
1706 		rcu_string_free(device->name);
1707 		kfree(device);
1708 		ret = PTR_ERR(trans);
1709 		goto error;
1710 	}
1711 
1712 	lock_chunks(root);
1713 
1714 	q = bdev_get_queue(bdev);
1715 	if (blk_queue_discard(q))
1716 		device->can_discard = 1;
1717 	device->writeable = 1;
1718 	device->work.func = pending_bios_fn;
1719 	generate_random_uuid(device->uuid);
1720 	spin_lock_init(&device->io_lock);
1721 	device->generation = trans->transid;
1722 	device->io_width = root->sectorsize;
1723 	device->io_align = root->sectorsize;
1724 	device->sector_size = root->sectorsize;
1725 	device->total_bytes = i_size_read(bdev->bd_inode);
1726 	device->disk_total_bytes = device->total_bytes;
1727 	device->dev_root = root->fs_info->dev_root;
1728 	device->bdev = bdev;
1729 	device->in_fs_metadata = 1;
1730 	device->mode = FMODE_EXCL;
1731 	set_blocksize(device->bdev, 4096);
1732 
1733 	if (seeding_dev) {
1734 		sb->s_flags &= ~MS_RDONLY;
1735 		ret = btrfs_prepare_sprout(root);
1736 		BUG_ON(ret); /* -ENOMEM */
1737 	}
1738 
1739 	device->fs_devices = root->fs_info->fs_devices;
1740 
1741 	/*
1742 	 * we don't want write_supers to jump in here with our device
1743 	 * half setup
1744 	 */
1745 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1746 	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1747 	list_add(&device->dev_alloc_list,
1748 		 &root->fs_info->fs_devices->alloc_list);
1749 	root->fs_info->fs_devices->num_devices++;
1750 	root->fs_info->fs_devices->open_devices++;
1751 	root->fs_info->fs_devices->rw_devices++;
1752 	if (device->can_discard)
1753 		root->fs_info->fs_devices->num_can_discard++;
1754 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1755 
1756 	spin_lock(&root->fs_info->free_chunk_lock);
1757 	root->fs_info->free_chunk_space += device->total_bytes;
1758 	spin_unlock(&root->fs_info->free_chunk_lock);
1759 
1760 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1761 		root->fs_info->fs_devices->rotating = 1;
1762 
1763 	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1764 	btrfs_set_super_total_bytes(root->fs_info->super_copy,
1765 				    total_bytes + device->total_bytes);
1766 
1767 	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1768 	btrfs_set_super_num_devices(root->fs_info->super_copy,
1769 				    total_bytes + 1);
1770 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1771 
1772 	if (seeding_dev) {
1773 		ret = init_first_rw_device(trans, root, device);
1774 		if (ret)
1775 			goto error_trans;
1776 		ret = btrfs_finish_sprout(trans, root);
1777 		if (ret)
1778 			goto error_trans;
1779 	} else {
1780 		ret = btrfs_add_device(trans, root, device);
1781 		if (ret)
1782 			goto error_trans;
1783 	}
1784 
1785 	/*
1786 	 * we've got more storage, clear any full flags on the space
1787 	 * infos
1788 	 */
1789 	btrfs_clear_space_info_full(root->fs_info);
1790 
1791 	unlock_chunks(root);
1792 	ret = btrfs_commit_transaction(trans, root);
1793 
1794 	if (seeding_dev) {
1795 		mutex_unlock(&uuid_mutex);
1796 		up_write(&sb->s_umount);
1797 
1798 		if (ret) /* transaction commit */
1799 			return ret;
1800 
1801 		ret = btrfs_relocate_sys_chunks(root);
1802 		if (ret < 0)
1803 			btrfs_error(root->fs_info, ret,
1804 				    "Failed to relocate sys chunks after "
1805 				    "device initialization. This can be fixed "
1806 				    "using the \"btrfs balance\" command.");
1807 	}
1808 
1809 	return ret;
1810 
1811 error_trans:
1812 	unlock_chunks(root);
1813 	btrfs_abort_transaction(trans, root, ret);
1814 	btrfs_end_transaction(trans, root);
1815 	rcu_string_free(device->name);
1816 	kfree(device);
1817 error:
1818 	blkdev_put(bdev, FMODE_EXCL);
1819 	if (seeding_dev) {
1820 		mutex_unlock(&uuid_mutex);
1821 		up_write(&sb->s_umount);
1822 	}
1823 	return ret;
1824 }
1825 
1826 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1827 					struct btrfs_device *device)
1828 {
1829 	int ret;
1830 	struct btrfs_path *path;
1831 	struct btrfs_root *root;
1832 	struct btrfs_dev_item *dev_item;
1833 	struct extent_buffer *leaf;
1834 	struct btrfs_key key;
1835 
1836 	root = device->dev_root->fs_info->chunk_root;
1837 
1838 	path = btrfs_alloc_path();
1839 	if (!path)
1840 		return -ENOMEM;
1841 
1842 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1843 	key.type = BTRFS_DEV_ITEM_KEY;
1844 	key.offset = device->devid;
1845 
1846 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1847 	if (ret < 0)
1848 		goto out;
1849 
1850 	if (ret > 0) {
1851 		ret = -ENOENT;
1852 		goto out;
1853 	}
1854 
1855 	leaf = path->nodes[0];
1856 	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1857 
1858 	btrfs_set_device_id(leaf, dev_item, device->devid);
1859 	btrfs_set_device_type(leaf, dev_item, device->type);
1860 	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1861 	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1862 	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1863 	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1864 	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1865 	btrfs_mark_buffer_dirty(leaf);
1866 
1867 out:
1868 	btrfs_free_path(path);
1869 	return ret;
1870 }
1871 
1872 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1873 		      struct btrfs_device *device, u64 new_size)
1874 {
1875 	struct btrfs_super_block *super_copy =
1876 		device->dev_root->fs_info->super_copy;
1877 	u64 old_total = btrfs_super_total_bytes(super_copy);
1878 	u64 diff = new_size - device->total_bytes;
1879 
1880 	if (!device->writeable)
1881 		return -EACCES;
1882 	if (new_size <= device->total_bytes)
1883 		return -EINVAL;
1884 
1885 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
1886 	device->fs_devices->total_rw_bytes += diff;
1887 
1888 	device->total_bytes = new_size;
1889 	device->disk_total_bytes = new_size;
1890 	btrfs_clear_space_info_full(device->dev_root->fs_info);
1891 
1892 	return btrfs_update_device(trans, device);
1893 }
1894 
1895 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1896 		      struct btrfs_device *device, u64 new_size)
1897 {
1898 	int ret;
1899 	lock_chunks(device->dev_root);
1900 	ret = __btrfs_grow_device(trans, device, new_size);
1901 	unlock_chunks(device->dev_root);
1902 	return ret;
1903 }
1904 
1905 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1906 			    struct btrfs_root *root,
1907 			    u64 chunk_tree, u64 chunk_objectid,
1908 			    u64 chunk_offset)
1909 {
1910 	int ret;
1911 	struct btrfs_path *path;
1912 	struct btrfs_key key;
1913 
1914 	root = root->fs_info->chunk_root;
1915 	path = btrfs_alloc_path();
1916 	if (!path)
1917 		return -ENOMEM;
1918 
1919 	key.objectid = chunk_objectid;
1920 	key.offset = chunk_offset;
1921 	key.type = BTRFS_CHUNK_ITEM_KEY;
1922 
1923 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1924 	if (ret < 0)
1925 		goto out;
1926 	else if (ret > 0) { /* Logic error or corruption */
1927 		btrfs_error(root->fs_info, -ENOENT,
1928 			    "Failed lookup while freeing chunk.");
1929 		ret = -ENOENT;
1930 		goto out;
1931 	}
1932 
1933 	ret = btrfs_del_item(trans, root, path);
1934 	if (ret < 0)
1935 		btrfs_error(root->fs_info, ret,
1936 			    "Failed to delete chunk item.");
1937 out:
1938 	btrfs_free_path(path);
1939 	return ret;
1940 }
1941 
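/*
 * Remove the matching chunk entry from the superblock's sys_chunk_array
 * by scanning the packed (disk_key, chunk) records and memmove()ing the
 * tail over the deleted entry.  Returns -EIO if a non-chunk key is
 * found, which means the array is corrupted.
 */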
1942 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
1943 			u64 chunk_offset)
1944 {
1945 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
1946 	struct btrfs_disk_key *disk_key;
1947 	struct btrfs_chunk *chunk;
1948 	u8 *ptr;
1949 	int ret = 0;
1950 	u32 num_stripes;
1951 	u32 array_size;
1952 	u32 len = 0;
1953 	u32 cur;
1954 	struct btrfs_key key;
1955 
1956 	array_size = btrfs_super_sys_array_size(super_copy);
1957 
1958 	ptr = super_copy->sys_chunk_array;
1959 	cur = 0;
1960 
1961 	while (cur < array_size) {
1962 		disk_key = (struct btrfs_disk_key *)ptr;
1963 		btrfs_disk_key_to_cpu(&key, disk_key);
1964 
1965 		len = sizeof(*disk_key);
1966 
1967 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1968 			chunk = (struct btrfs_chunk *)(ptr + len);
1969 			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1970 			len += btrfs_chunk_item_size(num_stripes);
1971 		} else {
1972 			ret = -EIO;
1973 			break;
1974 		}
1975 		if (key.objectid == chunk_objectid &&
1976 		    key.offset == chunk_offset) {
1977 			memmove(ptr, ptr + len, array_size - (cur + len));
1978 			array_size -= len;
1979 			btrfs_set_super_sys_array_size(super_copy, array_size);
1980 		} else {
1981 			ptr += len;
1982 			cur += len;
1983 		}
1984 	}
1985 	return ret;
1986 }
1987 
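/*
 * Relocate a single chunk: move all live extents out of it, then delete
 * its device extents, its chunk tree item, its sys_chunk_array copy (for
 * SYSTEM chunks), its block group and its cached extent mapping.
 */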
1988 static int btrfs_relocate_chunk(struct btrfs_root *root,
1989 			 u64 chunk_tree, u64 chunk_objectid,
1990 			 u64 chunk_offset)
1991 {
1992 	struct extent_map_tree *em_tree;
1993 	struct btrfs_root *extent_root;
1994 	struct btrfs_trans_handle *trans;
1995 	struct extent_map *em;
1996 	struct map_lookup *map;
1997 	int ret;
1998 	int i;
1999 
2000 	root = root->fs_info->chunk_root;
2001 	extent_root = root->fs_info->extent_root;
2002 	em_tree = &root->fs_info->mapping_tree.map_tree;
2003 
2004 	ret = btrfs_can_relocate(extent_root, chunk_offset);
2005 	if (ret)
2006 		return -ENOSPC;
2007 
2008 	/* step one, relocate all the extents inside this chunk */
2009 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2010 	if (ret)
2011 		return ret;
2012 
2013 	trans = btrfs_start_transaction(root, 0);
2014 	BUG_ON(IS_ERR(trans));
2015 
2016 	lock_chunks(root);
2017 
2018 	/*
2019 	 * step two, delete the device extents and the
2020 	 * chunk tree entries
2021 	 */
2022 	read_lock(&em_tree->lock);
2023 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2024 	read_unlock(&em_tree->lock);
2025 
2026 	BUG_ON(!em || em->start > chunk_offset ||
2027 	       em->start + em->len < chunk_offset);
2028 	map = (struct map_lookup *)em->bdev;
2029 
2030 	for (i = 0; i < map->num_stripes; i++) {
2031 		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2032 					    map->stripes[i].physical);
2033 		BUG_ON(ret);
2034 
2035 		if (map->stripes[i].dev) {
2036 			ret = btrfs_update_device(trans, map->stripes[i].dev);
2037 			BUG_ON(ret);
2038 		}
2039 	}
2040 	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2041 			       chunk_offset);
2042 
2043 	BUG_ON(ret);
2044 
2045 	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2046 
2047 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2048 		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2049 		BUG_ON(ret);
2050 	}
2051 
2052 	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2053 	BUG_ON(ret);
2054 
2055 	write_lock(&em_tree->lock);
2056 	remove_extent_mapping(em_tree, em);
2057 	write_unlock(&em_tree->lock);
2058 
2059 	kfree(map);
2060 	em->bdev = NULL;
2061 
2062 	/* once for the tree */
2063 	free_extent_map(em);
2064 	/* once for us */
2065 	free_extent_map(em);
2066 
2067 	unlock_chunks(root);
2068 	btrfs_end_transaction(trans, root);
2069 	return 0;
2070 }
2071 
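/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk.  Chunks
 * that fail with -ENOSPC are retried once after the others have moved;
 * if the retry pass still fails we give up and return -ENOSPC.
 */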
2072 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2073 {
2074 	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2075 	struct btrfs_path *path;
2076 	struct extent_buffer *leaf;
2077 	struct btrfs_chunk *chunk;
2078 	struct btrfs_key key;
2079 	struct btrfs_key found_key;
2080 	u64 chunk_tree = chunk_root->root_key.objectid;
2081 	u64 chunk_type;
2082 	bool retried = false;
2083 	int failed = 0;
2084 	int ret;
2085 
2086 	path = btrfs_alloc_path();
2087 	if (!path)
2088 		return -ENOMEM;
2089 
2090 again:
2091 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2092 	key.offset = (u64)-1;
2093 	key.type = BTRFS_CHUNK_ITEM_KEY;
2094 
2095 	while (1) {
2096 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2097 		if (ret < 0)
2098 			goto error;
2099 		BUG_ON(ret == 0); /* Corruption */
2100 
2101 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
2102 					  key.type);
2103 		if (ret < 0)
2104 			goto error;
2105 		if (ret > 0)
2106 			break;
2107 
2108 		leaf = path->nodes[0];
2109 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2110 
2111 		chunk = btrfs_item_ptr(leaf, path->slots[0],
2112 				       struct btrfs_chunk);
2113 		chunk_type = btrfs_chunk_type(leaf, chunk);
2114 		btrfs_release_path(path);
2115 
2116 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2117 			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2118 						   found_key.objectid,
2119 						   found_key.offset);
2120 			if (ret == -ENOSPC)
2121 				failed++;
2122 			else if (ret)
2123 				BUG();
2124 		}
2125 
2126 		if (found_key.offset == 0)
2127 			break;
2128 		key.offset = found_key.offset - 1;
2129 	}
2130 	ret = 0;
2131 	if (failed && !retried) {
2132 		failed = 0;
2133 		retried = true;
2134 		goto again;
2135 	} else if (failed && retried) {
2136 		WARN_ON(1);
2137 		ret = -ENOSPC;
2138 	}
2139 error:
2140 	btrfs_free_path(path);
2141 	return ret;
2142 }
2143 
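/*
 * Persist the current balance parameters as the balance item in the
 * tree root, so an interrupted balance can be resumed after a remount.
 */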
2144 static int insert_balance_item(struct btrfs_root *root,
2145 			       struct btrfs_balance_control *bctl)
2146 {
2147 	struct btrfs_trans_handle *trans;
2148 	struct btrfs_balance_item *item;
2149 	struct btrfs_disk_balance_args disk_bargs;
2150 	struct btrfs_path *path;
2151 	struct extent_buffer *leaf;
2152 	struct btrfs_key key;
2153 	int ret, err;
2154 
2155 	path = btrfs_alloc_path();
2156 	if (!path)
2157 		return -ENOMEM;
2158 
2159 	trans = btrfs_start_transaction(root, 0);
2160 	if (IS_ERR(trans)) {
2161 		btrfs_free_path(path);
2162 		return PTR_ERR(trans);
2163 	}
2164 
2165 	key.objectid = BTRFS_BALANCE_OBJECTID;
2166 	key.type = BTRFS_BALANCE_ITEM_KEY;
2167 	key.offset = 0;
2168 
2169 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2170 				      sizeof(*item));
2171 	if (ret)
2172 		goto out;
2173 
2174 	leaf = path->nodes[0];
2175 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2176 
2177 	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2178 
2179 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2180 	btrfs_set_balance_data(leaf, item, &disk_bargs);
2181 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2182 	btrfs_set_balance_meta(leaf, item, &disk_bargs);
2183 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2184 	btrfs_set_balance_sys(leaf, item, &disk_bargs);
2185 
2186 	btrfs_set_balance_flags(leaf, item, bctl->flags);
2187 
2188 	btrfs_mark_buffer_dirty(leaf);
2189 out:
2190 	btrfs_free_path(path);
2191 	err = btrfs_commit_transaction(trans, root);
2192 	if (err && !ret)
2193 		ret = err;
2194 	return ret;
2195 }
2196 
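/* Remove the balance item once balance has finished or been cancelled. */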
2197 static int del_balance_item(struct btrfs_root *root)
2198 {
2199 	struct btrfs_trans_handle *trans;
2200 	struct btrfs_path *path;
2201 	struct btrfs_key key;
2202 	int ret, err;
2203 
2204 	path = btrfs_alloc_path();
2205 	if (!path)
2206 		return -ENOMEM;
2207 
2208 	trans = btrfs_start_transaction(root, 0);
2209 	if (IS_ERR(trans)) {
2210 		btrfs_free_path(path);
2211 		return PTR_ERR(trans);
2212 	}
2213 
2214 	key.objectid = BTRFS_BALANCE_OBJECTID;
2215 	key.type = BTRFS_BALANCE_ITEM_KEY;
2216 	key.offset = 0;
2217 
2218 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2219 	if (ret < 0)
2220 		goto out;
2221 	if (ret > 0) {
2222 		ret = -ENOENT;
2223 		goto out;
2224 	}
2225 
2226 	ret = btrfs_del_item(trans, root, path);
2227 out:
2228 	btrfs_free_path(path);
2229 	err = btrfs_commit_transaction(trans, root);
2230 	if (err && !ret)
2231 		ret = err;
2232 	return ret;
2233 }
2234 
2235 /*
2236  * This is a heuristic used to reduce the number of chunks balanced on
2237  * resume after balance was interrupted.
2238  */
2239 static void update_balance_args(struct btrfs_balance_control *bctl)
2240 {
2241 	/*
2242 	 * Turn on soft mode for chunk types that were being converted.
2243 	 */
2244 	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2245 		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2246 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2247 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2248 	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2249 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2250 
2251 	/*
2252 	 * Turn on the usage filter if it is not already in use.  The
2253 	 * idea is that chunks that we have already balanced should be
2254 	 * reasonably full.  Don't do it for chunks that are being
2255 	 * converted - that will keep us from relocating unconverted
2256 	 * (albeit full) chunks.
2257 	 */
2258 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2259 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2260 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2261 		bctl->data.usage = 90;
2262 	}
2263 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2264 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2265 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2266 		bctl->sys.usage = 90;
2267 	}
2268 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2269 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2270 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2271 		bctl->meta.usage = 90;
2272 	}
2273 }
2274 
2275 /*
2276  * Should be called with both balance and volume mutexes held to
2277  * serialize other volume operations (add_dev/rm_dev/resize) with
2278  * restriper.  Same goes for unset_balance_control.
2279  */
2280 static void set_balance_control(struct btrfs_balance_control *bctl)
2281 {
2282 	struct btrfs_fs_info *fs_info = bctl->fs_info;
2283 
2284 	BUG_ON(fs_info->balance_ctl);
2285 
2286 	spin_lock(&fs_info->balance_lock);
2287 	fs_info->balance_ctl = bctl;
2288 	spin_unlock(&fs_info->balance_lock);
2289 }
2290 
2291 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2292 {
2293 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2294 
2295 	BUG_ON(!fs_info->balance_ctl);
2296 
2297 	spin_lock(&fs_info->balance_lock);
2298 	fs_info->balance_ctl = NULL;
2299 	spin_unlock(&fs_info->balance_lock);
2300 
2301 	kfree(bctl);
2302 }
2303 
2304 /*
2305  * Balance filters.  Return 1 if chunk should be filtered out
2306  * (should not be balanced).
2307  */
2308 static int chunk_profiles_filter(u64 chunk_type,
2309 				 struct btrfs_balance_args *bargs)
2310 {
2311 	chunk_type = chunk_to_extended(chunk_type) &
2312 				BTRFS_EXTENDED_PROFILE_MASK;
2313 
2314 	if (bargs->profiles & chunk_type)
2315 		return 0;
2316 
2317 	return 1;
2318 }
2319 
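/* Return @factor percent of @num, e.g. div_factor_fine(1000, 90) == 900. */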
2320 static u64 div_factor_fine(u64 num, int factor)
2321 {
2322 	if (factor <= 0)
2323 		return 0;
2324 	if (factor >= 100)
2325 		return num;
2326 
2327 	num *= factor;
2328 	do_div(num, 100);
2329 	return num;
2330 }
2331 
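/*
 * Usage filter: balance only chunks that are less than bargs->usage
 * percent full; fuller chunks are filtered out.
 */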
2332 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2333 			      struct btrfs_balance_args *bargs)
2334 {
2335 	struct btrfs_block_group_cache *cache;
2336 	u64 chunk_used, user_thresh;
2337 	int ret = 1;
2338 
2339 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2340 	chunk_used = btrfs_block_group_used(&cache->item);
2341 
2342 	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2343 	if (chunk_used < user_thresh)
2344 		ret = 0;
2345 
2346 	btrfs_put_block_group(cache);
2347 	return ret;
2348 }
2349 
2350 static int chunk_devid_filter(struct extent_buffer *leaf,
2351 			      struct btrfs_chunk *chunk,
2352 			      struct btrfs_balance_args *bargs)
2353 {
2354 	struct btrfs_stripe *stripe;
2355 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2356 	int i;
2357 
2358 	for (i = 0; i < num_stripes; i++) {
2359 		stripe = btrfs_stripe_nr(chunk, i);
2360 		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2361 			return 0;
2362 	}
2363 
2364 	return 1;
2365 }
2366 
2367 /* [pstart, pend) */
2368 static int chunk_drange_filter(struct extent_buffer *leaf,
2369 			       struct btrfs_chunk *chunk,
2370 			       u64 chunk_offset,
2371 			       struct btrfs_balance_args *bargs)
2372 {
2373 	struct btrfs_stripe *stripe;
2374 	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2375 	u64 stripe_offset;
2376 	u64 stripe_length;
2377 	int factor;
2378 	int i;
2379 
2380 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2381 		return 0;
2382 
2383 	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2384 	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2385 		factor = 2;
2386 	else
2387 		factor = 1;
2388 	factor = num_stripes / factor;
2389 
2390 	for (i = 0; i < num_stripes; i++) {
2391 		stripe = btrfs_stripe_nr(chunk, i);
2392 		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2393 			continue;
2394 
2395 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
2396 		stripe_length = btrfs_chunk_length(leaf, chunk);
2397 		do_div(stripe_length, factor);
2398 
2399 		if (stripe_offset < bargs->pend &&
2400 		    stripe_offset + stripe_length > bargs->pstart)
2401 			return 0;
2402 	}
2403 
2404 	return 1;
2405 }
2406 
2407 /* [vstart, vend) */
2408 static int chunk_vrange_filter(struct extent_buffer *leaf,
2409 			       struct btrfs_chunk *chunk,
2410 			       u64 chunk_offset,
2411 			       struct btrfs_balance_args *bargs)
2412 {
2413 	if (chunk_offset < bargs->vend &&
2414 	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2415 		/* at least part of the chunk is inside this vrange */
2416 		return 0;
2417 
2418 	return 1;
2419 }
2420 
2421 static int chunk_soft_convert_filter(u64 chunk_type,
2422 				     struct btrfs_balance_args *bargs)
2423 {
2424 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2425 		return 0;
2426 
2427 	chunk_type = chunk_to_extended(chunk_type) &
2428 				BTRFS_EXTENDED_PROFILE_MASK;
2429 
2430 	if (bargs->target == chunk_type)
2431 		return 1;
2432 
2433 	return 0;
2434 }
2435 
2436 static int should_balance_chunk(struct btrfs_root *root,
2437 				struct extent_buffer *leaf,
2438 				struct btrfs_chunk *chunk, u64 chunk_offset)
2439 {
2440 	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2441 	struct btrfs_balance_args *bargs = NULL;
2442 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2443 
2444 	/* type filter */
2445 	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2446 	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2447 		return 0;
2448 	}
2449 
2450 	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2451 		bargs = &bctl->data;
2452 	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2453 		bargs = &bctl->sys;
2454 	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2455 		bargs = &bctl->meta;
2456 
2457 	/* profiles filter */
2458 	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2459 	    chunk_profiles_filter(chunk_type, bargs)) {
2460 		return 0;
2461 	}
2462 
2463 	/* usage filter */
2464 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2465 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2466 		return 0;
2467 	}
2468 
2469 	/* devid filter */
2470 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2471 	    chunk_devid_filter(leaf, chunk, bargs)) {
2472 		return 0;
2473 	}
2474 
2475 	/* drange filter, makes sense only with devid filter */
2476 	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2477 	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2478 		return 0;
2479 	}
2480 
2481 	/* vrange filter */
2482 	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2483 	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2484 		return 0;
2485 	}
2486 
2487 	/* soft profile changing mode */
2488 	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2489 	    chunk_soft_convert_filter(chunk_type, bargs)) {
2490 		return 0;
2491 	}
2492 
2493 	return 1;
2494 }
2495 
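/* Return @factor tenths of @num, e.g. div_factor(100, 3) == 30. */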
2496 static u64 div_factor(u64 num, int factor)
2497 {
2498 	if (factor == 10)
2499 		return num;
2500 	num *= factor;
2501 	do_div(num, 10);
2502 	return num;
2503 }
2504 
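/*
 * Main balance loop.  Walks the chunk tree twice: a counting pass that
 * only fills bctl->stat.expected, then a second pass that actually
 * relocates each chunk that passes the filters.
 */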
2505 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2506 {
2507 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2508 	struct btrfs_root *chunk_root = fs_info->chunk_root;
2509 	struct btrfs_root *dev_root = fs_info->dev_root;
2510 	struct list_head *devices;
2511 	struct btrfs_device *device;
2512 	u64 old_size;
2513 	u64 size_to_free;
2514 	struct btrfs_chunk *chunk;
2515 	struct btrfs_path *path;
2516 	struct btrfs_key key;
2517 	struct btrfs_key found_key;
2518 	struct btrfs_trans_handle *trans;
2519 	struct extent_buffer *leaf;
2520 	int slot;
2521 	int ret;
2522 	int enospc_errors = 0;
2523 	bool counting = true;
2524 
2525 	/* step one, make some room on all the devices */
2526 	devices = &fs_info->fs_devices->devices;
2527 	list_for_each_entry(device, devices, dev_list) {
2528 		old_size = device->total_bytes;
2529 		size_to_free = div_factor(old_size, 1);
2530 		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2531 		if (!device->writeable ||
2532 		    device->total_bytes - device->bytes_used > size_to_free)
2533 			continue;
2534 
2535 		ret = btrfs_shrink_device(device, old_size - size_to_free);
2536 		if (ret == -ENOSPC)
2537 			break;
2538 		BUG_ON(ret);
2539 
2540 		trans = btrfs_start_transaction(dev_root, 0);
2541 		BUG_ON(IS_ERR(trans));
2542 
2543 		ret = btrfs_grow_device(trans, device, old_size);
2544 		BUG_ON(ret);
2545 
2546 		btrfs_end_transaction(trans, dev_root);
2547 	}
2548 
2549 	/* step two, relocate all the chunks */
2550 	path = btrfs_alloc_path();
2551 	if (!path) {
2552 		ret = -ENOMEM;
2553 		goto error;
2554 	}
2555 
2556 	/* zero out stat counters */
2557 	spin_lock(&fs_info->balance_lock);
2558 	memset(&bctl->stat, 0, sizeof(bctl->stat));
2559 	spin_unlock(&fs_info->balance_lock);
2560 again:
2561 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2562 	key.offset = (u64)-1;
2563 	key.type = BTRFS_CHUNK_ITEM_KEY;
2564 
2565 	while (1) {
2566 		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2567 		    atomic_read(&fs_info->balance_cancel_req)) {
2568 			ret = -ECANCELED;
2569 			goto error;
2570 		}
2571 
2572 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2573 		if (ret < 0)
2574 			goto error;
2575 
2576 		/*
2577 		 * this shouldn't happen, it means the last relocate
2578 		 * failed
2579 		 */
2580 		if (ret == 0)
2581 			BUG(); /* FIXME break ? */
2582 
2583 		ret = btrfs_previous_item(chunk_root, path, 0,
2584 					  BTRFS_CHUNK_ITEM_KEY);
2585 		if (ret) {
2586 			ret = 0;
2587 			break;
2588 		}
2589 
2590 		leaf = path->nodes[0];
2591 		slot = path->slots[0];
2592 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2593 
2594 		if (found_key.objectid != key.objectid)
2595 			break;
2596 
2597 		/* chunk zero is special */
2598 		if (found_key.offset == 0)
2599 			break;
2600 
2601 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2602 
2603 		if (!counting) {
2604 			spin_lock(&fs_info->balance_lock);
2605 			bctl->stat.considered++;
2606 			spin_unlock(&fs_info->balance_lock);
2607 		}
2608 
2609 		ret = should_balance_chunk(chunk_root, leaf, chunk,
2610 					   found_key.offset);
2611 		btrfs_release_path(path);
2612 		if (!ret)
2613 			goto loop;
2614 
2615 		if (counting) {
2616 			spin_lock(&fs_info->balance_lock);
2617 			bctl->stat.expected++;
2618 			spin_unlock(&fs_info->balance_lock);
2619 			goto loop;
2620 		}
2621 
2622 		ret = btrfs_relocate_chunk(chunk_root,
2623 					   chunk_root->root_key.objectid,
2624 					   found_key.objectid,
2625 					   found_key.offset);
2626 		if (ret && ret != -ENOSPC)
2627 			goto error;
2628 		if (ret == -ENOSPC) {
2629 			enospc_errors++;
2630 		} else {
2631 			spin_lock(&fs_info->balance_lock);
2632 			bctl->stat.completed++;
2633 			spin_unlock(&fs_info->balance_lock);
2634 		}
2635 loop:
2636 		key.offset = found_key.offset - 1;
2637 	}
2638 
2639 	if (counting) {
2640 		btrfs_release_path(path);
2641 		counting = false;
2642 		goto again;
2643 	}
2644 error:
2645 	btrfs_free_path(path);
2646 	if (enospc_errors) {
2647 		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2648 		       enospc_errors);
2649 		if (!ret)
2650 			ret = -ENOSPC;
2651 	}
2652 
2653 	return ret;
2654 }
2655 
2656 /**
2657  * alloc_profile_is_valid - see if a given profile is valid and reduced
2658  * @flags: profile to validate
2659  * @extended: if true @flags is treated as an extended profile
2660  */
2661 static int alloc_profile_is_valid(u64 flags, int extended)
2662 {
2663 	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2664 			       BTRFS_BLOCK_GROUP_PROFILE_MASK);
2665 
2666 	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2667 
2668 	/* 1) check that all other bits are zeroed */
2669 	if (flags & ~mask)
2670 		return 0;
2671 
2672 	/* 2) see if profile is reduced */
2673 	if (flags == 0)
2674 		return !extended; /* "0" is valid for usual profiles */
2675 
2676 	/* true if exactly one bit set */
2677 	return (flags & (flags - 1)) == 0;
2678 }
2679 
2680 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2681 {
2682 	/* cancel requested || normal exit path */
2683 	return atomic_read(&fs_info->balance_cancel_req) ||
2684 		(atomic_read(&fs_info->balance_pause_req) == 0 &&
2685 		 atomic_read(&fs_info->balance_cancel_req) == 0);
2686 }
2687 
2688 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2689 {
2690 	int ret;
2691 
2692 	unset_balance_control(fs_info);
2693 	ret = del_balance_item(fs_info->tree_root);
2694 	BUG_ON(ret);
2695 }
2696 
2697 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2698 			       struct btrfs_ioctl_balance_args *bargs);
2699 
2700 /*
2701  * Should be called with both balance and volume mutexes held
2702  */
2703 int btrfs_balance(struct btrfs_balance_control *bctl,
2704 		  struct btrfs_ioctl_balance_args *bargs)
2705 {
2706 	struct btrfs_fs_info *fs_info = bctl->fs_info;
2707 	u64 allowed;
2708 	int mixed = 0;
2709 	int ret;
2710 
2711 	if (btrfs_fs_closing(fs_info) ||
2712 	    atomic_read(&fs_info->balance_pause_req) ||
2713 	    atomic_read(&fs_info->balance_cancel_req)) {
2714 		ret = -EINVAL;
2715 		goto out;
2716 	}
2717 
2718 	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2719 	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2720 		mixed = 1;
2721 
2722 	/*
2723 	 * In case of mixed groups both data and meta should be picked,
2724 	 * and identical options should be given for both of them.
2725 	 */
2726 	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2727 	if (mixed && (bctl->flags & allowed)) {
2728 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2729 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2730 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2731 			printk(KERN_ERR "btrfs: with mixed groups data and "
2732 			       "metadata balance options must be the same\n");
2733 			ret = -EINVAL;
2734 			goto out;
2735 		}
2736 	}
2737 
2738 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2739 	if (fs_info->fs_devices->num_devices == 1)
2740 		allowed |= BTRFS_BLOCK_GROUP_DUP;
2741 	else if (fs_info->fs_devices->num_devices < 4)
2742 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2743 	else
2744 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2745 				BTRFS_BLOCK_GROUP_RAID10);
2746 
2747 	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2748 	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
2749 	     (bctl->data.target & ~allowed))) {
2750 		printk(KERN_ERR "btrfs: unable to start balance with target "
2751 		       "data profile %llu\n",
2752 		       (unsigned long long)bctl->data.target);
2753 		ret = -EINVAL;
2754 		goto out;
2755 	}
2756 	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2757 	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2758 	     (bctl->meta.target & ~allowed))) {
2759 		printk(KERN_ERR "btrfs: unable to start balance with target "
2760 		       "metadata profile %llu\n",
2761 		       (unsigned long long)bctl->meta.target);
2762 		ret = -EINVAL;
2763 		goto out;
2764 	}
2765 	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2766 	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2767 	     (bctl->sys.target & ~allowed))) {
2768 		printk(KERN_ERR "btrfs: unable to start balance with target "
2769 		       "system profile %llu\n",
2770 		       (unsigned long long)bctl->sys.target);
2771 		ret = -EINVAL;
2772 		goto out;
2773 	}
2774 
2775 	/* allow dup'ed data chunks only in mixed mode */
2776 	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2777 	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2778 		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2779 		ret = -EINVAL;
2780 		goto out;
2781 	}
2782 
2783 	/* allow reducing meta or sys integrity only if force is set */
2784 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2785 			BTRFS_BLOCK_GROUP_RAID10;
2786 	if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2787 	     (fs_info->avail_system_alloc_bits & allowed) &&
2788 	     !(bctl->sys.target & allowed)) ||
2789 	    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2790 	     (fs_info->avail_metadata_alloc_bits & allowed) &&
2791 	     !(bctl->meta.target & allowed))) {
2792 		if (bctl->flags & BTRFS_BALANCE_FORCE) {
2793 			printk(KERN_INFO "btrfs: force reducing metadata "
2794 			       "integrity\n");
2795 		} else {
2796 			printk(KERN_ERR "btrfs: balance will reduce metadata "
2797 			       "integrity, use force if you want this\n");
2798 			ret = -EINVAL;
2799 			goto out;
2800 		}
2801 	}
2802 
2803 	ret = insert_balance_item(fs_info->tree_root, bctl);
2804 	if (ret && ret != -EEXIST)
2805 		goto out;
2806 
2807 	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2808 		BUG_ON(ret == -EEXIST);
2809 		set_balance_control(bctl);
2810 	} else {
2811 		BUG_ON(ret != -EEXIST);
2812 		spin_lock(&fs_info->balance_lock);
2813 		update_balance_args(bctl);
2814 		spin_unlock(&fs_info->balance_lock);
2815 	}
2816 
2817 	atomic_inc(&fs_info->balance_running);
2818 	mutex_unlock(&fs_info->balance_mutex);
2819 
2820 	ret = __btrfs_balance(fs_info);
2821 
2822 	mutex_lock(&fs_info->balance_mutex);
2823 	atomic_dec(&fs_info->balance_running);
2824 
2825 	if (bargs) {
2826 		memset(bargs, 0, sizeof(*bargs));
2827 		update_ioctl_balance_args(fs_info, 0, bargs);
2828 	}
2829 
2830 	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2831 	    balance_need_close(fs_info)) {
2832 		__cancel_balance(fs_info);
2833 	}
2834 
2835 	wake_up(&fs_info->balance_wait_q);
2836 
2837 	return ret;
2838 out:
2839 	if (bctl->flags & BTRFS_BALANCE_RESUME)
2840 		__cancel_balance(fs_info);
2841 	else
2842 		kfree(bctl);
2843 	return ret;
2844 }
2845 
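/*
 * Kthread body used to resume an interrupted balance in the background
 * after mount; see btrfs_resume_balance_async() below.
 */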
2846 static int balance_kthread(void *data)
2847 {
2848 	struct btrfs_fs_info *fs_info = data;
2849 	int ret = 0;
2850 
2851 	mutex_lock(&fs_info->volume_mutex);
2852 	mutex_lock(&fs_info->balance_mutex);
2853 
2854 	if (fs_info->balance_ctl) {
2855 		printk(KERN_INFO "btrfs: continuing balance\n");
2856 		ret = btrfs_balance(fs_info->balance_ctl, NULL);
2857 	}
2858 
2859 	mutex_unlock(&fs_info->balance_mutex);
2860 	mutex_unlock(&fs_info->volume_mutex);
2861 
2862 	return ret;
2863 }
2864 
2865 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
2866 {
2867 	struct task_struct *tsk;
2868 
2869 	spin_lock(&fs_info->balance_lock);
2870 	if (!fs_info->balance_ctl) {
2871 		spin_unlock(&fs_info->balance_lock);
2872 		return 0;
2873 	}
2874 	spin_unlock(&fs_info->balance_lock);
2875 
2876 	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2877 		printk(KERN_INFO "btrfs: force skipping balance\n");
2878 		return 0;
2879 	}
2880 
2881 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
2882 	if (IS_ERR(tsk))
2883 		return PTR_ERR(tsk);
2884 
2885 	return 0;
2886 }
2887 
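/*
 * Read the balance item left behind by an interrupted balance and
 * rebuild the in-memory balance control with BTRFS_BALANCE_RESUME set.
 * Returns 0 when no balance item exists.
 */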
2888 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
2889 {
2890 	struct btrfs_balance_control *bctl;
2891 	struct btrfs_balance_item *item;
2892 	struct btrfs_disk_balance_args disk_bargs;
2893 	struct btrfs_path *path;
2894 	struct extent_buffer *leaf;
2895 	struct btrfs_key key;
2896 	int ret;
2897 
2898 	path = btrfs_alloc_path();
2899 	if (!path)
2900 		return -ENOMEM;
2901 
2902 	key.objectid = BTRFS_BALANCE_OBJECTID;
2903 	key.type = BTRFS_BALANCE_ITEM_KEY;
2904 	key.offset = 0;
2905 
2906 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
2907 	if (ret < 0)
2908 		goto out;
2909 	if (ret > 0) { /* ret = -ENOENT; */
2910 		ret = 0;
2911 		goto out;
2912 	}
2913 
2914 	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2915 	if (!bctl) {
2916 		ret = -ENOMEM;
2917 		goto out;
2918 	}
2919 
2920 	leaf = path->nodes[0];
2921 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2922 
2923 	bctl->fs_info = fs_info;
2924 	bctl->flags = btrfs_balance_flags(leaf, item);
2925 	bctl->flags |= BTRFS_BALANCE_RESUME;
2926 
2927 	btrfs_balance_data(leaf, item, &disk_bargs);
2928 	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2929 	btrfs_balance_meta(leaf, item, &disk_bargs);
2930 	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2931 	btrfs_balance_sys(leaf, item, &disk_bargs);
2932 	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2933 
2934 	mutex_lock(&fs_info->volume_mutex);
2935 	mutex_lock(&fs_info->balance_mutex);
2936 
2937 	set_balance_control(bctl);
2938 
2939 	mutex_unlock(&fs_info->balance_mutex);
2940 	mutex_unlock(&fs_info->volume_mutex);
2941 out:
2942 	btrfs_free_path(path);
2943 	return ret;
2944 }
2945 
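/*
 * Pause a running balance.  Returns -ENOTCONN if no balance is in
 * progress or it is already paused.
 */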
2946 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2947 {
2948 	int ret = 0;
2949 
2950 	mutex_lock(&fs_info->balance_mutex);
2951 	if (!fs_info->balance_ctl) {
2952 		mutex_unlock(&fs_info->balance_mutex);
2953 		return -ENOTCONN;
2954 	}
2955 
2956 	if (atomic_read(&fs_info->balance_running)) {
2957 		atomic_inc(&fs_info->balance_pause_req);
2958 		mutex_unlock(&fs_info->balance_mutex);
2959 
2960 		wait_event(fs_info->balance_wait_q,
2961 			   atomic_read(&fs_info->balance_running) == 0);
2962 
2963 		mutex_lock(&fs_info->balance_mutex);
2964 		/* we are good with balance_ctl ripped off from under us */
2965 		BUG_ON(atomic_read(&fs_info->balance_running));
2966 		atomic_dec(&fs_info->balance_pause_req);
2967 	} else {
2968 		ret = -ENOTCONN;
2969 	}
2970 
2971 	mutex_unlock(&fs_info->balance_mutex);
2972 	return ret;
2973 }
2974 
2975 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2976 {
2977 	mutex_lock(&fs_info->balance_mutex);
2978 	if (!fs_info->balance_ctl) {
2979 		mutex_unlock(&fs_info->balance_mutex);
2980 		return -ENOTCONN;
2981 	}
2982 
2983 	atomic_inc(&fs_info->balance_cancel_req);
2984 	/*
2985 	 * if balance is running, just wait and return; the balance item
2986 	 * is deleted in btrfs_balance() in that case
2987 	 */
2988 	if (atomic_read(&fs_info->balance_running)) {
2989 		mutex_unlock(&fs_info->balance_mutex);
2990 		wait_event(fs_info->balance_wait_q,
2991 			   atomic_read(&fs_info->balance_running) == 0);
2992 		mutex_lock(&fs_info->balance_mutex);
2993 	} else {
2994 		/* __cancel_balance needs volume_mutex */
2995 		mutex_unlock(&fs_info->balance_mutex);
2996 		mutex_lock(&fs_info->volume_mutex);
2997 		mutex_lock(&fs_info->balance_mutex);
2998 
2999 		if (fs_info->balance_ctl)
3000 			__cancel_balance(fs_info);
3001 
3002 		mutex_unlock(&fs_info->volume_mutex);
3003 	}
3004 
3005 	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3006 	atomic_dec(&fs_info->balance_cancel_req);
3007 	mutex_unlock(&fs_info->balance_mutex);
3008 	return 0;
3009 }
3010 
3011 /*
3012  * shrinking a device means finding all of the device extents past
3013  * the new size, and then following the back refs to the chunks.
3014  * The chunk relocation code actually frees the device extents.
3015  */
3016 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3017 {
3018 	struct btrfs_trans_handle *trans;
3019 	struct btrfs_root *root = device->dev_root;
3020 	struct btrfs_dev_extent *dev_extent = NULL;
3021 	struct btrfs_path *path;
3022 	u64 length;
3023 	u64 chunk_tree;
3024 	u64 chunk_objectid;
3025 	u64 chunk_offset;
3026 	int ret;
3027 	int slot;
3028 	int failed = 0;
3029 	bool retried = false;
3030 	struct extent_buffer *l;
3031 	struct btrfs_key key;
3032 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3033 	u64 old_total = btrfs_super_total_bytes(super_copy);
3034 	u64 old_size = device->total_bytes;
3035 	u64 diff = device->total_bytes - new_size;
3036 
3037 	if (new_size >= device->total_bytes)
3038 		return -EINVAL;
3039 
3040 	path = btrfs_alloc_path();
3041 	if (!path)
3042 		return -ENOMEM;
3043 
3044 	path->reada = 2;
3045 
3046 	lock_chunks(root);
3047 
3048 	device->total_bytes = new_size;
3049 	if (device->writeable) {
3050 		device->fs_devices->total_rw_bytes -= diff;
3051 		spin_lock(&root->fs_info->free_chunk_lock);
3052 		root->fs_info->free_chunk_space -= diff;
3053 		spin_unlock(&root->fs_info->free_chunk_lock);
3054 	}
3055 	unlock_chunks(root);
3056 
3057 again:
3058 	key.objectid = device->devid;
3059 	key.offset = (u64)-1;
3060 	key.type = BTRFS_DEV_EXTENT_KEY;
3061 
3062 	do {
3063 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3064 		if (ret < 0)
3065 			goto done;
3066 
3067 		ret = btrfs_previous_item(root, path, 0, key.type);
3068 		if (ret < 0)
3069 			goto done;
3070 		if (ret) {
3071 			ret = 0;
3072 			btrfs_release_path(path);
3073 			break;
3074 		}
3075 
3076 		l = path->nodes[0];
3077 		slot = path->slots[0];
3078 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3079 
3080 		if (key.objectid != device->devid) {
3081 			btrfs_release_path(path);
3082 			break;
3083 		}
3084 
3085 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3086 		length = btrfs_dev_extent_length(l, dev_extent);
3087 
3088 		if (key.offset + length <= new_size) {
3089 			btrfs_release_path(path);
3090 			break;
3091 		}
3092 
3093 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3094 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3095 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3096 		btrfs_release_path(path);
3097 
3098 		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3099 					   chunk_offset);
3100 		if (ret && ret != -ENOSPC)
3101 			goto done;
3102 		if (ret == -ENOSPC)
3103 			failed++;
3104 	} while (key.offset-- > 0);
3105 
3106 	if (failed && !retried) {
3107 		failed = 0;
3108 		retried = true;
3109 		goto again;
3110 	} else if (failed && retried) {
3111 		ret = -ENOSPC;
3112 		lock_chunks(root);
3113 
3114 		device->total_bytes = old_size;
3115 		if (device->writeable)
3116 			device->fs_devices->total_rw_bytes += diff;
3117 		spin_lock(&root->fs_info->free_chunk_lock);
3118 		root->fs_info->free_chunk_space += diff;
3119 		spin_unlock(&root->fs_info->free_chunk_lock);
3120 		unlock_chunks(root);
3121 		goto done;
3122 	}
3123 
3124 	/* Shrinking succeeded, else we would be at "done". */
3125 	trans = btrfs_start_transaction(root, 0);
3126 	if (IS_ERR(trans)) {
3127 		ret = PTR_ERR(trans);
3128 		goto done;
3129 	}
3130 
3131 	lock_chunks(root);
3132 
3133 	device->disk_total_bytes = new_size;
3134 	/* Now btrfs_update_device() will change the on-disk size. */
3135 	ret = btrfs_update_device(trans, device);
3136 	if (ret) {
3137 		unlock_chunks(root);
3138 		btrfs_end_transaction(trans, root);
3139 		goto done;
3140 	}
3141 	WARN_ON(diff > old_total);
3142 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
3143 	unlock_chunks(root);
3144 	btrfs_end_transaction(trans, root);
3145 done:
3146 	btrfs_free_path(path);
3147 	return ret;
3148 }
3149 
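/*
 * Append a copy of a SYSTEM chunk item to the superblock's
 * sys_chunk_array so the chunk tree itself can be bootstrapped at mount
 * time.  Fails with -EFBIG if the array is full.
 */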
3150 static int btrfs_add_system_chunk(struct btrfs_root *root,
3151 			   struct btrfs_key *key,
3152 			   struct btrfs_chunk *chunk, int item_size)
3153 {
3154 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3155 	struct btrfs_disk_key disk_key;
3156 	u32 array_size;
3157 	u8 *ptr;
3158 
3159 	array_size = btrfs_super_sys_array_size(super_copy);
3160 	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3161 		return -EFBIG;
3162 
3163 	ptr = super_copy->sys_chunk_array + array_size;
3164 	btrfs_cpu_key_to_disk(&disk_key, key);
3165 	memcpy(ptr, &disk_key, sizeof(disk_key));
3166 	ptr += sizeof(disk_key);
3167 	memcpy(ptr, chunk, item_size);
3168 	item_size += sizeof(disk_key);
3169 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3170 	return 0;
3171 }
3172 
3173 /*
3174  * sort the devices in descending order by max_avail, total_avail
3175  */
3176 static int btrfs_cmp_device_info(const void *a, const void *b)
3177 {
3178 	const struct btrfs_device_info *di_a = a;
3179 	const struct btrfs_device_info *di_b = b;
3180 
3181 	if (di_a->max_avail > di_b->max_avail)
3182 		return -1;
3183 	if (di_a->max_avail < di_b->max_avail)
3184 		return 1;
3185 	if (di_a->total_avail > di_b->total_avail)
3186 		return -1;
3187 	if (di_a->total_avail < di_b->total_avail)
3188 		return 1;
3189 	return 0;
3190 }
3191 
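/*
 * Core of the chunk allocator: collect the free space on every writeable
 * device, sort the devices by hole size, and carve stripes out of the
 * largest holes according to the RAID profile in @type.
 */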
3192 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3193 			       struct btrfs_root *extent_root,
3194 			       struct map_lookup **map_ret,
3195 			       u64 *num_bytes_out, u64 *stripe_size_out,
3196 			       u64 start, u64 type)
3197 {
3198 	struct btrfs_fs_info *info = extent_root->fs_info;
3199 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
3200 	struct list_head *cur;
3201 	struct map_lookup *map = NULL;
3202 	struct extent_map_tree *em_tree;
3203 	struct extent_map *em;
3204 	struct btrfs_device_info *devices_info = NULL;
3205 	u64 total_avail;
3206 	int num_stripes;	/* total number of stripes to allocate */
3207 	int sub_stripes;	/* sub_stripes info for map */
3208 	int dev_stripes;	/* stripes per dev */
3209 	int devs_max;		/* max devs to use */
3210 	int devs_min;		/* min devs needed */
3211 	int devs_increment;	/* ndevs has to be a multiple of this */
3212 	int ncopies;		/* how many copies of the data we have */
3213 	int ret;
3214 	u64 max_stripe_size;
3215 	u64 max_chunk_size;
3216 	u64 stripe_size;
3217 	u64 num_bytes;
3218 	int ndevs;
3219 	int i;
3220 	int j;
3221 
3222 	BUG_ON(!alloc_profile_is_valid(type, 0));
3223 
3224 	if (list_empty(&fs_devices->alloc_list))
3225 		return -ENOSPC;
3226 
3227 	sub_stripes = 1;
3228 	dev_stripes = 1;
3229 	devs_increment = 1;
3230 	ncopies = 1;
3231 	devs_max = 0;	/* 0 == as many as possible */
3232 	devs_min = 1;
3233 
3234 	/*
3235 	 * define the properties of each RAID type.
3236 	 * FIXME: move this to a global table and use it in all RAID
3237 	 * calculation code
3238 	 */
3239 	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3240 		dev_stripes = 2;
3241 		ncopies = 2;
3242 		devs_max = 1;
3243 	} else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3244 		devs_min = 2;
3245 	} else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3246 		devs_increment = 2;
3247 		ncopies = 2;
3248 		devs_max = 2;
3249 		devs_min = 2;
3250 	} else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3251 		sub_stripes = 2;
3252 		devs_increment = 2;
3253 		ncopies = 2;
3254 		devs_min = 4;
3255 	} else {
3256 		devs_max = 1;
3257 	}
3258 
3259 	if (type & BTRFS_BLOCK_GROUP_DATA) {
3260 		max_stripe_size = 1024 * 1024 * 1024;
3261 		max_chunk_size = 10 * max_stripe_size;
3262 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3263 		/* for larger filesystems, use larger metadata chunks */
3264 		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3265 			max_stripe_size = 1024 * 1024 * 1024;
3266 		else
3267 			max_stripe_size = 256 * 1024 * 1024;
3268 		max_chunk_size = max_stripe_size;
3269 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3270 		max_stripe_size = 32 * 1024 * 1024;
3271 		max_chunk_size = 2 * max_stripe_size;
3272 	} else {
3273 		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3274 		       type);
3275 		BUG_ON(1);
3276 	}
3277 
3278 	/* we don't want a chunk larger than 10% of writeable space */
3279 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3280 			     max_chunk_size);
3281 
3282 	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3283 			       GFP_NOFS);
3284 	if (!devices_info)
3285 		return -ENOMEM;
3286 
3287 	cur = fs_devices->alloc_list.next;
3288 
3289 	/*
3290 	 * in the first pass through the devices list, we gather information
3291 	 * about the available holes on each device.
3292 	 */
3293 	ndevs = 0;
3294 	while (cur != &fs_devices->alloc_list) {
3295 		struct btrfs_device *device;
3296 		u64 max_avail;
3297 		u64 dev_offset;
3298 
3299 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3300 
3301 		cur = cur->next;
3302 
3303 		if (!device->writeable) {
3304 			printk(KERN_ERR
3305 			       "btrfs: read-only device in alloc_list\n");
3306 			WARN_ON(1);
3307 			continue;
3308 		}
3309 
3310 		if (!device->in_fs_metadata)
3311 			continue;
3312 
3313 		if (device->total_bytes > device->bytes_used)
3314 			total_avail = device->total_bytes - device->bytes_used;
3315 		else
3316 			total_avail = 0;
3317 
3318 		/* If there is no space on this device, skip it. */
3319 		if (total_avail == 0)
3320 			continue;
3321 
3322 		ret = find_free_dev_extent(device,
3323 					   max_stripe_size * dev_stripes,
3324 					   &dev_offset, &max_avail);
3325 		if (ret && ret != -ENOSPC)
3326 			goto error;
3327 
3328 		if (ret == 0)
3329 			max_avail = max_stripe_size * dev_stripes;
3330 
3331 		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3332 			continue;
3333 
3334 		devices_info[ndevs].dev_offset = dev_offset;
3335 		devices_info[ndevs].max_avail = max_avail;
3336 		devices_info[ndevs].total_avail = total_avail;
3337 		devices_info[ndevs].dev = device;
3338 		++ndevs;
3339 	}
3340 
3341 	/*
3342 	 * now sort the devices by hole size / available space
3343 	 */
3344 	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3345 	     btrfs_cmp_device_info, NULL);
3346 
3347 	/* round down to number of usable stripes */
3348 	ndevs -= ndevs % devs_increment;
3349 
3350 	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3351 		ret = -ENOSPC;
3352 		goto error;
3353 	}
3354 
3355 	if (devs_max && ndevs > devs_max)
3356 		ndevs = devs_max;
3357 	/*
3358 	 * the primary goal is to maximize the number of stripes, so use as many
3359 	 * devices as possible, even if the stripes are not maximum sized.
3360 	 */
3361 	stripe_size = devices_info[ndevs-1].max_avail;
3362 	num_stripes = ndevs * dev_stripes;
3363 
3364 	if (stripe_size * ndevs > max_chunk_size * ncopies) {
3365 		stripe_size = max_chunk_size * ncopies;
3366 		do_div(stripe_size, ndevs);
3367 	}
3368 
3369 	do_div(stripe_size, dev_stripes);
3370 
3371 	/* align to BTRFS_STRIPE_LEN */
3372 	do_div(stripe_size, BTRFS_STRIPE_LEN);
3373 	stripe_size *= BTRFS_STRIPE_LEN;
3374 
3375 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3376 	if (!map) {
3377 		ret = -ENOMEM;
3378 		goto error;
3379 	}
3380 	map->num_stripes = num_stripes;
3381 
3382 	for (i = 0; i < ndevs; ++i) {
3383 		for (j = 0; j < dev_stripes; ++j) {
3384 			int s = i * dev_stripes + j;
3385 			map->stripes[s].dev = devices_info[i].dev;
3386 			map->stripes[s].physical = devices_info[i].dev_offset +
3387 						   j * stripe_size;
3388 		}
3389 	}
3390 	map->sector_size = extent_root->sectorsize;
3391 	map->stripe_len = BTRFS_STRIPE_LEN;
3392 	map->io_align = BTRFS_STRIPE_LEN;
3393 	map->io_width = BTRFS_STRIPE_LEN;
3394 	map->type = type;
3395 	map->sub_stripes = sub_stripes;
3396 
3397 	*map_ret = map;
3398 	num_bytes = stripe_size * (num_stripes / ncopies);
3399 
3400 	*stripe_size_out = stripe_size;
3401 	*num_bytes_out = num_bytes;
3402 
3403 	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3404 
3405 	em = alloc_extent_map();
3406 	if (!em) {
3407 		ret = -ENOMEM;
3408 		goto error;
3409 	}
3410 	em->bdev = (struct block_device *)map;
3411 	em->start = start;
3412 	em->len = num_bytes;
3413 	em->block_start = 0;
3414 	em->block_len = em->len;
3415 
3416 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3417 	write_lock(&em_tree->lock);
3418 	ret = add_extent_mapping(em_tree, em);
3419 	write_unlock(&em_tree->lock);
3420 	free_extent_map(em);
3421 	if (ret)
3422 		goto error;
3423 
3424 	ret = btrfs_make_block_group(trans, extent_root, 0, type,
3425 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3426 				     start, num_bytes);
3427 	if (ret)
3428 		goto error;
3429 
3430 	for (i = 0; i < map->num_stripes; ++i) {
3431 		struct btrfs_device *device;
3432 		u64 dev_offset;
3433 
3434 		device = map->stripes[i].dev;
3435 		dev_offset = map->stripes[i].physical;
3436 
3437 		ret = btrfs_alloc_dev_extent(trans, device,
3438 				info->chunk_root->root_key.objectid,
3439 				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3440 				start, dev_offset, stripe_size);
3441 		if (ret) {
3442 			btrfs_abort_transaction(trans, extent_root, ret);
3443 			goto error;
3444 		}
3445 	}
3446 
3447 	kfree(devices_info);
3448 	return 0;
3449 
3450 error:
3451 	kfree(map);
3452 	kfree(devices_info);
3453 	return ret;
3454 }
3455 
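/*
 * Second phase of chunk allocation: charge the stripes against each
 * device, build the on-disk chunk item and insert it into the chunk
 * tree (and into the superblock array for SYSTEM chunks).
 */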
3456 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3457 				struct btrfs_root *extent_root,
3458 				struct map_lookup *map, u64 chunk_offset,
3459 				u64 chunk_size, u64 stripe_size)
3460 {
3461 	u64 dev_offset;
3462 	struct btrfs_key key;
3463 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3464 	struct btrfs_device *device;
3465 	struct btrfs_chunk *chunk;
3466 	struct btrfs_stripe *stripe;
3467 	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3468 	int index = 0;
3469 	int ret;
3470 
3471 	chunk = kzalloc(item_size, GFP_NOFS);
3472 	if (!chunk)
3473 		return -ENOMEM;
3474 
3475 	index = 0;
3476 	while (index < map->num_stripes) {
3477 		device = map->stripes[index].dev;
3478 		device->bytes_used += stripe_size;
3479 		ret = btrfs_update_device(trans, device);
3480 		if (ret)
3481 			goto out_free;
3482 		index++;
3483 	}
3484 
3485 	spin_lock(&extent_root->fs_info->free_chunk_lock);
3486 	extent_root->fs_info->free_chunk_space -= (stripe_size *
3487 						   map->num_stripes);
3488 	spin_unlock(&extent_root->fs_info->free_chunk_lock);
3489 
3490 	index = 0;
3491 	stripe = &chunk->stripe;
3492 	while (index < map->num_stripes) {
3493 		device = map->stripes[index].dev;
3494 		dev_offset = map->stripes[index].physical;
3495 
3496 		btrfs_set_stack_stripe_devid(stripe, device->devid);
3497 		btrfs_set_stack_stripe_offset(stripe, dev_offset);
3498 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3499 		stripe++;
3500 		index++;
3501 	}
3502 
3503 	btrfs_set_stack_chunk_length(chunk, chunk_size);
3504 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3505 	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3506 	btrfs_set_stack_chunk_type(chunk, map->type);
3507 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3508 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3509 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3510 	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3511 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3512 
3513 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3514 	key.type = BTRFS_CHUNK_ITEM_KEY;
3515 	key.offset = chunk_offset;
3516 
3517 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3518 
3519 	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3520 		/*
3521 		 * TODO: Cleanup of inserted chunk root in case of
3522 		 * failure.
3523 		 */
3524 		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3525 					     item_size);
3526 	}
3527 
3528 out_free:
3529 	kfree(chunk);
3530 	return ret;
3531 }
3532 
3533 /*
3534  * Chunk allocation falls into two parts. The first part does the work
3535  * that makes the newly allocated chunk usable, but does not do any
3536  * operation that modifies the chunk tree. The second part does the
3537  * work that requires modifying the chunk tree. This division is
3538  * important for the bootstrap process of adding storage to a seed btrfs.
3539  */
3540 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3541 		      struct btrfs_root *extent_root, u64 type)
3542 {
3543 	u64 chunk_offset;
3544 	u64 chunk_size;
3545 	u64 stripe_size;
3546 	struct map_lookup *map;
3547 	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3548 	int ret;
3549 
3550 	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3551 			      &chunk_offset);
3552 	if (ret)
3553 		return ret;
3554 
3555 	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3556 				  &stripe_size, chunk_offset, type);
3557 	if (ret)
3558 		return ret;
3559 
3560 	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3561 				   chunk_size, stripe_size);
3562 	if (ret)
3563 		return ret;
3564 	return 0;
3565 }
3566 
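/*
 * Allocate the first metadata and system chunks on a freshly added
 * writeable device when sprouting from a seed filesystem; called from
 * the seeding path above.
 */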
3567 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3568 					 struct btrfs_root *root,
3569 					 struct btrfs_device *device)
3570 {
3571 	u64 chunk_offset;
3572 	u64 sys_chunk_offset;
3573 	u64 chunk_size;
3574 	u64 sys_chunk_size;
3575 	u64 stripe_size;
3576 	u64 sys_stripe_size;
3577 	u64 alloc_profile;
3578 	struct map_lookup *map;
3579 	struct map_lookup *sys_map;
3580 	struct btrfs_fs_info *fs_info = root->fs_info;
3581 	struct btrfs_root *extent_root = fs_info->extent_root;
3582 	int ret;
3583 
3584 	ret = find_next_chunk(fs_info->chunk_root,
3585 			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3586 	if (ret)
3587 		return ret;
3588 
3589 	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3590 				fs_info->avail_metadata_alloc_bits;
3591 	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3592 
3593 	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3594 				  &stripe_size, chunk_offset, alloc_profile);
3595 	if (ret)
3596 		return ret;
3597 
3598 	sys_chunk_offset = chunk_offset + chunk_size;
3599 
3600 	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3601 				fs_info->avail_system_alloc_bits;
3602 	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3603 
3604 	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3605 				  &sys_chunk_size, &sys_stripe_size,
3606 				  sys_chunk_offset, alloc_profile);
3607 	if (ret)
3608 		goto abort;
3609 
3610 	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3611 	if (ret)
3612 		goto abort;
3613 
3614 	/*
3615 	 * Modifying the chunk tree requires allocating new blocks from
3616 	 * both the system block group and the metadata block group, so
3617 	 * we can only do operations that modify the chunk tree after
3618 	 * both block groups have been created.
3619 	 */
3620 	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3621 				   chunk_size, stripe_size);
3622 	if (ret)
3623 		goto abort;
3624 
3625 	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3626 				   sys_chunk_offset, sys_chunk_size,
3627 				   sys_stripe_size);
3628 	if (ret)
3629 		goto abort;
3630 
3631 	return 0;
3632 
3633 abort:
3634 	btrfs_abort_transaction(trans, root, ret);
3635 	return ret;
3636 }
3637 
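/*
 * A chunk is considered read-only if any of its stripes sits on a
 * device that is not writeable (unless we are mounted degraded).
 */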
3638 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3639 {
3640 	struct extent_map *em;
3641 	struct map_lookup *map;
3642 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3643 	int readonly = 0;
3644 	int i;
3645 
3646 	read_lock(&map_tree->map_tree.lock);
3647 	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3648 	read_unlock(&map_tree->map_tree.lock);
3649 	if (!em)
3650 		return 1;
3651 
3652 	if (btrfs_test_opt(root, DEGRADED)) {
3653 		free_extent_map(em);
3654 		return 0;
3655 	}
3656 
3657 	map = (struct map_lookup *)em->bdev;
3658 	for (i = 0; i < map->num_stripes; i++) {
3659 		if (!map->stripes[i].dev->writeable) {
3660 			readonly = 1;
3661 			break;
3662 		}
3663 	}
3664 	free_extent_map(em);
3665 	return readonly;
3666 }
3667 
3668 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3669 {
3670 	extent_map_tree_init(&tree->map_tree);
3671 }
3672 
3673 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3674 {
3675 	struct extent_map *em;
3676 
3677 	while (1) {
3678 		write_lock(&tree->map_tree.lock);
3679 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3680 		if (em)
3681 			remove_extent_mapping(&tree->map_tree, em);
3682 		write_unlock(&tree->map_tree.lock);
3683 		if (!em)
3684 			break;
3685 		kfree(em->bdev);
3686 		/* once for us */
3687 		free_extent_map(em);
3688 		/* once for the tree */
3689 		free_extent_map(em);
3690 	}
3691 }
3692 
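/*
 * Return how many copies of the data at @logical exist: num_stripes for
 * DUP/RAID1, sub_stripes for RAID10 and 1 for everything else.
 */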
3693 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3694 {
3695 	struct extent_map *em;
3696 	struct map_lookup *map;
3697 	struct extent_map_tree *em_tree = &map_tree->map_tree;
3698 	int ret;
3699 
3700 	read_lock(&em_tree->lock);
3701 	em = lookup_extent_mapping(em_tree, logical, len);
3702 	read_unlock(&em_tree->lock);
3703 	BUG_ON(!em);
3704 
3705 	BUG_ON(em->start > logical || em->start + em->len < logical);
3706 	map = (struct map_lookup *)em->bdev;
3707 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3708 		ret = map->num_stripes;
3709 	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3710 		ret = map->sub_stripes;
3711 	else
3712 		ret = 1;
3713 	free_extent_map(em);
3714 	return ret;
3715 }
3716 
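/* Pick a stripe whose device is still present, preferring @optimal. */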
3717 static int find_live_mirror(struct map_lookup *map, int first, int num,
3718 			    int optimal)
3719 {
3720 	int i;
3721 	if (map->stripes[optimal].dev->bdev)
3722 		return optimal;
3723 	for (i = first; i < first + num; i++) {
3724 		if (map->stripes[i].dev->bdev)
3725 			return i;
3726 	}
3727 	/* we couldn't find a stripe with a live device.  Just return
3728 	 * something and the I/O error handling code will clean up eventually
3729 	 */
3730 	return optimal;
3731 }
3732 
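/*
 * Map a logical byte range onto the physical stripes that back it.  For
 * reads a single live mirror is picked; for writes and discards every
 * affected stripe is returned so the caller can issue one bio each.
 */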
3733 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3734 			     u64 logical, u64 *length,
3735 			     struct btrfs_bio **bbio_ret,
3736 			     int mirror_num)
3737 {
3738 	struct extent_map *em;
3739 	struct map_lookup *map;
3740 	struct extent_map_tree *em_tree = &map_tree->map_tree;
3741 	u64 offset;
3742 	u64 stripe_offset;
3743 	u64 stripe_end_offset;
3744 	u64 stripe_nr;
3745 	u64 stripe_nr_orig;
3746 	u64 stripe_nr_end;
3747 	int stripe_index;
3748 	int i;
3749 	int ret = 0;
3750 	int num_stripes;
3751 	int max_errors = 0;
3752 	struct btrfs_bio *bbio = NULL;
3753 
3754 	read_lock(&em_tree->lock);
3755 	em = lookup_extent_mapping(em_tree, logical, *length);
3756 	read_unlock(&em_tree->lock);
3757 
3758 	if (!em) {
3759 		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3760 		       (unsigned long long)logical,
3761 		       (unsigned long long)*length);
3762 		BUG();
3763 	}
3764 
3765 	BUG_ON(em->start > logical || em->start + em->len < logical);
3766 	map = (struct map_lookup *)em->bdev;
3767 	offset = logical - em->start;
3768 
3769 	if (mirror_num > map->num_stripes)
3770 		mirror_num = 0;
3771 
3772 	stripe_nr = offset;
3773 	/*
3774 	 * stripe_nr counts the total number of stripes we have to stride
3775 	 * to get to this block
3776 	 */
3777 	do_div(stripe_nr, map->stripe_len);
3778 
3779 	stripe_offset = stripe_nr * map->stripe_len;
3780 	BUG_ON(offset < stripe_offset);
3781 
3782 	/* stripe_offset is the offset of this block in its stripe */
3783 	stripe_offset = offset - stripe_offset;
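	/*
	 * Worked example with hypothetical numbers: for stripe_len == 64K
	 * and offset == 150K, the do_div above leaves stripe_nr == 2 (two
	 * full stripes are strided over) and stripe_offset becomes
	 * 150K - 128K == 22K into the third stripe.
	 */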
3784 
3785 	if (rw & REQ_DISCARD)
3786 		*length = min_t(u64, em->len - offset, *length);
3787 	else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3788 		/* we limit the length of each bio to what fits in a stripe */
3789 		*length = min_t(u64, em->len - offset,
3790 				map->stripe_len - stripe_offset);
3791 	} else {
3792 		*length = em->len - offset;
3793 	}
3794 
3795 	if (!bbio_ret)
3796 		goto out;
3797 
3798 	num_stripes = 1;
3799 	stripe_index = 0;
3800 	stripe_nr_orig = stripe_nr;
3801 	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3802 			(~(map->stripe_len - 1));
3803 	do_div(stripe_nr_end, map->stripe_len);
3804 	stripe_end_offset = stripe_nr_end * map->stripe_len -
3805 			    (offset + *length);
3806 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3807 		if (rw & REQ_DISCARD)
3808 			num_stripes = min_t(u64, map->num_stripes,
3809 					    stripe_nr_end - stripe_nr_orig);
3810 		stripe_index = do_div(stripe_nr, map->num_stripes);
3811 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3812 		if (rw & (REQ_WRITE | REQ_DISCARD))
3813 			num_stripes = map->num_stripes;
3814 		else if (mirror_num)
3815 			stripe_index = mirror_num - 1;
3816 		else {
3817 			stripe_index = find_live_mirror(map, 0,
3818 					    map->num_stripes,
3819 					    current->pid % map->num_stripes);
3820 			mirror_num = stripe_index + 1;
3821 		}
3822 
3823 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3824 		if (rw & (REQ_WRITE | REQ_DISCARD)) {
3825 			num_stripes = map->num_stripes;
3826 		} else if (mirror_num) {
3827 			stripe_index = mirror_num - 1;
3828 		} else {
3829 			mirror_num = 1;
3830 		}
3831 
3832 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3833 		int factor = map->num_stripes / map->sub_stripes;
3834 
3835 		stripe_index = do_div(stripe_nr, factor);
3836 		stripe_index *= map->sub_stripes;
3837 
3838 		if (rw & REQ_WRITE)
3839 			num_stripes = map->sub_stripes;
3840 		else if (rw & REQ_DISCARD)
3841 			num_stripes = min_t(u64, map->sub_stripes *
3842 					    (stripe_nr_end - stripe_nr_orig),
3843 					    map->num_stripes);
3844 		else if (mirror_num)
3845 			stripe_index += mirror_num - 1;
3846 		else {
3847 			int old_stripe_index = stripe_index;
3848 			stripe_index = find_live_mirror(map, stripe_index,
3849 					      map->sub_stripes, stripe_index +
3850 					      current->pid % map->sub_stripes);
3851 			mirror_num = stripe_index - old_stripe_index + 1;
3852 		}
3853 	} else {
3854 		/*
3855 		 * after this do_div call, stripe_nr is the number of stripes
3856 		 * on this device we have to walk to find the data, and
3857 		 * stripe_index is the number of our device in the stripe array
3858 		 */
3859 		stripe_index = do_div(stripe_nr, map->num_stripes);
3860 		mirror_num = stripe_index + 1;
3861 	}
3862 	BUG_ON(stripe_index >= map->num_stripes);
3863 
3864 	bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3865 	if (!bbio) {
3866 		ret = -ENOMEM;
3867 		goto out;
3868 	}
3869 	atomic_set(&bbio->error, 0);
3870 
3871 	if (rw & REQ_DISCARD) {
3872 		int factor = 0;
3873 		int sub_stripes = 0;
3874 		u64 stripes_per_dev = 0;
3875 		u32 remaining_stripes = 0;
3876 		u32 last_stripe = 0;
3877 
3878 		if (map->type &
3879 		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3880 			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3881 				sub_stripes = 1;
3882 			else
3883 				sub_stripes = map->sub_stripes;
3884 
3885 			factor = map->num_stripes / sub_stripes;
3886 			stripes_per_dev = div_u64_rem(stripe_nr_end -
3887 						      stripe_nr_orig,
3888 						      factor,
3889 						      &remaining_stripes);
3890 			div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3891 			last_stripe *= sub_stripes;
3892 		}
3893 
3894 		for (i = 0; i < num_stripes; i++) {
3895 			bbio->stripes[i].physical =
3896 				map->stripes[stripe_index].physical +
3897 				stripe_offset + stripe_nr * map->stripe_len;
3898 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3899 
3900 			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3901 					 BTRFS_BLOCK_GROUP_RAID10)) {
3902 				bbio->stripes[i].length = stripes_per_dev *
3903 							  map->stripe_len;
3904 
3905 				if (i / sub_stripes < remaining_stripes)
3906 					bbio->stripes[i].length +=
3907 						map->stripe_len;
3908 
3909 				/*
3910 				 * Special for the first stripe and
3911 				 * the last stripe:
3912 				 *
3913 				 * |-------|...|-------|
3914 				 *     |----------|
3915 				 *    off     end_off
3916 				 */
3917 				if (i < sub_stripes)
3918 					bbio->stripes[i].length -=
3919 						stripe_offset;
3920 
3921 				if (stripe_index >= last_stripe &&
3922 				    stripe_index <= (last_stripe +
3923 						     sub_stripes - 1))
3924 					bbio->stripes[i].length -=
3925 						stripe_end_offset;
3926 
3927 				if (i == sub_stripes - 1)
3928 					stripe_offset = 0;
3929 			} else
3930 				bbio->stripes[i].length = *length;
3931 
3932 			stripe_index++;
3933 			if (stripe_index == map->num_stripes) {
3934 				/* This could only happen for RAID0/10 */
3935 				stripe_index = 0;
3936 				stripe_nr++;
3937 			}
3938 		}
3939 	} else {
3940 		for (i = 0; i < num_stripes; i++) {
3941 			bbio->stripes[i].physical =
3942 				map->stripes[stripe_index].physical +
3943 				stripe_offset +
3944 				stripe_nr * map->stripe_len;
3945 			bbio->stripes[i].dev =
3946 				map->stripes[stripe_index].dev;
3947 			stripe_index++;
3948 		}
3949 	}
3950 
3951 	if (rw & REQ_WRITE) {
3952 		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3953 				 BTRFS_BLOCK_GROUP_RAID10 |
3954 				 BTRFS_BLOCK_GROUP_DUP)) {
3955 			max_errors = 1;
3956 		}
3957 	}
3958 
3959 	*bbio_ret = bbio;
3960 	bbio->num_stripes = num_stripes;
3961 	bbio->max_errors = max_errors;
3962 	bbio->mirror_num = mirror_num;
3963 out:
3964 	free_extent_map(em);
3965 	return ret;
3966 }
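
/*
 * A worked RAID10 read for the mapping above, with hypothetical chunk
 * geometry num_stripes == 4 and sub_stripes == 2 (so factor == 2): for
 * stripe_nr == 5, do_div(stripe_nr, factor) leaves stripe_nr == 2 with
 * remainder 1, so stripe_index == 1 * sub_stripes == 2.  The bio is then
 * aimed at map->stripes[2] (or its live mirror, map->stripes[3]) at
 * physical + stripe_offset + 2 * map->stripe_len.
 */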
3967 
3968 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3969 		      u64 logical, u64 *length,
3970 		      struct btrfs_bio **bbio_ret, int mirror_num)
3971 {
3972 	return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3973 				 mirror_num);
3974 }
3975 
3976 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3977 		     u64 chunk_start, u64 physical, u64 devid,
3978 		     u64 **logical, int *naddrs, int *stripe_len)
3979 {
3980 	struct extent_map_tree *em_tree = &map_tree->map_tree;
3981 	struct extent_map *em;
3982 	struct map_lookup *map;
3983 	u64 *buf;
3984 	u64 bytenr;
3985 	u64 length;
3986 	u64 stripe_nr;
3987 	int i, j, nr = 0;
3988 
3989 	read_lock(&em_tree->lock);
3990 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
3991 	read_unlock(&em_tree->lock);
3992 
3993 	BUG_ON(!em || em->start != chunk_start);
3994 	map = (struct map_lookup *)em->bdev;
3995 
3996 	length = em->len;
3997 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3998 		do_div(length, map->num_stripes / map->sub_stripes);
3999 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4000 		do_div(length, map->num_stripes);
4001 
4002 	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4003 	BUG_ON(!buf); /* -ENOMEM */
4004 
4005 	for (i = 0; i < map->num_stripes; i++) {
4006 		if (devid && map->stripes[i].dev->devid != devid)
4007 			continue;
4008 		if (map->stripes[i].physical > physical ||
4009 		    map->stripes[i].physical + length <= physical)
4010 			continue;
4011 
4012 		stripe_nr = physical - map->stripes[i].physical;
4013 		do_div(stripe_nr, map->stripe_len);
4014 
4015 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4016 			stripe_nr = stripe_nr * map->num_stripes + i;
4017 			do_div(stripe_nr, map->sub_stripes);
4018 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4019 			stripe_nr = stripe_nr * map->num_stripes + i;
4020 		}
4021 		bytenr = chunk_start + stripe_nr * map->stripe_len;
4022 		WARN_ON(nr >= map->num_stripes);
4023 		for (j = 0; j < nr; j++) {
4024 			if (buf[j] == bytenr)
4025 				break;
4026 		}
4027 		if (j == nr) {
4028 			WARN_ON(nr >= map->num_stripes);
4029 			buf[nr++] = bytenr;
4030 		}
4031 	}
4032 
4033 	*logical = buf;
4034 	*naddrs = nr;
4035 	*stripe_len = map->stripe_len;
4036 
4037 	free_extent_map(em);
4038 	return 0;
4039 }
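
/*
 * Reverse-mapping example (illustrative): for a two-stripe RAID0 chunk
 * with stripe_len == 64K, a physical address 64K into stripe i == 1
 * yields stripe_nr == 1, then stripe_nr = 1 * 2 + 1 == 3, so
 * bytenr == chunk_start + 3 * 64K -- the one logical address whose
 * forward mapping lands on that physical block.
 */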
4040 
4041 static void *merge_stripe_index_into_bio_private(void *bi_private,
4042 						 unsigned int stripe_index)
4043 {
4044 	/*
4045 	 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4046 	 * at most 1.
4047 	 * The alternative solution (instead of stealing bits from the
4048 	 * pointer) would be to allocate an intermediate structure
4049 	 * that contains the old private pointer plus the stripe_index.
4050 	 */
4051 	BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4052 	BUG_ON(stripe_index > 3);
4053 	return (void *)(((uintptr_t)bi_private) | stripe_index);
4054 }
4055 
4056 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4057 {
4058 	return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4059 }
4060 
4061 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4062 {
4063 	return (unsigned int)((uintptr_t)bi_private) & 3;
4064 }
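
/*
 * Round-trip sketch of the bit stealing above: bbio comes from kzalloc
 * and is at least 4-byte aligned, so its low two bits are free.  With a
 * hypothetical pointer value 0x...c40 and stripe_index 2:
 *
 *	p = merge_stripe_index_into_bio_private(bbio, 2);  -> 0x...c42
 *	extract_bbio_from_bio_private(p)                   -> 0x...c40
 *	extract_stripe_index_from_bio_private(p)           -> 2
 */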
4065 
4066 static void btrfs_end_bio(struct bio *bio, int err)
4067 {
4068 	struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4069 	int is_orig_bio = 0;
4070 
4071 	if (err) {
4072 		atomic_inc(&bbio->error);
4073 		if (err == -EIO || err == -EREMOTEIO) {
4074 			unsigned int stripe_index =
4075 				extract_stripe_index_from_bio_private(
4076 					bio->bi_private);
4077 			struct btrfs_device *dev;
4078 
4079 			BUG_ON(stripe_index >= bbio->num_stripes);
4080 			dev = bbio->stripes[stripe_index].dev;
4081 			if (dev->bdev) {
4082 				if (bio->bi_rw & WRITE)
4083 					btrfs_dev_stat_inc(dev,
4084 						BTRFS_DEV_STAT_WRITE_ERRS);
4085 				else
4086 					btrfs_dev_stat_inc(dev,
4087 						BTRFS_DEV_STAT_READ_ERRS);
4088 				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4089 					btrfs_dev_stat_inc(dev,
4090 						BTRFS_DEV_STAT_FLUSH_ERRS);
4091 				btrfs_dev_stat_print_on_error(dev);
4092 			}
4093 		}
4094 	}
4095 
4096 	if (bio == bbio->orig_bio)
4097 		is_orig_bio = 1;
4098 
4099 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
4100 		if (!is_orig_bio) {
4101 			bio_put(bio);
4102 			bio = bbio->orig_bio;
4103 		}
4104 		bio->bi_private = bbio->private;
4105 		bio->bi_end_io = bbio->end_io;
4106 		bio->bi_bdev = (struct block_device *)
4107 					(unsigned long)bbio->mirror_num;
4108 		/* only send an error to the higher layers if it is
4109 		 * beyond the tolerance of the multi-bio
4110 		 */
4111 		if (atomic_read(&bbio->error) > bbio->max_errors) {
4112 			err = -EIO;
4113 		} else {
4114 			/*
4115 			 * this bio is actually up to date, we didn't
4116 			 * go over the max number of errors
4117 			 */
4118 			set_bit(BIO_UPTODATE, &bio->bi_flags);
4119 			err = 0;
4120 		}
4121 		kfree(bbio);
4122 
4123 		bio_endio(bio, err);
4124 	} else if (!is_orig_bio) {
4125 		bio_put(bio);
4126 	}
4127 }
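
/*
 * Completion-accounting example (illustrative): a RAID1 write fans out to
 * two stripes, so stripes_pending starts at 2 and max_errors is 1.  If
 * exactly one mirror fails with -EIO, bbio->error reaches 1, which is not
 * beyond max_errors, so the final completion still marks the original bio
 * BIO_UPTODATE and ends it with err == 0.
 */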
4128 
4129 struct async_sched {
4130 	struct bio *bio;
4131 	int rw;
4132 	struct btrfs_fs_info *info;
4133 	struct btrfs_work work;
4134 };
4135 
4136 /*
4137  * see run_scheduled_bios for a description of why bios are collected for
4138  * async submit.
4139  *
4140  * This will add one bio to the pending list for a device and make sure
4141  * the work struct is scheduled.
4142  */
4143 static noinline void schedule_bio(struct btrfs_root *root,
4144 				 struct btrfs_device *device,
4145 				 int rw, struct bio *bio)
4146 {
4147 	int should_queue = 1;
4148 	struct btrfs_pending_bios *pending_bios;
4149 
4150 	/* don't bother with additional async steps for reads, right now */
4151 	if (!(rw & REQ_WRITE)) {
4152 		bio_get(bio);
4153 		btrfsic_submit_bio(rw, bio);
4154 		bio_put(bio);
4155 		return;
4156 	}
4157 
4158 	/*
4159 	 * nr_async_bios allows us to reliably return congestion to the
4160 	 * higher layers.  Otherwise, the async bio makes it appear we have
4161 	 * made progress against dirty pages when we've really just put it
4162 	 * on a queue for later.
4163 	 */
4164 	atomic_inc(&root->fs_info->nr_async_bios);
4165 	WARN_ON(bio->bi_next);
4166 	bio->bi_next = NULL;
4167 	bio->bi_rw |= rw;
4168 
4169 	spin_lock(&device->io_lock);
4170 	if (bio->bi_rw & REQ_SYNC)
4171 		pending_bios = &device->pending_sync_bios;
4172 	else
4173 		pending_bios = &device->pending_bios;
4174 
4175 	if (pending_bios->tail)
4176 		pending_bios->tail->bi_next = bio;
4177 
4178 	pending_bios->tail = bio;
4179 	if (!pending_bios->head)
4180 		pending_bios->head = bio;
4181 	if (device->running_pending)
4182 		should_queue = 0;
4183 
4184 	spin_unlock(&device->io_lock);
4185 
4186 	if (should_queue)
4187 		btrfs_queue_worker(&root->fs_info->submit_workers,
4188 				   &device->work);
4189 }
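
/*
 * Sketch of the queueing effect above: REQ_SYNC writes and regular writes
 * land on separate per-device FIFO lists (pending_sync_bios vs.
 * pending_bios), and the submit_workers pool later drains both via the
 * device's work function (pending_bios_fn).  While run_scheduled_bios has
 * marked the device running_pending, new bios just append without
 * requeueing the work item.
 */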
4190 
4191 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4192 		  int mirror_num, int async_submit)
4193 {
4194 	struct btrfs_mapping_tree *map_tree;
4195 	struct btrfs_device *dev;
4196 	struct bio *first_bio = bio;
4197 	u64 logical = (u64)bio->bi_sector << 9;
4198 	u64 length = 0;
4199 	u64 map_length;
4200 	int ret;
4201 	int dev_nr = 0;
4202 	int total_devs = 1;
4203 	struct btrfs_bio *bbio = NULL;
4204 
4205 	length = bio->bi_size;
4206 	map_tree = &root->fs_info->mapping_tree;
4207 	map_length = length;
4208 
4209 	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4210 			      mirror_num);
4211 	if (ret) /* -ENOMEM */
4212 		return ret;
4213 
4214 	total_devs = bbio->num_stripes;
4215 	if (map_length < length) {
4216 		printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len "
4217 		       "%llu map len %llu\n", (unsigned long long)logical,
4218 		       (unsigned long long)length,
4219 		       (unsigned long long)map_length);
4220 		BUG();
4221 	}
4222 
4223 	bbio->orig_bio = first_bio;
4224 	bbio->private = first_bio->bi_private;
4225 	bbio->end_io = first_bio->bi_end_io;
4226 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4227 
4228 	while (dev_nr < total_devs) {
4229 		if (dev_nr < total_devs - 1) {
4230 			bio = bio_clone(first_bio, GFP_NOFS);
4231 			BUG_ON(!bio); /* -ENOMEM */
4232 		} else {
4233 			bio = first_bio;
4234 		}
4235 		bio->bi_private = bbio;
4236 		bio->bi_private = merge_stripe_index_into_bio_private(
4237 				bio->bi_private, (unsigned int)dev_nr);
4238 		bio->bi_end_io = btrfs_end_bio;
4239 		bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4240 		dev = bbio->stripes[dev_nr].dev;
4241 		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4242 #ifdef DEBUG
4243 			struct rcu_string *name;
4244 
4245 			rcu_read_lock();
4246 			name = rcu_dereference(dev->name);
4247 			pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4248 				 "(%s id %llu), size=%u\n", rw,
4249 				 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4250 				 name->str, dev->devid, bio->bi_size);
4251 			rcu_read_unlock();
4252 #endif
4253 			bio->bi_bdev = dev->bdev;
4254 			if (async_submit)
4255 				schedule_bio(root, dev, rw, bio);
4256 			else
4257 				btrfsic_submit_bio(rw, bio);
4258 		} else {
4259 			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4260 			bio->bi_sector = logical >> 9;
4261 			bio_endio(bio, -EIO);
4262 		}
4263 		dev_nr++;
4264 	}
4265 	return 0;
4266 }
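
/*
 * Caller's-eye sketch of the fan-out above, with hypothetical values: a
 * 4K write to a logical address on a RAID1 chunk maps to a bbio with
 * num_stripes == 2; dev_nr 0 gets a clone of the original bio aimed at
 * stripes[0].dev at stripes[0].physical >> 9, the original bio itself
 * goes to stripes[1].dev, and btrfs_end_bio() reassembles the overall
 * status once both complete.
 */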
4267 
4268 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4269 				       u8 *uuid, u8 *fsid)
4270 {
4271 	struct btrfs_device *device;
4272 	struct btrfs_fs_devices *cur_devices;
4273 
4274 	cur_devices = root->fs_info->fs_devices;
4275 	while (cur_devices) {
4276 		if (!fsid ||
4277 		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4278 			device = __find_device(&cur_devices->devices,
4279 					       devid, uuid);
4280 			if (device)
4281 				return device;
4282 		}
4283 		cur_devices = cur_devices->seed;
4284 	}
4285 	return NULL;
4286 }
4287 
4288 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4289 					    u64 devid, u8 *dev_uuid)
4290 {
4291 	struct btrfs_device *device;
4292 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4293 
4294 	device = kzalloc(sizeof(*device), GFP_NOFS);
4295 	if (!device)
4296 		return NULL;
4297 	list_add(&device->dev_list,
4298 		 &fs_devices->devices);
4299 	device->dev_root = root->fs_info->dev_root;
4300 	device->devid = devid;
4301 	device->work.func = pending_bios_fn;
4302 	device->fs_devices = fs_devices;
4303 	device->missing = 1;
4304 	fs_devices->num_devices++;
4305 	fs_devices->missing_devices++;
4306 	spin_lock_init(&device->io_lock);
4307 	INIT_LIST_HEAD(&device->dev_alloc_list);
4308 	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4309 	return device;
4310 }
4311 
4312 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4313 			  struct extent_buffer *leaf,
4314 			  struct btrfs_chunk *chunk)
4315 {
4316 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4317 	struct map_lookup *map;
4318 	struct extent_map *em;
4319 	u64 logical;
4320 	u64 length;
4321 	u64 devid;
4322 	u8 uuid[BTRFS_UUID_SIZE];
4323 	int num_stripes;
4324 	int ret;
4325 	int i;
4326 
4327 	logical = key->offset;
4328 	length = btrfs_chunk_length(leaf, chunk);
4329 
4330 	read_lock(&map_tree->map_tree.lock);
4331 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4332 	read_unlock(&map_tree->map_tree.lock);
4333 
4334 	/* already mapped? */
4335 	if (em && em->start <= logical && em->start + em->len > logical) {
4336 		free_extent_map(em);
4337 		return 0;
4338 	} else if (em) {
4339 		free_extent_map(em);
4340 	}
4341 
4342 	em = alloc_extent_map();
4343 	if (!em)
4344 		return -ENOMEM;
4345 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4346 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4347 	if (!map) {
4348 		free_extent_map(em);
4349 		return -ENOMEM;
4350 	}
4351 
4352 	em->bdev = (struct block_device *)map;
4353 	em->start = logical;
4354 	em->len = length;
4355 	em->block_start = 0;
4356 	em->block_len = em->len;
4357 
4358 	map->num_stripes = num_stripes;
4359 	map->io_width = btrfs_chunk_io_width(leaf, chunk);
4360 	map->io_align = btrfs_chunk_io_align(leaf, chunk);
4361 	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4362 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4363 	map->type = btrfs_chunk_type(leaf, chunk);
4364 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4365 	for (i = 0; i < num_stripes; i++) {
4366 		map->stripes[i].physical =
4367 			btrfs_stripe_offset_nr(leaf, chunk, i);
4368 		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4369 		read_extent_buffer(leaf, uuid, (unsigned long)
4370 				   btrfs_stripe_dev_uuid_nr(chunk, i),
4371 				   BTRFS_UUID_SIZE);
4372 		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4373 							NULL);
4374 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4375 			kfree(map);
4376 			free_extent_map(em);
4377 			return -EIO;
4378 		}
4379 		if (!map->stripes[i].dev) {
4380 			map->stripes[i].dev =
4381 				add_missing_dev(root, devid, uuid);
4382 			if (!map->stripes[i].dev) {
4383 				kfree(map);
4384 				free_extent_map(em);
4385 				return -EIO;
4386 			}
4387 		}
4388 		map->stripes[i].dev->in_fs_metadata = 1;
4389 	}
4390 
4391 	write_lock(&map_tree->map_tree.lock);
4392 	ret = add_extent_mapping(&map_tree->map_tree, em);
4393 	write_unlock(&map_tree->map_tree.lock);
4394 	BUG_ON(ret); /* Tree corruption */
4395 	free_extent_map(em);
4396 
4397 	return 0;
4398 }
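
/*
 * Example of what the function above builds (illustrative numbers): a
 * CHUNK_ITEM with key.offset == 1G and a length of 256M becomes an
 * extent_map with em->start == 1G and em->len == 256M, whose em->bdev
 * field smuggles the struct map_lookup (type, stripe_len and the stripe
 * array) into the mapping tree for later __btrfs_map_block() lookups.
 */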
4399 
4400 static void fill_device_from_item(struct extent_buffer *leaf,
4401 				 struct btrfs_dev_item *dev_item,
4402 				 struct btrfs_device *device)
4403 {
4404 	unsigned long ptr;
4405 
4406 	device->devid = btrfs_device_id(leaf, dev_item);
4407 	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4408 	device->total_bytes = device->disk_total_bytes;
4409 	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4410 	device->type = btrfs_device_type(leaf, dev_item);
4411 	device->io_align = btrfs_device_io_align(leaf, dev_item);
4412 	device->io_width = btrfs_device_io_width(leaf, dev_item);
4413 	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4414 
4415 	ptr = (unsigned long)btrfs_device_uuid(dev_item);
4416 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4417 }
4418 
4419 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4420 {
4421 	struct btrfs_fs_devices *fs_devices;
4422 	int ret;
4423 
4424 	BUG_ON(!mutex_is_locked(&uuid_mutex));
4425 
4426 	fs_devices = root->fs_info->fs_devices->seed;
4427 	while (fs_devices) {
4428 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4429 			ret = 0;
4430 			goto out;
4431 		}
4432 		fs_devices = fs_devices->seed;
4433 	}
4434 
4435 	fs_devices = find_fsid(fsid);
4436 	if (!fs_devices) {
4437 		ret = -ENOENT;
4438 		goto out;
4439 	}
4440 
4441 	fs_devices = clone_fs_devices(fs_devices);
4442 	if (IS_ERR(fs_devices)) {
4443 		ret = PTR_ERR(fs_devices);
4444 		goto out;
4445 	}
4446 
4447 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4448 				   root->fs_info->bdev_holder);
4449 	if (ret) {
4450 		free_fs_devices(fs_devices);
4451 		goto out;
4452 	}
4453 
4454 	if (!fs_devices->seeding) {
4455 		__btrfs_close_devices(fs_devices);
4456 		free_fs_devices(fs_devices);
4457 		ret = -EINVAL;
4458 		goto out;
4459 	}
4460 
4461 	fs_devices->seed = root->fs_info->fs_devices->seed;
4462 	root->fs_info->fs_devices->seed = fs_devices;
4463 out:
4464 	return ret;
4465 }
4466 
4467 static int read_one_dev(struct btrfs_root *root,
4468 			struct extent_buffer *leaf,
4469 			struct btrfs_dev_item *dev_item)
4470 {
4471 	struct btrfs_device *device;
4472 	u64 devid;
4473 	int ret;
4474 	u8 fs_uuid[BTRFS_UUID_SIZE];
4475 	u8 dev_uuid[BTRFS_UUID_SIZE];
4476 
4477 	devid = btrfs_device_id(leaf, dev_item);
4478 	read_extent_buffer(leaf, dev_uuid,
4479 			   (unsigned long)btrfs_device_uuid(dev_item),
4480 			   BTRFS_UUID_SIZE);
4481 	read_extent_buffer(leaf, fs_uuid,
4482 			   (unsigned long)btrfs_device_fsid(dev_item),
4483 			   BTRFS_UUID_SIZE);
4484 
4485 	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4486 		ret = open_seed_devices(root, fs_uuid);
4487 		if (ret && !btrfs_test_opt(root, DEGRADED))
4488 			return ret;
4489 	}
4490 
4491 	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4492 	if (!device || !device->bdev) {
4493 		if (!btrfs_test_opt(root, DEGRADED))
4494 			return -EIO;
4495 
4496 		if (!device) {
4497 			printk(KERN_WARNING "btrfs: devid %llu missing\n",
4498 			       (unsigned long long)devid);
4499 			device = add_missing_dev(root, devid, dev_uuid);
4500 			if (!device)
4501 				return -ENOMEM;
4502 		} else if (!device->missing) {
4503 			/*
4504 			 * this happens when a device that was properly set up
4505 			 * in the device info lists suddenly goes bad.
4506 			 * device->bdev is NULL, so we have to set
4507 			 * device->missing to one here
4508 			 */
4509 			root->fs_info->fs_devices->missing_devices++;
4510 			device->missing = 1;
4511 		}
4512 	}
4513 
4514 	if (device->fs_devices != root->fs_info->fs_devices) {
4515 		BUG_ON(device->writeable);
4516 		if (device->generation !=
4517 		    btrfs_device_generation(leaf, dev_item))
4518 			return -EINVAL;
4519 	}
4520 
4521 	fill_device_from_item(leaf, dev_item, device);
4522 	device->dev_root = root->fs_info->dev_root;
4523 	device->in_fs_metadata = 1;
4524 	if (device->writeable) {
4525 		device->fs_devices->total_rw_bytes += device->total_bytes;
4526 		spin_lock(&root->fs_info->free_chunk_lock);
4527 		root->fs_info->free_chunk_space += device->total_bytes -
4528 			device->bytes_used;
4529 		spin_unlock(&root->fs_info->free_chunk_lock);
4530 	}
4531 	ret = 0;
4532 	return ret;
4533 }
4534 
4535 int btrfs_read_sys_array(struct btrfs_root *root)
4536 {
4537 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4538 	struct extent_buffer *sb;
4539 	struct btrfs_disk_key *disk_key;
4540 	struct btrfs_chunk *chunk;
4541 	u8 *ptr;
4542 	unsigned long sb_ptr;
4543 	int ret = 0;
4544 	u32 num_stripes;
4545 	u32 array_size;
4546 	u32 len = 0;
4547 	u32 cur;
4548 	struct btrfs_key key;
4549 
4550 	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4551 					  BTRFS_SUPER_INFO_SIZE);
4552 	if (!sb)
4553 		return -ENOMEM;
4554 	btrfs_set_buffer_uptodate(sb);
4555 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4556 	/*
4557 	 * The sb extent buffer is artificial and just used to read the system array.
4558 	 * The btrfs_set_buffer_uptodate() call does not properly mark all its
4559 	 * pages up-to-date when the page is larger: the extent does not cover
4560 	 * the whole page, so check_page_uptodate does not find all the page's
4561 	 * extents up-to-date (the hole beyond sb), and write_extent_buffer
4562 	 * then triggers a WARN_ON.
4563 	 *
4564 	 * Regular short extents go through a mark_extent_buffer_dirty/writeback
4565 	 * cycle, but sb spans only this function.  Add an explicit
4566 	 * SetPageUptodate call to silence the warning, e.g. on PowerPC 64.
4567 	 */
4568 	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4569 		SetPageUptodate(sb->pages[0]);
4570 
4571 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4572 	array_size = btrfs_super_sys_array_size(super_copy);
4573 
4574 	ptr = super_copy->sys_chunk_array;
4575 	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4576 	cur = 0;
4577 
4578 	while (cur < array_size) {
4579 		disk_key = (struct btrfs_disk_key *)ptr;
4580 		btrfs_disk_key_to_cpu(&key, disk_key);
4581 
4582 		len = sizeof(*disk_key); ptr += len;
4583 		sb_ptr += len;
4584 		cur += len;
4585 
4586 		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4587 			chunk = (struct btrfs_chunk *)sb_ptr;
4588 			ret = read_one_chunk(root, &key, sb, chunk);
4589 			if (ret)
4590 				break;
4591 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4592 			len = btrfs_chunk_item_size(num_stripes);
4593 		} else {
4594 			ret = -EIO;
4595 			break;
4596 		}
4597 		ptr += len;
4598 		sb_ptr += len;
4599 		cur += len;
4600 	}
4601 	free_extent_buffer(sb);
4602 	return ret;
4603 }
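
/*
 * Layout walked by the loop above, sketched:
 *
 *	sys_chunk_array: [disk_key][chunk][disk_key][chunk]...
 *
 * Each iteration consumes sizeof(struct btrfs_disk_key) bytes and then,
 * for a BTRFS_CHUNK_ITEM_KEY, btrfs_chunk_item_size(num_stripes) bytes,
 * until cur reaches btrfs_super_sys_array_size(super_copy).
 */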
4604 
4605 struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
4606 						   u64 logical, int mirror_num)
4607 {
4608 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4609 	int ret;
4610 	u64 map_length = 0;
4611 	struct btrfs_bio *bbio = NULL;
4612 	struct btrfs_device *device;
4613 
4614 	BUG_ON(mirror_num == 0);
4615 	ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
4616 			      mirror_num);
4617 	if (ret) {
4618 		BUG_ON(bbio != NULL);
4619 		return NULL;
4620 	}
4621 	BUG_ON(mirror_num != bbio->mirror_num);
4622 	device = bbio->stripes[mirror_num - 1].dev;
4623 	kfree(bbio);
4624 	return device;
4625 }
4626 
4627 int btrfs_read_chunk_tree(struct btrfs_root *root)
4628 {
4629 	struct btrfs_path *path;
4630 	struct extent_buffer *leaf;
4631 	struct btrfs_key key;
4632 	struct btrfs_key found_key;
4633 	int ret;
4634 	int slot;
4635 
4636 	root = root->fs_info->chunk_root;
4637 
4638 	path = btrfs_alloc_path();
4639 	if (!path)
4640 		return -ENOMEM;
4641 
4642 	mutex_lock(&uuid_mutex);
4643 	lock_chunks(root);
4644 
4645 	/* first we search for all of the device items, and then we
4646 	 * read in all of the chunk items.  This way we can create chunk
4647 	 * mappings that reference all of the devices that are found
4648 	 */
4649 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4650 	key.offset = 0;
4651 	key.type = 0;
4652 again:
4653 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4654 	if (ret < 0)
4655 		goto error;
4656 	while (1) {
4657 		leaf = path->nodes[0];
4658 		slot = path->slots[0];
4659 		if (slot >= btrfs_header_nritems(leaf)) {
4660 			ret = btrfs_next_leaf(root, path);
4661 			if (ret == 0)
4662 				continue;
4663 			if (ret < 0)
4664 				goto error;
4665 			break;
4666 		}
4667 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4668 		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4669 			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4670 				break;
4671 			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4672 				struct btrfs_dev_item *dev_item;
4673 				dev_item = btrfs_item_ptr(leaf, slot,
4674 						  struct btrfs_dev_item);
4675 				ret = read_one_dev(root, leaf, dev_item);
4676 				if (ret)
4677 					goto error;
4678 			}
4679 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4680 			struct btrfs_chunk *chunk;
4681 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4682 			ret = read_one_chunk(root, &found_key, leaf, chunk);
4683 			if (ret)
4684 				goto error;
4685 		}
4686 		path->slots[0]++;
4687 	}
4688 	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4689 		key.objectid = 0;
4690 		btrfs_release_path(path);
4691 		goto again;
4692 	}
4693 	ret = 0;
4694 error:
4695 	unlock_chunks(root);
4696 	mutex_unlock(&uuid_mutex);
4697 
4698 	btrfs_free_path(path);
4699 	return ret;
4700 }
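
/*
 * The two passes above, sketched: the first search starts at
 * (BTRFS_DEV_ITEMS_OBJECTID, 0, 0) and reads every DEV_ITEM; the key is
 * then reset to objectid 0 and the "goto again" pass walks every
 * CHUNK_ITEM, each of which can now resolve all of its stripe devids
 * through btrfs_find_device().
 */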
4701 
4702 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4703 {
4704 	int i;
4705 
4706 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4707 		btrfs_dev_stat_reset(dev, i);
4708 }
4709 
4710 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
4711 {
4712 	struct btrfs_key key;
4713 	struct btrfs_key found_key;
4714 	struct btrfs_root *dev_root = fs_info->dev_root;
4715 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4716 	struct extent_buffer *eb;
4717 	int slot;
4718 	int ret = 0;
4719 	struct btrfs_device *device;
4720 	struct btrfs_path *path = NULL;
4721 	int i;
4722 
4723 	path = btrfs_alloc_path();
4724 	if (!path) {
4725 		ret = -ENOMEM;
4726 		goto out;
4727 	}
4728 
4729 	mutex_lock(&fs_devices->device_list_mutex);
4730 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
4731 		int item_size;
4732 		struct btrfs_dev_stats_item *ptr;
4733 
4734 		key.objectid = 0;
4735 		key.type = BTRFS_DEV_STATS_KEY;
4736 		key.offset = device->devid;
4737 		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
4738 		if (ret) {
4739 			printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
4740 				      rcu_str_deref(device->name),
4741 				      (unsigned long long)device->devid);
4742 			__btrfs_reset_dev_stats(device);
4743 			device->dev_stats_valid = 1;
4744 			btrfs_release_path(path);
4745 			continue;
4746 		}
4747 		slot = path->slots[0];
4748 		eb = path->nodes[0];
4749 		btrfs_item_key_to_cpu(eb, &found_key, slot);
4750 		item_size = btrfs_item_size_nr(eb, slot);
4751 
4752 		ptr = btrfs_item_ptr(eb, slot,
4753 				     struct btrfs_dev_stats_item);
4754 
4755 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4756 			if (item_size >= (1 + i) * sizeof(__le64))
4757 				btrfs_dev_stat_set(device, i,
4758 					btrfs_dev_stats_value(eb, ptr, i));
4759 			else
4760 				btrfs_dev_stat_reset(device, i);
4761 		}
4762 
4763 		device->dev_stats_valid = 1;
4764 		btrfs_dev_stat_print_on_load(device);
4765 		btrfs_release_path(path);
4766 	}
4767 	mutex_unlock(&fs_devices->device_list_mutex);
4768 
4769 out:
4770 	btrfs_free_path(path);
4771 	return ret < 0 ? ret : 0;
4772 }
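
/*
 * On-disk shape read above (illustrative): the stats for a device live at
 * key (0, BTRFS_DEV_STATS_KEY, devid) as an array of __le64 counters.  An
 * item shorter than (i + 1) * sizeof(__le64) simply leaves counter i
 * reset, so filesystems written with fewer counters still load cleanly.
 */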
4773 
4774 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
4775 				struct btrfs_root *dev_root,
4776 				struct btrfs_device *device)
4777 {
4778 	struct btrfs_path *path;
4779 	struct btrfs_key key;
4780 	struct extent_buffer *eb;
4781 	struct btrfs_dev_stats_item *ptr;
4782 	int ret;
4783 	int i;
4784 
4785 	key.objectid = 0;
4786 	key.type = BTRFS_DEV_STATS_KEY;
4787 	key.offset = device->devid;
4788 
4789 	path = btrfs_alloc_path();
4790 	if (!path)
		return -ENOMEM;
4791 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
4792 	if (ret < 0) {
4793 		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
4794 			      ret, rcu_str_deref(device->name));
4795 		goto out;
4796 	}
4797 
4798 	if (ret == 0 &&
4799 	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
4800 		/* need to delete old one and insert a new one */
4801 		ret = btrfs_del_item(trans, dev_root, path);
4802 		if (ret != 0) {
4803 			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
4804 				      rcu_str_deref(device->name), ret);
4805 			goto out;
4806 		}
4807 		ret = 1;
4808 	}
4809 
4810 	if (ret == 1) {
4811 		/* need to insert a new item */
4812 		btrfs_release_path(path);
4813 		ret = btrfs_insert_empty_item(trans, dev_root, path,
4814 					      &key, sizeof(*ptr));
4815 		if (ret < 0) {
4816 			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
4817 				      rcu_str_deref(device->name), ret);
4818 			goto out;
4819 		}
4820 	}
4821 
4822 	eb = path->nodes[0];
4823 	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
4824 	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4825 		btrfs_set_dev_stats_value(eb, ptr, i,
4826 					  btrfs_dev_stat_read(device, i));
4827 	btrfs_mark_buffer_dirty(eb);
4828 
4829 out:
4830 	btrfs_free_path(path);
4831 	return ret;
4832 }
4833 
4834 /*
4835  * Called from commit_transaction().  Writes all changed device stats to disk.
4836  */
4837 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
4838 			struct btrfs_fs_info *fs_info)
4839 {
4840 	struct btrfs_root *dev_root = fs_info->dev_root;
4841 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4842 	struct btrfs_device *device;
4843 	int ret = 0;
4844 
4845 	mutex_lock(&fs_devices->device_list_mutex);
4846 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
4847 		if (!device->dev_stats_valid || !device->dev_stats_dirty)
4848 			continue;
4849 
4850 		ret = update_dev_stat_item(trans, dev_root, device);
4851 		if (!ret)
4852 			device->dev_stats_dirty = 0;
4853 	}
4854 	mutex_unlock(&fs_devices->device_list_mutex);
4855 
4856 	return ret;
4857 }
4858 
4859 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
4860 {
4861 	btrfs_dev_stat_inc(dev, index);
4862 	btrfs_dev_stat_print_on_error(dev);
4863 }
4864 
4865 void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
4866 {
4867 	if (!dev->dev_stats_valid)
4868 		return;
4869 	printk_ratelimited_in_rcu(KERN_ERR
4870 			   "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4871 			   rcu_str_deref(dev->name),
4872 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4873 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4874 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4875 			   btrfs_dev_stat_read(dev,
4876 					       BTRFS_DEV_STAT_CORRUPTION_ERRS),
4877 			   btrfs_dev_stat_read(dev,
4878 					       BTRFS_DEV_STAT_GENERATION_ERRS));
4879 }
4880 
4881 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
4882 {
4883 	printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4884 	       rcu_str_deref(dev->name),
4885 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4886 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4887 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4888 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
4889 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
4890 }
4891 
4892 int btrfs_get_dev_stats(struct btrfs_root *root,
4893 			struct btrfs_ioctl_get_dev_stats *stats,
4894 			int reset_after_read)
4895 {
4896 	struct btrfs_device *dev;
4897 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4898 	int i;
4899 
4900 	mutex_lock(&fs_devices->device_list_mutex);
4901 	dev = btrfs_find_device(root, stats->devid, NULL, NULL);
4902 	mutex_unlock(&fs_devices->device_list_mutex);
4903 
4904 	if (!dev) {
4905 		printk(KERN_WARNING
4906 		       "btrfs: get dev_stats failed, device not found\n");
4907 		return -ENODEV;
4908 	} else if (!dev->dev_stats_valid) {
4909 		printk(KERN_WARNING
4910 		       "btrfs: get dev_stats failed, not yet valid\n");
4911 		return -ENODEV;
4912 	} else if (reset_after_read) {
4913 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4914 			if (stats->nr_items > i)
4915 				stats->values[i] =
4916 					btrfs_dev_stat_read_and_reset(dev, i);
4917 			else
4918 				btrfs_dev_stat_reset(dev, i);
4919 		}
4920 	} else {
4921 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4922 			if (stats->nr_items > i)
4923 				stats->values[i] = btrfs_dev_stat_read(dev, i);
4924 	}
4925 	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
4926 		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
4927 	return 0;
4928 }
4929 }
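
/*
 * Userspace view of the handler above, a hedged sketch (the actual ioctl
 * plumbing lives in ioctl.c): the caller fills a
 * struct btrfs_ioctl_get_dev_stats with the devid and the nr_items it can
 * accept; on return, values[0..nr_items) holds the counters and nr_items
 * is clamped to BTRFS_DEV_STAT_VALUES_MAX as above.
 */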