Lines Matching defs:path

599 	BTRFS_PATH_AUTO_FREE(path);
609 path = btrfs_alloc_path();
610 if (!path)
616 path->skip_locking = true;
617 path->search_commit_root = true;
618 path->reada = READA_FORWARD;
625 btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
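
Lines 599-625 above show the read-only scan pattern used throughout this file: an auto-freed path, a lockless search of the commit root, forward readahead, and iteration with btrfs_for_each_slot(). A minimal sketch of that pattern, assuming fs/btrfs internal headers are in scope; the function name and key values below are illustrative, not taken from the file:

        /* Illustrative sketch, assuming fs/btrfs internal headers (ctree.h etc.). */
        static int scan_extent_items(struct btrfs_root *extent_root, u64 start)
        {
                BTRFS_PATH_AUTO_FREE(path);     /* path is freed on every return */
                struct btrfs_key search_key;
                struct btrfs_key found_key;
                int ret;

                path = btrfs_alloc_path();
                if (!path)
                        return -ENOMEM;

                /* Read-only scan of the commit root: no locking, no COW, readahead on. */
                path->skip_locking = true;
                path->search_commit_root = true;
                path->reada = READA_FORWARD;

                search_key.objectid = start;
                search_key.type = BTRFS_EXTENT_ITEM_KEY;
                search_key.offset = 0;

                /* Each iteration positions path->nodes[0]/path->slots[0] on one item. */
                btrfs_for_each_slot(extent_root, &search_key, &found_key, path, ret) {
                        if (found_key.type != BTRFS_EXTENT_ITEM_KEY)
                                continue;
                        /* ... process the item ... */
                }
                if (ret < 0)
                        return ret;
                return 0;       /* ret > 0 here only means there were no more items */
        }

Because BTRFS_PATH_AUTO_FREE() frees the path on every return, these functions need no explicit btrfs_free_path(); contrast the older manual alloc/free pairing still visible at lines 2536, 2559 and 2632.
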
716 BTRFS_PATH_AUTO_FREE(path);
725 path = btrfs_alloc_path();
726 if (!path)
747 path->skip_locking = true;
748 path->search_commit_root = true;
749 path->reada = READA_FORWARD;
756 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
760 leaf = path->nodes[0];
769 if (path->slots[0] < nritems) {
770 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
772 ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
778 btrfs_release_path(path);
787 ret = btrfs_next_leaf(extent_root, path);
792 leaf = path->nodes[0];
801 btrfs_release_path(path);
806 path->slots[0]++;
836 path->slots[0]++;
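
Lines 716-836 are the manual version of the same scan: one btrfs_search_slot(), then walking slots by hand, calling btrfs_next_leaf() when a leaf is exhausted and btrfs_release_path() whenever the code needs to drop its position. A rough sketch of that shape (function name and key values are placeholders):

        /* Illustrative sketch, assuming fs/btrfs internal headers (ctree.h etc.). */
        static int walk_extent_leaves(struct btrfs_root *extent_root, u64 start)
        {
                BTRFS_PATH_AUTO_FREE(path);
                struct extent_buffer *leaf;
                struct btrfs_key key;
                int ret;

                path = btrfs_alloc_path();
                if (!path)
                        return -ENOMEM;

                path->skip_locking = true;
                path->search_commit_root = true;
                path->reada = READA_FORWARD;

                key.objectid = start;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = 0;

                ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
                if (ret < 0)
                        return ret;

                leaf = path->nodes[0];
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                /* Leaf exhausted: advance (1 means no more leaves). */
                                ret = btrfs_next_leaf(extent_root, path);
                                if (ret < 0)
                                        return ret;
                                if (ret > 0)
                                        break;
                                leaf = path->nodes[0];
                                continue;
                        }
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                        /* ... process the item at path->slots[0] ... */
                        path->slots[0]++;
                }
                return 0;
        }
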
1041 struct btrfs_path *path,
1054 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1060 ret = btrfs_del_item(trans, root, path);
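
Lines 1041-1060 show the delete pattern: search with ins_len == -1 and cow == 1 so the leaf is COWed and may be rebalanced after the removal, then delete the item the path points at. A minimal sketch with an illustrative function name (the caller still owns and releases the path):

        /* Illustrative sketch, assuming fs/btrfs internal headers (ctree.h etc.). */
        static int del_one_item(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_path *path,
                                const struct btrfs_key *key)
        {
                int ret;

                /* ins_len == -1: the leaf may shrink; cow == 1: COW it first. */
                ret = btrfs_search_slot(trans, root, key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;          /* key not found */
                if (ret < 0)
                        return ret;

                /* Delete the item the path points at; the caller releases the path. */
                return btrfs_del_item(trans, root, path);
        }
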
1068 BTRFS_PATH_AUTO_FREE(path);
1116 path = btrfs_alloc_path();
1117 if (!path) {
1126 inode = lookup_free_space_inode(block_group, path);
1140 btrfs_wait_cache_io(trans, block_group, path);
1255 ret = remove_block_group_item(trans, path, block_group);
1552 * to the unused_bgs code path. Therefore, if it's not fully
1697 * The normal path here is that an unused block group is passed here,
1698 * then trimming is handled in the transaction commit path.
1700 * before coming down the unused block group path as trimming
1701 * will no longer be done later in the transaction commit path.
2048 const struct btrfs_path *path)
2057 slot = path->slots[0];
2058 leaf = path->nodes[0];
2095 struct btrfs_path *path,
2102 btrfs_for_each_slot(root, key, &found_key, path, ret) {
2105 return read_bg_from_eb(fs_info, &found_key, path);
2536 struct btrfs_path *path;
2559 path = btrfs_alloc_path();
2560 if (!path)
2575 ret = find_first_block_group(info, path, &key);
2581 leaf = path->nodes[0];
2582 slot = path->slots[0];
2588 btrfs_release_path(path);
2595 btrfs_release_path(path);
2632 btrfs_free_path(path);
2689 BTRFS_PATH_AUTO_FREE(path);
2697 path = btrfs_alloc_path();
2698 if (!path)
2704 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
2708 leaf = path->nodes[0];
2709 extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
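
Lines 2689-2709 show the insert pattern: btrfs_insert_empty_item() reserves room for the new item and leaves the path pointing at it, after which the fields are filled in through the leaf. A generic sketch; the function name is illustrative, and the raw-byte copy via write_extent_buffer() stands in for the typed btrfs_item_ptr()/setter calls the file itself uses:

        /* Illustrative sketch, assuming fs/btrfs internal headers (ctree.h etc.). */
        static int insert_one_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   const struct btrfs_key *key,
                                   const void *data, u32 size)
        {
                BTRFS_PATH_AUTO_FREE(path);
                struct extent_buffer *leaf;
                unsigned long ptr;
                int ret;

                path = btrfs_alloc_path();
                if (!path)
                        return -ENOMEM;

                /* Reserve room for the new item; path now points at it. */
                ret = btrfs_insert_empty_item(trans, root, path, key, size);
                if (ret)
                        return ret;

                leaf = path->nodes[0];
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
                write_extent_buffer(leaf, data, ptr, size);
                return 0;
        }
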
3120 struct btrfs_path *path,
3154 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3161 leaf = path->nodes[0];
3162 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3169 btrfs_release_path(path);
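
Lines 3120-3169 show the update-in-place pattern: search with cow == 1 so the leaf belongs to the current transaction, overwrite the item bytes through btrfs_item_ptr_offset(), then drop the path. A sketch with illustrative names and a caller-supplied path, mirroring that shape:

        /* Illustrative sketch, assuming fs/btrfs internal headers (ctree.h etc.). */
        static int update_one_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_path *path,
                                   const struct btrfs_key *key,
                                   const void *data, u32 size)
        {
                struct extent_buffer *leaf;
                unsigned long ptr;
                int ret;

                /* cow == 1 so the leaf is owned by the current transaction. */
                ret = btrfs_search_slot(trans, root, key, path, 0, 1);
                if (ret) {
                        if (ret > 0)
                                ret = -ENOENT;
                        goto out;
                }

                leaf = path->nodes[0];
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
                write_extent_buffer(leaf, data, ptr, size);
                ret = 0;
        out:
                btrfs_release_path(path);
                return ret;
        }
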
3190 struct btrfs_path *path)
3218 inode = lookup_free_space_inode(block_group, path);
3221 btrfs_release_path(path);
3232 ret = create_free_space_inode(trans, block_group, path);
3340 btrfs_release_path(path);
3357 BTRFS_PATH_AUTO_FREE(path);
3363 path = btrfs_alloc_path();
3364 if (!path)
3371 cache_save_setup(cache, trans, path);
3396 BTRFS_PATH_AUTO_FREE(path);
3413 if (!path) {
3414 path = btrfs_alloc_path();
3415 if (!path) {
3439 btrfs_wait_cache_io(trans, cache, path);
3458 cache_save_setup(cache, trans, path);
3462 ret = btrfs_write_out_cache(trans, cache, path);
3481 ret = update_block_group_item(trans, path, cache);
3561 BTRFS_PATH_AUTO_FREE(path);
3564 path = btrfs_alloc_path();
3565 if (!path)
3597 btrfs_wait_cache_io(trans, cache, path);
3610 cache_save_setup(cache, trans, path);
3617 ret = btrfs_write_out_cache(trans, cache, path);
3630 ret = update_block_group_item(trans, path, cache);
3647 ret = update_block_group_item(trans, path, cache);
3671 btrfs_wait_cache_io(trans, cache, path);
3791 * @num_bytes except for the compress path.
4056 * in the extent btree right away, we could deadlock because the path for the
4170 * Allocation of system chunks can not happen through this path, as we