/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc,
int mark_free);
static int update_reserved_extents(struct btrfs_block_group_cache *cache,
u64 num_bytes, int reserve);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner_objectid,
u64 owner_offset, int refs_to_drop,
struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 parent, u64 root_objectid,
u64 flags, struct btrfs_disk_key *key,
int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 alloc_bytes,
u64 flags, int force);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, u64 num_bytes,
int is_data, int reserved,
struct extent_buffer **must_clean);
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
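/*
 * Return non-zero once the background caching of this block group's
 * free space has finished. The smp_mb() ensures we read an up-to-date
 * ->cached value.
 */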
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
smp_mb();
return cache->cached == BTRFS_CACHE_FINISHED;
}
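/* test whether the block group's flags include all of the given bits */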
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
return (cache->flags & bits) == bits;
}
/*
* this adds the block group to the fs_info rb tree for the block group
* cache
*/
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct btrfs_block_group_cache *block_group)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct btrfs_block_group_cache *cache;
spin_lock(&info->block_group_cache_lock);
p = &info->block_group_cache_tree.rb_node;
while (*p) {
parent = *p;
cache = rb_entry(parent, struct btrfs_block_group_cache,
cache_node);
if (block_group->key.objectid < cache->key.objectid) {
p = &(*p)->rb_left;
} else if (block_group->key.objectid > cache->key.objectid) {
p = &(*p)->rb_right;
} else {
spin_unlock(&info->block_group_cache_lock);
return -EEXIST;
}
}
rb_link_node(&block_group->cache_node, parent, p);
rb_insert_color(&block_group->cache_node,
&info->block_group_cache_tree);
spin_unlock(&info->block_group_cache_lock);
return 0;
}
/*
* This will return the block group at or after bytenr if contains is 0, else
* it will return the block group that contains the bytenr
*/
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
int contains)
{
struct btrfs_block_group_cache *cache, *ret = NULL;
struct rb_node *n;
u64 end, start;
spin_lock(&info->block_group_cache_lock);
n = info->block_group_cache_tree.rb_node;
while (n) {
cache = rb_entry(n, struct btrfs_block_group_cache,
cache_node);
end = cache->key.objectid + cache->key.offset - 1;
start = cache->key.objectid;
if (bytenr < start) {
if (!contains && (!ret || start < ret->key.objectid))
ret = cache;
n = n->rb_left;
} else if (bytenr > start) {
if (contains && bytenr <= end) {
ret = cache;
break;
}
n = n->rb_right;
} else {
ret = cache;
break;
}
}
if (ret)
atomic_inc(&ret->count);
spin_unlock(&info->block_group_cache_lock);
return ret;
}
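/*
 * Mark a byte range as excluded from free space caching. Both
 * freed_extents trees are tagged, so the exclusion holds no matter
 * which of the two is currently serving as pinned_extents.
 */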
static int add_excluded_extent(struct btrfs_root *root,
u64 start, u64 num_bytes)
{
u64 end = start + num_bytes - 1;
set_extent_bits(&root->fs_info->freed_extents[0],
start, end, EXTENT_UPTODATE, GFP_NOFS);
set_extent_bits(&root->fs_info->freed_extents[1],
start, end, EXTENT_UPTODATE, GFP_NOFS);
return 0;
}
static void free_excluded_extents(struct btrfs_root *root,
struct btrfs_block_group_cache *cache)
{
u64 start, end;
start = cache->key.objectid;
end = start + cache->key.offset - 1;
clear_extent_bits(&root->fs_info->freed_extents[0],
start, end, EXTENT_UPTODATE, GFP_NOFS);
clear_extent_bits(&root->fs_info->freed_extents[1],
start, end, EXTENT_UPTODATE, GFP_NOFS);
}
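/*
 * Superblock mirrors live at fixed offsets that may fall inside a block
 * group. Find every mirror copy that maps into this block group and
 * exclude those stripes so they are never handed out as free space.
 */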
static int exclude_super_stripes(struct btrfs_root *root,
struct btrfs_block_group_cache *cache)
{
u64 bytenr;
u64 *logical;
int stripe_len;
int i, nr, ret;
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
cache->key.objectid, bytenr,
0, &logical, &nr, &stripe_len);
BUG_ON(ret);
while (nr--) {
cache->bytes_super += stripe_len;
ret = add_excluded_extent(root, logical[nr],
stripe_len);
BUG_ON(ret);
}
kfree(logical);
}
return 0;
}
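/*
 * Take a reference on the caching control for this block group, or
 * return NULL if caching is not currently in progress.
 */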
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
struct btrfs_caching_control *ctl;
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_STARTED) {
spin_unlock(&cache->lock);
return NULL;
}
ctl = cache->caching_ctl;
atomic_inc(&ctl->count);
spin_unlock(&cache->lock);
return ctl;
}
static void put_caching_control(struct btrfs_caching_control *ctl)
{
if (atomic_dec_and_test(&ctl->count))
kfree(ctl);
}
/*
* This is only called by cache_block_group. Since we could have freed
* extents, we need to check pinned_extents for any extents that can't be
* used yet, because their free space will be released as soon as the
* transaction commits.
*/
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_fs_info *info, u64 start, u64 end)
{
u64 extent_start, extent_end, size, total_added = 0;
int ret;
while (start < end) {
ret = find_first_extent_bit(info->pinned_extents, start,
&extent_start, &extent_end,
EXTENT_DIRTY | EXTENT_UPTODATE);
if (ret)
break;
if (extent_start == start) {
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
total_added += size;
ret = btrfs_add_free_space(block_group, start,
size);
BUG_ON(ret);
start = extent_end + 1;
} else {
break;
}
}
if (start < end) {
size = end - start;
total_added += size;
ret = btrfs_add_free_space(block_group, start, size);
BUG_ON(ret);
}
return total_added;
}
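/*
 * Background worker that walks the extent tree (via the commit root, so
 * no tree locks are needed) and records the gaps between allocated
 * extents as free space in the block group's cache.
 */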
static int caching_kthread(void *data)
{
struct btrfs_block_group_cache *block_group = data;
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key key;
u64 total_found = 0;
u64 last = 0;
u32 nritems;
int ret = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
exclude_super_stripes(extent_root, block_group);
spin_lock(&block_group->space_info->lock);
block_group->space_info->bytes_super += block_group->bytes_super;
spin_unlock(&block_group->space_info->lock);
last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
/*
* We don't want to deadlock with somebody trying to allocate a new
* extent for the extent root while also trying to search the extent
* root to add free space. So we skip locking and search the commit
* root, since it's read-only.
*/
path->skip_locking = 1;
path->search_commit_root = 1;
path->reada = 2;
key.objectid = last;
key.offset = 0;
key.type = BTRFS_EXTENT_ITEM_KEY;
again:
mutex_lock(&caching_ctl->mutex);
/* need to make sure the commit_root doesn't disappear */
down_read(&fs_info->extent_commit_sem);
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
goto err;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
while (1) {
smp_mb();
if (fs_info->closing > 1) {
last = (u64)-1;
break;
}
if (path->slots[0] < nritems) {
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
} else {
ret = find_next_key(path, 0, &key);
if (ret)
break;
caching_ctl->progress = last;
btrfs_release_path(extent_root, path);
up_read(&fs_info->extent_commit_sem);
mutex_unlock(&caching_ctl->mutex);
if (btrfs_transaction_in_commit(fs_info))
schedule_timeout(1);
else
cond_resched();
goto again;
}
if (key.objectid < block_group->key.objectid) {
path->slots[0]++;
continue;
}
if (key.objectid >= block_group->key.objectid +
block_group->key.offset)
break;
if (key.type == BTRFS_EXTENT_ITEM_KEY) {
total_found += add_new_free_space(block_group,
fs_info, last,
key.objectid);
last = key.objectid + key.offset;
if (total_found > (1024 * 1024 * 2)) {
total_found = 0;
wake_up(&caching_ctl->wait);
}
}
path->slots[0]++;
}
ret = 0;
total_found += add_new_free_space(block_group, fs_info, last,
block_group->key.objectid +
block_group->key.offset);
caching_ctl->progress = (u64)-1;
spin_lock(&block_group->lock);
block_group->caching_ctl = NULL;
block_group->cached = BTRFS_CACHE_FINISHED;
spin_unlock(&block_group->lock);
err:
btrfs_free_path(path);
up_read(&fs_info->extent_commit_sem);
free_excluded_extents(extent_root, block_group);
mutex_unlock(&caching_ctl->mutex);
wake_up(&caching_ctl->wait);
put_caching_control(caching_ctl);
atomic_dec(&block_group->space_info->caching_threads);
return 0;
}
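/*
 * Kick off the kthread that fills in this block group's free space
 * cache. Returns 0 immediately if caching has already been started or
 * has finished.
 */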
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_caching_control *caching_ctl;
struct task_struct *tsk;
int ret = 0;
smp_mb();
if (cache->cached != BTRFS_CACHE_NO)
return 0;
caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
BUG_ON(!caching_ctl);
INIT_LIST_HEAD(&caching_ctl->list);
mutex_init(&caching_ctl->mutex);
init_waitqueue_head(&caching_ctl->wait);
caching_ctl->block_group = cache;
caching_ctl->progress = cache->key.objectid;
/* one for caching kthread, one for caching block group list */
atomic_set(&caching_ctl->count, 2);
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
spin_unlock(&cache->lock);
kfree(caching_ctl);
return 0;
}
cache->caching_ctl = caching_ctl;
cache->cached = BTRFS_CACHE_STARTED;
spin_unlock(&cache->lock);
down_write(&fs_info->extent_commit_sem);
list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
up_write(&fs_info->extent_commit_sem);
atomic_inc(&cache->space_info->caching_threads);
tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
cache->key.objectid);
if (IS_ERR(tsk)) {
ret = PTR_ERR(tsk);
printk(KERN_ERR "error running thread %d\n", ret);
BUG();
}
return ret;
}
/*
* return the block group that starts at or after bytenr
*/
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
struct btrfs_block_group_cache *cache;
cache = block_group_cache_tree_search(info, bytenr, 0);
return cache;
}
/*
* return the block group that contains the given bytenr
*/
struct btrfs_block_group_cache *btrfs_lookup_block_group(
struct btrfs_fs_info *info,
u64 bytenr)
{
struct btrfs_block_group_cache *cache;
cache = block_group_cache_tree_search(info, bytenr, 1);
return cache;
}
void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
if (atomic_dec_and_test(&cache->count))
kfree(cache);
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
u64 flags)
{
struct list_head *head = &info->space_info;
struct btrfs_space_info *found;
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
if (found->flags == flags) {
rcu_read_unlock();
return found;
}
}
rcu_read_unlock();
return NULL;
}
/*
* after adding space to the filesystem, we need to clear the full flags
* on all the space infos.
*/
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
struct list_head *head = &info->space_info;
struct btrfs_space_info *found;
rcu_read_lock();
list_for_each_entry_rcu(found, head, list)
found->full = 0;
rcu_read_unlock();
}
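/*
 * Scale num to factor tenths of its value, e.g. div_factor(1024, 9)
 * returns 921, roughly 90% of the input. A factor of 10 returns num
 * unchanged.
 */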
static u64 div_factor(u64 num, int factor)
{
if (factor == 10)
return num;
num *= factor;
do_div(num, 10);
return num;
}
u64 btrfs_find_block_group(struct btrfs_root *root,
u64 search_start, u64 search_hint, int owner)
{
struct btrfs_block_group_cache *cache;
u64 used;
u64 last = max(search_hint, search_start);
u64 group_start = 0;
int full_search = 0;
int factor = 9;
int wrapped = 0;
again:
while (1) {
cache = btrfs_lookup_first_block_group(root->fs_info, last);
if (!cache)
break;
spin_lock(&cache->lock);
last = cache->key.objectid + cache->key.offset;
used = btrfs_block_group_used(&cache->item);
if ((full_search || !cache->ro) &&
block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
if (used + cache->pinned + cache->reserved <
div_factor(cache->key.offset, factor)) {
group_start = cache->key.objectid;
spin_unlock(&cache->lock);
btrfs_put_block_group(cache);
goto found;
}
}
spin_unlock(&cache->lock);
btrfs_put_block_group(cache);
cond_resched();
}
if (!wrapped) {
last = search_start;
wrapped = 1;
goto again;
}
if (!full_search && factor < 10) {
last = search_start;
full_search = 1;
factor = 10;
goto again;
}
found:
return group_start;
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
int ret;
struct btrfs_key key;
struct btrfs_path *path;
path = btrfs_alloc_path();
BUG_ON(!path);
key.objectid = start;
key.offset = len;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
0, 0);
btrfs_free_path(path);
return ret;
}
/*
* Back reference rules. Back refs have three main goals:
*
* 1) differentiate between all holders of references to an extent so that
* when a reference is dropped we can make sure it was a valid reference
* before freeing the extent.
*
* 2) Provide enough information to quickly find the holders of an extent
* if we notice a given block is corrupted or bad.
*
* 3) Make it easy to migrate blocks for FS shrinking or storage pool
* maintenance. This is actually the same as #2, but with a slightly
* different use case.
*
* There are two kinds of back refs. Implicit back refs are optimized
* for pointers in non-shared tree blocks. For a given pointer in a block,
* back refs of this kind provide information about the block's owner tree
* and the pointer's key. This information allows us to find the block by
* b-tree searching. Full back refs are for pointers in tree blocks not
* referenced by their owner trees. The location of the tree block is
* recorded in the back refs. Full back refs are actually generic and can
* be used in all cases where implicit back refs are used. The major
* shortcoming of full back refs is their overhead: every time a tree
* block gets COWed, we have to update the back ref entries for all
* pointers in it.
*
* For a newly allocated tree block, we use implicit back refs for the
* pointers in it. This means most tree related operations only involve
* implicit back refs. For a tree block created in an old transaction,
* the only way to drop a reference to it is to COW it. So we can detect
* the moment a tree block loses its owner tree's reference and do the
* back ref conversion.
*
* When a tree block is COWed through a tree, there are four cases:
*
* The reference count of the block is one and the tree is the block's
* owner tree. Nothing to do in this case.
*
* The reference count of the block is one and the tree is not the
* block's owner tree. In this case, full back refs are used for the
* pointers in the block. Remove these full back refs and add implicit
* back refs for every pointer in the new block.
*
* The reference count of the block is greater than one and the tree is
* the block's owner tree. In this case, implicit back refs are used for
* the pointers in the block. Add full back refs for every pointer in
* the block and increase the lower level extents' reference counts. The
* original implicit back refs are carried over to the new block.
*
* The reference count of the block is greater than one and the tree is
* not the block's owner tree. Add implicit back refs for every pointer
* in the new block and increase the lower level extents' reference
* counts.
*
* Back reference key composition:
*
* The key objectid corresponds to the first byte in the extent, and
* the key type is used to differentiate between types of back refs.
* The meaning of the key offset depends on the type of back ref.
*
* File extents can be referenced by:
*
* - multiple snapshots, subvolumes, or different generations in one subvol
* - different files inside a single subvolume
* - different offsets inside a file (bookend extents in file.c)
*
* The extent ref structure for the implicit back refs has fields for:
*
* - objectid of the subvolume root
* - objectid of the file holding the reference
* - original offset in the file
* - how many bookend extents
*
* The key offset for the implicit back refs is a hash of the first
* three fields.
*
* The extent ref structure for the full back refs has a field for:
*
* - number of pointers in the tree leaf
*
* The key offset for the full back refs is the first byte of
* the tree leaf.
*
* When a file extent is allocated, an implicit back ref is used and
* the fields are filled in as:
*
* (root_key.objectid, inode objectid, offset in file, 1)
*
* When a file extent is removed by file truncation, we find the
* corresponding implicit back ref and check the following fields:
*
* (btrfs_header_owner(leaf), inode objectid, offset in file)
*
* Btree extents can be referenced by:
*
* - Different subvolumes
*
* Both the implicit back refs and the full back refs for tree blocks
* consist only of a key. The key offset for the implicit back refs is
* the objectid of the block's owner tree. The key offset for the full
* back refs is the first byte of the parent block.
*
* When implicit back refs are used, information about the lowest key
* and the level of the tree block is required. This information is
* stored in the tree block info structure.
*/
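/*
 * Example with hypothetical numbers: a 4K file extent at disk bytenr
 * 136314880, referenced once by inode 257 at file offset 0 in subvolume
 * 5, is described by an extent item keyed
 * (136314880, BTRFS_EXTENT_ITEM_KEY, 4096) plus an implicit back ref
 * keyed (136314880, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0)).
 */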
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 owner, u32 extra_size)
{
struct btrfs_extent_item *item;
struct btrfs_extent_item_v0 *ei0;
struct btrfs_extent_ref_v0 *ref0;
struct btrfs_tree_block_info *bi;
struct extent_buffer *leaf;
struct btrfs_key key;
struct btrfs_key found_key;
u32 new_size = sizeof(*item);
u64 refs;
int ret;
leaf = path->nodes[0];
BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
ei0 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_item_v0);
refs = btrfs_extent_refs_v0(leaf, ei0);
if (owner == (u64)-1) {
while (1) {
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
return ret;
BUG_ON(ret > 0);
leaf = path->nodes[0];
}
btrfs_item_key_to_cpu(leaf, &found_key,
path->slots[0]);
BUG_ON(key.objectid != found_key.objectid);
if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
path->slots[0]++;
continue;
}
ref0 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_ref_v0);
owner = btrfs_ref_objectid_v0(leaf, ref0);
break;
}
}
btrfs_release_path(root, path);
if (owner < BTRFS_FIRST_FREE_OBJECTID)
new_size += sizeof(*bi);
new_size -= sizeof(*ei0);
ret = btrfs_search_slot(trans, root, &key, path,
new_size + extra_size, 1);
if (ret < 0)
return ret;
BUG_ON(ret);
ret = btrfs_extend_item(trans, root, path, new_size);
BUG_ON(ret);
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
btrfs_set_extent_refs(leaf, item, refs);
/* FIXME: get real generation */
btrfs_set_extent_generation(leaf, item, 0);
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
btrfs_set_extent_flags(leaf, item,
BTRFS_EXTENT_FLAG_TREE_BLOCK |
BTRFS_BLOCK_FLAG_FULL_BACKREF);
bi = (struct btrfs_tree_block_info *)(item + 1);
/* FIXME: get first key of the block */
memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
btrfs_set_tree_block_level(leaf, bi, (int)owner);
} else {
btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
}
btrfs_mark_buffer_dirty(leaf);
return 0;
}
#endif
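/*
 * Compute the key offset for an implicit data back ref: crc32c of the
 * root objectid supplies the high bits, crc32c of the inode objectid
 * and file offset supplies the low bits, folded into a 63-bit value.
 */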
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
u32 high_crc = ~(u32)0;
u32 low_crc = ~(u32)0;
__le64 lenum;
lenum = cpu_to_le64(root_objectid);
high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
lenum = cpu_to_le64(owner);
low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
lenum = cpu_to_le64(offset);
low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
return ((u64)high_crc << 31) ^ (u64)low_crc;
}
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
struct btrfs_extent_data_ref *ref)
{
return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
btrfs_extent_data_ref_objectid(leaf, ref),
btrfs_extent_data_ref_offset(leaf, ref));
}
static int match_extent_data_ref(struct extent_buffer *leaf,
struct btrfs_extent_data_ref *ref,
u64 root_objectid, u64 owner, u64 offset)
{
if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
return 0;
return 1;
}
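/*
 * Find the keyed data back ref item for an extent: a shared ref is
 * located directly by (bytenr, parent), while a non-shared ref is
 * located by its hash, with a forward scan to resolve hash collisions.
 */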
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid,
u64 owner, u64 offset)
{
struct btrfs_key key;
struct btrfs_extent_data_ref *ref;
struct extent_buffer *leaf;
u32 nritems;
int ret;
int recow;
int err = -ENOENT;
key.objectid = bytenr;
if (parent) {
key.type = BTRFS_SHARED_DATA_REF_KEY;
key.offset = parent;
} else {
key.type = BTRFS_EXTENT_DATA_REF_KEY;
key.offset = hash_extent_data_ref(root_objectid,
owner, offset);
}
again:
recow = 0;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
err = ret;
goto fail;
}
if (parent) {
if (!ret)
return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
key.type = BTRFS_EXTENT_REF_V0_KEY;
btrfs_release_path(root, path);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0) {
err = ret;
goto fail;
}
if (!ret)
return 0;
#endif
goto fail;
}
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
while (1) {
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
err = ret;
if (ret)
goto fail;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
recow = 1;
}
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != bytenr ||
key.type != BTRFS_EXTENT_DATA_REF_KEY)
goto fail;
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_data_ref);
if (match_extent_data_ref(leaf, ref, root_objectid,
owner, offset)) {
if (recow) {
btrfs_release_path(root, path);
goto again;
}
err = 0;
break;
}
path->slots[0]++;
}
fail:
return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid, u64 owner,
u64 offset, int refs_to_add)
{
struct btrfs_key key;
struct extent_buffer *leaf;
u32 size;
u32 num_refs;
int ret;
key.objectid = bytenr;
if (parent) {
key.type = BTRFS_SHARED_DATA_REF_KEY;
key.offset = parent;
size = sizeof(struct btrfs_shared_data_ref);
} else {
key.type = BTRFS_EXTENT_DATA_REF_KEY;
key.offset = hash_extent_data_ref(root_objectid,
owner, offset);
size = sizeof(struct btrfs_extent_data_ref);
}
ret = btrfs_insert_empty_item(trans, root, path, &key, size);
if (ret && ret != -EEXIST)
goto fail;
leaf = path->nodes[0];
if (parent) {
struct btrfs_shared_data_ref *ref;
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
if (ret == 0) {
btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
} else {
num_refs = btrfs_shared_data_ref_count(leaf, ref);
num_refs += refs_to_add;
btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
}
} else {
struct btrfs_extent_data_ref *ref;
while (ret == -EEXIST) {
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_data_ref);
if (match_extent_data_ref(leaf, ref, root_objectid,
owner, offset))
break;
btrfs_release_path(root, path);
key.offset++;
ret = btrfs_insert_empty_item(trans, root, path, &key,
size);
if (ret && ret != -EEXIST)
goto fail;
leaf = path->nodes[0];
}
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_data_ref);
if (ret == 0) {
btrfs_set_extent_data_ref_root(leaf, ref,
root_objectid);
btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
btrfs_set_extent_data_ref_offset(leaf, ref, offset);
btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
} else {
num_refs = btrfs_extent_data_ref_count(leaf, ref);
num_refs += refs_to_add;
btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
}
}
btrfs_mark_buffer_dirty(leaf);
ret = 0;
fail:
btrfs_release_path(root, path);
return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
int refs_to_drop)
{
struct btrfs_key key;
struct btrfs_extent_data_ref *ref1 = NULL;
struct btrfs_shared_data_ref *ref2 = NULL;
struct extent_buffer *leaf;
u32 num_refs = 0;
int ret = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
ref1 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_data_ref);
num_refs = btrfs_extent_data_ref_count(leaf, ref1);
} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
ref2 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
struct btrfs_extent_ref_v0 *ref0;
ref0 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_ref_v0);
num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
} else {
BUG();
}
BUG_ON(num_refs < refs_to_drop);
num_refs -= refs_to_drop;
if (num_refs == 0) {
ret = btrfs_del_item(trans, root, path);
} else {
if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
else {
struct btrfs_extent_ref_v0 *ref0;
ref0 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_ref_v0);
btrfs_set_ref_count_v0(leaf, ref0, num_refs);
}
#endif
btrfs_mark_buffer_dirty(leaf);
}
return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref)
{
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_extent_data_ref *ref1;
struct btrfs_shared_data_ref *ref2;
u32 num_refs = 0;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (iref) {
if (btrfs_extent_inline_ref_type(leaf, iref) ==
BTRFS_EXTENT_DATA_REF_KEY) {
ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
num_refs = btrfs_extent_data_ref_count(leaf, ref1);
} else {
ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
}
} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
ref1 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_data_ref);
num_refs = btrfs_extent_data_ref_count(leaf, ref1);
} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
ref2 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
struct btrfs_extent_ref_v0 *ref0;
ref0 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_ref_v0);
num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
} else {
WARN_ON(1);
}
return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid)
{
struct btrfs_key key;
int ret;
key.objectid = bytenr;
if (parent) {
key.type = BTRFS_SHARED_BLOCK_REF_KEY;
key.offset = parent;
} else {
key.type = BTRFS_TREE_BLOCK_REF_KEY;
key.offset = root_objectid;
}
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (ret == -ENOENT && parent) {
btrfs_release_path(root, path);
key.type = BTRFS_EXTENT_REF_V0_KEY;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
ret = -ENOENT;
}
#endif
return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid)
{
struct btrfs_key key;
int ret;
key.objectid = bytenr;
if (parent) {
key.type = BTRFS_SHARED_BLOCK_REF_KEY;
key.offset = parent;
} else {
key.type = BTRFS_TREE_BLOCK_REF_KEY;
key.offset = root_objectid;
}
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
btrfs_release_path(root, path);
return ret;
}
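/*
 * Pick the back ref key type implied by owner and parent: tree blocks
 * (owner below BTRFS_FIRST_FREE_OBJECTID) get block refs, data extents
 * get data refs, and a nonzero parent selects the shared variant.
 */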
static inline int extent_ref_type(u64 parent, u64 owner)
{
int type;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
if (parent > 0)
type = BTRFS_SHARED_BLOCK_REF_KEY;
else
type = BTRFS_TREE_BLOCK_REF_KEY;
} else {
if (parent > 0)
type = BTRFS_SHARED_DATA_REF_KEY;
else
type = BTRFS_EXTENT_DATA_REF_KEY;
}
return type;
}
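/*
 * Starting at @level, walk up the path to find the key just after the
 * current slot. Returns 0 and fills in @key, or 1 if the path is at
 * the end of the tree.
 */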
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key)
{
for (; level < BTRFS_MAX_LEVEL; level++) {
if (!path->nodes[level])
break;
if (path->slots[level] + 1 >=
btrfs_header_nritems(path->nodes[level]))
continue;
if (level == 0)
btrfs_item_key_to_cpu(path->nodes[level], key,
path->slots[level] + 1);
else
btrfs_node_key_to_cpu(path->nodes[level], key,
path->slots[level] + 1);
return 0;
}
return 1;
}
/*
* Look for an inline back ref. If the back ref is found, *ref_ret is set
* to the address of the inline back ref, and 0 is returned.
*
* If the back ref isn't found, *ref_ret is set to the address where it
* should be inserted, and -ENOENT is returned.
*
* If insert is true and there are too many inline back refs, the path
* points to the extent item, and -EAGAIN is returned.
*
* NOTE: inline back refs are ordered in the same way that back ref
* items in the tree are ordered.
*/
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_extent_inline_ref **ref_ret,
u64 bytenr, u64 num_bytes,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int insert)
{
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
struct btrfs_extent_inline_ref *iref;
u64 flags;
u64 item_size;
unsigned long ptr;
unsigned long end;
int extra_size;
int type;
int want;
int ret;
int err = 0;
key.objectid = bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = num_bytes;
want = extent_ref_type(parent, owner);
if (insert) {
extra_size = btrfs_extent_inline_ref_size(want);
path->keep_locks = 1;
} else
extra_size = -1;
ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
if (ret < 0) {
err = ret;
goto out;
}
BUG_ON(ret);
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
if (!insert) {
err = -ENOENT;
goto out;
}
ret = convert_extent_item_v0(trans, root, path, owner,
extra_size);
if (ret < 0) {
err = ret;
goto out;
}
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
}
#endif
BUG_ON(item_size < sizeof(*ei));
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
flags = btrfs_extent_flags(leaf, ei);
ptr = (unsigned long)(ei + 1);
end = (unsigned long)ei + item_size;
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
ptr += sizeof(struct btrfs_tree_block_info);
BUG_ON(ptr > end);
} else {
BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
}
err = -ENOENT;
while (1) {
if (ptr >= end) {
WARN_ON(ptr > end);
break;
}
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_extent_inline_ref_type(leaf, iref);
if (want < type)
break;
if (want > type) {
ptr += btrfs_extent_inline_ref_size(type);
continue;
}
if (type == BTRFS_EXTENT_DATA_REF_KEY) {
struct btrfs_extent_data_ref *dref;
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
if (match_extent_data_ref(leaf, dref, root_objectid,
owner, offset)) {
err = 0;
break;
}
if (hash_extent_data_ref_item(leaf, dref) <
hash_extent_data_ref(root_objectid, owner, offset))
break;
} else {
u64 ref_offset;
ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
if (parent > 0) {
if (parent == ref_offset) {
err = 0;
break;
}
if (ref_offset < parent)
break;
} else {
if (root_objectid == ref_offset) {
err = 0;
break;
}
if (ref_offset < root_objectid)
break;
}
}
ptr += btrfs_extent_inline_ref_size(type);
}
if (err == -ENOENT && insert) {
if (item_size + extra_size >=
BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
err = -EAGAIN;
goto out;
}
/*
* To add a new inline back ref, we have to make sure
* there is no corresponding back ref item.
* For simplicity, we just do not add a new inline back
* ref if there is any kind of item for this block.
*/
if (find_next_key(path, 0, &key) == 0 &&
key.objectid == bytenr &&
key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
err = -EAGAIN;
goto out;
}
}
*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
if (insert) {
path->keep_locks = 0;
btrfs_unlock_up_safe(path, 1);
}
return err;
}
/*
* helper to add a new inline back ref
*/
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add,
struct btrfs_delayed_extent_op *extent_op)
{
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
unsigned long ptr;
unsigned long end;
unsigned long item_offset;
u64 refs;
int size;
int type;
int ret;
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
item_offset = (unsigned long)iref - (unsigned long)ei;
type = extent_ref_type(parent, owner);
size = btrfs_extent_inline_ref_size(type);
ret = btrfs_extend_item(trans, root, path, size);
BUG_ON(ret);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
refs += refs_to_add;
btrfs_set_extent_refs(leaf, ei, refs);
if (extent_op)
__run_delayed_extent_op(extent_op, leaf, ei);
ptr = (unsigned long)ei + item_offset;
end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
if (ptr < end - size)
memmove_extent_buffer(leaf, ptr + size, ptr,
end - size - ptr);
iref = (struct btrfs_extent_inline_ref *)ptr;
btrfs_set_extent_inline_ref_type(leaf, iref, type);
if (type == BTRFS_EXTENT_DATA_REF_KEY) {
struct btrfs_extent_data_ref *dref;
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
btrfs_set_extent_data_ref_offset(leaf, dref, offset);
btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
struct btrfs_shared_data_ref *sref;
sref = (struct btrfs_shared_data_ref *)(iref + 1);
btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
} else {
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
}
btrfs_mark_buffer_dirty(leaf);
return 0;
}
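/*
 * Find the back ref for an extent, trying the inline form first and
 * falling back to the keyed back ref item.
 */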
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_extent_inline_ref **ref_ret,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset)
{
int ret;
ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
bytenr, num_bytes, parent,
root_objectid, owner, offset, 0);
if (ret != -ENOENT)
return ret;
btrfs_release_path(root, path);
*ref_ret = NULL;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
root_objectid);
} else {
ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
root_objectid, owner, offset);
}
return ret;
}
/*
* helper to update/remove inline back ref
*/
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_mod,
struct btrfs_delayed_extent_op *extent_op)
{
struct extent_buffer *leaf;
struct btrfs_extent_item *ei;
struct btrfs_extent_data_ref *dref = NULL;
struct btrfs_shared_data_ref *sref = NULL;
unsigned long ptr;
unsigned long end;
u32 item_size;
int size;
int type;
int ret;
u64 refs;
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
refs += refs_to_mod;
btrfs_set_extent_refs(leaf, ei, refs);
if (extent_op)
__run_delayed_extent_op(extent_op, leaf, ei);
type = btrfs_extent_inline_ref_type(leaf, iref);
if (type == BTRFS_EXTENT_DATA_REF_KEY) {
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
refs = btrfs_extent_data_ref_count(leaf, dref);
} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
sref = (struct btrfs_shared_data_ref *)(iref + 1);
refs = btrfs_shared_data_ref_count(leaf, sref);
} else {
refs = 1;
BUG_ON(refs_to_mod != -1);
}
BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
refs += refs_to_mod;
if (refs > 0) {
if (type == BTRFS_EXTENT_DATA_REF_KEY)
btrfs_set_extent_data_ref_count(leaf, dref, refs);
else
btrfs_set_shared_data_ref_count(leaf, sref, refs);
} else {
size = btrfs_extent_inline_ref_size(type);
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ptr = (unsigned long)iref;
end = (unsigned long)ei + item_size;
if (ptr + size < end)
memmove_extent_buffer(leaf, ptr, ptr + size,
end - ptr - size);
item_size -= size;
ret = btrfs_truncate_item(trans, root, path, item_size, 1);
BUG_ON(ret);
}
btrfs_mark_buffer_dirty(leaf);
return 0;
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner,
u64 offset, int refs_to_add,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_extent_inline_ref *iref;
int ret;
ret = lookup_inline_extent_backref(trans, root, path, &iref,
bytenr, num_bytes, parent,
root_objectid, owner, offset, 1);
if (ret == 0) {
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
ret = update_inline_extent_backref(trans, root, path, iref,
refs_to_add, extent_op);
} else if (ret == -ENOENT) {
ret = setup_inline_extent_backref(trans, root, path, iref,
parent, root_objectid,
owner, offset, refs_to_add,
extent_op);
}
return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add)
{
int ret;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
BUG_ON(refs_to_add != 1);
ret = insert_tree_block_ref(trans, root, path, bytenr,
parent, root_objectid);
} else {
ret = insert_extent_data_ref(trans, root, path, bytenr,
parent, root_objectid,
owner, offset, refs_to_add);
}
return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_drop, int is_data)
{
int ret;
BUG_ON(!is_data && refs_to_drop != 1);
if (iref) {
ret = update_inline_extent_backref(trans, root, path, iref,
-refs_to_drop, NULL);
} else if (is_data) {
ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
} else {
ret = btrfs_del_item(trans, root, path);
}
return ret;
}
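/* convert the byte range to 512-byte sectors and hand it to the block layer */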
static void btrfs_issue_discard(struct block_device *bdev,
u64 start, u64 len)
{
blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
DISCARD_FL_BARRIER);
}
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
u64 num_bytes)
{
int ret;
u64 map_length = num_bytes;
struct btrfs_multi_bio *multi = NULL;
if (!btrfs_test_opt(root, DISCARD))
return 0;
/* Tell the block device(s) that the sectors can be discarded */
ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
bytenr, &map_length, &multi, 0);
if (!ret) {
struct btrfs_bio_stripe *stripe = multi->stripes;
int i;
if (map_length > num_bytes)
map_length = num_bytes;
for (i = 0; i < multi->num_stripes; i++, stripe++) {
btrfs_issue_discard(stripe->dev->bdev,
stripe->physical,
map_length);
}
kfree(multi);
}
return ret;
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset)
{
int ret;
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
root_objectid == BTRFS_TREE_LOG_OBJECTID);
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
parent, root_objectid, (int)owner,
BTRFS_ADD_DELAYED_REF, NULL);
} else {
ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
parent, root_objectid, owner, offset,
BTRFS_ADD_DELAYED_REF, NULL);
}
return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_extent_item *item;
u64 refs;
int ret;
int err = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = 1;
path->leave_spinning = 1;
/* this will set up the path even if it fails to insert the back ref */
ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
path, bytenr, num_bytes, parent,
root_objectid, owner, offset,
refs_to_add, extent_op);
if (ret == 0)
goto out;
if (ret != -EAGAIN) {
err = ret;
goto out;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, item);
btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
if (extent_op)
__run_delayed_extent_op(extent_op, leaf, item);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(root->fs_info->extent_root, path);
path->reada = 1;
path->leave_spinning = 1;
/* now insert the actual backref */
ret = insert_extent_backref(trans, root->fs_info->extent_root,
path, bytenr, parent, root_objectid,
owner, offset, refs_to_add);
BUG_ON(ret);
out:
btrfs_free_path(path);
return err;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
{
int ret = 0;
struct btrfs_delayed_data_ref *ref;
struct btrfs_key ins;
u64 parent = 0;
u64 ref_root = 0;
u64 flags = 0;
ins.objectid = node->bytenr;
ins.offset = node->num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
ref = btrfs_delayed_node_to_data_ref(node);
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
parent = ref->parent;
else
ref_root = ref->root;
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
if (extent_op) {
BUG_ON(extent_op->update_key);
flags |= extent_op->flags_to_set;
}
ret = alloc_reserved_file_extent(trans, root,
parent, ref_root, flags,
ref->objectid, ref->offset,
&ins, node->ref_mod);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
node->num_bytes, parent,
ref_root, ref->objectid,
ref->offset, node->ref_mod,
extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
ret = __btrfs_free_extent(trans, root, node->bytenr,
node->num_bytes, parent,
ref_root, ref->objectid,
ref->offset, node->ref_mod,
extent_op);
} else {
BUG();
}
return ret;
}
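/*
 * Apply a queued extent op to the extent item in place: OR in any new
 * flags and, for tree blocks, update the first key recorded in the tree
 * block info.
 */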
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
struct btrfs_extent_item *ei)
{
u64 flags = btrfs_extent_flags(leaf, ei);
if (extent_op->update_flags) {
flags |= extent_op->flags_to_set;
btrfs_set_extent_flags(leaf, ei, flags);
}
if (extent_op->update_key) {
struct btrfs_tree_block_info *bi;
BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
bi = (struct btrfs_tree_block_info *)(ei + 1);
btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_key key;
struct btrfs_path *path;
struct btrfs_extent_item *ei;
struct extent_buffer *leaf;
u32 item_size;
int ret;
int err = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = node->bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = node->num_bytes;
path->reada = 1;
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
path, 0, 1);
if (ret < 0) {
err = ret;
goto out;
}
if (ret > 0) {
err = -EIO;
goto out;
}
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
path, (u64)-1, 0);
if (ret < 0) {
err = ret;
goto out;
}
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
}
#endif
BUG_ON(item_size < sizeof(*ei));
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
__run_delayed_extent_op(extent_op, leaf, ei);
btrfs_mark_buffer_dirty(leaf);
out:
btrfs_free_path(path);
return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
{
int ret = 0;
struct btrfs_delayed_tree_ref *ref;
struct btrfs_key ins;
u64 parent = 0;
u64 ref_root = 0;
ins.objectid = node->bytenr;
ins.offset = node->num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
ref = btrfs_delayed_node_to_tree_ref(node);
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
parent = ref->parent;
else
ref_root = ref->root;
BUG_ON(node->ref_mod != 1);
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags ||
!extent_op->update_key);
ret = alloc_reserved_tree_block(trans, root,
parent, ref_root,
extent_op->flags_to_set,
&extent_op->key,
ref->level, &ins);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
node->num_bytes, parent, ref_root,
ref->level, 0, 1, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
ret = __btrfs_free_extent(trans, root, node->bytenr,
node->num_bytes, parent, ref_root,
ref->level, 0, 1, extent_op);
} else {
BUG();
}
return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
{
int ret;
if (btrfs_delayed_ref_is_head(node)) {
struct btrfs_delayed_ref_head *head;
/*
* we've hit the end of the chain and we were supposed
* to insert this extent into the tree. But it got
* deleted before we ever needed to insert it, so all
* we have to do is clean up the accounting.
*/
BUG_ON(extent_op);
head = btrfs_delayed_node_to_head(node);
if (insert_reserved) {
int mark_free = 0;
struct extent_buffer *must_clean = NULL;
ret = pin_down_bytes(trans, root, NULL,
node->bytenr, node->num_bytes,
head->is_data, 1, &must_clean);
if (ret > 0)
mark_free = 1;
if (must_clean) {
clean_tree_block(NULL, root, must_clean);
btrfs_tree_unlock(must_clean);
free_extent_buffer(must_clean);
}
if (head->is_data) {
ret = btrfs_del_csums(trans, root,
node->bytenr,
node->num_bytes);
BUG_ON(ret);
}
if (mark_free) {
ret = btrfs_free_reserved_extent(root,
node->bytenr,
node->num_bytes);
BUG_ON(ret);
}
}
mutex_unlock(&head->mutex);
return 0;
}
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
ret = run_delayed_tree_ref(trans, root, node, extent_op,
insert_reserved);
else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
node->type == BTRFS_SHARED_DATA_REF_KEY)
ret = run_delayed_data_ref(trans, root, node, extent_op,
insert_reserved);
else
BUG();
return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
struct rb_node *node;
struct btrfs_delayed_ref_node *ref;
int action = BTRFS_ADD_DELAYED_REF;
again:
/*
* Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
* This prevents the ref count from going down to zero while
* there are still pending delayed refs.
*/
node = rb_prev(&head->node.rb_node);
while (1) {
if (!node)
break;
ref = rb_entry(node, struct btrfs_delayed_ref_node,
rb_node);
if (ref->bytenr != head->node.bytenr)
break;
if (ref->action == action)
return ref;
node = rb_prev(node);
}
if (action == BTRFS_ADD_DELAYED_REF) {
action = BTRFS_DROP_DELAYED_REF;
goto again;
}
return NULL;
}
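/*
 * Run the delayed refs for every head on the cluster list, dropping and
 * retaking delayed_refs->lock around each one. Returns the number of
 * refs processed.
 */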
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct list_head *cluster)
{
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_head *locked_ref = NULL;
struct btrfs_delayed_extent_op *extent_op;
int ret;
int count = 0;
int must_insert_reserved = 0;
delayed_refs = &trans->transaction->delayed_refs;
while (1) {
if (!locked_ref) {
/* pick a new head ref from the cluster list */
if (list_empty(cluster))
break;
locked_ref = list_entry(cluster->next,
struct btrfs_delayed_ref_head, cluster);
/* grab the lock that says we are going to process
* all the refs for this head */
ret = btrfs_delayed_ref_lock(trans, locked_ref);
/*
* we may have dropped the spin lock to get the head
* mutex lock, and that might have given someone else
* time to free the head. If that's true, it has been
* removed from our list and we can move on.
*/
if (ret == -EAGAIN) {
locked_ref = NULL;
count++;
continue;
}
}
/*
* record the must insert reserved flag before we
* drop the spin lock.
*/
must_insert_reserved = locked_ref->must_insert_reserved;
locked_ref->must_insert_reserved = 0;
extent_op = locked_ref->extent_op;
locked_ref->extent_op = NULL;
/*
* locked_ref is the head node, so we have to go one
* node back for any delayed ref updates
*/
ref = select_delayed_ref(locked_ref);
if (!ref) {
/* All delayed refs have been processed. Go ahead
* and send the head node to run_one_delayed_ref,
* so that any accounting fixes can happen.
*/
ref = &locked_ref->node;
if (extent_op && must_insert_reserved) {
kfree(extent_op);
extent_op = NULL;
}
if (extent_op) {
spin_unlock(&delayed_refs->lock);
ret = run_delayed_extent_op(trans, root,
ref, extent_op);
BUG_ON(ret);
kfree(extent_op);
cond_resched();
spin_lock(&delayed_refs->lock);
continue;
}
list_del_init(&locked_ref->cluster);
locked_ref = NULL;
}
ref->in_tree = 0;
rb_erase(&ref->rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
spin_unlock(&delayed_refs->lock);
ret = run_one_delayed_ref(trans, root, ref, extent_op,
must_insert_reserved);
BUG_ON(ret);
btrfs_put_delayed_ref(ref);
kfree(extent_op);
count++;
cond_resched();
spin_lock(&delayed_refs->lock);
}
return count;
}
/*
* This starts processing the delayed reference count updates and
* extent insertions we have queued up so far. count can be
* 0, which means to process everything in the tree at the start
* of the run (but not newly added entries), or it can be some target
* number you'd like to process.
*/
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count)
{
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct list_head cluster;
int ret;
int run_all = count == (unsigned long)-1;
int run_most = 0;
if (root == root->fs_info->extent_root)
root = root->fs_info->tree_root;
delayed_refs = &trans->transaction->delayed_refs;
INIT_LIST_HEAD(&cluster);
again:
spin_lock(&delayed_refs->lock);
if (count == 0) {
count = delayed_refs->num_entries * 2;
run_most = 1;
}
while (1) {
if (!(run_all || run_most) &&
delayed_refs->num_heads_ready < 64)
break;
/*
* go find something we can process in the rbtree. We start at
* the beginning of the tree, and then build a cluster
* of refs to process starting at the first one we are able to
* lock
*/
ret = btrfs_find_ref_cluster(trans, &cluster,
delayed_refs->run_delayed_start);
if (ret)
break;
ret = run_clustered_refs(trans, root, &cluster);
BUG_ON(ret < 0);
count -= min_t(unsigned long, ret, count);
if (count == 0)
break;
}
if (run_all) {
node = rb_first(&delayed_refs->root);
if (!node)
goto out;
count = (unsigned long)-1;
while (node) {
ref = rb_entry(node, struct btrfs_delayed_ref_node,
rb_node);
if (btrfs_delayed_ref_is_head(ref)) {
struct btrfs_delayed_ref_head *head;
head = btrfs_delayed_node_to_head(ref);
atomic_inc(&ref->refs);
spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(ref);
cond_resched();
goto again;
}
node = rb_next(node);
}
spin_unlock(&delayed_refs->lock);
schedule_timeout(1);
goto again;
}
out:
spin_unlock(&delayed_refs->lock);
return 0;
}
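/*
 * Queue a delayed extent op that ORs the given flags into the extent
 * item; the update is applied when the delayed refs are run.
 */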
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 flags,
int is_data)
{
struct btrfs_delayed_extent_op *extent_op;
int ret;
extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
if (!extent_op)
return -ENOMEM;
extent_op->flags_to_set = flags;
extent_op->update_flags = 1;
extent_op->update_key = 0;
extent_op->is_data = is_data ? 1 : 0;
ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
if (ret)
kfree(extent_op);
return ret;
}
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 objectid, u64 offset, u64 bytenr)
{
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_data_ref *data_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct rb_node *node;
int ret = 0;
ret = -ENOENT;
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
head = btrfs_find_delayed_ref_head(trans, bytenr);
if (!head)
goto out;
if (!mutex_trylock(&head->mutex)) {
atomic_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(root->fs_info->extent_root, path);
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(&head->node);
return -EAGAIN;
}
node = rb_prev(&head->node.rb_node);
if (!node)
goto out_unlock;
ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
if (ref->bytenr != bytenr)
goto out_unlock;
ret = 1;
if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
goto out_unlock;
data_ref = btrfs_delayed_node_to_data_ref(ref);
node = rb_prev(node);
if (node) {
ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
if (ref->bytenr == bytenr)
goto out_unlock;
}
if (data_ref->root != root->root_key.objectid ||
data_ref->objectid != objectid || data_ref->offset != offset)
goto out_unlock;
ret = 0;
out_unlock:
mutex_unlock(&head->mutex);
out:
spin_unlock(&delayed_refs->lock);
return ret;
}
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 objectid, u64 offset, u64 bytenr)
{
struct btrfs_root *extent_root = root->fs_info->extent_root;
struct extent_buffer *leaf;
struct btrfs_extent_data_ref *ref;
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_item *ei;
struct btrfs_key key;
u32 item_size;
int ret;
key.objectid = bytenr;
key.offset = (u64)-1;
key.type = BTRFS_EXTENT_ITEM_KEY;
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
goto out;
BUG_ON(ret == 0);
ret = -ENOENT;
if (path->slots[0] == 0)
goto out;
path->slots[0]--;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
goto out;
ret = 1;
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
goto out;
}
#endif
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
if (item_size != sizeof(*ei) +
btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
goto out;
if (btrfs_extent_generation(leaf, ei) <=
btrfs_root_last_snapshot(&root->root_item))
goto out;
iref = (struct btrfs_extent_inline_ref *)(ei + 1);
if (btrfs_extent_inline_ref_type(leaf, iref) !=
BTRFS_EXTENT_DATA_REF_KEY)
goto out;
ref = (struct btrfs_extent_data_ref *)(&iref->offset);
if (btrfs_extent_refs(leaf, ei) !=
btrfs_extent_data_ref_count(leaf, ref) ||
btrfs_extent_data_ref_root(leaf, ref) !=
root->root_key.objectid ||
btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
goto out;
ret = 0;
out:
return ret;
}
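/*
* returns 0 when no other root holds a reference to this extent, in
* either the committed extent tree or the delayed ref queue.  A nonzero
* return means the extent may be cross referenced and must be cowed.
*/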
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 offset, u64 bytenr)
{
struct btrfs_path *path;
int ret;
int ret2;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
do {
ret = check_committed_ref(trans, root, path, objectid,
offset, bytenr);
if (ret && ret != -ENOENT)
goto out;
ret2 = check_delayed_ref(trans, root, path, objectid,
offset, bytenr);
} while (ret2 == -EAGAIN);
if (ret2 && ret2 != -ENOENT) {
ret = ret2;
goto out;
}
if (ret != -ENOENT || ret2 != -ENOENT)
ret = 0;
out:
btrfs_free_path(path);
return ret;
}
#if 0
int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf, u32 nr_extents)
{
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
u64 root_gen;
u32 nritems;
int i;
int level;
int ret = 0;
int shared = 0;
if (!root->ref_cows)
return 0;
if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
shared = 0;
root_gen = root->root_key.offset;
} else {
shared = 1;
root_gen = trans->transid - 1;
}
level = btrfs_header_level(buf);
nritems = btrfs_header_nritems(buf);
if (level == 0) {
struct btrfs_leaf_ref *ref;
struct btrfs_extent_info *info;
ref = btrfs_alloc_leaf_ref(root, nr_extents);
if (!ref) {
ret = -ENOMEM;
goto out;
}
ref->root_gen = root_gen;
ref->bytenr = buf->start;
ref->owner = btrfs_header_owner(buf);
ref->generation = btrfs_header_generation(buf);
ref->nritems = nr_extents;
info = ref->extents;
for (i = 0; nr_extents > 0 && i < nritems; i++) {
u64 disk_bytenr;
btrfs_item_key_to_cpu(buf, &key, i);
if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
continue;
fi = btrfs_item_ptr(buf, i,
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(buf, fi) ==
BTRFS_FILE_EXTENT_INLINE)
continue;
disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
if (disk_bytenr == 0)
continue;
info->bytenr = disk_bytenr;
info->num_bytes =
btrfs_file_extent_disk_num_bytes(buf, fi);
info->objectid = key.objectid;
info->offset = key.offset;
info++;
}
ret = btrfs_add_leaf_ref(root, ref, shared);
if (ret == -EEXIST && shared) {
struct btrfs_leaf_ref *old;
old = btrfs_lookup_leaf_ref(root, ref->bytenr);
BUG_ON(!old);
btrfs_remove_leaf_ref(root, old);
btrfs_free_leaf_ref(root, old);
ret = btrfs_add_leaf_ref(root, ref, shared);
}
WARN_ON(ret);
btrfs_free_leaf_ref(root, ref);
}
out:
return ret;
}
/* when a block goes through cow, we update the reference counts of
* everything that block points to. The internal pointers of the block
* can be in just about any order, and it is likely to have clusters of
* things that are close together and clusters of things that are not.
*
* To help reduce the seeks that come with updating all of these reference
* counts, sort them by byte number before actual updates are done.
*
* struct refsort is used to match byte number to slot in the btree block.
* we sort based on the byte number and then use the slot to actually
* find the item.
*
* struct refsort is smaller than struct btrfs_item and smaller than
* struct btrfs_key_ptr. Since we're currently limited to the page size
* for a btree block, there's no way for a kmalloc of refsorts for a
* single node to be bigger than a page.
*/
struct refsort {
u64 bytenr;
u32 slot;
};
/*
* for passing into sort()
*/
static int refsort_cmp(const void *a_void, const void *b_void)
{
const struct refsort *a = a_void;
const struct refsort *b = b_void;
if (a->bytenr < b->bytenr)
return -1;
if (a->bytenr > b->bytenr)
return 1;
return 0;
}
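/*
* illustrative use of the comparator above (assumes a 'sorted' array
* with one refsort entry per pointer in the block):
*
*	sort(sorted, nritems, sizeof(struct refsort), refsort_cmp, NULL);
*
* walking 'sorted' afterwards visits the refs in bytenr order while
* sorted[i].slot still locates each item in the block.
*/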
#endif
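/*
* walk every pointer in 'buf' and update the reference counts of the
* extents it points to: file extent disk bytenrs for leaves, child
* blocks for nodes.  'inc' picks between adding and dropping a ref, and
* 'full_backref' keys the refs on this block instead of on the root.
*/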
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
int full_backref, int inc)
{
u64 bytenr;
u64 num_bytes;
u64 parent;
u64 ref_root;
u32 nritems;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
int i;
int level;
int ret = 0;
int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
u64, u64, u64, u64, u64, u64);
ref_root = btrfs_header_owner(buf);
nritems = btrfs_header_nritems(buf);
level = btrfs_header_level(buf);
if (!root->ref_cows && level == 0)
return 0;
if (inc)
process_func = btrfs_inc_extent_ref;
else
process_func = btrfs_free_extent;
if (full_backref)
parent = buf->start;
else
parent = 0;
for (i = 0; i < nritems; i++) {
if (level == 0) {
btrfs_item_key_to_cpu(buf, &key, i);
if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
continue;
fi = btrfs_item_ptr(buf, i,
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(buf, fi) ==
BTRFS_FILE_EXTENT_INLINE)
continue;
bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
if (bytenr == 0)
continue;
num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
key.offset -= btrfs_file_extent_offset(buf, fi);
ret = process_func(trans, root, bytenr, num_bytes,
parent, ref_root, key.objectid,
key.offset);
if (ret)
goto fail;
} else {
bytenr = btrfs_node_blockptr(buf, i);
num_bytes = btrfs_level_size(root, level - 1);
ret = process_func(trans, root, bytenr, num_bytes,
parent, ref_root, level - 1, 0);
if (ret)
goto fail;
}
}
return 0;
fail:
BUG();
return ret;
}
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf, int full_backref)
{
return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf, int full_backref)
{
return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
static int write_one_cache_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_block_group_cache *cache)
{
int ret;
struct btrfs_root *extent_root = root->fs_info->extent_root;
unsigned long bi;
struct extent_buffer *leaf;
ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
if (ret < 0)
goto fail;
BUG_ON(ret);
leaf = path->nodes[0];
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(extent_root, path);
fail:
if (ret)
return ret;
return 0;
}
static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *cache)
{
struct rb_node *node;
spin_lock(&root->fs_info->block_group_cache_lock);
node = rb_next(&cache->cache_node);
btrfs_put_block_group(cache);
if (node) {
cache = rb_entry(node, struct btrfs_block_group_cache,
cache_node);
atomic_inc(&cache->count);
} else
cache = NULL;
spin_unlock(&root->fs_info->block_group_cache_lock);
return cache;
}
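/*
* write every dirty block group item back into the extent tree.  The
* delayed refs run first because processing them can dirty more block
* groups, so the scan restarts from offset zero until a full pass finds
* nothing dirty.
*/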
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_block_group_cache *cache;
int err = 0;
struct btrfs_path *path;
u64 last = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
while (1) {
if (last == 0) {
err = btrfs_run_delayed_refs(trans, root,
(unsigned long)-1);
BUG_ON(err);
}
cache = btrfs_lookup_first_block_group(root->fs_info, last);
while (cache) {
if (cache->dirty)
break;
cache = next_block_group(root, cache);
}
if (!cache) {
if (last == 0)
break;
last = 0;
continue;
}
cache->dirty = 0;
last = cache->key.objectid + cache->key.offset;
err = write_one_cache_group(trans, root, path, cache);
BUG_ON(err);
btrfs_put_block_group(cache);
}
btrfs_free_path(path);
return 0;
}
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
struct btrfs_block_group_cache *block_group;
int readonly = 0;
block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
if (!block_group || block_group->ro)
readonly = 1;
if (block_group)
btrfs_put_block_group(block_group);
return readonly;
}
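/*
* accumulate per-profile space accounting: add the new bytes to the
* space_info matching 'flags', allocating and registering a fresh one on
* first use.
*/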
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
struct btrfs_space_info **space_info)
{
struct btrfs_space_info *found;
found = __find_space_info(info, flags);
if (found) {
spin_lock(&found->lock);
found->total_bytes += total_bytes;
found->bytes_used += bytes_used;
found->full = 0;
spin_unlock(&found->lock);
*space_info = found;
return 0;
}
found = kzalloc(sizeof(*found), GFP_NOFS);
if (!found)
return -ENOMEM;
INIT_LIST_HEAD(&found->block_groups);
init_rwsem(&found->groups_sem);
spin_lock_init(&found->lock);
found->flags = flags;
found->total_bytes = total_bytes;
found->bytes_used = bytes_used;
found->bytes_pinned = 0;
found->bytes_reserved = 0;
found->bytes_readonly = 0;
found->bytes_delalloc = 0;
found->full = 0;
found->force_alloc = 0;
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
atomic_set(&found->caching_threads, 0);
return 0;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_DUP);
if (extra_flags) {
if (flags & BTRFS_BLOCK_GROUP_DATA)
fs_info->avail_data_alloc_bits |= extra_flags;
if (flags & BTRFS_BLOCK_GROUP_METADATA)
fs_info->avail_metadata_alloc_bits |= extra_flags;
if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
fs_info->avail_system_alloc_bits |= extra_flags;
}
}
static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
{
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
if (!cache->ro) {
cache->space_info->bytes_readonly += cache->key.offset -
btrfs_block_group_used(&cache->item);
cache->ro = 1;
}
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
}
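/*
* drop RAID/DUP profile bits that the current number of rw devices can
* no longer satisfy, then resolve conflicting combinations so only one
* profile bit survives.  e.g. with a single device DATA|RAID1 reduces to
* plain DATA, and DUP|RAID1 on two devices reduces to RAID1.
*/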
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
u64 num_devices = root->fs_info->fs_devices->rw_devices;
if (num_devices == 1)
flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
if (num_devices < 4)
flags &= ~BTRFS_BLOCK_GROUP_RAID10;
if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
(flags & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10))) {
flags &= ~BTRFS_BLOCK_GROUP_DUP;
}
if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
(flags & BTRFS_BLOCK_GROUP_RAID10)) {
flags &= ~BTRFS_BLOCK_GROUP_RAID1;
}
if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
((flags & BTRFS_BLOCK_GROUP_RAID1) |
(flags & BTRFS_BLOCK_GROUP_RAID10) |
(flags & BTRFS_BLOCK_GROUP_DUP)))
flags &= ~BTRFS_BLOCK_GROUP_RAID0;
return flags;
}
static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
{
struct btrfs_fs_info *info = root->fs_info;
u64 alloc_profile;
if (data) {
alloc_profile = info->avail_data_alloc_bits &
info->data_alloc_profile;
data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
} else if (root == root->fs_info->chunk_root) {
alloc_profile = info->avail_system_alloc_bits &
info->system_alloc_profile;
data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
} else {
alloc_profile = info->avail_metadata_alloc_bits &
info->metadata_alloc_profile;
data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
}
return btrfs_reduce_alloc_profile(root, data);
}
void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
{
u64 alloc_target;
alloc_target = btrfs_get_alloc_profile(root, 1);
BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
alloc_target);
}
static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
{
u64 num_bytes;
int level;
level = BTRFS_MAX_LEVEL - 2;
/*
* NOTE: these calculations are absolutely the worst possible case.
* This assumes that _every_ item we insert will require a new leaf, and
* that the tree has grown to its maximum level size.
*/
/*
* for every item we insert we could insert both an extent item and an
* extent ref item. Then for every item we insert, we will need to cow
* both the original leaf, plus the leaf to the left and right of it.
*
* Unless we are talking about the extent root, then we just want the
* number of items * 2, since we just need the extent item plus its ref.
*/
if (root == root->fs_info->extent_root)
num_bytes = num_items * 2;
else
num_bytes = (num_items + (2 * num_items)) * 3;
/*
* num_bytes currently holds the total number of leaves we could need;
* charge the leaf size for each, plus 2 cowed nodes per level above the
* leaves for every one of them.
*/
num_bytes = (num_bytes * root->leafsize) +
(num_bytes * (level * 2)) * root->nodesize;
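/*
* worked example (illustrative, assuming BTRFS_MAX_LEVEL == 8 and 4K
* leaves and nodes): one item on a non-extent root gives
* (1 + 2) * 3 = 9 leaves, so we reserve
* 9 * 4096 + 9 * (6 * 2) * 4096 = 117 * 4096 bytes.
*/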
return num_bytes;
}
/*
* Unreserve metadata space for delalloc. If we have no more reserved
* extents than outstanding ones, this function does nothing.
*/
int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
struct inode *inode, int num_items)
{
struct btrfs_fs_info *info = root->fs_info;
struct btrfs_space_info *meta_sinfo;
u64 num_bytes;
u64 alloc_target;
bool bug = false;
/* get the space info for where the metadata will live */
alloc_target = btrfs_get_alloc_profile(root, 0);
meta_sinfo = __find_space_info(info, alloc_target);
num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
num_items);
spin_lock(&meta_sinfo->lock);
spin_lock(&BTRFS_I(inode)->accounting_lock);
if (BTRFS_I(inode)->reserved_extents <=
BTRFS_I(inode)->outstanding_extents) {
spin_unlock(&BTRFS_I(inode)->accounting_lock);
spin_unlock(&meta_sinfo->lock);
return 0;
}
spin_unlock(&BTRFS_I(inode)->accounting_lock);
BTRFS_I(inode)->reserved_extents--;
BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
if (meta_sinfo->bytes_delalloc < num_bytes) {
bug = true;
meta_sinfo->bytes_delalloc = 0;
} else {
meta_sinfo->bytes_delalloc -= num_bytes;
}
spin_unlock(&meta_sinfo->lock);
BUG_ON(bug);
return 0;
}
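/*
* flip force_delalloc once delalloc reservations reach 80% of whatever
* metadata space is not already used, reserved, pinned or otherwise
* spoken for.  e.g. with 1GB of such free space the flag trips at
* roughly 819MB of delalloc reservations.
*/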
static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
{
u64 thresh;
thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
meta_sinfo->bytes_super + meta_sinfo->bytes_root +
meta_sinfo->bytes_may_use;
thresh = meta_sinfo->total_bytes - thresh;
thresh *= 80;
do_div(thresh, 100);
if (thresh <= meta_sinfo->bytes_delalloc)
meta_sinfo->force_delalloc = 1;
else
meta_sinfo->force_delalloc = 0;
}
struct async_flush {
struct btrfs_root *root;
struct btrfs_space_info *info;
struct btrfs_work work;
};
static noinline void flush_delalloc_async(struct btrfs_work *work)
{
struct async_flush *async;
struct btrfs_root *root;
struct btrfs_space_info *info;
async = container_of(work, struct async_flush, work);
root = async->root;
info = async->info;
btrfs_start_delalloc_inodes(root);
wake_up(&info->flush_wait);
btrfs_wait_ordered_extents(root, 0);
spin_lock(&info->lock);
info->flushing = 0;
spin_unlock(&info->lock);
wake_up(&info->flush_wait);
kfree(async);
}
static void wait_on_flush(struct btrfs_space_info *info)
{
DEFINE_WAIT(wait);
u64 used;
while (1) {
prepare_to_wait(&info->flush_wait, &wait,
TASK_UNINTERRUPTIBLE);
spin_lock(&info->lock);
if (!info->flushing) {
spin_unlock(&info->lock);
break;
}
used = info->bytes_used + info->bytes_reserved +
info->bytes_pinned + info->bytes_readonly +
info->bytes_super + info->bytes_root +
info->bytes_may_use + info->bytes_delalloc;
if (used < info->total_bytes) {
spin_unlock(&info->lock);
break;
}
spin_unlock(&info->lock);
schedule();
}
finish_wait(&info->flush_wait, &wait);
}
static void flush_delalloc(struct btrfs_root *root,
struct btrfs_space_info *info)
{
struct async_flush *async;
bool wait = false;
spin_lock(&info->lock);
if (!info->flushing) {
info->flushing = 1;
init_waitqueue_head(&info->flush_wait);
} else {
wait = true;
}
spin_unlock(&info->lock);
if (wait) {
wait_on_flush(info);
return;
}
async = kzalloc(sizeof(*async), GFP_NOFS);
if (!async)
goto flush;
async->root = root;
async->info = info;
async->work.func = flush_delalloc_async;
btrfs_queue_worker(&root->fs_info->enospc_workers,
&async->work);
wait_on_flush(info);
return;
flush:
btrfs_start_delalloc_inodes(root);
btrfs_wait_ordered_extents(root, 0);
spin_lock(&info->lock);
info->flushing = 0;
spin_unlock(&info->lock);
wake_up(&info->flush_wait);
}
static int maybe_allocate_chunk(struct btrfs_root *root,
struct btrfs_space_info *info)
{
struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
struct btrfs_trans_handle *trans;
bool wait = false;
int ret = 0;
u64 min_metadata;
u64 free_space;
free_space = btrfs_super_total_bytes(disk_super);
/*
* we allow the metadata to grow to a max of 10GB or 5% of the space in
* the volume, whichever is smaller.
*/
min_metadata = min((u64)10 * 1024 * 1024 * 1024,
div64_u64(free_space * 5, 100));
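/*
* e.g. a 100GB filesystem caps metadata at 5GB, while anything over
* 200GB hits the 10GB ceiling.
*/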
if (info->total_bytes >= min_metadata) {
spin_unlock(&info->lock);
return 0;
}
if (info->full) {
spin_unlock(&info->lock);
return 0;
}
if (!info->allocating_chunk) {
info->force_alloc = 1;
info->allocating_chunk = 1;
init_waitqueue_head(&info->allocate_wait);
} else {
wait = true;
}
spin_unlock(&info->lock);
if (wait) {
wait_event(info->allocate_wait,
!info->allocating_chunk);
return 1;
}
trans = btrfs_start_transaction(root, 1);
if (!trans) {
ret = -ENOMEM;
goto out;
}
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4096 + 2 * 1024 * 1024,
info->flags, 0);
btrfs_end_transaction(trans, root);
out:
spin_lock(&info->lock);
info->allocating_chunk = 0;
spin_unlock(&info->lock);
wake_up(&info->allocate_wait);
if (ret)
return 0;
return 1;
}
/*
* Reserve metadata space for delalloc.
*/
int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
struct inode *inode, int num_items)
{
struct btrfs_fs_info *info = root->fs_info;
struct btrfs_space_info *meta_sinfo;
u64 num_bytes;
u64 used;
u64 alloc_target;
int flushed = 0;
int force_delalloc;
/* get the space info for where the metadata will live */
alloc_target = btrfs_get_alloc_profile(root, 0);
meta_sinfo = __find_space_info(info, alloc_target);
num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
num_items);
again:
spin_lock(&meta_sinfo->lock);
force_delalloc = meta_sinfo->force_delalloc;
if (unlikely(!meta_sinfo->bytes_root))