| /* |
| * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com |
| * Written by Alex Tomas <alex@clusterfs.com> |
| * |
| * Architecture independence: |
| * Copyright (c) 2005, Bull S.A. |
| * Written by Pierre Peiffer <pierre.peiffer@bull.net> |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 as |
| * published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| * GNU General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public License |
| * along with this program; if not, write to the Free Software |
| * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. |
| */ |
| |
| /* |
| * Extents support for EXT4 |
| * |
| * TODO: |
| * - ext4*_error() should be used in some situations |
| * - analyze all BUG()/BUG_ON(), use -EIO where appropriate |
| * - smart tree reduction |
| */ |
| |
| #include <linux/fs.h> |
| #include <linux/time.h> |
| #include <linux/jbd2.h> |
| #include <linux/highuid.h> |
| #include <linux/pagemap.h> |
| #include <linux/quotaops.h> |
| #include <linux/string.h> |
| #include <linux/slab.h> |
| #include <asm/uaccess.h> |
| #include <linux/fiemap.h> |
| #include <linux/backing-dev.h> |
| #include "ext4_jbd2.h" |
| #include "ext4_extents.h" |
| #include "xattr.h" |
| |
| #include <trace/events/ext4.h> |
| |
| /* |
| * used by extent splitting. |
| */ |
| #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ |
| due to ENOSPC */ |
| #define EXT4_EXT_MARK_UNWRIT1 0x2 /* mark first half unwritten */ |
| #define EXT4_EXT_MARK_UNWRIT2 0x4 /* mark second half unwritten */ |
| |
| #define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */ |
| #define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */ |
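| |
| /* |
| * Illustration (editor's sketch of one possible combination): a caller |
| * splitting an unwritten extent whose first half has just been written |
| * might pass EXT4_EXT_MARK_UNWRIT2 together with EXT4_EXT_DATA_VALID1, |
| * so the second half stays unwritten and an ENOSPC zeroout fallback |
| * knows which half holds real data. |
| */ |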
| |
| static __le32 ext4_extent_block_csum(struct inode *inode, |
| struct ext4_extent_header *eh) |
| { |
| struct ext4_inode_info *ei = EXT4_I(inode); |
| struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| __u32 csum; |
| |
| csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh, |
| EXT4_EXTENT_TAIL_OFFSET(eh)); |
| return cpu_to_le32(csum); |
| } |
| |
| static int ext4_extent_block_csum_verify(struct inode *inode, |
| struct ext4_extent_header *eh) |
| { |
| struct ext4_extent_tail *et; |
| |
| if (!ext4_has_metadata_csum(inode->i_sb)) |
| return 1; |
| |
| et = find_ext4_extent_tail(eh); |
| if (et->et_checksum != ext4_extent_block_csum(inode, eh)) |
| return 0; |
| return 1; |
| } |
| |
| static void ext4_extent_block_csum_set(struct inode *inode, |
| struct ext4_extent_header *eh) |
| { |
| struct ext4_extent_tail *et; |
| |
| if (!ext4_has_metadata_csum(inode->i_sb)) |
| return; |
| |
| et = find_ext4_extent_tail(eh); |
| et->et_checksum = ext4_extent_block_csum(inode, eh); |
| } |
| |
| static int ext4_split_extent(handle_t *handle, |
| struct inode *inode, |
| struct ext4_ext_path **ppath, |
| struct ext4_map_blocks *map, |
| int split_flag, |
| int flags); |
| |
| static int ext4_split_extent_at(handle_t *handle, |
| struct inode *inode, |
| struct ext4_ext_path **ppath, |
| ext4_lblk_t split, |
| int split_flag, |
| int flags); |
| |
| static int ext4_find_delayed_extent(struct inode *inode, |
| struct extent_status *newes); |
| |
| static int ext4_ext_truncate_extend_restart(handle_t *handle, |
| struct inode *inode, |
| int needed) |
| { |
| int err; |
| |
| if (!ext4_handle_valid(handle)) |
| return 0; |
| if (handle->h_buffer_credits >= needed) |
| return 0; |
| /* |
| * If we need to extend the journal, get a few extra blocks |
| * while we're at it, for efficiency's sake. |
| */ |
| needed += 3; |
| err = ext4_journal_extend(handle, needed - handle->h_buffer_credits); |
| if (err <= 0) |
| return err; |
| err = ext4_truncate_restart_trans(handle, inode, needed); |
| if (err == 0) |
| err = -EAGAIN; |
| |
| return err; |
| } |
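| |
| /* |
| * Worked example of the logic above: with @needed == 5 and only 3 |
| * credits left on the handle, we ask the journal for (5 + 3) - 3 = 5 |
| * more credits. If the journal can extend the handle we return 0; if |
| * it cannot, the transaction is restarted with a fresh credit budget |
| * and -EAGAIN tells the caller to retry its operation. |
| */ |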
| |
| /* |
| * could return: |
| * - EROFS |
| * - ENOMEM |
| */ |
| static int ext4_ext_get_access(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path) |
| { |
| if (path->p_bh) { |
| /* path points to block */ |
| BUFFER_TRACE(path->p_bh, "get_write_access"); |
| return ext4_journal_get_write_access(handle, path->p_bh); |
| } |
| /* path points to leaf/index in inode body */ |
| /* we use in-core data, no need to protect them */ |
| return 0; |
| } |
| |
| /* |
| * could return: |
| * - EROFS |
| * - ENOMEM |
| * - EIO |
| */ |
| int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle, |
| struct inode *inode, struct ext4_ext_path *path) |
| { |
| int err; |
| |
| WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem)); |
| if (path->p_bh) { |
| ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh)); |
| /* path points to block */ |
| err = __ext4_handle_dirty_metadata(where, line, handle, |
| inode, path->p_bh); |
| } else { |
| /* path points to leaf/index in inode body */ |
| err = ext4_mark_inode_dirty(handle, inode); |
| } |
| return err; |
| } |
| |
| static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, |
| struct ext4_ext_path *path, |
| ext4_lblk_t block) |
| { |
| if (path) { |
| int depth = path->p_depth; |
| struct ext4_extent *ex; |
| |
| /* |
| * Try to predict block placement assuming that we are |
| * filling in a file which will eventually be |
| * non-sparse --- i.e., in the case of libbfd writing |
| * an ELF object's sections out-of-order but in a way |
| * that eventually results in a contiguous object or |
| * executable file, or some database extending a table |
| * space file. However, this is actually somewhat |
| * non-ideal if we are writing a sparse file such as |
| * qemu or KVM writing a raw image file that is going |
| * to stay fairly sparse, since it will end up |
| * fragmenting the file system's free space. Maybe we |
| * should have some heuristics or some way to allow |
| * userspace to pass a hint to the file system, |
| * especially if the latter case turns out to be |
| * common. |
| */ |
| ex = path[depth].p_ext; |
| if (ex) { |
| ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); |
| ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); |
| |
| if (block > ext_block) |
| return ext_pblk + (block - ext_block); |
| else |
| return ext_pblk - (ext_block - block); |
| } |
| |
| /* it looks like the index is empty; |
| * try to find the starting block from the index itself */ |
| if (path[depth].p_bh) |
| return path[depth].p_bh->b_blocknr; |
| } |
| |
| /* OK. use inode's group */ |
| return ext4_inode_to_goal_block(inode); |
| } |
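| |
| /* |
| * Illustration: if the extent in the path maps logical block 100 to |
| * physical block 5000 and we are allocating logical block 110, the |
| * goal is 5000 + (110 - 100) = 5010, i.e. we aim to keep the file |
| * physically contiguous whenever the allocator can oblige. |
| */ |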
| |
| /* |
| * Allocation for a metadata block |
| */ |
| static ext4_fsblk_t |
| ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path, |
| struct ext4_extent *ex, int *err, unsigned int flags) |
| { |
| ext4_fsblk_t goal, newblock; |
| |
| goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); |
| newblock = ext4_new_meta_blocks(handle, inode, goal, flags, |
| NULL, err); |
| return newblock; |
| } |
| |
| static inline int ext4_ext_space_block(struct inode *inode, int check) |
| { |
| int size; |
| |
| size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) |
| / sizeof(struct ext4_extent); |
| #ifdef AGGRESSIVE_TEST |
| if (!check && size > 6) |
| size = 6; |
| #endif |
| return size; |
| } |
| |
| static inline int ext4_ext_space_block_idx(struct inode *inode, int check) |
| { |
| int size; |
| |
| size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) |
| / sizeof(struct ext4_extent_idx); |
| #ifdef AGGRESSIVE_TEST |
| if (!check && size > 5) |
| size = 5; |
| #endif |
| return size; |
| } |
| |
| static inline int ext4_ext_space_root(struct inode *inode, int check) |
| { |
| int size; |
| |
| size = sizeof(EXT4_I(inode)->i_data); |
| size -= sizeof(struct ext4_extent_header); |
| size /= sizeof(struct ext4_extent); |
| #ifdef AGGRESSIVE_TEST |
| if (!check && size > 3) |
| size = 3; |
| #endif |
| return size; |
| } |
| |
| static inline int ext4_ext_space_root_idx(struct inode *inode, int check) |
| { |
| int size; |
| |
| size = sizeof(EXT4_I(inode)->i_data); |
| size -= sizeof(struct ext4_extent_header); |
| size /= sizeof(struct ext4_extent_idx); |
| #ifdef AGGRESSIVE_TEST |
| if (!check && size > 4) |
| size = 4; |
| #endif |
| return size; |
| } |
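| |
| /* |
| * Worked numbers (assuming a 4 KiB block size): the header, an extent |
| * and an index entry are 12 bytes each, so an external tree block |
| * holds (4096 - 12) / 12 = 340 entries, while the root in the 60-byte |
| * i_data[] of the inode holds (60 - 12) / 12 = 4 entries. |
| */ |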
| |
| static inline int |
| ext4_force_split_extent_at(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path **ppath, ext4_lblk_t lblk, |
| int nofail) |
| { |
| struct ext4_ext_path *path = *ppath; |
| int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext); |
| |
| return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ? |
| EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0, |
| EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO | |
| (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0)); |
| } |
| |
| /* |
| * Calculate the number of metadata blocks needed |
| * to allocate @blocks |
| * Worst case is one block per extent |
| */ |
| int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) |
| { |
| struct ext4_inode_info *ei = EXT4_I(inode); |
| int idxs; |
| |
| idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) |
| / sizeof(struct ext4_extent_idx)); |
| |
| /* |
| * If the new delayed allocation block is contiguous with the |
| * previous da block, it can share index blocks with the |
| * previous block, so we only need to allocate a new index |
| * block every idxs leaf blocks. At idxs**2 blocks, we need |
| * an additional index block, and at idxs**3 blocks, yet |
| * another index block. |
| */ |
| if (ei->i_da_metadata_calc_len && |
| ei->i_da_metadata_calc_last_lblock+1 == lblock) { |
| int num = 0; |
| |
| if ((ei->i_da_metadata_calc_len % idxs) == 0) |
| num++; |
| if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0) |
| num++; |
| if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) { |
| num++; |
| ei->i_da_metadata_calc_len = 0; |
| } else |
| ei->i_da_metadata_calc_len++; |
| ei->i_da_metadata_calc_last_lblock++; |
| return num; |
| } |
| |
| /* |
| * In the worst case we need a new set of index blocks at |
| * every level of the inode's extent tree. |
| */ |
| ei->i_da_metadata_calc_len = 1; |
| ei->i_da_metadata_calc_last_lblock = lblock; |
| return ext_depth(inode) + 1; |
| } |
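| |
| /* |
| * Worked example (4 KiB blocks, so idxs == 340): the first delayed |
| * block of a run reserves ext_depth + 1 blocks, covering a worst-case |
| * split at every level. Each following contiguous block usually |
| * reserves nothing; every 340th block adds one metadata block (a leaf |
| * fills up), every 340**2-th adds another (a new index block), and so |
| * on. |
| */ |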
| |
| static int |
| ext4_ext_max_entries(struct inode *inode, int depth) |
| { |
| int max; |
| |
| if (depth == ext_depth(inode)) { |
| if (depth == 0) |
| max = ext4_ext_space_root(inode, 1); |
| else |
| max = ext4_ext_space_root_idx(inode, 1); |
| } else { |
| if (depth == 0) |
| max = ext4_ext_space_block(inode, 1); |
| else |
| max = ext4_ext_space_block_idx(inode, 1); |
| } |
| |
| return max; |
| } |
| |
| static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) |
| { |
| ext4_fsblk_t block = ext4_ext_pblock(ext); |
| int len = ext4_ext_get_actual_len(ext); |
| ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); |
| ext4_lblk_t last = lblock + len - 1; |
| |
| if (len == 0 || lblock > last) |
| return 0; |
| return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); |
| } |
| |
| static int ext4_valid_extent_idx(struct inode *inode, |
| struct ext4_extent_idx *ext_idx) |
| { |
| ext4_fsblk_t block = ext4_idx_pblock(ext_idx); |
| |
| return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); |
| } |
| |
| static int ext4_valid_extent_entries(struct inode *inode, |
| struct ext4_extent_header *eh, |
| int depth) |
| { |
| unsigned short entries; |
| if (eh->eh_entries == 0) |
| return 1; |
| |
| entries = le16_to_cpu(eh->eh_entries); |
| |
| if (depth == 0) { |
| /* leaf entries */ |
| struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); |
| struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; |
| ext4_fsblk_t pblock = 0; |
| ext4_lblk_t lblock = 0; |
| ext4_lblk_t prev = 0; |
| int len = 0; |
| while (entries) { |
| if (!ext4_valid_extent(inode, ext)) |
| return 0; |
| |
| /* Check for overlapping extents */ |
| lblock = le32_to_cpu(ext->ee_block); |
| len = ext4_ext_get_actual_len(ext); |
| if ((lblock <= prev) && prev) { |
| pblock = ext4_ext_pblock(ext); |
| es->s_last_error_block = cpu_to_le64(pblock); |
| return 0; |
| } |
| ext++; |
| entries--; |
| prev = lblock + len - 1; |
| } |
| } else { |
| struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); |
| while (entries) { |
| if (!ext4_valid_extent_idx(inode, ext_idx)) |
| return 0; |
| ext_idx++; |
| entries--; |
| } |
| } |
| return 1; |
| } |
| |
| static int __ext4_ext_check(const char *function, unsigned int line, |
| struct inode *inode, struct ext4_extent_header *eh, |
| int depth, ext4_fsblk_t pblk) |
| { |
| const char *error_msg; |
| int max = 0, err = -EFSCORRUPTED; |
| |
| if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { |
| error_msg = "invalid magic"; |
| goto corrupted; |
| } |
| if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { |
| error_msg = "unexpected eh_depth"; |
| goto corrupted; |
| } |
| if (unlikely(eh->eh_max == 0)) { |
| error_msg = "invalid eh_max"; |
| goto corrupted; |
| } |
| max = ext4_ext_max_entries(inode, depth); |
| if (unlikely(le16_to_cpu(eh->eh_max) > max)) { |
| error_msg = "too large eh_max"; |
| goto corrupted; |
| } |
| if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { |
| error_msg = "invalid eh_entries"; |
| goto corrupted; |
| } |
| if (!ext4_valid_extent_entries(inode, eh, depth)) { |
| error_msg = "invalid extent entries"; |
| goto corrupted; |
| } |
| /* Verify checksum on non-root extent tree nodes */ |
| if (ext_depth(inode) != depth && |
| !ext4_extent_block_csum_verify(inode, eh)) { |
| error_msg = "extent tree corrupted"; |
| err = -EFSBADCRC; |
| goto corrupted; |
| } |
| return 0; |
| |
| corrupted: |
| ext4_error_inode(inode, function, line, 0, |
| "pblk %llu bad header/extent: %s - magic %x, " |
| "entries %u, max %u(%u), depth %u(%u)", |
| (unsigned long long) pblk, error_msg, |
| le16_to_cpu(eh->eh_magic), |
| le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), |
| max, le16_to_cpu(eh->eh_depth), depth); |
| return err; |
| } |
| |
| #define ext4_ext_check(inode, eh, depth, pblk) \ |
| __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk)) |
| |
| int ext4_ext_check_inode(struct inode *inode) |
| { |
| return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0); |
| } |
| |
| static struct buffer_head * |
| __read_extent_tree_block(const char *function, unsigned int line, |
| struct inode *inode, ext4_fsblk_t pblk, int depth, |
| int flags) |
| { |
| struct buffer_head *bh; |
| int err; |
| |
| bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS); |
| if (unlikely(!bh)) |
| return ERR_PTR(-ENOMEM); |
| |
| if (!bh_uptodate_or_lock(bh)) { |
| trace_ext4_ext_load_extent(inode, pblk, _RET_IP_); |
| err = bh_submit_read(bh); |
| if (err < 0) |
| goto errout; |
| } |
| if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE)) |
| return bh; |
| err = __ext4_ext_check(function, line, inode, |
| ext_block_hdr(bh), depth, pblk); |
| if (err) |
| goto errout; |
| set_buffer_verified(bh); |
| /* |
| * If this is a leaf block, cache all of its entries |
| */ |
| if (!(flags & EXT4_EX_NOCACHE) && depth == 0) { |
| struct ext4_extent_header *eh = ext_block_hdr(bh); |
| struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); |
| ext4_lblk_t prev = 0; |
| int i; |
| |
| for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { |
| unsigned int status = EXTENT_STATUS_WRITTEN; |
| ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); |
| int len = ext4_ext_get_actual_len(ex); |
| |
| if (prev && (prev != lblk)) |
| ext4_es_cache_extent(inode, prev, |
| lblk - prev, ~0, |
| EXTENT_STATUS_HOLE); |
| |
| if (ext4_ext_is_unwritten(ex)) |
| status = EXTENT_STATUS_UNWRITTEN; |
| ext4_es_cache_extent(inode, lblk, len, |
| ext4_ext_pblock(ex), status); |
| prev = lblk + len; |
| } |
| } |
| return bh; |
| errout: |
| put_bh(bh); |
| return ERR_PTR(err); |
| |
| } |
| |
| #define read_extent_tree_block(inode, pblk, depth, flags) \ |
| __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \ |
| (depth), (flags)) |
| |
| /* |
| * This function is called to cache a file's extent information in the |
| * extent status tree |
| */ |
| int ext4_ext_precache(struct inode *inode) |
| { |
| struct ext4_inode_info *ei = EXT4_I(inode); |
| struct ext4_ext_path *path = NULL; |
| struct buffer_head *bh; |
| int i = 0, depth, ret = 0; |
| |
| if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) |
| return 0; /* not an extent-mapped inode */ |
| |
| down_read(&ei->i_data_sem); |
| depth = ext_depth(inode); |
| |
| path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), |
| GFP_NOFS); |
| if (path == NULL) { |
| up_read(&ei->i_data_sem); |
| return -ENOMEM; |
| } |
| |
| /* Don't cache anything if there are no external extent blocks */ |
| if (depth == 0) |
| goto out; |
| path[0].p_hdr = ext_inode_hdr(inode); |
| ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0); |
| if (ret) |
| goto out; |
| path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr); |
| while (i >= 0) { |
| /* |
| * If this is a leaf block or we've reached the end of |
| * the index block, go up |
| */ |
| if ((i == depth) || |
| path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) { |
| brelse(path[i].p_bh); |
| path[i].p_bh = NULL; |
| i--; |
| continue; |
| } |
| bh = read_extent_tree_block(inode, |
| ext4_idx_pblock(path[i].p_idx++), |
| depth - i - 1, |
| EXT4_EX_FORCE_CACHE); |
| if (IS_ERR(bh)) { |
| ret = PTR_ERR(bh); |
| break; |
| } |
| i++; |
| path[i].p_bh = bh; |
| path[i].p_hdr = ext_block_hdr(bh); |
| path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr); |
| } |
| ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED); |
| out: |
| up_read(&ei->i_data_sem); |
| ext4_ext_drop_refs(path); |
| kfree(path); |
| return ret; |
| } |
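| |
| /* |
| * Usage note: this precache pass is triggered from user space, e.g. |
| * via the EXT4_IOC_PRECACHE_EXTENTS ioctl; afterwards repeated mapping |
| * lookups can be served from the extent status tree without re-reading |
| * extent tree blocks from disk. |
| */ |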
| |
| #ifdef EXT_DEBUG |
| static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) |
| { |
| int k, l = path->p_depth; |
| |
| ext_debug("path:"); |
| for (k = 0; k <= l; k++, path++) { |
| if (path->p_idx) { |
| ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), |
| ext4_idx_pblock(path->p_idx)); |
| } else if (path->p_ext) { |
| ext_debug(" %d:[%d]%d:%llu ", |
| le32_to_cpu(path->p_ext->ee_block), |
| ext4_ext_is_unwritten(path->p_ext), |
| ext4_ext_get_actual_len(path->p_ext), |
| ext4_ext_pblock(path->p_ext)); |
| } else |
| ext_debug(" []"); |
| } |
| ext_debug("\n"); |
| } |
| |
| static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) |
| { |
| int depth = ext_depth(inode); |
| struct ext4_extent_header *eh; |
| struct ext4_extent *ex; |
| int i; |
| |
| if (!path) |
| return; |
| |
| eh = path[depth].p_hdr; |
| ex = EXT_FIRST_EXTENT(eh); |
| |
| ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); |
| |
| for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { |
| ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), |
| ext4_ext_is_unwritten(ex), |
| ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); |
| } |
| ext_debug("\n"); |
| } |
| |
| static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, |
| ext4_fsblk_t newblock, int level) |
| { |
| int depth = ext_depth(inode); |
| struct ext4_extent *ex; |
| |
| if (depth != level) { |
| struct ext4_extent_idx *idx; |
| idx = path[level].p_idx; |
| while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { |
| ext_debug("%d: move %d:%llu in new index %llu\n", level, |
| le32_to_cpu(idx->ei_block), |
| ext4_idx_pblock(idx), |
| newblock); |
| idx++; |
| } |
| |
| return; |
| } |
| |
| ex = path[depth].p_ext; |
| while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { |
| ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", |
| le32_to_cpu(ex->ee_block), |
| ext4_ext_pblock(ex), |
| ext4_ext_is_unwritten(ex), |
| ext4_ext_get_actual_len(ex), |
| newblock); |
| ex++; |
| } |
| } |
| |
| #else |
| #define ext4_ext_show_path(inode, path) |
| #define ext4_ext_show_leaf(inode, path) |
| #define ext4_ext_show_move(inode, path, newblock, level) |
| #endif |
| |
| void ext4_ext_drop_refs(struct ext4_ext_path *path) |
| { |
| int depth, i; |
| |
| if (!path) |
| return; |
| depth = path->p_depth; |
| for (i = 0; i <= depth; i++, path++) |
| if (path->p_bh) { |
| brelse(path->p_bh); |
| path->p_bh = NULL; |
| } |
| } |
| |
| /* |
| * ext4_ext_binsearch_idx: |
| * binary search for the closest index of the given block |
| * the header must be checked before calling this |
| */ |
| static void |
| ext4_ext_binsearch_idx(struct inode *inode, |
| struct ext4_ext_path *path, ext4_lblk_t block) |
| { |
| struct ext4_extent_header *eh = path->p_hdr; |
| struct ext4_extent_idx *r, *l, *m; |
| |
| ext_debug("binsearch for %u(idx): ", block); |
| |
| l = EXT_FIRST_INDEX(eh) + 1; |
| r = EXT_LAST_INDEX(eh); |
| while (l <= r) { |
| m = l + (r - l) / 2; |
| if (block < le32_to_cpu(m->ei_block)) |
| r = m - 1; |
| else |
| l = m + 1; |
| ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), |
| m, le32_to_cpu(m->ei_block), |
| r, le32_to_cpu(r->ei_block)); |
| } |
| |
| path->p_idx = l - 1; |
| ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), |
| ext4_idx_pblock(path->p_idx)); |
| |
| #ifdef CHECK_BINSEARCH |
| { |
| struct ext4_extent_idx *chix, *ix; |
| int k; |
| |
| chix = ix = EXT_FIRST_INDEX(eh); |
| for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { |
| if (k != 0 && |
| le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { |
| printk(KERN_DEBUG "k=%d, ix=0x%p, " |
| "first=0x%p\n", k, |
| ix, EXT_FIRST_INDEX(eh)); |
| printk(KERN_DEBUG "%u <= %u\n", |
| le32_to_cpu(ix->ei_block), |
| le32_to_cpu(ix[-1].ei_block)); |
| } |
| BUG_ON(k && le32_to_cpu(ix->ei_block) |
| <= le32_to_cpu(ix[-1].ei_block)); |
| if (block < le32_to_cpu(ix->ei_block)) |
| break; |
| chix = ix; |
| } |
| BUG_ON(chix != path->p_idx); |
| } |
| #endif |
| |
| } |
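| |
| /* |
| * Illustration: with index entries starting at logical blocks |
| * {0, 100, 200} and a lookup for block 150, the loop narrows l and r |
| * until l points past the entry for block 100, so p_idx = l - 1 |
| * selects the rightmost index whose ei_block does not exceed 150. |
| */ |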
| |
| /* |
| * ext4_ext_binsearch: |
| * binary search for closest extent of the given block |
| * the header must be checked before calling this |
| */ |
| static void |
| ext4_ext_binsearch(struct inode *inode, |
| struct ext4_ext_path *path, ext4_lblk_t block) |
| { |
| struct ext4_extent_header *eh = path->p_hdr; |
| struct ext4_extent *r, *l, *m; |
| |
| if (eh->eh_entries == 0) { |
| /* |
| * this leaf is empty: |
| * we get such a leaf in split/add case |
| */ |
| return; |
| } |
| |
| ext_debug("binsearch for %u: ", block); |
| |
| l = EXT_FIRST_EXTENT(eh) + 1; |
| r = EXT_LAST_EXTENT(eh); |
| |
| while (l <= r) { |
| m = l + (r - l) / 2; |
| if (block < le32_to_cpu(m->ee_block)) |
| r = m - 1; |
| else |
| l = m + 1; |
| ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), |
| m, le32_to_cpu(m->ee_block), |
| r, le32_to_cpu(r->ee_block)); |
| } |
| |
| path->p_ext = l - 1; |
| ext_debug(" -> %d:%llu:[%d]%d ", |
| le32_to_cpu(path->p_ext->ee_block), |
| ext4_ext_pblock(path->p_ext), |
| ext4_ext_is_unwritten(path->p_ext), |
| ext4_ext_get_actual_len(path->p_ext)); |
| |
| #ifdef CHECK_BINSEARCH |
| { |
| struct ext4_extent *chex, *ex; |
| int k; |
| |
| chex = ex = EXT_FIRST_EXTENT(eh); |
| for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { |
| BUG_ON(k && le32_to_cpu(ex->ee_block) |
| <= le32_to_cpu(ex[-1].ee_block)); |
| if (block < le32_to_cpu(ex->ee_block)) |
| break; |
| chex = ex; |
| } |
| BUG_ON(chex != path->p_ext); |
| } |
| #endif |
| |
| } |
| |
| int ext4_ext_tree_init(handle_t *handle, struct inode *inode) |
| { |
| struct ext4_extent_header *eh; |
| |
| eh = ext_inode_hdr(inode); |
| eh->eh_depth = 0; |
| eh->eh_entries = 0; |
| eh->eh_magic = EXT4_EXT_MAGIC; |
| eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); |
| ext4_mark_inode_dirty(handle, inode); |
| return 0; |
| } |
| |
| struct ext4_ext_path * |
| ext4_find_extent(struct inode *inode, ext4_lblk_t block, |
| struct ext4_ext_path **orig_path, int flags) |
| { |
| struct ext4_extent_header *eh; |
| struct buffer_head *bh; |
| struct ext4_ext_path *path = orig_path ? *orig_path : NULL; |
| short int depth, i, ppos = 0; |
| int ret; |
| |
| eh = ext_inode_hdr(inode); |
| depth = ext_depth(inode); |
| |
| if (path) { |
| ext4_ext_drop_refs(path); |
| if (depth > path[0].p_maxdepth) { |
| kfree(path); |
| *orig_path = path = NULL; |
| } |
| } |
| if (!path) { |
| /* account possible depth increase */ |
| path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), |
| GFP_NOFS); |
| if (unlikely(!path)) |
| return ERR_PTR(-ENOMEM); |
| path[0].p_maxdepth = depth + 1; |
| } |
| path[0].p_hdr = eh; |
| path[0].p_bh = NULL; |
| |
| i = depth; |
| /* walk through the tree */ |
| while (i) { |
| ext_debug("depth %d: num %d, max %d\n", |
| ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); |
| |
| ext4_ext_binsearch_idx(inode, path + ppos, block); |
| path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); |
| path[ppos].p_depth = i; |
| path[ppos].p_ext = NULL; |
| |
| bh = read_extent_tree_block(inode, path[ppos].p_block, --i, |
| flags); |
| if (IS_ERR(bh)) { |
| ret = PTR_ERR(bh); |
| goto err; |
| } |
| |
| eh = ext_block_hdr(bh); |
| ppos++; |
| path[ppos].p_bh = bh; |
| path[ppos].p_hdr = eh; |
| } |
| |
| path[ppos].p_depth = i; |
| path[ppos].p_ext = NULL; |
| path[ppos].p_idx = NULL; |
| |
| /* find extent */ |
| ext4_ext_binsearch(inode, path + ppos, block); |
| /* if not an empty leaf */ |
| if (path[ppos].p_ext) |
| path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); |
| |
| ext4_ext_show_path(inode, path); |
| |
| return path; |
| |
| err: |
| ext4_ext_drop_refs(path); |
| kfree(path); |
| if (orig_path) |
| *orig_path = NULL; |
| return ERR_PTR(ret); |
| } |
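| |
| /* |
| * Shape of the result: for a depth-2 tree, path[0] describes the root |
| * header in the inode body (p_bh == NULL), path[1] the intermediate |
| * index block, and path[2] the leaf; path[2].p_ext points at the |
| * extent found by the binary search, or is NULL for an empty leaf. |
| */ |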
| |
| /* |
| * ext4_ext_insert_index: |
| * insert new index [@logical;@ptr] into the block at @curp; |
| * check where to insert: before @curp or after @curp |
| */ |
| static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *curp, |
| int logical, ext4_fsblk_t ptr) |
| { |
| struct ext4_extent_idx *ix; |
| int len, err; |
| |
| err = ext4_ext_get_access(handle, inode, curp); |
| if (err) |
| return err; |
| |
| if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { |
| EXT4_ERROR_INODE(inode, |
| "logical %d == ei_block %d!", |
| logical, le32_to_cpu(curp->p_idx->ei_block)); |
| return -EFSCORRUPTED; |
| } |
| |
| if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) |
| >= le16_to_cpu(curp->p_hdr->eh_max))) { |
| EXT4_ERROR_INODE(inode, |
| "eh_entries %d >= eh_max %d!", |
| le16_to_cpu(curp->p_hdr->eh_entries), |
| le16_to_cpu(curp->p_hdr->eh_max)); |
| return -EFSCORRUPTED; |
| } |
| |
| if (logical > le32_to_cpu(curp->p_idx->ei_block)) { |
| /* insert after */ |
| ext_debug("insert new index %d after: %llu\n", logical, ptr); |
| ix = curp->p_idx + 1; |
| } else { |
| /* insert before */ |
| ext_debug("insert new index %d before: %llu\n", logical, ptr); |
| ix = curp->p_idx; |
| } |
| |
| len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; |
| BUG_ON(len < 0); |
| if (len > 0) { |
| ext_debug("insert new index %d: " |
| "move %d indices from 0x%p to 0x%p\n", |
| logical, len, ix, ix + 1); |
| memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); |
| } |
| |
| if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { |
| EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); |
| return -EFSCORRUPTED; |
| } |
| |
| ix->ei_block = cpu_to_le32(logical); |
| ext4_idx_store_pblock(ix, ptr); |
| le16_add_cpu(&curp->p_hdr->eh_entries, 1); |
| |
| if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { |
| EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); |
| return -EFSCORRUPTED; |
| } |
| |
| err = ext4_ext_dirty(handle, inode, curp); |
| ext4_std_error(inode->i_sb, err); |
| |
| return err; |
| } |
| |
| /* |
| * ext4_ext_split: |
| * inserts new subtree into the path, using free index entry |
| * at depth @at: |
| * - allocates all needed blocks (new leaf and all intermediate index blocks) |
| * - makes decision where to split |
| * - moves remaining extents and index entries (right to the split point) |
| * into the newly allocated blocks |
| * - initializes subtree |
| */ |
| static int ext4_ext_split(handle_t *handle, struct inode *inode, |
| unsigned int flags, |
| struct ext4_ext_path *path, |
| struct ext4_extent *newext, int at) |
| { |
| struct buffer_head *bh = NULL; |
| int depth = ext_depth(inode); |
| struct ext4_extent_header *neh; |
| struct ext4_extent_idx *fidx; |
| int i = at, k, m, a; |
| ext4_fsblk_t newblock, oldblock; |
| __le32 border; |
| ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ |
| int err = 0; |
| |
| /* make decision: where to split? */ |
| /* FIXME: now decision is simplest: at current extent */ |
| |
| /* if current leaf will be split, then we should use |
| * border from split point */ |
| if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { |
| EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); |
| return -EFSCORRUPTED; |
| } |
| if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { |
| border = path[depth].p_ext[1].ee_block; |
| ext_debug("leaf will be split." |
| " next leaf starts at %d\n", |
| le32_to_cpu(border)); |
| } else { |
| border = newext->ee_block; |
| ext_debug("leaf will be added." |
| " next leaf starts at %d\n", |
| le32_to_cpu(border)); |
| } |
| |
| /* |
| * If an error occurs, we break processing |
| * and mark the filesystem read-only. The index won't |
| * be inserted and the tree will remain in a consistent |
| * state. The next mount will repair the buffers, too. |
| */ |
| |
| /* |
| * Get an array to track all allocated blocks. |
| * We need this to handle errors and free the |
| * allocated blocks on failure. |
| */ |
| ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); |
| if (!ablocks) |
| return -ENOMEM; |
| |
| /* allocate all needed blocks */ |
| ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); |
| for (a = 0; a < depth - at; a++) { |
| newblock = ext4_ext_new_meta_block(handle, inode, path, |
| newext, &err, flags); |
| if (newblock == 0) |
| goto cleanup; |
| ablocks[a] = newblock; |
| } |
| |
| /* initialize new leaf */ |
| newblock = ablocks[--a]; |
| if (unlikely(newblock == 0)) { |
| EXT4_ERROR_INODE(inode, "newblock == 0!"); |
| err = -EFSCORRUPTED; |
| goto cleanup; |
| } |
| bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); |
| if (unlikely(!bh)) { |
| err = -ENOMEM; |
| goto cleanup; |
| } |
| lock_buffer(bh); |
| |
| err = ext4_journal_get_create_access(handle, bh); |
| if (err) |
| goto cleanup; |
| |
| neh = ext_block_hdr(bh); |
| neh->eh_entries = 0; |
| neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); |
| neh->eh_magic = EXT4_EXT_MAGIC; |
| neh->eh_depth = 0; |
| |
| /* move remainder of path[depth] to the new leaf */ |
| if (unlikely(path[depth].p_hdr->eh_entries != |
| path[depth].p_hdr->eh_max)) { |
| EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", |
| path[depth].p_hdr->eh_entries, |
| path[depth].p_hdr->eh_max); |
| err = -EFSCORRUPTED; |
| goto cleanup; |
| } |
| /* start copy from next extent */ |
| m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; |
| ext4_ext_show_move(inode, path, newblock, depth); |
| if (m) { |
| struct ext4_extent *ex; |
| ex = EXT_FIRST_EXTENT(neh); |
| memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); |
| le16_add_cpu(&neh->eh_entries, m); |
| } |
| |
| ext4_extent_block_csum_set(inode, neh); |
| set_buffer_uptodate(bh); |
| unlock_buffer(bh); |
| |
| err = ext4_handle_dirty_metadata(handle, inode, bh); |
| if (err) |
| goto cleanup; |
| brelse(bh); |
| bh = NULL; |
| |
| /* correct old leaf */ |
| if (m) { |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| goto cleanup; |
| le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); |
| err = ext4_ext_dirty(handle, inode, path + depth); |
| if (err) |
| goto cleanup; |
| |
| } |
| |
| /* create intermediate indexes */ |
| k = depth - at - 1; |
| if (unlikely(k < 0)) { |
| EXT4_ERROR_INODE(inode, "k %d < 0!", k); |
| err = -EFSCORRUPTED; |
| goto cleanup; |
| } |
| if (k) |
| ext_debug("create %d intermediate indices\n", k); |
| /* insert new index into current index block */ |
| /* current depth stored in i var */ |
| i = depth - 1; |
| while (k--) { |
| oldblock = newblock; |
| newblock = ablocks[--a]; |
| bh = sb_getblk(inode->i_sb, newblock); |
| if (unlikely(!bh)) { |
| err = -ENOMEM; |
| goto cleanup; |
| } |
| lock_buffer(bh); |
| |
| err = ext4_journal_get_create_access(handle, bh); |
| if (err) |
| goto cleanup; |
| |
| neh = ext_block_hdr(bh); |
| neh->eh_entries = cpu_to_le16(1); |
| neh->eh_magic = EXT4_EXT_MAGIC; |
| neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); |
| neh->eh_depth = cpu_to_le16(depth - i); |
| fidx = EXT_FIRST_INDEX(neh); |
| fidx->ei_block = border; |
| ext4_idx_store_pblock(fidx, oldblock); |
| |
| ext_debug("int.index at %d (block %llu): %u -> %llu\n", |
| i, newblock, le32_to_cpu(border), oldblock); |
| |
| /* move remainder of path[i] to the new index block */ |
| if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != |
| EXT_LAST_INDEX(path[i].p_hdr))) { |
| EXT4_ERROR_INODE(inode, |
| "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", |
| le32_to_cpu(path[i].p_ext->ee_block)); |
| err = -EFSCORRUPTED; |
| goto cleanup; |
| } |
| /* start copy indexes */ |
| m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; |
| ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, |
| EXT_MAX_INDEX(path[i].p_hdr)); |
| ext4_ext_show_move(inode, path, newblock, i); |
| if (m) { |
| memmove(++fidx, path[i].p_idx, |
| sizeof(struct ext4_extent_idx) * m); |
| le16_add_cpu(&neh->eh_entries, m); |
| } |
| ext4_extent_block_csum_set(inode, neh); |
| set_buffer_uptodate(bh); |
| unlock_buffer(bh); |
| |
| err = ext4_handle_dirty_metadata(handle, inode, bh); |
| if (err) |
| goto cleanup; |
| brelse(bh); |
| bh = NULL; |
| |
| /* correct old index */ |
| if (m) { |
| err = ext4_ext_get_access(handle, inode, path + i); |
| if (err) |
| goto cleanup; |
| le16_add_cpu(&path[i].p_hdr->eh_entries, -m); |
| err = ext4_ext_dirty(handle, inode, path + i); |
| if (err) |
| goto cleanup; |
| } |
| |
| i--; |
| } |
| |
| /* insert new index */ |
| err = ext4_ext_insert_index(handle, inode, path + at, |
| le32_to_cpu(border), newblock); |
| |
| cleanup: |
| if (bh) { |
| if (buffer_locked(bh)) |
| unlock_buffer(bh); |
| brelse(bh); |
| } |
| |
| if (err) { |
| /* free all allocated blocks in error case */ |
| for (i = 0; i < depth; i++) { |
| if (!ablocks[i]) |
| continue; |
| ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, |
| EXT4_FREE_BLOCKS_METADATA); |
| } |
| } |
| kfree(ablocks); |
| |
| return err; |
| } |
| |
| /* |
| * ext4_ext_grow_indepth: |
| * implements tree growing procedure: |
| * - allocates new block |
| * - moves top-level data (index block or leaf) into the new block |
| * - initializes new top-level, creating index that points to the |
| * just created block |
| */ |
| static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, |
| unsigned int flags) |
| { |
| struct ext4_extent_header *neh; |
| struct buffer_head *bh; |
| ext4_fsblk_t newblock, goal = 0; |
| struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; |
| int err = 0; |
| |
| /* Try to prepend new index to old one */ |
| if (ext_depth(inode)) |
| goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode))); |
| if (goal > le32_to_cpu(es->s_first_data_block)) { |
| flags |= EXT4_MB_HINT_TRY_GOAL; |
| goal--; |
| } else |
| goal = ext4_inode_to_goal_block(inode); |
| newblock = ext4_new_meta_blocks(handle, inode, goal, flags, |
| NULL, &err); |
| if (newblock == 0) |
| return err; |
| |
| bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); |
| if (unlikely(!bh)) |
| return -ENOMEM; |
| lock_buffer(bh); |
| |
| err = ext4_journal_get_create_access(handle, bh); |
| if (err) { |
| unlock_buffer(bh); |
| goto out; |
| } |
| |
| /* move top-level index/leaf into new block */ |
| memmove(bh->b_data, EXT4_I(inode)->i_data, |
| sizeof(EXT4_I(inode)->i_data)); |
| |
| /* set size of new block */ |
| neh = ext_block_hdr(bh); |
| /* old root could have indexes or leaves |
| * so calculate eh_max the right way */ |
| if (ext_depth(inode)) |
| neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); |
| else |
| neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); |
| neh->eh_magic = EXT4_EXT_MAGIC; |
| ext4_extent_block_csum_set(inode, neh); |
| set_buffer_uptodate(bh); |
| unlock_buffer(bh); |
| |
| err = ext4_handle_dirty_metadata(handle, inode, bh); |
| if (err) |
| goto out; |
| |
| /* Update top-level index: num,max,pointer */ |
| neh = ext_inode_hdr(inode); |
| neh->eh_entries = cpu_to_le16(1); |
| ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); |
| if (neh->eh_depth == 0) { |
| /* Root extent block becomes index block */ |
| neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); |
| EXT_FIRST_INDEX(neh)->ei_block = |
| EXT_FIRST_EXTENT(neh)->ee_block; |
| } |
| ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", |
| le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), |
| le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), |
| ext4_idx_pblock(EXT_FIRST_INDEX(neh))); |
| |
| le16_add_cpu(&neh->eh_depth, 1); |
| ext4_mark_inode_dirty(handle, inode); |
| out: |
| brelse(bh); |
| |
| return err; |
| } |
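| |
| /* |
| * Before/after sketch: growing a depth-0 tree copies the extents that |
| * lived in i_data into a freshly allocated block and leaves a single |
| * index entry in i_data pointing at it, so a root that held at most 4 |
| * extents now fronts a leaf holding up to 340 (4 KiB blocks assumed). |
| */ |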
| |
| /* |
| * ext4_ext_create_new_leaf: |
| * finds an empty index slot and adds a new leaf. |
| * if no free index is found, then it requests growing the tree in depth. |
| */ |
| static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, |
| unsigned int mb_flags, |
| unsigned int gb_flags, |
| struct ext4_ext_path **ppath, |
| struct ext4_extent *newext) |
| { |
| struct ext4_ext_path *path = *ppath; |
| struct ext4_ext_path *curp; |
| int depth, i, err = 0; |
| |
| repeat: |
| i = depth = ext_depth(inode); |
| |
| /* walk up to the tree and look for free index entry */ |
| curp = path + depth; |
| while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { |
| i--; |
| curp--; |
| } |
| |
| /* we use the already allocated block for the index block, |
| * so subsequent data blocks should be contiguous */ |
| if (EXT_HAS_FREE_INDEX(curp)) { |
| /* if we found index with free entry, then use that |
| * entry: create all needed subtree and add new leaf */ |
| err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); |
| if (err) |
| goto out; |
| |
| /* refill path */ |
| path = ext4_find_extent(inode, |
| (ext4_lblk_t)le32_to_cpu(newext->ee_block), |
| ppath, gb_flags); |
| if (IS_ERR(path)) |
| err = PTR_ERR(path); |
| } else { |
| /* tree is full, time to grow in depth */ |
| err = ext4_ext_grow_indepth(handle, inode, mb_flags); |
| if (err) |
| goto out; |
| |
| /* refill path */ |
| path = ext4_find_extent(inode, |
| (ext4_lblk_t)le32_to_cpu(newext->ee_block), |
| ppath, gb_flags); |
| if (IS_ERR(path)) { |
| err = PTR_ERR(path); |
| goto out; |
| } |
| |
| /* |
| * only first (depth 0 -> 1) produces free space; |
| * in all other cases we have to split the grown tree |
| */ |
| depth = ext_depth(inode); |
| if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { |
| /* now we need to split */ |
| goto repeat; |
| } |
| } |
| |
| out: |
| return err; |
| } |
| |
| /* |
| * search for the closest allocated block to the left of *logical |
| * and return it at @logical + its physical address at @phys. |
| * if *logical is the smallest allocated block, the function |
| * returns 0 at @phys. |
| * the return value contains 0 (success) or an error code. |
| */ |
| static int ext4_ext_search_left(struct inode *inode, |
| struct ext4_ext_path *path, |
| ext4_lblk_t *logical, ext4_fsblk_t *phys) |
| { |
| struct ext4_extent_idx *ix; |
| struct ext4_extent *ex; |
| int depth, ee_len; |
| |
| if (unlikely(path == NULL)) { |
| EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); |
| return -EFSCORRUPTED; |
| } |
| depth = path->p_depth; |
| *phys = 0; |
| |
| if (depth == 0 && path->p_ext == NULL) |
| return 0; |
| |
| /* usually the extent in the path covers blocks smaller |
| * than *logical, but it can be that the extent is the |
| * first one in the file */ |
| |
| ex = path[depth].p_ext; |
| ee_len = ext4_ext_get_actual_len(ex); |
| if (*logical < le32_to_cpu(ex->ee_block)) { |
| if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
| EXT4_ERROR_INODE(inode, |
| "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", |
| *logical, le32_to_cpu(ex->ee_block)); |
| return -EFSCORRUPTED; |
| } |
| while (--depth >= 0) { |
| ix = path[depth].p_idx; |
| if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
| EXT4_ERROR_INODE(inode, |
| "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", |
| ix != NULL ? le32_to_cpu(ix->ei_block) : 0, |
| EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? |
| le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, |
| depth); |
| return -EFSCORRUPTED; |
| } |
| } |
| return 0; |
| } |
| |
| if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
| EXT4_ERROR_INODE(inode, |
| "logical %d < ee_block %d + ee_len %d!", |
| *logical, le32_to_cpu(ex->ee_block), ee_len); |
| return -EFSCORRUPTED; |
| } |
| |
| *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; |
| *phys = ext4_ext_pblock(ex) + ee_len - 1; |
| return 0; |
| } |
| |
| /* |
| * search for the closest allocated block to the right of *logical |
| * and return it at @logical + its physical address at @phys. |
| * if *logical is the largest allocated block, the function |
| * returns 0 at @phys. |
| * the return value contains 0 (success) or an error code. |
| */ |
| static int ext4_ext_search_right(struct inode *inode, |
| struct ext4_ext_path *path, |
| ext4_lblk_t *logical, ext4_fsblk_t *phys, |
| struct ext4_extent **ret_ex) |
| { |
| struct buffer_head *bh = NULL; |
| struct ext4_extent_header *eh; |
| struct ext4_extent_idx *ix; |
| struct ext4_extent *ex; |
| ext4_fsblk_t block; |
| int depth; /* Note, NOT eh_depth; depth from top of tree */ |
| int ee_len; |
| |
| if (unlikely(path == NULL)) { |
| EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); |
| return -EFSCORRUPTED; |
| } |
| depth = path->p_depth; |
| *phys = 0; |
| |
| if (depth == 0 && path->p_ext == NULL) |
| return 0; |
| |
| /* usually the extent in the path covers blocks smaller |
| * than *logical, but it can be that the extent is the |
| * first one in the file */ |
| |
| ex = path[depth].p_ext; |
| ee_len = ext4_ext_get_actual_len(ex); |
| if (*logical < le32_to_cpu(ex->ee_block)) { |
| if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
| EXT4_ERROR_INODE(inode, |
| "first_extent(path[%d].p_hdr) != ex", |
| depth); |
| return -EFSCORRUPTED; |
| } |
| while (--depth >= 0) { |
| ix = path[depth].p_idx; |
| if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
| EXT4_ERROR_INODE(inode, |
| "ix != EXT_FIRST_INDEX *logical %d!", |
| *logical); |
| return -EFSCORRUPTED; |
| } |
| } |
| goto found_extent; |
| } |
| |
| if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
| EXT4_ERROR_INODE(inode, |
| "logical %d < ee_block %d + ee_len %d!", |
| *logical, le32_to_cpu(ex->ee_block), ee_len); |
| return -EFSCORRUPTED; |
| } |
| |
| if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { |
| /* next allocated block in this leaf */ |
| ex++; |
| goto found_extent; |
| } |
| |
| /* go up and search for index to the right */ |
| while (--depth >= 0) { |
| ix = path[depth].p_idx; |
| if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) |
| goto got_index; |
| } |
| |
| /* we've gone up to the root and found no index to the right */ |
| return 0; |
| |
| got_index: |
| /* we've found index to the right, let's |
| * follow it and find the closest allocated |
| * block to the right */ |
| ix++; |
| block = ext4_idx_pblock(ix); |
| while (++depth < path->p_depth) { |
| /* subtract from p_depth to get proper eh_depth */ |
| bh = read_extent_tree_block(inode, block, |
| path->p_depth - depth, 0); |
| if (IS_ERR(bh)) |
| return PTR_ERR(bh); |
| eh = ext_block_hdr(bh); |
| ix = EXT_FIRST_INDEX(eh); |
| block = ext4_idx_pblock(ix); |
| put_bh(bh); |
| } |
| |
| bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); |
| if (IS_ERR(bh)) |
| return PTR_ERR(bh); |
| eh = ext_block_hdr(bh); |
| ex = EXT_FIRST_EXTENT(eh); |
| found_extent: |
| *logical = le32_to_cpu(ex->ee_block); |
| *phys = ext4_ext_pblock(ex); |
| *ret_ex = ex; |
| if (bh) |
| put_bh(bh); |
| return 0; |
| } |
| |
| /* |
| * ext4_ext_next_allocated_block: |
| * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. |
| * NOTE: it considers block number from index entry as |
| * allocated block. Thus, index entries have to be consistent |
| * with leaves. |
| */ |
| ext4_lblk_t |
| ext4_ext_next_allocated_block(struct ext4_ext_path *path) |
| { |
| int depth; |
| |
| BUG_ON(path == NULL); |
| depth = path->p_depth; |
| |
| if (depth == 0 && path->p_ext == NULL) |
| return EXT_MAX_BLOCKS; |
| |
| while (depth >= 0) { |
| if (depth == path->p_depth) { |
| /* leaf */ |
| if (path[depth].p_ext && |
| path[depth].p_ext != |
| EXT_LAST_EXTENT(path[depth].p_hdr)) |
| return le32_to_cpu(path[depth].p_ext[1].ee_block); |
| } else { |
| /* index */ |
| if (path[depth].p_idx != |
| EXT_LAST_INDEX(path[depth].p_hdr)) |
| return le32_to_cpu(path[depth].p_idx[1].ei_block); |
| } |
| depth--; |
| } |
| |
| return EXT_MAX_BLOCKS; |
| } |
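| |
| /* |
| * Illustration: if the path ends at an extent covering logical blocks |
| * 100..109 and the next extent in the same leaf starts at 200, this |
| * returns 200. If that extent is the last in its leaf, we walk up |
| * until some level has an index entry to the right and return its |
| * ei_block; only with no right neighbour anywhere is EXT_MAX_BLOCKS |
| * returned. |
| */ |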
| |
| /* |
| * ext4_ext_next_leaf_block: |
| * returns first allocated block from next leaf or EXT_MAX_BLOCKS |
| */ |
| static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) |
| { |
| int depth; |
| |
| BUG_ON(path == NULL); |
| depth = path->p_depth; |
| |
| /* a zero-depth tree has no leaf blocks at all */ |
| if (depth == 0) |
| return EXT_MAX_BLOCKS; |
| |
| /* go to index block */ |
| depth--; |
| |
| while (depth >= 0) { |
| if (path[depth].p_idx != |
| EXT_LAST_INDEX(path[depth].p_hdr)) |
| return (ext4_lblk_t) |
| le32_to_cpu(path[depth].p_idx[1].ei_block); |
| depth--; |
| } |
| |
| return EXT_MAX_BLOCKS; |
| } |
| |
| /* |
| * ext4_ext_correct_indexes: |
| * if the leaf gets modified and the modified extent is the first in |
| * the leaf, then we have to correct all indexes above. |
| * TODO: do we need to correct tree in all cases? |
| */ |
| static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path) |
| { |
| struct ext4_extent_header *eh; |
| int depth = ext_depth(inode); |
| struct ext4_extent *ex; |
| __le32 border; |
| int k, err = 0; |
| |
| eh = path[depth].p_hdr; |
| ex = path[depth].p_ext; |
| |
| if (unlikely(ex == NULL || eh == NULL)) { |
| EXT4_ERROR_INODE(inode, |
| "ex %p == NULL or eh %p == NULL", ex, eh); |
| return -EFSCORRUPTED; |
| } |
| |
| if (depth == 0) { |
| /* there is no tree at all */ |
| return 0; |
| } |
| |
| if (ex != EXT_FIRST_EXTENT(eh)) { |
| /* we correct tree if first leaf got modified only */ |
| return 0; |
| } |
| |
| /* |
| * TODO: we need correction if border is smaller than current one |
| */ |
| k = depth - 1; |
| border = path[depth].p_ext->ee_block; |
| err = ext4_ext_get_access(handle, inode, path + k); |
| if (err) |
| return err; |
| path[k].p_idx->ei_block = border; |
| err = ext4_ext_dirty(handle, inode, path + k); |
| if (err) |
| return err; |
| |
| while (k--) { |
| /* change all left-side indexes */ |
| if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) |
| break; |
| err = ext4_ext_get_access(handle, inode, path + k); |
| if (err) |
| break; |
| path[k].p_idx->ei_block = border; |
| err = ext4_ext_dirty(handle, inode, path + k); |
| if (err) |
| break; |
| } |
| |
| return err; |
| } |
| |
| int |
| ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, |
| struct ext4_extent *ex2) |
| { |
| unsigned short ext1_ee_len, ext2_ee_len; |
| |
| if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) |
| return 0; |
| |
| ext1_ee_len = ext4_ext_get_actual_len(ex1); |
| ext2_ee_len = ext4_ext_get_actual_len(ex2); |
| |
| if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != |
| le32_to_cpu(ex2->ee_block)) |
| return 0; |
| |
| /* |
| * To allow future support for preallocated extents to be added |
| * as an RO_COMPAT feature, refuse to merge two extents if |
| * this can result in the top bit of ee_len being set. |
| */ |
| if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) |
| return 0; |
| /* |
| * The check for IO to unwritten extent is somewhat racy as we |
| * increment i_unwritten / set EXT4_STATE_DIO_UNWRITTEN only after |
| * dropping i_data_sem. But reserved blocks should save us in that |
| * case. |
| */ |
| if (ext4_ext_is_unwritten(ex1) && |
| (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) || |
| atomic_read(&EXT4_I(inode)->i_unwritten) || |
| (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN))) |
| return 0; |
| #ifdef AGGRESSIVE_TEST |
| if (ext1_ee_len >= 4) |
| return 0; |
| #endif |
| |
| if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) |
| return 1; |
| return 0; |
| } |
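| |
| /* |
| * Illustration: ex1 = logical [100, +10) at physical 5000 and |
| * ex2 = logical [110, +5) at physical 5010 are contiguous in both |
| * spaces with matching written/unwritten state, so they may merge |
| * into [100, +15) at 5000. A gap in either space, differing state, |
| * or a combined length above the ee_len limits defeats the merge. |
| */ |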
| |
| /* |
| * This function tries to merge the "ex" extent to the next extent in the tree. |
| * It always tries to merge towards right. If you want to merge towards |
| * left, pass "ex - 1" as argument instead of "ex". |
| * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns |
| * 1 if they got merged. |
| */ |
| static int ext4_ext_try_to_merge_right(struct inode *inode, |
| struct ext4_ext_path *path, |
| struct ext4_extent *ex) |
| { |
| struct ext4_extent_header *eh; |
| unsigned int depth, len; |
| int merge_done = 0, unwritten; |
| |
| depth = ext_depth(inode); |
| BUG_ON(path[depth].p_hdr == NULL); |
| eh = path[depth].p_hdr; |
| |
| while (ex < EXT_LAST_EXTENT(eh)) { |
| if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) |
| break; |
| /* merge with next extent! */ |
| unwritten = ext4_ext_is_unwritten(ex); |
| ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
| + ext4_ext_get_actual_len(ex + 1)); |
| if (unwritten) |
| ext4_ext_mark_unwritten(ex); |
| |
| if (ex + 1 < EXT_LAST_EXTENT(eh)) { |
| len = (EXT_LAST_EXTENT(eh) - ex - 1) |
| * sizeof(struct ext4_extent); |
| memmove(ex + 1, ex + 2, len); |
| } |
| le16_add_cpu(&eh->eh_entries, -1); |
| merge_done = 1; |
| WARN_ON(eh->eh_entries == 0); |
| if (!eh->eh_entries) |
| EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); |
| } |
| |
| return merge_done; |
| } |
| |
| /* |
| * This function does a very simple check to see if we can collapse |
| * an extent tree with a single extent tree leaf block into the inode. |
| */ |
| static void ext4_ext_try_to_merge_up(handle_t *handle, |
| struct inode *inode, |
| struct ext4_ext_path *path) |
| { |
| size_t s; |
| unsigned max_root = ext4_ext_space_root(inode, 0); |
| ext4_fsblk_t blk; |
| |
| if ((path[0].p_depth != 1) || |
| (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || |
| (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) |
| return; |
| |
| /* |
| * We need to modify the block allocation bitmap and the block |
| * group descriptor to release the extent tree block. If we |
| * can't get the journal credits, give up. |
| */ |
| if (ext4_journal_extend(handle, 2)) |
| return; |
| |
| /* |
| * Copy the extent data up to the inode |
| */ |
| blk = ext4_idx_pblock(path[0].p_idx); |
| s = le16_to_cpu(path[1].p_hdr->eh_entries) * |
| sizeof(struct ext4_extent_idx); |
| s += sizeof(struct ext4_extent_header); |
| |
| path[1].p_maxdepth = path[0].p_maxdepth; |
| memcpy(path[0].p_hdr, path[1].p_hdr, s); |
| path[0].p_depth = 0; |
| path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + |
| (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); |
| path[0].p_hdr->eh_max = cpu_to_le16(max_root); |
| |
| brelse(path[1].p_bh); |
| ext4_free_blocks(handle, inode, NULL, blk, 1, |
| EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); |
| } |
| |
| /* |
| * This function tries to merge the @ex extent with its neighbours in |
| * the tree: first with the left neighbour, then with the right one, |
| * and finally it tries to collapse a single-leaf tree into the inode. |
| */ |
| static void ext4_ext_try_to_merge(handle_t *handle, |
| struct inode *inode, |
| struct ext4_ext_path *path, |
| struct ext4_extent *ex) { |
| struct ext4_extent_header *eh; |
| unsigned int depth; |
| int merge_done = 0; |
| |
| depth = ext_depth(inode); |
| BUG_ON(path[depth].p_hdr == NULL); |
| eh = path[depth].p_hdr; |
| |
| if (ex > EXT_FIRST_EXTENT(eh)) |
| merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); |
| |
| if (!merge_done) |
| (void) ext4_ext_try_to_merge_right(inode, path, ex); |
| |
| ext4_ext_try_to_merge_up(handle, inode, path); |
| } |
| |
| /* |
| * check if a portion of the "newext" extent overlaps with an |
| * existing extent. |
| * |
| * If there is an overlap discovered, it updates the length of the newext |
| * such that there will be no overlap, and then returns 1. |
| * If there is no overlap found, it returns 0. |
| */ |
| static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, |
| struct inode *inode, |
| struct ext4_extent *newext, |
| struct ext4_ext_path *path) |
| { |
| ext4_lblk_t b1, b2; |
| unsigned int depth, len1; |
| unsigned int ret = 0; |
| |
| b1 = le32_to_cpu(newext->ee_block); |
| len1 = ext4_ext_get_actual_len(newext); |
| depth = ext_depth(inode); |
| if (!path[depth].p_ext) |
| goto out; |
| b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); |
| |
| /* |
| * get the next allocated block if the extent in the path |
| * is before the requested block(s) |
| */ |
| if (b2 < b1) { |
| b2 = ext4_ext_next_allocated_block(path); |
| if (b2 == EXT_MAX_BLOCKS) |
| goto out; |
| b2 = EXT4_LBLK_CMASK(sbi, b2); |
| } |
| |
| /* check for wrap through zero on extent logical start block*/ |
| if (b1 + len1 < b1) { |
| len1 = EXT_MAX_BLOCKS - b1; |
| newext->ee_len = cpu_to_le16(len1); |
| ret = 1; |
| } |
| |
| /* check for overlap */ |
| if (b1 + len1 > b2) { |
| newext->ee_len = cpu_to_le16(b2 - b1); |
| ret = 1; |
| } |
| out: |
| return ret; |
| } |
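| |
| /* |
| * Illustration (ignoring bigalloc cluster rounding): inserting newext |
| * at logical block 100 with length 50 while an existing extent starts |
| * at block 120 trims newext to length 20 (blocks 100..119) and returns |
| * 1, so the caller inserts only the non-overlapping head of the range. |
| */ |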
| |
| /* |
| * ext4_ext_insert_extent: |
| * tries to merge the requested extent into the existing extent or |
| * inserts requested extent as new one into the tree, |
| * creating new leaf in the no-space case. |
| */ |
| int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path **ppath, |
| struct ext4_extent *newext, int gb_flags) |
| { |
| struct ext4_ext_path *path = *ppath; |
| struct ext4_extent_header *eh; |
| struct ext4_extent *ex, *fex; |
| struct ext4_extent *nearex; /* nearest extent */ |
| struct ext4_ext_path *npath = NULL; |
| int depth, len, err; |
| ext4_lblk_t next; |
| int mb_flags = 0, unwritten; |
| |
| if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) |
| mb_flags |= EXT4_MB_DELALLOC_RESERVED; |
| if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { |
| EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); |
| return -EFSCORRUPTED; |
| } |
| depth = ext_depth(inode); |
| ex = path[depth].p_ext; |
| eh = path[depth].p_hdr; |
| if (unlikely(path[depth].p_hdr == NULL)) { |
| EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); |
| return -EFSCORRUPTED; |
| } |
| |
| /* try to insert block into found extent and return */ |
| if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { |
| |
| /* |
| * Try to see whether we should rather test the extent on |
| * right from ex, or from the left of ex. This is because |
| * ext4_find_extent() can return either extent on the |
| * left, or on the right from the searched position. This |
| * will make merging more effective. |
| */ |
| if (ex < EXT_LAST_EXTENT(eh) && |
| (le32_to_cpu(ex->ee_block) + |
| ext4_ext_get_actual_len(ex) < |
| le32_to_cpu(newext->ee_block))) { |
| ex += 1; |
| goto prepend; |
| } else if ((ex > EXT_FIRST_EXTENT(eh)) && |
| (le32_to_cpu(newext->ee_block) + |
| ext4_ext_get_actual_len(newext) < |
| le32_to_cpu(ex->ee_block))) |
| ex -= 1; |
| |
| /* Try to append newex to the ex */ |
| if (ext4_can_extents_be_merged(inode, ex, newext)) { |
| ext_debug("append [%d]%d block to %u:[%d]%d" |
| "(from %llu)\n", |
| ext4_ext_is_unwritten(newext), |
| ext4_ext_get_actual_len(newext), |
| le32_to_cpu(ex->ee_block), |
| ext4_ext_is_unwritten(ex), |
| ext4_ext_get_actual_len(ex), |
| ext4_ext_pblock(ex)); |
| err = ext4_ext_get_access(handle, inode, |
| path + depth); |
| if (err) |
| return err; |
| unwritten = ext4_ext_is_unwritten(ex); |
| ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
| + ext4_ext_get_actual_len(newext)); |
| if (unwritten) |
| ext4_ext_mark_unwritten(ex); |
| eh = path[depth].p_hdr; |
| nearex = ex; |
| goto merge; |
| } |
| |
| prepend: |
| /* Try to prepend newex to the ex */ |
| if (ext4_can_extents_be_merged(inode, newext, ex)) { |
| ext_debug("prepend %u[%d]%d block to %u:[%d]%d" |
| "(from %llu)\n", |
| le32_to_cpu(newext->ee_block), |
| ext4_ext_is_unwritten(newext), |
| ext4_ext_get_actual_len(newext), |
| le32_to_cpu(ex->ee_block), |
| ext4_ext_is_unwritten(ex), |
| ext4_ext_get_actual_len(ex), |
| ext4_ext_pblock(ex)); |
| err = ext4_ext_get_access(handle, inode, |
| path + depth); |
| if (err) |
| return err; |
| |
| unwritten = ext4_ext_is_unwritten(ex); |
| ex->ee_block = newext->ee_block; |
| ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); |
| ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
| + ext4_ext_get_actual_len(newext)); |
| if (unwritten) |
| ext4_ext_mark_unwritten(ex); |
| eh = path[depth].p_hdr; |
| nearex = ex; |
| goto merge; |
| } |
| } |
| |
| depth = ext_depth(inode); |
| eh = path[depth].p_hdr; |
| if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) |
| goto has_space; |
| |
/* perhaps the next leaf has space for us? */
| fex = EXT_LAST_EXTENT(eh); |
| next = EXT_MAX_BLOCKS; |
| if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) |
| next = ext4_ext_next_leaf_block(path); |
| if (next != EXT_MAX_BLOCKS) { |
| ext_debug("next leaf block - %u\n", next); |
| BUG_ON(npath != NULL); |
| npath = ext4_find_extent(inode, next, NULL, 0); |
| if (IS_ERR(npath)) |
| return PTR_ERR(npath); |
| BUG_ON(npath->p_depth != path->p_depth); |
| eh = npath[depth].p_hdr; |
| if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { |
| ext_debug("next leaf isn't full(%d)\n", |
| le16_to_cpu(eh->eh_entries)); |
| path = npath; |
| goto has_space; |
| } |
| ext_debug("next leaf has no free space(%d,%d)\n", |
| le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); |
| } |
| |
| /* |
| * There is no free space in the found leaf. |
| * We're gonna add a new leaf in the tree. |
| */ |
| if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) |
| mb_flags |= EXT4_MB_USE_RESERVED; |
| err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, |
| ppath, newext); |
| if (err) |
| goto cleanup; |
| depth = ext_depth(inode); |
| eh = path[depth].p_hdr; |
| |
| has_space: |
| nearex = path[depth].p_ext; |
| |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| goto cleanup; |
| |
| if (!nearex) { |
| /* there is no extent in this leaf, create first one */ |
| ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", |
| le32_to_cpu(newext->ee_block), |
| ext4_ext_pblock(newext), |
| ext4_ext_is_unwritten(newext), |
| ext4_ext_get_actual_len(newext)); |
| nearex = EXT_FIRST_EXTENT(eh); |
| } else { |
| if (le32_to_cpu(newext->ee_block) |
| > le32_to_cpu(nearex->ee_block)) { |
| /* Insert after */ |
| ext_debug("insert %u:%llu:[%d]%d before: " |
| "nearest %p\n", |
| le32_to_cpu(newext->ee_block), |
| ext4_ext_pblock(newext), |
| ext4_ext_is_unwritten(newext), |
| ext4_ext_get_actual_len(newext), |
| nearex); |
| nearex++; |
| } else { |
| /* Insert before */ |
| BUG_ON(newext->ee_block == nearex->ee_block); |
| ext_debug("insert %u:%llu:[%d]%d after: " |
| "nearest %p\n", |
| le32_to_cpu(newext->ee_block), |
| ext4_ext_pblock(newext), |
| ext4_ext_is_unwritten(newext), |
| ext4_ext_get_actual_len(newext), |
| nearex); |
| } |
| len = EXT_LAST_EXTENT(eh) - nearex + 1; |
| if (len > 0) { |
| ext_debug("insert %u:%llu:[%d]%d: " |
| "move %d extents from 0x%p to 0x%p\n", |
| le32_to_cpu(newext->ee_block), |
| ext4_ext_pblock(newext), |
| ext4_ext_is_unwritten(newext), |
| ext4_ext_get_actual_len(newext), |
| len, nearex, nearex + 1); |
| memmove(nearex + 1, nearex, |
| len * sizeof(struct ext4_extent)); |
| } |
| } |
| |
| le16_add_cpu(&eh->eh_entries, 1); |
| path[depth].p_ext = nearex; |
| nearex->ee_block = newext->ee_block; |
| ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); |
| nearex->ee_len = newext->ee_len; |
| |
| merge: |
| /* try to merge extents */ |
| if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) |
| ext4_ext_try_to_merge(handle, inode, path, nearex); |
| |
| |
| /* time to correct all indexes above */ |
| err = ext4_ext_correct_indexes(handle, inode, path); |
| if (err) |
| goto cleanup; |
| |
| err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
| |
| cleanup: |
| ext4_ext_drop_refs(npath); |
| kfree(npath); |
| return err; |
| } |
| |
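/*
 * ext4_fill_fiemap_extents:
 * walk the extent tree over [@block, @block + @num) and report each
 * written or delayed range found to userspace through
 * fiemap_fill_next_extent().
 */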
| static int ext4_fill_fiemap_extents(struct inode *inode, |
| ext4_lblk_t block, ext4_lblk_t num, |
| struct fiemap_extent_info *fieinfo) |
| { |
| struct ext4_ext_path *path = NULL; |
| struct ext4_extent *ex; |
| struct extent_status es; |
| ext4_lblk_t next, next_del, start = 0, end = 0; |
| ext4_lblk_t last = block + num; |
| int exists, depth = 0, err = 0; |
| unsigned int flags = 0; |
| unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; |
| |
| while (block < last && block != EXT_MAX_BLOCKS) { |
| num = last - block; |
| /* find extent for this block */ |
| down_read(&EXT4_I(inode)->i_data_sem); |
| |
| path = ext4_find_extent(inode, block, &path, 0); |
| if (IS_ERR(path)) { |
| up_read(&EXT4_I(inode)->i_data_sem); |
| err = PTR_ERR(path); |
| path = NULL; |
| break; |
| } |
| |
| depth = ext_depth(inode); |
| if (unlikely(path[depth].p_hdr == NULL)) { |
| up_read(&EXT4_I(inode)->i_data_sem); |
| EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); |
| err = -EFSCORRUPTED; |
| break; |
| } |
| ex = path[depth].p_ext; |
| next = ext4_ext_next_allocated_block(path); |
| |
| flags = 0; |
| exists = 0; |
| if (!ex) { |
| /* there is no extent yet, so try to allocate |
| * all requested space */ |
| start = block; |
| end = block + num; |
| } else if (le32_to_cpu(ex->ee_block) > block) { |
| /* need to allocate space before found extent */ |
| start = block; |
| end = le32_to_cpu(ex->ee_block); |
| if (block + num < end) |
| end = block + num; |
| } else if (block >= le32_to_cpu(ex->ee_block) |
| + ext4_ext_get_actual_len(ex)) { |
| /* need to allocate space after found extent */ |
| start = block; |
| end = block + num; |
| if (end >= next) |
| end = next; |
| } else if (block >= le32_to_cpu(ex->ee_block)) { |
| /* |
| * some part of requested space is covered |
| * by found extent |
| */ |
| start = block; |
| end = le32_to_cpu(ex->ee_block) |
| + ext4_ext_get_actual_len(ex); |
| if (block + num < end) |
| end = block + num; |
| exists = 1; |
| } else { |
| BUG(); |
| } |
| BUG_ON(end <= start); |
| |
| if (!exists) { |
| es.es_lblk = start; |
| es.es_len = end - start; |
| es.es_pblk = 0; |
| } else { |
| es.es_lblk = le32_to_cpu(ex->ee_block); |
| es.es_len = ext4_ext_get_actual_len(ex); |
| es.es_pblk = ext4_ext_pblock(ex); |
| if (ext4_ext_is_unwritten(ex)) |
| flags |= FIEMAP_EXTENT_UNWRITTEN; |
| } |
| |
| /* |
| * Find delayed extent and update es accordingly. We call |
| * it even in !exists case to find out whether es is the |
| * last existing extent or not. |
| */ |
| next_del = ext4_find_delayed_extent(inode, &es); |
| if (!exists && next_del) { |
| exists = 1; |
| flags |= (FIEMAP_EXTENT_DELALLOC | |
| FIEMAP_EXTENT_UNKNOWN); |
| } |
| up_read(&EXT4_I(inode)->i_data_sem); |
| |
| if (unlikely(es.es_len == 0)) { |
| EXT4_ERROR_INODE(inode, "es.es_len == 0"); |
| err = -EFSCORRUPTED; |
| break; |
| } |
| |
| /* |
* This is possible iff next == next_del == EXT_MAX_BLOCKS.
* We need to check next == EXT_MAX_BLOCKS because a single
* extent can carry both unwritten and delayed status: when a
* delayed-allocated range is later allocated by fallocate, the
* extent status tree tracks both states in one extent.
*
* So we could return an unwritten and delayed extent whose end
* block is equal to 'next'.
| */ |
| if (next == next_del && next == EXT_MAX_BLOCKS) { |
| flags |= FIEMAP_EXTENT_LAST; |
| if (unlikely(next_del != EXT_MAX_BLOCKS || |
| next != EXT_MAX_BLOCKS)) { |
| EXT4_ERROR_INODE(inode, |
| "next extent == %u, next " |
| "delalloc extent = %u", |
| next, next_del); |
| err = -EFSCORRUPTED; |
| break; |
| } |
| } |
| |
| if (exists) { |
| err = fiemap_fill_next_extent(fieinfo, |
| (__u64)es.es_lblk << blksize_bits, |
| (__u64)es.es_pblk << blksize_bits, |
| (__u64)es.es_len << blksize_bits, |
| flags); |
| if (err < 0) |
| break; |
| if (err == 1) { |
| err = 0; |
| break; |
| } |
| } |
| |
| block = es.es_lblk + es.es_len; |
| } |
| |
| ext4_ext_drop_refs(path); |
| kfree(path); |
| return err; |
| } |
| |
| /* |
| * ext4_ext_determine_hole - determine hole around given block |
| * @inode: inode we lookup in |
| * @path: path in extent tree to @lblk |
| * @lblk: pointer to logical block around which we want to determine hole |
| * |
* Determine hole length (and start if easily possible) around given
* logical block. We don't try too hard to find the beginning of the
* hole, but if @path happens to point to the extent just before @lblk,
* we report the hole as starting right after that extent.
| * |
| * The function returns the length of a hole starting at @lblk. We update @lblk |
| * to the beginning of the hole if we managed to find it. |
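*
* For example, if the leaf maps blocks [0, 9] and [20, 29] and @lblk is
* 12, the path points at the [0, 9] extent, so @lblk is updated to 10
* and a hole length of 10 is returned (the next allocated block is 20).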
| */ |
| static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode, |
| struct ext4_ext_path *path, |
| ext4_lblk_t *lblk) |
| { |
| int depth = ext_depth(inode); |
| struct ext4_extent *ex; |
| ext4_lblk_t len; |
| |
| ex = path[depth].p_ext; |
| if (ex == NULL) { |
/* there is no extent yet, so the gap is [0; EXT_MAX_BLOCKS) */
| *lblk = 0; |
| len = EXT_MAX_BLOCKS; |
| } else if (*lblk < le32_to_cpu(ex->ee_block)) { |
| len = le32_to_cpu(ex->ee_block) - *lblk; |
| } else if (*lblk >= le32_to_cpu(ex->ee_block) |
| + ext4_ext_get_actual_len(ex)) { |
| ext4_lblk_t next; |
| |
| *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); |
| next = ext4_ext_next_allocated_block(path); |
| BUG_ON(next == *lblk); |
| len = next - *lblk; |
| } else { |
| BUG(); |
| } |
| return len; |
| } |
| |
| /* |
| * ext4_ext_put_gap_in_cache: |
| * calculate boundaries of the gap that the requested block fits into |
| * and cache this gap |
| */ |
| static void |
| ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start, |
| ext4_lblk_t hole_len) |
| { |
| struct extent_status es; |
| |
| ext4_es_find_delayed_extent_range(inode, hole_start, |
| hole_start + hole_len - 1, &es); |
| if (es.es_len) { |
/* if a delayed extent covers hole_start itself, there is no hole to cache */
| if (es.es_lblk <= hole_start) |
| return; |
| hole_len = min(es.es_lblk - hole_start, hole_len); |
| } |
| ext_debug(" -> %u:%u\n", hole_start, hole_len); |
| ext4_es_insert_extent(inode, hole_start, hole_len, ~0, |
| EXTENT_STATUS_HOLE); |
| } |
| |
| /* |
| * ext4_ext_rm_idx: |
| * removes index from the index block. |
| */ |
| static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path, int depth) |
| { |
| int err; |
| ext4_fsblk_t leaf; |
| |
| /* free index block */ |
| depth--; |
| path = path + depth; |
| leaf = ext4_idx_pblock(path->p_idx); |
| if (unlikely(path->p_hdr->eh_entries == 0)) { |
| EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); |
| return -EFSCORRUPTED; |
| } |
| err = ext4_ext_get_access(handle, inode, path); |
| if (err) |
| return err; |
| |
| if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { |
| int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; |
| len *= sizeof(struct ext4_extent_idx); |
| memmove(path->p_idx, path->p_idx + 1, len); |
| } |
| |
| le16_add_cpu(&path->p_hdr->eh_entries, -1); |
| err = ext4_ext_dirty(handle, inode, path); |
| if (err) |
| return err; |
| ext_debug("index is empty, remove it, free block %llu\n", leaf); |
| trace_ext4_ext_rm_idx(inode, leaf); |
| |
| ext4_free_blocks(handle, inode, NULL, leaf, 1, |
| EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); |
| |
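/*
 * If the removed index was the first entry at its level, the logical
 * start stored in each ancestor index is now stale; walk upward and
 * copy the new first block into every affected ancestor.
 */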
| while (--depth >= 0) { |
| if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) |
| break; |
| path--; |
| err = ext4_ext_get_access(handle, inode, path); |
| if (err) |
| break; |
| path->p_idx->ei_block = (path+1)->p_idx->ei_block; |
| err = ext4_ext_dirty(handle, inode, path); |
| if (err) |
| break; |
| } |
| return err; |
| } |
| |
| /* |
* ext4_ext_calc_credits_for_single_extent:
* This routine returns the maximum number of credits needed to insert
* one extent into the extent tree.
* When the actual path is passed in, the caller should hold i_data_sem
* while calculating the credits.
| */ |
| int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, |
| struct ext4_ext_path *path) |
| { |
| if (path) { |
| int depth = ext_depth(inode); |
| int ret = 0; |
| |
/* is there space left in the leaf? */
| if (le16_to_cpu(path[depth].p_hdr->eh_entries) |
| < le16_to_cpu(path[depth].p_hdr->eh_max)) { |
| |
| /* |
| * There are some space in the leaf tree, no |
| * need to account for leaf block credit |
| * |
| * bitmaps and block group descriptor blocks |
| * and other metadata blocks still need to be |
| * accounted. |
| */ |
| /* 1 bitmap, 1 block group descriptor */ |
| ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); |
| return ret; |
| } |
| } |
| |
| return ext4_chunk_trans_blocks(inode, nrblocks); |
| } |
| |
| /* |
| * How many index/leaf blocks need to change/allocate to add @extents extents? |
| * |
* If we add a single extent, then in the worst case, each tree level
| * index/leaf need to be changed in case of the tree split. |
| * |
| * If more extents are inserted, they could cause the whole tree split more |
| * than once, but this is really rare. |
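*
* For example, with a tree of depth 2, a single inserted extent can in
* the worst case dirty or allocate two blocks per level (the split
* block and its new sibling), i.e. 2 * 2 = 4; multiple extents are
* costed at three per level to allow for the rare repeated split.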
| */ |
| int ext4_ext_index_trans_blocks(struct inode *inode, int extents) |
| { |
| int index; |
| int depth; |
| |
| /* If we are converting the inline data, only one is needed here. */ |
| if (ext4_has_inline_data(inode)) |
| return 1; |
| |
| depth = ext_depth(inode); |
| |
| if (extents <= 1) |
| index = depth * 2; |
| else |
| index = depth * 3; |
| |
| return index; |
| } |
| |
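/*
 * Pick default ext4_free_blocks() flags for @inode: directory and
 * symlink blocks are metadata, so they are revoked in the journal
 * (FORGET) and accounted as metadata; data-journaled file blocks
 * still need FORGET so stale copies in the journal are not replayed.
 */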
| static inline int get_default_free_blocks_flags(struct inode *inode) |
| { |
| if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) |
| return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; |
| else if (ext4_should_journal_data(inode)) |
| return EXT4_FREE_BLOCKS_FORGET; |
| return 0; |
| } |
| |
| static int ext4_remove_blocks(handle_t *handle, struct inode *inode, |
| struct ext4_extent *ex, |
| long long *partial_cluster, |
| ext4_lblk_t from, ext4_lblk_t to) |
| { |
| struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| unsigned short ee_len = ext4_ext_get_actual_len(ex); |
| ext4_fsblk_t pblk; |
| int flags = get_default_free_blocks_flags(inode); |
| |
| /* |
| * For bigalloc file systems, we never free a partial cluster |
| * at the beginning of the extent. Instead, we make a note |
| * that we tried freeing the cluster, and check to see if we |
| * need to free it on a subsequent call to ext4_remove_blocks, |
| * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. |
| */ |
| flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; |
| |
| trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); |
| /* |
| * If we have a partial cluster, and it's different from the |
| * cluster of the last block, we need to explicitly free the |
| * partial cluster here. |
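*
* For example, with an eight-block cluster size, if a previous call
* left *partial_cluster == 5 and this extent's last block lies in a
* different cluster, blocks 40-47 (all of cluster 5) are freed below.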
| */ |
| pblk = ext4_ext_pblock(ex) + ee_len - 1; |
| if (*partial_cluster > 0 && |
| *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) { |
| ext4_free_blocks(handle, inode, NULL, |
| EXT4_C2B(sbi, *partial_cluster), |
| sbi->s_cluster_ratio, flags); |
| *partial_cluster = 0; |
| } |
| |
| #ifdef EXTENTS_STATS |
| { |
| struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| spin_lock(&sbi->s_ext_stats_lock); |
| sbi->s_ext_blocks += ee_len; |
| sbi->s_ext_extents++; |
| if (ee_len < sbi->s_ext_min) |
| sbi->s_ext_min = ee_len; |
| if (ee_len > sbi->s_ext_max) |
| sbi->s_ext_max = ee_len; |
| if (ext_depth(inode) > sbi->s_depth_max) |
| sbi->s_depth_max = ext_depth(inode); |
| spin_unlock(&sbi->s_ext_stats_lock); |
| } |
| #endif |
| if (from >= le32_to_cpu(ex->ee_block) |
| && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { |
| /* tail removal */ |
| ext4_lblk_t num; |
| long long first_cluster; |
| |
| num = le32_to_cpu(ex->ee_block) + ee_len - from; |
| pblk = ext4_ext_pblock(ex) + ee_len - num; |
| /* |
| * Usually we want to free partial cluster at the end of the |
| * extent, except for the situation when the cluster is still |
| * used by any other extent (partial_cluster is negative). |
| */ |
| if (*partial_cluster < 0 && |
| *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1)) |
| flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; |
| |
| ext_debug("free last %u blocks starting %llu partial %lld\n", |
| num, pblk, *partial_cluster); |
| ext4_free_blocks(handle, inode, NULL, pblk, num, flags); |
| /* |
| * If the block range to be freed didn't start at the |
| * beginning of a cluster, and we removed the entire |
| * extent and the cluster is not used by any other extent, |
| * save the partial cluster here, since we might need to |
| * delete if we determine that the truncate or punch hole |
| * operation has removed all of the blocks in the cluster. |
| * If that cluster is used by another extent, preserve its |
| * negative value so it isn't freed later on. |
| * |
| * If the whole extent wasn't freed, we've reached the |
| * start of the truncated/punched region and have finished |
| * removing blocks. If there's a partial cluster here it's |
| * shared with the remainder of the extent and is no longer |
| * a candidate for removal. |
| */ |
| if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) { |
| first_cluster = (long long) EXT4_B2C(sbi, pblk); |
| if (first_cluster != -*partial_cluster) |
| *partial_cluster = first_cluster; |
| } else { |
| *partial_cluster = 0; |
| } |
| } else |
| ext4_error(sbi->s_sb, "strange request: removal(2) " |
| "%u-%u from %u:%u", |
| from, to, le32_to_cpu(ex->ee_block), ee_len); |
| return 0; |
| } |
| |
| |
| /* |
| * ext4_ext_rm_leaf() Removes the extents associated with the |
| * blocks appearing between "start" and "end". Both "start" |
| * and "end" must appear in the same extent or EIO is returned. |
| * |
| * @handle: The journal handle |
* @inode: The file's inode
| * @path: The path to the leaf |
* @partial_cluster: The cluster which we'll have to free if all extents
* have been released from it. However, if this value is
| * negative, it's a cluster just to the right of the |
| * punched region and it must not be freed. |
| * @start: The first block to remove |
| * @end: The last block to remove |
| */ |
| static int |
| ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path, |
| long long *partial_cluster, |
| ext4_lblk_t start, ext4_lblk_t end) |
| { |
| struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| int err = 0, correct_index = 0; |
| int depth = ext_depth(inode), credits; |
| struct ext4_extent_header *eh; |
| ext4_lblk_t a, b; |
| unsigned num; |
| ext4_lblk_t ex_ee_block; |
| unsigned short ex_ee_len; |
| unsigned unwritten = 0; |
| struct ext4_extent *ex; |
| ext4_fsblk_t pblk; |
| |
| /* the header must be checked already in ext4_ext_remove_space() */ |
| ext_debug("truncate since %u in leaf to %u\n", start, end); |
| if (!path[depth].p_hdr) |
| path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); |
| eh = path[depth].p_hdr; |
| if (unlikely(path[depth].p_hdr == NULL)) { |
| EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); |
| return -EFSCORRUPTED; |
| } |
| /* find where to start removing */ |
| ex = path[depth].p_ext; |
| if (!ex) |
| ex = EXT_LAST_EXTENT(eh); |
| |
| ex_ee_block = le32_to_cpu(ex->ee_block); |
| ex_ee_len = ext4_ext_get_actual_len(ex); |
| |
| trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); |
| |
| while (ex >= EXT_FIRST_EXTENT(eh) && |
| ex_ee_block + ex_ee_len > start) { |
| |
| if (ext4_ext_is_unwritten(ex)) |
| unwritten = 1; |
| else |
| unwritten = 0; |
| |
| ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, |
| unwritten, ex_ee_len); |
| path[depth].p_ext = ex; |
| |
| a = ex_ee_block > start ? ex_ee_block : start; |
| b = ex_ee_block+ex_ee_len - 1 < end ? |
| ex_ee_block+ex_ee_len - 1 : end; |
| |
| ext_debug(" border %u:%u\n", a, b); |
| |
| /* If this extent is beyond the end of the hole, skip it */ |
| if (end < ex_ee_block) { |
| /* |
| * We're going to skip this extent and move to another, |
| * so note that its first cluster is in use to avoid |
| * freeing it when removing blocks. Eventually, the |
| * right edge of the truncated/punched region will |
| * be just to the left. |
| */ |
| if (sbi->s_cluster_ratio > 1) { |
| pblk = ext4_ext_pblock(ex); |
| *partial_cluster = |
| -(long long) EXT4_B2C(sbi, pblk); |
| } |
| ex--; |
| ex_ee_block = le32_to_cpu(ex->ee_block); |
| ex_ee_len = ext4_ext_get_actual_len(ex); |
| continue; |
| } else if (b != ex_ee_block + ex_ee_len - 1) { |
| EXT4_ERROR_INODE(inode, |
| "can not handle truncate %u:%u " |
| "on extent %u:%u", |
| start, end, ex_ee_block, |
| ex_ee_block + ex_ee_len - 1); |
| err = -EFSCORRUPTED; |
| goto out; |
| } else if (a != ex_ee_block) { |
| /* remove tail of the extent */ |
| num = a - ex_ee_block; |
| } else { |
| /* remove whole extent: excellent! */ |
| num = 0; |
| } |
| /* |
* 3 for leaf, sb, and inode plus 2 (bitmap and group
| * descriptor) for each block group; assume two block |
| * groups plus ex_ee_len/blocks_per_block_group for |
| * the worst case |
| */ |
| credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); |
| if (ex == EXT_FIRST_EXTENT(eh)) { |
| correct_index = 1; |
| credits += (ext_depth(inode)) + 1; |
| } |
| credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); |
| |
| err = ext4_ext_truncate_extend_restart(handle, inode, credits); |
| if (err) |
| goto out; |
| |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| goto out; |
| |
| err = ext4_remove_blocks(handle, inode, ex, partial_cluster, |
| a, b); |
| if (err) |
| goto out; |
| |
| if (num == 0) |
| /* this extent is removed; mark slot entirely unused */ |
| ext4_ext_store_pblock(ex, 0); |
| |
| ex->ee_len = cpu_to_le16(num); |
| /* |
| * Do not mark unwritten if all the blocks in the |
| * extent have been removed. |
| */ |
| if (unwritten && num) |
| ext4_ext_mark_unwritten(ex); |
| /* |
| * If the extent was completely released, |
| * we need to remove it from the leaf |
| */ |
| if (num == 0) { |
| if (end != EXT_MAX_BLOCKS - 1) { |
| /* |
| * For hole punching, we need to scoot all the |
| * extents up when an extent is removed so that |
* we don't have blank extents in the middle
| */ |
| memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * |
| sizeof(struct ext4_extent)); |
| |
| /* Now get rid of the one at the end */ |
| memset(EXT_LAST_EXTENT(eh), 0, |
| sizeof(struct ext4_extent)); |
| } |
| le16_add_cpu(&eh->eh_entries, -1); |
| } |
| |
| err = ext4_ext_dirty(handle, inode, path + depth); |
| if (err) |
| goto out; |
| |
| ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, |
| ext4_ext_pblock(ex)); |
| ex--; |
| ex_ee_block = le32_to_cpu(ex->ee_block); |
| ex_ee_len = ext4_ext_get_actual_len(ex); |
| } |
| |
| if (correct_index && eh->eh_entries) |
| err = ext4_ext_correct_indexes(handle, inode, path); |
| |
| /* |
| * If there's a partial cluster and at least one extent remains in |
| * the leaf, free the partial cluster if it isn't shared with the |
| * current extent. If it is shared with the current extent |
| * we zero partial_cluster because we've reached the start of the |
| * truncated/punched region and we're done removing blocks. |
| */ |
| if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) { |
| pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; |
| if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) { |
| ext4_free_blocks(handle, inode, NULL, |
| EXT4_C2B(sbi, *partial_cluster), |
| sbi->s_cluster_ratio, |
| get_default_free_blocks_flags(inode)); |
| } |
| *partial_cluster = 0; |
| } |
| |
/* if this leaf is empty, then we should
 * remove it from the index block above */
| if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) |
| err = ext4_ext_rm_idx(handle, inode, path, depth); |
| |
| out: |
| return err; |
| } |
| |
| /* |
| * ext4_ext_more_to_rm: |
| * returns 1 if current index has to be freed (even partial) |
| */ |
| static int |
| ext4_ext_more_to_rm(struct ext4_ext_path *path) |
| { |
| BUG_ON(path->p_idx == NULL); |
| |
| if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) |
| return 0; |
| |
| /* |
* if a truncate at a deeper level happened, it wasn't partial,
* so we have to consider the current index for truncation
| */ |
| if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) |
| return 0; |
| return 1; |
| } |
| |
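/*
 * ext4_ext_remove_space:
 * remove all blocks in the logical range [@start, @end] from the inode's
 * extent tree, scanning from the right and walking depth-wise, shrinking
 * or deleting leaves and index blocks as they empty.
 */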
| int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, |
| ext4_lblk_t end) |
| { |
| struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| int depth = ext_depth(inode); |
| struct ext4_ext_path *path = NULL; |
| long long partial_cluster = 0; |
| handle_t *handle; |
| int i = 0, err = 0; |
| |
| ext_debug("truncate since %u to %u\n", start, end); |
| |
/* probably the first extent we free will be the last one in its block */
| handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1); |
| if (IS_ERR(handle)) |
| return PTR_ERR(handle); |
| |
| again: |
| trace_ext4_ext_remove_space(inode, start, end, depth); |
| |
| /* |
| * Check if we are removing extents inside the extent tree. If that |
| * is the case, we are going to punch a hole inside the extent tree |
| * so we have to check whether we need to split the extent covering |
| * the last block to remove so we can easily remove the part of it |
| * in ext4_ext_rm_leaf(). |
| */ |
| if (end < EXT_MAX_BLOCKS - 1) { |
| struct ext4_extent *ex; |
| ext4_lblk_t ee_block, ex_end, lblk; |
| ext4_fsblk_t pblk; |
| |
| /* find extent for or closest extent to this block */ |
| path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); |
| if (IS_ERR(path)) { |
| ext4_journal_stop(handle); |
| return PTR_ERR(path); |
| } |
| depth = ext_depth(inode); |
/* The leaf may lack an extent only if the inode has no blocks at all */
| ex = path[depth].p_ext; |
| if (!ex) { |
| if (depth) { |
| EXT4_ERROR_INODE(inode, |
| "path[%d].p_hdr == NULL", |
| depth); |
| err = -EFSCORRUPTED; |
| } |
| goto out; |
| } |
| |
| ee_block = le32_to_cpu(ex->ee_block); |
| ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; |
| |
| /* |
* See if the last block is inside the extent; if so, split
| * the extent at 'end' block so we can easily remove the |
| * tail of the first part of the split extent in |
| * ext4_ext_rm_leaf(). |
| */ |
| if (end >= ee_block && end < ex_end) { |
| |
| /* |
| * If we're going to split the extent, note that |
| * the cluster containing the block after 'end' is |
| * in use to avoid freeing it when removing blocks. |
| */ |
| if (sbi->s_cluster_ratio > 1) { |
pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
| partial_cluster = |
| -(long long) EXT4_B2C(sbi, pblk); |
| } |
| |
| /* |
| * Split the extent in two so that 'end' is the last |
* block in the first new extent. Also, we should not
* fail to remove space due to ENOSPC, so try to use a
* reserved block if that happens.
| */ |
| err = ext4_force_split_extent_at(handle, inode, &path, |
| end + 1, 1); |
| if (err < 0) |
| goto out; |
| |
| } else if (sbi->s_cluster_ratio > 1 && end >= ex_end) { |
| /* |
| * If there's an extent to the right its first cluster |
| * contains the immediate right boundary of the |
| * truncated/punched region. Set partial_cluster to |
| * its negative value so it won't be freed if shared |
| * with the current extent. The end < ee_block case |
| * is handled in ext4_ext_rm_leaf(). |
| */ |
| lblk = ex_end + 1; |
| err = ext4_ext_search_right(inode, path, &lblk, &pblk, |
| &ex); |
| if (err) |
| goto out; |
| if (pblk) |
| partial_cluster = |
| -(long long) EXT4_B2C(sbi, pblk); |
| } |
| } |
| /* |
* We start scanning from the right side, freeing all the blocks
| * after i_size and walking into the tree depth-wise. |
| */ |
| depth = ext_depth(inode); |
| if (path) { |
| int k = i = depth; |
| while (--k > 0) |
| path[k].p_block = |
| le16_to_cpu(path[k].p_hdr->eh_entries)+1; |
| } else { |
path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
GFP_NOFS);
| if (path == NULL) { |
| ext4_journal_stop(handle); |
| return -ENOMEM; |
| } |
| path[0].p_maxdepth = path[0].p_depth = depth; |
| path[0].p_hdr = ext_inode_hdr(inode); |
| i = 0; |
| |
| if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { |
| err = -EFSCORRUPTED; |
| goto out; |
| } |
| } |
| err = 0; |
| |
| while (i >= 0 && err == 0) { |
| if (i == depth) { |
| /* this is leaf block */ |
| err = ext4_ext_rm_leaf(handle, inode, path, |
| &partial_cluster, start, |
| end); |
| /* root level has p_bh == NULL, brelse() eats this */ |
| brelse(path[i].p_bh); |
| path[i].p_bh = NULL; |
| i--; |
| continue; |
| } |
| |
| /* this is index block */ |
| if (!path[i].p_hdr) { |
| ext_debug("initialize header\n"); |
| path[i].p_hdr = ext_block_hdr(path[i].p_bh); |
| } |
| |
| if (!path[i].p_idx) { |
| /* this level hasn't been touched yet */ |
| path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); |
| path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; |
| ext_debug("init index ptr: hdr 0x%p, num %d\n", |
| path[i].p_hdr, |
| le16_to_cpu(path[i].p_hdr->eh_entries)); |
| } else { |
/* we were already here, move on to the next index */
| path[i].p_idx--; |
| } |
| |
| ext_debug("level %d - index, first 0x%p, cur 0x%p\n", |
| i, EXT_FIRST_INDEX(path[i].p_hdr), |
| path[i].p_idx); |
| if (ext4_ext_more_to_rm(path + i)) { |
| struct buffer_head *bh; |
| /* go to the next level */ |
| ext_debug("move to level %d (block %llu)\n", |
| i + 1, ext4_idx_pblock(path[i].p_idx)); |
| memset(path + i + 1, 0, sizeof(*path)); |
| bh = read_extent_tree_block(inode, |
| ext4_idx_pblock(path[i].p_idx), depth - i - 1, |
| EXT4_EX_NOCACHE); |
| if (IS_ERR(bh)) { |
| /* should we reset i_size? */ |
| err = PTR_ERR(bh); |
| break; |
| } |
| /* Yield here to deal with large extent trees. |
| * Should be a no-op if we did IO above. */ |
| cond_resched(); |
| if (WARN_ON(i + 1 > depth)) { |
| err = -EFSCORRUPTED; |
| break; |
| } |
| path[i + 1].p_bh = bh; |
| |
/* save the actual number of indexes, since this
 * number can change at the next iteration */
| path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); |
| i++; |
| } else { |
| /* we finished processing this index, go up */ |
| if (path[i].p_hdr->eh_entries == 0 && i > 0) { |
/* index is empty, remove it;
 * the handle has already been extended for this
 * in ext4_ext_rm_leaf() above */
| err = ext4_ext_rm_idx(handle, inode, path, i); |
| } |
| /* root level has p_bh == NULL, brelse() eats this */ |
| brelse(path[i].p_bh); |
| path[i].p_bh = NULL; |
| i--; |
| ext_debug("return to level %d\n", i); |
| } |
| } |
| |
| trace_ext4_ext_remove_space_done(inode, start, end, depth, |
| partial_cluster, path->p_hdr->eh_entries); |
| |
| /* |
| * If we still have something in the partial cluster and we have removed |
| * even the first extent, then we should free the blocks in the partial |
| * cluster as well. (This code will only run when there are no leaves |
| * to the immediate left of the truncated/punched region.) |
| */ |
| if (partial_cluster > 0 && err == 0) { |
| /* don't zero partial_cluster since it's not used afterwards */ |
| ext4_free_blocks(handle, inode, NULL, |
| EXT4_C2B(sbi, partial_cluster), |
| sbi->s_cluster_ratio, |
| get_default_free_blocks_flags(inode)); |
| } |
| |
| /* TODO: flexible tree reduction should be here */ |
| if (path->p_hdr->eh_entries == 0) { |
| /* |
| * truncate to zero freed all the tree, |
| * so we need to correct eh_depth |
| */ |
| err = ext4_ext_get_access(handle, inode, path); |
| if (err == 0) { |
| ext_inode_hdr(inode |