| /* |
| * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com |
| * Written by Alex Tomas <alex@clusterfs.com> |
| * |
| * Architecture independence: |
| * Copyright (c) 2005, Bull S.A. |
| * Written by Pierre Peiffer <pierre.peiffer@bull.net> |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 as |
| * published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| * GNU General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public License |
| * along with this program; if not, write to the Free Software |
| * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| */ |
| |
| /* |
| * Extents support for EXT4 |
| * |
| * TODO: |
| * - ext4*_error() should be used in some situations |
| * - analyze all BUG()/BUG_ON(), use -EIO where appropriate |
| * - smart tree reduction |
| */ |
| |
| #include <linux/fs.h> |
| #include <linux/time.h> |
| #include <linux/jbd2.h> |
| #include <linux/highuid.h> |
| #include <linux/pagemap.h> |
| #include <linux/quotaops.h> |
| #include <linux/string.h> |
| #include <linux/slab.h> |
| #include <linux/falloc.h> |
| #include <asm/uaccess.h> |
| #include <linux/fiemap.h> |
| #include "ext4_jbd2.h" |
| |
| #include <trace/events/ext4.h> |
| |
| static int ext4_split_extent(handle_t *handle, |
| struct inode *inode, |
| struct ext4_ext_path *path, |
| struct ext4_map_blocks *map, |
| int split_flag, |
| int flags); |
| |
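| /* |
| * Make sure 'handle' has at least 'needed' credits. If not, try to |
| * extend the journal handle; if it cannot be extended, restart the |
| * transaction and return -EAGAIN so that the caller can revalidate |
| * its path and retry. |
| */ |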
| static int ext4_ext_truncate_extend_restart(handle_t *handle, |
| struct inode *inode, |
| int needed) |
| { |
| int err; |
| |
| if (!ext4_handle_valid(handle)) |
| return 0; |
| if (handle->h_buffer_credits > needed) |
| return 0; |
| err = ext4_journal_extend(handle, needed); |
| if (err <= 0) |
| return err; |
| err = ext4_truncate_restart_trans(handle, inode, needed); |
| if (err == 0) |
| err = -EAGAIN; |
| |
| return err; |
| } |
| |
| /* |
| * could return: |
| * - EROFS |
| * - ENOMEM |
| */ |
| static int ext4_ext_get_access(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path) |
| { |
| if (path->p_bh) { |
| /* path points to block */ |
| return ext4_journal_get_write_access(handle, path->p_bh); |
| } |
| /* path points to leaf/index in inode body */ |
| /* we use in-core data, no need to protect them */ |
| return 0; |
| } |
| |
| /* |
| * could return: |
| * - EROFS |
| * - ENOMEM |
| * - EIO |
| */ |
| #define ext4_ext_dirty(handle, inode, path) \ |
| __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path)) |
| static int __ext4_ext_dirty(const char *where, unsigned int line, |
| handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path) |
| { |
| int err; |
| if (path->p_bh) { |
| /* path points to block */ |
| err = __ext4_handle_dirty_metadata(where, line, handle, |
| inode, path->p_bh); |
| } else { |
| /* path points to leaf/index in inode body */ |
| err = ext4_mark_inode_dirty(handle, inode); |
| } |
| return err; |
| } |
| |
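| /* |
| * ext4_ext_find_goal: |
| * return a preferred (goal) physical block for allocating logical |
| * block @block, based on the extent found in @path, the index block |
| * containing it, or, failing both, the inode's block group. |
| */ |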
| static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, |
| struct ext4_ext_path *path, |
| ext4_lblk_t block) |
| { |
| if (path) { |
| int depth = path->p_depth; |
| struct ext4_extent *ex; |
| |
| /* |
| * Try to predict block placement assuming that we are |
| * filling in a file which will eventually be |
| * non-sparse --- i.e., in the case of libbfd writing |
| * an ELF object's sections out-of-order but in a way |
| * that eventually results in a contiguous object or |
| * executable file, or some database extending a table |
| * space file. However, this is actually somewhat |
| * non-ideal if we are writing a sparse file such as |
| * qemu or KVM writing a raw image file that is going |
| * to stay fairly sparse, since it will end up |
| * fragmenting the file system's free space. Maybe we |
| * should have some heuristics or some way to allow |
| * userspace to pass a hint to the file system, |
| * especially if the latter case turns out to be |
| * common. |
| */ |
| ex = path[depth].p_ext; |
| if (ex) { |
| ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); |
| ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); |
| |
| if (block > ext_block) |
| return ext_pblk + (block - ext_block); |
| else |
| return ext_pblk - (ext_block - block); |
| } |
| |
| /* it looks like the index is empty; |
| * try to find the starting block from the index block itself */ |
| if (path[depth].p_bh) |
| return path[depth].p_bh->b_blocknr; |
| } |
| |
| /* OK. use inode's group */ |
| return ext4_inode_to_goal_block(inode); |
| } |
| |
| /* |
| * Allocation for a metadata block |
| */ |
| static ext4_fsblk_t |
| ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path, |
| struct ext4_extent *ex, int *err, unsigned int flags) |
| { |
| ext4_fsblk_t goal, newblock; |
| |
| goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); |
| newblock = ext4_new_meta_blocks(handle, inode, goal, flags, |
| NULL, err); |
| return newblock; |
| } |
| |
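| /* |
| * The following helpers return how many extent (or index) records fit |
| * in a tree node. On-disk nodes fill a whole block; the root lives in |
| * the inode's i_data. For example, with a 4KB block size the 12-byte |
| * header and 12-byte records give (4096 - 12) / 12 = 340 entries per |
| * block, while the 60-byte i_data holds (60 - 12) / 12 = 4 entries |
| * in the root. |
| */ |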
| static inline int ext4_ext_space_block(struct inode *inode, int check) |
| { |
| int size; |
| |
| size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) |
| / sizeof(struct ext4_extent); |
| #ifdef AGGRESSIVE_TEST |
| if (!check && size > 6) |
| size = 6; |
| #endif |
| return size; |
| } |
| |
| static inline int ext4_ext_space_block_idx(struct inode *inode, int check) |
| { |
| int size; |
| |
| size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) |
| / sizeof(struct ext4_extent_idx); |
| #ifdef AGGRESSIVE_TEST |
| if (!check && size > 5) |
| size = 5; |
| #endif |
| return size; |
| } |
| |
| static inline int ext4_ext_space_root(struct inode *inode, int check) |
| { |
| int size; |
| |
| size = sizeof(EXT4_I(inode)->i_data); |
| size -= sizeof(struct ext4_extent_header); |
| size /= sizeof(struct ext4_extent); |
| #ifdef AGGRESSIVE_TEST |
| if (!check && size > 3) |
| size = 3; |
| #endif |
| return size; |
| } |
| |
| static inline int ext4_ext_space_root_idx(struct inode *inode, int check) |
| { |
| int size; |
| |
| size = sizeof(EXT4_I(inode)->i_data); |
| size -= sizeof(struct ext4_extent_header); |
| size /= sizeof(struct ext4_extent_idx); |
| #ifdef AGGRESSIVE_TEST |
| if (!check && size > 4) |
| size = 4; |
| #endif |
| return size; |
| } |
| |
| /* |
| * Calculate the number of metadata blocks needed |
| * to allocate an extent at @lblock |
| * Worst case is one metadata block per extent |
| */ |
| int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) |
| { |
| struct ext4_inode_info *ei = EXT4_I(inode); |
| int idxs; |
| |
| idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) |
| / sizeof(struct ext4_extent_idx)); |
| |
| /* |
| * If the new delayed allocation block is contiguous with the |
| * previous da block, it can share index blocks with the |
| * previous block, so we only need to allocate a new index |
| * block every idxs leaf blocks. At idxs**2 blocks, we need |
| * an additional index block, and at idxs**3 blocks, yet |
| * another index block. |
| */ |
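| /* |
| * For example, with 4KB blocks each index record is 12 bytes and the |
| * 12-byte header leaves idxs = (4096 - 12) / 12 = 340 entries per |
| * block, so a contiguous delalloc run is charged one extra index |
| * block at every 340th block, another at every 340**2-th, and so on. |
| */ |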
| if (ei->i_da_metadata_calc_len && |
| ei->i_da_metadata_calc_last_lblock+1 == lblock) { |
| int num = 0; |
| |
| if ((ei->i_da_metadata_calc_len % idxs) == 0) |
| num++; |
| if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0) |
| num++; |
| if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) { |
| num++; |
| ei->i_da_metadata_calc_len = 0; |
| } else |
| ei->i_da_metadata_calc_len++; |
| ei->i_da_metadata_calc_last_lblock++; |
| return num; |
| } |
| |
| /* |
| * In the worst case we need a new set of index blocks at |
| * every level of the inode's extent tree. |
| */ |
| ei->i_da_metadata_calc_len = 1; |
| ei->i_da_metadata_calc_last_lblock = lblock; |
| return ext_depth(inode) + 1; |
| } |
| |
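| /* |
| * Return the maximum number of entries legal for a node at @depth: |
| * the root is stored in the inode body and is smaller than an |
| * on-disk block, and leaf nodes hold extents while interior nodes |
| * hold indexes. |
| */ |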
| static int |
| ext4_ext_max_entries(struct inode *inode, int depth) |
| { |
| int max; |
| |
| if (depth == ext_depth(inode)) { |
| if (depth == 0) |
| max = ext4_ext_space_root(inode, 1); |
| else |
| max = ext4_ext_space_root_idx(inode, 1); |
| } else { |
| if (depth == 0) |
| max = ext4_ext_space_block(inode, 1); |
| else |
| max = ext4_ext_space_block_idx(inode, 1); |
| } |
| |
| return max; |
| } |
| |
| static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) |
| { |
| ext4_fsblk_t block = ext4_ext_pblock(ext); |
| int len = ext4_ext_get_actual_len(ext); |
| |
| if (len == 0) |
| return 0; |
| return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); |
| } |
| |
| static int ext4_valid_extent_idx(struct inode *inode, |
| struct ext4_extent_idx *ext_idx) |
| { |
| ext4_fsblk_t block = ext4_idx_pblock(ext_idx); |
| |
| return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); |
| } |
| |
| static int ext4_valid_extent_entries(struct inode *inode, |
| struct ext4_extent_header *eh, |
| int depth) |
| { |
| unsigned short entries; |
| if (eh->eh_entries == 0) |
| return 1; |
| |
| entries = le16_to_cpu(eh->eh_entries); |
| |
| if (depth == 0) { |
| /* leaf entries */ |
| struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); |
| while (entries) { |
| if (!ext4_valid_extent(inode, ext)) |
| return 0; |
| ext++; |
| entries--; |
| } |
| } else { |
| struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); |
| while (entries) { |
| if (!ext4_valid_extent_idx(inode, ext_idx)) |
| return 0; |
| ext_idx++; |
| entries--; |
| } |
| } |
| return 1; |
| } |
| |
| static int __ext4_ext_check(const char *function, unsigned int line, |
| struct inode *inode, struct ext4_extent_header *eh, |
| int depth) |
| { |
| const char *error_msg; |
| int max = 0; |
| |
| if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { |
| error_msg = "invalid magic"; |
| goto corrupted; |
| } |
| if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { |
| error_msg = "unexpected eh_depth"; |
| goto corrupted; |
| } |
| if (unlikely(eh->eh_max == 0)) { |
| error_msg = "invalid eh_max"; |
| goto corrupted; |
| } |
| max = ext4_ext_max_entries(inode, depth); |
| if (unlikely(le16_to_cpu(eh->eh_max) > max)) { |
| error_msg = "too large eh_max"; |
| goto corrupted; |
| } |
| if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { |
| error_msg = "invalid eh_entries"; |
| goto corrupted; |
| } |
| if (!ext4_valid_extent_entries(inode, eh, depth)) { |
| error_msg = "invalid extent entries"; |
| goto corrupted; |
| } |
| return 0; |
| |
| corrupted: |
| ext4_error_inode(inode, function, line, 0, |
| "bad header/extent: %s - magic %x, " |
| "entries %u, max %u(%u), depth %u(%u)", |
| error_msg, le16_to_cpu(eh->eh_magic), |
| le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), |
| max, le16_to_cpu(eh->eh_depth), depth); |
| |
| return -EIO; |
| } |
| |
| #define ext4_ext_check(inode, eh, depth) \ |
| __ext4_ext_check(__func__, __LINE__, inode, eh, depth) |
| |
| int ext4_ext_check_inode(struct inode *inode) |
| { |
| return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); |
| } |
| |
| #ifdef EXT_DEBUG |
| static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) |
| { |
| int k, l = path->p_depth; |
| |
| ext_debug("path:"); |
| for (k = 0; k <= l; k++, path++) { |
| if (path->p_idx) { |
| ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), |
| ext4_idx_pblock(path->p_idx)); |
| } else if (path->p_ext) { |
| ext_debug(" %d:[%d]%d:%llu ", |
| le32_to_cpu(path->p_ext->ee_block), |
| ext4_ext_is_uninitialized(path->p_ext), |
| ext4_ext_get_actual_len(path->p_ext), |
| ext4_ext_pblock(path->p_ext)); |
| } else |
| ext_debug(" []"); |
| } |
| ext_debug("\n"); |
| } |
| |
| static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) |
| { |
| int depth = ext_depth(inode); |
| struct ext4_extent_header *eh; |
| struct ext4_extent *ex; |
| int i; |
| |
| if (!path) |
| return; |
| |
| eh = path[depth].p_hdr; |
| ex = EXT_FIRST_EXTENT(eh); |
| |
| ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); |
| |
| for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { |
| ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), |
| ext4_ext_is_uninitialized(ex), |
| ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); |
| } |
| ext_debug("\n"); |
| } |
| |
| static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, |
| ext4_fsblk_t newblock, int level) |
| { |
| int depth = ext_depth(inode); |
| struct ext4_extent *ex; |
| |
| if (depth != level) { |
| struct ext4_extent_idx *idx; |
| idx = path[level].p_idx; |
| while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { |
| ext_debug("%d: move %d:%llu in new index %llu\n", level, |
| le32_to_cpu(idx->ei_block), |
| ext4_idx_pblock(idx), |
| newblock); |
| idx++; |
| } |
| |
| return; |
| } |
| |
| ex = path[depth].p_ext; |
| while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { |
| ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", |
| le32_to_cpu(ex->ee_block), |
| ext4_ext_pblock(ex), |
| ext4_ext_is_uninitialized(ex), |
| ext4_ext_get_actual_len(ex), |
| newblock); |
| ex++; |
| } |
| } |
| |
| #else |
| #define ext4_ext_show_path(inode, path) |
| #define ext4_ext_show_leaf(inode, path) |
| #define ext4_ext_show_move(inode, path, newblock, level) |
| #endif |
| |
| void ext4_ext_drop_refs(struct ext4_ext_path *path) |
| { |
| int depth = path->p_depth; |
| int i; |
| |
| for (i = 0; i <= depth; i++, path++) |
| if (path->p_bh) { |
| brelse(path->p_bh); |
| path->p_bh = NULL; |
| } |
| } |
| |
| /* |
| * ext4_ext_binsearch_idx: |
| * binary search for the closest index of the given block |
| * the header must be checked before calling this |
| */ |
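| /* |
| * The lower bound starts at EXT_FIRST_INDEX + 1 so that l - 1 is |
| * always a valid record: on termination every index in |
| * [EXT_FIRST_INDEX + 1, l) has ei_block <= @block, so l - 1 is the |
| * rightmost index covering @block (falling back to the first index |
| * when @block precedes them all). |
| */ |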
| static void |
| ext4_ext_binsearch_idx(struct inode *inode, |
| struct ext4_ext_path *path, ext4_lblk_t block) |
| { |
| struct ext4_extent_header *eh = path->p_hdr; |
| struct ext4_extent_idx *r, *l, *m; |
| |
| |
| ext_debug("binsearch for %u(idx): ", block); |
| |
| l = EXT_FIRST_INDEX(eh) + 1; |
| r = EXT_LAST_INDEX(eh); |
| while (l <= r) { |
| m = l + (r - l) / 2; |
| if (block < le32_to_cpu(m->ei_block)) |
| r = m - 1; |
| else |
| l = m + 1; |
| ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), |
| m, le32_to_cpu(m->ei_block), |
| r, le32_to_cpu(r->ei_block)); |
| } |
| |
| path->p_idx = l - 1; |
| ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block), |
| ext4_idx_pblock(path->p_idx)); |
| |
| #ifdef CHECK_BINSEARCH |
| { |
| struct ext4_extent_idx *chix, *ix; |
| int k; |
| |
| chix = ix = EXT_FIRST_INDEX(eh); |
| for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { |
| if (k != 0 && |
| le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { |
| printk(KERN_DEBUG "k=%d, ix=0x%p, " |
| "first=0x%p\n", k, |
| ix, EXT_FIRST_INDEX(eh)); |
| printk(KERN_DEBUG "%u <= %u\n", |
| le32_to_cpu(ix->ei_block), |
| le32_to_cpu(ix[-1].ei_block)); |
| } |
| BUG_ON(k && le32_to_cpu(ix->ei_block) |
| <= le32_to_cpu(ix[-1].ei_block)); |
| if (block < le32_to_cpu(ix->ei_block)) |
| break; |
| chix = ix; |
| } |
| BUG_ON(chix != path->p_idx); |
| } |
| #endif |
| |
| } |
| |
| /* |
| * ext4_ext_binsearch: |
| * binary search for closest extent of the given block |
| * the header must be checked before calling this |
| */ |
| static void |
| ext4_ext_binsearch(struct inode *inode, |
| struct ext4_ext_path *path, ext4_lblk_t block) |
| { |
| struct ext4_extent_header *eh = path->p_hdr; |
| struct ext4_extent *r, *l, *m; |
| |
| if (eh->eh_entries == 0) { |
| /* |
| * this leaf is empty: |
| * we get such a leaf in split/add case |
| */ |
| return; |
| } |
| |
| ext_debug("binsearch for %u: ", block); |
| |
| l = EXT_FIRST_EXTENT(eh) + 1; |
| r = EXT_LAST_EXTENT(eh); |
| |
| while (l <= r) { |
| m = l + (r - l) / 2; |
| if (block < le32_to_cpu(m->ee_block)) |
| r = m - 1; |
| else |
| l = m + 1; |
| ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), |
| m, le32_to_cpu(m->ee_block), |
| r, le32_to_cpu(r->ee_block)); |
| } |
| |
| path->p_ext = l - 1; |
| ext_debug(" -> %d:%llu:[%d]%d ", |
| le32_to_cpu(path->p_ext->ee_block), |
| ext4_ext_pblock(path->p_ext), |
| ext4_ext_is_uninitialized(path->p_ext), |
| ext4_ext_get_actual_len(path->p_ext)); |
| |
| #ifdef CHECK_BINSEARCH |
| { |
| struct ext4_extent *chex, *ex; |
| int k; |
| |
| chex = ex = EXT_FIRST_EXTENT(eh); |
| for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { |
| BUG_ON(k && le32_to_cpu(ex->ee_block) |
| <= le32_to_cpu(ex[-1].ee_block)); |
| if (block < le32_to_cpu(ex->ee_block)) |
| break; |
| chex = ex; |
| } |
| BUG_ON(chex != path->p_ext); |
| } |
| #endif |
| |
| } |
| |
| int ext4_ext_tree_init(handle_t *handle, struct inode *inode) |
| { |
| struct ext4_extent_header *eh; |
| |
| eh = ext_inode_hdr(inode); |
| eh->eh_depth = 0; |
| eh->eh_entries = 0; |
| eh->eh_magic = EXT4_EXT_MAGIC; |
| eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); |
| ext4_mark_inode_dirty(handle, inode); |
| ext4_ext_invalidate_cache(inode); |
| return 0; |
| } |
| |
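| /* |
| * ext4_ext_find_extent: |
| * look up @block in the extent tree and return an array of |
| * (depth + 1) path entries from the root down to the leaf, with the |
| * header, index and extent pointers filled in at each level. If |
| * @path is NULL a new array is allocated; the caller must release it |
| * with ext4_ext_drop_refs() and kfree() when done. |
| */ |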
| struct ext4_ext_path * |
| ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, |
| struct ext4_ext_path *path) |
| { |
| struct ext4_extent_header *eh; |
| struct buffer_head *bh; |
| short int depth, i, ppos = 0, alloc = 0; |
| |
| eh = ext_inode_hdr(inode); |
| depth = ext_depth(inode); |
| |
| /* account possible depth increase */ |
| if (!path) { |
| path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), |
| GFP_NOFS); |
| if (!path) |
| return ERR_PTR(-ENOMEM); |
| alloc = 1; |
| } |
| path[0].p_hdr = eh; |
| path[0].p_bh = NULL; |
| |
| i = depth; |
| /* walk through the tree */ |
| while (i) { |
| int need_to_validate = 0; |
| |
| ext_debug("depth %d: num %d, max %d\n", |
| ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); |
| |
| ext4_ext_binsearch_idx(inode, path + ppos, block); |
| path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); |
| path[ppos].p_depth = i; |
| path[ppos].p_ext = NULL; |
| |
| bh = sb_getblk(inode->i_sb, path[ppos].p_block); |
| if (unlikely(!bh)) |
| goto err; |
| if (!bh_uptodate_or_lock(bh)) { |
| trace_ext4_ext_load_extent(inode, block, |
| path[ppos].p_block); |
| if (bh_submit_read(bh) < 0) { |
| put_bh(bh); |
| goto err; |
| } |
| /* validate the extent entries */ |
| need_to_validate = 1; |
| } |
| eh = ext_block_hdr(bh); |
| ppos++; |
| if (unlikely(ppos > depth)) { |
| put_bh(bh); |
| EXT4_ERROR_INODE(inode, |
| "ppos %d > depth %d", ppos, depth); |
| goto err; |
| } |
| path[ppos].p_bh = bh; |
| path[ppos].p_hdr = eh; |
| i--; |
| |
| if (need_to_validate && ext4_ext_check(inode, eh, i)) |
| goto err; |
| } |
| |
| path[ppos].p_depth = i; |
| path[ppos].p_ext = NULL; |
| path[ppos].p_idx = NULL; |
| |
| /* find extent */ |
| ext4_ext_binsearch(inode, path + ppos, block); |
| /* if not an empty leaf */ |
| if (path[ppos].p_ext) |
| path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); |
| |
| ext4_ext_show_path(inode, path); |
| |
| return path; |
| |
| err: |
| ext4_ext_drop_refs(path); |
| if (alloc) |
| kfree(path); |
| return ERR_PTR(-EIO); |
| } |
| |
| /* |
| * ext4_ext_insert_index: |
| * insert new index [@logical;@ptr] into the block at @curp; |
| * check where to insert: before @curp or after @curp |
| */ |
| static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *curp, |
| int logical, ext4_fsblk_t ptr) |
| { |
| struct ext4_extent_idx *ix; |
| int len, err; |
| |
| err = ext4_ext_get_access(handle, inode, curp); |
| if (err) |
| return err; |
| |
| if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { |
| EXT4_ERROR_INODE(inode, |
| "logical %d == ei_block %d!", |
| logical, le32_to_cpu(curp->p_idx->ei_block)); |
| return -EIO; |
| } |
| |
| if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) |
| >= le16_to_cpu(curp->p_hdr->eh_max))) { |
| EXT4_ERROR_INODE(inode, |
| "eh_entries %d >= eh_max %d!", |
| le16_to_cpu(curp->p_hdr->eh_entries), |
| le16_to_cpu(curp->p_hdr->eh_max)); |
| return -EIO; |
| } |
| |
| if (logical > le32_to_cpu(curp->p_idx->ei_block)) { |
| /* insert after */ |
| ext_debug("insert new index %d after: %llu\n", logical, ptr); |
| ix = curp->p_idx + 1; |
| } else { |
| /* insert before */ |
| ext_debug("insert new index %d before: %llu\n", logical, ptr); |
| ix = curp->p_idx; |
| } |
| |
| len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; |
| BUG_ON(len < 0); |
| if (len > 0) { |
| ext_debug("insert new index %d: " |
| "move %d indices from 0x%p to 0x%p\n", |
| logical, len, ix, ix + 1); |
| memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); |
| } |
| |
| if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { |
| EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); |
| return -EIO; |
| } |
| |
| ix->ei_block = cpu_to_le32(logical); |
| ext4_idx_store_pblock(ix, ptr); |
| le16_add_cpu(&curp->p_hdr->eh_entries, 1); |
| |
| if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { |
| EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); |
| return -EIO; |
| } |
| |
| err = ext4_ext_dirty(handle, inode, curp); |
| ext4_std_error(inode->i_sb, err); |
| |
| return err; |
| } |
| |
| /* |
| * ext4_ext_split: |
| * inserts new subtree into the path, using free index entry |
| * at depth @at: |
| * - allocates all needed blocks (new leaf and all intermediate index blocks) |
| * - makes decision where to split |
| * - moves remaining extents and index entries (right to the split point) |
| * into the newly allocated blocks |
| * - initializes subtree |
| */ |
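| /* |
| * For example, with depth = 3 and a free index slot at at = 1, the |
| * code below allocates depth - at = 2 new blocks (one new leaf plus |
| * one intermediate index block) and then inserts a single index entry |
| * pointing at the new subtree into the existing node at depth 1. |
| */ |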
| static int ext4_ext_split(handle_t *handle, struct inode *inode, |
| unsigned int flags, |
| struct ext4_ext_path *path, |
| struct ext4_extent *newext, int at) |
| { |
| struct buffer_head *bh = NULL; |
| int depth = ext_depth(inode); |
| struct ext4_extent_header *neh; |
| struct ext4_extent_idx *fidx; |
| int i = at, k, m, a; |
| ext4_fsblk_t newblock, oldblock; |
| __le32 border; |
| ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ |
| int err = 0; |
| |
| /* make decision: where to split? */ |
| /* FIXME: now decision is simplest: at current extent */ |
| |
| /* if current leaf will be split, then we should use |
| * border from split point */ |
| if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { |
| EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); |
| return -EIO; |
| } |
| if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { |
| border = path[depth].p_ext[1].ee_block; |
| ext_debug("leaf will be split." |
| " next leaf starts at %d\n", |
| le32_to_cpu(border)); |
| } else { |
| border = newext->ee_block; |
| ext_debug("leaf will be added." |
| " next leaf starts at %d\n", |
| le32_to_cpu(border)); |
| } |
| |
| /* |
| * If an error occurs, we break processing and mark the |
| * filesystem read-only. The index won't be inserted and |
| * the tree will remain in a consistent state. The next |
| * mount will repair the buffers too. |
| */ |
| |
| /* |
| * Get an array to track all allocated blocks. |
| * We need this to handle errors and free the |
| * allocated blocks on failure. |
| */ |
| ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); |
| if (!ablocks) |
| return -ENOMEM; |
| |
| /* allocate all needed blocks */ |
| ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); |
| for (a = 0; a < depth - at; a++) { |
| newblock = ext4_ext_new_meta_block(handle, inode, path, |
| newext, &err, flags); |
| if (newblock == 0) |
| goto cleanup; |
| ablocks[a] = newblock; |
| } |
| |
| /* initialize new leaf */ |
| newblock = ablocks[--a]; |
| if (unlikely(newblock == 0)) { |
| EXT4_ERROR_INODE(inode, "newblock == 0!"); |
| err = -EIO; |
| goto cleanup; |
| } |
| bh = sb_getblk(inode->i_sb, newblock); |
| if (!bh) { |
| err = -EIO; |
| goto cleanup; |
| } |
| lock_buffer(bh); |
| |
| err = ext4_journal_get_create_access(handle, bh); |
| if (err) |
| goto cleanup; |
| |
| neh = ext_block_hdr(bh); |
| neh->eh_entries = 0; |
| neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); |
| neh->eh_magic = EXT4_EXT_MAGIC; |
| neh->eh_depth = 0; |
| |
| /* move remainder of path[depth] to the new leaf */ |
| if (unlikely(path[depth].p_hdr->eh_entries != |
| path[depth].p_hdr->eh_max)) { |
| EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", |
| path[depth].p_hdr->eh_entries, |
| path[depth].p_hdr->eh_max); |
| err = -EIO; |
| goto cleanup; |
| } |
| /* start copy from next extent */ |
| m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; |
| ext4_ext_show_move(inode, path, newblock, depth); |
| if (m) { |
| struct ext4_extent *ex; |
| ex = EXT_FIRST_EXTENT(neh); |
| memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); |
| le16_add_cpu(&neh->eh_entries, m); |
| } |
| |
| set_buffer_uptodate(bh); |
| unlock_buffer(bh); |
| |
| err = ext4_handle_dirty_metadata(handle, inode, bh); |
| if (err) |
| goto cleanup; |
| brelse(bh); |
| bh = NULL; |
| |
| /* correct old leaf */ |
| if (m) { |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| goto cleanup; |
| le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); |
| err = ext4_ext_dirty(handle, inode, path + depth); |
| if (err) |
| goto cleanup; |
| |
| } |
| |
| /* create intermediate indexes */ |
| k = depth - at - 1; |
| if (unlikely(k < 0)) { |
| EXT4_ERROR_INODE(inode, "k %d < 0!", k); |
| err = -EIO; |
| goto cleanup; |
| } |
| if (k) |
| ext_debug("create %d intermediate indices\n", k); |
| /* insert new index into current index block */ |
| /* current depth stored in i var */ |
| i = depth - 1; |
| while (k--) { |
| oldblock = newblock; |
| newblock = ablocks[--a]; |
| bh = sb_getblk(inode->i_sb, newblock); |
| if (!bh) { |
| err = -EIO; |
| goto cleanup; |
| } |
| lock_buffer(bh); |
| |
| err = ext4_journal_get_create_access(handle, bh); |
| if (err) |
| goto cleanup; |
| |
| neh = ext_block_hdr(bh); |
| neh->eh_entries = cpu_to_le16(1); |
| neh->eh_magic = EXT4_EXT_MAGIC; |
| neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); |
| neh->eh_depth = cpu_to_le16(depth - i); |
| fidx = EXT_FIRST_INDEX(neh); |
| fidx->ei_block = border; |
| ext4_idx_store_pblock(fidx, oldblock); |
| |
| ext_debug("int.index at %d (block %llu): %u -> %llu\n", |
| i, newblock, le32_to_cpu(border), oldblock); |
| |
| /* move remainder of path[i] to the new index block */ |
| if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != |
| EXT_LAST_INDEX(path[i].p_hdr))) { |
| EXT4_ERROR_INODE(inode, |
| "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", |
| le32_to_cpu(path[i].p_ext->ee_block)); |
| err = -EIO; |
| goto cleanup; |
| } |
| /* start copy indexes */ |
| m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; |
| ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, |
| EXT_MAX_INDEX(path[i].p_hdr)); |
| ext4_ext_show_move(inode, path, newblock, i); |
| if (m) { |
| memmove(++fidx, path[i].p_idx, |
| sizeof(struct ext4_extent_idx) * m); |
| le16_add_cpu(&neh->eh_entries, m); |
| } |
| set_buffer_uptodate(bh); |
| unlock_buffer(bh); |
| |
| err = ext4_handle_dirty_metadata(handle, inode, bh); |
| if (err) |
| goto cleanup; |
| brelse(bh); |
| bh = NULL; |
| |
| /* correct old index */ |
| if (m) { |
| err = ext4_ext_get_access(handle, inode, path + i); |
| if (err) |
| goto cleanup; |
| le16_add_cpu(&path[i].p_hdr->eh_entries, -m); |
| err = ext4_ext_dirty(handle, inode, path + i); |
| if (err) |
| goto cleanup; |
| } |
| |
| i--; |
| } |
| |
| /* insert new index */ |
| err = ext4_ext_insert_index(handle, inode, path + at, |
| le32_to_cpu(border), newblock); |
| |
| cleanup: |
| if (bh) { |
| if (buffer_locked(bh)) |
| unlock_buffer(bh); |
| brelse(bh); |
| } |
| |
| if (err) { |
| /* free all allocated blocks in error case */ |
| for (i = 0; i < depth; i++) { |
| if (!ablocks[i]) |
| continue; |
| ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, |
| EXT4_FREE_BLOCKS_METADATA); |
| } |
| } |
| kfree(ablocks); |
| |
| return err; |
| } |
| |
| /* |
| * ext4_ext_grow_indepth: |
| * implements tree growing procedure: |
| * - allocates new block |
| * - moves top-level data (index block or leaf) into the new block |
| * - initializes new top-level, creating index that points to the |
| * just created block |
| */ |
| static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, |
| unsigned int flags, |
| struct ext4_extent *newext) |
| { |
| struct ext4_extent_header *neh; |
| struct buffer_head *bh; |
| ext4_fsblk_t newblock; |
| int err = 0; |
| |
| newblock = ext4_ext_new_meta_block(handle, inode, NULL, |
| newext, &err, flags); |
| if (newblock == 0) |
| return err; |
| |
| bh = sb_getblk(inode->i_sb, newblock); |
| if (!bh) { |
| err = -EIO; |
| ext4_std_error(inode->i_sb, err); |
| return err; |
| } |
| lock_buffer(bh); |
| |
| err = ext4_journal_get_create_access(handle, bh); |
| if (err) { |
| unlock_buffer(bh); |
| goto out; |
| } |
| |
| /* move top-level index/leaf into new block */ |
| memmove(bh->b_data, EXT4_I(inode)->i_data, |
| sizeof(EXT4_I(inode)->i_data)); |
| |
| /* set size of new block */ |
| neh = ext_block_hdr(bh); |
| /* old root could have indexes or leaves |
| * so calculate eh_max the right way */ |
| if (ext_depth(inode)) |
| neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); |
| else |
| neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); |
| neh->eh_magic = EXT4_EXT_MAGIC; |
| set_buffer_uptodate(bh); |
| unlock_buffer(bh); |
| |
| err = ext4_handle_dirty_metadata(handle, inode, bh); |
| if (err) |
| goto out; |
| |
| /* Update top-level index: num,max,pointer */ |
| neh = ext_inode_hdr(inode); |
| neh->eh_entries = cpu_to_le16(1); |
| ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); |
| if (neh->eh_depth == 0) { |
| /* Root extent block becomes index block */ |
| neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); |
| EXT_FIRST_INDEX(neh)->ei_block = |
| EXT_FIRST_EXTENT(neh)->ee_block; |
| } |
| ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", |
| le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), |
| le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), |
| ext4_idx_pblock(EXT_FIRST_INDEX(neh))); |
| |
| neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1); |
| ext4_mark_inode_dirty(handle, inode); |
| out: |
| brelse(bh); |
| |
| return err; |
| } |
| |
| /* |
| * ext4_ext_create_new_leaf: |
| * finds a free index slot and adds a new leaf. |
| * if no free index is found, it requests growing the tree in depth. |
| */ |
| static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, |
| unsigned int flags, |
| struct ext4_ext_path *path, |
| struct ext4_extent *newext) |
| { |
| struct ext4_ext_path *curp; |
| int depth, i, err = 0; |
| |
| repeat: |
| i = depth = ext_depth(inode); |
| |
| /* walk up the tree and look for a free index entry */ |
| curp = path + depth; |
| while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { |
| i--; |
| curp--; |
| } |
| |
| /* we use already allocated block for index block, |
| * so subsequent data blocks should be contiguous */ |
| if (EXT_HAS_FREE_INDEX(curp)) { |
| /* if we found index with free entry, then use that |
| * entry: create all needed subtree and add new leaf */ |
| err = ext4_ext_split(handle, inode, flags, path, newext, i); |
| if (err) |
| goto out; |
| |
| /* refill path */ |
| ext4_ext_drop_refs(path); |
| path = ext4_ext_find_extent(inode, |
| (ext4_lblk_t)le32_to_cpu(newext->ee_block), |
| path); |
| if (IS_ERR(path)) |
| err = PTR_ERR(path); |
| } else { |
| /* tree is full, time to grow in depth */ |
| err = ext4_ext_grow_indepth(handle, inode, flags, newext); |
| if (err) |
| goto out; |
| |
| /* refill path */ |
| ext4_ext_drop_refs(path); |
| path = ext4_ext_find_extent(inode, |
| (ext4_lblk_t)le32_to_cpu(newext->ee_block), |
| path); |
| if (IS_ERR(path)) { |
| err = PTR_ERR(path); |
| goto out; |
| } |
| |
| /* |
| * only first (depth 0 -> 1) produces free space; |
| * in all other cases we have to split the grown tree |
| */ |
| depth = ext_depth(inode); |
| if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { |
| /* now we need to split */ |
| goto repeat; |
| } |
| } |
| |
| out: |
| return err; |
| } |
| |
| /* |
| * Search for the closest allocated block to the left of *logical. |
| * Return it at @logical and its physical address at @phys. |
| * If *logical is the smallest allocated block, the function |
| * returns 0 at @phys. |
| * The return value contains 0 (success) or an error code. |
| */ |
| static int ext4_ext_search_left(struct inode *inode, |
| struct ext4_ext_path *path, |
| ext4_lblk_t *logical, ext4_fsblk_t *phys) |
| { |
| struct ext4_extent_idx *ix; |
| struct ext4_extent *ex; |
| int depth, ee_len; |
| |
| if (unlikely(path == NULL)) { |
| EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); |
| return -EIO; |
| } |
| depth = path->p_depth; |
| *phys = 0; |
| |
| if (depth == 0 && path->p_ext == NULL) |
| return 0; |
| |
| /* usually the extent in the path covers blocks smaller |
| * than *logical, but it can be that the extent is the |
| * first one in the file */ |
| |
| ex = path[depth].p_ext; |
| ee_len = ext4_ext_get_actual_len(ex); |
| if (*logical < le32_to_cpu(ex->ee_block)) { |
| if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
| EXT4_ERROR_INODE(inode, |
| "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", |
| *logical, le32_to_cpu(ex->ee_block)); |
| return -EIO; |
| } |
| while (--depth >= 0) { |
| ix = path[depth].p_idx; |
| if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
| EXT4_ERROR_INODE(inode, |
| "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", |
| ix != NULL ? le32_to_cpu(ix->ei_block) : 0, |
| EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? |
| le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, |
| depth); |
| return -EIO; |
| } |
| } |
| return 0; |
| } |
| |
| if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
| EXT4_ERROR_INODE(inode, |
| "logical %d < ee_block %d + ee_len %d!", |
| *logical, le32_to_cpu(ex->ee_block), ee_len); |
| return -EIO; |
| } |
| |
| *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; |
| *phys = ext4_ext_pblock(ex) + ee_len - 1; |
| return 0; |
| } |
| |
| /* |
| * Search for the closest allocated block to the right of *logical. |
| * Return it at @logical and its physical address at @phys. |
| * If *logical is the largest allocated block, the function |
| * returns 0 at @phys. |
| * The return value contains 0 (success) or an error code. |
| */ |
| static int ext4_ext_search_right(struct inode *inode, |
| struct ext4_ext_path *path, |
| ext4_lblk_t *logical, ext4_fsblk_t *phys, |
| struct ext4_extent **ret_ex) |
| { |
| struct buffer_head *bh = NULL; |
| struct ext4_extent_header *eh; |
| struct ext4_extent_idx *ix; |
| struct ext4_extent *ex; |
| ext4_fsblk_t block; |
| int depth; /* Note, NOT eh_depth; depth from top of tree */ |
| int ee_len; |
| |
| if (unlikely(path == NULL)) { |
| EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); |
| return -EIO; |
| } |
| depth = path->p_depth; |
| *phys = 0; |
| |
| if (depth == 0 && path->p_ext == NULL) |
| return 0; |
| |
| /* usually the extent in the path covers blocks smaller |
| * than *logical, but it can be that the extent is the |
| * first one in the file */ |
| |
| ex = path[depth].p_ext; |
| ee_len = ext4_ext_get_actual_len(ex); |
| if (*logical < le32_to_cpu(ex->ee_block)) { |
| if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { |
| EXT4_ERROR_INODE(inode, |
| "first_extent(path[%d].p_hdr) != ex", |
| depth); |
| return -EIO; |
| } |
| while (--depth >= 0) { |
| ix = path[depth].p_idx; |
| if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { |
| EXT4_ERROR_INODE(inode, |
| "ix != EXT_FIRST_INDEX *logical %d!", |
| *logical); |
| return -EIO; |
| } |
| } |
| goto found_extent; |
| } |
| |
| if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { |
| EXT4_ERROR_INODE(inode, |
| "logical %d < ee_block %d + ee_len %d!", |
| *logical, le32_to_cpu(ex->ee_block), ee_len); |
| return -EIO; |
| } |
| |
| if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { |
| /* next allocated block in this leaf */ |
| ex++; |
| goto found_extent; |
| } |
| |
| /* go up and search for index to the right */ |
| while (--depth >= 0) { |
| ix = path[depth].p_idx; |
| if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) |
| goto got_index; |
| } |
| |
| /* we've gone up to the root and found no index to the right */ |
| return 0; |
| |
| got_index: |
| /* we've found index to the right, let's |
| * follow it and find the closest allocated |
| * block to the right */ |
| ix++; |
| block = ext4_idx_pblock(ix); |
| while (++depth < path->p_depth) { |
| bh = sb_bread(inode->i_sb, block); |
| if (bh == NULL) |
| return -EIO; |
| eh = ext_block_hdr(bh); |
| /* subtract from p_depth to get proper eh_depth */ |
| if (ext4_ext_check(inode, eh, path->p_depth - depth)) { |
| put_bh(bh); |
| return -EIO; |
| } |
| ix = EXT_FIRST_INDEX(eh); |
| block = ext4_idx_pblock(ix); |
| put_bh(bh); |
| } |
| |
| bh = sb_bread(inode->i_sb, block); |
| if (bh == NULL) |
| return -EIO; |
| eh = ext_block_hdr(bh); |
| if (ext4_ext_check(inode, eh, path->p_depth - depth)) { |
| put_bh(bh); |
| return -EIO; |
| } |
| ex = EXT_FIRST_EXTENT(eh); |
| found_extent: |
| *logical = le32_to_cpu(ex->ee_block); |
| *phys = ext4_ext_pblock(ex); |
| *ret_ex = ex; |
| if (bh) |
| put_bh(bh); |
| return 0; |
| } |
| |
| /* |
| * ext4_ext_next_allocated_block: |
| * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. |
| * NOTE: it considers block number from index entry as |
| * allocated block. Thus, index entries have to be consistent |
| * with leaves. |
| */ |
| static ext4_lblk_t |
| ext4_ext_next_allocated_block(struct ext4_ext_path *path) |
| { |
| int depth; |
| |
| BUG_ON(path == NULL); |
| depth = path->p_depth; |
| |
| if (depth == 0 && path->p_ext == NULL) |
| return EXT_MAX_BLOCKS; |
| |
| while (depth >= 0) { |
| if (depth == path->p_depth) { |
| /* leaf */ |
| if (path[depth].p_ext && |
| path[depth].p_ext != |
| EXT_LAST_EXTENT(path[depth].p_hdr)) |
| return le32_to_cpu(path[depth].p_ext[1].ee_block); |
| } else { |
| /* index */ |
| if (path[depth].p_idx != |
| EXT_LAST_INDEX(path[depth].p_hdr)) |
| return le32_to_cpu(path[depth].p_idx[1].ei_block); |
| } |
| depth--; |
| } |
| |
| return EXT_MAX_BLOCKS; |
| } |
| |
| /* |
| * ext4_ext_next_leaf_block: |
| * returns first allocated block from next leaf or EXT_MAX_BLOCKS |
| */ |
| static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) |
| { |
| int depth; |
| |
| BUG_ON(path == NULL); |
| depth = path->p_depth; |
| |
| /* a zero-depth tree has no leaf blocks at all */ |
| if (depth == 0) |
| return EXT_MAX_BLOCKS; |
| |
| /* go to index block */ |
| depth--; |
| |
| while (depth >= 0) { |
| if (path[depth].p_idx != |
| EXT_LAST_INDEX(path[depth].p_hdr)) |
| return (ext4_lblk_t) |
| le32_to_cpu(path[depth].p_idx[1].ei_block); |
| depth--; |
| } |
| |
| return EXT_MAX_BLOCKS; |
| } |
| |
| /* |
| * ext4_ext_correct_indexes: |
| * if the leaf gets modified and the modified extent is first in the leaf, |
| * then we have to correct all indexes above. |
| * TODO: do we need to correct the tree in all cases? |
| */ |
| static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path) |
| { |
| struct ext4_extent_header *eh; |
| int depth = ext_depth(inode); |
| struct ext4_extent *ex; |
| __le32 border; |
| int k, err = 0; |
| |
| eh = path[depth].p_hdr; |
| ex = path[depth].p_ext; |
| |
| if (unlikely(ex == NULL || eh == NULL)) { |
| EXT4_ERROR_INODE(inode, |
| "ex %p == NULL or eh %p == NULL", ex, eh); |
| return -EIO; |
| } |
| |
| if (depth == 0) { |
| /* there is no tree at all */ |
| return 0; |
| } |
| |
| if (ex != EXT_FIRST_EXTENT(eh)) { |
| /* we correct tree if first leaf got modified only */ |
| return 0; |
| } |
| |
| /* |
| * TODO: we need correction if the border is smaller than the current one |
| */ |
| k = depth - 1; |
| border = path[depth].p_ext->ee_block; |
| err = ext4_ext_get_access(handle, inode, path + k); |
| if (err) |
| return err; |
| path[k].p_idx->ei_block = border; |
| err = ext4_ext_dirty(handle, inode, path + k); |
| if (err) |
| return err; |
| |
| while (k--) { |
| /* change all left-side indexes */ |
| if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) |
| break; |
| err = ext4_ext_get_access(handle, inode, path + k); |
| if (err) |
| break; |
| path[k].p_idx->ei_block = border; |
| err = ext4_ext_dirty(handle, inode, path + k); |
| if (err) |
| break; |
| } |
| |
| return err; |
| } |
| |
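| /* |
| * Two extents can be merged when they are logically and physically |
| * contiguous, agree on the uninitialized bit, and their combined |
| * length stays within the on-disk limit (EXT_INIT_MAX_LEN for |
| * initialized, EXT_UNINIT_MAX_LEN for uninitialized extents). |
| * For example, 100:500:[0]8 followed by 108:508:[0]8 can merge |
| * into 100:500:[0]16. |
| */ |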
| int |
| ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, |
| struct ext4_extent *ex2) |
| { |
| unsigned short ext1_ee_len, ext2_ee_len, max_len; |
| |
| /* |
| * Make sure that either both extents are uninitialized, or |
| * both are _not_. |
| */ |
| if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) |
| return 0; |
| |
| if (ext4_ext_is_uninitialized(ex1)) |
| max_len = EXT_UNINIT_MAX_LEN; |
| else |
| max_len = EXT_INIT_MAX_LEN; |
| |
| ext1_ee_len = ext4_ext_get_actual_len(ex1); |
| ext2_ee_len = ext4_ext_get_actual_len(ex2); |
| |
| if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != |
| le32_to_cpu(ex2->ee_block)) |
| return 0; |
| |
| /* |
| * To allow future support for preallocated extents to be added |
| * as an RO_COMPAT feature, refuse to merge two extents if |
| * this can result in the top bit of ee_len being set. |
| */ |
| if (ext1_ee_len + ext2_ee_len > max_len) |
| return 0; |
| #ifdef AGGRESSIVE_TEST |
| if (ext1_ee_len >= 4) |
| return 0; |
| #endif |
| |
| if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) |
| return 1; |
| return 0; |
| } |
| |
| /* |
| * This function tries to merge the "ex" extent to the next extent in the tree. |
| * It always tries to merge towards the right. If you want to merge towards |
| * the left, pass "ex - 1" as the argument instead of "ex". |
| * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns |
| * 1 if they got merged. |
| */ |
| static int ext4_ext_try_to_merge_right(struct inode *inode, |
| struct ext4_ext_path *path, |
| struct ext4_extent *ex) |
| { |
| struct ext4_extent_header *eh; |
| unsigned int depth, len; |
| int merge_done = 0; |
| int uninitialized = 0; |
| |
| depth = ext_depth(inode); |
| BUG_ON(path[depth].p_hdr == NULL); |
| eh = path[depth].p_hdr; |
| |
| while (ex < EXT_LAST_EXTENT(eh)) { |
| if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) |
| break; |
| /* merge with next extent! */ |
| if (ext4_ext_is_uninitialized(ex)) |
| uninitialized = 1; |
| ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
| + ext4_ext_get_actual_len(ex + 1)); |
| if (uninitialized) |
| ext4_ext_mark_uninitialized(ex); |
| |
| if (ex + 1 < EXT_LAST_EXTENT(eh)) { |
| len = (EXT_LAST_EXTENT(eh) - ex - 1) |
| * sizeof(struct ext4_extent); |
| memmove(ex + 1, ex + 2, len); |
| } |
| le16_add_cpu(&eh->eh_entries, -1); |
| merge_done = 1; |
| WARN_ON(eh->eh_entries == 0); |
| if (!eh->eh_entries) |
| EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); |
| } |
| |
| return merge_done; |
| } |
| |
| /* |
| * This function tries to merge the @ex extent with its neighbours in the tree. |
| * It returns 1 if @ex was merged with the extent to its right, and 0 |
| * otherwise (including when @ex was absorbed into its left neighbour). |
| */ |
| static int ext4_ext_try_to_merge(struct inode *inode, |
| struct ext4_ext_path *path, |
| struct ext4_extent *ex) { |
| struct ext4_extent_header *eh; |
| unsigned int depth; |
| int merge_done = 0; |
| int ret = 0; |
| |
| depth = ext_depth(inode); |
| BUG_ON(path[depth].p_hdr == NULL); |
| eh = path[depth].p_hdr; |
| |
| if (ex > EXT_FIRST_EXTENT(eh)) |
| merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); |
| |
| if (!merge_done) |
| ret = ext4_ext_try_to_merge_right(inode, path, ex); |
| |
| return ret; |
| } |
| |
| /* |
| * check if a portion of the "newext" extent overlaps with an |
| * existing extent. |
| * |
| * If there is an overlap discovered, it updates the length of the newext |
| * such that there will be no overlap, and then returns 1. |
| * If there is no overlap found, it returns 0. |
| */ |
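| /* |
| * For example (with s_cluster_ratio == 1): if newext covers blocks |
| * [100, 150) and the next allocated extent starts at block 120, then |
| * newext->ee_len is trimmed to 20 so that newext ends where the |
| * existing extent begins, and 1 is returned. |
| */ |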
| static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, |
| struct inode *inode, |
| struct ext4_extent *newext, |
| struct ext4_ext_path *path) |
| { |
| ext4_lblk_t b1, b2; |
| unsigned int depth, len1; |
| unsigned int ret = 0; |
| |
| b1 = le32_to_cpu(newext->ee_block); |
| len1 = ext4_ext_get_actual_len(newext); |
| depth = ext_depth(inode); |
| if (!path[depth].p_ext) |
| goto out; |
| b2 = le32_to_cpu(path[depth].p_ext->ee_block); |
| b2 &= ~(sbi->s_cluster_ratio - 1); |
| |
| /* |
| * get the next allocated block if the extent in the path |
| * is before the requested block(s) |
| */ |
| if (b2 < b1) { |
| b2 = ext4_ext_next_allocated_block(path); |
| if (b2 == EXT_MAX_BLOCKS) |
| goto out; |
| b2 &= ~(sbi->s_cluster_ratio - 1); |
| } |
| |
| /* check for wrap through zero on extent logical start block*/ |
| if (b1 + len1 < b1) { |
| len1 = EXT_MAX_BLOCKS - b1; |
| newext->ee_len = cpu_to_le16(len1); |
| ret = 1; |
| } |
| |
| /* check for overlap */ |
| if (b1 + len1 > b2) { |
| newext->ee_len = cpu_to_le16(b2 - b1); |
| ret = 1; |
| } |
| out: |
| return ret; |
| } |
| |
| /* |
| * ext4_ext_insert_extent: |
| * tries to merge the requested extent into an existing extent or |
| * inserts the requested extent as a new one into the tree, |
| * creating a new leaf in the no-space case. |
| */ |
| int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path, |
| struct ext4_extent *newext, int flag) |
| { |
| struct ext4_extent_header *eh; |
| struct ext4_extent *ex, *fex; |
| struct ext4_extent *nearex; /* nearest extent */ |
| struct ext4_ext_path *npath = NULL; |
| int depth, len, err; |
| ext4_lblk_t next; |
| unsigned uninitialized = 0; |
| int flags = 0; |
| |
| if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { |
| EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); |
| return -EIO; |
| } |
| depth = ext_depth(inode); |
| ex = path[depth].p_ext; |
| if (unlikely(path[depth].p_hdr == NULL)) { |
| EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); |
| return -EIO; |
| } |
| |
| /* try to insert block into found extent and return */ |
| if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) |
| && ext4_can_extents_be_merged(inode, ex, newext)) { |
| ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n", |
| ext4_ext_is_uninitialized(newext), |
| ext4_ext_get_actual_len(newext), |
| le32_to_cpu(ex->ee_block), |
| ext4_ext_is_uninitialized(ex), |
| ext4_ext_get_actual_len(ex), |
| ext4_ext_pblock(ex)); |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| return err; |
| |
| /* |
| * ext4_can_extents_be_merged should have checked that either |
| * both extents are uninitialized, or both aren't. Thus we |
| * need to check only one of them here. |
| */ |
| if (ext4_ext_is_uninitialized(ex)) |
| uninitialized = 1; |
| ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) |
| + ext4_ext_get_actual_len(newext)); |
| if (uninitialized) |
| ext4_ext_mark_uninitialized(ex); |
| eh = path[depth].p_hdr; |
| nearex = ex; |
| goto merge; |
| } |
| |
| depth = ext_depth(inode); |
| eh = path[depth].p_hdr; |
| if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) |
| goto has_space; |
| |
| /* perhaps the next leaf has space for us? */ |
| fex = EXT_LAST_EXTENT(eh); |
| next = EXT_MAX_BLOCKS; |
| if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) |
| next = ext4_ext_next_leaf_block(path); |
| if (next != EXT_MAX_BLOCKS) { |
| ext_debug("next leaf block - %u\n", next); |
| BUG_ON(npath != NULL); |
| npath = ext4_ext_find_extent(inode, next, NULL); |
| if (IS_ERR(npath)) |
| return PTR_ERR(npath); |
| BUG_ON(npath->p_depth != path->p_depth); |
| eh = npath[depth].p_hdr; |
| if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { |
| ext_debug("next leaf isn't full(%d)\n", |
| le16_to_cpu(eh->eh_entries)); |
| path = npath; |
| goto has_space; |
| } |
| ext_debug("next leaf has no free space(%d,%d)\n", |
| le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); |
| } |
| |
| /* |
| * There is no free space in the found leaf. |
| * We are going to add a new leaf to the tree. |
| */ |
| if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) |
| flags = EXT4_MB_USE_ROOT_BLOCKS; |
| err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext); |
| if (err) |
| goto cleanup; |
| depth = ext_depth(inode); |
| eh = path[depth].p_hdr; |
| |
| has_space: |
| nearex = path[depth].p_ext; |
| |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| goto cleanup; |
| |
| if (!nearex) { |
| /* there is no extent in this leaf, create first one */ |
| ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", |
| le32_to_cpu(newext->ee_block), |
| ext4_ext_pblock(newext), |
| ext4_ext_is_uninitialized(newext), |
| ext4_ext_get_actual_len(newext)); |
| nearex = EXT_FIRST_EXTENT(eh); |
| } else { |
| if (le32_to_cpu(newext->ee_block) |
| > le32_to_cpu(nearex->ee_block)) { |
| /* Insert after */ |
| ext_debug("insert %u:%llu:[%d]%d before: " |
| "nearest %p\n", |
| le32_to_cpu(newext->ee_block), |
| ext4_ext_pblock(newext), |
| ext4_ext_is_uninitialized(newext), |
| ext4_ext_get_actual_len(newext), |
| nearex); |
| nearex++; |
| } else { |
| /* Insert before */ |
| BUG_ON(newext->ee_block == nearex->ee_block); |
| ext_debug("insert %u:%llu:[%d]%d after: " |
| "nearest %p\n", |
| le32_to_cpu(newext->ee_block), |
| ext4_ext_pblock(newext), |
| ext4_ext_is_uninitialized(newext), |
| ext4_ext_get_actual_len(newext), |
| nearex); |
| } |
| len = EXT_LAST_EXTENT(eh) - nearex + 1; |
| if (len > 0) { |
| ext_debug("insert %u:%llu:[%d]%d: " |
| "move %d extents from 0x%p to 0x%p\n", |
| le32_to_cpu(newext->ee_block), |
| ext4_ext_pblock(newext), |
| ext4_ext_is_uninitialized(newext), |
| ext4_ext_get_actual_len(newext), |
| len, nearex, nearex + 1); |
| memmove(nearex + 1, nearex, |
| len * sizeof(struct ext4_extent)); |
| } |
| } |
| |
| le16_add_cpu(&eh->eh_entries, 1); |
| path[depth].p_ext = nearex; |
| nearex->ee_block = newext->ee_block; |
| ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); |
| nearex->ee_len = newext->ee_len; |
| |
| merge: |
| /* try to merge the new extent with its left and right |
| * neighbours (ext4_ext_try_to_merge attempts both directions) */ |
| if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) |
| ext4_ext_try_to_merge(inode, path, nearex); |
| |
| /* time to correct all indexes above */ |
| err = ext4_ext_correct_indexes(handle, inode, path); |
| if (err) |
| goto cleanup; |
| |
| err = ext4_ext_dirty(handle, inode, path + depth); |
| |
| cleanup: |
| if (npath) { |
| ext4_ext_drop_refs(npath); |
| kfree(npath); |
| } |
| ext4_ext_invalidate_cache(inode); |
| return err; |
| } |
| |
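| /* |
| * ext4_ext_walk_space: |
| * walk the range [@block, @block + @num) and invoke @func once for |
| * each extent found and once for each gap between extents, passing an |
| * ext4_ext_cache that describes the region. @func may return |
| * EXT_BREAK to stop the walk, EXT_REPEAT to revisit the same region, |
| * or a negative error code. |
| */ |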
| static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, |
| ext4_lblk_t num, ext_prepare_callback func, |
| void *cbdata) |
| { |
| struct ext4_ext_path *path = NULL; |
| struct ext4_ext_cache cbex; |
| struct ext4_extent *ex; |
| ext4_lblk_t next, start = 0, end = 0; |
| ext4_lblk_t last = block + num; |
| int depth, exists, err = 0; |
| |
| BUG_ON(func == NULL); |
| BUG_ON(inode == NULL); |
| |
| while (block < last && block != EXT_MAX_BLOCKS) { |
| num = last - block; |
| /* find extent for this block */ |
| down_read(&EXT4_I(inode)->i_data_sem); |
| path = ext4_ext_find_extent(inode, block, path); |
| up_read(&EXT4_I(inode)->i_data_sem); |
| if (IS_ERR(path)) { |
| err = PTR_ERR(path); |
| path = NULL; |
| break; |
| } |
| |
| depth = ext_depth(inode); |
| if (unlikely(path[depth].p_hdr == NULL)) { |
| EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); |
| err = -EIO; |
| break; |
| } |
| ex = path[depth].p_ext; |
| next = ext4_ext_next_allocated_block(path); |
| |
| exists = 0; |
| if (!ex) { |
| /* there is no extent yet, so try to allocate |
| * all requested space */ |
| start = block; |
| end = block + num; |
| } else if (le32_to_cpu(ex->ee_block) > block) { |
| /* need to allocate space before found extent */ |
| start = block; |
| end = le32_to_cpu(ex->ee_block); |
| if (block + num < end) |
| end = block + num; |
| } else if (block >= le32_to_cpu(ex->ee_block) |
| + ext4_ext_get_actual_len(ex)) { |
| /* need to allocate space after found extent */ |
| start = block; |
| end = block + num; |
| if (end >= next) |
| end = next; |
| } else if (block >= le32_to_cpu(ex->ee_block)) { |
| /* |
| * some part of requested space is covered |
| * by found extent |
| */ |
| start = block; |
| end = le32_to_cpu(ex->ee_block) |
| + ext4_ext_get_actual_len(ex); |
| if (block + num < end) |
| end = block + num; |
| exists = 1; |
| } else { |
| BUG(); |
| } |
| BUG_ON(end <= start); |
| |
| if (!exists) { |
| cbex.ec_block = start; |
| cbex.ec_len = end - start; |
| cbex.ec_start = 0; |
| } else { |
| cbex.ec_block = le32_to_cpu(ex->ee_block); |
| cbex.ec_len = ext4_ext_get_actual_len(ex); |
| cbex.ec_start = ext4_ext_pblock(ex); |
| } |
| |
| if (unlikely(cbex.ec_len == 0)) { |
| EXT4_ERROR_INODE(inode, "cbex.ec_len == 0"); |
| err = -EIO; |
| break; |
| } |
| err = func(inode, next, &cbex, ex, cbdata); |
| ext4_ext_drop_refs(path); |
| |
| if (err < 0) |
| break; |
| |
| if (err == EXT_REPEAT) |
| continue; |
| else if (err == EXT_BREAK) { |
| err = 0; |
| break; |
| } |
| |
| if (ext_depth(inode) != depth) { |
| /* depth was changed. we have to realloc path */ |
| kfree(path); |
| path = NULL; |
| } |
| |
| block = cbex.ec_block + cbex.ec_len; |
| } |
| |
| if (path) { |
| ext4_ext_drop_refs(path); |
| kfree(path); |
| } |
| |
| return err; |
| } |
| |
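| /* |
| * ext4_ext_put_in_cache: |
| * cache the mapping [@block, @block + @len) -> @start under |
| * i_block_reservation_lock; a @start of 0 is used by |
| * ext4_ext_put_gap_in_cache() to record a hole. |
| */ |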
| static void |
| ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, |
| __u32 len, ext4_fsblk_t start) |
| { |
| struct ext4_ext_cache *cex; |
| BUG_ON(len == 0); |
| spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
| trace_ext4_ext_put_in_cache(inode, block, len, start); |
| cex = &EXT4_I(inode)->i_cached_extent; |
| cex->ec_block = block; |
| cex->ec_len = len; |
| cex->ec_start = start; |
| spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
| } |
| |
| /* |
| * ext4_ext_put_gap_in_cache: |
| * calculate boundaries of the gap that the requested block fits into |
| * and cache this gap |
| */ |
| static void |
| ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, |
| ext4_lblk_t block) |
| { |
| int depth = ext_depth(inode); |
| unsigned long len; |
| ext4_lblk_t lblock; |
| struct ext4_extent *ex; |
| |
| ex = path[depth].p_ext; |
| if (ex == NULL) { |
| /* there is no extent yet, so gap is [0;-] */ |
| lblock = 0; |
| len = EXT_MAX_BLOCKS; |
| ext_debug("cache gap(whole file):"); |
| } else if (block < le32_to_cpu(ex->ee_block)) { |
| lblock = block; |
| len = le32_to_cpu(ex->ee_block) - block; |
| ext_debug("cache gap(before): %u [%u:%u]", |
| block, |
| le32_to_cpu(ex->ee_block), |
| ext4_ext_get_actual_len(ex)); |
| } else if (block >= le32_to_cpu(ex->ee_block) |
| + ext4_ext_get_actual_len(ex)) { |
| ext4_lblk_t next; |
| lblock = le32_to_cpu(ex->ee_block) |
| + ext4_ext_get_actual_len(ex); |
| |
| next = ext4_ext_next_allocated_block(path); |
| ext_debug("cache gap(after): [%u:%u] %u", |
| le32_to_cpu(ex->ee_block), |
| ext4_ext_get_actual_len(ex), |
| block); |
| BUG_ON(next == lblock); |
| len = next - lblock; |
| } else { |
| lblock = len = 0; |
| BUG(); |
| } |
| |
| ext_debug(" -> %u:%lu\n", lblock, len); |
| ext4_ext_put_in_cache(inode, lblock, len, 0); |
| } |
| |
| /* |
| * ext4_ext_check_cache() |
| * Checks to see if the given block is in the cache. |
| * If it is, the cached extent is stored in the given |
| * cache extent pointer. If the cached extent is a hole, |
| * this routine should be used instead of |
| * ext4_ext_in_cache if the calling function needs to |
| * know the size of the hole. |
| * |
| * @inode: The file's inode |
| * @block: The block to look for in the cache |
| * @ex: Pointer where the cached extent will be stored |
| * if it contains block |
| * |
| * Return 0 if cache is invalid; 1 if the cache is valid |
| */ |
| static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block, |
| struct ext4_ext_cache *ex){ |
| struct ext4_ext_cache *cex; |
| struct ext4_sb_info *sbi; |
| int ret = 0; |
| |
| /* |
| * We borrow i_block_reservation_lock to protect i_cached_extent |
| */ |
| spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
| cex = &EXT4_I(inode)->i_cached_extent; |
| sbi = EXT4_SB(inode->i_sb); |
| |
| /* does the cache have valid data? */ |
| if (cex->ec_len == 0) |
| goto errout; |
| |
| if (in_range(block, cex->ec_block, cex->ec_len)) { |
| memcpy(ex, cex, sizeof(struct ext4_ext_cache)); |
| ext_debug("%u cached by %u:%u:%llu\n", |
| block, |
| cex->ec_block, cex->ec_len, cex->ec_start); |
| ret = 1; |
| } |
| errout: |
| trace_ext4_ext_in_cache(inode, block, ret); |
| spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
| return ret; |
| } |
| |
| /* |
| * ext4_ext_in_cache() |
| * Checks to see if the given block is in the cache. |
| * If it is, the cached extent is stored in the given |
| * extent pointer. |
| * |
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
| * |
| * Return 0 if cache is invalid; 1 if the cache is valid |
| */ |
| static int |
| ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, |
| struct ext4_extent *ex) |
| { |
| struct ext4_ext_cache cex; |
| int ret = 0; |
| |
| if (ext4_ext_check_cache(inode, block, &cex)) { |
| ex->ee_block = cpu_to_le32(cex.ec_block); |
| ext4_ext_store_pblock(ex, cex.ec_start); |
| ex->ee_len = cpu_to_le16(cex.ec_len); |
| ret = 1; |
| } |
| |
| return ret; |
| } |
| |
| |
| /* |
| * ext4_ext_rm_idx: |
| * removes index from the index block. |
| */ |
| static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path) |
| { |
| int err; |
| ext4_fsblk_t leaf; |
| |
| /* free index block */ |
| path--; |
| leaf = ext4_idx_pblock(path->p_idx); |
| if (unlikely(path->p_hdr->eh_entries == 0)) { |
| EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); |
| return -EIO; |
| } |
| err = ext4_ext_get_access(handle, inode, path); |
| if (err) |
| return err; |
| |
| if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { |
| int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; |
| len *= sizeof(struct ext4_extent_idx); |
| memmove(path->p_idx, path->p_idx + 1, len); |
| } |
| |
| le16_add_cpu(&path->p_hdr->eh_entries, -1); |
| err = ext4_ext_dirty(handle, inode, path); |
| if (err) |
| return err; |
| ext_debug("index is empty, remove it, free block %llu\n", leaf); |
| trace_ext4_ext_rm_idx(inode, leaf); |
| |
| ext4_free_blocks(handle, inode, NULL, leaf, 1, |
| EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); |
| return err; |
| } |
| |
| /* |
| * ext4_ext_calc_credits_for_single_extent: |
 * This routine returns the max. credits needed to insert an extent
 * into the extent tree.
 * When an actual path is passed, the caller should calculate the
 * credits under i_data_sem.
| */ |
| int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, |
| struct ext4_ext_path *path) |
| { |
| if (path) { |
| int depth = ext_depth(inode); |
| int ret = 0; |
| |
		/* is there still room in the leaf? */
| if (le16_to_cpu(path[depth].p_hdr->eh_entries) |
| < le16_to_cpu(path[depth].p_hdr->eh_max)) { |
| |
| /* |
| * There are some space in the leaf tree, no |
| * need to account for leaf block credit |
| * |
| * bitmaps and block group descriptor blocks |
| * and other metadata blocks still need to be |
| * accounted. |
| */ |
| /* 1 bitmap, 1 block group descriptor */ |
| ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); |
| return ret; |
| } |
| } |
| |
| return ext4_chunk_trans_blocks(inode, nrblocks); |
| } |
| |
| /* |
| * How many index/leaf blocks need to change/allocate to modify nrblocks? |
| * |
| * if nrblocks are fit in a single extent (chunk flag is 1), then |
| * in the worse case, each tree level index/leaf need to be changed |
| * if the tree split due to insert a new extent, then the old tree |
| * index/leaf need to be updated too |
| * |
| * If the nrblocks are discontiguous, they could cause |
| * the whole tree split more than once, but this is really rare. |
| */ |
| int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) |
| { |
| int index; |
| int depth = ext_depth(inode); |
| |
| if (chunk) |
| index = depth * 2; |
| else |
| index = depth * 3; |
| |
| return index; |
| } |
| |
| static int ext4_remove_blocks(handle_t *handle, struct inode *inode, |
| struct ext4_extent *ex, |
| ext4_fsblk_t *partial_cluster, |
| ext4_lblk_t from, ext4_lblk_t to) |
| { |
| struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| unsigned short ee_len = ext4_ext_get_actual_len(ex); |
| ext4_fsblk_t pblk; |
| int flags = EXT4_FREE_BLOCKS_FORGET; |
| |
| if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) |
| flags |= EXT4_FREE_BLOCKS_METADATA; |
| /* |
| * For bigalloc file systems, we never free a partial cluster |
| * at the beginning of the extent. Instead, we make a note |
| * that we tried freeing the cluster, and check to see if we |
| * need to free it on a subsequent call to ext4_remove_blocks, |
| * or at the end of the ext4_truncate() operation. |
| */ |
| flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; |
| |
| trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); |
| /* |
| * If we have a partial cluster, and it's different from the |
| * cluster of the last block, we need to explicitly free the |
| * partial cluster here. |
| */ |
| pblk = ext4_ext_pblock(ex) + ee_len - 1; |
| if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) { |
| ext4_free_blocks(handle, inode, NULL, |
| EXT4_C2B(sbi, *partial_cluster), |
| sbi->s_cluster_ratio, flags); |
| *partial_cluster = 0; |
| } |
| |
| #ifdef EXTENTS_STATS |
| { |
| struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| spin_lock(&sbi->s_ext_stats_lock); |
| sbi->s_ext_blocks += ee_len; |
| sbi->s_ext_extents++; |
| if (ee_len < sbi->s_ext_min) |
| sbi->s_ext_min = ee_len; |
| if (ee_len > sbi->s_ext_max) |
| sbi->s_ext_max = ee_len; |
| if (ext_depth(inode) > sbi->s_depth_max) |
| sbi->s_depth_max = ext_depth(inode); |
| spin_unlock(&sbi->s_ext_stats_lock); |
| } |
| #endif |
| if (from >= le32_to_cpu(ex->ee_block) |
| && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { |
| /* tail removal */ |
| ext4_lblk_t num; |
| |
| num = le32_to_cpu(ex->ee_block) + ee_len - from; |
| pblk = ext4_ext_pblock(ex) + ee_len - num; |
| ext_debug("free last %u blocks starting %llu\n", num, pblk); |
| ext4_free_blocks(handle, inode, NULL, pblk, num, flags); |
| /* |
| * If the block range to be freed didn't start at the |
| * beginning of a cluster, and we removed the entire |
| * extent, save the partial cluster here, since we |
| * might need to delete if we determine that the |
| * truncate operation has removed all of the blocks in |
| * the cluster. |
| */ |
| if (pblk & (sbi->s_cluster_ratio - 1) && |
| (ee_len == num)) |
| *partial_cluster = EXT4_B2C(sbi, pblk); |
| else |
| *partial_cluster = 0; |
| } else if (from == le32_to_cpu(ex->ee_block) |
| && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { |
| /* head removal */ |
| ext4_lblk_t num; |
| ext4_fsblk_t start; |
| |
| num = to - from; |
| start = ext4_ext_pblock(ex); |
| |
| ext_debug("free first %u blocks starting %llu\n", num, start); |
| ext4_free_blocks(handle, inode, NULL, start, num, flags); |
| |
| } else { |
| printk(KERN_INFO "strange request: removal(2) " |
| "%u-%u from %u:%u\n", |
| from, to, le32_to_cpu(ex->ee_block), ee_len); |
| } |
| return 0; |
| } |
| |
| |
| /* |
| * ext4_ext_rm_leaf() Removes the extents associated with the |
| * blocks appearing between "start" and "end", and splits the extents |
| * if "start" and "end" appear in the same extent |
| * |
| * @handle: The journal handle |
 * @inode: The file's inode
| * @path: The path to the leaf |
| * @start: The first block to remove |
| * @end: The last block to remove |
| */ |
| static int |
| ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, |
| struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster, |
| ext4_lblk_t start, ext4_lblk_t end) |
| { |
| struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| int err = 0, correct_index = 0; |
| int depth = ext_depth(inode), credits; |
| struct ext4_extent_header *eh; |
| ext4_lblk_t a, b; |
| unsigned num; |
| ext4_lblk_t ex_ee_block; |
| unsigned short ex_ee_len; |
| unsigned uninitialized = 0; |
| struct ext4_extent *ex; |
| |
	/* the header must already have been checked in ext4_ext_remove_space() */
| ext_debug("truncate since %u in leaf\n", start); |
| if (!path[depth].p_hdr) |
| path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); |
| eh = path[depth].p_hdr; |
| if (unlikely(path[depth].p_hdr == NULL)) { |
| EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); |
| return -EIO; |
| } |
| /* find where to start removing */ |
| ex = EXT_LAST_EXTENT(eh); |
| |
| ex_ee_block = le32_to_cpu(ex->ee_block); |
| ex_ee_len = ext4_ext_get_actual_len(ex); |
| |
| trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); |
| |
| while (ex >= EXT_FIRST_EXTENT(eh) && |
| ex_ee_block + ex_ee_len > start) { |
| |
| if (ext4_ext_is_uninitialized(ex)) |
| uninitialized = 1; |
| else |
| uninitialized = 0; |
| |
| ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, |
| uninitialized, ex_ee_len); |
| path[depth].p_ext = ex; |
| |
| a = ex_ee_block > start ? ex_ee_block : start; |
| b = ex_ee_block+ex_ee_len - 1 < end ? |
| ex_ee_block+ex_ee_len - 1 : end; |
| |
| ext_debug(" border %u:%u\n", a, b); |
| |
| /* If this extent is beyond the end of the hole, skip it */ |
| if (end <= ex_ee_block) { |
| ex--; |
| ex_ee_block = le32_to_cpu(ex->ee_block); |
| ex_ee_len = ext4_ext_get_actual_len(ex); |
| continue; |
| } else if (b != ex_ee_block + ex_ee_len - 1) { |
| EXT4_ERROR_INODE(inode," bad truncate %u:%u\n", |
| start, end); |
| err = -EIO; |
| goto out; |
| } else if (a != ex_ee_block) { |
| /* remove tail of the extent */ |
| num = a - ex_ee_block; |
| } else { |
| /* remove whole extent: excellent! */ |
| num = 0; |
| } |
| /* |
| * 3 for leaf, sb, and inode plus 2 (bmap and group |
| * descriptor) for each block group; assume two block |
| * groups plus ex_ee_len/blocks_per_block_group for |
| * the worst case |
| */ |
| credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); |
| if (ex == EXT_FIRST_EXTENT(eh)) { |
| correct_index = 1; |
| credits += (ext_depth(inode)) + 1; |
| } |
| credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); |
| |
| err = ext4_ext_truncate_extend_restart(handle, inode, credits); |
| if (err) |
| goto out; |
| |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| goto out; |
| |
| err = ext4_remove_blocks(handle, inode, ex, partial_cluster, |
| a, b); |
| if (err) |
| goto out; |
| |
| if (num == 0) |
| /* this extent is removed; mark slot entirely unused */ |
| ext4_ext_store_pblock(ex, 0); |
| |
| ex->ee_len = cpu_to_le16(num); |
| /* |
| * Do not mark uninitialized if all the blocks in the |
| * extent have been removed. |
| */ |
| if (uninitialized && num) |
| ext4_ext_mark_uninitialized(ex); |
| /* |
| * If the extent was completely released, |
| * we need to remove it from the leaf |
| */ |
| if (num == 0) { |
| if (end != EXT_MAX_BLOCKS - 1) { |
| /* |
| * For hole punching, we need to scoot all the |
| * extents up when an extent is removed so that |
| * we dont have blank extents in the middle |
| */ |
| memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * |
| sizeof(struct ext4_extent)); |
| |
| /* Now get rid of the one at the end */ |
| memset(EXT_LAST_EXTENT(eh), 0, |
| sizeof(struct ext4_extent)); |
| } |
| le16_add_cpu(&eh->eh_entries, -1); |
| } else |
| *partial_cluster = 0; |
| |
| err = ext4_ext_dirty(handle, inode, path + depth); |
| if (err) |
| goto out; |
| |
| ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, |
| ext4_ext_pblock(ex)); |
| ex--; |
| ex_ee_block = le32_to_cpu(ex->ee_block); |
| ex_ee_len = ext4_ext_get_actual_len(ex); |
| } |
| |
| if (correct_index && eh->eh_entries) |
| err = ext4_ext_correct_indexes(handle, inode, path); |
| |
| /* |
| * If there is still a entry in the leaf node, check to see if |
| * it references the partial cluster. This is the only place |
| * where it could; if it doesn't, we can free the cluster. |
| */ |
| if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) && |
| (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != |
| *partial_cluster)) { |
| int flags = EXT4_FREE_BLOCKS_FORGET; |
| |
| if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) |
| flags |= EXT4_FREE_BLOCKS_METADATA; |
| |
| ext4_free_blocks(handle, inode, NULL, |
| EXT4_C2B(sbi, *partial_cluster), |
| sbi->s_cluster_ratio, flags); |
| *partial_cluster = 0; |
| } |
| |
| /* if this leaf is free, then we should |
| * remove it from index block above */ |
| if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) |
| err = ext4_ext_rm_idx(handle, inode, path + depth); |
| |
| out: |
| return err; |
| } |
| |
| /* |
| * ext4_ext_more_to_rm: |
| * returns 1 if current index has to be freed (even partial) |
| */ |
| static int |
| ext4_ext_more_to_rm(struct ext4_ext_path *path) |
| { |
| BUG_ON(path->p_idx == NULL); |
| |
| if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) |
| return 0; |
| |
| /* |
| * if truncate on deeper level happened, it wasn't partial, |
| * so we have to consider current index for truncation |
| */ |
| if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) |
| return 0; |
| return 1; |
| } |
| |
| static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) |
| { |
| struct super_block *sb = inode->i_sb; |
| int depth = ext_depth(inode); |
| struct ext4_ext_path *path; |
| ext4_fsblk_t partial_cluster = 0; |
| handle_t *handle; |
| int i, err; |
| |
| ext_debug("truncate since %u\n", start); |
| |
	/* probably the first extent we're going to free will be the last in the block */
| handle = ext4_journal_start(inode, depth + 1); |
| if (IS_ERR(handle)) |
| return PTR_ERR(handle); |
| |
| again: |
| ext4_ext_invalidate_cache(inode); |
| |
| trace_ext4_ext_remove_space(inode, start, depth); |
| |
| /* |
| * We start scanning from right side, freeing all the blocks |
| * after i_size and walking into the tree depth-wise. |
| */ |
| depth = ext_depth(inode); |
| path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); |
| if (path == NULL) { |
| ext4_journal_stop(handle); |
| return -ENOMEM; |
| } |
| path[0].p_depth = depth; |
| path[0].p_hdr = ext_inode_hdr(inode); |
| if (ext4_ext_check(inode, path[0].p_hdr, depth)) { |
| err = -EIO; |
| goto out; |
| } |
| i = err = 0; |
| |
| while (i >= 0 && err == 0) { |
| if (i == depth) { |
| /* this is leaf block */ |
| err = ext4_ext_rm_leaf(handle, inode, path, |
| &partial_cluster, start, |
| EXT_MAX_BLOCKS - 1); |
| /* root level has p_bh == NULL, brelse() eats this */ |
| brelse(path[i].p_bh); |
| path[i].p_bh = NULL; |
| i--; |
| continue; |
| } |
| |
| /* this is index block */ |
| if (!path[i].p_hdr) { |
| ext_debug("initialize header\n"); |
| path[i].p_hdr = ext_block_hdr(path[i].p_bh); |
| } |
| |
| if (!path[i].p_idx) { |
| /* this level hasn't been touched yet */ |
| path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); |
| path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; |
| ext_debug("init index ptr: hdr 0x%p, num %d\n", |
| path[i].p_hdr, |
| le16_to_cpu(path[i].p_hdr->eh_entries)); |
| } else { |
			/* we were already here, move on to the next index */
| path[i].p_idx--; |
| } |
| |
| ext_debug("level %d - index, first 0x%p, cur 0x%p\n", |
| i, EXT_FIRST_INDEX(path[i].p_hdr), |
| path[i].p_idx); |
| if (ext4_ext_more_to_rm(path + i)) { |
| struct buffer_head *bh; |
| /* go to the next level */ |
| ext_debug("move to level %d (block %llu)\n", |
| i + 1, ext4_idx_pblock(path[i].p_idx)); |
| memset(path + i + 1, 0, sizeof(*path)); |
| bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); |
| if (!bh) { |
| /* should we reset i_size? */ |
| err = -EIO; |
| break; |
| } |
| if (WARN_ON(i + 1 > depth)) { |
| err = -EIO; |
| break; |
| } |
| if (ext4_ext_check(inode, ext_block_hdr(bh), |
| depth - i - 1)) { |
| err = -EIO; |
| break; |
| } |
| path[i + 1].p_bh = bh; |
| |
| /* save actual number of indexes since this |
| * number is changed at the next iteration */ |
| path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); |
| i++; |
| } else { |
| /* we finished processing this index, go up */ |
| if (path[i].p_hdr->eh_entries == 0 && i > 0) { |
				/* index is empty, remove it;
				 * the handle must already have been prepared
				 * by ext4_ext_rm_leaf() */
| err = ext4_ext_rm_idx(handle, inode, path + i); |
| } |
| /* root level has p_bh == NULL, brelse() eats this */ |
| brelse(path[i].p_bh); |
| path[i].p_bh = NULL; |
| i--; |
| ext_debug("return to level %d\n", i); |
| } |
| } |
| |
| trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster, |
| path->p_hdr->eh_entries); |
| |
| /* If we still have something in the partial cluster and we have removed |
| * even the first extent, then we should free the blocks in the partial |
| * cluster as well. */ |
| if (partial_cluster && path->p_hdr->eh_entries == 0) { |
| int flags = EXT4_FREE_BLOCKS_FORGET; |
| |
| if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) |
| flags |= EXT4_FREE_BLOCKS_METADATA; |
| |
| ext4_free_blocks(handle, inode, NULL, |
| EXT4_C2B(EXT4_SB(sb), partial_cluster), |
| EXT4_SB(sb)->s_cluster_ratio, flags); |
| partial_cluster = 0; |
| } |
| |
| /* TODO: flexible tree reduction should be here */ |
| if (path->p_hdr->eh_entries == 0) { |
| /* |
| * truncate to zero freed all the tree, |
| * so we need to correct eh_depth |
| */ |
| err = ext4_ext_get_access(handle, inode, path); |
| if (err == 0) { |
| ext_inode_hdr(inode)->eh_depth = 0; |
| ext_inode_hdr(inode)->eh_max = |
| cpu_to_le16(ext4_ext_space_root(inode, 0)); |
| err = ext4_ext_dirty(handle, inode, path); |
| } |
| } |
| out: |
| ext4_ext_drop_refs(path); |
| kfree(path); |
| if (err == -EAGAIN) |
| goto again; |
| ext4_journal_stop(handle); |
| |
| return err; |
| } |
| |
| /* |
| * called at mount time |
| */ |
| void ext4_ext_init(struct super_block *sb) |
| { |
| /* |
| * possible initialization would be here |
| */ |
| |
| if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { |
| #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) |
| printk(KERN_INFO "EXT4-fs: file extents enabled"); |
| #ifdef AGGRESSIVE_TEST |
| printk(", aggressive tests"); |
| #endif |
| #ifdef CHECK_BINSEARCH |
| printk(", check binsearch"); |
| #endif |
| #ifdef EXTENTS_STATS |
| printk(", stats"); |
| #endif |
| printk("\n"); |
| #endif |
| #ifdef EXTENTS_STATS |
| spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); |
| EXT4_SB(sb)->s_ext_min = 1 << 30; |
| EXT4_SB(sb)->s_ext_max = 0; |
| #endif |
| } |
| } |
| |
| /* |
| * called at umount time |
| */ |
| void ext4_ext_release(struct super_block *sb) |
| { |
| if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) |
| return; |
| |
| #ifdef EXTENTS_STATS |
| if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { |
| struct ext4_sb_info *sbi = EXT4_SB(sb); |
| printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", |
| sbi->s_ext_blocks, sbi->s_ext_extents, |
| sbi->s_ext_blocks / sbi->s_ext_extents); |
| printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", |
| sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); |
| } |
| #endif |
| } |
| |
| /* FIXME!! we need to try to merge to left or right after zero-out */ |
| static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) |
| { |
| ext4_fsblk_t ee_pblock; |
| unsigned int ee_len; |
| int ret; |
| |
| ee_len = ext4_ext_get_actual_len(ex); |
| ee_pblock = ext4_ext_pblock(ex); |
| |
| ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); |
| if (ret > 0) |
| ret = 0; |
| |
| return ret; |
| } |
| |
| /* |
| * used by extent splitting. |
| */ |
| #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ |
| due to ENOSPC */ |
| #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ |
| #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ |
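
/*
 * Example (illustrative): when ext4_split_extent() below splits an
 * uninitialized extent ahead of a write into its middle, the first
 * split passes EXT4_EXT_MARK_UNINIT1 | EXT4_EXT_MARK_UNINIT2 (both
 * halves stay uninitialized), plus EXT4_EXT_MAY_ZEROOUT when the
 * caller allows a zeroout fallback on ENOSPC.
 */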
| |
| /* |
 * ext4_split_extent_at() splits an extent at a given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flag: indicates whether the extent may be zeroed out if the split
 *              fails, and the states (init or uninit) of the new extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
 * states of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> no split is needed; the extent is merely marked.
 *
 * return 0 on success.
| */ |
| static int ext4_split_extent_at(handle_t *handle, |
| struct inode *inode, |
| struct ext4_ext_path *path, |
| ext4_lblk_t split, |
| int split_flag, |
| int flags) |
| { |
| ext4_fsblk_t newblock; |
| ext4_lblk_t ee_block; |
| struct ext4_extent *ex, newex, orig_ex; |
| struct ext4_extent *ex2 = NULL; |
| unsigned int ee_len, depth; |
| int err = 0; |
| |
| ext_debug("ext4_split_extents_at: inode %lu, logical" |
| "block %llu\n", inode->i_ino, (unsigned long long)split); |
| |
| ext4_ext_show_leaf(inode, path); |
| |
| depth = ext_depth(inode); |
| ex = path[depth].p_ext; |
| ee_block = le32_to_cpu(ex->ee_block); |
| ee_len = ext4_ext_get_actual_len(ex); |
| newblock = split - ee_block + ext4_ext_pblock(ex); |
| |
| BUG_ON(split < ee_block || split >= (ee_block + ee_len)); |
| |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| goto out; |
| |
| if (split == ee_block) { |
| /* |
| * case b: block @split is the block that the extent begins with |
| * then we just change the state of the extent, and splitting |
| * is not needed. |
| */ |
| if (split_flag & EXT4_EXT_MARK_UNINIT2) |
| ext4_ext_mark_uninitialized(ex); |
| else |
| ext4_ext_mark_initialized(ex); |
| |
| if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) |
| ext4_ext_try_to_merge(inode, path, ex); |
| |
| err = ext4_ext_dirty(handle, inode, path + depth); |
| goto out; |
| } |
| |
| /* case a */ |
| memcpy(&orig_ex, ex, sizeof(orig_ex)); |
| ex->ee_len = cpu_to_le16(split - ee_block); |
| if (split_flag & EXT4_EXT_MARK_UNINIT1) |
| ext4_ext_mark_uninitialized(ex); |
| |
| /* |
| * path may lead to new leaf, not to original leaf any more |
| * after ext4_ext_insert_extent() returns, |
| */ |
| err = ext4_ext_dirty(handle, inode, path + depth); |
| if (err) |
| goto fix_extent_len; |
| |
| ex2 = &newex; |
| ex2->ee_block = cpu_to_le32(split); |
| ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); |
| ext4_ext_store_pblock(ex2, newblock); |
| if (split_flag & EXT4_EXT_MARK_UNINIT2) |
| ext4_ext_mark_uninitialized(ex2); |
| |
| err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
| if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { |
| err = ext4_ext_zeroout(inode, &orig_ex); |
| if (err) |
| goto fix_extent_len; |
| /* update the extent length and mark as initialized */ |
| ex->ee_len = cpu_to_le16(ee_len); |
| ext4_ext_try_to_merge(inode, path, ex); |
| err = ext4_ext_dirty(handle, inode, path + depth); |
| goto out; |
| } else if (err) |
| goto fix_extent_len; |
| |
| out: |
| ext4_ext_show_leaf(inode, path); |
| return err; |
| |
| fix_extent_len: |
| ex->ee_len = orig_ex.ee_len; |
| ext4_ext_dirty(handle, inode, path + depth); |
| return err; |
| } |
| |
| /* |
 * ext4_split_extent() splits an extent and marks the extent which is covered
 * by @map as split_flag indicates.
 *
 * It may result in splitting the extent into multiple extents (up to three).
 * There are three possibilities:
 *  a> There is no split required
 *  b> Splits into two extents: the split happens at either end of the extent
 *  c> Splits into three extents: someone is splitting in the middle of the
 *     extent
| * |
| */ |
| static int ext4_split_extent(handle_t *handle, |
| struct inode *inode, |
| struct ext4_ext_path *path, |
| struct ext4_map_blocks *map, |
| int split_flag, |
| int flags) |
| { |
| ext4_lblk_t ee_block; |
| struct ext4_extent *ex; |
| unsigned int ee_len, depth; |
| int err = 0; |
| int uninitialized; |
| int split_flag1, flags1; |
| |
| depth = ext_depth(inode); |
| ex = path[depth].p_ext; |
| ee_block = le32_to_cpu(ex->ee_block); |
| ee_len = ext4_ext_get_actual_len(ex); |
| uninitialized = ext4_ext_is_uninitialized(ex); |
| |
| if (map->m_lblk + map->m_len < ee_block + ee_len) { |
| split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? |
| EXT4_EXT_MAY_ZEROOUT : 0; |
| flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; |
| if (uninitialized) |
| split_flag1 |= EXT4_EXT_MARK_UNINIT1 | |
| EXT4_EXT_MARK_UNINIT2; |
| err = ext4_split_extent_at(handle, inode, path, |
| map->m_lblk + map->m_len, split_flag1, flags1); |
| if (err) |
| goto out; |
| } |
| |
| ext4_ext_drop_refs(path); |
| path = ext4_ext_find_extent(inode, map->m_lblk, path); |
| if (IS_ERR(path)) |
| return PTR_ERR(path); |
| |
| if (map->m_lblk >= ee_block) { |
| split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? |
| EXT4_EXT_MAY_ZEROOUT : 0; |
| if (uninitialized) |
| split_flag1 |= EXT4_EXT_MARK_UNINIT1; |
| if (split_flag & EXT4_EXT_MARK_UNINIT2) |
| split_flag1 |= EXT4_EXT_MARK_UNINIT2; |
| err = ext4_split_extent_at(handle, inode, path, |
| map->m_lblk, split_flag1, flags); |
| if (err) |
| goto out; |
| } |
| |
| ext4_ext_show_leaf(inode, path); |
| out: |
| return err ? err : map->m_len; |
| } |
| |
| #define EXT4_EXT_ZERO_LEN 7 |
| /* |
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *  a> There is no split required: the entire extent should be initialized
 *  b> Splits into two extents: the write happens at either end of the extent
 *  c> Splits into three extents: someone is writing in the middle of the
 *     extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is uninitialized.
 *  - The extent pointed to by 'path' contains a superset
 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - the returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
| */ |
| static int ext4_ext_convert_to_initialized(handle_t *handle, |
| struct inode *inode, |
| struct ext4_map_blocks *map, |
| struct ext4_ext_path *path) |
| { |
| struct ext4_extent_header *eh; |
| struct ext4_map_blocks split_map; |
| struct ext4_extent zero_ex; |
| struct ext4_extent *ex; |
| ext4_lblk_t ee_block, eof_block; |
| unsigned int ee_len, depth; |
| int allocated; |
| int err = 0; |
| int split_flag = 0; |
| |
| ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" |
| "block %llu, max_blocks %u\n", inode->i_ino, |
| (unsigned long long)map->m_lblk, map->m_len); |
| |
| eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> |
| inode->i_sb->s_blocksize_bits; |
| if (eof_block < map->m_lblk + map->m_len) |
| eof_block = map->m_lblk + map->m_len; |
| |
| depth = ext_depth(inode); |
| eh = path[depth].p_hdr; |
| ex = path[depth].p_ext; |
| ee_block = le32_to_cpu(ex->ee_block); |
| ee_len = ext4_ext_get_actual_len(ex); |
| allocated = ee_len - (map->m_lblk - ee_block); |
| |
| trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); |
| |
| /* Pre-conditions */ |
| BUG_ON(!ext4_ext_is_uninitialized(ex)); |
| BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); |
| |
| /* |
| * Attempt to transfer newly initialized blocks from the currently |
| * uninitialized extent to its left neighbor. This is much cheaper |
| * than an insertion followed by a merge as those involve costly |
| * memmove() calls. This is the common case in steady state for |
| * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append |
| * writes. |
| * |
| * Limitations of the current logic: |
| * - L1: we only deal with writes at the start of the extent. |
| * The approach could be extended to writes at the end |
| * of the extent but this scenario was deemed less common. |
| * - L2: we do not deal with writes covering the whole extent. |
| * This would require removing the extent if the transfer |
| * is possible. |
| * - L3: we only attempt to merge with an extent stored in the |
| * same extent tree node. |
| */ |
| if ((map->m_lblk == ee_block) && /*L1*/ |
| (map->m_len < ee_len) && /*L2*/ |
| (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/ |
| struct ext4_extent *prev_ex; |
| ext4_lblk_t prev_lblk; |
| ext4_fsblk_t prev_pblk, ee_pblk; |
| unsigned int prev_len, write_len; |
| |
| prev_ex = ex - 1; |
| prev_lblk = le32_to_cpu(prev_ex->ee_block); |
| prev_len = ext4_ext_get_actual_len(prev_ex); |
| prev_pblk = ext4_ext_pblock(prev_ex); |
| ee_pblk = ext4_ext_pblock(ex); |
| write_len = map->m_len; |
| |
| /* |
| * A transfer of blocks from 'ex' to 'prev_ex' is allowed |
| * upon those conditions: |
| * - C1: prev_ex is initialized, |
| * - C2: prev_ex is logically abutting ex, |
| * - C3: prev_ex is physically abutting ex, |
| * - C4: prev_ex can receive the additional blocks without |
| * overflowing the (initialized) length limit. |
| */ |
| if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/ |
| ((prev_lblk + prev_len) == ee_block) && /*C2*/ |
| ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ |
| (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/ |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| goto out; |
| |
| trace_ext4_ext_convert_to_initialized_fastpath(inode, |
| map, ex, prev_ex); |
| |
| /* Shift the start of ex by 'write_len' blocks */ |
| ex->ee_block = cpu_to_le32(ee_block + write_len); |
| ext4_ext_store_pblock(ex, ee_pblk + write_len); |
| ex->ee_len = cpu_to_le16(ee_len - write_len); |
| ext4_ext_mark_uninitialized(ex); /* Restore the flag */ |
| |
| /* Extend prev_ex by 'write_len' blocks */ |
| prev_ex->ee_len = cpu_to_le16(prev_len + write_len); |
| |
| /* Mark the block containing both extents as dirty */ |
| ext4_ext_dirty(handle, inode, path + depth); |
| |
| /* Update path to point to the right extent */ |
| path[depth].p_ext = prev_ex; |
| |
| /* Result: number of initialized blocks past m_lblk */ |
| allocated = write_len; |
| goto out; |
| } |
| } |
| |
| WARN_ON(map->m_lblk < ee_block); |
| /* |
| * It is safe to convert extent to initialized via explicit |
| * zeroout only if extent is fully insde i_size or new_size. |
| */ |
| split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; |
| |
	/* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero out directly */
| if (ee_len <= 2*EXT4_EXT_ZERO_LEN && |
| (EXT4_EXT_MAY_ZEROOUT & split_flag)) { |
| err = ext4_ext_zeroout(inode, ex); |
| if (err) |
| goto out; |
| |
| err = ext4_ext_get_access(handle, inode, path + depth); |
| if (err) |
| goto out; |
| ext4_ext_mark_initialized(ex); |
| ext4_ext_try_to_merge(inode, path, ex); |
| err = ext4_ext_dirty(handle, inode, path + depth); |
| goto out; |
| } |
| |
| /* |
| * four cases: |
| * 1. split the extent into three extents. |
| * 2. split the extent into two extents, zeroout the first half. |
| * 3. split the extent into two extents, zeroout the second half. |
| * 4. split the extent into two extents with out zeroout. |
| */ |
| split_map.m_lblk = map->m_lblk; |
| split_map.m_len = map->m_len; |
| |
| if (allocated > map->m_len) { |
| if (allocated <= EXT4_EXT_ZERO_LEN && |
| (EXT4_EXT_MAY_ZEROOUT & split_flag)) { |
| /* case 3 */ |
| zero_ex.ee_block = |
| cpu_to_le32(map->m_lblk); |
| zero_ex.ee_len = cpu_to_le16(allocated); |
| ext4_ext_store_pblock(&zero_ex, |
| ext4_ext_pblock(ex) + map->m_lblk - ee_block); |
| err = ext4_ext_zeroout(inode, &zero_ex); |
| if (err) |
| goto out; |
| split_map.m_lblk = map->m_lblk; |
| split_map.m_len = allocated; |
| } else if ((map->m_lblk - ee_block + map->m_len < |
| EXT4_EXT_ZERO_LEN) && |
| (EXT4_EXT_MAY_ZEROOUT & split_flag)) { |
| /* case 2 */ |
			if (map->m_lblk != ee_block) {