diff mbox

[23/23] ext4: add some tracepoints in ext4/extents.c

Message ID 1309970166-11770-24-git-send-email-tytso@mit.edu
State Not Applicable, archived
Headers show

Commit Message

Theodore Ts'o July 6, 2011, 4:36 p.m. UTC
From: Aditya Kali <adityakali@google.com>

This patch adds some tracepoints in ext4/extents.c and updates a tracepoint in
ext4/inode.c.

Tested: Built and ran the kernel and verified that these tracepoints work.
Also ran xfstests.

Signed-off-by: Aditya Kali <adityakali@google.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
---
 fs/ext4/extents.c           |   46 +++++-
 fs/ext4/inode.c             |    2 +-
 include/trace/events/ext4.h |  415 +++++++++++++++++++++++++++++++++++++++++--
 3 files changed, 443 insertions(+), 20 deletions(-)

Comments

Eric Gouriou July 6, 2011, 6:12 p.m. UTC | #1
On Wed, Jul 6, 2011 at 09:36, Theodore Ts'o <tytso@mit.edu> wrote:
> From: Aditya Kali <adityakali@google.com>
>
> This patch adds some tracepoints in ext4/extents.c and updates a tracepoint in
> ext4/inode.c.
>
> Tested: Built and ran the kernel and verified that these tracepoints work.
> Also ran xfstests.
>
> Signed-off-by: Aditya Kali <adityakali@google.com>
> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
> ---
>  fs/ext4/extents.c           |   46 +++++-
>  fs/ext4/inode.c             |    2 +-
>  include/trace/events/ext4.h |  415 +++++++++++++++++++++++++++++++++++++++++--
>  3 files changed, 443 insertions(+), 20 deletions(-)
>
> diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
> index 11bd617..0d3ba59 100644
> --- a/fs/ext4/extents.c
> +++ b/fs/ext4/extents.c
> @@ -1964,6 +1964,7 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
>        struct ext4_ext_cache *cex;
>        BUG_ON(len == 0);
>        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
> +       trace_ext4_ext_put_in_cache(inode, block, len, start);
>        cex = &EXT4_I(inode)->i_cached_extent;
>        cex->ec_block = block;
>        cex->ec_len = len;
> @@ -2065,6 +2066,7 @@ errout:
>                sbi->extent_cache_misses++;
>        else
>                sbi->extent_cache_hits++;
> +       trace_ext4_ext_in_cache(inode, block, ret);
>        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
>        return ret;
>  }
> @@ -2127,6 +2129,8 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
>        if (err)
>                return err;
>        ext_debug("index is empty, remove it, free block %llu\n", leaf);
> +       trace_ext4_ext_rm_idx(inode, leaf);
> +
>        ext4_free_blocks(handle, inode, NULL, leaf, 1,
>                         EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
>        return err;
> @@ -2212,6 +2216,9 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
>         */
>        flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
>
> +       trace_ext4_remove_blocks(inode, cpu_to_le32(ex->ee_block),
> +                                ext4_ext_pblock(ex), ee_len, from,
> +                                to, *partial_cluster);

In all the tracepoints that capture the logical block/physical
block/length of an extent, it would be nicer to pass the extent itself
as a parameter and perform the extraction in the TP_fast_assign()
block. This requires adding an #include <ext4_extents.h> above the
include of <trace/events/ext4.h> in fs/ext4/super.c in order to
have ext4_ext_pblock() and ext4_ext_get_actual_len() available.

By itself this should not be grounds to respin the whole patch series;
a follow-on cleanup patch would do (I volunteer to create one if
desired). Fortunately this wouldn't cause the trace record format to
change, so it wouldn't cause any backward/forward compatibility issue.

 Regards - Eric

>        /*
>         * If we have a partial cluster, and it's different from the
>         * cluster of the last block, we need to explicitly free the
> @@ -2326,6 +2333,9 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
>        ex_ee_block = le32_to_cpu(ex->ee_block);
>        ex_ee_len = ext4_ext_get_actual_len(ex);
>
> +       trace_ext4_ext_rm_leaf(inode, start, ex_ee_block, ext4_ext_pblock(ex),
> +                              ex_ee_len, *partial_cluster);
> +
>        while (ex >= EXT_FIRST_EXTENT(eh) &&
>                        ex_ee_block + ex_ee_len > start) {
>
> @@ -2582,6 +2592,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
>  again:
>        ext4_ext_invalidate_cache(inode);
>
> +       trace_ext4_ext_remove_space(inode, start, depth);
> +
>        /*
>         * We start scanning from right side, freeing all the blocks
>         * after i_size and walking into the tree depth-wise.
> @@ -2676,6 +2688,9 @@ again:
>                }
>        }
>
> +       trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
> +                       path->p_hdr->eh_entries);
> +
>        /* If we still have something in the partial cluster and we have removed
>         * even the first extent, then we should free the blocks in the partial
>         * cluster as well. */
> @@ -3290,6 +3305,10 @@ static int ext4_find_delalloc_range(struct inode *inode,
>                         * detect that here.
>                         */
>                        page_cache_release(page);
> +                       trace_ext4_find_delalloc_range(inode,
> +                                       lblk_start, lblk_end,
> +                                       search_hint_reverse,
> +                                       0, i);
>                        return 0;
>                }
>
> @@ -3317,6 +3336,10 @@ static int ext4_find_delalloc_range(struct inode *inode,
>
>                        if (buffer_delay(bh)) {
>                                page_cache_release(page);
> +                               trace_ext4_find_delalloc_range(inode,
> +                                               lblk_start, lblk_end,
> +                                               search_hint_reverse,
> +                                               1, i);
>                                return 1;
>                        }
>                        if (search_hint_reverse)
> @@ -3339,6 +3362,8 @@ nextpage:
>                i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
>        }
>
> +       trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
> +                                       search_hint_reverse, 0, 0);
>        return 0;
>  }
>
> @@ -3404,6 +3429,8 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
>        /* max possible clusters for this allocation */
>        allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
>
> +       trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
> +
>        /* Check towards left side */
>        c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
>        if (c_offset) {
> @@ -3443,6 +3470,9 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
>                  flags, allocated);
>        ext4_ext_show_leaf(inode, path);
>
> +       trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
> +                                                   newblock);
> +
>        /* get_block() before submit the IO, split the extent */
>        if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
>                ret = ext4_split_unwritten_extents(handle, inode, map,
> @@ -3562,7 +3592,7 @@ out2:
>  * get_implied_cluster_alloc - check to see if the requested
>  * allocation (in the map structure) overlaps with a cluster already
>  * allocated in an extent.
> - *     @sbi    The ext4-specific superblock structure
> + *     @sb     The filesystem superblock structure
>  *     @map    The requested lblk->pblk mapping
>  *     @ex     The extent structure which might contain an implied
>  *                     cluster allocation
> @@ -3599,11 +3629,12 @@ out2:
>  * ext4_ext_map_blocks() will then allocate one or more new clusters
>  * by calling ext4_mb_new_blocks().
>  */
> -static int get_implied_cluster_alloc(struct ext4_sb_info *sbi,
> +static int get_implied_cluster_alloc(struct super_block *sb,
>                                     struct ext4_map_blocks *map,
>                                     struct ext4_extent *ex,
>                                     struct ext4_ext_path *path)
>  {
> +       struct ext4_sb_info *sbi = EXT4_SB(sb);
>        ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
>        ext4_lblk_t ex_cluster_start, ex_cluster_end;
>        ext4_lblk_t rr_cluster_start, rr_cluster_end;
> @@ -3652,8 +3683,12 @@ static int get_implied_cluster_alloc(struct ext4_sb_info *sbi,
>                        ext4_lblk_t next = ext4_ext_next_allocated_block(path);
>                        map->m_len = min(map->m_len, next - map->m_lblk);
>                }
> +
> +               trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
>                return 1;
>        }
> +
> +       trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
>        return 0;
>  }
>
> @@ -3762,6 +3797,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
>                 * we split out initialized portions during a write.
>                 */
>                ee_len = ext4_ext_get_actual_len(ex);
> +
> +               trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
> +
>                /* if found extent covers block, simply return it */
>                if (in_range(map->m_lblk, ee_block, ee_len)) {
>                        newblock = map->m_lblk - ee_block + ee_start;
> @@ -3880,7 +3918,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
>         * by ext4_ext_find_extent() implies a cluster we can use.
>         */
>        if (cluster_offset && ex &&
> -           get_implied_cluster_alloc(sbi, map, ex, path)) {
> +           get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
>                ar.len = allocated = map->m_len;
>                newblock = map->m_pblk;
>                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
> @@ -3901,7 +3939,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
>        /* Check if the extent after searching to the right implies a
>         * cluster we can use. */
>        if ((sbi->s_cluster_ratio > 1) && ex2 &&
> -           get_implied_cluster_alloc(sbi, map, ex2, path)) {
> +           get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
>                ar.len = allocated = map->m_len;
>                newblock = map->m_pblk;
>                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
> diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
> index 7ae8dec..2495cfb 100644
> --- a/fs/ext4/inode.c
> +++ b/fs/ext4/inode.c
> @@ -238,7 +238,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
>        struct ext4_inode_info *ei = EXT4_I(inode);
>
>        spin_lock(&ei->i_block_reservation_lock);
> -       trace_ext4_da_update_reserve_space(inode, used);
> +       trace_ext4_da_update_reserve_space(inode, used, quota_claim);
>        if (unlikely(used > ei->i_reserved_data_blocks)) {
>                ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
>                         "with only %d reserved data blocks\n",
> diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
> index 5ce2b2f..3368399 100644
> --- a/include/trace/events/ext4.h
> +++ b/include/trace/events/ext4.h
> @@ -12,6 +12,7 @@ struct ext4_allocation_request;
>  struct ext4_prealloc_space;
>  struct ext4_inode_info;
>  struct mpage_da_data;
> +struct ext4_map_blocks;
>
>  #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
>
> @@ -1034,19 +1035,20 @@ TRACE_EVENT(ext4_forget,
>  );
>
>  TRACE_EVENT(ext4_da_update_reserve_space,
> -       TP_PROTO(struct inode *inode, int used_blocks),
> +       TP_PROTO(struct inode *inode, int used_blocks, int quota_claim),
>
> -       TP_ARGS(inode, used_blocks),
> +       TP_ARGS(inode, used_blocks, quota_claim),
>
>        TP_STRUCT__entry(
> -               __field(        dev_t,  dev                     )
> -               __field(        ino_t,  ino                     )
> -               __field(        umode_t, mode                   )
> -               __field(        __u64,  i_blocks                )
> -               __field(        int,    used_blocks             )
> -               __field(        int,    reserved_data_blocks    )
> -               __field(        int,    reserved_meta_blocks    )
> -               __field(        int,    allocated_meta_blocks   )
> +               __field(        dev_t,          dev                     )
> +               __field(        ino_t,          ino                     )
> +               __field(        umode_t,        mode                    )
> +               __field(        __u64,          i_blocks                )
> +               __field(        int,            used_blocks             )
> +               __field(        int,            reserved_data_blocks    )
> +               __field(        int,            reserved_meta_blocks    )
> +               __field(        int,            allocated_meta_blocks   )
> +               __field(        int,            quota_claim             )
>        ),
>
>        TP_fast_assign(
> @@ -1055,19 +1057,24 @@ TRACE_EVENT(ext4_da_update_reserve_space,
>                __entry->mode   = inode->i_mode;
>                __entry->i_blocks = inode->i_blocks;
>                __entry->used_blocks = used_blocks;
> -               __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
> -               __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
> -               __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
> +               __entry->reserved_data_blocks =
> +                               EXT4_I(inode)->i_reserved_data_blocks;
> +               __entry->reserved_meta_blocks =
> +                               EXT4_I(inode)->i_reserved_meta_blocks;
> +               __entry->allocated_meta_blocks =
> +                               EXT4_I(inode)->i_allocated_meta_blocks;
> +               __entry->quota_claim = quota_claim;
>        ),
>
>        TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
>                  "reserved_data_blocks %d reserved_meta_blocks %d "
> -                 "allocated_meta_blocks %d",
> +                 "allocated_meta_blocks %d quota_claim %d",
>                  MAJOR(__entry->dev), MINOR(__entry->dev),
>                  (unsigned long) __entry->ino,
>                  __entry->mode, __entry->i_blocks,
>                  __entry->used_blocks, __entry->reserved_data_blocks,
> -                 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
> +                 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks,
> +                 __entry->quota_claim)
>  );
>
>  TRACE_EVENT(ext4_da_reserve_space,
> @@ -1520,6 +1527,384 @@ TRACE_EVENT(ext4_load_inode,
>                  (unsigned long) __entry->ino)
>  );
>
> +TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
> +       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
> +                unsigned int allocated, ext4_fsblk_t newblock),
> +
> +       TP_ARGS(inode, map, allocated, newblock),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino             )
> +               __field(        dev_t,          dev             )
> +               __field(        ext4_lblk_t,    lblk            )
> +               __field(        ext4_fsblk_t,   pblk            )
> +               __field(        unsigned int,   len             )
> +               __field(        int,            flags           )
> +               __field(        unsigned int,   allocated       )
> +               __field(        ext4_fsblk_t,   newblk          )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino            = inode->i_ino;
> +               __entry->dev            = inode->i_sb->s_dev;
> +               __entry->lblk           = map->m_lblk;
> +               __entry->pblk           = map->m_pblk;
> +               __entry->len            = map->m_len;
> +               __entry->flags          = map->m_flags;
> +               __entry->allocated      = allocated;
> +               __entry->newblk         = newblock;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d"
> +                 "allocated %d newblock %llu",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
> +                 __entry->len, __entry->flags,
> +                 (unsigned int) __entry->allocated,
> +                 (unsigned long long) __entry->newblk)
> +);
> +
> +TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
> +       TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret),
> +
> +       TP_ARGS(sb, map, ret),
> +
> +       TP_STRUCT__entry(
> +               __field(        dev_t,          dev     )
> +               __field(        ext4_lblk_t,    lblk    )
> +               __field(        ext4_fsblk_t,   pblk    )
> +               __field(        unsigned int,   len     )
> +               __field(        unsigned int,   flags   )
> +               __field(        int,            ret     )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->dev    = sb->s_dev;
> +               __entry->lblk   = map->m_lblk;
> +               __entry->pblk   = map->m_pblk;
> +               __entry->len    = map->m_len;
> +               __entry->flags  = map->m_flags;
> +               __entry->ret    = ret;
> +       ),
> +
> +       TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 __entry->lblk, (unsigned long long) __entry->pblk,
> +                 __entry->len, __entry->flags, __entry->ret)
> +);
> +
> +TRACE_EVENT(ext4_ext_put_in_cache,
> +       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
> +                ext4_fsblk_t start),
> +
> +       TP_ARGS(inode, lblk, len, start),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino     )
> +               __field(        dev_t,          dev     )
> +               __field(        ext4_lblk_t,    lblk    )
> +               __field(        unsigned int,   len     )
> +               __field(        ext4_fsblk_t,   start   )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino    = inode->i_ino;
> +               __entry->dev    = inode->i_sb->s_dev;
> +               __entry->lblk   = lblk;
> +               __entry->len    = len;
> +               __entry->start  = start;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->lblk,
> +                 __entry->len,
> +                 (unsigned long long) __entry->start)
> +);
> +
> +TRACE_EVENT(ext4_ext_in_cache,
> +       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
> +
> +       TP_ARGS(inode, lblk, ret),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino     )
> +               __field(        dev_t,          dev     )
> +               __field(        ext4_lblk_t,    lblk    )
> +               __field(        int,            ret     )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino    = inode->i_ino;
> +               __entry->dev    = inode->i_sb->s_dev;
> +               __entry->lblk   = lblk;
> +               __entry->ret    = ret;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu lblk %u ret %d",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->lblk,
> +                 __entry->ret)
> +
> +);
> +
> +TRACE_EVENT(ext4_find_delalloc_range,
> +       TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
> +               int reverse, int found, ext4_lblk_t found_blk),
> +
> +       TP_ARGS(inode, from, to, reverse, found, found_blk),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino             )
> +               __field(        dev_t,          dev             )
> +               __field(        ext4_lblk_t,    from            )
> +               __field(        ext4_lblk_t,    to              )
> +               __field(        int,            reverse         )
> +               __field(        int,            found           )
> +               __field(        ext4_lblk_t,    found_blk       )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino            = inode->i_ino;
> +               __entry->dev            = inode->i_sb->s_dev;
> +               __entry->from           = from;
> +               __entry->to             = to;
> +               __entry->reverse        = reverse;
> +               __entry->found          = found;
> +               __entry->found_blk      = found_blk;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
> +                 "(blk = %u)",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->from, (unsigned) __entry->to,
> +                 __entry->reverse, __entry->found,
> +                 (unsigned) __entry->found_blk)
> +);
> +
> +TRACE_EVENT(ext4_get_reserved_cluster_alloc,
> +       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
> +
> +       TP_ARGS(inode, lblk, len),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino     )
> +               __field(        dev_t,          dev     )
> +               __field(        ext4_lblk_t,    lblk    )
> +               __field(        unsigned int,   len     )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino    = inode->i_ino;
> +               __entry->dev    = inode->i_sb->s_dev;
> +               __entry->lblk   = lblk;
> +               __entry->len    = len;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu lblk %u len %u",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->lblk,
> +                 __entry->len)
> +);
> +
> +TRACE_EVENT(ext4_ext_show_extent,
> +       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
> +                unsigned short len),
> +
> +       TP_ARGS(inode, lblk, pblk, len),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino     )
> +               __field(        dev_t,          dev     )
> +               __field(        ext4_lblk_t,    lblk    )
> +               __field(        ext4_fsblk_t,   pblk    )
> +               __field(        unsigned short, len     )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino    = inode->i_ino;
> +               __entry->dev    = inode->i_sb->s_dev;
> +               __entry->lblk   = lblk;
> +               __entry->pblk   = pblk;
> +               __entry->len    = len;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->lblk,
> +                 (unsigned long long) __entry->pblk,
> +                 (unsigned short) __entry->len)
> +);
> +
> +TRACE_EVENT(ext4_remove_blocks,
> +       TP_PROTO(struct inode *inode, ext4_lblk_t ex_lblk,
> +               ext4_fsblk_t ex_pblk, unsigned short ex_len,
> +               ext4_lblk_t from, ext4_fsblk_t to,
> +               ext4_fsblk_t partial_cluster),
> +
> +       TP_ARGS(inode, ex_lblk, ex_pblk, ex_len, from, to, partial_cluster),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino     )
> +               __field(        dev_t,          dev     )
> +               __field(        ext4_lblk_t,    ee_lblk )
> +               __field(        ext4_fsblk_t,   ee_pblk )
> +               __field(        unsigned short, ee_len  )
> +               __field(        ext4_lblk_t,    from    )
> +               __field(        ext4_lblk_t,    to      )
> +               __field(        ext4_fsblk_t,   partial )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino            = inode->i_ino;
> +               __entry->dev            = inode->i_sb->s_dev;
> +               __entry->ee_lblk        = ex_lblk;
> +               __entry->ee_pblk        = ex_pblk;
> +               __entry->ee_len         = ex_len;
> +               __entry->from           = from;
> +               __entry->to             = to;
> +               __entry->partial        = partial_cluster;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
> +                 "from %u to %u partial_cluster %u",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->ee_lblk,
> +                 (unsigned long long) __entry->ee_pblk,
> +                 (unsigned short) __entry->ee_len,
> +                 (unsigned) __entry->from,
> +                 (unsigned) __entry->to,
> +                 (unsigned) __entry->partial)
> +);
> +
> +TRACE_EVENT(ext4_ext_rm_leaf,
> +       TP_PROTO(struct inode *inode, ext4_lblk_t start,
> +                ext4_lblk_t ex_lblk, ext4_fsblk_t ex_pblk,
> +                unsigned short ex_len, ext4_fsblk_t partial_cluster),
> +
> +       TP_ARGS(inode, start, ex_lblk, ex_pblk, ex_len, partial_cluster),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino     )
> +               __field(        dev_t,          dev     )
> +               __field(        ext4_lblk_t,    start   )
> +               __field(        ext4_lblk_t,    ee_lblk )
> +               __field(        ext4_fsblk_t,   ee_pblk )
> +               __field(        short,          ee_len  )
> +               __field(        ext4_fsblk_t,   partial )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino            = inode->i_ino;
> +               __entry->dev            = inode->i_sb->s_dev;
> +               __entry->start          = start;
> +               __entry->ee_lblk        = ex_lblk;
> +               __entry->ee_pblk        = ex_pblk;
> +               __entry->ee_len         = ex_len;
> +               __entry->partial        = partial_cluster;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
> +                 "partial_cluster %u",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->start,
> +                 (unsigned) __entry->ee_lblk,
> +                 (unsigned long long) __entry->ee_pblk,
> +                 (unsigned short) __entry->ee_len,
> +                 (unsigned) __entry->partial)
> +);
> +
> +TRACE_EVENT(ext4_ext_rm_idx,
> +       TP_PROTO(struct inode *inode, ext4_fsblk_t pblk),
> +
> +       TP_ARGS(inode, pblk),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino     )
> +               __field(        dev_t,          dev     )
> +               __field(        ext4_fsblk_t,   pblk    )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino    = inode->i_ino;
> +               __entry->dev    = inode->i_sb->s_dev;
> +               __entry->pblk   = pblk;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu index_pblk %llu",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned long long) __entry->pblk)
> +);
> +
> +TRACE_EVENT(ext4_ext_remove_space,
> +       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
> +
> +       TP_ARGS(inode, start, depth),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino     )
> +               __field(        dev_t,          dev     )
> +               __field(        ext4_lblk_t,    start   )
> +               __field(        int,            depth   )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino    = inode->i_ino;
> +               __entry->dev    = inode->i_sb->s_dev;
> +               __entry->start  = start;
> +               __entry->depth  = depth;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu since %u depth %d",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->start,
> +                 __entry->depth)
> +);
> +
> +TRACE_EVENT(ext4_ext_remove_space_done,
> +       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
> +               ext4_lblk_t partial, unsigned short eh_entries),
> +
> +       TP_ARGS(inode, start, depth, partial, eh_entries),
> +
> +       TP_STRUCT__entry(
> +               __field(        ino_t,          ino             )
> +               __field(        dev_t,          dev             )
> +               __field(        ext4_lblk_t,    start           )
> +               __field(        int,            depth           )
> +               __field(        ext4_lblk_t,    partial         )
> +               __field(        unsigned short, eh_entries      )
> +       ),
> +
> +       TP_fast_assign(
> +               __entry->ino            = inode->i_ino;
> +               __entry->dev            = inode->i_sb->s_dev;
> +               __entry->start          = start;
> +               __entry->depth          = depth;
> +               __entry->partial        = partial;
> +               __entry->eh_entries     = eh_entries;
> +       ),
> +
> +       TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
> +                 "remaining_entries %u",
> +                 MAJOR(__entry->dev), MINOR(__entry->dev),
> +                 (unsigned long) __entry->ino,
> +                 (unsigned) __entry->start,
> +                 __entry->depth,
> +                 (unsigned) __entry->partial,
> +                 (unsigned short) __entry->eh_entries)
> +);
> +
>  #endif /* _TRACE_EXT4_H */
>
>  /* This part must be outside protection */
> --
> 1.7.4.1.22.gec8e1.dirty
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>
Theodore Ts'o July 8, 2011, 11:20 p.m. UTC | #2
On Wed, Jul 06, 2011 at 11:12:30AM -0700, Eric Gouriou wrote:
> 
> In all the trace points that capture the logical block/physical
> block/length of an extent, it would be nicer to pass the extent itself
> as a parameter and perform the extraction in the TP_fast_assign()
> block. This requires adding an #include <ext4_extents.h> above the
> include of <trace/trace_events/ext4.h> in fs/ext4/super.c in order to
> have ext4_ext_pblock() and ext4_ext_get_actual_len() available.
> 
> By itself this should not be ground to respin the whole patch series,
> a follow-on cleanup patch would do (I volunteer to create one if
> desired). Fortunately this wouldn't cause the trace record format to
> change, so it wouldn't cause any backward/forward compatibility issue.

Good suggestion, I've made this change.

						- Ted
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 11bd617..0d3ba59 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1964,6 +1964,7 @@  ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
 	struct ext4_ext_cache *cex;
 	BUG_ON(len == 0);
 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+	trace_ext4_ext_put_in_cache(inode, block, len, start);
 	cex = &EXT4_I(inode)->i_cached_extent;
 	cex->ec_block = block;
 	cex->ec_len = len;
@@ -2065,6 +2066,7 @@  errout:
 		sbi->extent_cache_misses++;
 	else
 		sbi->extent_cache_hits++;
+	trace_ext4_ext_in_cache(inode, block, ret);
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 	return ret;
 }
@@ -2127,6 +2129,8 @@  static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
 	if (err)
 		return err;
 	ext_debug("index is empty, remove it, free block %llu\n", leaf);
+	trace_ext4_ext_rm_idx(inode, leaf);
+
 	ext4_free_blocks(handle, inode, NULL, leaf, 1,
 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
 	return err;
@@ -2212,6 +2216,9 @@  static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
 	 */
 	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
 
+	trace_ext4_remove_blocks(inode, le32_to_cpu(ex->ee_block),
+				 ext4_ext_pblock(ex), ee_len, from,
+				 to, *partial_cluster);
 	/*
 	 * If we have a partial cluster, and it's different from the
 	 * cluster of the last block, we need to explicitly free the
@@ -2326,6 +2333,9 @@  ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 	ex_ee_block = le32_to_cpu(ex->ee_block);
 	ex_ee_len = ext4_ext_get_actual_len(ex);
 
+	trace_ext4_ext_rm_leaf(inode, start, ex_ee_block, ext4_ext_pblock(ex),
+			       ex_ee_len, *partial_cluster);
+
 	while (ex >= EXT_FIRST_EXTENT(eh) &&
 			ex_ee_block + ex_ee_len > start) {
 
@@ -2582,6 +2592,8 @@  static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
 again:
 	ext4_ext_invalidate_cache(inode);
 
+	trace_ext4_ext_remove_space(inode, start, depth);
+
 	/*
 	 * We start scanning from right side, freeing all the blocks
 	 * after i_size and walking into the tree depth-wise.
@@ -2676,6 +2688,9 @@  again:
 		}
 	}
 
+	trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
+			path->p_hdr->eh_entries);
+
 	/* If we still have something in the partial cluster and we have removed
 	 * even the first extent, then we should free the blocks in the partial
 	 * cluster as well. */
@@ -3290,6 +3305,10 @@  static int ext4_find_delalloc_range(struct inode *inode,
 			 * detect that here.
 			 */
 			page_cache_release(page);
+			trace_ext4_find_delalloc_range(inode,
+					lblk_start, lblk_end,
+					search_hint_reverse,
+					0, i);
 			return 0;
 		}
 
@@ -3317,6 +3336,10 @@  static int ext4_find_delalloc_range(struct inode *inode,
 
 			if (buffer_delay(bh)) {
 				page_cache_release(page);
+				trace_ext4_find_delalloc_range(inode,
+						lblk_start, lblk_end,
+						search_hint_reverse,
+						1, i);
 				return 1;
 			}
 			if (search_hint_reverse)
@@ -3339,6 +3362,8 @@  nextpage:
 		i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
 	}
 
+	trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
+					search_hint_reverse, 0, 0);
 	return 0;
 }
 
@@ -3404,6 +3429,8 @@  get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
 	/* max possible clusters for this allocation */
 	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
 
+	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
+
 	/* Check towards left side */
 	c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
 	if (c_offset) {
@@ -3443,6 +3470,9 @@  ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 		  flags, allocated);
 	ext4_ext_show_leaf(inode, path);
 
+	trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
+						    newblock);
+
 	/* get_block() before submit the IO, split the extent */
 	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
 		ret = ext4_split_unwritten_extents(handle, inode, map,
@@ -3562,7 +3592,7 @@  out2:
  * get_implied_cluster_alloc - check to see if the requested
  * allocation (in the map structure) overlaps with a cluster already
  * allocated in an extent.
- *	@sbi	The ext4-specific superblock structure
+ *	@sb	The filesystem superblock structure
  *	@map	The requested lblk->pblk mapping
  *	@ex	The extent structure which might contain an implied
  *			cluster allocation
@@ -3599,11 +3629,12 @@  out2:
  * ext4_ext_map_blocks() will then allocate one or more new clusters
  * by calling ext4_mb_new_blocks().
  */
-static int get_implied_cluster_alloc(struct ext4_sb_info *sbi,
+static int get_implied_cluster_alloc(struct super_block *sb,
 				     struct ext4_map_blocks *map,
 				     struct ext4_extent *ex,
 				     struct ext4_ext_path *path)
 {
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
 	ext4_lblk_t ex_cluster_start, ex_cluster_end;
 	ext4_lblk_t rr_cluster_start, rr_cluster_end;
@@ -3652,8 +3683,12 @@  static int get_implied_cluster_alloc(struct ext4_sb_info *sbi,
 			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
 			map->m_len = min(map->m_len, next - map->m_lblk);
 		}
+
+		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
 		return 1;
 	}
+
+	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
 	return 0;
 }
 
@@ -3762,6 +3797,9 @@  int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		 * we split out initialized portions during a write.
 		 */
 		ee_len = ext4_ext_get_actual_len(ex);
+
+		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
+
 		/* if found extent covers block, simply return it */
 		if (in_range(map->m_lblk, ee_block, ee_len)) {
 			newblock = map->m_lblk - ee_block + ee_start;
@@ -3880,7 +3918,7 @@  int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	 * by ext4_ext_find_extent() implies a cluster we can use.
 	 */
 	if (cluster_offset && ex &&
-	    get_implied_cluster_alloc(sbi, map, ex, path)) {
+	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
 		ar.len = allocated = map->m_len;
 		newblock = map->m_pblk;
 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
@@ -3901,7 +3939,7 @@  int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	/* Check if the extent after searching to the right implies a
 	 * cluster we can use. */
 	if ((sbi->s_cluster_ratio > 1) && ex2 &&
-	    get_implied_cluster_alloc(sbi, map, ex2, path)) {
+	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
 		ar.len = allocated = map->m_len;
 		newblock = map->m_pblk;
 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7ae8dec..2495cfb 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -238,7 +238,7 @@  void ext4_da_update_reserve_space(struct inode *inode,
 	struct ext4_inode_info *ei = EXT4_I(inode);
 
 	spin_lock(&ei->i_block_reservation_lock);
-	trace_ext4_da_update_reserve_space(inode, used);
+	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
 	if (unlikely(used > ei->i_reserved_data_blocks)) {
 		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
 			 "with only %d reserved data blocks\n",
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 5ce2b2f..3368399 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -12,6 +12,7 @@  struct ext4_allocation_request;
 struct ext4_prealloc_space;
 struct ext4_inode_info;
 struct mpage_da_data;
+struct ext4_map_blocks;
 
 #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
 
@@ -1034,19 +1035,20 @@  TRACE_EVENT(ext4_forget,
 );
 
 TRACE_EVENT(ext4_da_update_reserve_space,
-	TP_PROTO(struct inode *inode, int used_blocks),
+	TP_PROTO(struct inode *inode, int used_blocks, int quota_claim),
 
-	TP_ARGS(inode, used_blocks),
+	TP_ARGS(inode, used_blocks, quota_claim),
 
 	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	__u64,	i_blocks		)
-		__field(	int,	used_blocks		)
-		__field(	int,	reserved_data_blocks	)
-		__field(	int,	reserved_meta_blocks	)
-		__field(	int,	allocated_meta_blocks	)
+		__field(	dev_t,		dev			)
+		__field(	ino_t,		ino			)
+		__field(	umode_t,	mode			)
+		__field(	__u64,		i_blocks		)
+		__field(	int,		used_blocks		)
+		__field(	int,		reserved_data_blocks	)
+		__field(	int,		reserved_meta_blocks	)
+		__field(	int,		allocated_meta_blocks	)
+		__field(	int,		quota_claim		)
 	),
 
 	TP_fast_assign(
@@ -1055,19 +1057,24 @@  TRACE_EVENT(ext4_da_update_reserve_space,
 		__entry->mode	= inode->i_mode;
 		__entry->i_blocks = inode->i_blocks;
 		__entry->used_blocks = used_blocks;
-		__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
-		__entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
-		__entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+		__entry->reserved_data_blocks =
+				EXT4_I(inode)->i_reserved_data_blocks;
+		__entry->reserved_meta_blocks =
+				EXT4_I(inode)->i_reserved_meta_blocks;
+		__entry->allocated_meta_blocks =
+				EXT4_I(inode)->i_allocated_meta_blocks;
+		__entry->quota_claim = quota_claim;
 	),
 
 	TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
 		  "reserved_data_blocks %d reserved_meta_blocks %d "
-		  "allocated_meta_blocks %d",
+		  "allocated_meta_blocks %d quota_claim %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long) __entry->ino,
 		  __entry->mode, __entry->i_blocks,
 		  __entry->used_blocks, __entry->reserved_data_blocks,
-		  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+		  __entry->reserved_meta_blocks, __entry->allocated_meta_blocks,
+		  __entry->quota_claim)
 );
 
 TRACE_EVENT(ext4_da_reserve_space,
@@ -1520,6 +1527,384 @@  TRACE_EVENT(ext4_load_inode,
 		  (unsigned long) __entry->ino)
 );
 
+TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
+	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+		 unsigned int allocated, ext4_fsblk_t newblock),
+
+	TP_ARGS(inode, map, allocated, newblock),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino		)
+		__field(	dev_t,		dev		)
+		__field(	ext4_lblk_t,	lblk		)
+		__field(	ext4_fsblk_t,	pblk		)
+		__field(	unsigned int,	len		)
+		__field(	int,		flags		)
+		__field(	unsigned int,	allocated	)
+		__field(	ext4_fsblk_t,	newblk		)
+	),
+
+	TP_fast_assign(
+		__entry->ino		= inode->i_ino;
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->lblk		= map->m_lblk;
+		__entry->pblk		= map->m_pblk;
+		__entry->len		= map->m_len;
+		__entry->flags		= map->m_flags;
+		__entry->allocated	= allocated;
+		__entry->newblk		= newblock;
+	),
+
+	TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d "
+		  "allocated %d newblock %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
+		  __entry->len, __entry->flags,
+		  (unsigned int) __entry->allocated,
+		  (unsigned long long) __entry->newblk)
+);
+
+TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
+	TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret),
+
+	TP_ARGS(sb, map, ret),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,		dev	)
+		__field(	ext4_lblk_t,	lblk	)
+		__field(	ext4_fsblk_t,	pblk	)
+		__field(	unsigned int,	len	)
+		__field(	unsigned int,	flags	)
+		__field(	int,		ret	)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= sb->s_dev;
+		__entry->lblk	= map->m_lblk;
+		__entry->pblk	= map->m_pblk;
+		__entry->len	= map->m_len;
+		__entry->flags	= map->m_flags;
+		__entry->ret	= ret;
+	),
+
+	TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->lblk, (unsigned long long) __entry->pblk,
+		  __entry->len, __entry->flags, __entry->ret)
+);
+
+TRACE_EVENT(ext4_ext_put_in_cache,
+	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
+		 ext4_fsblk_t start),
+
+	TP_ARGS(inode, lblk, len, start),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino	)
+		__field(	dev_t,		dev	)
+		__field(	ext4_lblk_t,	lblk	)
+		__field(	unsigned int,	len	)
+		__field(	ext4_fsblk_t,	start	)
+	),
+
+	TP_fast_assign(
+		__entry->ino	= inode->i_ino;
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->lblk	= lblk;
+		__entry->len	= len;
+		__entry->start	= start;
+	),
+
+	TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->lblk,
+		  __entry->len,
+		  (unsigned long long) __entry->start)
+);
+
+TRACE_EVENT(ext4_ext_in_cache,
+	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
+
+	TP_ARGS(inode, lblk, ret),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino	)
+		__field(	dev_t,		dev	)
+		__field(	ext4_lblk_t,	lblk	)
+		__field(	int,		ret	)
+	),
+
+	TP_fast_assign(
+		__entry->ino	= inode->i_ino;
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->lblk	= lblk;
+		__entry->ret	= ret;
+	),
+
+	TP_printk("dev %d,%d ino %lu lblk %u ret %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->lblk,
+		  __entry->ret)
+
+);
+
+TRACE_EVENT(ext4_find_delalloc_range,
+	TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
+		int reverse, int found, ext4_lblk_t found_blk),
+
+	TP_ARGS(inode, from, to, reverse, found, found_blk),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino		)
+		__field(	dev_t,		dev		)
+		__field(	ext4_lblk_t,	from		)
+		__field(	ext4_lblk_t,	to		)
+		__field(	int,		reverse		)
+		__field(	int,		found		)
+		__field(	ext4_lblk_t,	found_blk	)
+	),
+
+	TP_fast_assign(
+		__entry->ino		= inode->i_ino;
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->from		= from;
+		__entry->to		= to;
+		__entry->reverse	= reverse;
+		__entry->found		= found;
+		__entry->found_blk	= found_blk;
+	),
+
+	TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
+		  "(blk = %u)",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->from, (unsigned) __entry->to,
+		  __entry->reverse, __entry->found,
+		  (unsigned) __entry->found_blk)
+);
+
+TRACE_EVENT(ext4_get_reserved_cluster_alloc,
+	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
+
+	TP_ARGS(inode, lblk, len),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino	)
+		__field(	dev_t,		dev	)
+		__field(	ext4_lblk_t,	lblk	)
+		__field(	unsigned int,	len	)
+	),
+
+	TP_fast_assign(
+		__entry->ino	= inode->i_ino;
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->lblk	= lblk;
+		__entry->len	= len;
+	),
+
+	TP_printk("dev %d,%d ino %lu lblk %u len %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->lblk,
+		  __entry->len)
+);
+
+TRACE_EVENT(ext4_ext_show_extent,
+	TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
+		 unsigned short len),
+
+	TP_ARGS(inode, lblk, pblk, len),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino	)
+		__field(	dev_t,		dev	)
+		__field(	ext4_lblk_t,	lblk	)
+		__field(	ext4_fsblk_t,	pblk	)
+		__field(	unsigned short,	len	)
+	),
+
+	TP_fast_assign(
+		__entry->ino	= inode->i_ino;
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->lblk	= lblk;
+		__entry->pblk	= pblk;
+		__entry->len	= len;
+	),
+
+	TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->lblk,
+		  (unsigned long long) __entry->pblk,
+		  (unsigned short) __entry->len)
+);
+
+TRACE_EVENT(ext4_remove_blocks,
+	TP_PROTO(struct inode *inode, ext4_lblk_t ex_lblk,
+		ext4_fsblk_t ex_pblk, unsigned short ex_len,
+		ext4_lblk_t from, ext4_fsblk_t to,
+		ext4_fsblk_t partial_cluster),
+
+	TP_ARGS(inode, ex_lblk, ex_pblk, ex_len, from, to, partial_cluster),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino	)
+		__field(	dev_t,		dev	)
+		__field(	ext4_lblk_t,	ee_lblk	)
+		__field(	ext4_fsblk_t,	ee_pblk	)
+		__field(	unsigned short,	ee_len	)
+		__field(	ext4_lblk_t,	from	)
+		__field(	ext4_lblk_t,	to	)
+		__field(	ext4_fsblk_t,	partial	)
+	),
+
+	TP_fast_assign(
+		__entry->ino		= inode->i_ino;
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->ee_lblk	= ex_lblk;
+		__entry->ee_pblk	= ex_pblk;
+		__entry->ee_len		= ex_len;
+		__entry->from		= from;
+		__entry->to		= to;
+		__entry->partial	= partial_cluster;
+	),
+
+	TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u] "
+		  "from %u to %u partial_cluster %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->ee_lblk,
+		  (unsigned long long) __entry->ee_pblk,
+		  (unsigned short) __entry->ee_len,
+		  (unsigned) __entry->from,
+		  (unsigned) __entry->to,
+		  (unsigned) __entry->partial)
+);
+
+TRACE_EVENT(ext4_ext_rm_leaf,
+	TP_PROTO(struct inode *inode, ext4_lblk_t start,
+		 ext4_lblk_t ex_lblk, ext4_fsblk_t ex_pblk,
+		 unsigned short ex_len, ext4_fsblk_t partial_cluster),
+
+	TP_ARGS(inode, start, ex_lblk, ex_pblk, ex_len, partial_cluster),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino	)
+		__field(	dev_t,		dev	)
+		__field(	ext4_lblk_t,	start	)
+		__field(	ext4_lblk_t,	ee_lblk	)
+		__field(	ext4_fsblk_t,	ee_pblk	)
+		__field(	short,		ee_len	)
+		__field(	ext4_fsblk_t,	partial	)
+	),
+
+	TP_fast_assign(
+		__entry->ino		= inode->i_ino;
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->start		= start;
+		__entry->ee_lblk	= ex_lblk;
+		__entry->ee_pblk	= ex_pblk;
+		__entry->ee_len		= ex_len;
+		__entry->partial	= partial_cluster;
+	),
+
+	TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u] "
+		  "partial_cluster %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->start,
+		  (unsigned) __entry->ee_lblk,
+		  (unsigned long long) __entry->ee_pblk,
+		  (unsigned short) __entry->ee_len,
+		  (unsigned) __entry->partial)
+);
+
+TRACE_EVENT(ext4_ext_rm_idx,
+	TP_PROTO(struct inode *inode, ext4_fsblk_t pblk),
+
+	TP_ARGS(inode, pblk),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino	)
+		__field(	dev_t,		dev	)
+		__field(	ext4_fsblk_t,	pblk	)
+	),
+
+	TP_fast_assign(
+		__entry->ino	= inode->i_ino;
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->pblk	= pblk;
+	),
+
+	TP_printk("dev %d,%d ino %lu index_pblk %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned long long) __entry->pblk)
+);
+
+TRACE_EVENT(ext4_ext_remove_space,
+	TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
+
+	TP_ARGS(inode, start, depth),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino	)
+		__field(	dev_t,		dev	)
+		__field(	ext4_lblk_t,	start	)
+		__field(	int,		depth	)
+	),
+
+	TP_fast_assign(
+		__entry->ino	= inode->i_ino;
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->start	= start;
+		__entry->depth	= depth;
+	),
+
+	TP_printk("dev %d,%d ino %lu since %u depth %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->start,
+		  __entry->depth)
+);
+
+TRACE_EVENT(ext4_ext_remove_space_done,
+	TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
+		ext4_lblk_t partial, unsigned short eh_entries),
+
+	TP_ARGS(inode, start, depth, partial, eh_entries),
+
+	TP_STRUCT__entry(
+		__field(	ino_t,		ino		)
+		__field(	dev_t,		dev		)
+		__field(	ext4_lblk_t,	start		)
+		__field(	int,		depth		)
+		__field(	ext4_lblk_t,	partial		)
+		__field(	unsigned short,	eh_entries	)
+	),
+
+	TP_fast_assign(
+		__entry->ino		= inode->i_ino;
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->start		= start;
+		__entry->depth		= depth;
+		__entry->partial	= partial;
+		__entry->eh_entries	= eh_entries;
+	),
+
+	TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
+		  "remaining_entries %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino,
+		  (unsigned) __entry->start,
+		  __entry->depth,
+		  (unsigned) __entry->partial,
+		  (unsigned short) __entry->eh_entries)
+);
+
 #endif /* _TRACE_EXT4_H */
 
 /* This part must be outside protection */