/*
 * Source: linux/fs/xfs/xfs_trace.h (Linux 6.17.9) — XFS tracepoint
 * definitions.  (Code-browser boilerplate removed; it was not valid C.)
 */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2009, Christoph Hellwig
 * All Rights Reserved.
 *
 * NOTE: none of these tracepoints shall be considered a stable kernel ABI
 * as they can change at any time.
 *
 * Current conventions for printing numbers measuring specific units:
 *
 * agno: allocation group number
 *
 * agino: per-AG inode number
 * ino: filesystem inode number
 *
 * agbno: per-AG block number in fs blocks
 * rgbno: per-rtgroup block number in fs blocks
 * startblock: physical block number for file mappings.  This is either a
 *             segmented fsblock for data device mappings, or a rfsblock
 *             for realtime device mappings
 * fsbcount: number of blocks in an extent, in fs blocks
 *
 * gbno: generic allocation group block number.  This is an agbno for
 *       space in a per-AG or a rgbno for space in a realtime group.
 *
 * daddr: physical block number in 512b blocks
 * bbcount: number of blocks in a physical extent, in 512b blocks
 *
 * rtx: physical rt extent number for extent mappings
 * rtxcount: number of rt extents in an extent mapping
 *
 * owner: reverse-mapping owner, usually inodes
 *
 * fileoff: file offset, in fs blocks
 * pos: file offset, in bytes
 * bytecount: number of bytes
 *
 * dablk: directory or xattr block offset, in filesystem blocks
 *
 * disize: ondisk file size, in bytes
 * isize: incore file size, in bytes
 *
 * forkoff: inode fork offset, in bytes
 *
 * ireccount: number of inode records
 *
 * Numbers describing space allocations (blocks, extents, inodes) should be
 * formatted in hexadecimal.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xfs

#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XFS_H

#include <linux/tracepoint.h>

struct xfs_agf;
struct xfs_alloc_arg;
struct xfs_attr_list_context;
struct xfs_buf_log_item;
struct xfs_da_args;
struct xfs_da_node_entry;
struct xfs_dquot;
struct xfs_log_item;
struct xlog;
struct xlog_ticket;
struct xlog_recover;
struct xlog_recover_item;
struct xlog_rec_header;
struct xlog_in_core;
struct xfs_buf_log_format;
struct xfs_inode_log_format;
struct xfs_bmbt_irec;
struct xfs_btree_cur;
struct xfs_defer_op_type;
struct xfs_refcount_irec;
struct xfs_fsmap;
struct xfs_fsmap_irec;
struct xfs_group;
struct xfs_rmap_irec;
struct xfs_icreate_log;
struct xfs_iunlink_item;
struct xfs_owner_info;
struct xfs_trans_res;
struct xfs_inobt_rec_incore;
union xfs_btree_ptr;
struct xfs_dqtrx;
struct xfs_icwalk;
struct xfs_perag;
struct xfbtree;
struct xfs_btree_ops;
struct xfs_bmap_intent;
struct xfs_exchmaps_intent;
struct xfs_exchmaps_req;
struct xfs_exchrange;
struct xfs_getparents;
struct xfs_parent_irec;
struct xfs_attrlist_cursor_kern;
struct xfs_extent_free_item;
struct xfs_rmap_intent;
struct xfs_refcount_intent;
struct xfs_metadir_update;
struct xfs_rtgroup;
struct xfs_open_zone;

/*
 * Flag-name table for __print_flags(): decodes the attr namespace filter
 * bits (XFS_ATTR_*) into human-readable strings in trace output.
 */
#define XFS_ATTR_FILTER_FLAGS \
 { XFS_ATTR_ROOT, "ROOT" }, \
 { XFS_ATTR_SECURE, "SECURE" }, \
 { XFS_ATTR_INCOMPLETE, "INCOMPLETE" }, \
 { XFS_ATTR_PARENT, "PARENT" }

/*
 * Common event class for extended attribute listing: records the inode,
 * the list cursor position (hash/block/offset), the output buffer state,
 * and the namespace filter flags.
 *
 * Fix: TP_fast_assign never copied ctx->dupcnt into the ring buffer even
 * though TP_printk prints __entry->dupcnt, so the "dupcnt" value in trace
 * output was uninitialized ring-buffer memory.
 */
DECLARE_EVENT_CLASS(xfs_attr_list_class,
 TP_PROTO(struct xfs_attr_list_context *ctx),
 TP_ARGS(ctx),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(u32, hashval)
  __field(u32, blkno)
  __field(u32, offset)
  __field(void *, buffer)
  __field(int, bufsize)
  __field(int, count)
  __field(int, firstu)
  __field(int, dupcnt)
  __field(unsigned int, attr_filter)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
  __entry->ino = ctx->dp->i_ino;
  __entry->hashval = ctx->cursor.hashval;
  __entry->blkno = ctx->cursor.blkno;
  __entry->offset = ctx->cursor.offset;
  __entry->dupcnt = ctx->dupcnt;
  __entry->buffer = ctx->buffer;
  __entry->bufsize = ctx->bufsize;
  __entry->count = ctx->count;
  __entry->firstu = ctx->firstu;
  __entry->attr_filter = ctx->attr_filter;
 ),
 TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
    "buffer %p size %u count %u firstu %u filter %s",
    MAJOR(__entry->dev), MINOR(__entry->dev),
     __entry->ino,
     __entry->hashval,
     __entry->blkno,
     __entry->offset,
     __entry->dupcnt,
     __entry->buffer,
     __entry->bufsize,
     __entry->count,
     __entry->firstu,
     __print_flags(__entry->attr_filter, "|",
     XFS_ATTR_FILTER_FLAGS)
 )
)

/*
 * Instantiate one tracepoint per attr-list call site, all sharing the
 * xfs_attr_list_class layout above.
 */
#define DEFINE_ATTR_LIST_EVENT(name) \
DEFINE_EVENT(xfs_attr_list_class, name, \
 TP_PROTO(struct xfs_attr_list_context *ctx), \
 TP_ARGS(ctx))
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
DEFINE_ATTR_LIST_EVENT(xfs_attr_leaf_list);
DEFINE_ATTR_LIST_EVENT(xfs_attr_node_list);

/*
 * Reports the inputs used to derive the per-group atomic write unit
 * maximum (awu_max) for an AG or rtgroup, plus the resulting value.
 */
TRACE_EVENT(xfs_calc_atomic_write_unit_max,
 TP_PROTO(struct xfs_mount *mp, enum xfs_group_type type,
   unsigned int max_write, unsigned int max_ioend,
   unsigned int max_gsize, unsigned int awu_max),
 TP_ARGS(mp, type, max_write, max_ioend, max_gsize, awu_max),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(enum xfs_group_type, type)
  __field(unsigned int, max_write)
  __field(unsigned int, max_ioend)
  __field(unsigned int, max_gsize)
  __field(unsigned int, awu_max)
 ),
 TP_fast_assign(
  __entry->dev = mp->m_super->s_dev;
  __entry->type = type;
  __entry->max_write = max_write;
  __entry->max_ioend = max_ioend;
  __entry->max_gsize = max_gsize;
  __entry->awu_max = awu_max;
 ),
 TP_printk("dev %d:%d %s max_write %u max_ioend %u max_gsize %u awu_max %u",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __print_symbolic(__entry->type, XG_TYPE_STRINGS),
    __entry->max_write,
    __entry->max_ioend,
    __entry->max_gsize,
    __entry->awu_max)
);

/*
 * Reports the per-intent cost, step size and log reservation that bound
 * the maximum atomic write size in fs blocks (blockcount).
 */
TRACE_EVENT(xfs_calc_max_atomic_write_fsblocks,
 TP_PROTO(struct xfs_mount *mp, unsigned int per_intent,
   unsigned int step_size, unsigned int logres,
   unsigned int blockcount),
 TP_ARGS(mp, per_intent, step_size, logres, blockcount),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(unsigned int, per_intent)
  __field(unsigned int, step_size)
  __field(unsigned int, logres)
  __field(unsigned int, blockcount)
 ),
 TP_fast_assign(
  __entry->dev = mp->m_super->s_dev;
  __entry->per_intent = per_intent;
  __entry->step_size = step_size;
  __entry->logres = logres;
  __entry->blockcount = blockcount;
 ),
 TP_printk("dev %d:%d per_intent %u step_size %u logres %u blockcount %u",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->per_intent,
    __entry->step_size,
    __entry->logres,
    __entry->blockcount)
);

/*
 * Reports the log-geometry side of the atomic write sizing: the minimum
 * log size needed for the requested blockcount versus the current
 * on-disk log size (sb_logblocks), plus the transaction reservation.
 */
TRACE_EVENT(xfs_calc_max_atomic_write_log_geometry,
 TP_PROTO(struct xfs_mount *mp, unsigned int per_intent,
   unsigned int step_size, unsigned int blockcount,
   unsigned int min_logblocks, unsigned int logres),
 TP_ARGS(mp, per_intent, step_size, blockcount, min_logblocks, logres),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(unsigned int, per_intent)
  __field(unsigned int, step_size)
  __field(unsigned int, blockcount)
  __field(unsigned int, min_logblocks)
  __field(unsigned int, cur_logblocks)
  __field(unsigned int, logres)
 ),
 TP_fast_assign(
  __entry->dev = mp->m_super->s_dev;
  __entry->per_intent = per_intent;
  __entry->step_size = step_size;
  __entry->blockcount = blockcount;
  __entry->min_logblocks = min_logblocks;
  __entry->cur_logblocks = mp->m_sb.sb_logblocks;
  __entry->logres = logres;
 ),
 TP_printk("dev %d:%d per_intent %u step_size %u blockcount %u min_logblocks %u logblocks %u logres %u",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->per_intent,
    __entry->step_size,
    __entry->blockcount,
    __entry->min_logblocks,
    __entry->cur_logblocks,
    __entry->logres)
);

/*
 * Fired when replaying a logged intent item (named by its defer ops type)
 * fails during log recovery; records the error code.
 */
TRACE_EVENT(xlog_intent_recovery_failed,
 TP_PROTO(struct xfs_mount *mp, const struct xfs_defer_op_type *ops,
   int error),
 TP_ARGS(mp, ops, error),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __string(name, ops->name)
  __field(int, error)
 ),
 TP_fast_assign(
  __entry->dev = mp->m_super->s_dev;
  __assign_str(name);
  __entry->error = error;
 ),
 TP_printk("dev %d:%d optype %s error %d",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __get_str(name),
    __entry->error)
);

/*
 * Per-AG reference tracking: snapshots the passive and active reference
 * counts of a perag (via its embedded xfs_group) along with the caller.
 */
DECLARE_EVENT_CLASS(xfs_perag_class,
 TP_PROTO(const struct xfs_perag *pag, unsigned long caller_ip),
 TP_ARGS(pag, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_agnumber_t, agno)
  __field(int, refcount)
  __field(int, active_refcount)
  __field(unsigned long, caller_ip)
 ),
 TP_fast_assign(
  __entry->dev = pag_mount(pag)->m_super->s_dev;
  __entry->agno = pag_agno(pag);
  __entry->refcount = atomic_read(&pag->pag_group.xg_ref);
  __entry->active_refcount =
   atomic_read(&pag->pag_group.xg_active_ref);
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d agno 0x%x passive refs %d active refs %d caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->agno,
    __entry->refcount,
    __entry->active_refcount,
    (char *)__entry->caller_ip)
);

/* Per-AG tag/reclaim events sharing the xfs_perag_class layout. */
#define DEFINE_PERAG_REF_EVENT(name) \
DEFINE_EVENT(xfs_perag_class, name, \
 TP_PROTO(const struct xfs_perag *pag, unsigned long caller_ip), \
 TP_ARGS(pag, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
DEFINE_PERAG_REF_EVENT(xfs_reclaim_inodes_count);

/* Export the group-type enum values so __print_symbolic can decode them. */
TRACE_DEFINE_ENUM(XG_TYPE_AG);
TRACE_DEFINE_ENUM(XG_TYPE_RTG);

/*
 * Generic allocation group (AG or rtgroup) reference tracking: records
 * group type, number, passive/active refcounts, and the caller.
 */
DECLARE_EVENT_CLASS(xfs_group_class,
 TP_PROTO(struct xfs_group *xg, unsigned long caller_ip),
 TP_ARGS(xg, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(enum xfs_group_type, type)
  __field(xfs_agnumber_t, agno)
  __field(int, refcount)
  __field(int, active_refcount)
  __field(unsigned long, caller_ip)
 ),
 TP_fast_assign(
  __entry->dev = xg->xg_mount->m_super->s_dev;
  __entry->type = xg->xg_type;
  __entry->agno = xg->xg_gno;
  __entry->refcount = atomic_read(&xg->xg_ref);
  __entry->active_refcount = atomic_read(&xg->xg_active_ref);
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d %sno 0x%x passive refs %d active refs %d caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __print_symbolic(__entry->type, XG_TYPE_STRINGS),
    __entry->agno,
    __entry->refcount,
    __entry->active_refcount,
    (char *)__entry->caller_ip)
);

/* Group get/put/grab/rele events sharing the xfs_group_class layout. */
#define DEFINE_GROUP_REF_EVENT(name) \
DEFINE_EVENT(xfs_group_class, name, \
 TP_PROTO(struct xfs_group *xg, unsigned long caller_ip), \
 TP_ARGS(xg, caller_ip))
DEFINE_GROUP_REF_EVENT(xfs_group_get);
DEFINE_GROUP_REF_EVENT(xfs_group_hold);
DEFINE_GROUP_REF_EVENT(xfs_group_put);
DEFINE_GROUP_REF_EVENT(xfs_group_grab);
DEFINE_GROUP_REF_EVENT(xfs_group_grab_next_tag);
DEFINE_GROUP_REF_EVENT(xfs_group_rele);

#ifdef CONFIG_XFS_RT
/*
 * Zoned realtime device events: snapshot an rtgroup's used-block count
 * (from its rmap inode) and the filesystem-wide count of open zones.
 */
DECLARE_EVENT_CLASS(xfs_zone_class,
 TP_PROTO(struct xfs_rtgroup *rtg),
 TP_ARGS(rtg),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_rgnumber_t, rgno)
  __field(xfs_rgblock_t, used)
  __field(unsigned int, nr_open)
 ),
 TP_fast_assign(
  struct xfs_mount *mp = rtg_mount(rtg);

  __entry->dev = mp->m_super->s_dev;
  __entry->rgno = rtg_rgno(rtg);
  __entry->used = rtg_rmap(rtg)->i_used_blocks;
  __entry->nr_open = mp->m_zone_info->zi_nr_open_zones;
 ),
 TP_printk("dev %d:%d rgno 0x%x used 0x%x nr_open %u",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->rgno,
    __entry->used,
    __entry->nr_open)
);

/* Zone lifecycle events (open/full/empty/reset/GC) sharing xfs_zone_class. */
#define DEFINE_ZONE_EVENT(name)    \
DEFINE_EVENT(xfs_zone_class, name,   \
 TP_PROTO(struct xfs_rtgroup *rtg),  \
 TP_ARGS(rtg))
DEFINE_ZONE_EVENT(xfs_zone_emptied);
DEFINE_ZONE_EVENT(xfs_zone_full);
DEFINE_ZONE_EVENT(xfs_zone_opened);
DEFINE_ZONE_EVENT(xfs_zone_reset);
DEFINE_ZONE_EVENT(xfs_zone_gc_target_opened);

/*
 * Fired when blocks are freed inside a zone: records the extent being
 * freed (rgbno/len) and the rtgroup's remaining used-block count.
 */
TRACE_EVENT(xfs_zone_free_blocks,
 TP_PROTO(struct xfs_rtgroup *rtg, xfs_rgblock_t rgbno,
   xfs_extlen_t len),
 TP_ARGS(rtg, rgbno, len),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_rgnumber_t, rgno)
  __field(xfs_rgblock_t, used)
  __field(xfs_rgblock_t, rgbno)
  __field(xfs_extlen_t, len)
 ),
 TP_fast_assign(
  __entry->dev = rtg_mount(rtg)->m_super->s_dev;
  __entry->rgno = rtg_rgno(rtg);
  __entry->used = rtg_rmap(rtg)->i_used_blocks;
  __entry->rgbno = rgbno;
  __entry->len = len;
 ),
 TP_printk("dev %d:%d rgno 0x%x used 0x%x rgbno 0x%x len 0x%x",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->rgno,
    __entry->used,
    __entry->rgbno,
    __entry->len)
);

/*
 * Allocation within an open zone: records the extent (rgbno/len) together
 * with the open zone's allocated and written write pointers and the
 * rtgroup's used-block count.
 */
DECLARE_EVENT_CLASS(xfs_zone_alloc_class,
 TP_PROTO(struct xfs_open_zone *oz, xfs_rgblock_t rgbno,
   xfs_extlen_t len),
 TP_ARGS(oz, rgbno, len),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_rgnumber_t, rgno)
  __field(xfs_rgblock_t, used)
  __field(xfs_rgblock_t, allocated)
  __field(xfs_rgblock_t, written)
  __field(xfs_rgblock_t, rgbno)
  __field(xfs_extlen_t, len)
 ),
 TP_fast_assign(
  __entry->dev = rtg_mount(oz->oz_rtg)->m_super->s_dev;
  __entry->rgno = rtg_rgno(oz->oz_rtg);
  __entry->used = rtg_rmap(oz->oz_rtg)->i_used_blocks;
  __entry->allocated = oz->oz_allocated;
  __entry->written = oz->oz_written;
  __entry->rgbno = rgbno;
  __entry->len = len;
 ),
 TP_printk("dev %d:%d rgno 0x%x used 0x%x alloced 0x%x written 0x%x rgbno 0x%x len 0x%x",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->rgno,
    __entry->used,
    __entry->allocated,
    __entry->written,
    __entry->rgbno,
    __entry->len)
);

/* Zone allocation events sharing the xfs_zone_alloc_class layout. */
#define DEFINE_ZONE_ALLOC_EVENT(name)    \
DEFINE_EVENT(xfs_zone_alloc_class, name,   \
 TP_PROTO(struct xfs_open_zone *oz, xfs_rgblock_t rgbno, \
   xfs_extlen_t len),    \
 TP_ARGS(oz, rgbno, len))
DEFINE_ZONE_ALLOC_EVENT(xfs_zone_record_blocks);
DEFINE_ZONE_ALLOC_EVENT(xfs_zone_skip_blocks);
DEFINE_ZONE_ALLOC_EVENT(xfs_zone_alloc_blocks);

/*
 * Fired when zone GC picks a victim rtgroup; records which usage bucket
 * the victim was selected from and its used-block count.
 */
TRACE_EVENT(xfs_zone_gc_select_victim,
 TP_PROTO(struct xfs_rtgroup *rtg, unsigned int bucket),
 TP_ARGS(rtg, bucket),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_rgnumber_t, rgno)
  __field(xfs_rgblock_t, used)
  __field(unsigned int, bucket)
 ),
 TP_fast_assign(
  __entry->dev = rtg_mount(rtg)->m_super->s_dev;
  __entry->rgno = rtg_rgno(rtg);
  __entry->used = rtg_rmap(rtg)->i_used_blocks;
  __entry->bucket = bucket;
 ),
 TP_printk("dev %d:%d rgno 0x%x used 0x%x bucket %u",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->rgno,
    __entry->used,
    __entry->bucket)
);

/*
 * One-shot event at mount of a zoned filesystem: zone count, blocks per
 * zone, and the maximum number of concurrently open zones.
 */
TRACE_EVENT(xfs_zones_mount,
 TP_PROTO(struct xfs_mount *mp),
 TP_ARGS(mp),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_rgnumber_t, rgcount)
  __field(uint32_t, blocks)
  __field(unsigned int, max_open_zones)
 ),
 TP_fast_assign(
  __entry->dev = mp->m_super->s_dev;
  __entry->rgcount = mp->m_sb.sb_rgcount;
  __entry->blocks = mp->m_groups[XG_TYPE_RTG].blocks;
  __entry->max_open_zones = mp->m_max_open_zones;
 ),
 TP_printk("dev %d:%d zoned %u blocks_per_zone %u, max_open %u",
    MAJOR(__entry->dev), MINOR(__entry->dev),
  __entry->rgcount,
  __entry->blocks,
  __entry->max_open_zones)
);
#endif /* CONFIG_XFS_RT */

/*
 * Fired when the background inode GC worker runs; records how many times
 * the shrinker has poked this worker.
 */
TRACE_EVENT(xfs_inodegc_worker,
 TP_PROTO(struct xfs_mount *mp, unsigned int shrinker_hits),
 TP_ARGS(mp, shrinker_hits),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(unsigned int, shrinker_hits)
 ),
 TP_fast_assign(
  __entry->dev = mp->m_super->s_dev;
  __entry->shrinker_hits = shrinker_hits;
 ),
 TP_printk("dev %d:%d shrinker_hits %u",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->shrinker_hits)
);

/*
 * Filesystem-wide state snapshot: feature mask, operational state flags,
 * and VFS superblock flags.  mp may be NULL (e.g. very early in mount),
 * in which case only the caller is recorded.
 */
DECLARE_EVENT_CLASS(xfs_fs_class,
 TP_PROTO(struct xfs_mount *mp, void *caller_ip),
 TP_ARGS(mp, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(unsigned long long, mflags)
  __field(unsigned long, opstate)
  __field(unsigned long, sbflags)
  __field(void *, caller_ip)
 ),
 TP_fast_assign(
  if (mp) {
   __entry->dev = mp->m_super->s_dev;
   __entry->mflags = mp->m_features;
   __entry->opstate = mp->m_opstate;
   __entry->sbflags = mp->m_super->s_flags;
  }
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d m_features 0x%llx opstate (%s) s_flags 0x%lx caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->mflags,
    __print_flags(__entry->opstate, "|", XFS_OPSTATE_STRINGS),
    __entry->sbflags,
    __entry->caller_ip)
);

/* Mount-wide GC/sync events sharing the xfs_fs_class layout. */
#define DEFINE_FS_EVENT(name) \
DEFINE_EVENT(xfs_fs_class, name,     \
 TP_PROTO(struct xfs_mount *mp, void *caller_ip), \
 TP_ARGS(mp, caller_ip))
DEFINE_FS_EVENT(xfs_inodegc_flush);
DEFINE_FS_EVENT(xfs_inodegc_push);
DEFINE_FS_EVENT(xfs_inodegc_start);
DEFINE_FS_EVENT(xfs_inodegc_stop);
DEFINE_FS_EVENT(xfs_inodegc_queue);
DEFINE_FS_EVENT(xfs_inodegc_throttle);
DEFINE_FS_EVENT(xfs_fs_sync_fs);
DEFINE_FS_EVENT(xfs_blockgc_start);
DEFINE_FS_EVENT(xfs_blockgc_stop);
DEFINE_FS_EVENT(xfs_blockgc_worker);
DEFINE_FS_EVENT(xfs_blockgc_flush_all);

/*
 * Fired when the memory shrinker scans the inode GC queues; records the
 * requested scan count from the shrink control.
 */
TRACE_EVENT(xfs_inodegc_shrinker_scan,
 TP_PROTO(struct xfs_mount *mp, struct shrink_control *sc,
   void *caller_ip),
 TP_ARGS(mp, sc, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(unsigned long, nr_to_scan)
  __field(void *, caller_ip)
 ),
 TP_fast_assign(
  __entry->dev = mp->m_super->s_dev;
  __entry->nr_to_scan = sc->nr_to_scan;
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d nr_to_scan %lu caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->nr_to_scan,
    __entry->caller_ip)
);

/* Minimal per-AG event: just the device and AG number. */
DECLARE_EVENT_CLASS(xfs_ag_class,
 TP_PROTO(const struct xfs_perag *pag),
 TP_ARGS(pag),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_agnumber_t, agno)
 ),
 TP_fast_assign(
  __entry->dev = pag_mount(pag)->m_super->s_dev;
  __entry->agno = pag_agno(pag);
 ),
 TP_printk("dev %d:%d agno 0x%x",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->agno)
);
/* AGF/AGI header read events sharing the xfs_ag_class layout. */
#define DEFINE_AG_EVENT(name) \
DEFINE_EVENT(xfs_ag_class, name, \
 TP_PROTO(const struct xfs_perag *pag), \
 TP_ARGS(pag))

DEFINE_AG_EVENT(xfs_read_agf);
DEFINE_AG_EVENT(xfs_alloc_read_agf);
DEFINE_AG_EVENT(xfs_read_agi);
DEFINE_AG_EVENT(xfs_ialloc_read_agi);

/*
 * Attr-list descent through a da-btree node: same context snapshot as
 * xfs_attr_list_class plus the node entry's hashval and child block.
 *
 * Fix: TP_fast_assign never copied ctx->dupcnt into the ring buffer even
 * though TP_printk prints __entry->dupcnt, so the "dupcnt" value in trace
 * output was uninitialized ring-buffer memory.
 */
TRACE_EVENT(xfs_attr_list_node_descend,
 TP_PROTO(struct xfs_attr_list_context *ctx,
   struct xfs_da_node_entry *btree),
 TP_ARGS(ctx, btree),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(u32, hashval)
  __field(u32, blkno)
  __field(u32, offset)
  __field(void *, buffer)
  __field(int, bufsize)
  __field(int, count)
  __field(int, firstu)
  __field(int, dupcnt)
  __field(unsigned int, attr_filter)
  __field(u32, bt_hashval)
  __field(u32, bt_before)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
  __entry->ino = ctx->dp->i_ino;
  __entry->hashval = ctx->cursor.hashval;
  __entry->blkno = ctx->cursor.blkno;
  __entry->offset = ctx->cursor.offset;
  __entry->dupcnt = ctx->dupcnt;
  __entry->buffer = ctx->buffer;
  __entry->bufsize = ctx->bufsize;
  __entry->count = ctx->count;
  __entry->firstu = ctx->firstu;
  __entry->attr_filter = ctx->attr_filter;
  __entry->bt_hashval = be32_to_cpu(btree->hashval);
  __entry->bt_before = be32_to_cpu(btree->before);
 ),
 TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
    "buffer %p size %u count %u firstu %u filter %s "
    "node hashval %u, node before %u",
    MAJOR(__entry->dev), MINOR(__entry->dev),
     __entry->ino,
     __entry->hashval,
     __entry->blkno,
     __entry->offset,
     __entry->dupcnt,
     __entry->buffer,
     __entry->bufsize,
     __entry->count,
     __entry->firstu,
     __print_flags(__entry->attr_filter, "|",
     XFS_ATTR_FILTER_FLAGS),
     __entry->bt_hashval,
     __entry->bt_before)
);

/*
 * Extent-map events: decode the in-core extent at the iext cursor
 * position of the fork selected by 'state' and record the mapping
 * (fileoff/startblock/blockcount/state) plus the cursor location.
 */
DECLARE_EVENT_CLASS(xfs_bmap_class,
 TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state,
   unsigned long caller_ip),
 TP_ARGS(ip, cur, state, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(void *, leaf)
  __field(int, pos)
  __field(xfs_fileoff_t, startoff)
  __field(xfs_fsblock_t, startblock)
  __field(xfs_filblks_t, blockcount)
  __field(xfs_exntst_t, state)
  __field(int, bmap_state)
  __field(unsigned long, caller_ip)
 ),
 TP_fast_assign(
  struct xfs_ifork *ifp;
  struct xfs_bmbt_irec r;

  ifp = xfs_iext_state_to_fork(ip, state);
  xfs_iext_get_extent(ifp, cur, &r);
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->leaf = cur->leaf;
  __entry->pos = cur->pos;
  __entry->startoff = r.br_startoff;
  __entry->startblock = r.br_startblock;
  __entry->blockcount = r.br_blockcount;
  __entry->state = r.br_state;
  __entry->bmap_state = state;
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d ino 0x%llx state %s cur %p/%d "
    "fileoff 0x%llx startblock 0x%llx fsbcount 0x%llx flag %d caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
    __entry->leaf,
    __entry->pos,
    __entry->startoff,
    (int64_t)__entry->startblock,
    __entry->blockcount,
    __entry->state,
    (char *)__entry->caller_ip)
)

/* Extent insert/remove/update and read/write events on xfs_bmap_class. */
#define DEFINE_BMAP_EVENT(name) \
DEFINE_EVENT(xfs_bmap_class, name, \
 TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state, \
   unsigned long caller_ip), \
 TP_ARGS(ip, cur, state, caller_ip))
DEFINE_BMAP_EVENT(xfs_iext_insert);
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
DEFINE_BMAP_EVENT(xfs_read_extent);
DEFINE_BMAP_EVENT(xfs_write_extent);

/*
 * Buffer lifecycle events: snapshot a buffer's daddr/length, hold and pin
 * counts, lock semaphore value, flags, verifier ops, and the caller.
 */
DECLARE_EVENT_CLASS(xfs_buf_class,
 TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
 TP_ARGS(bp, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_daddr_t, bno)
  __field(int, nblks)
  __field(int, hold)
  __field(int, pincount)
  __field(unsigned, lockval)
  __field(unsigned, flags)
  __field(unsigned long, caller_ip)
  __field(const void *, buf_ops)
 ),
 TP_fast_assign(
  __entry->dev = bp->b_target->bt_dev;
  __entry->bno = xfs_buf_daddr(bp);
  __entry->nblks = bp->b_length;
  __entry->hold = bp->b_hold;
  __entry->pincount = atomic_read(&bp->b_pin_count);
  __entry->lockval = bp->b_sema.count;
  __entry->flags = bp->b_flags;
  __entry->caller_ip = caller_ip;
  __entry->buf_ops = bp->b_ops;
 ),
 TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
    "lock %d flags %s bufops %pS caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    (unsigned long long)__entry->bno,
    __entry->nblks,
    __entry->hold,
    __entry->pincount,
    __entry->lockval,
    __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
    __entry->buf_ops,
    (void *)__entry->caller_ip)
)

/* Buffer lifecycle/lock/delwri events sharing the xfs_buf_class layout. */
#define DEFINE_BUF_EVENT(name) \
DEFINE_EVENT(xfs_buf_class, name, \
 TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
 TP_ARGS(bp, caller_ip))
DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_submit);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_trylock_fail);
DEFINE_BUF_EVENT(xfs_buf_trylock);
DEFINE_BUF_EVENT(xfs_buf_unlock);
DEFINE_BUF_EVENT(xfs_buf_iowait);
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
DEFINE_BUF_EVENT(xfs_buf_delwri_queued);
DEFINE_BUF_EVENT(xfs_buf_delwri_split);
DEFINE_BUF_EVENT(xfs_buf_get_uncached);
DEFINE_BUF_EVENT(xfs_buf_item_relse);
DEFINE_BUF_EVENT(xfs_buf_iodone_async);
DEFINE_BUF_EVENT(xfs_buf_error_relse);
DEFINE_BUF_EVENT(xfs_buf_drain_buftarg);
DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
DEFINE_BUF_EVENT(xfs_buf_backing_folio);
DEFINE_BUF_EVENT(xfs_buf_backing_kmem);
DEFINE_BUF_EVENT(xfs_buf_backing_vmalloc);
DEFINE_BUF_EVENT(xfs_buf_backing_fallback);

/* not really buffer traces, but the buf provides useful information */
DEFINE_BUF_EVENT(xfs_btree_corrupt);
DEFINE_BUF_EVENT(xfs_reset_dqcounts);

/*
 * Like xfs_buf_class, but records caller-supplied lookup flags rather
 * than the buffer's own b_flags (used by find/get/read paths).
 */
DECLARE_EVENT_CLASS(xfs_buf_flags_class,
 TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
 TP_ARGS(bp, flags, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_daddr_t, bno)
  __field(unsigned int, length)
  __field(int, hold)
  __field(int, pincount)
  __field(unsigned, lockval)
  __field(unsigned, flags)
  __field(unsigned long, caller_ip)
 ),
 TP_fast_assign(
  __entry->dev = bp->b_target->bt_dev;
  __entry->bno = xfs_buf_daddr(bp);
  __entry->length = bp->b_length;
  __entry->flags = flags;
  __entry->hold = bp->b_hold;
  __entry->pincount = atomic_read(&bp->b_pin_count);
  __entry->lockval = bp->b_sema.count;
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
    "lock %d flags %s caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    (unsigned long long)__entry->bno,
    __entry->length,
    __entry->hold,
    __entry->pincount,
    __entry->lockval,
    __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
    (void *)__entry->caller_ip)
)

/* Buffer lookup/read events sharing the xfs_buf_flags_class layout. */
#define DEFINE_BUF_FLAGS_EVENT(name) \
DEFINE_EVENT(xfs_buf_flags_class, name, \
 TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
 TP_ARGS(bp, flags, caller_ip))
DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_readahead);

/*
 * Fired when buffer I/O completes with an error: records the buffer
 * state, the error code, and the failure return address.
 */
TRACE_EVENT(xfs_buf_ioerror,
 TP_PROTO(struct xfs_buf *bp, int error, xfs_failaddr_t caller_ip),
 TP_ARGS(bp, error, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_daddr_t, bno)
  __field(unsigned int, length)
  __field(unsigned, flags)
  __field(int, hold)
  __field(int, pincount)
  __field(unsigned, lockval)
  __field(int, error)
  __field(xfs_failaddr_t, caller_ip)
 ),
 TP_fast_assign(
  __entry->dev = bp->b_target->bt_dev;
  __entry->bno = xfs_buf_daddr(bp);
  __entry->length = bp->b_length;
  __entry->hold = bp->b_hold;
  __entry->pincount = atomic_read(&bp->b_pin_count);
  __entry->lockval = bp->b_sema.count;
  __entry->error = error;
  __entry->flags = bp->b_flags;
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
    "lock %d error %d flags %s caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    (unsigned long long)__entry->bno,
    __entry->length,
    __entry->hold,
    __entry->pincount,
    __entry->lockval,
    __entry->error,
    __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
    (void *)__entry->caller_ip)
);

/*
 * Buffer log item events: snapshot both the underlying buffer's state and
 * the log item's recursion count, refcount, BLI flags, and LI flags.
 */
DECLARE_EVENT_CLASS(xfs_buf_item_class,
 TP_PROTO(struct xfs_buf_log_item *bip),
 TP_ARGS(bip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_daddr_t, buf_bno)
  __field(unsigned int, buf_len)
  __field(int, buf_hold)
  __field(int, buf_pincount)
  __field(int, buf_lockval)
  __field(unsigned, buf_flags)
  __field(unsigned, bli_recur)
  __field(int, bli_refcount)
  __field(unsigned, bli_flags)
  __field(unsigned long, li_flags)
 ),
 TP_fast_assign(
  __entry->dev = bip->bli_buf->b_target->bt_dev;
  __entry->bli_flags = bip->bli_flags;
  __entry->bli_recur = bip->bli_recur;
  __entry->bli_refcount = atomic_read(&bip->bli_refcount);
  __entry->buf_bno = xfs_buf_daddr(bip->bli_buf);
  __entry->buf_len = bip->bli_buf->b_length;
  __entry->buf_flags = bip->bli_buf->b_flags;
  __entry->buf_hold = bip->bli_buf->b_hold;
  __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
  __entry->buf_lockval = bip->bli_buf->b_sema.count;
  __entry->li_flags = bip->bli_item.li_flags;
 ),
 TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
    "lock %d flags %s recur %d refcount %d bliflags %s "
    "liflags %s",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    (unsigned long long)__entry->buf_bno,
    __entry->buf_len,
    __entry->buf_hold,
    __entry->buf_pincount,
    __entry->buf_lockval,
    __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
    __entry->bli_recur,
    __entry->bli_refcount,
    __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
    __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
)

/* Buffer log item and transaction-buffer events on xfs_buf_item_class. */
#define DEFINE_BUF_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_buf_item_class, name, \
 TP_PROTO(struct xfs_buf_log_item *bip), \
 TP_ARGS(bip))
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_ordered);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_ordered);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_release);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bdetach);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);

/*
 * Filestream allocator events: records the inode, its associated AG, and
 * that AG's current filestream count.
 */
DECLARE_EVENT_CLASS(xfs_filestream_class,
 TP_PROTO(const struct xfs_perag *pag, xfs_ino_t ino),
 TP_ARGS(pag, ino),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(xfs_agnumber_t, agno)
  __field(int, streams)
 ),
 TP_fast_assign(
  __entry->dev = pag_mount(pag)->m_super->s_dev;
  __entry->ino = ino;
  __entry->agno = pag_agno(pag);
  __entry->streams = atomic_read(&pag->pagf_fstrms);
 ),
 TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->agno,
    __entry->streams)
)
/* Filestream association events sharing the xfs_filestream_class layout. */
#define DEFINE_FILESTREAM_EVENT(name) \
DEFINE_EVENT(xfs_filestream_class, name, \
 TP_PROTO(const struct xfs_perag *pag, xfs_ino_t ino), \
 TP_ARGS(pag, ino))
DEFINE_FILESTREAM_EVENT(xfs_filestream_free);
DEFINE_FILESTREAM_EVENT(xfs_filestream_lookup);
DEFINE_FILESTREAM_EVENT(xfs_filestream_scan);

/*
 * Fired when the filestream allocator picks an AG for an inode; adds the
 * AG's free block count to the common filestream fields.
 */
TRACE_EVENT(xfs_filestream_pick,
 TP_PROTO(const struct xfs_perag *pag, xfs_ino_t ino),
 TP_ARGS(pag, ino),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(xfs_agnumber_t, agno)
  __field(int, streams)
  __field(xfs_extlen_t, free)
 ),
 TP_fast_assign(
  __entry->dev = pag_mount(pag)->m_super->s_dev;
  __entry->ino = ino;
  __entry->agno = pag_agno(pag);
  __entry->streams = atomic_read(&pag->pagf_fstrms);
  __entry->free = pag->pagf_freeblks;
 ),
 TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d free %d",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->agno,
    __entry->streams,
    __entry->free)
);

/*
 * Inode locking events: records which lock flags (ILOCK/IOLOCK/MMAPLOCK,
 * shared/exclusive) were taken or released and by whom.
 */
DECLARE_EVENT_CLASS(xfs_lock_class,
 TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
   unsigned long caller_ip),
 TP_ARGS(ip,  lock_flags, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(int, lock_flags)
  __field(unsigned long, caller_ip)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->lock_flags = lock_flags;
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d ino 0x%llx flags %s caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
    (void *)__entry->caller_ip)
)

/* Inode lock/unlock/demote events sharing the xfs_lock_class layout. */
#define DEFINE_LOCK_EVENT(name) \
DEFINE_EVENT(xfs_lock_class, name, \
 TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
   unsigned long caller_ip), \
 TP_ARGS(ip,  lock_flags, caller_ip))
DEFINE_LOCK_EVENT(xfs_ilock);
DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);

DECLARE_EVENT_CLASS(xfs_inode_class,
 TP_PROTO(struct xfs_inode *ip),
 TP_ARGS(ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(unsigned long, iflags)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->iflags = ip->i_flags;
 ),
 TP_printk("dev %d:%d ino 0x%llx iflags 0x%lx",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->iflags)
)

/* Instantiate xfs_inode_class for every single-inode tracepoint below. */
#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
 TP_PROTO(struct xfs_inode *ip), \
 TP_ARGS(ip))
/* inode cache (iget) lookups */
DEFINE_INODE_EVENT(xfs_iget_skip);
DEFINE_INODE_EVENT(xfs_iget_recycle);
DEFINE_INODE_EVENT(xfs_iget_recycle_fail);
DEFINE_INODE_EVENT(xfs_iget_hit);
DEFINE_INODE_EVENT(xfs_iget_miss);

/* VFS-facing inode operations */
DEFINE_INODE_EVENT(xfs_getattr);
DEFINE_INODE_EVENT(xfs_setattr);
DEFINE_INODE_EVENT(xfs_readlink);
DEFINE_INODE_EVENT(xfs_inactive_symlink);
DEFINE_INODE_EVENT(xfs_alloc_file_space);
DEFINE_INODE_EVENT(xfs_free_file_space);
DEFINE_INODE_EVENT(xfs_zero_file_space);
DEFINE_INODE_EVENT(xfs_collapse_file_space);
DEFINE_INODE_EVENT(xfs_insert_file_space);
DEFINE_INODE_EVENT(xfs_readdir);
#ifdef CONFIG_XFS_POSIX_ACL
DEFINE_INODE_EVENT(xfs_get_acl);
#endif
DEFINE_INODE_EVENT(xfs_vm_bmap);
DEFINE_INODE_EVENT(xfs_file_ioctl);
#ifdef CONFIG_COMPAT
DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
#endif
DEFINE_INODE_EVENT(xfs_ioctl_setattr);
DEFINE_INODE_EVENT(xfs_dir_fsync);
DEFINE_INODE_EVENT(xfs_file_fsync);
DEFINE_INODE_EVENT(xfs_destroy_inode);
DEFINE_INODE_EVENT(xfs_update_time);

DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
DEFINE_INODE_EVENT(xfs_dquot_dqdetach);

/* background inode garbage collection / reclaim state transitions */
DEFINE_INODE_EVENT(xfs_inode_set_eofblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_clear_eofblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid);
DEFINE_INODE_EVENT(xfs_inode_set_cowblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_clear_cowblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid);
DEFINE_INODE_EVENT(xfs_inode_set_reclaimable);
DEFINE_INODE_EVENT(xfs_inode_reclaiming);
DEFINE_INODE_EVENT(xfs_inode_set_need_inactive);
DEFINE_INODE_EVENT(xfs_inode_inactivating);

/*
 * ftrace's __print_symbolic requires that all enum values be wrapped in the
 * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace
 * ring buffer.  Somehow this was only worth mentioning in the ftrace sample
 * code.
 */

/* refcount btree domains, used by __print_symbolic in refcount tracepoints */
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED);
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW);

/*
 * Trace page faults on an inode's mapping; "order" is the folio order
 * passed in by the fault path.
 */
DECLARE_EVENT_CLASS(xfs_fault_class,
 TP_PROTO(struct xfs_inode *ip, unsigned int order),
 TP_ARGS(ip, order),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(unsigned int, order)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->order = order;
 ),
 TP_printk("dev %d:%d ino 0x%llx order %u",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->order)
)

/* Instantiate xfs_fault_class for read and write fault tracepoints. */
#define DEFINE_FAULT_EVENT(name) \
DEFINE_EVENT(xfs_fault_class, name, \
 TP_PROTO(struct xfs_inode *ip, unsigned int order), \
 TP_ARGS(ip, order))
DEFINE_FAULT_EVENT(xfs_read_fault);
DEFINE_FAULT_EVENT(xfs_write_fault);

/*
 * Trace inode reference/pin count changes: VFS i_count, XFS log pin count,
 * the inode flags, and the caller responsible for the change.
 */
DECLARE_EVENT_CLASS(xfs_iref_class,
 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
 TP_ARGS(ip, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(int, count)
  __field(int, pincount)
  __field(unsigned long, iflags)
  __field(unsigned long, caller_ip)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->count = atomic_read(&VFS_I(ip)->i_count);
  __entry->pincount = atomic_read(&ip->i_pincount);
  __entry->iflags = ip->i_flags;
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d ino 0x%llx count %d pincount %d iflags 0x%lx caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->count,
    __entry->pincount,
    __entry->iflags,
    (char *)__entry->caller_ip)
)

/*
 * Trace speculative preallocation sizing for buffered writes: the chosen
 * prealloc length in blocks, the throttle shift applied, and the mount's
 * allocsize in blocks.
 */
TRACE_EVENT(xfs_iomap_prealloc_size,
 TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t blocks, int shift,
   unsigned int writeio_blocks),
 TP_ARGS(ip, blocks, shift, writeio_blocks),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(xfs_fsblock_t, blocks)
  __field(int, shift)
  __field(unsigned int, writeio_blocks)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->blocks = blocks;
  __entry->shift = shift;
  __entry->writeio_blocks = writeio_blocks;
 ),
 TP_printk("dev %d:%d ino 0x%llx prealloc blocks %llu shift %d "
    "m_allocsize_blocks %u",
    MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
    __entry->blocks, __entry->shift, __entry->writeio_blocks)
)

/*
 * Trace an inode btree record merge before it happens: the existing record
 * (agino/holemask) and the incoming record (nagino/nholemask).
 */
TRACE_EVENT(xfs_irec_merge_pre,
 TP_PROTO(const struct xfs_perag *pag,
   const struct xfs_inobt_rec_incore *rec,
   const struct xfs_inobt_rec_incore *nrec),
 TP_ARGS(pag, rec, nrec),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_agnumber_t, agno)
  __field(xfs_agino_t, agino)
  __field(uint16_t, holemask)
  __field(xfs_agino_t, nagino)
  __field(uint16_t, nholemask)
 ),
 TP_fast_assign(
  __entry->dev = pag_mount(pag)->m_super->s_dev;
  __entry->agno = pag_agno(pag);
  __entry->agino = rec->ir_startino;
  __entry->holemask = rec->ir_holemask;
  __entry->nagino = nrec->ir_startino;
  __entry->nholemask = nrec->ir_holemask;
 ),
 TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x new_agino 0x%x new_holemask 0x%x",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->agno,
    __entry->agino,
    __entry->holemask,
    __entry->nagino,
    __entry->nholemask)
)

/*
 * Trace the merged inode btree record after an xfs_irec_merge_pre event.
 */
TRACE_EVENT(xfs_irec_merge_post,
 TP_PROTO(const struct xfs_perag *pag,
   const struct xfs_inobt_rec_incore *nrec),
 TP_ARGS(pag, nrec),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_agnumber_t, agno)
  __field(xfs_agino_t, agino)
  __field(uint16_t, holemask)
 ),
 TP_fast_assign(
  __entry->dev = pag_mount(pag)->m_super->s_dev;
  __entry->agno = pag_agno(pag);
  __entry->agino = nrec->ir_startino;
  __entry->holemask = nrec->ir_holemask;
 ),
 TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x",
    MAJOR(__entry->dev),
    MINOR(__entry->dev),
    __entry->agno,
    __entry->agino,
    __entry->holemask)
)

/* Instantiate xfs_iref_class for inode reference/pin tracepoints. */
#define DEFINE_IREF_EVENT(name) \
DEFINE_EVENT(xfs_iref_class, name, \
 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
 TP_ARGS(ip, caller_ip))
DEFINE_IREF_EVENT(xfs_irele);
DEFINE_IREF_EVENT(xfs_inode_pin);
DEFINE_IREF_EVENT(xfs_inode_unpin);
DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);
DEFINE_IREF_EVENT(xfs_inode_push_pinned);
DEFINE_IREF_EVENT(xfs_inode_push_stale);

/*
 * Trace directory namespace operations: parent directory inode plus the
 * entry name being operated on.
 */
DECLARE_EVENT_CLASS(xfs_namespace_class,
 TP_PROTO(struct xfs_inode *dp, const struct xfs_name *name),
 TP_ARGS(dp, name),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, dp_ino)
  __field(int, namelen)
  __dynamic_array(char, name, name->len)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(dp)->i_sb->s_dev;
  __entry->dp_ino = dp->i_ino;
  __entry->namelen = name->len;
  /* name is not NUL terminated; printed with %.*s bounded by namelen */
  memcpy(__get_str(name), name->name, name->len);
 ),
 TP_printk("dev %d:%d dp ino 0x%llx name %.*s",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->dp_ino,
    __entry->namelen,
    __get_str(name))
)

/* Instantiate xfs_namespace_class for directory entry tracepoints. */
#define DEFINE_NAMESPACE_EVENT(name) \
DEFINE_EVENT(xfs_namespace_class, name, \
 TP_PROTO(struct xfs_inode *dp, const struct xfs_name *name), \
 TP_ARGS(dp, name))
DEFINE_NAMESPACE_EVENT(xfs_remove);
DEFINE_NAMESPACE_EVENT(xfs_link);
DEFINE_NAMESPACE_EVENT(xfs_lookup);
DEFINE_NAMESPACE_EVENT(xfs_create);
DEFINE_NAMESPACE_EVENT(xfs_symlink);

/*
 * Trace a rename: both parent directories and both entry names.  Names are
 * stored unterminated and printed with length-bounded %.*s.
 */
TRACE_EVENT(xfs_rename,
 TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
   struct xfs_name *src_name, struct xfs_name *target_name),
 TP_ARGS(src_dp, target_dp, src_name, target_name),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, src_dp_ino)
  __field(xfs_ino_t, target_dp_ino)
  __field(int, src_namelen)
  __field(int, target_namelen)
  __dynamic_array(char, src_name, src_name->len)
  __dynamic_array(char, target_name, target_name->len)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(src_dp)->i_sb->s_dev;
  __entry->src_dp_ino = src_dp->i_ino;
  __entry->target_dp_ino = target_dp->i_ino;
  __entry->src_namelen = src_name->len;
  __entry->target_namelen = target_name->len;
  memcpy(__get_str(src_name), src_name->name, src_name->len);
  memcpy(__get_str(target_name), target_name->name,
   target_name->len);
 ),
 TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx"
    " src name %.*s target name %.*s",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->src_dp_ino,
    __entry->target_dp_ino,
    __entry->src_namelen,
    __get_str(src_name),
    __entry->target_namelen,
    __get_str(target_name))
)

/*
 * Snapshot a dquot's identity, reference count, reservations, usage
 * counters and hard/soft limits for all three resource types (blocks,
 * realtime blocks, inodes).
 *
 * Fix: the TP_printk format string ended with a stray ']' after
 * "isoftlimit 0x%llx" with no matching '[' anywhere in the format;
 * drop the unbalanced bracket from the trace output.
 */
DECLARE_EVENT_CLASS(xfs_dquot_class,
 TP_PROTO(struct xfs_dquot *dqp),
 TP_ARGS(dqp),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(u32, id)
  __field(xfs_dqtype_t, type)
  __field(unsigned, flags)
  __field(unsigned, nrefs)
  __field(unsigned long long, res_bcount)
  __field(unsigned long long, res_rtbcount)
  __field(unsigned long long, res_icount)

  __field(unsigned long long, bcount)
  __field(unsigned long long, rtbcount)
  __field(unsigned long long, icount)

  __field(unsigned long long, blk_hardlimit)
  __field(unsigned long long, blk_softlimit)
  __field(unsigned long long, rtb_hardlimit)
  __field(unsigned long long, rtb_softlimit)
  __field(unsigned long long, ino_hardlimit)
  __field(unsigned long long, ino_softlimit)
 ),
 TP_fast_assign(
  __entry->dev = dqp->q_mount->m_super->s_dev;
  __entry->id = dqp->q_id;
  __entry->type = dqp->q_type;
  __entry->flags = dqp->q_flags;
  __entry->nrefs = dqp->q_nrefs;

  /* outstanding transaction reservations */
  __entry->res_bcount = dqp->q_blk.reserved;
  __entry->res_rtbcount = dqp->q_rtb.reserved;
  __entry->res_icount = dqp->q_ino.reserved;

  /* on-disk resource usage */
  __entry->bcount = dqp->q_blk.count;
  __entry->rtbcount = dqp->q_rtb.count;
  __entry->icount = dqp->q_ino.count;

  /* enforcement limits */
  __entry->blk_hardlimit = dqp->q_blk.hardlimit;
  __entry->blk_softlimit = dqp->q_blk.softlimit;
  __entry->rtb_hardlimit = dqp->q_rtb.hardlimit;
  __entry->rtb_softlimit = dqp->q_rtb.softlimit;
  __entry->ino_hardlimit = dqp->q_ino.hardlimit;
  __entry->ino_softlimit = dqp->q_ino.softlimit;
 ),
 TP_printk("dev %d:%d id 0x%x type %s flags %s nrefs %u "
    "res_bc 0x%llx res_rtbc 0x%llx res_ic 0x%llx "
    "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx "
    "rtbcnt 0x%llx rtbhardlimit 0x%llx rtbsoftlimit 0x%llx "
    "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->id,
    __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS),
    __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS),
    __entry->nrefs,
    __entry->res_bcount,
    __entry->res_rtbcount,
    __entry->res_icount,
    __entry->bcount,
    __entry->blk_hardlimit,
    __entry->blk_softlimit,
    __entry->rtbcount,
    __entry->rtb_hardlimit,
    __entry->rtb_softlimit,
    __entry->icount,
    __entry->ino_hardlimit,
    __entry->ino_softlimit)
)

/* Instantiate xfs_dquot_class for the dquot lifecycle tracepoints. */
#define DEFINE_DQUOT_EVENT(name) \
DEFINE_EVENT(xfs_dquot_class, name, \
 TP_PROTO(struct xfs_dquot *dqp), \
 TP_ARGS(dqp))
DEFINE_DQUOT_EVENT(xfs_dqadjust);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_done);
DEFINE_DQUOT_EVENT(xfs_dqattach_found);
DEFINE_DQUOT_EVENT(xfs_dqattach_get);
DEFINE_DQUOT_EVENT(xfs_dqalloc);
DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
DEFINE_DQUOT_EVENT(xfs_dqread);
DEFINE_DQUOT_EVENT(xfs_dqread_fail);
DEFINE_DQUOT_EVENT(xfs_dqget_hit);
DEFINE_DQUOT_EVENT(xfs_dqget_miss);
DEFINE_DQUOT_EVENT(xfs_dqget_freeing);
DEFINE_DQUOT_EVENT(xfs_dqget_dup);
DEFINE_DQUOT_EVENT(xfs_dqput);
DEFINE_DQUOT_EVENT(xfs_dqput_free);
DEFINE_DQUOT_EVENT(xfs_dqrele);
DEFINE_DQUOT_EVENT(xfs_dqflush);
DEFINE_DQUOT_EVENT(xfs_dqflush_force);
DEFINE_DQUOT_EVENT(xfs_dqflush_done);
DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_before);
DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_after);

/*
 * Trace a transactional modification to a dquot counter: which counter
 * field (XFS_QMOPT_* flag) is changing and by how much.
 */
TRACE_EVENT(xfs_trans_mod_dquot,
 TP_PROTO(struct xfs_trans *tp, struct xfs_dquot *dqp,
   unsigned int field, int64_t delta),
 TP_ARGS(tp, dqp, field, delta),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_dqtype_t, type)
  __field(unsigned int, flags)
  __field(unsigned int, dqid)
  __field(unsigned int, field)
  __field(int64_t, delta)
 ),
 TP_fast_assign(
  __entry->dev = tp->t_mountp->m_super->s_dev;
  __entry->type = dqp->q_type;
  __entry->flags = dqp->q_flags;
  __entry->dqid = dqp->q_id;
  __entry->field = field;
  __entry->delta = delta;
 ),
 TP_printk("dev %d:%d dquot id 0x%x type %s flags %s field %s delta %lld",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->dqid,
    __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS),
    __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS),
    __print_flags(__entry->field, "|", XFS_QMOPT_FLAGS),
    __entry->delta)
);

/*
 * Snapshot a transaction's per-dquot delta tracking structure: block,
 * realtime block and inode reservations plus the accumulated deltas.
 */
DECLARE_EVENT_CLASS(xfs_dqtrx_class,
 TP_PROTO(struct xfs_dqtrx *qtrx),
 TP_ARGS(qtrx),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_dqtype_t, type)
  __field(unsigned int, flags)
  __field(u32, dqid)

  __field(uint64_t, blk_res)
  __field(int64_t,  bcount_delta)
  __field(int64_t,  delbcnt_delta)

  __field(uint64_t, rtblk_res)
  __field(uint64_t, rtblk_res_used)
  __field(int64_t,  rtbcount_delta)
  __field(int64_t,  delrtb_delta)

  __field(uint64_t, ino_res)
  __field(uint64_t, ino_res_used)
  __field(int64_t,  icount_delta)
 ),
 TP_fast_assign(
  __entry->dev = qtrx->qt_dquot->q_mount->m_super->s_dev;
  __entry->type = qtrx->qt_dquot->q_type;
  __entry->flags = qtrx->qt_dquot->q_flags;
  __entry->dqid = qtrx->qt_dquot->q_id;

  __entry->blk_res = qtrx->qt_blk_res;
  __entry->bcount_delta = qtrx->qt_bcount_delta;
  __entry->delbcnt_delta = qtrx->qt_delbcnt_delta;

  __entry->rtblk_res = qtrx->qt_rtblk_res;
  __entry->rtblk_res_used = qtrx->qt_rtblk_res_used;
  __entry->rtbcount_delta = qtrx->qt_rtbcount_delta;
  __entry->delrtb_delta = qtrx->qt_delrtb_delta;

  __entry->ino_res = qtrx->qt_ino_res;
  __entry->ino_res_used = qtrx->qt_ino_res_used;
  __entry->icount_delta = qtrx->qt_icount_delta;
 ),
 TP_printk("dev %d:%d dquot id 0x%x type %s flags %s "
    "blk_res %llu bcount_delta %lld delbcnt_delta %lld "
    "rtblk_res %llu rtblk_res_used %llu rtbcount_delta %lld delrtb_delta %lld "
    "ino_res %llu ino_res_used %llu icount_delta %lld",
  MAJOR(__entry->dev), MINOR(__entry->dev),
  __entry->dqid,
    __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS),
    __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS),

  __entry->blk_res,
  __entry->bcount_delta,
  __entry->delbcnt_delta,

  __entry->rtblk_res,
  __entry->rtblk_res_used,
  __entry->rtbcount_delta,
  __entry->delrtb_delta,

  __entry->ino_res,
  __entry->ino_res_used,
  __entry->icount_delta)
)

/* Instantiate xfs_dqtrx_class for transaction dquot-delta tracepoints. */
#define DEFINE_DQTRX_EVENT(name) \
DEFINE_EVENT(xfs_dqtrx_class, name, \
 TP_PROTO(struct xfs_dqtrx *qtrx), \
 TP_ARGS(qtrx))
DEFINE_DQTRX_EVENT(xfs_trans_apply_dquot_deltas);
DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_before);
DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_after);

/*
 * Snapshot log grant-head state for a log ticket: ticket counts and
 * reservation sizes, wait-queue occupancy, grant-head positions and the
 * current log tail.
 */
DECLARE_EVENT_CLASS(xfs_loggrant_class,
 TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
 TP_ARGS(log, tic),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(unsigned long, tic)
  __field(char, ocnt)
  __field(char, cnt)
  __field(int, curr_res)
  __field(int, unit_res)
  __field(unsigned int, flags)
  __field(int, reserveq)
  __field(int, writeq)
  __field(uint64_t, grant_reserve_bytes)
  __field(uint64_t, grant_write_bytes)
  __field(uint64_t, tail_space)
  __field(int, curr_cycle)
  __field(int, curr_block)
  __field(xfs_lsn_t, tail_lsn)
 ),
 TP_fast_assign(
  __entry->dev = log->l_mp->m_super->s_dev;
  __entry->tic = (unsigned long)tic;
  __entry->ocnt = tic->t_ocnt;
  __entry->cnt = tic->t_cnt;
  __entry->curr_res = tic->t_curr_res;
  __entry->unit_res = tic->t_unit_res;
  __entry->flags = tic->t_flags;
  /* nonzero means the corresponding wait queue is empty */
  __entry->reserveq = list_empty(&log->l_reserve_head.waiters);
  __entry->writeq = list_empty(&log->l_write_head.waiters);
  __entry->tail_space = READ_ONCE(log->l_tail_space);
  /* grant bytes are reported relative to the current tail space */
  __entry->grant_reserve_bytes = __entry->tail_space +
   atomic64_read(&log->l_reserve_head.grant);
  __entry->grant_write_bytes = __entry->tail_space +
   atomic64_read(&log->l_write_head.grant);
  __entry->curr_cycle = log->l_curr_cycle;
  __entry->curr_block = log->l_curr_block;
  __entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
 ),
 /* tail_cycle/tail_block are decoded from tail_lsn at print time */
 TP_printk("dev %d:%d tic 0x%lx t_ocnt %u t_cnt %u t_curr_res %u "
    "t_unit_res %u t_flags %s reserveq %s writeq %s "
    "tail space %llu grant_reserve_bytes %llu "
    "grant_write_bytes %llu curr_cycle %d curr_block %d "
    "tail_cycle %d tail_block %d",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->tic,
    __entry->ocnt,
    __entry->cnt,
    __entry->curr_res,
    __entry->unit_res,
    __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
    __entry->reserveq ? "empty" : "active",
    __entry->writeq ? "empty" : "active",
    __entry->tail_space,
    __entry->grant_reserve_bytes,
    __entry->grant_write_bytes,
    __entry->curr_cycle,
    __entry->curr_block,
    CYCLE_LSN(__entry->tail_lsn),
    BLOCK_LSN(__entry->tail_lsn)
 )
)

/* Instantiate xfs_loggrant_class for log reservation tracepoints. */
#define DEFINE_LOGGRANT_EVENT(name) \
DEFINE_EVENT(xfs_loggrant_class, name, \
 TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
 TP_ARGS(log, tic))
DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant_sub);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_sub);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_cil_wait);

/*
 * Trace a log item: its address, type, flags and last committed LSN.
 */
DECLARE_EVENT_CLASS(xfs_log_item_class,
 TP_PROTO(struct xfs_log_item *lip),
 TP_ARGS(lip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(void *, lip)
  __field(uint, type)
  __field(unsigned long, flags)
  __field(xfs_lsn_t, lsn)
 ),
 TP_fast_assign(
  __entry->dev = lip->li_log->l_mp->m_super->s_dev;
  __entry->lip = lip;
  __entry->type = lip->li_type;
  __entry->flags = lip->li_flags;
  __entry->lsn = lip->li_lsn;
 ),
 /* LSN is printed as cycle/block via CYCLE_LSN/BLOCK_LSN */
 TP_printk("dev %d:%d lip %p lsn %d/%d type %s flags %s",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->lip,
    CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
    __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
    __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
)

/*
 * Trace a log force request: the target LSN and the requesting caller.
 */
TRACE_EVENT(xfs_log_force,
 TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn, unsigned long caller_ip),
 TP_ARGS(mp, lsn, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_lsn_t, lsn)
  __field(unsigned long, caller_ip)
 ),
 TP_fast_assign(
  __entry->dev = mp->m_super->s_dev;
  __entry->lsn = lsn;
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d lsn 0x%llx caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->lsn, (void *)__entry->caller_ip)
)

/* Instantiate xfs_log_item_class for AIL/CIL log item tracepoints. */
#define DEFINE_LOG_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_log_item_class, name, \
 TP_PROTO(struct xfs_log_item *lip), \
 TP_ARGS(lip))
DEFINE_LOG_ITEM_EVENT(xfs_ail_push);
DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned);
DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);
DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing);
DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_mark);
DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_skip);
DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_unpin);
DEFINE_LOG_ITEM_EVENT(xlog_ail_insert_abort);
DEFINE_LOG_ITEM_EVENT(xfs_trans_free_abort);

/*
 * Trace a log item moving within the AIL: old and new LSN positions.
 */
DECLARE_EVENT_CLASS(xfs_ail_class,
 TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn),
 TP_ARGS(lip, old_lsn, new_lsn),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(void *, lip)
  __field(uint, type)
  __field(unsigned long, flags)
  __field(xfs_lsn_t, old_lsn)
  __field(xfs_lsn_t, new_lsn)
 ),
 TP_fast_assign(
  __entry->dev = lip->li_log->l_mp->m_super->s_dev;
  __entry->lip = lip;
  __entry->type = lip->li_type;
  __entry->flags = lip->li_flags;
  __entry->old_lsn = old_lsn;
  __entry->new_lsn = new_lsn;
 ),
 TP_printk("dev %d:%d lip %p old lsn %d/%d new lsn %d/%d type %s flags %s",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->lip,
    CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
    CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn),
    __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
    __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
)

/* Instantiate xfs_ail_class for AIL insert/move/delete tracepoints. */
#define DEFINE_AIL_EVENT(name) \
DEFINE_EVENT(xfs_ail_class, name, \
 TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn), \
 TP_ARGS(lip, old_lsn, new_lsn))
DEFINE_AIL_EVENT(xfs_ail_insert);
DEFINE_AIL_EVENT(xfs_ail_move);
DEFINE_AIL_EVENT(xfs_ail_delete);

/*
 * Trace a log tail update: the new tail LSN, the previous tail, and the
 * current AIL head LSN, all printed as cycle/block pairs.
 */
TRACE_EVENT(xfs_log_assign_tail_lsn,
 TP_PROTO(struct xlog *log, xfs_lsn_t new_lsn),
 TP_ARGS(log, new_lsn),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_lsn_t, new_lsn)
  __field(xfs_lsn_t, old_lsn)
  __field(xfs_lsn_t, head_lsn)
 ),
 TP_fast_assign(
  __entry->dev = log->l_mp->m_super->s_dev;
  __entry->new_lsn = new_lsn;
  __entry->old_lsn = atomic64_read(&log->l_tail_lsn);
  __entry->head_lsn = log->l_ailp->ail_head_lsn;
 ),
 TP_printk("dev %d:%d new tail lsn %d/%d, old lsn %d/%d, head lsn %d/%d",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn),
    CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
    CYCLE_LSN(__entry->head_lsn), BLOCK_LSN(__entry->head_lsn))
)

/*
 * Trace file read/write requests: inode, on-disk size, file position and
 * requested byte count taken from the kiocb/iov_iter pair.
 */
DECLARE_EVENT_CLASS(xfs_file_class,
 TP_PROTO(struct kiocb *iocb, struct iov_iter *iter),
 TP_ARGS(iocb, iter),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(xfs_fsize_t, size)
  __field(loff_t, offset)
  __field(size_t, count)
 ),
 TP_fast_assign(
  __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
  __entry->ino = XFS_I(file_inode(iocb->ki_filp))->i_ino;
  __entry->size = XFS_I(file_inode(iocb->ki_filp))->i_disk_size;
  __entry->offset = iocb->ki_pos;
  __entry->count = iov_iter_count(iter);
 ),
 TP_printk("dev %d:%d ino 0x%llx disize 0x%llx pos 0x%llx bytecount 0x%zx",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->size,
    __entry->offset,
    __entry->count)
)

/* Instantiate xfs_file_class for the read/write I/O path tracepoints. */
#define DEFINE_RW_EVENT(name)  \
DEFINE_EVENT(xfs_file_class, name, \
 TP_PROTO(struct kiocb *iocb, struct iov_iter *iter),  \
 TP_ARGS(iocb, iter))
DEFINE_RW_EVENT(xfs_file_buffered_read);
DEFINE_RW_EVENT(xfs_file_direct_read);
DEFINE_RW_EVENT(xfs_file_dax_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_dax_write);
DEFINE_RW_EVENT(xfs_reflink_bounce_dio_write);

/*
 * Trace an atomic write falling back to the COW path: inode, byte offset
 * and byte count of the write.
 */
TRACE_EVENT(xfs_iomap_atomic_write_cow,
 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
 TP_ARGS(ip, offset, count),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(xfs_off_t, offset)
  __field(ssize_t, count)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->offset = offset;
  __entry->count = count;
 ),
 TP_printk("dev %d:%d ino 0x%llx pos 0x%llx bytecount 0x%zx",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->offset,
    __entry->count)
)

/*
 * Trace a block mapping lookup or allocation: file range requested, the
 * fork queried, and the resulting extent record (if any).
 */
DECLARE_EVENT_CLASS(xfs_imap_class,
 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
   int whichfork, struct xfs_bmbt_irec *irec),
 TP_ARGS(ip, offset, count, whichfork, irec),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(loff_t, size)
  __field(loff_t, offset)
  __field(size_t, count)
  __field(int, whichfork)
  __field(xfs_fileoff_t, startoff)
  __field(xfs_fsblock_t, startblock)
  __field(xfs_filblks_t, blockcount)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->size = ip->i_disk_size;
  __entry->offset = offset;
  __entry->count = count;
  __entry->whichfork = whichfork;
  /* irec may be NULL when the caller has no mapping to report */
  __entry->startoff = irec ? irec->br_startoff : 0;
  __entry->startblock = irec ? irec->br_startblock : 0;
  __entry->blockcount = irec ? irec->br_blockcount : 0;
 ),
 TP_printk("dev %d:%d ino 0x%llx disize 0x%llx pos 0x%llx bytecount 0x%zx "
    "fork %s startoff 0x%llx startblock 0x%llx fsbcount 0x%llx",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->size,
    __entry->offset,
    __entry->count,
    __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
    __entry->startoff,
    (int64_t)__entry->startblock,
    __entry->blockcount)
)

/* Instantiate xfs_imap_class for block-mapping tracepoints. */
#define DEFINE_IMAP_EVENT(name) \
DEFINE_EVENT(xfs_imap_class, name, \
 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
   int whichfork, struct xfs_bmbt_irec *irec),  \
 TP_ARGS(ip, offset, count, whichfork, irec))
DEFINE_IMAP_EVENT(xfs_map_blocks_found);
DEFINE_IMAP_EVENT(xfs_map_blocks_alloc);
DEFINE_IMAP_EVENT(xfs_iomap_alloc);
DEFINE_IMAP_EVENT(xfs_iomap_found);

/*
 * Trace a simple I/O range on an inode, recording both in-core (isize)
 * and on-disk (disize) file sizes alongside the byte range.
 */
DECLARE_EVENT_CLASS(xfs_simple_io_class,
 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
 TP_ARGS(ip, offset, count),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(loff_t, isize)
  __field(loff_t, disize)
  __field(loff_t, offset)
  __field(size_t, count)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->isize = VFS_I(ip)->i_size;
  __entry->disize = ip->i_disk_size;
  __entry->offset = offset;
  __entry->count = count;
 ),
 TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx "
    "pos 0x%llx bytecount 0x%zx",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->isize,
    __entry->disize,
    __entry->offset,
    __entry->count)
);

/* Instantiate xfs_simple_io_class for byte-range I/O tracepoints. */
#define DEFINE_SIMPLE_IO_EVENT(name) \
DEFINE_EVENT(xfs_simple_io_class, name, \
 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
 TP_ARGS(ip, offset, count))
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write);
DEFINE_SIMPLE_IO_EVENT(xfs_file_splice_read);
DEFINE_SIMPLE_IO_EVENT(xfs_zoned_map_blocks);

/*
 * Trace inode truncation: current on-disk size and the target new size.
 */
DECLARE_EVENT_CLASS(xfs_itrunc_class,
 TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
 TP_ARGS(ip, new_size),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(xfs_fsize_t, size)
  __field(xfs_fsize_t, new_size)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->size = ip->i_disk_size;
  __entry->new_size = new_size;
 ),
 TP_printk("dev %d:%d ino 0x%llx disize 0x%llx new_size 0x%llx",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->size,
    __entry->new_size)
)

/* Instantiate xfs_itrunc_class for truncate start/end tracepoints. */
#define DEFINE_ITRUNC_EVENT(name) \
DEFINE_EVENT(xfs_itrunc_class, name, \
 TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
 TP_ARGS(ip, new_size))
DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_start);
DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_end);

/*
 * Trace a block unmap request: the file range being removed (in fs
 * blocks), the XFS_BMAPI_* flags, and the requesting caller.
 */
TRACE_EVENT(xfs_bunmap,
 TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t fileoff, xfs_filblks_t len,
   int flags, unsigned long caller_ip),
 TP_ARGS(ip, fileoff, len, flags, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_ino_t, ino)
  __field(xfs_fsize_t, size)
  __field(xfs_fileoff_t, fileoff)
  __field(xfs_filblks_t, len)
  __field(unsigned long, caller_ip)
  __field(int, flags)
 ),
 TP_fast_assign(
  __entry->dev = VFS_I(ip)->i_sb->s_dev;
  __entry->ino = ip->i_ino;
  __entry->size = ip->i_disk_size;
  __entry->fileoff = fileoff;
  __entry->len = len;
  __entry->caller_ip = caller_ip;
  __entry->flags = flags;
 ),
 TP_printk("dev %d:%d ino 0x%llx disize 0x%llx fileoff 0x%llx fsbcount 0x%llx "
    "flags %s caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->ino,
    __entry->size,
    __entry->fileoff,
    __entry->len,
    __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
    (void *)__entry->caller_ip)

);

/*
 * Trace busy extent state changes; works for both AG and realtime groups,
 * printing "ag"/"rg" prefixes from the group type.
 */
DECLARE_EVENT_CLASS(xfs_extent_busy_class,
 TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno,
   xfs_extlen_t len),
 TP_ARGS(xg, agbno, len),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(enum xfs_group_type, type)
  __field(xfs_agnumber_t, agno)
  __field(xfs_agblock_t, agbno)
  __field(xfs_extlen_t, len)
 ),
 TP_fast_assign(
  __entry->dev = xg->xg_mount->m_super->s_dev;
  __entry->type = xg->xg_type;
  __entry->agno = xg->xg_gno;
  __entry->agbno = agbno;
  __entry->len = len;
 ),
 /* %s prefixes resolve to "ag" or "rg" via XG_TYPE_STRINGS */
 TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __print_symbolic(__entry->type, XG_TYPE_STRINGS),
    __entry->agno,
    __print_symbolic(__entry->type, XG_TYPE_STRINGS),
    __entry->agbno,
    __entry->len)
);
/* Instantiate xfs_extent_busy_class for busy-extent tracepoints. */
#define DEFINE_BUSY_EVENT(name) \
DEFINE_EVENT(xfs_extent_busy_class, name, \
 TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, \
   xfs_extlen_t len), \
 TP_ARGS(xg, agbno, len))
DEFINE_BUSY_EVENT(xfs_extent_busy);
DEFINE_BUSY_EVENT(xfs_extent_busy_force);
DEFINE_BUSY_EVENT(xfs_extent_busy_reuse);
DEFINE_BUSY_EVENT(xfs_extent_busy_clear);

/*
 * Trace trimming a candidate free extent around busy extents: the input
 * range (agbno/len) and the usable range found (tbno/tlen).
 */
TRACE_EVENT(xfs_extent_busy_trim,
 TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno,
   xfs_extlen_t len, xfs_agblock_t tbno, xfs_extlen_t tlen),
 TP_ARGS(xg, agbno, len, tbno, tlen),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(enum xfs_group_type, type)
  __field(xfs_agnumber_t, agno)
  __field(xfs_agblock_t, agbno)
  __field(xfs_extlen_t, len)
  __field(xfs_agblock_t, tbno)
  __field(xfs_extlen_t, tlen)
 ),
 TP_fast_assign(
  __entry->dev = xg->xg_mount->m_super->s_dev;
  __entry->type = xg->xg_type;
  __entry->agno = xg->xg_gno;
  __entry->agbno = agbno;
  __entry->len = len;
  __entry->tbno = tbno;
  __entry->tlen = tlen;
 ),
 TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x found_agbno 0x%x found_fsbcount 0x%x",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __print_symbolic(__entry->type, XG_TYPE_STRINGS),
    __entry->agno,
    __print_symbolic(__entry->type, XG_TYPE_STRINGS),
    __entry->agbno,
    __entry->len,
    __entry->tbno,
    __entry->tlen)
);

#ifdef CONFIG_XFS_RT
/*
 * Trace a realtime allocation blocked on busy extents: the requested
 * range and size bounds, the allocation result, and the busy generation
 * the allocator will wait on.
 *
 * Fixes: (1) the TP_STRUCT__entry declared a "mod" field that was never
 * assigned in TP_fast_assign and never printed, recording uninitialized
 * ring-buffer bytes -- removed; (2) the format string printed the rtx
 * value as "rtx 0%llx", missing the 'x' of the hex prefix used by every
 * other field -- corrected to "rtx 0x%llx".
 */
TRACE_EVENT(xfs_rtalloc_extent_busy,
 TP_PROTO(struct xfs_rtgroup *rtg, xfs_rtxnum_t start,
   xfs_rtxlen_t minlen, xfs_rtxlen_t maxlen,
   xfs_rtxlen_t len, xfs_rtxlen_t prod, xfs_rtxnum_t rtx,
   unsigned busy_gen),
 TP_ARGS(rtg, start, minlen, maxlen, len, prod, rtx, busy_gen),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_rgnumber_t, rgno)
  __field(xfs_rtxnum_t, start)
  __field(xfs_rtxlen_t, minlen)
  __field(xfs_rtxlen_t, maxlen)
  __field(xfs_rtxlen_t, prod)
  __field(xfs_rtxlen_t, len)
  __field(xfs_rtxnum_t, rtx)
  __field(unsigned, busy_gen)
 ),
 TP_fast_assign(
  __entry->dev = rtg_mount(rtg)->m_super->s_dev;
  __entry->rgno = rtg_rgno(rtg);
  __entry->start = start;
  __entry->minlen = minlen;
  __entry->maxlen = maxlen;
  __entry->prod = prod;
  __entry->len = len;
  __entry->rtx = rtx;
  __entry->busy_gen = busy_gen;
 ),
 TP_printk("dev %d:%d rgno 0x%x startrtx 0x%llx minlen %u maxlen %u "
    "prod %u len %u rtx 0x%llx busy_gen 0x%x",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->rgno,
    __entry->start,
    __entry->minlen,
    __entry->maxlen,
    __entry->prod,
    __entry->len,
    __entry->rtx,
    __entry->busy_gen)
)

/*
 * Trace trimming a realtime allocation candidate around busy extents:
 * the original rtx range and the trimmed range that remains usable.
 */
TRACE_EVENT(xfs_rtalloc_extent_busy_trim,
 TP_PROTO(struct xfs_rtgroup *rtg, xfs_rtxnum_t old_rtx,
   xfs_rtxlen_t old_len, xfs_rtxnum_t new_rtx,
   xfs_rtxlen_t new_len),
 TP_ARGS(rtg, old_rtx, old_len, new_rtx, new_len),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_rgnumber_t, rgno)
  __field(xfs_rtxnum_t, old_rtx)
  __field(xfs_rtxnum_t, new_rtx)
  __field(xfs_rtxlen_t, old_len)
  __field(xfs_rtxlen_t, new_len)
 ),
 TP_fast_assign(
  __entry->dev = rtg_mount(rtg)->m_super->s_dev;
  __entry->rgno = rtg_rgno(rtg);
  __entry->old_rtx = old_rtx;
  __entry->old_len = old_len;
  __entry->new_rtx = new_rtx;
  __entry->new_len = new_len;
 ),
 TP_printk("dev %d:%d rgno 0x%x rtx 0x%llx rtxcount 0x%x -> rtx 0x%llx rtxcount 0x%x",
    MAJOR(__entry->dev), MINOR(__entry->dev),
    __entry->rgno,
    __entry->old_rtx,
    __entry->old_len,
    __entry->new_rtx,
    __entry->new_len)
);
#endif /* CONFIG_XFS_RT */

DECLARE_EVENT_CLASS(xfs_agf_class,
 TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
   unsigned long caller_ip),
 TP_ARGS(mp, agf, flags, caller_ip),
 TP_STRUCT__entry(
  __field(dev_t, dev)
  __field(xfs_agnumber_t, agno)
  __field(int, flags)
  __field(__u32, length)
  __field(__u32, bno_root)
  __field(__u32, cnt_root)
  __field(__u32, bno_level)
  __field(__u32, cnt_level)
  __field(__u32, flfirst)
  __field(__u32, fllast)
  __field(__u32, flcount)
  __field(__u32, freeblks)
  __field(__u32, longest)
  __field(unsigned long, caller_ip)
 ),
 TP_fast_assign(
  __entry->dev = mp->m_super->s_dev;
  __entry->agno = be32_to_cpu(agf->agf_seqno),
  __entry->flags = flags;
  __entry->length = be32_to_cpu(agf->agf_length),
  __entry->bno_root = be32_to_cpu(agf->agf_bno_root),
  __entry->cnt_root = be32_to_cpu(agf->agf_cnt_root),
  __entry->bno_level = be32_to_cpu(agf->agf_bno_level),
  __entry->cnt_level = be32_to_cpu(agf->agf_cnt_level),
  __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
  __entry->fllast = be32_to_cpu(agf->agf_fllast),
  __entry->flcount = be32_to_cpu(agf->agf_flcount),
  __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
  __entry->longest = be32_to_cpu(agf->agf_longest);
  __entry->caller_ip = caller_ip;
 ),
 TP_printk("dev %d:%d agno 0x%x flags %s length %u roots b %u c %u "
    "levels b %u c %u flfirst %u fllast %u flcount %u "
    "freeblks %u longest %u caller %pS",
    MAJOR(__entry->dev), MINOR(__entry->dev),
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=100 H=100 G=100

¤ Dauer der Verarbeitung: 0.14 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.