@@ -618,6 +618,8 @@ xfs_defer_finish_one(
/* Done with the dfp, free it. */
list_del(&dfp->dfp_list);
kmem_cache_free(xfs_defer_pending_cache, dfp);
+ tp->t_dfops_nr--;
+ tp->t_dfops_finished++;
out:
if (ops->finish_cleanup)
ops->finish_cleanup(tp, state, error);
@@ -680,6 +682,9 @@ xfs_defer_finish_noroll(
list_splice_init(&(*tp)->t_dfops, &dop_pending);
+ (*tp)->t_dfops_nr_max = max((*tp)->t_dfops_nr,
+ (*tp)->t_dfops_nr_max);
+
if (has_intents < 0) {
error = has_intents;
goto out_shutdown;
@@ -721,6 +726,7 @@ xfs_defer_finish_noroll(
xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
trace_xfs_defer_finish_error(*tp, error);
xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
+ (*tp)->t_dfops_nr = 0;
xfs_defer_cancel(*tp);
return error;
}
@@ -768,6 +774,7 @@ xfs_defer_cancel(
trace_xfs_defer_cancel(tp, _RET_IP_);
xfs_defer_trans_abort(tp, &tp->t_dfops);
xfs_defer_cancel_list(mp, &tp->t_dfops);
+ tp->t_dfops_nr = 0;
}
/*
@@ -853,8 +860,10 @@ xfs_defer_add(
}
dfp = xfs_defer_find_last(tp, ops);
- if (!dfp || !xfs_defer_can_append(dfp, ops))
+ if (!dfp || !xfs_defer_can_append(dfp, ops)) {
dfp = xfs_defer_alloc(&tp->t_dfops, ops);
+ tp->t_dfops_nr++;
+ }
xfs_defer_add_item(dfp, li);
trace_xfs_defer_add_item(tp->t_mountp, dfp, li);
@@ -879,6 +888,7 @@ xfs_defer_add_barrier(
return;
xfs_defer_alloc(&tp->t_dfops, &xfs_barrier_defer_type);
+ tp->t_dfops_nr++;
trace_xfs_defer_add_item(tp->t_mountp, dfp, NULL);
}
@@ -939,6 +949,12 @@ xfs_defer_move(
struct xfs_trans *stp)
{
list_splice_init(&stp->t_dfops, &dtp->t_dfops);
+ dtp->t_dfops_nr += stp->t_dfops_nr;
+ dtp->t_dfops_nr_max = stp->t_dfops_nr_max;
+ dtp->t_dfops_finished = stp->t_dfops_finished;
+ stp->t_dfops_nr = 0;
+ stp->t_dfops_nr_max = 0;
+ stp->t_dfops_finished = 0;
/*
* Low free space mode was historically controlled by a dfops field.
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -2880,6 +2880,25 @@ TRACE_EVENT(xfs_btree_free_block,
/* deferred ops */
struct xfs_defer_pending;
+TRACE_EVENT(xfs_defer_stats,
+ TP_PROTO(struct xfs_trans *tp),
+ TP_ARGS(tp),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, max)
+ __field(unsigned int, finished)
+ ),
+ TP_fast_assign(
+ __entry->dev = tp->t_mountp->m_super->s_dev;
+ __entry->max = tp->t_dfops_nr_max;
+ __entry->finished = tp->t_dfops_finished;
+ ),
+ TP_printk("dev %d:%d max %u finished %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->max,
+ __entry->finished)
+);
+
DECLARE_EVENT_CLASS(xfs_defer_class,
TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip),
TP_ARGS(tp, caller_ip),
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -71,6 +71,9 @@ xfs_trans_free(
xfs_extent_busy_sort(&tp->t_busy);
xfs_extent_busy_clear(&tp->t_busy, false);
+ if (tp->t_dfops_finished > 0)
+ trace_xfs_defer_stats(tp);
+
trace_xfs_trans_free(tp, _RET_IP_);
xfs_trans_clear_context(tp);
if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -153,6 +153,13 @@ typedef struct xfs_trans {
struct list_head t_busy; /* list of busy extents */
struct list_head t_dfops; /* deferred operations */
unsigned long t_pflags; /* saved process flags state */
+
+ /* Count of deferred ops attached to transaction. */
+ unsigned int t_dfops_nr;
+ /* Maximum t_dfops_nr seen while finishing deferred ops. */
+ unsigned int t_dfops_nr_max;
+ /* Number of dfops finished. */
+ unsigned int t_dfops_finished;
} xfs_trans_t;
/*
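
For illustration, the accounting this patch adds can be modelled outside the kernel: t_dfops_nr counts the deferred-op work items currently queued on the transaction, t_dfops_nr_max records the high-water mark observed when the defer finish loop drains them, and t_dfops_finished counts completed items, which is what the new xfs_defer_stats tracepoint reports when the transaction is freed. Below is a minimal userspace sketch of that lifecycle; the fake_* type and function names are hypothetical stand-ins, not kernel code, and the drain loop is simplified to a single pass.

/*
 * Userspace sketch of the counter lifecycle added to struct xfs_trans.
 * All names here are illustrative stand-ins.
 */
#include <stdio.h>

struct fake_trans {
	unsigned int dfops_nr;		/* queued deferred ops (t_dfops_nr) */
	unsigned int dfops_nr_max;	/* high-water mark (t_dfops_nr_max) */
	unsigned int dfops_finished;	/* completed ops (t_dfops_finished) */
};

/* Mirrors the increment in xfs_defer_add()/xfs_defer_add_barrier(). */
static void fake_defer_add(struct fake_trans *tp)
{
	tp->dfops_nr++;
}

/* Mirrors xfs_defer_finish_noroll(): record the high-water mark, then drain. */
static void fake_defer_finish(struct fake_trans *tp)
{
	if (tp->dfops_nr > tp->dfops_nr_max)
		tp->dfops_nr_max = tp->dfops_nr;

	while (tp->dfops_nr > 0) {
		/* Mirrors xfs_defer_finish_one() retiring one work item. */
		tp->dfops_nr--;
		tp->dfops_finished++;
	}
}

/* Mirrors the trace_xfs_defer_stats() call added to xfs_trans_free(). */
static void fake_trans_free(struct fake_trans *tp)
{
	if (tp->dfops_finished > 0)
		printf("max %u finished %u\n",
		       tp->dfops_nr_max, tp->dfops_finished);
}

int main(void)
{
	struct fake_trans tp = { 0 };

	fake_defer_add(&tp);
	fake_defer_add(&tp);
	fake_defer_finish(&tp);
	fake_trans_free(&tp);	/* prints "max 2 finished 2" */
	return 0;
}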