@@ -279,6 +279,47 @@ xfs_gc_delay_dquot(
return delay_ms;
}
+/*
+ * Scale down the background work delay if we're low on free rt extents.
+ * Return value is in ms.
+ */
+static inline unsigned int
+xfs_gc_delay_freertx(
+ struct xfs_mount *mp,
+ struct xfs_inode *ip,
+ unsigned int tag,
+ unsigned int delay_ms)
+{
+ int64_t freertx;
+ unsigned int shift = 0;
+
+ if (ip && !XFS_IS_REALTIME_INODE(ip))
+ return delay_ms;
+ if (!xfs_sb_version_hasrealtime(&mp->m_sb))
+ return delay_ms;
+
+ spin_lock(&mp->m_sb_lock);
+ freertx = mp->m_sb.sb_frextents;
+ spin_unlock(&mp->m_sb_lock);
+
+ if (freertx < mp->m_low_rtexts[XFS_LOWSP_5_PCNT]) {
+ shift = 2;
+ if (freertx < mp->m_low_rtexts[XFS_LOWSP_4_PCNT])
+ shift++;
+ if (freertx < mp->m_low_rtexts[XFS_LOWSP_3_PCNT])
+ shift++;
+ if (freertx < mp->m_low_rtexts[XFS_LOWSP_2_PCNT])
+ shift++;
+ if (freertx < mp->m_low_rtexts[XFS_LOWSP_1_PCNT])
+ shift++;
+ }
+
+ if (shift)
+ trace_xfs_gc_delay_frextents(mp, tag, shift);
+
+ return delay_ms >> shift;
+}
+
/*
* Scale down the background work delay if we're low on free space. Similar to
* the way that we throttle preallocations, we halve the delay time for every
@@ -324,7 +365,7 @@ xfs_gc_delay_ms(
unsigned int tag)
{
unsigned int default_ms;
- unsigned int udelay, gdelay, pdelay, fdelay;
+ unsigned int udelay, gdelay, pdelay, fdelay, rdelay;
switch (tag) {
case XFS_ICI_INODEGC_TAG:
@@ -346,8 +387,14 @@ xfs_gc_delay_ms(
gdelay = xfs_gc_delay_dquot(ip, XFS_DQTYPE_GROUP, tag, default_ms);
pdelay = xfs_gc_delay_dquot(ip, XFS_DQTYPE_PROJ, tag, default_ms);
fdelay = xfs_gc_delay_freesp(mp, tag, default_ms);
+ rdelay = xfs_gc_delay_freertx(mp, ip, tag, default_ms);
- return min(min(udelay, gdelay), min(pdelay, fdelay));
+ udelay = min(udelay, gdelay);
+ pdelay = min(pdelay, fdelay);
+
+ udelay = min(udelay, pdelay);
+
+ return min(udelay, rdelay);
}
/*
@@ -365,13 +365,16 @@ void
xfs_set_low_space_thresholds(
struct xfs_mount *mp)
{
- int i;
+ uint64_t dblocks = mp->m_sb.sb_dblocks;
+ uint64_t rtexts = mp->m_sb.sb_rextents;
+ int i;
+
+ do_div(dblocks, 100);
+ do_div(rtexts, 100);
for (i = 0; i < XFS_LOWSP_MAX; i++) {
- uint64_t space = mp->m_sb.sb_dblocks;
-
- do_div(space, 100);
- mp->m_low_space[i] = space * (i + 1);
+ mp->m_low_space[i] = dblocks * (i + 1);
+ mp->m_low_rtexts[i] = rtexts * (i + 1);
}
}
@@ -133,6 +133,7 @@ typedef struct xfs_mount {
uint m_qflags; /* quota status flags */
uint64_t m_flags; /* global mount flags */
int64_t m_low_space[XFS_LOWSP_MAX];
+ int64_t m_low_rtexts[XFS_LOWSP_MAX];
struct xfs_ino_geometry m_ino_geo; /* inode geometry */
struct xfs_trans_resv m_resv; /* precomputed res values */
/* low free space thresholds */
@@ -269,6 +269,28 @@ TRACE_EVENT(xfs_gc_delay_fdblocks,
__entry->fdblocks)
);
+TRACE_EVENT(xfs_gc_delay_frextents,
+ TP_PROTO(struct xfs_mount *mp, unsigned int tag, unsigned int shift),
+ TP_ARGS(mp, tag, shift),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long long, frextents)
+ __field(unsigned int, tag)
+ __field(unsigned int, shift)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->frextents = mp->m_sb.sb_frextents;
+ __entry->tag = tag;
+ __entry->shift = shift;
+ ),
+ TP_printk("dev %d:%d tag %u shift %u frextents %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->tag,
+ __entry->shift,
+ __entry->frextents)
+);
+
DECLARE_EVENT_CLASS(xfs_gc_queue_class,
TP_PROTO(struct xfs_mount *mp, unsigned int delay_ms),
TP_ARGS(mp, delay_ms),