@@ -173,6 +173,7 @@ __xfs_free_perag(
struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
+ ASSERT(!delayed_work_pending(&pag->pag_inodegc_work));
ASSERT(atomic_read(&pag->pag_ref) == 0);
kmem_free(pag);
}
@@ -195,6 +196,7 @@ xfs_free_perag(
ASSERT(atomic_read(&pag->pag_ref) == 0);

cancel_delayed_work_sync(&pag->pag_blockgc_work);
+ cancel_delayed_work_sync(&pag->pag_inodegc_work);
xfs_iunlink_destroy(pag);
xfs_buf_hash_destroy(pag);

@@ -253,6 +255,7 @@ xfs_initialize_perag(
spin_lock_init(&pag->pagb_lock);
spin_lock_init(&pag->pag_state_lock);
INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
+ INIT_DELAYED_WORK(&pag->pag_inodegc_work, xfs_inodegc_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
init_waitqueue_head(&pag->pagb_wait);
pag->pagb_count = 0;
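
The three hunks above follow the usual kernel lifecycle for a delayed_work embedded in an RCU-freed object: initialize the work item before the object becomes reachable, cancel it synchronously at teardown, and assert that nothing is pending by the time the RCU callback frees the memory. A minimal sketch of that pattern, assuming nothing beyond the stock workqueue API; the demo_* names are illustrative and not part of this patch:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_obj {
	struct delayed_work	work;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_obj *obj = container_of(to_delayed_work(work),
			struct demo_obj, work);

	/* do the deferred work; obj is the owning object */
	pr_debug("demo gc pass for %p\n", obj);
}

static struct demo_obj *demo_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* 1: arm the work item before publishing the object */
	INIT_DELAYED_WORK(&obj->work, demo_worker);
	return obj;
}

static void demo_free(struct demo_obj *obj)
{
	/* 2: wait out any queued or still-running instance */
	cancel_delayed_work_sync(&obj->work);
	/* 3: nothing may be pending once the memory goes away */
	WARN_ON(delayed_work_pending(&obj->work));
	kfree(obj);
}
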
@@ -96,6 +96,9 @@ struct xfs_perag {
/* background prealloc block trimming */
struct delayed_work pag_blockgc_work;

+ /* background inode inactivation */
+ struct delayed_work pag_inodegc_work;
+
/*
* Unlinked inode information. This incore information reflects
* data stored in the AGI, so callers must hold the AGI buffer lock
@@ -240,14 +240,16 @@ xfs_inodegc_running(struct xfs_mount *mp)
/* Queue a new inode gc pass if there are inodes needing inactivation. */
static void
xfs_inodegc_queue(
- struct xfs_mount *mp)
+ struct xfs_perag *pag)
{
+ struct xfs_mount *mp = pag->pag_mount;
+
if (!xfs_inodegc_running(mp))
return;

rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_INODEGC_TAG))
- queue_delayed_work(mp->m_gc_workqueue, &mp->m_inodegc_work,
+ if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_INODEGC_TAG))
+ queue_delayed_work(mp->m_gc_workqueue, &pag->pag_inodegc_work,
msecs_to_jiffies(xfs_inodegc_centisecs * 10));
rcu_read_unlock();
}
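
With this change, xfs_inodegc_queue() checks the AG's own inode radix tree for tagged inodes and arms that AG's work item on the shared m_gc_workqueue, mirroring what xfs_blockgc_queue() already does for the blockgc tag. A caller that holds only the mount would have to look up the perag first; a hypothetical sketch using the existing xfs_perag_get()/xfs_perag_put() helpers (demo_kick_inodegc itself is not in the patch):

/* Hypothetical helper: kick background inactivation for one AG. */
static void
demo_kick_inodegc(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag = xfs_perag_get(mp, agno);

	if (!pag)
		return;
	xfs_inodegc_queue(pag);
	xfs_perag_put(pag);
}
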
@@ -287,7 +289,7 @@ xfs_perag_set_inode_tag(
xfs_blockgc_queue(pag);
break;
case XFS_ICI_INODEGC_TAG:
- xfs_inodegc_queue(mp);
+ xfs_inodegc_queue(pag);
break;
}

@@ -1915,8 +1917,9 @@ void
xfs_inodegc_worker(
struct work_struct *work)
{
- struct xfs_mount *mp = container_of(to_delayed_work(work),
- struct xfs_mount, m_inodegc_work);
+ struct xfs_perag *pag = container_of(to_delayed_work(work),
+ struct xfs_perag, pag_inodegc_work);
+ struct xfs_mount *mp = pag->pag_mount;
int error;

/*
@@ -1927,24 +1930,33 @@ xfs_inodegc_worker(
if (!xfs_inodegc_running(mp))
return;

- error = xfs_inodegc_free_space(mp, NULL);
+ error = xfs_icwalk_ag(pag, XFS_ICWALK_INODEGC, NULL);
if (error && error != -EAGAIN)
xfs_err(mp, "inode inactivation failed, error %d", error);

- xfs_inodegc_queue(mp);
+ xfs_inodegc_queue(pag);
}

-/* Force all currently queued inode inactivation work to run immediately. */
+/* Force all queued inode inactivation work to run immediately. */
void
xfs_inodegc_flush(
struct xfs_mount *mp)
{
- if (!xfs_inodegc_running(mp) ||
- !radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_INODEGC_TAG))
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+ bool queued = false;
+
+ if (!xfs_inodegc_running(mp))
+ return;
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_INODEGC_TAG) {
+ mod_delayed_work(mp->m_gc_workqueue, &pag->pag_inodegc_work, 0);
+ queued = true;
+ }
+ if (!queued)
return;

- mod_delayed_work(mp->m_gc_workqueue, &mp->m_inodegc_work, 0);
- flush_delayed_work(&mp->m_inodegc_work);
+ flush_workqueue(mp->m_gc_workqueue);
}

/* Stop all queued inactivation work. */
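
Flushing changes shape as well: there is no longer a single delayed_work to flush_delayed_work() on, so the new xfs_inodegc_flush() forces each tagged AG's work item to run immediately (mod_delayed_work() with a zero delay) and then drains the whole m_gc_workqueue, which waits for every per-AG worker at once. The for_each_perag_tag() iterator hides a reference-counted walk of the tagged AGs; in kernels of this era it expands to roughly the following (paraphrased for illustration, not part of this patch):

#define for_each_perag_tag(mp, agno, pag, tag) \
	for ((agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
	     (pag) != NULL; \
	     (agno) = (pag)->pag_agno + 1, \
	     xfs_perag_put(pag), \
	     (pag) = xfs_perag_get_tag((mp), (agno), (tag)))
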
@@ -1952,8 +1964,12 @@ void
xfs_inodegc_stop(
struct xfs_mount *mp)
{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
clear_bit(XFS_OPFLAG_INODEGC_RUNNING_BIT, &mp->m_opflags);
- cancel_delayed_work_sync(&mp->m_inodegc_work);
+ for_each_perag(mp, agno, pag)
+ cancel_delayed_work_sync(&pag->pag_inodegc_work);
}

/* Schedule deferred inode inactivation work. */
@@ -1961,8 +1977,12 @@ void
xfs_inodegc_start(
struct xfs_mount *mp)
{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
set_bit(XFS_OPFLAG_INODEGC_RUNNING_BIT, &mp->m_opflags);
- xfs_inodegc_queue(mp);
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_INODEGC_TAG)
+ xfs_inodegc_queue(pag);
}

/* Are there files waiting for inactivation? */
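
Note the asymmetry between the two: xfs_inodegc_stop() walks every AG with for_each_perag(), since a work item may still be queued for an AG whose tag has already been cleared, while xfs_inodegc_start() only needs to kick AGs that still carry tagged inodes. The gate both sides toggle, xfs_inodegc_running(), is presumably just a test_bit() wrapper over the opflag being set and cleared here; a plausible reconstruction (the helper body is not shown in this patch):

static inline bool
xfs_inodegc_running(struct xfs_mount *mp)
{
	return test_bit(XFS_OPFLAG_INODEGC_RUNNING_BIT, &mp->m_opflags);
}
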
@@ -186,7 +186,6 @@ typedef struct xfs_mount {
uint64_t m_resblks_avail;/* available reserved blocks */
uint64_t m_resblks_save; /* reserved blks @ remount,ro */
struct delayed_work m_reclaim_work; /* background inode reclaim */
- struct delayed_work m_inodegc_work; /* background inode inactive */
struct xfs_kobj m_kobj;
struct xfs_kobj m_error_kobj;
struct xfs_kobj m_error_meta_kobj;
@@ -1955,7 +1955,6 @@ static int xfs_init_fs_context(
mutex_init(&mp->m_growlock);
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
- INIT_DELAYED_WORK(&mp->m_inodegc_work, xfs_inodegc_worker);
mp->m_kobj.kobject.kset = xfs_kset;

/*
* We don't create the finobt per-ag space reservation until after log