@@ -530,6 +530,7 @@ xfs_iwalk_threaded(
xfs_ino_t startino,
xfs_iwalk_fn iwalk_fn,
unsigned int max_prefetch,
+ bool polled,
void *data)
{
struct xfs_pwork_ctl pctl;
@@ -560,5 +561,7 @@ xfs_iwalk_threaded(
startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
}
+ if (polled)
+ return xfs_pwork_destroy_poll(&pctl);
return xfs_pwork_destroy(&pctl);
}
@@ -15,6 +15,7 @@ typedef int (*xfs_iwalk_fn)(struct xfs_mount *mp, struct xfs_trans *tp,
int xfs_iwalk(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t startino,
xfs_iwalk_fn iwalk_fn, unsigned int max_prefetch, void *data);
int xfs_iwalk_threaded(struct xfs_mount *mp, xfs_ino_t startino,
- xfs_iwalk_fn iwalk_fn, unsigned int max_prefetch, void *data);
+ xfs_iwalk_fn iwalk_fn, unsigned int max_prefetch, bool polled,
+ void *data);
#endif /* __XFS_IWALK_H__ */
@@ -13,6 +13,8 @@
#include "xfs_trace.h"
#include "xfs_sysctl.h"
#include "xfs_pwork.h"
+#include <linux/delay.h>
+#include <linux/nmi.h>
/*
* Parallel Work Queue
@@ -40,6 +41,7 @@ xfs_pwork_work(
error = pctl->work_fn(pctl->mp, pwork);
if (error && !pctl->error)
pctl->error = error;
+ atomic_dec(&pctl->nr_work);
}
/*
@@ -68,6 +70,7 @@ xfs_pwork_init(
pctl->work_fn = work_fn;
pctl->error = 0;
pctl->mp = mp;
+ atomic_set(&pctl->nr_work, 0);
return 0;
}
@@ -80,6 +83,7 @@ xfs_pwork_queue(
{
INIT_WORK(&pwork->work, xfs_pwork_work);
pwork->pctl = pctl;
+ atomic_inc(&pctl->nr_work);
queue_work(pctl->wq, &pwork->work);
}
@@ -93,6 +97,23 @@ xfs_pwork_destroy(
return pctl->error;
}
+/*
+ * Wait for all queued work items to finish, then tear down the control
+ * structure and return the first error recorded by any worker.  We poll
+ * (touching the soft lockup watchdog) for callers like mount that hold locks.
+ */
+int
+xfs_pwork_destroy_poll(
+ struct xfs_pwork_ctl *pctl)
+{
+ while (atomic_read(&pctl->nr_work) > 0) {
+ msleep(1);
+ touch_softlockup_watchdog();
+ }
+
+ return xfs_pwork_destroy(pctl);
+}
+
/*
* Return the amount of parallelism that the data device can handle, or 0 for
* no limit.
@@ -18,6 +18,7 @@ struct xfs_pwork_ctl {
struct workqueue_struct *wq;
struct xfs_mount *mp;
xfs_pwork_work_fn work_fn;
+ atomic_t nr_work;
int error;
};
@@ -45,6 +46,7 @@ int xfs_pwork_init(struct xfs_mount *mp, struct xfs_pwork_ctl *pctl,
unsigned int nr_threads);
void xfs_pwork_queue(struct xfs_pwork_ctl *pctl, struct xfs_pwork *pwork);
int xfs_pwork_destroy(struct xfs_pwork_ctl *pctl);
+int xfs_pwork_destroy_poll(struct xfs_pwork_ctl *pctl);
unsigned int xfs_pwork_guess_datadev_parallelism(struct xfs_mount *mp);
#endif /* __XFS_PWORK_H__ */
@@ -1305,7 +1305,7 @@ xfs_qm_quotacheck(
flags |= XFS_PQUOTA_CHKD;
}
- error = xfs_iwalk_threaded(mp, 0, xfs_qm_dqusage_adjust, 0, NULL);
+ error = xfs_iwalk_threaded(mp, 0, xfs_qm_dqusage_adjust, 0, true, NULL);
if (error)
goto error_return;