--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -438,10 +438,6 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
unsigned long freed = 0;
- /* We might recurse into filesystem code, so bail out if necessary */
- if (!(sc->gfp_mask & __GFP_FS))
- return SHRINK_STOP;
-
if (!mutex_trylock(&ashmem_mutex))
return -1;
@@ -478,6 +474,10 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
+ /* We might recurse into filesystem code, so defer the work if necessary */
+ if (!(sc->gfp_mask & __GFP_FS))
+ sc->defer_work = true;
+
/*
* note that lru_count is count of pages on the lru, not a count of
* objects on the list. This means the scan function needs to return the
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1614,14 +1614,15 @@ static long gfs2_scan_glock_lru(int nr)
static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- if (!(sc->gfp_mask & __GFP_FS))
- return SHRINK_STOP;
return gfs2_scan_glock_lru(sc->nr_to_scan);
}
static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
+ if (!(sc->gfp_mask & __GFP_FS))
+ sc->defer_work = true;
+
return vfs_pressure_ratio(atomic_read(&lru_count));
}
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -166,9 +166,6 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
LIST_HEAD(dispose);
unsigned long freed;
- if (!(sc->gfp_mask & __GFP_FS))
- return SHRINK_STOP;
-
freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
gfs2_qd_isolate, &dispose);
@@ -180,6 +177,9 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{
+ if (!(sc->gfp_mask & __GFP_FS))
+ sc->defer_work = true;
+
return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2211,10 +2211,7 @@ unsigned long
nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
int nr_to_scan = sc->nr_to_scan;
- gfp_t gfp_mask = sc->gfp_mask;
- if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
- return SHRINK_STOP;
return nfs_do_access_cache_scan(nr_to_scan);
}
@@ -2222,6 +2219,9 @@ nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
unsigned long
nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
+ if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
+ sc->defer_work = true;
+
return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
}
--- a/fs/super.c
+++ b/fs/super.c
@@ -74,9 +74,6 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
* Deadlock avoidance. We may hold various FS locks, and we don't want
* to recurse into the FS that called us in clear_inode() and friends..
*/
- if (!(sc->gfp_mask & __GFP_FS))
- return SHRINK_STOP;
-
if (!trylock_super(sb))
return SHRINK_STOP;
@@ -141,6 +138,9 @@ static unsigned long super_cache_count(struct shrinker *shrink,
return 0;
smp_rmb();
+ if (!(sc->gfp_mask & __GFP_FS))
+ sc->defer_work = true;
+
if (sb->s_op && sb->s_op->nr_cached_objects)
total_objects = sb->s_op->nr_cached_objects(sb, sc);
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -502,9 +502,6 @@ xfs_qm_shrink_scan(
unsigned long freed;
int error;
- if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
- return 0;
-
INIT_LIST_HEAD(&isol.buffers);
INIT_LIST_HEAD(&isol.dispose);
@@ -534,6 +531,15 @@ xfs_qm_shrink_count(
struct xfs_quotainfo *qi = container_of(shrink,
struct xfs_quotainfo, qi_shrinker);
+ /*
+ * Scanning the dquot LRU can recurse into the fs and block on
+ * IO, so defer the work unless this context allows both.
+ */
+ if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) !=
+ (__GFP_FS|__GFP_DIRECT_RECLAIM)) {
+ sc->defer_work = true;
+ }
+
return list_lru_shrink_count(&qi->qi_lru, sc);
}
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -527,9 +527,6 @@ static unsigned long
rpcauth_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
- return SHRINK_STOP;
-
/* nothing left, don't come back */
if (list_empty(&cred_unused))
return SHRINK_STOP;
@@ -541,6 +538,9 @@ static unsigned long
rpcauth_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
+ if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
+ sc->defer_work = true;
+
return number_cred_unused * sysctl_vfs_cache_pressure / 100;
}
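
Every hunk above follows the same conversion: the GFP-context check moves out of the ->scan_objects callback, where failing it returned SHRINK_STOP and silently dropped the work, and into ->count_objects, where it sets sc->defer_work so the shrinker core can defer the work instead of losing it. As a rough illustration only (not part of the patch), a converted shrinker would look like the sketch below; the my_cache_* names are hypothetical, and the sketch assumes the defer_work field added to struct shrink_control earlier in this series.

#include <linux/atomic.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/shrinker.h>

static atomic_long_t my_cache_objects;	/* hypothetical object counter */

static unsigned long my_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	/* Scanning may recurse into fs code, so defer rather than skip. */
	if (!(sc->gfp_mask & __GFP_FS))
		sc->defer_work = true;

	return atomic_long_read(&my_cache_objects);
}

static unsigned long my_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long freed = min_t(unsigned long, sc->nr_to_scan,
				    atomic_long_read(&my_cache_objects));

	/* No GFP check here any more; deferral was decided in count. */
	atomic_long_sub(freed, &my_cache_objects);
	return freed;
}

The design point is that ->count_objects runs unconditionally before ->scan_objects, so it is the natural place to decide deferral: deferred work is meant to accumulate in the shrinker's deferred-work count and be worked off by a later reclaim pass that can enter the filesystem (e.g. kswapd), rather than being dropped. The XFS dquot shrinker applies the same idea with a stricter mask, also requiring __GFP_DIRECT_RECLAIM, because flushing dquots can block on IO.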