--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -44,6 +44,9 @@ struct shmem_sb_info {
spinlock_t shrinklist_lock; /* Protects shrinklist */
struct list_head shrinklist; /* List of shinkable inodes */
unsigned long shrinklist_len; /* Length of shrinklist */
+
+ unsigned long acct_errors; /* Failed mem accounting (ENOMEM) */
+ unsigned long space_errors; /* Failed block allocations (ENOSPC) */
};
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -212,8 +212,10 @@ static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- if (shmem_acct_block(info->flags, pages))
+ if (shmem_acct_block(info->flags, pages)) {
+ sbinfo->acct_errors += 1;
return false;
+ }
if (sbinfo->max_blocks) {
if (percpu_counter_compare(&sbinfo->used_blocks,
@@ -225,6 +227,7 @@ static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
return true;
unacct:
+ sbinfo->space_errors += 1;
shmem_unacct_blocks(info->flags, pages);
return false;
}
Keep per-sb counters of failed shmem allocations (ENOMEM and ENOSPC) so
they can be reported via sysfs. The sysfs support itself is added in a
later patch.

Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
---
 include/linux/shmem_fs.h | 3 +++
 mm/shmem.c               | 5 ++++-
 2 files changed, 7 insertions(+), 1 deletion(-)
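
For illustration only, below is a minimal sketch of how these counters
could later be surfaced through sysfs; it is not the follow-up patch
referenced above. It assumes shmem_sb_info additionally gains a
"struct kobject kobj" member (not part of this patch) so the attribute
callbacks can recover the sb_info via container_of(); the identifiers
acct_errors_show(), space_errors_show(), shmem_error_attrs and the use
of sysfs_emit() are likewise assumptions of the sketch.

/*
 * Illustrative sketch only -- not the actual follow-up sysfs patch.
 * Assumes shmem_sb_info embeds a "struct kobject kobj" member.
 */
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/shmem_fs.h>

static ssize_t acct_errors_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct shmem_sb_info *sbinfo =
		container_of(kobj, struct shmem_sb_info, kobj);

	/* sysfs_emit() assumed; older kernels would use sprintf() */
	return sysfs_emit(buf, "%lu\n", sbinfo->acct_errors);
}

static ssize_t space_errors_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	struct shmem_sb_info *sbinfo =
		container_of(kobj, struct shmem_sb_info, kobj);

	return sysfs_emit(buf, "%lu\n", sbinfo->space_errors);
}

static struct kobj_attribute acct_errors_attr = __ATTR_RO(acct_errors);
static struct kobj_attribute space_errors_attr = __ATTR_RO(space_errors);

static struct attribute *shmem_error_attrs[] = {
	&acct_errors_attr.attr,
	&space_errors_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(shmem_error);

Such a group would typically be registered at mount time with
kobject_init_and_add() plus sysfs_create_groups(), after which the two
counters could be read from userspace under a per-superblock sysfs
directory (the exact layout is up to the real follow-up patch).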