@@ -8,19 +8,32 @@ static int truncate(struct inode *inode, loff_t newsize)
- unsigned long free_block;
+ long free_block; /* must be signed: the (free_block < 0) error check below is always false on unsigned */
struct guestmemfs_inode *guestmemfs_inode;
unsigned long *mappings;
+ int rc = 0;
+ struct guestmemfs_sb *psb = GUESTMEMFS_PSB(inode->i_sb);
+
+ spin_lock(&psb->allocation_lock);
+
+ if (psb->serialised) {
+ rc = -EBUSY; /* state already serialised for KHO; modifying it would invalidate the dump */
+ goto out;
+ }
guestmemfs_inode = guestmemfs_get_persisted_inode(inode->i_sb, inode->i_ino);
mappings = guestmemfs_inode->mappings;
i_size_write(inode, newsize);
for (int block_idx = 0; block_idx * PMD_SIZE < newsize; ++block_idx) {
free_block = guestmemfs_alloc_block(inode->i_sb);
- if (free_block < 0)
+ if (free_block < 0) {
/* TODO: roll back allocations. */
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
+ }
*(mappings + block_idx) = free_block;
++guestmemfs_inode->num_mappings;
}
- return 0;
+out:
+ spin_unlock(&psb->allocation_lock);
+ return rc;
}
static int inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr)
@@ -42,6 +42,7 @@ static int guestmemfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (GUESTMEMFS_PSB(sb)) {
pr_info("Restored super block from KHO\n");
+ GUESTMEMFS_PSB(sb)->serialised = 0; /* restored after kexec: clear the flag so allocations work again */
} else {
struct guestmemfs_sb *psb;
@@ -21,6 +21,7 @@ struct guestmemfs_sb {
struct guestmemfs_inode *inodes;
void *allocator_bitmap;
spinlock_t allocation_lock;
+ bool serialised; /* protected by allocation_lock; true once state is dumped via KHO, after which allocations return -EBUSY */
};
// If neither of these are set the inode is not in use.
@@ -48,6 +48,12 @@ static unsigned long guestmemfs_allocate_inode(struct super_block *sb)
struct guestmemfs_sb *psb = GUESTMEMFS_PSB(sb);
spin_lock(&psb->allocation_lock);
+
+ if (psb->serialised) {
+ spin_unlock(&psb->allocation_lock);
+ return -EBUSY; /* NOTE(review): return type is unsigned long, so callers checking < 0 never see this -- confirm callers use an IS_ERR_VALUE-style test */
+ }
+
next_free_ino = psb->next_free_ino;
psb->allocated_inodes += 1;
if (!next_free_ino)
@@ -111,7 +111,7 @@ int guestmemfs_serialise_to_kho(struct notifier_block *self,
switch (cmd) {
case KEXEC_KHO_ABORT:
- /* No rollback action needed. */
+ GUESTMEMFS_PSB(guestmemfs_sb)->serialised = 0; /* KHO aborted: re-enable allocations. NOTE(review): written without allocation_lock -- confirm no allocator can race with ABORT */
return NOTIFY_DONE;
case KEXEC_KHO_DUMP:
/* Handled below */
@@ -120,6 +120,7 @@ int guestmemfs_serialise_to_kho(struct notifier_block *self,
return NOTIFY_BAD;
}
+ spin_lock(&GUESTMEMFS_PSB(guestmemfs_sb)->allocation_lock); /* freeze allocator state while it is dumped into the FDT */
err |= fdt_begin_node(fdt, "guestmemfs");
err |= fdt_property(fdt, "compatible", compatible, sizeof(compatible));
@@ -134,6 +135,11 @@ int guestmemfs_serialise_to_kho(struct notifier_block *self,
err |= fdt_end_node(fdt);
+ if (!err)
+ GUESTMEMFS_PSB(guestmemfs_sb)->serialised = 1; /* only lock out further allocations if the dump actually succeeded */
+
+ spin_unlock(&GUESTMEMFS_PSB(guestmemfs_sb)->allocation_lock);
+
pr_info("Serialised extends [0x%llx + 0x%llx] via KHO: %i\n",
guestmemfs_base, guestmemfs_size, err);
Once the memory regions for inodes, mappings and allocations have been serialised, further modifications would invalidate the serialised data. Return -EBUSY when attempting to create new files or allocate data for files once serialised. Signed-off-by: James Gowans <jgowans@amazon.com> --- fs/guestmemfs/file.c | 19 ++++++++++++++++--- fs/guestmemfs/guestmemfs.c | 1 + fs/guestmemfs/guestmemfs.h | 1 + fs/guestmemfs/inode.c | 6 ++++++ fs/guestmemfs/serialise.c | 8 +++++++- 5 files changed, 31 insertions(+), 4 deletions(-)