@@ -564,3 +564,63 @@ const struct xfs_buf_ops xfs_rtsb_buf_ops = {
.verify_write = xfs_rtsb_write_verify,
.verify_struct = xfs_rtsb_verify_all,
};
+
+/* Update a realtime superblock from the primary fs super */
+void
+xfs_update_rtsb(
+ struct xfs_buf *rtsb_bp,
+ const struct xfs_buf *sb_bp)
+{
+ const struct xfs_dsb *dsb = sb_bp->b_addr;
+ struct xfs_rtsb *rsb = rtsb_bp->b_addr;
+ const uuid_t *meta_uuid;
+
+ rsb->rsb_magicnum = cpu_to_be32(XFS_RTSB_MAGIC);
+
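+ /* Copy the filesystem label so the rt device carries the same label. */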
+ rsb->rsb_pad = 0;
+ memcpy(&rsb->rsb_fname, &dsb->sb_fname, XFSLABEL_MAX);
+
+ memcpy(&rsb->rsb_uuid, &dsb->sb_uuid, sizeof(rsb->rsb_uuid));
+
+ /*
+ * The metadata uuid is the fs uuid if the metauuid feature is not
+ * enabled.
+ */
+ if (dsb->sb_features_incompat &
+ cpu_to_be32(XFS_SB_FEAT_INCOMPAT_META_UUID))
+ meta_uuid = &dsb->sb_meta_uuid;
+ else
+ meta_uuid = &dsb->sb_uuid;
+ memcpy(&rsb->rsb_meta_uuid, meta_uuid, sizeof(rsb->rsb_meta_uuid));
+}
+
+/*
+ * Update the realtime superblock from a filesystem superblock and log it to
+ * the given transaction.
+ */
+struct xfs_buf *
+xfs_log_rtsb(
+ struct xfs_trans *tp,
+ const struct xfs_buf *sb_bp)
+{
+ struct xfs_buf *rtsb_bp;
+
+ if (!xfs_has_rtsb(tp->t_mountp))
+ return NULL;
+
+ rtsb_bp = xfs_trans_getrtsb(tp);
+ if (!rtsb_bp) {
+ /*
+ * The rtgroups feature can be enabled without there being an
+ * incore rt superblock buffer, if the rt geometry was specified
+ * at mkfs time but the rt section has not yet been attached.
+ * In that case, rblocks must be zero.
+ */
+ ASSERT(tp->t_mountp->m_sb.sb_rblocks == 0);
+ return NULL;
+ }
+
+ xfs_update_rtsb(rtsb_bp, sb_bp);
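+ /*
+ * Ordered buffers are tracked by the transaction but their contents
+ * are not copied into the log; the buffer is simply written back in
+ * place. Log recovery instead regenerates the rt superblock from the
+ * recovered primary superblock buffer.
+ */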
+ xfs_trans_ordered_buf(tp, rtsb_bp);
+ return rtsb_bp;
+}
@@ -251,6 +251,11 @@ static inline const char *xfs_rtginode_path(xfs_rgnumber_t rgno,
{
return kasprintf(GFP_KERNEL, "%u.%s", rgno, xfs_rtginode_name(type));
}
+
+void xfs_update_rtsb(struct xfs_buf *rtsb_bp,
+ const struct xfs_buf *sb_bp);
+struct xfs_buf *xfs_log_rtsb(struct xfs_trans *tp,
+ const struct xfs_buf *sb_bp);
#else
static inline void xfs_free_rtgroups(struct xfs_mount *mp,
xfs_rgnumber_t first_rgno, xfs_rgnumber_t end_rgno)
@@ -269,6 +274,8 @@ static inline int xfs_initialize_rtgroups(struct xfs_mount *mp,
# define xfs_rtgroup_lock(rtg, gf) ((void)0)
# define xfs_rtgroup_unlock(rtg, gf) ((void)0)
# define xfs_rtgroup_trans_join(tp, rtg, gf) ((void)0)
+# define xfs_update_rtsb(bp, sb_bp) ((void)0)
+# define xfs_log_rtsb(tp, sb_bp) (NULL)
#endif /* CONFIG_XFS_RT */
#endif /* __LIBXFS_RTGROUP_H */
@@ -27,6 +27,7 @@
#include "xfs_ag.h"
#include "xfs_rtbitmap.h"
#include "xfs_exchrange.h"
+#include "xfs_rtgroup.h"
/*
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -1276,10 +1277,12 @@ xfs_update_secondary_sbs(
*/
int
xfs_sync_sb_buf(
- struct xfs_mount *mp)
+ struct xfs_mount *mp,
+ bool update_rtsb)
{
struct xfs_trans *tp;
struct xfs_buf *bp;
+ struct xfs_buf *rtsb_bp = NULL;
int error;

error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0, 0, &tp);
@@ -1289,6 +1292,11 @@ xfs_sync_sb_buf(
bp = xfs_trans_getsb(tp);
xfs_log_sb(tp);
xfs_trans_bhold(tp, bp);
+ if (update_rtsb) {
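+ /*
+ * Log the rt superblock in the same transaction and hold it
+ * across the commit so it can be written back below, mirroring
+ * the handling of the primary superblock buffer above.
+ */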
+ rtsb_bp = xfs_log_rtsb(tp, bp);
+ if (rtsb_bp)
+ xfs_trans_bhold(tp, rtsb_bp);
+ }
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp);
if (error)
@@ -1297,7 +1305,11 @@ xfs_sync_sb_buf(
* write out the sb buffer to get the changes to disk
*/
error = xfs_bwrite(bp);
+ if (!error && rtsb_bp)
+ error = xfs_bwrite(rtsb_bp);
out:
+ if (rtsb_bp)
+ xfs_buf_relse(rtsb_bp);
xfs_buf_relse(bp);
return error;
}
@@ -15,7 +15,7 @@ struct xfs_perag;
extern void xfs_log_sb(struct xfs_trans *tp);
extern int xfs_sync_sb(struct xfs_mount *mp, bool wait);
-extern int xfs_sync_sb_buf(struct xfs_mount *mp);
+extern int xfs_sync_sb_buf(struct xfs_mount *mp, bool update_rtsb);
extern void xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp);
void xfs_mount_sb_set_rextsize(struct xfs_mount *mp,
struct xfs_sb *sbp);
@@ -1061,6 +1061,18 @@ xlog_recover_buf_commit_pass2(
current_lsn);
if (error)
goto out_release;
+
+ /* Update the rt superblock if we have one. */
+ if (xfs_has_rtsb(mp) && mp->m_rtsb_bp) {
+ struct xfs_buf *rtsb_bp = mp->m_rtsb_bp;
+
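+ /*
+ * Lock and hold the incore rt superblock buffer; xfs_buf_relse()
+ * below drops both once the buffer has been queued for writeback.
+ */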
+ xfs_buf_lock(rtsb_bp);
+ xfs_buf_hold(rtsb_bp);
+ xfs_update_rtsb(rtsb_bp, bp);
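+ /*
+ * Mark the buffer as being under log recovery and queue it for
+ * writeback with the rest of the recovered buffers.
+ */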
+ rtsb_bp->b_flags |= _XBF_LOGRECOVERY;
+ xfs_buf_delwri_queue(rtsb_bp, buffer_list);
+ xfs_buf_relse(rtsb_bp);
+ }
} else {
xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
}
@@ -1028,7 +1028,7 @@ xfs_ioc_setlabel(
* buffered reads from userspace (i.e. from blkid) are invalidated,
* and userspace will see the newly-written label.
*/
- error = xfs_sync_sb_buf(mp);
+ error = xfs_sync_sb_buf(mp, true);
if (error)
goto out;
/*
@@ -1039,6 +1039,8 @@ xfs_ioc_setlabel(
mutex_unlock(&mp->m_growlock);
invalidate_bdev(mp->m_ddev_targp->bt_bdev);
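+ /*
+ * The rt superblock carries a copy of the label, so invalidate the
+ * rt device's buffer cache as well.
+ */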
+ if (xfs_has_rtsb(mp) && mp->m_rtdev_targp)
+ invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
out:
mnt_drop_write_file(filp);
@@ -25,6 +25,7 @@
#include "xfs_dquot.h"
#include "xfs_icache.h"
#include "xfs_rtbitmap.h"
+#include "xfs_rtgroup.h"
struct kmem_cache *xfs_trans_cache;
@@ -214,6 +214,7 @@ xfs_trans_read_buf(
}
struct xfs_buf *xfs_trans_getsb(struct xfs_trans *);
+struct xfs_buf *xfs_trans_getrtsb(struct xfs_trans *tp);
void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *);
@@ -168,12 +168,11 @@ xfs_trans_get_buf_map(
/*
* Get and lock the superblock buffer for the given transaction.
*/
-struct xfs_buf *
-xfs_trans_getsb(
- struct xfs_trans *tp)
+static struct xfs_buf *
+__xfs_trans_getsb(
+ struct xfs_trans *tp,
+ struct xfs_buf *bp)
{
- struct xfs_buf *bp = tp->t_mountp->m_sb_bp;
-
/*
* Just increment the lock recursion count if the buffer is already
* attached to this transaction.
@@ -197,6 +196,22 @@ xfs_trans_getsb(
return bp;
}
+struct xfs_buf *
+xfs_trans_getsb(
+ struct xfs_trans *tp)
+{
+ return __xfs_trans_getsb(tp, tp->t_mountp->m_sb_bp);
+}
+
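+/*
+ * Get and lock the realtime superblock buffer for the given transaction, or
+ * return NULL if there is no incore realtime superblock buffer.
+ */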
+struct xfs_buf *
+xfs_trans_getrtsb(
+ struct xfs_trans *tp)
+{
+ if (!tp->t_mountp->m_rtsb_bp)
+ return NULL;
+ return __xfs_trans_getsb(tp, tp->t_mountp->m_rtsb_bp);
+}
+
/*
* Get and lock the buffer for the caller if it is not already
* locked within the given transaction. If it has not yet been