@@ -2904,7 +2904,8 @@ STATIC int
xlog_valid_rec_header(
struct xlog *log,
struct xlog_rec_header *rhead,
- xfs_daddr_t blkno)
+ xfs_daddr_t blkno,
+ int hsize)
{
int hlen;
@@ -2920,10 +2921,39 @@ xlog_valid_rec_header(
return -EFSCORRUPTED;
}
- /* LR body must have data or it wouldn't have been written */
+ /*
+ * LR body must have data (or it wouldn't have been written) and
+ * h_len must not be greater than h_size with one exception.
+ *
+	 * The exception is that xfsprogs has a bug where record length is based on
+ * lsunit but h_size (iclog size) is hardcoded to 32k. This means
+ * the log buffer allocated can be too small for the record to
+ * cause an overrun.
+ *
+ * Detect this condition here. Use lsunit for the buffer size as
+ * long as this looks like the mkfs case. Otherwise, return an
+ * error to avoid a buffer overrun.
+ */
hlen = be32_to_cpu(rhead->h_len);
- if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > INT_MAX))
+ if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0))
return -EFSCORRUPTED;
+
+ if (hsize && XFS_IS_CORRUPT(log->l_mp,
+ hsize < be32_to_cpu(rhead->h_size)))
+ return -EFSCORRUPTED;
+ hsize = be32_to_cpu(rhead->h_size);
+
+ if (unlikely(hlen >= hsize)) {
+ if (XFS_IS_CORRUPT(log->l_mp, hlen > log->l_mp->m_logbsize ||
+ rhead->h_num_logops != cpu_to_be32(1)))
+ return -EFSCORRUPTED;
+
+ xfs_warn(log->l_mp,
+ "invalid iclog size (%d bytes), using lsunit (%d bytes)",
+ hsize, log->l_mp->m_logbsize);
+ rhead->h_size = cpu_to_be32(log->l_mp->m_logbsize);
+ }
+
if (XFS_IS_CORRUPT(log->l_mp,
blkno > log->l_logBBsize || blkno > INT_MAX))
return -EFSCORRUPTED;
@@ -2951,7 +2981,7 @@ xlog_do_recovery_pass(
xfs_daddr_t rhead_blk;
char *offset;
char *hbp, *dbp;
- int error = 0, h_size, h_len;
+ int error = 0, h_size;
int error2 = 0;
int bblks, split_bblks;
int hblks, split_hblks, wrapped_hblks;
@@ -2984,37 +3014,11 @@ xlog_do_recovery_pass(
goto bread_err1;
rhead = (xlog_rec_header_t *)offset;
- error = xlog_valid_rec_header(log, rhead, tail_blk);
+ error = xlog_valid_rec_header(log, rhead, tail_blk, 0);
if (error)
goto bread_err1;
- /*
- * xfsprogs has a bug where record length is based on lsunit but
- * h_size (iclog size) is hardcoded to 32k. Now that we
- * unconditionally CRC verify the unmount record, this means the
- * log buffer can be too small for the record and cause an
- * overrun.
- *
- * Detect this condition here. Use lsunit for the buffer size as
- * long as this looks like the mkfs case. Otherwise, return an
- * error to avoid a buffer overrun.
- */
h_size = be32_to_cpu(rhead->h_size);
- h_len = be32_to_cpu(rhead->h_len);
- if (h_len > h_size) {
- if (h_len <= log->l_mp->m_logbsize &&
- be32_to_cpu(rhead->h_num_logops) == 1) {
- xfs_warn(log->l_mp,
- "invalid iclog size (%d bytes), using lsunit (%d bytes)",
- h_size, log->l_mp->m_logbsize);
- h_size = log->l_mp->m_logbsize;
- } else {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
- log->l_mp);
- error = -EFSCORRUPTED;
- goto bread_err1;
- }
- }
if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
(h_size > XLOG_HEADER_CYCLE_SIZE)) {
@@ -3096,7 +3100,7 @@ xlog_do_recovery_pass(
}
rhead = (xlog_rec_header_t *)offset;
error = xlog_valid_rec_header(log, rhead,
- split_hblks ? blk_no : 0);
+ split_hblks ? blk_no : 0, h_size);
if (error)
goto bread_err2;
@@ -3177,7 +3181,7 @@ xlog_do_recovery_pass(
goto bread_err2;
rhead = (xlog_rec_header_t *)offset;
- error = xlog_valid_rec_header(log, rhead, blk_no);
+ error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
if (error)
goto bread_err2;
Currently, a crafted h_len is blocked only for the log header of the tail block, as introduced in commit a70f9fe52daa ("xfs: detect and handle invalid iclog size set by mkfs"). However, each log record could still carry a crafted h_len and cause a log record buffer overrun. So let's check h_len for each log record as well instead. Signed-off-by: Gao Xiang <hsiangkao@redhat.com> --- something random when I read log recovery code... fs/xfs/xfs_log_recover.c | 70 +++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 33 deletions(-)