@@ -55,7 +55,8 @@ xlog_grant_push_ail(
STATIC void
xlog_sync(
struct xlog *log,
- struct xlog_in_core *iclog);
+ struct xlog_in_core *iclog,
+ struct xlog_ticket *ticket);
#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
@@ -537,7 +538,8 @@ __xlog_state_release_iclog(
int
xlog_state_release_iclog(
struct xlog *log,
- struct xlog_in_core *iclog)
+ struct xlog_in_core *iclog,
+ struct xlog_ticket *ticket)
{
lockdep_assert_held(&log->l_icloglock);
@@ -547,7 +549,7 @@ xlog_state_release_iclog(
if (atomic_dec_and_test(&iclog->ic_refcnt) &&
__xlog_state_release_iclog(log, iclog)) {
spin_unlock(&log->l_icloglock);
- xlog_sync(log, iclog);
+ xlog_sync(log, iclog, ticket);
spin_lock(&log->l_icloglock);
}
@@ -908,7 +910,7 @@ xlog_unmount_write(
* iclog containing the unmount record is written.
*/
iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, tic);
xlog_wait_on_iclog(iclog);
if (tic) {
@@ -1939,7 +1941,8 @@ xlog_calc_iclog_size(
STATIC void
xlog_sync(
struct xlog *log,
- struct xlog_in_core *iclog)
+ struct xlog_in_core *iclog,
+ struct xlog_ticket *ticket)
{
unsigned int count; /* byte count of bwrite */
unsigned int roundoff; /* roundoff to BB or stripe */
@@ -1950,12 +1953,20 @@ xlog_sync(
count = xlog_calc_iclog_size(log, iclog, &roundoff);
- /* move grant heads by roundoff in sync */
- xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
- xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
+ /*
+ * If we have a ticket, account for the roundoff via the ticket
+ * reservation to avoid touching the hot grant heads needlessly.
+ * Otherwise, we have to move grant heads directly.
+ */
+ if (ticket) {
+ ticket->t_curr_res -= roundoff;
+ } else {
+ xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
+ xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
+ }
/* put cycle number in every block */
	xlog_pack_data(log, iclog, roundoff);
/* real byte length */
size = iclog->ic_offset;
@@ -2195,7 +2206,7 @@ xlog_write_get_more_iclog_space(
xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
iclog->ic_state == XLOG_STATE_IOERROR);
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, ticket);
spin_unlock(&log->l_icloglock);
if (error)
return error;
@@ -2465,7 +2476,7 @@ xlog_write(
if (commit_iclog)
*commit_iclog = iclog;
else
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, ticket);
spin_unlock(&log->l_icloglock);
return error;
@@ -2923,7 +2934,7 @@ xlog_state_get_iclog_space(
* reference to the iclog.
*/
if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
- error = xlog_state_release_iclog(log, iclog);
+ error = xlog_state_release_iclog(log, iclog, ticket);
spin_unlock(&log->l_icloglock);
if (error)
return error;
@@ -3151,7 +3162,7 @@ xfs_log_force(
atomic_inc(&iclog->ic_refcnt);
lsn = be64_to_cpu(iclog->ic_header.h_lsn);
xlog_state_switch_iclogs(log, iclog, 0);
- if (xlog_state_release_iclog(log, iclog))
+ if (xlog_state_release_iclog(log, iclog, NULL))
goto out_error;
if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
@@ -3244,7 +3255,7 @@ xlog_force_lsn(
}
atomic_inc(&iclog->ic_refcnt);
xlog_state_switch_iclogs(log, iclog, 0);
- if (xlog_state_release_iclog(log, iclog))
+ if (xlog_state_release_iclog(log, iclog, NULL))
goto out_error;
if (log_flushed)
*log_flushed = 1;
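
To make the new roundoff accounting concrete, here is a minimal standalone sketch (not kernel code; the fake_* structs and names below are simplified stand-ins invented for illustration) of the decision xlog_sync() now makes: charge the roundoff to the ticket's current reservation when a ticket is passed in, and fall back to moving both grant heads when it is not.

/*
 * Standalone illustration of the roundoff accounting choice: with a
 * ticket, consume the caller's existing reservation; without one, both
 * (hot, shared) grant heads have to be moved.
 */
#include <stdio.h>

struct fake_ticket {
	int	t_curr_res;	/* remaining reservation, in bytes */
};

struct fake_log {
	long	reserve_head;	/* simplified reserve grant head */
	long	write_head;	/* simplified write grant head */
};

static void account_roundoff(struct fake_log *log, struct fake_ticket *ticket,
		int roundoff)
{
	if (ticket) {
		/* cheap path: charge the roundoff to the ticket */
		ticket->t_curr_res -= roundoff;
	} else {
		/* fallback path: move both grant heads directly */
		log->reserve_head += roundoff;
		log->write_head += roundoff;
	}
}

int main(void)
{
	struct fake_log log = { .reserve_head = 0, .write_head = 0 };
	struct fake_ticket tic = { .t_curr_res = 4096 };

	account_roundoff(&log, &tic, 512);	/* ticket path */
	account_roundoff(&log, NULL, 512);	/* no-ticket path */

	printf("ticket res %d, reserve %ld, write %ld\n",
			tic.t_curr_res, log.reserve_head, log.write_head);
	return 0;
}
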
@@ -832,6 +832,7 @@ xlog_cil_push_work(
struct bio bio;
DECLARE_COMPLETION_ONSTACK(bdev_flush);
bool push_commit_stable;
+ struct xlog_ticket *ticket;
new_ctx = xlog_cil_ctx_alloc();
new_ctx->ticket = xlog_cil_ticket_alloc(log);
@@ -1039,12 +1040,10 @@ xlog_cil_push_work(
if (error)
goto out_abort_free_ticket;
- xfs_log_ticket_ungrant(log, ctx->ticket);
-
spin_lock(&commit_iclog->ic_callback_lock);
if (commit_iclog->ic_state == XLOG_STATE_IOERROR) {
spin_unlock(&commit_iclog->ic_callback_lock);
- goto out_abort;
+ goto out_abort_free_ticket;
}
ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE ||
commit_iclog->ic_state == XLOG_STATE_WANT_SYNC);
@@ -1061,6 +1060,15 @@ xlog_cil_push_work(
wake_up_all(&cil->xc_commit_wait);
spin_unlock(&cil->xc_push_lock);
+ /*
+ * Pull the ticket off the ctx so we can ungrant it after releasing the
+ * commit_iclog. The ctx may be freed by the time we return from
+ * releasing the commit_iclog (i.e. checkpoint has been completed and
+ * callback run) so we can't reference the ctx after the call to
+ * xlog_state_release_iclog().
+ */
+ ticket = ctx->ticket;
+
/*
* If the checkpoint spans multiple iclogs, wait for all previous
* iclogs to complete before we submit the commit_iclog. In this case,
@@ -1087,8 +1095,10 @@ xlog_cil_push_work(
commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
if (push_commit_stable && commit_iclog->ic_state == XLOG_STATE_ACTIVE)
xlog_state_switch_iclogs(log, commit_iclog, 0);
- xlog_state_release_iclog(log, commit_iclog);
+ xlog_state_release_iclog(log, commit_iclog, ticket);
spin_unlock(&log->l_icloglock);
+
+ xfs_log_ticket_ungrant(log, ticket);
return;
out_skip:
@@ -1099,7 +1109,6 @@ xlog_cil_push_work(
out_abort_free_ticket:
xfs_log_ticket_ungrant(log, ctx->ticket);
-out_abort:
ASSERT(XLOG_FORCED_SHUTDOWN(log));
xlog_cil_committed(ctx);
}
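
The reordering in xlog_cil_push_work() above exists because the context can be freed as soon as the commit iclog is released. A minimal standalone sketch of that lifetime rule follows (not kernel code; the fake_* types and functions are invented stand-ins): sample the ticket into a local variable before the release, and ungrant using only that local afterwards.

/*
 * Standalone sketch of the use-after-free hazard the reordering avoids:
 * once the commit iclog is released, checkpoint completion may have
 * freed the context, so the ticket must be sampled first and ungranted
 * only afterwards, without touching the context again.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_ticket { int t_curr_res; };
struct fake_ctx    { struct fake_ticket *ticket; };

/* stand-in for xlog_state_release_iclog(): checkpoint completion may
 * run here and free the context before this returns */
static void fake_release_iclog(struct fake_ctx *ctx)
{
	free(ctx);
}

static void fake_ungrant(struct fake_ticket *ticket)
{
	printf("ungranting %d bytes\n", ticket->t_curr_res);
	free(ticket);
}

int main(void)
{
	struct fake_ctx *ctx = malloc(sizeof(*ctx));
	struct fake_ticket *ticket;

	ctx->ticket = malloc(sizeof(*ctx->ticket));
	ctx->ticket->t_curr_res = 1024;

	ticket = ctx->ticket;		/* sample before the release ... */
	fake_release_iclog(ctx);	/* ... which may free ctx */
	fake_ungrant(ticket);		/* safe: no reference to ctx here */
	return 0;
}
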
@@ -488,7 +488,8 @@ void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
int eventual_size);
-int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
+ struct xlog_ticket *ticket);
/*
* When we crack an atomic LSN, we sample it first so that the value will not