@@ -846,6 +846,8 @@ struct mm_struct {
*/
int mm_lock_seq;
#endif
+ /* Counter incremented each time mmap_lock is write-locked; lets lockless users detect possible mm changes */
+ unsigned long mm_wr_seq;
unsigned long hiwater_rss; /* High-watermark of RSS usage */
@@ -106,6 +106,8 @@ static inline void mmap_write_lock(struct mm_struct *mm)
{
__mmap_lock_trace_start_locking(mm, true);
down_write(&mm->mmap_lock);
+ /* Pairs with ACQUIRE in mmap_write_seq_read(); plain read is safe since only write-lock holders update mm_wr_seq */
+ smp_store_release(&mm->mm_wr_seq, mm->mm_wr_seq + 1);
__mmap_lock_trace_acquire_returned(mm, true, true);
}
@@ -113,6 +115,8 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
__mmap_lock_trace_start_locking(mm, true);
down_write_nested(&mm->mmap_lock, subclass);
+ /* Pairs with ACQUIRE in mmap_write_seq_read(); plain read is safe since only write-lock holders update mm_wr_seq */
+ smp_store_release(&mm->mm_wr_seq, mm->mm_wr_seq + 1);
__mmap_lock_trace_acquire_returned(mm, true, true);
}
@@ -122,6 +126,10 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm)
__mmap_lock_trace_start_locking(mm, true);
ret = down_write_killable(&mm->mmap_lock);
+ if (!ret) {
+ /* Pairs with ACQUIRE in mmap_write_seq_read(); bump only on successful lock acquisition */
+ smp_store_release(&mm->mm_wr_seq, mm->mm_wr_seq + 1);
+ }
__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
return ret;
}
@@ -140,6 +148,20 @@ static inline void mmap_write_downgrade(struct mm_struct *mm)
downgrade_write(&mm->mmap_lock);
}
+static inline unsigned long mmap_write_seq_read(struct mm_struct *mm)
+{
+ /* Pairs with the RELEASE in the mmap_write_lock*() variants */
+ return smp_load_acquire(&mm->mm_wr_seq);
+}
+
+static inline void mmap_write_seq_record(struct mm_struct *mm,
+ unsigned long *mm_wr_seq)
+{
+ mmap_assert_locked(mm);
+ /* Holding mmap_lock (at least for read) keeps write-lockers out, so mm_wr_seq is stable here */
+ *mm_wr_seq = mm->mm_wr_seq;
+}
+
static inline void mmap_read_lock(struct mm_struct *mm)
{
__mmap_lock_trace_start_locking(mm, false);
Provide a way for lockless mm_struct users to detect whether the mm might have changed since some specific point in time. The API lets a user record a counter when it starts using the mm, and later use that counter to check whether anyone has write-locked mmap_lock since the counter was recorded. The counter value must be recorded while holding mmap_lock, at least for reading, to protect it from concurrent changes. Every time mmap_lock is write-locked, mm_struct bumps its mm_wr_seq counter, so checks against counters recorded before that point fail, indicating that the mm may have been modified. Signed-off-by: Suren Baghdasaryan <surenb@google.com> --- include/linux/mm_types.h | 2 ++ include/linux/mmap_lock.h | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+)