@@ -40,10 +40,12 @@
struct rds_page_remainder {
struct page *r_page;
unsigned long r_offset;
+ local_lock_t bh_lock;
};
-static
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
/**
* rds_page_remainder_alloc - build up regions of a message.
@@ -87,6 +89,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
}
local_bh_disable();
+ local_lock_nested_bh(&rds_page_remainders.bh_lock);
rem = this_cpu_ptr(&rds_page_remainders);
while (1) {
@@ -115,11 +118,13 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
}
/* alloc if there is nothing for us to use */
+ local_unlock_nested_bh(&rds_page_remainders.bh_lock);
local_bh_enable();
page = alloc_page(gfp);
local_bh_disable();
+ local_lock_nested_bh(&rds_page_remainders.bh_lock);
rem = this_cpu_ptr(&rds_page_remainders);
if (!page) {
@@ -138,6 +143,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
rem->r_offset = 0;
}
+ local_unlock_nested_bh(&rds_page_remainders.bh_lock);
local_bh_enable();
out:
rdsdebug("bytes %lu ret %d %p %u %u\n", bytes, ret,
rds_page_remainder is a per-CPU variable and relies on disabled BH for its
locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT this
data structure requires explicit locking.

Add a local_lock_t to the data structure and use local_lock_nested_bh() for
locking. This change adds only lockdep coverage and does not alter the
functional behaviour for !PREEMPT_RT.

Cc: Allison Henderson <allison.henderson@oracle.com>
Cc: linux-rdma@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 net/rds/page.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
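
For reference, a minimal sketch of the locking pattern this patch applies,
reduced to a hypothetical per-CPU structure (the names my_remainder,
my_remainders and my_percpu_update are illustrative, not from the RDS code):
a local_lock_t is embedded in the per-CPU data and taken with
local_lock_nested_bh() inside the existing BH-disabled section.

	#include <linux/bottom_half.h>
	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct my_remainder {
		unsigned long	r_count;
		local_lock_t	bh_lock;
	};

	static DEFINE_PER_CPU_SHARED_ALIGNED(struct my_remainder, my_remainders) = {
		.bh_lock = INIT_LOCAL_LOCK(bh_lock),
	};

	static void my_percpu_update(unsigned long n)
	{
		struct my_remainder *rem;

		local_bh_disable();
		/*
		 * On PREEMPT_RT this acquires the per-CPU lock; on
		 * !PREEMPT_RT it only provides lockdep coverage, so the
		 * functional behaviour is unchanged.
		 */
		local_lock_nested_bh(&my_remainders.bh_lock);
		rem = this_cpu_ptr(&my_remainders);

		rem->r_count += n;

		local_unlock_nested_bh(&my_remainders.bh_lock);
		local_bh_enable();
	}

As in the patch, the lock is released before re-enabling BH, and any section
that drops BH (e.g. around a sleeping allocation) must also drop the nested
lock and re-acquire both afterwards.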