@@ -276,7 +276,7 @@ static void end_buffer_async_read(struct address_space *mapping,
* two buffer heads end IO at almost the same time and both
* decide that the page is now completely done.
*/
- first = page_buffers(page);
+ first = bh_first_for_page(bh);
local_irq_save(flags);
bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
clear_buffer_async_read(bh);
@@ -332,7 +332,7 @@ void end_buffer_async_write(struct address_space *mapping, struct page *page,
SetPageError(page);
}
- first = page_buffers(page);
+ first = bh_first_for_page(bh);
local_irq_save(flags);
bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
@@ -39,6 +39,12 @@ enum bh_state_bits {
BH_Prio, /* Buffer should be submitted with REQ_PRIO */
BH_Defer_Completion, /* Defer AIO completion to workqueue */
+ /*
+ * First buffer_head for a page, i.e. the buffer_head that
+ * page->private points to.
+ */
+ BH_FirstForPage,
+
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
*/
@@ -135,6 +141,7 @@ BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
+BUFFER_FNS(FirstForPage, first_for_page)
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
@@ -278,11 +285,22 @@ void buffer_init(void);
* inline definitions
*/
+/**
+ * bh_first_for_page - return the first buffer_head for a page
+ * @bh: buffer_head for which we want the first buffer_head of the same page
+ * Returns: first buffer_head within the same page as the given buffer_head
+ */
+static inline struct buffer_head *bh_first_for_page(struct buffer_head *bh)
+{
+ return page_buffers(bh->b_page);
+}
+
static inline void attach_page_buffers(struct page *page,
struct buffer_head *head)
{
get_page(page);
SetPagePrivate(page);
+ set_buffer_first_for_page(head);
set_page_private(page, (unsigned long)head);
}