
[REPOST,1/2] rbd: encapsulate handling for a single request

Message ID 50E60981.2090801@inktank.com (mailing list archive)
State New, archived

Commit Message

Alex Elder Jan. 3, 2013, 10:43 p.m. UTC
In rbd_rq_fn(), requests are fetched from the block layer and each
request is processed, looping through the request's list of bios
until they've all been consumed.

Separate the handling for a single request into its own function to
make it a bit easier to see what's going on.

Signed-off-by: Alex Elder <elder@inktank.com>
---
 drivers/block/rbd.c |  119
+++++++++++++++++++++++++++------------------------
 1 file changed, 63 insertions(+), 56 deletions(-)

  */
@@ -1596,10 +1654,8 @@ static void rbd_rq_fn(struct request_queue *q)
 		bool do_write;
 		unsigned int size;
 		u64 ofs;
-		int num_segs, cur_seg = 0;
-		struct rbd_req_coll *coll;
 		struct ceph_snap_context *snapc;
-		unsigned int bio_offset;
+		int result;

 		dout("fetched request\n");

@@ -1637,60 +1693,11 @@ static void rbd_rq_fn(struct request_queue *q)
 		ofs = blk_rq_pos(rq) * SECTOR_SIZE;
 		bio = rq->bio;

-		dout("%s 0x%x bytes at 0x%llx\n",
-		     do_write ? "write" : "read",
-		     size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
-
-		num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
-		if (num_segs <= 0) {
-			spin_lock_irq(q->queue_lock);
-			__blk_end_request_all(rq, num_segs);
-			ceph_put_snap_context(snapc);
-			continue;
-		}
-		coll = rbd_alloc_coll(num_segs);
-		if (!coll) {
-			spin_lock_irq(q->queue_lock);
-			__blk_end_request_all(rq, -ENOMEM);
-			ceph_put_snap_context(snapc);
-			continue;
-		}
-
-		bio_offset = 0;
-		do {
-			u64 limit = rbd_segment_length(rbd_dev, ofs, size);
-			unsigned int chain_size;
-			struct bio *bio_chain;
-
-			BUG_ON(limit > (u64) UINT_MAX);
-			chain_size = (unsigned int) limit;
-			dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
-
-			kref_get(&coll->kref);
-
-			/* Pass a cloned bio chain via an osd request */
-
-			bio_chain = bio_chain_clone_range(&bio,
-						&bio_offset, chain_size,
-						GFP_ATOMIC);
-			if (bio_chain)
-				(void) rbd_do_op(rq, rbd_dev, snapc,
-						ofs, chain_size,
-						bio_chain, coll, cur_seg);
-			else
-				rbd_coll_end_req_index(rq, coll, cur_seg,
-						       (s32) -ENOMEM,
-						       chain_size);
-			size -= chain_size;
-			ofs += chain_size;
-
-			cur_seg++;
-		} while (size > 0);
-		kref_put(&coll->kref, rbd_coll_release);
-
-		spin_lock_irq(q->queue_lock);
-
+		result = rbd_dev_do_request(rq, rbd_dev, snapc, ofs, size, bio);
 		ceph_put_snap_context(snapc);
+		spin_lock_irq(q->queue_lock);
+		if (result < 0)
+			__blk_end_request_all(rq, result);
 	}
 }

Comments

Josh Durgin Jan. 15, 2013, 11:45 p.m. UTC | #1
On 01/03/2013 02:43 PM, Alex Elder wrote:
> In rbd_rq_fn(), requests are fetched from the block layer and each
> request is processed, looping through the request's list of bios
> until they've all been consumed.
>
> Separate the handling for a single request into its own function to
> make it a bit easier to see what's going on.
>
> Signed-off-by: Alex Elder <elder@inktank.com>
> ---
>   drivers/block/rbd.c |  119
> +++++++++++++++++++++++++++------------------------
>   1 file changed, 63 insertions(+), 56 deletions(-)
>
> diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
> index 8b79a5b..88b9b2e 100644
> --- a/drivers/block/rbd.c
> +++ b/drivers/block/rbd.c
> @@ -1583,6 +1583,64 @@ static struct rbd_req_coll *rbd_alloc_coll(int
> num_reqs)
>   	return coll;
>   }
>
> +static int rbd_dev_do_request(struct request *rq,
> +				struct rbd_device *rbd_dev,
> +				struct ceph_snap_context *snapc,
> +				u64 ofs, unsigned int size,
> +				struct bio *bio_chain)
> +{
> +	int num_segs;
> +	struct rbd_req_coll *coll;
> +	unsigned int bio_offset;
> +	int cur_seg = 0;
> +
> +	dout("%s 0x%x bytes at 0x%llx\n",
> +		rq_data_dir(rq) == WRITE ? "write" : "read",
> +		size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
> +
> +	num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
> +	if (num_segs <= 0)
> +		return num_segs;
> +
> +	coll = rbd_alloc_coll(num_segs);
> +	if (!coll)
> +		return -ENOMEM;
> +
> +	bio_offset = 0;
> +	do {
> +		u64 limit = rbd_segment_length(rbd_dev, ofs, size);
> +		unsigned int clone_size;
> +		struct bio *bio_clone;
> +
> +		BUG_ON(limit > (u64) UINT_MAX);
> +		clone_size = (unsigned int) limit;
> +		dout("bio_chain->bi_vcnt=%hu\n", bio_chain->bi_vcnt);
> +
> +		kref_get(&coll->kref);
> +
> +		/* Pass a cloned bio chain via an osd request */
> +
> +		bio_clone = bio_chain_clone_range(&bio_chain,
> +					&bio_offset, clone_size,
> +					GFP_ATOMIC);
> +		if (bio_clone)
> +			(void) rbd_do_op(rq, rbd_dev, snapc,
> +					ofs, clone_size,
> +					bio_clone, coll, cur_seg);
> +		else
> +			rbd_coll_end_req_index(rq, coll, cur_seg,
> +						(s32) -ENOMEM,
> +						clone_size);
> +		size -= clone_size;
> +		ofs += clone_size;
> +
> +		cur_seg++;
> +	} while (size > 0);
> +	kref_put(&coll->kref, rbd_coll_release);
> +
> +	return 0;
> +}
> +
>   /*
>    * block device queue callback
>    */
> @@ -1596,10 +1654,8 @@ static void rbd_rq_fn(struct request_queue *q)
>   		bool do_write;
>   		unsigned int size;
>   		u64 ofs;
> -		int num_segs, cur_seg = 0;
> -		struct rbd_req_coll *coll;
>   		struct ceph_snap_context *snapc;
> -		unsigned int bio_offset;
> +		int result;
>
>   		dout("fetched request\n");
>
> @@ -1637,60 +1693,11 @@ static void rbd_rq_fn(struct request_queue *q)
>   		ofs = blk_rq_pos(rq) * SECTOR_SIZE;
>   		bio = rq->bio;
>
> -		dout("%s 0x%x bytes at 0x%llx\n",
> -		     do_write ? "write" : "read",
> -		     size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
> -
> -		num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
> -		if (num_segs <= 0) {
> -			spin_lock_irq(q->queue_lock);
> -			__blk_end_request_all(rq, num_segs);
> -			ceph_put_snap_context(snapc);
> -			continue;
> -		}
> -		coll = rbd_alloc_coll(num_segs);
> -		if (!coll) {
> -			spin_lock_irq(q->queue_lock);
> -			__blk_end_request_all(rq, -ENOMEM);
> -			ceph_put_snap_context(snapc);
> -			continue;
> -		}
> -
> -		bio_offset = 0;
> -		do {
> -			u64 limit = rbd_segment_length(rbd_dev, ofs, size);
> -			unsigned int chain_size;
> -			struct bio *bio_chain;
> -
> -			BUG_ON(limit > (u64) UINT_MAX);
> -			chain_size = (unsigned int) limit;
> -			dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
> -
> -			kref_get(&coll->kref);
> -
> -			/* Pass a cloned bio chain via an osd request */
> -
> -			bio_chain = bio_chain_clone_range(&bio,
> -						&bio_offset, chain_size,
> -						GFP_ATOMIC);
> -			if (bio_chain)
> -				(void) rbd_do_op(rq, rbd_dev, snapc,
> -						ofs, chain_size,
> -						bio_chain, coll, cur_seg);
> -			else
> -				rbd_coll_end_req_index(rq, coll, cur_seg,
> -						       (s32) -ENOMEM,
> -						       chain_size);
> -			size -= chain_size;
> -			ofs += chain_size;
> -
> -			cur_seg++;
> -		} while (size > 0);
> -		kref_put(&coll->kref, rbd_coll_release);
> -
> -		spin_lock_irq(q->queue_lock);
> -
> +		result = rbd_dev_do_request(rq, rbd_dev, snapc, ofs, size, bio);
>   		ceph_put_snap_context(snapc);
> +		spin_lock_irq(q->queue_lock);
> +		if (result < 0)

result may be 0 if num_segs == 0, which would leave the request
hanging, never completed.  I'm not sure whether that can actually
happen, but it's different from the previous behavior, which called
__blk_end_request_all() in this case as well.

> +			__blk_end_request_all(rq, result);
>   	}
>   }
>

Alex Elder Jan. 17, 2013, 9:01 p.m. UTC | #2
On 01/15/2013 05:45 PM, Josh Durgin wrote:
> On 01/03/2013 02:43 PM, Alex Elder wrote:
>> In rbd_rq_fn(), requests are fetched from the block layer and each
>> request is processed, looping through the request's list of bios
>> until they've all been consumed.
>>
>> Separate the handling for a single request into its own function to
>> make it a bit easier to see what's going on.
>>
>> Signed-off-by: Alex Elder <elder@inktank.com>
>> ---

. . .

>> -
>> -        spin_lock_irq(q->queue_lock);
>> -
>> +        result = rbd_dev_do_request(rq, rbd_dev, snapc, ofs, size, bio);
>>           ceph_put_snap_context(snapc);
>> +        spin_lock_irq(q->queue_lock);
>> +        if (result < 0)
> 
> result may be 0 if num_segs == 0, which will make the request hang.
> I'm not sure if this will happen (or is even possible), but it's
> different from the previous behavior, which called
> __blk_end_request_all() in this case as well.

You're right.  And I think there may be some valid special
zero-length requests (like cache flush requests or something).

The value of num_segs comes from rbd_get_num_segments().  The
only way it will ever return 0 is if the length it is given is 0.

So my fix will be to change the check after the spin_lock_irq()
call from this:
	if (result < 0)
to this:
	if (!size || result < 0)
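
With that change the tail of the per-request loop in rbd_rq_fn() would
read roughly as follows (a sketch against the code in this patch, just
to show the check in context):

	result = rbd_dev_do_request(rq, rbd_dev, snapc, ofs, size, bio);
	ceph_put_snap_context(snapc);
	spin_lock_irq(q->queue_lock);
	/* also complete zero-length requests; rbd_dev_do_request()
	 * returns 0 for them without issuing any I/O */
	if (!size || result < 0)
		__blk_end_request_all(rq, result);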

I'll re-post the result shortly.

					-Alex

>> +            __blk_end_request_all(rq, result);
>>       }
>>   }
>>
> 


Patch

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8b79a5b..88b9b2e 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1583,6 +1583,64 @@  static struct rbd_req_coll *rbd_alloc_coll(int
num_reqs)
 	return coll;
 }

+static int rbd_dev_do_request(struct request *rq,
+				struct rbd_device *rbd_dev,
+				struct ceph_snap_context *snapc,
+				u64 ofs, unsigned int size,
+				struct bio *bio_chain)
+{
+	int num_segs;
+	struct rbd_req_coll *coll;
+	unsigned int bio_offset;
+	int cur_seg = 0;
+
+	dout("%s 0x%x bytes at 0x%llx\n",
+		rq_data_dir(rq) == WRITE ? "write" : "read",
+		size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
+
+	num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
+	if (num_segs <= 0)
+		return num_segs;
+
+	coll = rbd_alloc_coll(num_segs);
+	if (!coll)
+		return -ENOMEM;
+
+	bio_offset = 0;
+	do {
+		u64 limit = rbd_segment_length(rbd_dev, ofs, size);
+		unsigned int clone_size;
+		struct bio *bio_clone;
+
+		BUG_ON(limit > (u64) UINT_MAX);
+		clone_size = (unsigned int) limit;
+		dout("bio_chain->bi_vcnt=%hu\n", bio_chain->bi_vcnt);
+
+		kref_get(&coll->kref);
+
+		/* Pass a cloned bio chain via an osd request */
+
+		bio_clone = bio_chain_clone_range(&bio_chain,
+					&bio_offset, clone_size,
+					GFP_ATOMIC);
+		if (bio_clone)
+			(void) rbd_do_op(rq, rbd_dev, snapc,
+					ofs, clone_size,
+					bio_clone, coll, cur_seg);
+		else
+			rbd_coll_end_req_index(rq, coll, cur_seg,
+						(s32) -ENOMEM,
+						clone_size);
+		size -= clone_size;
+		ofs += clone_size;
+
+		cur_seg++;
+	} while (size > 0);
+	kref_put(&coll->kref, rbd_coll_release);
+
+	return 0;
+}
+
 /*
  * block device queue callback