diff mbox

ceph/osd_client: add support for CEPH_OSD_OP_GETXATTR

Message ID 1444401789-17396-2-git-send-email-ddiss@suse.de (mailing list archive)
State New, archived
Headers show

Commit Message

David Disseldorp Oct. 9, 2015, 2:43 p.m. UTC
Allows for xattr retrieval. Response data buffer allocation is the
responsibility of the osd_req_op_xattr_init() caller.

Signed-off-by: David Disseldorp <ddiss@suse.de>
---
 include/linux/ceph/osd_client.h |  8 +++++-
 net/ceph/osd_client.c           | 55 ++++++++++++++++++++++++++++++++---------
 2 files changed, 50 insertions(+), 13 deletions(-)

Comments

David Disseldorp Oct. 14, 2015, 5:37 p.m. UTC | #1
On Fri,  9 Oct 2015 16:43:09 +0200, David Disseldorp wrote:

> Allows for xattr retrieval. Response data buffer allocation is the
> responsibility of the osd_req_op_xattr_init() caller.

Ping, any feedback on the patch?

Cheers, David
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Ilya Dryomov Oct. 14, 2015, 5:57 p.m. UTC | #2
On Wed, Oct 14, 2015 at 7:37 PM, David Disseldorp <ddiss@suse.de> wrote:
> On Fri,  9 Oct 2015 16:43:09 +0200, David Disseldorp wrote:
>
>> Allows for xattr retrieval. Response data buffer allocation is the
>> responsibility of the osd_req_op_xattr_init() caller.
>
> Ping, any feedback on the patch?

The patch itself looks OK, except for the part where you rename a local
variable for no reason, AFAICT.

We've discussed some of this last week.  As it is, all rbd image
properties are stored in omap, so PR info strings stored in xattrs is
something different, but it should be fine for now.  I'd rather not
merge any kernel patches related to rbd-target work until we see
a complete patchset though.  There's been a lot of back and forth
between Mike, Christoph and target people and the general approach had
changed at least twice, so I'd like to wait for things to settle down.

Thanks,

                Ilya
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
David Disseldorp Oct. 14, 2015, 10:51 p.m. UTC | #3
On Wed, 14 Oct 2015 19:57:46 +0200, Ilya Dryomov wrote:

> On Wed, Oct 14, 2015 at 7:37 PM, David Disseldorp <ddiss@suse.de> wrote:
...
> > Ping, any feedback on the patch?
> 
> The patch itself looks OK, except for the part where you rename a local
> variable for no reason, AFAICT.

Thanks for the feedback Ilya. I presume you're referring to the pagelist
-> req_pagelist rename - I'll drop that change and send an update.

> We've discussed some of this last week.  As it is, all rbd image
> properties are stored in omap, so PR info strings stored in xattrs is
> something different, but it should be fine for now.  I'd rather not
> merge any kernel patches related to rbd-target work until we see
> a complete patchset though.  There's been a lot of back and forth
> between Mike, Christoph and target people and the general approach had
> changed at least twice, so I'd like to wait for things to settle down.

I understand the desire to wait on any RBD-target changes until upstream
consensus has been reached, but I'd argue that this change is isolated,
and complementary to the existing SETXATTR / CMPXATTR support.

Cheers, David
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Ilya Dryomov Oct. 15, 2015, 9:21 a.m. UTC | #4
On Thu, Oct 15, 2015 at 12:51 AM, David Disseldorp <ddiss@suse.de> wrote:
> On Wed, 14 Oct 2015 19:57:46 +0200, Ilya Dryomov wrote:
>
>> On Wed, Oct 14, 2015 at 7:37 PM, David Disseldorp <ddiss@suse.de> wrote:
> ...
>> > Ping, any feedback on the patch?
>>
>> The patch itself looks OK, except for the part where you rename a local
>> variable for no reason, AFAICT.
>
> Thanks for the feedback Ilya. I presume you're referring to the pagelist
> -> req_pagelist rename - I'll drop that change and send an update.

Also, the prefix for net/ceph patches is libceph.

>
>> We've discussed some of this last week.  As it is, all rbd image
>> properties are stored in omap, so PR info strings stored in xattrs is
>> something different, but it should be fine for now.  I'd rather not
>> merge any kernel patches related to rbd-target work until we see
>> a complete patchset though.  There's been a lot of back and forth
>> between Mike, Christoph and target people and the general approach had
>> changed at least twice, so I'd like to wait for things to settle down.
>
> I understand the desire to wait on any RBD-target changes until upstream
> consensus has been reached, but I'd argue that this change is isolated,
> and complementary to the existing SETXATTR / CMPXATTR support.

That's true, but we already have a bunch of unused code in net/ceph,
which was added using precisely this reasoning, so I'm a bit hesitant.

Thanks,

                Ilya
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7506b48..062efc3 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -91,7 +91,8 @@  struct ceph_osd_req_op {
 			u32 value_len;
 			__u8 cmp_op;       /* CEPH_OSD_CMPXATTR_OP_* */
 			__u8 cmp_mode;     /* CEPH_OSD_CMPXATTR_MODE_* */
-			struct ceph_osd_data osd_data;
+			struct ceph_osd_data request_data;
+			struct ceph_osd_data response_data;
 		} xattr;
 		struct {
 			const char *class_name;
@@ -298,6 +299,11 @@  extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
 					struct page **pages, u64 length,
 					u32 alignment, bool pages_from_pool,
 					bool own_pages);
+extern void osd_req_op_xattr_response_data_pages(struct ceph_osd_request *,
+					unsigned int which,
+					struct page **pages, u64 length,
+					u32 alignment, bool pages_from_pool,
+					bool own_pages);
 
 extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
 					unsigned int which, u16 opcode,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 80b94e3..7899102 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -243,6 +243,18 @@  void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
 }
 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
 
+void osd_req_op_xattr_response_data_pages(struct ceph_osd_request *osd_req,
+			unsigned int which, struct page **pages, u64 length,
+			u32 alignment, bool pages_from_pool, bool own_pages)
+{
+	struct ceph_osd_data *osd_data;
+
+	osd_data = osd_req_op_data(osd_req, which, xattr, response_data);
+	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
+				pages_from_pool, own_pages);
+}
+EXPORT_SYMBOL(osd_req_op_xattr_response_data_pages);
+
 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
 {
 	switch (osd_data->type) {
@@ -294,7 +306,9 @@  static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
 		break;
 	case CEPH_OSD_OP_SETXATTR:
 	case CEPH_OSD_OP_CMPXATTR:
-		ceph_osd_data_release(&op->xattr.osd_data);
+	case CEPH_OSD_OP_GETXATTR:
+		ceph_osd_data_release(&op->xattr.request_data);
+		ceph_osd_data_release(&op->xattr.response_data);
 		break;
 	case CEPH_OSD_OP_STAT:
 		ceph_osd_data_release(&op->raw_data_in);
@@ -560,31 +574,45 @@  int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
 {
 	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
 						      opcode, 0);
-	struct ceph_pagelist *pagelist;
+	struct ceph_pagelist *req_pagelist;
 	size_t payload_len;
+	int ret;
 
-	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
+	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR
+		&& opcode != CEPH_OSD_OP_CMPXATTR
+		&& opcode != CEPH_OSD_OP_GETXATTR);
 
-	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
-	if (!pagelist)
+	req_pagelist = kmalloc(sizeof(*req_pagelist), GFP_NOFS);
+	if (!req_pagelist)
 		return -ENOMEM;
 
-	ceph_pagelist_init(pagelist);
+	ceph_pagelist_init(req_pagelist);
 
 	payload_len = strlen(name);
 	op->xattr.name_len = payload_len;
-	ceph_pagelist_append(pagelist, name, payload_len);
+	ret = ceph_pagelist_append(req_pagelist, name, payload_len);
+	if (ret)
+		goto err_reqpl_free;
 
-	op->xattr.value_len = size;
-	ceph_pagelist_append(pagelist, value, size);
-	payload_len += size;
+	if (value) {
+		op->xattr.value_len = size;
+		ret = ceph_pagelist_append(req_pagelist, value, size);
+		if (ret)
+			goto err_reqpl_free;
+		payload_len += size;
+	}
 
 	op->xattr.cmp_op = cmp_op;
 	op->xattr.cmp_mode = cmp_mode;
 
-	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
+	ceph_osd_data_pagelist_init(&op->xattr.request_data, req_pagelist);
 	op->payload_len = payload_len;
+
 	return 0;
+
+err_reqpl_free:
+	ceph_pagelist_release(req_pagelist);
+	return ret;
 }
 EXPORT_SYMBOL(osd_req_op_xattr_init);
 
@@ -722,13 +750,16 @@  static u64 osd_req_encode_op(struct ceph_osd_request *req,
 		break;
 	case CEPH_OSD_OP_SETXATTR:
 	case CEPH_OSD_OP_CMPXATTR:
+	case CEPH_OSD_OP_GETXATTR:
 		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
 		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
 		dst->xattr.cmp_op = src->xattr.cmp_op;
 		dst->xattr.cmp_mode = src->xattr.cmp_mode;
-		osd_data = &src->xattr.osd_data;
+		osd_data = &src->xattr.request_data;
 		ceph_osdc_msg_data_add(req->r_request, osd_data);
 		request_data_len = osd_data->pagelist->length;
+		osd_data = &src->xattr.response_data;
+		ceph_osdc_msg_data_add(req->r_reply, osd_data);
 		break;
 	case CEPH_OSD_OP_CREATE:
 	case CEPH_OSD_OP_DELETE: