rdma/cxgb4: Add support for 64-byte CQEs

Message ID 20180705125601.7862-1-rajur@chelsio.com (mailing list archive)
State Accepted
Delegated to: Jason Gunthorpe

Commit Message

Raju Rangoju July 5, 2018, 12:56 p.m. UTC
- This patch adds support for iw_cxgb4 to extend CQEs from the existing
  32-byte size to 64 bytes
- Also adds backward compatibility support (for 32-byte CQEs) to work
  with older libraries

Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
---
 drivers/infiniband/hw/cxgb4/cq.c       | 43 +++++++++++++++++++++++++++++-----
 drivers/infiniband/hw/cxgb4/ev.c       |  5 ++--
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |  1 +
 drivers/infiniband/hw/cxgb4/t4.h       | 18 +++++++++++---
 include/uapi/rdma/cxgb4-abi.h          | 12 +++++++++-
 5 files changed, 67 insertions(+), 12 deletions(-)
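
Below is a minimal, self-contained sketch of the 32-byte/64-byte CQE negotiation this patch implements on the user-context path. It is not the actual driver or rdma-core provider code: negotiate_cqe_size() and struct c4iw_create_cq_resp_tail are hypothetical simplifications introduced only to illustrate the checks in c4iw_create_cq(); C4IW_64B_CQE and struct c4iw_create_cq are taken from the patch's include/uapi/rdma/cxgb4-abi.h changes.

/*
 * Sketch of the backward-compatibility decision:
 *  - an updated library passes a struct c4iw_create_cq command, so
 *    udata->inlen >= sizeof(struct c4iw_create_cq) and the CQ is
 *    created with 64-byte CQEs (FW_RI_RES_WR_IQESIZE_V(2));
 *  - an older library passes a shorter (or no) command, so the kernel
 *    sets is_32b_cqe, keeps 32-byte CQEs (IQESIZE_V(1)) and trims the
 *    response so the old library's shorter c4iw_create_cq_resp layout
 *    (ending in 'reserved') is still honoured;
 *  - the kernel advertises 64-byte CQE support back via uresp.flags.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define C4IW_64B_CQE (1 << 0)             /* from cxgb4-abi.h (this patch) */

struct c4iw_create_cq {                   /* request struct added by this patch */
	uint32_t flags;
	uint32_t reserved;
};

struct c4iw_create_cq_resp_tail {         /* hypothetical: only the tail of the
					   * real c4iw_create_cq_resp matters here */
	uint32_t qid_mask;
	uint32_t flags;                   /* was 'reserved' before this patch */
};

/* Hypothetical helper mirroring the checks in c4iw_create_cq(). */
static void negotiate_cqe_size(size_t udata_inlen, int *is_32b_cqe,
			       size_t *resp_copy_len, uint32_t *resp_flags)
{
	*is_32b_cqe = udata_inlen < sizeof(struct c4iw_create_cq);
	*resp_flags = C4IW_64B_CQE;       /* kernel always advertises 64B support */

	/* old libraries must not be handed the new flags field */
	*resp_copy_len = *is_32b_cqe ?
		sizeof(struct c4iw_create_cq_resp_tail) - sizeof(uint32_t) :
		sizeof(struct c4iw_create_cq_resp_tail);
}

int main(void)
{
	int is_32b;
	size_t copy_len;
	uint32_t flags;

	/* old library: no c4iw_create_cq command in udata */
	negotiate_cqe_size(0, &is_32b, &copy_len, &flags);
	printf("old lib: 32B CQEs=%d, resp tail bytes copied=%zu\n",
	       is_32b, copy_len);

	/* updated library: passes the full command, gets 64B CQEs */
	negotiate_cqe_size(sizeof(struct c4iw_create_cq), &is_32b, &copy_len,
			   &flags);
	printf("new lib: 32B CQEs=%d, resp tail bytes copied=%zu, flags=0x%x\n",
	       is_32b, copy_len, flags);
	return 0;
}

The same size choice drives the CQ memory sizing in the patch (hwentries * 32 vs. hwentries * 64 bytes) and therefore where the status page, and hence cq->qp_errp, ends up in the queue.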

Comments

Steve Wise July 12, 2018, 2:49 p.m. UTC | #1
Hey Jason/Doug,

Will you merge this soon?  We have some other important features to add
that are dependent on this, and we would like to get them in for 4.19 as
well.

Thanks,

Steve.


On 7/5/2018 7:56 AM, Raju Rangoju wrote:
> - This patch adds support for iw_cxgb4 to extend CQEs from the existing
>   32-byte size to 64 bytes
> - Also adds backward compatibility support (for 32-byte CQEs) to work
>   with older libraries
>
> Signed-off-by: Raju Rangoju <rajur@chelsio.com>
> Reviewed-by: Steve Wise <swise@opengridcomputing.com>

Jason Gunthorpe July 12, 2018, 2:49 p.m. UTC | #2
On Thu, Jul 12, 2018 at 09:49:25AM -0500, Steve Wise wrote:
> Hey Jason/Doug,
> 
> Will you merge this soon?  We have some other important features to add
> that are dependent on this, and we would like to get them in for 4.19 as
> well.

Well, the rdma-core side only started to compile yesterday :)

Jason
Steve Wise July 12, 2018, 2:57 p.m. UTC | #3
On 7/12/2018 9:49 AM, Jason Gunthorpe wrote:
> On Thu, Jul 12, 2018 at 09:49:25AM -0500, Steve Wise wrote:
>> Hey Jason/Doug,
>>
>> Will you merge this soon?  We have some other important features to add
>> that are dependent on this, and we would like to get them in for 4.19 as
>> well.
> Well, the rdma-core side only started to compile yesterday :)
>
> Jason
>

Indeed.  So is it now standard operating procedure to only merge the
kernel side when the user side is also ready to merge?

Thanks,

Steve.
Jason Gunthorpe July 12, 2018, 3:15 p.m. UTC | #4
On Thu, Jul 12, 2018 at 09:57:06AM -0500, Steve Wise wrote:
> 
> 
> On 7/12/2018 9:49 AM, Jason Gunthorpe wrote:
> > On Thu, Jul 12, 2018 at 09:49:25AM -0500, Steve Wise wrote:
> >> Hey Jason/Doug,
> >>
> >> Will you merge this soon? We have some other important features
> >> to add that are dependent on this, and we would like to get them
> >> in for 4.19 as well.
>
> > Well, the rdma-core side only started to compile yesterday :)
> 
> Indeed. So is it now standard operating procedure to only merge the
> kernel side when the user side is also ready to merge?

Yes, it has been that way for some time now - it avoids stupid API bugs in
the kernel side.

Jason
Steve Wise July 12, 2018, 3:26 p.m. UTC | #5
On 7/12/2018 10:15 AM, Jason Gunthorpe wrote:
> On Thu, Jul 12, 2018 at 09:57:06AM -0500, Steve Wise wrote:
>>
>> On 7/12/2018 9:49 AM, Jason Gunthorpe wrote:
>>> On Thu, Jul 12, 2018 at 09:49:25AM -0500, Steve Wise wrote:
>>>> Hey Jason/Doug,
>>>>
>>>> Will you merge this soon? We have some other important features
>>>> to add that are dependent on this, and we would like to get them
>>>> in for 4.19 as well.
>>> Well, the rdma-core side only started to compile yesterday :)
>> Indeed. So is it now standard operating procedure to only merge the
>> kernel side when the user side is also ready to merge?
> Yes, it has been that way for some time now - it avoids stupid API bugs in
> the kernel side.
>
> Jason

Good to know. :)

(I should have known this)

Steve.
Jason Gunthorpe July 13, 2018, 5:53 p.m. UTC | #6
On Thu, Jul 05, 2018 at 06:26:01PM +0530, Raju Rangoju wrote:
> - This patch adds support for iw_cxgb4 to extend CQEs from the existing
>   32-byte size to 64 bytes
> - Also adds backward compatibility support (for 32-byte CQEs) to work
>   with older libraries
> 
> Signed-off-by: Raju Rangoju <rajur@chelsio.com>
> Reviewed-by: Steve Wise <swise@opengridcomputing.com>
> ---
>  drivers/infiniband/hw/cxgb4/cq.c       | 43 +++++++++++++++++++++++++++++-----
>  drivers/infiniband/hw/cxgb4/ev.c       |  5 ++--
>  drivers/infiniband/hw/cxgb4/iw_cxgb4.h |  1 +
>  drivers/infiniband/hw/cxgb4/t4.h       | 18 +++++++++++---
>  include/uapi/rdma/cxgb4-abi.h          | 12 +++++++++-
>  5 files changed, 67 insertions(+), 12 deletions(-)

Applied to for-next, thanks

Jason

Patch

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 2be2e1ac1b5f..dea3b27e105f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -77,6 +77,10 @@  static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	int user = (uctx != &rdev->uctx);
 	int ret;
 	struct sk_buff *skb;
+	struct c4iw_ucontext *ucontext = NULL;
+
+	if (user)
+		ucontext = container_of(uctx, struct c4iw_ucontext, uctx);
 
 	cq->cqid = c4iw_get_cqid(rdev, uctx);
 	if (!cq->cqid) {
@@ -100,6 +104,16 @@  static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
 	memset(cq->queue, 0, cq->memsize);
 
+	if (user && ucontext->is_32b_cqe) {
+		cq->qp_errp = &((struct t4_status_page *)
+		((u8 *)cq->queue + (cq->size - 1) *
+		 (sizeof(*cq->queue) / 2)))->qp_err;
+	} else {
+		cq->qp_errp = &((struct t4_status_page *)
+		((u8 *)cq->queue + (cq->size - 1) *
+		 sizeof(*cq->queue)))->qp_err;
+	}
+
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + sizeof *res;
 
@@ -132,7 +146,9 @@  static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_IQPCIECH_V(2) |
 			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
 			FW_RI_RES_WR_IQO_F |
-			FW_RI_RES_WR_IQESIZE_V(1));
+			((user && ucontext->is_32b_cqe) ?
+			 FW_RI_RES_WR_IQESIZE_V(1) :
+			 FW_RI_RES_WR_IQESIZE_V(2)));
 	res->u.cq.iqsize = cpu_to_be16(cq->size);
 	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
 
@@ -876,6 +892,7 @@  struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	int vector = attr->comp_vector;
 	struct c4iw_dev *rhp;
 	struct c4iw_cq *chp;
+	struct c4iw_create_cq ucmd;
 	struct c4iw_create_cq_resp uresp;
 	struct c4iw_ucontext *ucontext = NULL;
 	int ret, wr_len;
@@ -891,9 +908,16 @@  struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	if (vector >= rhp->rdev.lldi.nciq)
 		return ERR_PTR(-EINVAL);
 
+	if (ib_context) {
+		ucontext = to_c4iw_ucontext(ib_context);
+		if (udata->inlen < sizeof(ucmd))
+			ucontext->is_32b_cqe = 1;
+	}
+
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
+
 	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
 	if (!chp->wr_waitp) {
 		ret = -ENOMEM;
@@ -908,9 +932,6 @@  struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		goto err_free_wr_wait;
 	}
 
-	if (ib_context)
-		ucontext = to_c4iw_ucontext(ib_context);
-
 	/* account for the status page. */
 	entries++;
 
@@ -934,13 +955,15 @@  struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	if (hwentries < 64)
 		hwentries = 64;
 
-	memsize = hwentries * sizeof *chp->cq.queue;
+	memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
+			(sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));
 
 	/*
 	 * memsize must be a multiple of the page size if its a user cq.
 	 */
 	if (ucontext)
 		memsize = roundup(memsize, PAGE_SIZE);
+
 	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
 	chp->cq.vector = vector;
@@ -971,6 +994,7 @@  struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		if (!mm2)
 			goto err_free_mm;
 
+		memset(&uresp, 0, sizeof(uresp));
 		uresp.qid_mask = rhp->rdev.cqmask;
 		uresp.cqid = chp->cq.cqid;
 		uresp.size = chp->cq.size;
@@ -980,9 +1004,16 @@  struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		ucontext->key += PAGE_SIZE;
 		uresp.gts_key = ucontext->key;
 		ucontext->key += PAGE_SIZE;
+		/* communicate to the userspace that
+		 * kernel driver supports 64B CQE
+		 */
+		uresp.flags |= C4IW_64B_CQE;
+
 		spin_unlock(&ucontext->mmap_lock);
 		ret = ib_copy_to_udata(udata, &uresp,
-				       sizeof(uresp) - sizeof(uresp.reserved));
+				       ucontext->is_32b_cqe ?
+				       sizeof(uresp) - sizeof(uresp.flags) :
+				       sizeof(uresp));
 		if (ret)
 			goto err_free_mm2;
 
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 3e9d8b277ab9..8741d23168f3 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -70,9 +70,10 @@  static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
 		CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
 
-	pr_debug("%016llx %016llx %016llx %016llx\n",
+	pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n",
 		 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
-		 be64_to_cpu(p[3]));
+		 be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
+		 be64_to_cpu(p[6]), be64_to_cpu(p[7]));
 
 	/*
 	 * Ingress WRITE and READ_RESP errors provide
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 870649ff049c..8866bf992316 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -566,6 +566,7 @@  struct c4iw_ucontext {
 	spinlock_t mmap_lock;
 	struct list_head mmaps;
 	struct kref kref;
+	bool is_32b_cqe;
 };
 
 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 8369c7c8de83..838a7dee48bd 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -179,9 +179,20 @@  struct t4_cqe {
 			__be32 wrid_hi;
 			__be32 wrid_low;
 		} gen;
+		struct {
+			__be32 stag;
+			__be32 msn;
+			__be32 reserved;
+			__be32 abs_rqe_idx;
+		} srcqe;
+		struct {
+			__be64 imm_data;
+		} imm_data_rcqe;
+
 		u64 drain_cookie;
+		__be64 flits[3];
 	} u;
-	__be64 reserved;
+	__be64 reserved[3];
 	__be64 bits_type_ts;
 };
 
@@ -565,6 +576,7 @@  struct t4_cq {
 	u16 cidx_inc;
 	u8 gen;
 	u8 error;
+	u8 *qp_errp;
 	unsigned long flags;
 };
 
@@ -698,12 +710,12 @@  static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 
 static inline int t4_cq_in_error(struct t4_cq *cq)
 {
-	return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
+	return *cq->qp_errp;
 }
 
 static inline void t4_set_cq_in_error(struct t4_cq *cq)
 {
-	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+	*cq->qp_errp = 1;
 }
 #endif
 
diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h
index a159ba8dcf8f..65c9eacd3ffb 100644
--- a/include/uapi/rdma/cxgb4-abi.h
+++ b/include/uapi/rdma/cxgb4-abi.h
@@ -44,6 +44,16 @@ 
  * In particular do not use pointer types -- pass pointers in __aligned_u64
  * instead.
  */
+
+enum {
+	C4IW_64B_CQE = (1 << 0)
+};
+
+struct c4iw_create_cq {
+	__u32 flags;
+	__u32 reserved;
+};
+
 struct c4iw_create_cq_resp {
 	__aligned_u64 key;
 	__aligned_u64 gts_key;
@@ -51,7 +61,7 @@  struct c4iw_create_cq_resp {
 	__u32 cqid;
 	__u32 size;
 	__u32 qid_mask;
-	__u32 reserved; /* explicit padding (optional for i386) */
+	__u32 flags;
 };
 
 enum {