--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1,5 +1,6 @@
 /*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2009 Bart Van Assche <bart.vanassche@gmail.com>.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -53,6 +54,8 @@
 #define PFX       DRV_NAME ": "
 #define DRV_VERSION   "0.2"
 #define DRV_RELDATE   "November 1, 2005"
+/* Similar to is_power_of_2(), but can be evaluated at compile time. */
+#define IS_POWER_OF_2(n) ((n) != 0 && (((n) & ((n) - 1)) == 0))
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
@@ -82,6 +85,11 @@ static void srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device);
 static void srp_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static void srp_process_cred_req(struct srp_target_port *target,
+                                 struct srp_cred_req *req);
+static void srp_process_aer_req(struct srp_target_port *target,
+                                struct srp_cred_req *req);
+static int srp_post_recv(struct srp_target_port *target);
 static struct scsi_transport_template *ib_srp_transport_template;
@@ -237,7 +245,7 @@ static int srp_create_target_ib(struct srp_target_port *target)
    ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
    init_attr->event_handler    = srp_qp_event;
-    init_attr->cap.max_send_wr   = SRP_SQ_SIZE;
+    init_attr->cap.max_send_wr   = SRP_SQ_SIZE + SRP_TXP_SIZE;
    init_attr->cap.max_recv_wr   = SRP_RQ_SIZE;
    init_attr->cap.max_recv_sge   = 1;
    init_attr->cap.max_send_sge   = 1;
@@ -272,10 +280,12 @@ static void srp_free_target_ib(struct srp_target_port *target)
    ib_destroy_qp(target->qp);
    ib_destroy_cq(target->cq);
-    for (i = 0; i < SRP_RQ_SIZE; ++i)
+    for (i = 0; i < ARRAY_SIZE(target->rx_ring); ++i)
        srp_free_iu(target->srp_host, target->rx_ring[i]);
-    for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
+    for (i = 0; i < ARRAY_SIZE(target->tx_ring); ++i)
        srp_free_iu(target->srp_host, target->tx_ring[i]);
+    for (i = 0; i < ARRAY_SIZE(target->txp_ring); ++i)
+        srp_free_iu(target->srp_host, target->txp_ring[i]);
 }
 static void srp_path_rec_completion(int status,
@@ -888,6 +898,14 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
              PFX "Got target logout request\n");
        break;
+    case SRP_CRED_REQ:
+        srp_process_cred_req(target, iu->buf);
+        break;
+
+    case SRP_AER_REQ:
+        srp_process_aer_req(target, iu->buf);
+        break;
+
    default:
        shost_printk(KERN_WARNING, target->scsi_host,
              PFX "Unhandled SRP opcode 0x%02x\n", opcode);
@@ -908,7 +926,11 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
        if (wc.status) {
            shost_printk(KERN_ERR, target->scsi_host,
                  PFX "failed %s status %d\n",
-                  wc.wr_id & SRP_OP_RECV ? "receive" : "send",
+                  wc.wr_id & SRP_OP_RECV
+                  ? "receiving"
+                  : wc.wr_id & SRP_OP_TXP
+                  ? "sending response"
+                  : "sending request",
                  wc.status);
            target->qp_in_error = 1;
            break;
@@ -916,6 +938,8 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
        if (wc.wr_id & SRP_OP_RECV)
            srp_handle_recv(target, &wc);
+        else if (wc.wr_id & SRP_OP_TXP)
+            ++target->txp_tail;
        else
            ++target->tx_tail;
    }
@@ -961,15 +985,19 @@ static int srp_post_recv(struct srp_target_port *target)
 }
 /*
+ * Obtain an information unit for sending a request to the target.
+ *
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * call to __srp_post_send_req().
 */
 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
                    enum srp_request_type req_type)
 {
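+    /* Keep the last request-limit credit in reserve for task management. */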
    s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
+    BUILD_BUG_ON(!IS_POWER_OF_2(ARRAY_SIZE(target->tx_ring)));
+
    if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
        return NULL;
@@ -978,26 +1006,31 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
        return NULL;
    }
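+    /*
+     * tx_head and tx_tail are free-running counters; masking with the ring
+     * size minus one works because the size is a power of two.
+     */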
-    return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
+    return target->tx_ring[target->tx_head
+                           & (ARRAY_SIZE(target->tx_ring) - 1)];
 }
 /*
+ * Send a request to the target.
+ *
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
-static int __srp_post_send(struct srp_target_port *target,
-                           struct srp_iu *iu, int len)
+static int __srp_post_send_req(struct srp_target_port *target,
+                               struct srp_iu *iu, int len)
 {
    struct ib_sge list;
    struct ib_send_wr wr, *bad_wr;
    int ret = 0;
+    BUILD_BUG_ON(!IS_POWER_OF_2(ARRAY_SIZE(target->tx_ring)));
+
    list.addr  = iu->dma;
    list.length = len;
    list.lkey  = target->srp_host->srp_dev->mr->lkey;
    wr.next    = NULL;
-    wr.wr_id    = target->tx_head & SRP_SQ_SIZE;
+    wr.wr_id    = target->tx_head & (ARRAY_SIZE(target->tx_ring) - 1);
    wr.sg_list   = &list;
    wr.num_sge   = 1;
    wr.opcode   = IB_WR_SEND;
@@ -1013,6 +1046,117 @@ static int __srp_post_send(struct srp_target_port *target,
    return ret;
 }
+/*
+ * Obtain an information unit for sending a response to the target.
+ *
+ * Must be called with target->scsi_host->host_lock held to protect
+ * txp_head and txp_tail.  Lock cannot be dropped between call here and
+ * call to __srp_post_send_rsp().
+ */
+static struct srp_iu *__srp_get_txp_iu(struct srp_target_port *target)
+{
+    BUILD_BUG_ON(!IS_POWER_OF_2(SRP_TXP_SIZE));
+
+    if (WARN_ON(target->txp_head - target->txp_tail >= SRP_TXP_SIZE))
+        return NULL;
+
+    return target->txp_ring[target->txp_head & (SRP_TXP_SIZE - 1)];
+}
+
+/*
+ * Send a response to a request received from the target.
+ *
+ * Must be called with target->scsi_host->host_lock held to protect txp_head.
+ */
+static int __srp_post_send_rsp(struct srp_target_port *target,
+                               struct srp_iu *iu, int len)
+{
+    struct ib_sge list;
+    struct ib_send_wr wr, *bad_wr;
+    int ret = 0;
+
+    BUILD_BUG_ON(!IS_POWER_OF_2(SRP_TXP_SIZE));
+
+    list.addr  = iu->dma;
+    list.length = len;
+    list.lkey  = target->srp_host->srp_dev->mr->lkey;
+
+    wr.next    = NULL;
+    wr.wr_id    = (target->txp_head & (SRP_TXP_SIZE - 1)) | SRP_OP_TXP;
+    wr.sg_list   = &list;
+    wr.num_sge   = 1;
+    wr.opcode   = IB_WR_SEND;
+    wr.send_flags = IB_SEND_SIGNALED;
+
+    ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+    if (!ret)
+        ++target->txp_head;
+
+    return ret;
+}
+
+static void srp_process_cred_req(struct srp_target_port *target,
+                                 struct srp_cred_req *req)
+{
+    struct ib_device *dev;
+    struct srp_iu *iu;
+    struct srp_cred_rsp *rsp;
+    unsigned long flags;
+    int res;
+    s32 delta;
+    u64 tag;
+
+    dev = target->srp_host->srp_dev->dev;
+    delta = (s32) be32_to_cpu(req->req_lim_delta);
+    tag = req->tag;
+
+    spin_lock_irqsave(target->scsi_host->host_lock, flags);
+
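+    /*
+     * An SRP_CRED_REQ carries a request-limit delta; adding it to req_lim
+     * restores the initiator's send credits.  The target expects an
+     * SRP_CRED_RSP that echoes the request's tag in return.
+     */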
+    target->req_lim += delta;
+
+    iu = __srp_get_txp_iu(target);
+    if (!iu)
+        goto out;
+
+    rsp = iu->buf;
+
+    ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof(*rsp),
+                               DMA_TO_DEVICE);
+
+    memset(rsp, 0, sizeof *rsp);
+    rsp->opcode = SRP_CRED_RSP;
+    rsp->tag   = tag;
+
+    ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
+                                  DMA_TO_DEVICE);
+
+    if (__srp_post_send_rsp(target, iu, sizeof(*rsp)))
+        shost_printk(KERN_ERR, target->scsi_host,
+                     PFX "Sending response failed\n");
+
+    res = __srp_post_recv(target);
+    if (res)
+        shost_printk(KERN_ERR, target->scsi_host,
+                     PFX "ib_post_recv() failed -- res = %d\n", res);
+
+out:
+    spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+}
+
+static void srp_process_aer_req(struct srp_target_port *target,
+                                struct srp_cred_req *req)
+{
+    int res;
+
+    shost_printk(KERN_ERR, target->scsi_host,
+                 PFX "received and ignored SRP_AER_REQ\n");
+    res = srp_post_recv(target);
+    if (res)
+        shost_printk(KERN_ERR, target->scsi_host,
+                     PFX "ib_post_recv() failed -- res = %d\n", res);
+}
+
 static int srp_queuecommand(struct scsi_cmnd *scmnd,
              void (*done)(struct scsi_cmnd *))
 {
@@ -1075,8 +1219,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
    ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
                   DMA_TO_DEVICE);
-    if (__srp_post_send(target, iu, len)) {
-        shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
+    if (__srp_post_send_req(target, iu, len)) {
+        shost_printk(KERN_ERR, target->scsi_host,
+                     PFX "Sending request failed\n");
        goto err_unmap;
    }
@@ -1095,7 +1240,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
    int i;
-    for (i = 0; i < SRP_RQ_SIZE; ++i) {
+    for (i = 0; i < ARRAY_SIZE(target->rx_ring); ++i) {
        target->rx_ring[i] = srp_alloc_iu(target->srp_host,
                         target->max_ti_iu_len,
                         GFP_KERNEL, DMA_FROM_DEVICE);
@@ -1103,7 +1248,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
            goto err;
    }
-    for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+    for (i = 0; i < ARRAY_SIZE(target->tx_ring); ++i) {
        target->tx_ring[i] = srp_alloc_iu(target->srp_host,
                         srp_max_iu_len,
                         GFP_KERNEL, DMA_TO_DEVICE);
@@ -1111,19 +1256,32 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
            goto err;
    }
+    for (i = 0; i < ARRAY_SIZE(target->txp_ring); ++i) {
+        target->txp_ring[i] = srp_alloc_iu(target->srp_host,
+                                           srp_max_iu_len,
+                                           GFP_KERNEL, DMA_TO_DEVICE);
+        if (!target->txp_ring[i])
+            goto err;
+    }
+
    return 0;
 err:
-    for (i = 0; i < SRP_RQ_SIZE; ++i) {
+    for (i = 0; i < ARRAY_SIZE(target->rx_ring); ++i) {
        srp_free_iu(target->srp_host, target->rx_ring[i]);
        target->rx_ring[i] = NULL;
    }
-    for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+    for (i = 0; i < ARRAY_SIZE(target->tx_ring); ++i) {
        srp_free_iu(target->srp_host, target->tx_ring[i]);
        target->tx_ring[i] = NULL;
    }
+    for (i = 0; i < ARRAY_SIZE(target->txp_ring); ++i) {
+        srp_free_iu(target->srp_host, target->txp_ring[i]);
+        target->txp_ring[i] = NULL;
+    }
+
    return -ENOMEM;
 }
@@ -1211,6 +1369,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 {
    struct srp_target_port *target = cm_id->context;
    struct ib_qp_attr *qp_attr = NULL;
+    int i;
    int attr_mask = 0;
    int comp = 0;
    int opcode = 0;
@@ -1263,7 +1422,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
        if (target->status)
            break;
-        target->status = srp_post_recv(target);
+        for (i = 0; i < SRP_RXR_SIZE; ++i) {
+            target->status = srp_post_recv(target);
+            if (target->status)
+                break;
+        }
        if (target->status)
            break;
@@ -1353,7 +1516,7 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
    tsk_mgmt->tsk_mgmt_func = func;
    tsk_mgmt->task_tag    = req->index;
-    if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
+    if (__srp_post_send_req(target, iu, sizeof *tsk_mgmt))
        goto out;
    req->tsk_mgmt = iu;
@@ -1529,6 +1692,18 @@ static ssize_t show_orig_dgid(struct device *dev,
    return sprintf(buf, "%pI6\n", target->orig_dgid);
 }
+static ssize_t show_req_lim(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+    struct srp_target_port *target = host_to_target(class_to_shost(dev));
+
+    if (target->state == SRP_TARGET_DEAD ||
+        target->state == SRP_TARGET_REMOVED)
+        return -ENODEV;
+
+    return sprintf(buf, "%d\n", target->req_lim);
+}
+
 static ssize_t show_zero_req_lim(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
@@ -1563,6 +1738,7 @@ static DEVICE_ATTR(service_id,    S_IRUGO, show_service_id,      NULL);
 static DEVICE_ATTR(pkey,      S_IRUGO, show_pkey,       NULL);
 static DEVICE_ATTR(dgid,      S_IRUGO, show_dgid,       NULL);
 static DEVICE_ATTR(orig_dgid,    S_IRUGO, show_orig_dgid,    NULL);
+static DEVICE_ATTR(req_lim,     S_IRUGO, show_req_lim,      NULL);
 static DEVICE_ATTR(zero_req_lim,   S_IRUGO, show_zero_req_lim,   NULL);
 static DEVICE_ATTR(local_ib_port,  S_IRUGO, show_local_ib_port,  NULL);
 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
@@ -1574,6 +1750,7 @@ static struct device_attribute *srp_host_attrs[] = {
    &dev_attr_pkey,
    &dev_attr_dgid,
    &dev_attr_orig_dgid,
+    &dev_attr_req_lim,
    &dev_attr_zero_req_lim,
    &dev_attr_local_ib_port,
    &dev_attr_local_ib_device,
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -57,10 +57,20 @@ enum {
    SRP_MAX_LUN       = 512,
    SRP_DEF_SG_TABLESIZE   = 12,
+    /*
+     * Receive queue: queue for receiving responses and requests from the
+     * target.
+     */
    SRP_RQ_SHIFT       = 6,
    SRP_RQ_SIZE       = 1 << SRP_RQ_SHIFT,
-    SRP_SQ_SIZE        = SRP_RQ_SIZE - 1,
-    SRP_CQ_SIZE        = SRP_SQ_SIZE + SRP_RQ_SIZE,
+    /* Number of receive slots reserved for receiving requests. */
+    SRP_RXR_SIZE       = 2,
+    /* Send queue: queue for sending requests to the target. */
+    SRP_SQ_SIZE        = SRP_RQ_SIZE - SRP_RXR_SIZE - 1,
+    /* Size of the queue for sending responses to the target. */
+    SRP_TXP_SIZE       = 2,
+    /* Completion queue. */
+    SRP_CQ_SIZE        = SRP_SQ_SIZE + SRP_TXP_SIZE + SRP_RQ_SIZE,
    SRP_TAG_TSK_MGMT     = 1 << (SRP_RQ_SHIFT + 1),
@@ -69,6 +79,9 @@ enum {
    SRP_FMR_DIRTY_SIZE    = SRP_FMR_POOL_SIZE / 4
 };
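+/*
+ * A work request id packs the ring index of the information unit into its
+ * low-order bits and one of the flags below into its high-order bits, so
+ * the completion handler can tell receives, response sends and request
+ * sends apart.
+ */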
+/* wr_id / wc_id flag for marking responses sent to the target. */
+#define SRP_OP_TXP       (1 << 30)
+/* wr_id / wc_id flag for marking receive operations. */
 #define SRP_OP_RECV       (1 << 31)
 enum srp_target_state {
@@ -141,12 +154,20 @@ struct srp_target_port {
    int           zero_req_lim;
+
+    /* Receive ring for responses and requests received by the initiator. */
    unsigned         rx_head;
    struct srp_iu      *rx_ring[SRP_RQ_SIZE];
+    /* Transmit ring for requests sent to the target. */
    unsigned         tx_head;
    unsigned         tx_tail;
-    struct srp_iu      *tx_ring[SRP_SQ_SIZE + 1];
+    struct srp_iu      *tx_ring[SRP_SQ_SIZE + SRP_RXR_SIZE + 1];
+
+    /* Transmit ring for responses sent to the target. */
+    unsigned         txp_head;
+    unsigned         txp_tail;
+    struct srp_iu      *txp_ring[SRP_TXP_SIZE];
    struct list_head     free_reqs;
    struct list_head     req_queue;
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -239,4 +239,26 @@ struct srp_rsp {
    u8    data[0];
 } __attribute__((packed));
+/*
+ * SRP_CRED_REQ information unit, as defined in section 6.10 of the
+ * T10 SRP r16a document.
+ */
+struct srp_cred_req {
+    u8     opcode;
+    u8     sol_not;
+    u8     reserved[2];
+    __be32 req_lim_delta;
+    u64    tag;
+} __attribute__((packed));
+
+/*
+ * SRP_CRED_RSP information unit, as defined in section 6.11 of the
+ * T10 SRP r16a document.
+ */
+struct srp_cred_rsp {
+    u8     opcode;
+    u8     reserved[7];
+    u64    tag;
+} __attribute__((packed));
+
 #endif /* SCSI_SRP_H */