@@ -1304,4 +1304,6 @@ int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
struct ib_cq *ib_cq);
+int hns_roce_fill_res_ctx_entry(struct sk_buff *msg, struct ib_ucontext *ctx);
+
#endif /* _HNS_ROCE_DEVICE_H */
@@ -546,6 +546,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.destroy_cq = hns_roce_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+ .fill_res_ctx_entry = hns_roce_fill_res_ctx_entry,
.get_dma_mr = hns_roce_get_dma_mr,
.get_link_layer = hns_roce_get_link_layer,
.get_port_immutable = hns_roce_port_immutable,
@@ -118,3 +118,53 @@ int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
kfree(context);
return ret;
}
+
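+/* Report a snapshot of the DCA pool statistics as driver nl attributes. */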
+static int hns_roce_fill_dca_uctx(struct hns_roce_dca_ctx *ctx,
+ struct sk_buff *msg)
+{
+ unsigned long flags;
+ u64 total, free;
+
+ spin_lock_irqsave(&ctx->pool_lock, flags);
+ total = ctx->total_size;
+ free = ctx->free_size;
+ spin_unlock_irqrestore(&ctx->pool_lock, flags);
+
+ if (rdma_nl_put_driver_u64(msg, "dca-total", total))
+ goto err;
+
+ if (rdma_nl_put_driver_u64(msg, "dca-free", free))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
+
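+/* Provide driver-specific details when a ucontext is dumped via restrack. */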
+int hns_roce_fill_res_ctx_entry(struct sk_buff *msg, struct ib_ucontext *ctx)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ctx->device);
+ struct hns_roce_ucontext *uctx = to_hr_ucontext(ctx);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DCA_MODE) {
+ if (hns_roce_fill_dca_uctx(&uctx->dca_ctx, msg))
+ goto err_cancel_table;
+ }
+
+ nla_nest_end(msg, table_attr);
+
+ return 0;
+
+err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}