@@ -2599,6 +2599,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, alloc_hw_device_stats);
SET_DEVICE_OP(dev_ops, alloc_hw_port_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
+ SET_DEVICE_OP(dev_ops, alloc_mr_crypto);
SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
SET_DEVICE_OP(dev_ops, alloc_mw);
SET_DEVICE_OP(dev_ops, alloc_pd);
@@ -44,6 +44,8 @@ int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr,
if (type == IB_MR_TYPE_INTEGRITY)
mr = ib_alloc_mr_integrity(qp->pd, max_num_sg,
max_num_meta_sg);
+ else if (type == IB_MR_TYPE_CRYPTO)
+ mr = ib_alloc_mr_crypto(qp->pd, max_num_sg);
else
mr = ib_alloc_mr(qp->pd, type, max_num_sg);
if (IS_ERR(mr)) {
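With this hunk in place, a ULP can fill its per-QP MR pool with crypto MRs the same way it already does for integrity MRs. A minimal sketch, assuming the QP, list head, pool size, and SG limit are the caller's own (illustrative values, not taken from the patch):

/* Sketch only: qp is an ib_qp owned by the ULP, mr_list its pool list head. */
#include <rdma/mr_pool.h>

static int ulp_init_crypto_mr_pool(struct ib_qp *qp, struct list_head *mr_list)
{
	/* 16 MRs with up to 64 SG entries each; crypto MRs take no metadata SGs. */
	return ib_mr_pool_init(qp, mr_list, 16, IB_MR_TYPE_CRYPTO, 64, 0);
}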
@@ -2179,6 +2179,7 @@ int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
struct ib_pd *pd = mr->pd;
struct ib_dm *dm = mr->dm;
struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
+ struct ib_crypto_attrs *crypto_attrs = mr->crypto_attrs;
int ret;
trace_mr_dereg(mr);
@@ -2189,6 +2190,7 @@ int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
if (dm)
atomic_dec(&dm->usecnt);
kfree(sig_attrs);
+ kfree(crypto_attrs);
}
return ret;
@@ -2217,7 +2219,8 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
goto out;
}
- if (mr_type == IB_MR_TYPE_INTEGRITY) {
+ if (mr_type == IB_MR_TYPE_INTEGRITY ||
+ mr_type == IB_MR_TYPE_CRYPTO) {
WARN_ON_ONCE(1);
mr = ERR_PTR(-EINVAL);
goto out;
@@ -2294,6 +2297,7 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
mr->uobject = NULL;
atomic_inc(&pd->usecnt);
mr->need_inval = false;
+ mr->crypto_attrs = NULL;
mr->type = IB_MR_TYPE_INTEGRITY;
mr->sig_attrs = sig_attrs;
@@ -2306,6 +2310,56 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
}
EXPORT_SYMBOL(ib_alloc_mr_integrity);
+/**
+ * ib_alloc_mr_crypto() - Allocates a crypto memory region
+ * @pd: protection domain associated with the region
+ * @max_num_sg: maximum sg entries available for registration
+ *
+ * Notes:
+ * Memory registration page/sg lists must not exceed max_num_sg.
+ *
+ */
+struct ib_mr *ib_alloc_mr_crypto(struct ib_pd *pd, u32 max_num_sg)
+{
+ struct ib_mr *mr;
+ struct ib_crypto_attrs *crypto_attrs;
+
+ if (!pd->device->ops.alloc_mr_crypto) {
+ mr = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
+
+ crypto_attrs = kzalloc(sizeof(*crypto_attrs), GFP_KERNEL);
+ if (!crypto_attrs) {
+ mr = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ mr = pd->device->ops.alloc_mr_crypto(pd, max_num_sg);
+ if (IS_ERR(mr)) {
+ kfree(crypto_attrs);
+ goto out;
+ }
+
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->dm = NULL;
+ mr->uobject = NULL;
+ atomic_inc(&pd->usecnt);
+ mr->need_inval = false;
+ mr->sig_attrs = NULL;
+ mr->type = IB_MR_TYPE_CRYPTO;
+ mr->crypto_attrs = crypto_attrs;
+
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_parent_name(&mr->res, &pd->res);
+ rdma_restrack_add(&mr->res);
+out:
+ trace_mr_crypto_alloc(pd, max_num_sg, mr);
+ return mr;
+}
+EXPORT_SYMBOL(ib_alloc_mr_crypto);
+
/**
* ib_create_dek - Create a DEK (Data Encryption Key) associated with the
* specific protection domain.
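For consumers that manage MRs themselves rather than through the MR pool, the new verb can be called directly. A hedged sketch of the allocate path under the hunk above (the SG limit and function name are illustrative):

/* Sketch only: pd is a protection domain already allocated by the caller. */
static int ulp_alloc_crypto_mr(struct ib_pd *pd, struct ib_mr **out_mr)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr_crypto(pd, 32 /* max_num_sg, illustrative */);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/*
	 * mr->crypto_attrs was kzalloc'ed by the core; the caller fills it
	 * before posting crypto work. ib_dereg_mr() releases it again via
	 * the kfree(crypto_attrs) added to ib_dereg_mr_user() above.
	 */
	*out_mr = mr;
	return 0;
}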
@@ -34,6 +34,49 @@ struct ib_crypto_caps {
u32 max_num_deks;
};
+/**
+ * enum ib_crypto_domain - Encryption domain
+ * Based on the encryption domain and the data direction, the HW can
+ * determine whether it needs to encrypt or decrypt the data.
+ * @IB_CRYPTO_ENCRYPTED_WIRE_DOMAIN: encrypted data is in the wire domain.
+ * @IB_CRYPTO_ENCRYPTED_MEM_DOMAIN: encrypted data is in the memory domain.
+ */
+enum ib_crypto_domain {
+ IB_CRYPTO_ENCRYPTED_WIRE_DOMAIN,
+ IB_CRYPTO_ENCRYPTED_MEM_DOMAIN,
+};
+
+/**
+ * enum ib_crypto_standard - Encryption standard
+ * @IB_CRYPTO_AES_XTS: AES-XTS encryption.
+ */
+enum ib_crypto_standard {
+ IB_CRYPTO_AES_XTS,
+};
+
+/* XTS initial tweak size is up to 128 bits, 16 bytes. */
+#define IB_CRYPTO_XTS_TWEAK_MAX_SIZE 16
+
+/**
+ * struct ib_crypto_attrs - Parameters for crypto handover operation
+ * @encrypt_domain: specific encryption domain.
+ * @encrypt_standard: specific encryption standard.
+ * @data_unit_size: data unit size in bytes, e.g. the filesystem block size
+ * or the disk sector size.
+ * @xts_init_tweak: initial tweak value used when encrypting each data unit.
+ * The device increments this value for every data_unit_size bytes of the
+ * message.
+ * @dek: Data Encryption Key index.
+ */
+struct ib_crypto_attrs {
+ enum ib_crypto_domain encrypt_domain;
+ enum ib_crypto_standard encrypt_standard;
+ int data_unit_size;
+ /* Today we support only AES-XTS */
+ u32 xts_init_tweak[IB_CRYPTO_XTS_TWEAK_MAX_SIZE / sizeof(u32)];
+ u32 dek;
+};
+
/**
* enum ib_crypto_key_type - Cryptographic key types
* @IB_CRYPTO_KEY_TYPE_AES_XTS: Key of type AES-XTS, which can be used when
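Taken together, the enums and struct above describe one AES-XTS mapping of a registered buffer. A hedged example of how a consumer might populate the attributes for 512-byte data units, assuming dek_index was obtained earlier from ib_create_dek() (the tweak value is arbitrary):

/* Sketch only: attrs points at mr->crypto_attrs of an IB_MR_TYPE_CRYPTO MR. */
static void ulp_fill_crypto_attrs(struct ib_crypto_attrs *attrs, u32 dek_index)
{
	memset(attrs, 0, sizeof(*attrs));
	/* Encrypted data lives on the wire; memory holds plaintext. */
	attrs->encrypt_domain = IB_CRYPTO_ENCRYPTED_WIRE_DOMAIN;
	attrs->encrypt_standard = IB_CRYPTO_AES_XTS;
	attrs->data_unit_size = 512;	/* e.g. a 512-byte disk sector */
	attrs->xts_init_tweak[0] = 0;	/* tweak for the first data unit */
	attrs->dek = dek_index;
}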
@@ -876,6 +876,8 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
* without address translations (VA=PA)
* @IB_MR_TYPE_INTEGRITY: memory region that is used for
* data integrity operations
+ * @IB_MR_TYPE_CRYPTO: memory region that is used for cryptographic
+ * operations
*/
enum ib_mr_type {
IB_MR_TYPE_MEM_REG,
@@ -884,6 +886,7 @@ enum ib_mr_type {
IB_MR_TYPE_USER,
IB_MR_TYPE_DMA,
IB_MR_TYPE_INTEGRITY,
+ IB_MR_TYPE_CRYPTO,
};
enum ib_mr_status_check {
@@ -1854,6 +1857,7 @@ struct ib_mr {
struct ib_dm *dm;
struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
+ struct ib_crypto_attrs *crypto_attrs; /* only for IB_MR_TYPE_CRYPTO MRs */
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -2512,6 +2516,7 @@ struct ib_device_ops {
struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
u32 max_num_data_sg,
u32 max_num_meta_sg);
+ struct ib_mr *(*alloc_mr_crypto)(struct ib_pd *pd, u32 max_num_sg);
struct ib_dek *(*create_dek)(struct ib_pd *pd,
struct ib_dek_attr *attr);
void (*destroy_dek)(struct ib_dek *dek);
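A provider that supports crypto MRs wires the new callback into its ib_device_ops table, which ib_set_device_ops() (first hunk) now copies. A hedged sketch with a hypothetical "mydrv" driver prefix, not part of the patch:

/* Sketch only: mydrv_* names are placeholders for a real provider driver. */
static struct ib_mr *mydrv_alloc_mr_crypto(struct ib_pd *pd, u32 max_num_sg)
{
	/* Allocate the device-specific crypto MR, or return an ERR_PTR(). */
	return ERR_PTR(-EOPNOTSUPP);
}

static const struct ib_device_ops mydrv_dev_ops = {
	.alloc_mr_crypto = mydrv_alloc_mr_crypto,
	/* ... remaining callbacks ... */
};

The core then reaches the driver through pd->device->ops.alloc_mr_crypto in ib_alloc_mr_crypto() above.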
@@ -4295,6 +4300,8 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
u32 max_num_data_sg,
u32 max_num_meta_sg);
+struct ib_mr *ib_alloc_mr_crypto(struct ib_pd *pd, u32 max_num_sg);
+
/**
* ib_update_fast_reg_key - updates the key portion of the fast_reg MR
* R_Key and L_Key.
@@ -371,6 +371,39 @@ TRACE_EVENT(mr_integ_alloc,
__entry->max_num_meta_sg, __entry->rc)
);
+TRACE_EVENT(mr_crypto_alloc,
+ TP_PROTO(
+ const struct ib_pd *pd,
+ u32 max_num_sg,
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(pd, max_num_sg, mr),
+
+ TP_STRUCT__entry(
+ __field(u32, pd_id)
+ __field(u32, mr_id)
+ __field(u32, max_num_sg)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_id = pd->res.id;
+ if (IS_ERR(mr)) {
+ __entry->mr_id = 0;
+ __entry->rc = PTR_ERR(mr);
+ } else {
+ __entry->mr_id = mr->res.id;
+ __entry->rc = 0;
+ }
+ __entry->max_num_sg = max_num_sg;
+ ),
+
+ TP_printk("pd.id=%u mr.id=%u max_num_sg=%u rc=%d",
+ __entry->pd_id, __entry->mr_id, __entry->max_num_sg,
+ __entry->rc)
+);
+
TRACE_EVENT(mr_dereg,
TP_PROTO(
const struct ib_mr *mr