diff mbox series

[07/12] nvme: Implement In-Band authentication

Message ID 20211112125928.97318-8-hare@suse.de (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Headers show
Series nvme: In-band authentication support | expand

Commit Message

Hannes Reinecke Nov. 12, 2021, 12:59 p.m. UTC
Implement NVMe-oF In-Band authentication according to NVMe TPAR 8006.
This patch adds two new fabric options 'dhchap_secret' to specify the
pre-shared key (in ASCII representation according to NVMe 2.0 section
8.13.5.8 'Secret representation') and 'dhchap_ctrl_secret' to specify
the pre-shared controller key for bi-directional authentication of both
the host and the controller.
Re-authentication can be triggered by writing the PSK into the new
controller sysfs attribute 'dhchap_secret' or 'dhchap_ctrl_secret'.

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/nvme/host/Kconfig   |   11 +
 drivers/nvme/host/Makefile  |    1 +
 drivers/nvme/host/auth.c    | 1164 +++++++++++++++++++++++++++++++++++
 drivers/nvme/host/auth.h    |   25 +
 drivers/nvme/host/core.c    |  133 +++-
 drivers/nvme/host/fabrics.c |   79 ++-
 drivers/nvme/host/fabrics.h |    7 +
 drivers/nvme/host/nvme.h    |   36 ++
 drivers/nvme/host/tcp.c     |    1 +
 drivers/nvme/host/trace.c   |   32 +
 10 files changed, 1482 insertions(+), 7 deletions(-)
 create mode 100644 drivers/nvme/host/auth.c
 create mode 100644 drivers/nvme/host/auth.h

Comments

Sagi Grimberg Nov. 16, 2021, 10:25 a.m. UTC | #1
On 11/12/21 2:59 PM, Hannes Reinecke wrote:
> Implement NVMe-oF In-Band authentication according to NVMe TPAR 8006.
> This patch adds two new fabric options 'dhchap_secret' to specify the
> pre-shared key (in ASCII respresentation according to NVMe 2.0 section
> 8.13.5.8 'Secret representation') and 'dhchap_ctrl_secret' to specify
> the pre-shared controller key for bi-directional authentication of both
> the host and the controller.
> Re-authentication can be triggered by writing the PSK into the new
> controller sysfs attribute 'dhchap_secret' or 'dhchap_ctrl_secret'.
> 
> Signed-off-by: Hannes Reinecke <hare@suse.de>
> ---
>   drivers/nvme/host/Kconfig   |   11 +
>   drivers/nvme/host/Makefile  |    1 +
>   drivers/nvme/host/auth.c    | 1164 +++++++++++++++++++++++++++++++++++
>   drivers/nvme/host/auth.h    |   25 +
>   drivers/nvme/host/core.c    |  133 +++-
>   drivers/nvme/host/fabrics.c |   79 ++-
>   drivers/nvme/host/fabrics.h |    7 +
>   drivers/nvme/host/nvme.h    |   36 ++
>   drivers/nvme/host/tcp.c     |    1 +
>   drivers/nvme/host/trace.c   |   32 +
>   10 files changed, 1482 insertions(+), 7 deletions(-)
>   create mode 100644 drivers/nvme/host/auth.c
>   create mode 100644 drivers/nvme/host/auth.h
> 
> diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
> index dc0450ca23a3..49269c581ec4 100644
> --- a/drivers/nvme/host/Kconfig
> +++ b/drivers/nvme/host/Kconfig
> @@ -83,3 +83,14 @@ config NVME_TCP
>   	  from https://github.com/linux-nvme/nvme-cli.
>   
>   	  If unsure, say N.
> +
> +config NVME_AUTH
> +	bool "NVM Express over Fabrics In-Band Authentication"
> +	depends on NVME_CORE
> +	select CRYPTO_HMAC
> +	select CRYPTO_SHA256
> +	select CRYPTO_SHA512
> +	help
> +	  This provides support for NVMe over Fabrics In-Band Authentication.
> +
> +	  If unsure, say N.
> diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
> index dfaacd472e5d..4bae2a4a8d8c 100644
> --- a/drivers/nvme/host/Makefile
> +++ b/drivers/nvme/host/Makefile
> @@ -15,6 +15,7 @@ nvme-core-$(CONFIG_NVME_MULTIPATH)	+= multipath.o
>   nvme-core-$(CONFIG_BLK_DEV_ZONED)	+= zns.o
>   nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS)	+= fault_inject.o
>   nvme-core-$(CONFIG_NVME_HWMON)		+= hwmon.o
> +nvme-core-$(CONFIG_NVME_AUTH)		+= auth.o
>   
>   nvme-y					+= pci.o
>   
> diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
> new file mode 100644
> index 000000000000..6ab95a178213
> --- /dev/null
> +++ b/drivers/nvme/host/auth.c
> @@ -0,0 +1,1164 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
> + */
> +
> +#include <linux/crc32.h>
> +#include <linux/base64.h>
> +#include <asm/unaligned.h>
> +#include <crypto/hash.h>
> +#include <crypto/dh.h>
> +#include <crypto/ffdhe.h>
> +#include "nvme.h"
> +#include "fabrics.h"
> +#include "auth.h"
> +
> +static atomic_t nvme_dhchap_seqnum = ATOMIC_INIT(0);
> +
> +struct nvme_dhchap_queue_context {
> +	struct list_head entry;
> +	struct work_struct auth_work;
> +	struct nvme_ctrl *ctrl;
> +	struct crypto_shash *shash_tfm;
> +	void *buf;
> +	size_t buf_size;
> +	int qid;
> +	int error;
> +	u32 s1;
> +	u32 s2;
> +	u16 transaction;
> +	u8 status;
> +	u8 hash_id;
> +	u8 hash_len;
> +	u8 dhgroup_id;
> +	u8 c1[64];
> +	u8 c2[64];
> +	u8 response[64];
> +	u8 *host_response;
> +};
> +
> +static struct nvme_auth_dhgroup_map {
> +	int id;
> +	const char name[16];
> +	const char kpp[16];
> +	int privkey_size;
> +	int pubkey_size;
> +} dhgroup_map[] = {
> +	{ .id = NVME_AUTH_DHCHAP_DHGROUP_NULL,
> +	  .name = "null", .kpp = "null",
> +	  .privkey_size = 0, .pubkey_size = 0 },
> +	{ .id = NVME_AUTH_DHCHAP_DHGROUP_2048,
> +	  .name = "ffdhe2048", .kpp = "dh",
> +	  .privkey_size = 256, .pubkey_size = 256 },
> +	{ .id = NVME_AUTH_DHCHAP_DHGROUP_3072,
> +	  .name = "ffdhe3072", .kpp = "dh",
> +	  .privkey_size = 384, .pubkey_size = 384 },
> +	{ .id = NVME_AUTH_DHCHAP_DHGROUP_4096,
> +	  .name = "ffdhe4096", .kpp = "dh",
> +	  .privkey_size = 512, .pubkey_size = 512 },
> +	{ .id = NVME_AUTH_DHCHAP_DHGROUP_6144,
> +	  .name = "ffdhe6144", .kpp = "dh",
> +	  .privkey_size = 768, .pubkey_size = 768 },
> +	{ .id = NVME_AUTH_DHCHAP_DHGROUP_8192,
> +	  .name = "ffdhe8192", .kpp = "dh",
> +	  .privkey_size = 1024, .pubkey_size = 1024 },
> +};
> +
> +const char *nvme_auth_dhgroup_name(int dhgroup_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
> +		if (dhgroup_map[i].id == dhgroup_id)
> +			return dhgroup_map[i].name;
> +	}
> +	return NULL;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
> +
> +int nvme_auth_dhgroup_pubkey_size(int dhgroup_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
> +		if (dhgroup_map[i].id == dhgroup_id)
> +			return dhgroup_map[i].pubkey_size;
> +	}
> +	return -1;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_pubkey_size);
> +
> +int nvme_auth_dhgroup_privkey_size(int dhgroup_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
> +		if (dhgroup_map[i].id == dhgroup_id)
> +			return dhgroup_map[i].privkey_size;
> +	}
> +	return -1;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_privkey_size);
> +
> +const char *nvme_auth_dhgroup_kpp(int dhgroup_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
> +		if (dhgroup_map[i].id == dhgroup_id)
> +			return dhgroup_map[i].kpp;
> +	}
> +	return NULL;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
> +
> +int nvme_auth_dhgroup_id(const char *dhgroup_name)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
> +		if (!strncmp(dhgroup_map[i].name, dhgroup_name,
> +			     strlen(dhgroup_map[i].name)))
> +			return dhgroup_map[i].id;
> +	}
> +	return -1;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
> +
> +static struct nvme_dhchap_hash_map {
> +	int id;
> +	int len;
> +	const char hmac[15];
> +	const char digest[15];
> +} hash_map[] = {
> +	{.id = NVME_AUTH_DHCHAP_SHA256, .len = 32,
> +	 .hmac = "hmac(sha256)", .digest = "sha256" },
> +	{.id = NVME_AUTH_DHCHAP_SHA384, .len = 48,
> +	 .hmac = "hmac(sha384)", .digest = "sha384" },
> +	{.id = NVME_AUTH_DHCHAP_SHA512, .len = 64,
> +	 .hmac = "hmac(sha512)", .digest = "sha512" },
> +};
> +
> +const char *nvme_auth_hmac_name(int hmac_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
> +		if (hash_map[i].id == hmac_id)
> +			return hash_map[i].hmac;
> +	}
> +	return NULL;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
> +
> +const char *nvme_auth_digest_name(int hmac_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
> +		if (hash_map[i].id == hmac_id)
> +			return hash_map[i].digest;
> +	}
> +	return NULL;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
> +
> +int nvme_auth_hmac_id(const char *hmac_name)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
> +		if (!strncmp(hash_map[i].hmac, hmac_name,
> +			     strlen(hash_map[i].hmac)))
> +			return hash_map[i].id;
> +	}
> +	return -1;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
> +
> +int nvme_auth_hmac_hash_len(int hmac_id)
> +{
> +	int i;
> +
> +	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
> +		if (hash_map[i].id == hmac_id)
> +			return hash_map[i].len;
> +	}
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
> +
> +unsigned char *nvme_auth_extract_secret(unsigned char *secret, u8 key_hash,
> +					size_t *out_len)
> +{
> +	unsigned char *key, *p;
> +	u32 crc;
> +	int key_len;
> +	size_t allocated_len = strlen(secret);
> +
> +	/* Secret might be affixed with a ':' */
> +	p = strrchr(secret, ':');
> +	if (p)
> +		allocated_len = p - secret;
> +	key = kzalloc(allocated_len, GFP_KERNEL);
> +	if (!key)
> +		return ERR_PTR(-ENOMEM);
> +
> +	key_len = base64_decode(secret, allocated_len, key);
> +	if (key_len < 0) {
> +		pr_debug("base64 key decoding error %d\n",
> +			 key_len);
> +		return ERR_PTR(key_len);
> +	}
> +	if (key_len != 36 && key_len != 52 &&
> +	    key_len != 68) {
> +		pr_debug("Invalid key len %d\n",
> +			 key_len);

pr_err?

> +		kfree_sensitive(key);
> +		return ERR_PTR(-EINVAL);
> +	}
> +	if (key_hash > 0 &&
> +	    (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
> +		pr_debug("Invalid key len %d for %s\n", key_len,
> +			 nvme_auth_hmac_name(key_hash));

pr_err?

> +		kfree_sensitive(key);
> +		return ERR_PTR(-EINVAL);
> +	}
> +
> +	/* The last four bytes is the CRC in little-endian format */
> +	key_len -= 4;
> +	/*
> +	 * The linux implementation doesn't do pre- and post-increments,
> +	 * so we have to do it manually.
> +	 */
> +	crc = ~crc32(~0, key, key_len);
> +
> +	if (get_unaligned_le32(key + key_len) != crc) {
> +		pr_debug("DH-HMAC-CHAP key crc mismatch (key %08x, crc %08x)\n",
> +		       get_unaligned_le32(key + key_len), crc);

pr_err?

> +		kfree_sensitive(key);
> +		return ERR_PTR(-EKEYREJECTED);
> +	}
> +	*out_len = key_len;
> +	return key;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_extract_secret);
> +
> +u8 *nvme_auth_transform_key(u8 *key, size_t key_len, u8 key_hash, char *nqn)
> +{
> +	const char *hmac_name = nvme_auth_hmac_name(key_hash);
> +	struct crypto_shash *key_tfm;
> +	struct shash_desc *shash;
> +	u8 *transformed_key;
> +	int ret;
> +
> +	if (key_hash == 0) {
> +		transformed_key = kmemdup(key, key_len, GFP_KERNEL);
> +		return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
> +	}
> +
> +	if (!key || !key_len) {
> +		pr_warn("No key specified\n");

pr_err?

> +		return ERR_PTR(-ENOKEY);
> +	}
> +	if (!hmac_name) {
> +		pr_warn("Invalid key hash id %d\n", key_hash);

pr_err?

> +		return ERR_PTR(-EINVAL);
> +	}
> +
> +	key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
> +	if (IS_ERR(key_tfm))
> +		return (u8 *)key_tfm;
> +
> +	shash = kmalloc(sizeof(struct shash_desc) +
> +			crypto_shash_descsize(key_tfm),
> +			GFP_KERNEL);
> +	if (!shash) {
> +		ret = -ENOMEM;
> +		goto out_free_key;
> +	}
> +
> +	transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
> +	if (!transformed_key) {
> +		ret = -ENOMEM;
> +		goto out_free_shash;
> +	}
> +
> +	shash->tfm = key_tfm;
> +	ret = crypto_shash_setkey(key_tfm, key, key_len);
> +	if (ret < 0)
> +		goto out_free_shash;
> +	ret = crypto_shash_init(shash);
> +	if (ret < 0)
> +		goto out_free_shash;
> +	ret = crypto_shash_update(shash, nqn, strlen(nqn));
> +	if (ret < 0)
> +		goto out_free_shash;
> +	ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
> +	if (ret < 0)
> +		goto out_free_shash;
> +	ret = crypto_shash_final(shash, transformed_key);
> +out_free_shash:
> +	kfree(shash);
> +out_free_key:
> +	crypto_free_shash(key_tfm);
> +	if (ret < 0) {
> +		kfree_sensitive(transformed_key);
> +		return ERR_PTR(ret);
> +	}
> +	return transformed_key;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
> +
> +static int nvme_auth_send(struct nvme_ctrl *ctrl, int qid,
> +		void *data, size_t tl)
> +{
> +	struct nvme_command cmd = {};
> +	blk_mq_req_flags_t flags = qid == NVME_QID_ANY ?
> +		0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED;
> +	struct request_queue *q = qid == NVME_QID_ANY ?
> +		ctrl->fabrics_q : ctrl->connect_q;
> +	int ret;
> +
> +	cmd.auth_send.opcode = nvme_fabrics_command;
> +	cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
> +	cmd.auth_send.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
> +	cmd.auth_send.spsp0 = 0x01;
> +	cmd.auth_send.spsp1 = 0x01;
> +	cmd.auth_send.tl = cpu_to_le32(tl);
> +
> +	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, tl, 0, qid,
> +				     0, flags);
> +	if (ret > 0)
> +		dev_dbg(ctrl->device,
> +			"%s: qid %d nvme status %d\n", __func__, qid, ret);

dev_err? Also can we phrase "failed auth_send" instead of the __func__?

> +	else if (ret < 0)
> +		dev_dbg(ctrl->device,
> +			"%s: qid %d error %d\n", __func__, qid, ret);

dev_err?

> +	return ret;
> +}
> +
> +static int nvme_auth_receive(struct nvme_ctrl *ctrl, int qid,
> +		void *buf, size_t al)
> +{
> +	struct nvme_command cmd = {};
> +	blk_mq_req_flags_t flags = qid == NVME_QID_ANY ?
> +		0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED;
> +	struct request_queue *q = qid == NVME_QID_ANY ?
> +		ctrl->fabrics_q : ctrl->connect_q;
> +	int ret;
> +
> +	cmd.auth_receive.opcode = nvme_fabrics_command;
> +	cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
> +	cmd.auth_receive.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
> +	cmd.auth_receive.spsp0 = 0x01;
> +	cmd.auth_receive.spsp1 = 0x01;
> +	cmd.auth_receive.al = cpu_to_le32(al);
> +
> +	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, buf, al, 0, qid,
> +				     0, flags);
> +	if (ret > 0) {
> +		dev_dbg(ctrl->device, "%s: qid %d nvme status %x\n",
> +			__func__, qid, ret);

dev_err? "failed auth_recv" instead of the __func__

> +		ret = -EIO;
> +	}
> +	if (ret < 0) {
> +		dev_dbg(ctrl->device, "%s: qid %d error %d\n",
> +			__func__, qid, ret);

dev_err

> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
> +		struct nvmf_auth_dhchap_failure_data *data,
> +		u16 transaction, u8 expected_msg)
> +{
> +	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
> +		__func__, qid, data->auth_type, data->auth_id);
> +
> +	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
> +	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
> +		return data->rescode_exp;
> +	}
> +	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
> +	    data->auth_id != expected_msg) {
> +		dev_warn(ctrl->device,
> +			 "qid %d invalid message %02x/%02x\n",
> +			 qid, data->auth_type, data->auth_id);
> +		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
> +	}
> +	if (le16_to_cpu(data->t_id) != transaction) {
> +		dev_warn(ctrl->device,
> +			 "qid %d invalid transaction ID %d\n",
> +			 qid, le16_to_cpu(data->t_id));

why not dev_err?

> +		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
> +	}
> +	return 0;
> +}
> +
> +static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)
> +{
> +	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
> +	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
> +
> +	if (chap->buf_size < size) {
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
> +		return -EINVAL;
> +	}
> +	memset((u8 *)chap->buf, 0, size);
> +	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
> +	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
> +	data->t_id = cpu_to_le16(chap->transaction);
> +	data->sc_c = 0; /* No secure channel concatenation */
> +	data->napd = 1;
> +	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
> +	data->auth_protocol[0].dhchap.halen = 3;
> +	data->auth_protocol[0].dhchap.dhlen = 6;
> +	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_DHCHAP_SHA256;
> +	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_DHCHAP_SHA384;
> +	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_DHCHAP_SHA512;
> +	data->auth_protocol[0].dhchap.idlist[3] = NVME_AUTH_DHCHAP_DHGROUP_NULL;
> +	data->auth_protocol[0].dhchap.idlist[4] = NVME_AUTH_DHCHAP_DHGROUP_2048;
> +	data->auth_protocol[0].dhchap.idlist[5] = NVME_AUTH_DHCHAP_DHGROUP_3072;
> +	data->auth_protocol[0].dhchap.idlist[6] = NVME_AUTH_DHCHAP_DHGROUP_4096;
> +	data->auth_protocol[0].dhchap.idlist[7] = NVME_AUTH_DHCHAP_DHGROUP_6144;
> +	data->auth_protocol[0].dhchap.idlist[8] = NVME_AUTH_DHCHAP_DHGROUP_8192;
> +
> +	return size;
> +}
> +
> +static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)
> +{
> +	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
> +	u16 dhvlen = le16_to_cpu(data->dhvlen);
> +	size_t size = sizeof(*data) + data->hl + dhvlen;
> +	const char *hmac_name, *kpp_name;
> +
> +	if (chap->buf_size < size) {
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
> +		return NVME_SC_INVALID_FIELD;
> +	}
> +
> +	hmac_name = nvme_auth_hmac_name(data->hashid);
> +	if (!hmac_name) {
> +		dev_warn(ctrl->device,
> +			 "qid %d: invalid HASH ID %d\n",
> +			 chap->qid, data->hashid);
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
> +		return NVME_SC_INVALID_FIELD;
> +	}
> +
> +	if (chap->hash_id == data->hashid && chap->shash_tfm &&
> +	    !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
> +	    crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
> +		dev_dbg(ctrl->device,
> +			"qid %d: reuse existing hash %s\n",
> +			chap->qid, hmac_name);
> +		goto select_kpp;
> +	}
> +
> +	/* Reset if hash cannot be reused */
> +	if (chap->shash_tfm) {
> +		crypto_free_shash(chap->shash_tfm);
> +		chap->hash_id = 0;
> +		chap->hash_len = 0;
> +	}
> +	chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
> +					     CRYPTO_ALG_ALLOCATES_MEMORY);
> +	if (IS_ERR(chap->shash_tfm)) {
> +		dev_warn(ctrl->device,
> +			 "qid %d: failed to allocate hash %s, error %ld\n",
> +			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
> +		chap->shash_tfm = NULL;
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
> +		return NVME_SC_AUTH_REQUIRED;
> +	}
> +
> +	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
> +		dev_warn(ctrl->device,
> +			 "qid %d: invalid hash length %d\n",
> +			 chap->qid, data->hl);
> +		crypto_free_shash(chap->shash_tfm);
> +		chap->shash_tfm = NULL;
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
> +		return NVME_SC_AUTH_REQUIRED;
> +	}
> +
> +	/* Reset host response if the hash had been changed */
> +	if (chap->hash_id != data->hashid) {
> +		kfree(chap->host_response);
> +		chap->host_response = NULL;
> +	}
> +
> +	chap->hash_id = data->hashid;
> +	chap->hash_len = data->hl;
> +	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
> +		chap->qid, hmac_name);
> +
> +select_kpp:
> +	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
> +	if (!kpp_name) {
> +		dev_warn(ctrl->device,
> +			 "qid %d: invalid DH group id %d\n",
> +			 chap->qid, data->dhgid);
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
> +		return NVME_SC_AUTH_REQUIRED;
> +	}
> +
> +	if (data->dhgid != NVME_AUTH_DHCHAP_DHGROUP_NULL) {
> +		dev_warn(ctrl->device,
> +			 "qid %d: unsupported DH group %s\n",
> +			 chap->qid, kpp_name);
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
> +		return NVME_SC_AUTH_REQUIRED;
> +	} else if (dhvlen != 0) {
> +		dev_warn(ctrl->device,
> +			 "qid %d: invalid DH value for NULL DH\n",
> +			 chap->qid);
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
> +		return NVME_SC_INVALID_FIELD;
> +	}
> +	chap->dhgroup_id = data->dhgid;
> +
> +	chap->s1 = le32_to_cpu(data->seqnum);
> +	memcpy(chap->c1, data->cval, chap->hash_len);
> +
> +	return 0;
> +}
> +
> +static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)
> +{
> +	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
> +	size_t size = sizeof(*data);
> +
> +	size += 2 * chap->hash_len;
> +
> +	if (chap->buf_size < size) {
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
> +		return -EINVAL;
> +	}
> +
> +	memset(chap->buf, 0, size);
> +	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
> +	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
> +	data->t_id = cpu_to_le16(chap->transaction);
> +	data->hl = chap->hash_len;
> +	data->dhvlen = 0;
> +	memcpy(data->rval, chap->response, chap->hash_len);
> +	if (ctrl->opts->dhchap_ctrl_secret) {
> +		get_random_bytes(chap->c2, chap->hash_len);
> +		data->cvalid = 1;
> +		chap->s2 = atomic_inc_return(&nvme_dhchap_seqnum);
> +		memcpy(data->rval + chap->hash_len, chap->c2,
> +		       chap->hash_len);
> +		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
> +			__func__, chap->qid,
> +			chap->hash_len, chap->c2);
> +	} else {
> +		memset(chap->c2, 0, chap->hash_len);
> +		chap->s2 = 0;
> +	}
> +	data->seqnum = cpu_to_le32(chap->s2);
> +	return size;
> +}
> +
> +static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)
> +{
> +	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
> +	size_t size = sizeof(*data);
> +
> +	if (ctrl->opts->dhchap_ctrl_secret)
> +		size += chap->hash_len;
> +
> +	if (chap->buf_size < size) {
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
> +		return NVME_SC_INVALID_FIELD;
> +	}
> +
> +	if (data->hl != chap->hash_len) {
> +		dev_warn(ctrl->device,
> +			 "qid %d: invalid hash length %d\n",
> +			 chap->qid, data->hl);
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
> +		return NVME_SC_INVALID_FIELD;
> +	}
> +
> +	/* Just print out information for the admin queue */
> +	if (chap->qid == -1)
> +		dev_info(ctrl->device,
> +			 "qid 0: authenticated with hash %s dhgroup %s\n",
> +			 nvme_auth_hmac_name(chap->hash_id),
> +			 nvme_auth_dhgroup_name(chap->dhgroup_id));
> +
> +	if (!data->rvalid)
> +		return 0;
> +
> +	/* Validate controller response */
> +	if (memcmp(chap->response, data->rval, data->hl)) {
> +		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
> +			__func__, chap->qid, chap->hash_len, data->rval);
> +		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
> +			__func__, chap->qid, chap->hash_len, chap->response);
> +		dev_warn(ctrl->device,
> +			 "qid %d: controller authentication failed\n",
> +			 chap->qid);
> +		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
> +		return NVME_SC_AUTH_REQUIRED;
> +	}
> +
> +	/* Just print out information for the admin queue */
> +	if (chap->qid == -1)
> +		dev_info(ctrl->device,
> +			 "qid 0: controller authenticated\n");
> +	return 0;
> +}
> +
> +static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)
> +{
> +	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
> +	size_t size = sizeof(*data);
> +
> +	memset(chap->buf, 0, size);
> +	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
> +	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
> +	data->t_id = cpu_to_le16(chap->transaction);
> +
> +	return size;
> +}
> +
> +static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)
> +{
> +	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
> +	size_t size = sizeof(*data);
> +
> +	memset(chap->buf, 0, size);
> +	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
> +	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
> +	data->t_id = cpu_to_le16(chap->transaction);
> +	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
> +	data->rescode_exp = chap->status;
> +
> +	return size;
> +}
> +
> +static int nvme_auth_dhchap_host_response(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)
> +{
> +	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
> +	u8 buf[4], *challenge = chap->c1;
> +	int ret;
> +
> +	dev_dbg(ctrl->device, "%s: qid %d host response seq %d transaction %d\n",
> +		__func__, chap->qid, chap->s1, chap->transaction);
> +
> +	if (!chap->host_response) {
> +		chap->host_response = nvme_auth_transform_key(ctrl->dhchap_key,
> +					ctrl->dhchap_key_len,
> +					ctrl->dhchap_key_hash,
> +					ctrl->opts->host->nqn);
> +		if (IS_ERR(chap->host_response)) {
> +			ret = PTR_ERR(chap->host_response);
> +			chap->host_response = NULL;
> +			return ret;
> +		}
> +	} else {
> +		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
> +			__func__, chap->qid);
> +	}
> +
> +	ret = crypto_shash_setkey(chap->shash_tfm,
> +			chap->host_response, ctrl->dhchap_key_len);
> +	if (ret) {
> +		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
> +			 chap->qid, ret);
> +		goto out;
> +	}
> +
> +	shash->tfm = chap->shash_tfm;
> +	ret = crypto_shash_init(shash);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, challenge, chap->hash_len);
> +	if (ret)
> +		goto out;
> +	put_unaligned_le32(chap->s1, buf);
> +	ret = crypto_shash_update(shash, buf, 4);
> +	if (ret)
> +		goto out;
> +	put_unaligned_le16(chap->transaction, buf);
> +	ret = crypto_shash_update(shash, buf, 2);
> +	if (ret)
> +		goto out;
> +	memset(buf, 0, sizeof(buf));
> +	ret = crypto_shash_update(shash, buf, 1);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, "HostHost", 8);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
> +				  strlen(ctrl->opts->host->nqn));
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, buf, 1);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
> +			    strlen(ctrl->opts->subsysnqn));
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_final(shash, chap->response);
> +out:
> +	if (challenge != chap->c1)
> +		kfree(challenge);
> +	return ret;
> +}
> +
> +static int nvme_auth_dhchap_ctrl_response(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)
> +{
> +	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
> +	u8 *ctrl_response;
> +	u8 buf[4], *challenge = chap->c2;
> +	int ret;
> +
> +	ctrl_response = nvme_auth_transform_key(ctrl->dhchap_ctrl_key,
> +				ctrl->dhchap_ctrl_key_len,
> +				ctrl->dhchap_ctrl_key_hash,
> +				ctrl->opts->subsysnqn);
> +	if (IS_ERR(ctrl_response)) {
> +		ret = PTR_ERR(ctrl_response);
> +		return ret;
> +	}
> +	ret = crypto_shash_setkey(chap->shash_tfm,
> +			ctrl_response, ctrl->dhchap_ctrl_key_len);
> +	if (ret) {
> +		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
> +			 chap->qid, ret);
> +		goto out;
> +	}
> +
> +	dev_dbg(ctrl->device, "%s: qid %d host response seq %d transaction %d\n",
> +		__func__, chap->qid, chap->s2, chap->transaction);
> +	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
> +		__func__, chap->qid, chap->hash_len, challenge);
> +	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
> +		__func__, chap->qid, ctrl->opts->subsysnqn);
> +	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
> +		__func__, chap->qid, ctrl->opts->host->nqn);
> +	shash->tfm = chap->shash_tfm;
> +	ret = crypto_shash_init(shash);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, challenge, chap->hash_len);
> +	if (ret)
> +		goto out;
> +	put_unaligned_le32(chap->s2, buf);
> +	ret = crypto_shash_update(shash, buf, 4);
> +	if (ret)
> +		goto out;
> +	put_unaligned_le16(chap->transaction, buf);
> +	ret = crypto_shash_update(shash, buf, 2);
> +	if (ret)
> +		goto out;
> +	memset(buf, 0, 4);
> +	ret = crypto_shash_update(shash, buf, 1);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, "Controller", 10);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
> +				  strlen(ctrl->opts->subsysnqn));
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, buf, 1);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
> +				  strlen(ctrl->opts->host->nqn));
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_final(shash, chap->response);
> +out:
> +	if (challenge != chap->c2)
> +		kfree(challenge);
> +	return ret;
> +}
> +
> +int nvme_auth_generate_key(struct nvme_ctrl *ctrl)
> +{
> +	u8 *secret = ctrl->opts->dhchap_secret;
> +	u8 *key;
> +	size_t key_len;
> +	u8 key_hash;
> +
> +	if (!secret)
> +		return 0;
> +
> +	if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
> +		return -EINVAL;
> +
> +	/* Pass in the secret without the 'DHHC-1:XX:' prefix */
> +	key = nvme_auth_extract_secret(secret + 10, key_hash,
> +				       &key_len);
> +	if (IS_ERR(key)) {
> +		dev_dbg(ctrl->device, "failed to extract key, error %ld\n",
> +			PTR_ERR(key));
> +		return PTR_ERR(key);
> +	}
> +
> +	ctrl->dhchap_key = key;
> +	key = NULL;
> +	ctrl->dhchap_key_len = key_len;
> +	ctrl->dhchap_key_hash = key_hash;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
> +
> +int nvme_auth_generate_ctrl_key(struct nvme_ctrl *ctrl)
> +{
> +	u8 *secret = ctrl->opts->dhchap_ctrl_secret;
> +	u8 *key;
> +	size_t key_len;
> +	u8 key_hash;
> +
> +	if (!secret)
> +		return 0;
> +
> +	if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
> +		return -EINVAL;
> +
> +	/* Pass in the secret without the 'DHHC-1:XX:' prefix */
> +	key = nvme_auth_extract_secret(secret + 10, key_hash,
> +				       &key_len);
> +	if (IS_ERR(key))
> +		return PTR_ERR(key);
> +
> +	ctrl->dhchap_ctrl_key = key;
> +	key = NULL;
> +	ctrl->dhchap_ctrl_key_len = key_len;
> +	ctrl->dhchap_ctrl_key_hash = key_hash;
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(nvme_auth_generate_ctrl_key);

This and the other look identical just operate on
a different key, perhaps merge them into one?

Overall this looks sane to me.
Just nitpicking on the logging to use err when you
hit a errors.
Sagi Grimberg Nov. 16, 2021, 10:35 a.m. UTC | #2
> +static int nvme_auth_dhchap_host_response(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)

Maybe better to call it nvme_auth_dhchap_setup_host_response()?

> +{
> +	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
> +	u8 buf[4], *challenge = chap->c1;
> +	int ret;
> +
> +	dev_dbg(ctrl->device, "%s: qid %d host response seq %d transaction %d\n",
> +		__func__, chap->qid, chap->s1, chap->transaction);
> +
> +	if (!chap->host_response) {
> +		chap->host_response = nvme_auth_transform_key(ctrl->dhchap_key,
> +					ctrl->dhchap_key_len,
> +					ctrl->dhchap_key_hash,
> +					ctrl->opts->host->nqn);
> +		if (IS_ERR(chap->host_response)) {
> +			ret = PTR_ERR(chap->host_response);
> +			chap->host_response = NULL;
> +			return ret;
> +		}
> +	} else {
> +		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
> +			__func__, chap->qid);
> +	}
> +
> +	ret = crypto_shash_setkey(chap->shash_tfm,
> +			chap->host_response, ctrl->dhchap_key_len);
> +	if (ret) {
> +		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
> +			 chap->qid, ret);
> +		goto out;
> +	}
> +
> +	shash->tfm = chap->shash_tfm;
> +	ret = crypto_shash_init(shash);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, challenge, chap->hash_len);
> +	if (ret)
> +		goto out;
> +	put_unaligned_le32(chap->s1, buf);
> +	ret = crypto_shash_update(shash, buf, 4);
> +	if (ret)
> +		goto out;
> +	put_unaligned_le16(chap->transaction, buf);
> +	ret = crypto_shash_update(shash, buf, 2);
> +	if (ret)
> +		goto out;
> +	memset(buf, 0, sizeof(buf));
> +	ret = crypto_shash_update(shash, buf, 1);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, "HostHost", 8);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
> +				  strlen(ctrl->opts->host->nqn));
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, buf, 1);
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
> +			    strlen(ctrl->opts->subsysnqn));
> +	if (ret)
> +		goto out;
> +	ret = crypto_shash_final(shash, chap->response);
> +out:
> +	if (challenge != chap->c1)
> +		kfree(challenge);
> +	return ret;
> +}
> +
> +static int nvme_auth_dhchap_ctrl_response(struct nvme_ctrl *ctrl,
> +		struct nvme_dhchap_queue_context *chap)

Maybe better to call it nvme_auth_dhchap_validate_ctrl_response()?
Hannes Reinecke Nov. 16, 2021, 10:40 a.m. UTC | #3
On 11/16/21 11:25 AM, Sagi Grimberg wrote:
> 
> 
> On 11/12/21 2:59 PM, Hannes Reinecke wrote:
>> Implement NVMe-oF In-Band authentication according to NVMe TPAR 8006.
>> This patch adds two new fabric options 'dhchap_secret' to specify the
>> pre-shared key (in ASCII respresentation according to NVMe 2.0 section
>> 8.13.5.8 'Secret representation') and 'dhchap_ctrl_secret' to specify
>> the pre-shared controller key for bi-directional authentication of both
>> the host and the controller.
>> Re-authentication can be triggered by writing the PSK into the new
>> controller sysfs attribute 'dhchap_secret' or 'dhchap_ctrl_secret'.
>>
>> Signed-off-by: Hannes Reinecke <hare@suse.de>
>> ---
>>   drivers/nvme/host/Kconfig   |   11 +
>>   drivers/nvme/host/Makefile  |    1 +
>>   drivers/nvme/host/auth.c    | 1164 +++++++++++++++++++++++++++++++++++
>>   drivers/nvme/host/auth.h    |   25 +
>>   drivers/nvme/host/core.c    |  133 +++-
>>   drivers/nvme/host/fabrics.c |   79 ++-
>>   drivers/nvme/host/fabrics.h |    7 +
>>   drivers/nvme/host/nvme.h    |   36 ++
>>   drivers/nvme/host/tcp.c     |    1 +
>>   drivers/nvme/host/trace.c   |   32 +
>>   10 files changed, 1482 insertions(+), 7 deletions(-)
>>   create mode 100644 drivers/nvme/host/auth.c
>>   create mode 100644 drivers/nvme/host/auth.h
>>
>> diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
>> index dc0450ca23a3..49269c581ec4 100644
>> --- a/drivers/nvme/host/Kconfig
>> +++ b/drivers/nvme/host/Kconfig
>> @@ -83,3 +83,14 @@ config NVME_TCP
>>         from https://github.com/linux-nvme/nvme-cli.
>>           If unsure, say N.
>> +
>> +config NVME_AUTH
>> +    bool "NVM Express over Fabrics In-Band Authentication"
>> +    depends on NVME_CORE
>> +    select CRYPTO_HMAC
>> +    select CRYPTO_SHA256
>> +    select CRYPTO_SHA512
>> +    help
>> +      This provides support for NVMe over Fabrics In-Band
>> Authentication.
>> +
>> +      If unsure, say N.
>> diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
>> index dfaacd472e5d..4bae2a4a8d8c 100644
>> --- a/drivers/nvme/host/Makefile
>> +++ b/drivers/nvme/host/Makefile
>> @@ -15,6 +15,7 @@ nvme-core-$(CONFIG_NVME_MULTIPATH)    += multipath.o
>>   nvme-core-$(CONFIG_BLK_DEV_ZONED)    += zns.o
>>   nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS)    += fault_inject.o
>>   nvme-core-$(CONFIG_NVME_HWMON)        += hwmon.o
>> +nvme-core-$(CONFIG_NVME_AUTH)        += auth.o
>>     nvme-y                    += pci.o
>>   diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
>> new file mode 100644
>> index 000000000000..6ab95a178213
>> --- /dev/null
>> +++ b/drivers/nvme/host/auth.c
>> @@ -0,0 +1,1164 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/*
>> + * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
>> + */
>> +
>> +#include <linux/crc32.h>
>> +#include <linux/base64.h>
>> +#include <asm/unaligned.h>
>> +#include <crypto/hash.h>
>> +#include <crypto/dh.h>
>> +#include <crypto/ffdhe.h>
>> +#include "nvme.h"
>> +#include "fabrics.h"
>> +#include "auth.h"
>> +
>> +static atomic_t nvme_dhchap_seqnum = ATOMIC_INIT(0);
>> +
>> +struct nvme_dhchap_queue_context {
>> +    struct list_head entry;
>> +    struct work_struct auth_work;
>> +    struct nvme_ctrl *ctrl;
>> +    struct crypto_shash *shash_tfm;
>> +    void *buf;
>> +    size_t buf_size;
>> +    int qid;
>> +    int error;
>> +    u32 s1;
>> +    u32 s2;
>> +    u16 transaction;
>> +    u8 status;
>> +    u8 hash_id;
>> +    u8 hash_len;
>> +    u8 dhgroup_id;
>> +    u8 c1[64];
>> +    u8 c2[64];
>> +    u8 response[64];
>> +    u8 *host_response;
>> +};
>> +
>> +static struct nvme_auth_dhgroup_map {
>> +    int id;
>> +    const char name[16];
>> +    const char kpp[16];
>> +    int privkey_size;
>> +    int pubkey_size;
>> +} dhgroup_map[] = {
>> +    { .id = NVME_AUTH_DHCHAP_DHGROUP_NULL,
>> +      .name = "null", .kpp = "null",
>> +      .privkey_size = 0, .pubkey_size = 0 },
>> +    { .id = NVME_AUTH_DHCHAP_DHGROUP_2048,
>> +      .name = "ffdhe2048", .kpp = "dh",
>> +      .privkey_size = 256, .pubkey_size = 256 },
>> +    { .id = NVME_AUTH_DHCHAP_DHGROUP_3072,
>> +      .name = "ffdhe3072", .kpp = "dh",
>> +      .privkey_size = 384, .pubkey_size = 384 },
>> +    { .id = NVME_AUTH_DHCHAP_DHGROUP_4096,
>> +      .name = "ffdhe4096", .kpp = "dh",
>> +      .privkey_size = 512, .pubkey_size = 512 },
>> +    { .id = NVME_AUTH_DHCHAP_DHGROUP_6144,
>> +      .name = "ffdhe6144", .kpp = "dh",
>> +      .privkey_size = 768, .pubkey_size = 768 },
>> +    { .id = NVME_AUTH_DHCHAP_DHGROUP_8192,
>> +      .name = "ffdhe8192", .kpp = "dh",
>> +      .privkey_size = 1024, .pubkey_size = 1024 },
>> +};
>> +
>> +const char *nvme_auth_dhgroup_name(int dhgroup_id)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
>> +        if (dhgroup_map[i].id == dhgroup_id)
>> +            return dhgroup_map[i].name;
>> +    }
>> +    return NULL;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
>> +
>> +int nvme_auth_dhgroup_pubkey_size(int dhgroup_id)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
>> +        if (dhgroup_map[i].id == dhgroup_id)
>> +            return dhgroup_map[i].pubkey_size;
>> +    }
>> +    return -1;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_pubkey_size);
>> +
>> +int nvme_auth_dhgroup_privkey_size(int dhgroup_id)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
>> +        if (dhgroup_map[i].id == dhgroup_id)
>> +            return dhgroup_map[i].privkey_size;
>> +    }
>> +    return -1;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_privkey_size);
>> +
>> +const char *nvme_auth_dhgroup_kpp(int dhgroup_id)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
>> +        if (dhgroup_map[i].id == dhgroup_id)
>> +            return dhgroup_map[i].kpp;
>> +    }
>> +    return NULL;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
>> +
>> +int nvme_auth_dhgroup_id(const char *dhgroup_name)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
>> +        if (!strncmp(dhgroup_map[i].name, dhgroup_name,
>> +                 strlen(dhgroup_map[i].name)))
>> +            return dhgroup_map[i].id;
>> +    }
>> +    return -1;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
>> +
>> +static struct nvme_dhchap_hash_map {
>> +    int id;
>> +    int len;
>> +    const char hmac[15];
>> +    const char digest[15];
>> +} hash_map[] = {
>> +    {.id = NVME_AUTH_DHCHAP_SHA256, .len = 32,
>> +     .hmac = "hmac(sha256)", .digest = "sha256" },
>> +    {.id = NVME_AUTH_DHCHAP_SHA384, .len = 48,
>> +     .hmac = "hmac(sha384)", .digest = "sha384" },
>> +    {.id = NVME_AUTH_DHCHAP_SHA512, .len = 64,
>> +     .hmac = "hmac(sha512)", .digest = "sha512" },
>> +};
>> +
>> +const char *nvme_auth_hmac_name(int hmac_id)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
>> +        if (hash_map[i].id == hmac_id)
>> +            return hash_map[i].hmac;
>> +    }
>> +    return NULL;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
>> +
>> +const char *nvme_auth_digest_name(int hmac_id)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
>> +        if (hash_map[i].id == hmac_id)
>> +            return hash_map[i].digest;
>> +    }
>> +    return NULL;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
>> +
>> +int nvme_auth_hmac_id(const char *hmac_name)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
>> +        if (!strncmp(hash_map[i].hmac, hmac_name,
>> +                 strlen(hash_map[i].hmac)))
>> +            return hash_map[i].id;
>> +    }
>> +    return -1;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
>> +
>> +int nvme_auth_hmac_hash_len(int hmac_id)
>> +{
>> +    int i;
>> +
>> +    for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
>> +        if (hash_map[i].id == hmac_id)
>> +            return hash_map[i].len;
>> +    }
>> +    return 0;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
>> +
>> +unsigned char *nvme_auth_extract_secret(unsigned char *secret, u8
>> key_hash,
>> +                    size_t *out_len)
>> +{
>> +    unsigned char *key, *p;
>> +    u32 crc;
>> +    int key_len;
>> +    size_t allocated_len = strlen(secret);
>> +
>> +    /* Secret might be affixed with a ':' */
>> +    p = strrchr(secret, ':');
>> +    if (p)
>> +        allocated_len = p - secret;
>> +    key = kzalloc(allocated_len, GFP_KERNEL);
>> +    if (!key)
>> +        return ERR_PTR(-ENOMEM);
>> +
>> +    key_len = base64_decode(secret, allocated_len, key);
>> +    if (key_len < 0) {
>> +        pr_debug("base64 key decoding error %d\n",
>> +             key_len);
>> +        return ERR_PTR(key_len);
>> +    }
>> +    if (key_len != 36 && key_len != 52 &&
>> +        key_len != 68) {
>> +        pr_debug("Invalid key len %d\n",
>> +             key_len);
> 
> pr_err?
> 
Yeah; I've been running with debug enabled, so I would've seen it anyway.

>> +        kfree_sensitive(key);
>> +        return ERR_PTR(-EINVAL);
>> +    }
>> +    if (key_hash > 0 &&
>> +        (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
>> +        pr_debug("Invalid key len %d for %s\n", key_len,
>> +             nvme_auth_hmac_name(key_hash));
> 
> pr_err?
> 
Same; will be fixing it.

>> +        kfree_sensitive(key);
>> +        return ERR_PTR(-EINVAL);
>> +    }
>> +
>> +    /* The last four bytes is the CRC in little-endian format */
>> +    key_len -= 4;
>> +    /*
>> +     * The linux implementation doesn't do pre- and post-increments,
>> +     * so we have to do it manually.
>> +     */
>> +    crc = ~crc32(~0, key, key_len);
>> +
>> +    if (get_unaligned_le32(key + key_len) != crc) {
>> +        pr_debug("DH-HMAC-CHAP key crc mismatch (key %08x, crc %08x)\n",
>> +               get_unaligned_le32(key + key_len), crc);
> 
> pr_err?
> 
>> +        kfree_sensitive(key);
>> +        return ERR_PTR(-EKEYREJECTED);
>> +    }
>> +    *out_len = key_len;
>> +    return key;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_extract_secret);
>> +
>> +u8 *nvme_auth_transform_key(u8 *key, size_t key_len, u8 key_hash,
>> char *nqn)
>> +{
>> +    const char *hmac_name = nvme_auth_hmac_name(key_hash);
>> +    struct crypto_shash *key_tfm;
>> +    struct shash_desc *shash;
>> +    u8 *transformed_key;
>> +    int ret;
>> +
>> +    if (key_hash == 0) {
>> +        transformed_key = kmemdup(key, key_len, GFP_KERNEL);
>> +        return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
>> +    }
>> +
>> +    if (!key || !key_len) {
>> +        pr_warn("No key specified\n");
> 
> pr_err?
> 
>> +        return ERR_PTR(-ENOKEY);
>> +    }
>> +    if (!hmac_name) {
>> +        pr_warn("Invalid key hash id %d\n", key_hash);
> 
> pr_err?
> 
>> +        return ERR_PTR(-EINVAL);
>> +    }
>> +
>> +    key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
>> +    if (IS_ERR(key_tfm))
>> +        return (u8 *)key_tfm;
>> +
>> +    shash = kmalloc(sizeof(struct shash_desc) +
>> +            crypto_shash_descsize(key_tfm),
>> +            GFP_KERNEL);
>> +    if (!shash) {
>> +        ret = -ENOMEM;
>> +        goto out_free_key;
>> +    }
>> +
>> +    transformed_key = kzalloc(crypto_shash_digestsize(key_tfm),
>> GFP_KERNEL);
>> +    if (!transformed_key) {
>> +        ret = -ENOMEM;
>> +        goto out_free_shash;
>> +    }
>> +
>> +    shash->tfm = key_tfm;
>> +    ret = crypto_shash_setkey(key_tfm, key, key_len);
>> +    if (ret < 0)
>> +        goto out_free_shash;
>> +    ret = crypto_shash_init(shash);
>> +    if (ret < 0)
>> +        goto out_free_shash;
>> +    ret = crypto_shash_update(shash, nqn, strlen(nqn));
>> +    if (ret < 0)
>> +        goto out_free_shash;
>> +    ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
>> +    if (ret < 0)
>> +        goto out_free_shash;
>> +    ret = crypto_shash_final(shash, transformed_key);
>> +out_free_shash:
>> +    kfree(shash);
>> +out_free_key:
>> +    crypto_free_shash(key_tfm);
>> +    if (ret < 0) {
>> +        kfree_sensitive(transformed_key);
>> +        return ERR_PTR(ret);
>> +    }
>> +    return transformed_key;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
>> +
>> +static int nvme_auth_send(struct nvme_ctrl *ctrl, int qid,
>> +        void *data, size_t tl)
>> +{
>> +    struct nvme_command cmd = {};
>> +    blk_mq_req_flags_t flags = qid == NVME_QID_ANY ?
>> +        0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED;
>> +    struct request_queue *q = qid == NVME_QID_ANY ?
>> +        ctrl->fabrics_q : ctrl->connect_q;
>> +    int ret;
>> +
>> +    cmd.auth_send.opcode = nvme_fabrics_command;
>> +    cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
>> +    cmd.auth_send.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
>> +    cmd.auth_send.spsp0 = 0x01;
>> +    cmd.auth_send.spsp1 = 0x01;
>> +    cmd.auth_send.tl = cpu_to_le32(tl);
>> +
>> +    ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, tl, 0, qid,
>> +                     0, flags);
>> +    if (ret > 0)
>> +        dev_dbg(ctrl->device,
>> +            "%s: qid %d nvme status %d\n", __func__, qid, ret);
> 
> dev_err? Also can we phrase "failed auth_send" instead of the __func__?
> 
Yeah; the logging messages are inconsistent as it is.
I've tried to stick to including the __func__ argument in all debug
messages, and to using a human-readable string for 'normal' errors.
Possibly not that consistent, though.

>> +    else if (ret < 0)
>> +        dev_dbg(ctrl->device,
>> +            "%s: qid %d error %d\n", __func__, qid, ret);
> 
> dev_err?
> 
>> +    return ret;
>> +}
>> +
>> +static int nvme_auth_receive(struct nvme_ctrl *ctrl, int qid,
>> +        void *buf, size_t al)
>> +{
>> +    struct nvme_command cmd = {};
>> +    blk_mq_req_flags_t flags = qid == NVME_QID_ANY ?
>> +        0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED;
>> +    struct request_queue *q = qid == NVME_QID_ANY ?
>> +        ctrl->fabrics_q : ctrl->connect_q;
>> +    int ret;
>> +
>> +    cmd.auth_receive.opcode = nvme_fabrics_command;
>> +    cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
>> +    cmd.auth_receive.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
>> +    cmd.auth_receive.spsp0 = 0x01;
>> +    cmd.auth_receive.spsp1 = 0x01;
>> +    cmd.auth_receive.al = cpu_to_le32(al);
>> +
>> +    ret = __nvme_submit_sync_cmd(q, &cmd, NULL, buf, al, 0, qid,
>> +                     0, flags);
>> +    if (ret > 0) {
>> +        dev_dbg(ctrl->device, "%s: qid %d nvme status %x\n",
>> +            __func__, qid, ret);
> 
> dev_err? "failed auth_recv" instead of the __func__
> 
>> +        ret = -EIO;
>> +    }
>> +    if (ret < 0) {
>> +        dev_dbg(ctrl->device, "%s: qid %d error %d\n",
>> +            __func__, qid, ret);
> 
> dev_err
> 
>> +        return ret;
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>> +static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
>> +        struct nvmf_auth_dhchap_failure_data *data,
>> +        u16 transaction, u8 expected_msg)
>> +{
>> +    dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
>> +        __func__, qid, data->auth_type, data->auth_id);
>> +
>> +    if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
>> +        data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
>> +        return data->rescode_exp;
>> +    }
>> +    if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
>> +        data->auth_id != expected_msg) {
>> +        dev_warn(ctrl->device,
>> +             "qid %d invalid message %02x/%02x\n",
>> +             qid, data->auth_type, data->auth_id);
>> +        return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
>> +    }
>> +    if (le16_to_cpu(data->t_id) != transaction) {
>> +        dev_warn(ctrl->device,
>> +             "qid %d invalid transaction ID %d\n",
>> +             qid, le16_to_cpu(data->t_id));
> 
> why not dev_err?
> 
Because it's a protocol error, and we can invoke the protocol error
handling here.
dev_err() messages are reserved for cases where we cannot run the
protocol at all.
Or, at least, that's how I tried to handle things.

>> +        return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
>> +    }
>> +    return 0;
>> +}
>> +
>> +static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
>> +{
>> +    struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
>> +    size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
>> +
>> +    if (chap->buf_size < size) {
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
>> +        return -EINVAL;
>> +    }
>> +    memset((u8 *)chap->buf, 0, size);
>> +    data->auth_type = NVME_AUTH_COMMON_MESSAGES;
>> +    data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
>> +    data->t_id = cpu_to_le16(chap->transaction);
>> +    data->sc_c = 0; /* No secure channel concatenation */
>> +    data->napd = 1;
>> +    data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
>> +    data->auth_protocol[0].dhchap.halen = 3;
>> +    data->auth_protocol[0].dhchap.dhlen = 6;
>> +    data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_DHCHAP_SHA256;
>> +    data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_DHCHAP_SHA384;
>> +    data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_DHCHAP_SHA512;
>> +    data->auth_protocol[0].dhchap.idlist[3] =
>> NVME_AUTH_DHCHAP_DHGROUP_NULL;
>> +    data->auth_protocol[0].dhchap.idlist[4] =
>> NVME_AUTH_DHCHAP_DHGROUP_2048;
>> +    data->auth_protocol[0].dhchap.idlist[5] =
>> NVME_AUTH_DHCHAP_DHGROUP_3072;
>> +    data->auth_protocol[0].dhchap.idlist[6] =
>> NVME_AUTH_DHCHAP_DHGROUP_4096;
>> +    data->auth_protocol[0].dhchap.idlist[7] =
>> NVME_AUTH_DHCHAP_DHGROUP_6144;
>> +    data->auth_protocol[0].dhchap.idlist[8] =
>> NVME_AUTH_DHCHAP_DHGROUP_8192;
>> +
>> +    return size;
>> +}
>> +
>> +static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
>> +{
>> +    struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
>> +    u16 dhvlen = le16_to_cpu(data->dhvlen);
>> +    size_t size = sizeof(*data) + data->hl + dhvlen;
>> +    const char *hmac_name, *kpp_name;
>> +
>> +    if (chap->buf_size < size) {
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
>> +        return NVME_SC_INVALID_FIELD;
>> +    }
>> +
>> +    hmac_name = nvme_auth_hmac_name(data->hashid);
>> +    if (!hmac_name) {
>> +        dev_warn(ctrl->device,
>> +             "qid %d: invalid HASH ID %d\n",
>> +             chap->qid, data->hashid);
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
>> +        return NVME_SC_INVALID_FIELD;
>> +    }
>> +
>> +    if (chap->hash_id == data->hashid && chap->shash_tfm &&
>> +        !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
>> +        crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
>> +        dev_dbg(ctrl->device,
>> +            "qid %d: reuse existing hash %s\n",
>> +            chap->qid, hmac_name);
>> +        goto select_kpp;
>> +    }
>> +
>> +    /* Reset if hash cannot be reused */
>> +    if (chap->shash_tfm) {
>> +        crypto_free_shash(chap->shash_tfm);
>> +        chap->hash_id = 0;
>> +        chap->hash_len = 0;
>> +    }
>> +    chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
>> +                         CRYPTO_ALG_ALLOCATES_MEMORY);
>> +    if (IS_ERR(chap->shash_tfm)) {
>> +        dev_warn(ctrl->device,
>> +             "qid %d: failed to allocate hash %s, error %ld\n",
>> +             chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
>> +        chap->shash_tfm = NULL;
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
>> +        return NVME_SC_AUTH_REQUIRED;
>> +    }
>> +
>> +    if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
>> +        dev_warn(ctrl->device,
>> +             "qid %d: invalid hash length %d\n",
>> +             chap->qid, data->hl);
>> +        crypto_free_shash(chap->shash_tfm);
>> +        chap->shash_tfm = NULL;
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
>> +        return NVME_SC_AUTH_REQUIRED;
>> +    }
>> +
>> +    /* Reset host response if the hash had been changed */
>> +    if (chap->hash_id != data->hashid) {
>> +        kfree(chap->host_response);
>> +        chap->host_response = NULL;
>> +    }
>> +
>> +    chap->hash_id = data->hashid;
>> +    chap->hash_len = data->hl;
>> +    dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
>> +        chap->qid, hmac_name);
>> +
>> +select_kpp:
>> +    kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
>> +    if (!kpp_name) {
>> +        dev_warn(ctrl->device,
>> +             "qid %d: invalid DH group id %d\n",
>> +             chap->qid, data->dhgid);
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
>> +        return NVME_SC_AUTH_REQUIRED;
>> +    }
>> +
>> +    if (data->dhgid != NVME_AUTH_DHCHAP_DHGROUP_NULL) {
>> +        dev_warn(ctrl->device,
>> +             "qid %d: unsupported DH group %s\n",
>> +             chap->qid, kpp_name);
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
>> +        return NVME_SC_AUTH_REQUIRED;
>> +    } else if (dhvlen != 0) {
>> +        dev_warn(ctrl->device,
>> +             "qid %d: invalid DH value for NULL DH\n",
>> +             chap->qid);
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
>> +        return NVME_SC_INVALID_FIELD;
>> +    }
>> +    chap->dhgroup_id = data->dhgid;
>> +
>> +    chap->s1 = le32_to_cpu(data->seqnum);
>> +    memcpy(chap->c1, data->cval, chap->hash_len);
>> +
>> +    return 0;
>> +}
>> +
>> +static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
>> +{
>> +    struct nvmf_auth_dhchap_reply_data *data = chap->buf;
>> +    size_t size = sizeof(*data);
>> +
>> +    size += 2 * chap->hash_len;
>> +
>> +    if (chap->buf_size < size) {
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
>> +        return -EINVAL;
>> +    }
>> +
>> +    memset(chap->buf, 0, size);
>> +    data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
>> +    data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
>> +    data->t_id = cpu_to_le16(chap->transaction);
>> +    data->hl = chap->hash_len;
>> +    data->dhvlen = 0;
>> +    memcpy(data->rval, chap->response, chap->hash_len);
>> +    if (ctrl->opts->dhchap_ctrl_secret) {
>> +        get_random_bytes(chap->c2, chap->hash_len);
>> +        data->cvalid = 1;
>> +        chap->s2 = atomic_inc_return(&nvme_dhchap_seqnum);
>> +        memcpy(data->rval + chap->hash_len, chap->c2,
>> +               chap->hash_len);
>> +        dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
>> +            __func__, chap->qid,
>> +            chap->hash_len, chap->c2);
>> +    } else {
>> +        memset(chap->c2, 0, chap->hash_len);
>> +        chap->s2 = 0;
>> +    }
>> +    data->seqnum = cpu_to_le32(chap->s2);
>> +    return size;
>> +}
>> +
>> +static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
>> +{
>> +    struct nvmf_auth_dhchap_success1_data *data = chap->buf;
>> +    size_t size = sizeof(*data);
>> +
>> +    if (ctrl->opts->dhchap_ctrl_secret)
>> +        size += chap->hash_len;
>> +
>> +    if (chap->buf_size < size) {
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
>> +        return NVME_SC_INVALID_FIELD;
>> +    }
>> +
>> +    if (data->hl != chap->hash_len) {
>> +        dev_warn(ctrl->device,
>> +             "qid %d: invalid hash length %d\n",
>> +             chap->qid, data->hl);
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
>> +        return NVME_SC_INVALID_FIELD;
>> +    }
>> +
>> +    /* Just print out information for the admin queue */
>> +    if (chap->qid == -1)
>> +        dev_info(ctrl->device,
>> +             "qid 0: authenticated with hash %s dhgroup %s\n",
>> +             nvme_auth_hmac_name(chap->hash_id),
>> +             nvme_auth_dhgroup_name(chap->dhgroup_id));
>> +
>> +    if (!data->rvalid)
>> +        return 0;
>> +
>> +    /* Validate controller response */
>> +    if (memcmp(chap->response, data->rval, data->hl)) {
>> +        dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
>> +            __func__, chap->qid, chap->hash_len, data->rval);
>> +        dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
>> +            __func__, chap->qid, chap->hash_len, chap->response);
>> +        dev_warn(ctrl->device,
>> +             "qid %d: controller authentication failed\n",
>> +             chap->qid);
>> +        chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
>> +        return NVME_SC_AUTH_REQUIRED;
>> +    }
>> +
>> +    /* Just print out information for the admin queue */
>> +    if (chap->qid == -1)
>> +        dev_info(ctrl->device,
>> +             "qid 0: controller authenticated\n");
>> +    return 0;
>> +}
>> +
>> +static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
>> +{
>> +    struct nvmf_auth_dhchap_success2_data *data = chap->buf;
>> +    size_t size = sizeof(*data);
>> +
>> +    memset(chap->buf, 0, size);
>> +    data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
>> +    data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
>> +    data->t_id = cpu_to_le16(chap->transaction);
>> +
>> +    return size;
>> +}
>> +
>> +static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
>> +{
>> +    struct nvmf_auth_dhchap_failure_data *data = chap->buf;
>> +    size_t size = sizeof(*data);
>> +
>> +    memset(chap->buf, 0, size);
>> +    data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
>> +    data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
>> +    data->t_id = cpu_to_le16(chap->transaction);
>> +    data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
>> +    data->rescode_exp = chap->status;
>> +
>> +    return size;
>> +}
>> +
>> +static int nvme_auth_dhchap_host_response(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
>> +{
>> +    SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
>> +    u8 buf[4], *challenge = chap->c1;
>> +    int ret;
>> +
>> +    dev_dbg(ctrl->device, "%s: qid %d host response seq %d
>> transaction %d\n",
>> +        __func__, chap->qid, chap->s1, chap->transaction);
>> +
>> +    if (!chap->host_response) {
>> +        chap->host_response = nvme_auth_transform_key(ctrl->dhchap_key,
>> +                    ctrl->dhchap_key_len,
>> +                    ctrl->dhchap_key_hash,
>> +                    ctrl->opts->host->nqn);
>> +        if (IS_ERR(chap->host_response)) {
>> +            ret = PTR_ERR(chap->host_response);
>> +            chap->host_response = NULL;
>> +            return ret;
>> +        }
>> +    } else {
>> +        dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
>> +            __func__, chap->qid);
>> +    }
>> +
>> +    ret = crypto_shash_setkey(chap->shash_tfm,
>> +            chap->host_response, ctrl->dhchap_key_len);
>> +    if (ret) {
>> +        dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
>> +             chap->qid, ret);
>> +        goto out;
>> +    }
>> +
>> +    shash->tfm = chap->shash_tfm;
>> +    ret = crypto_shash_init(shash);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, challenge, chap->hash_len);
>> +    if (ret)
>> +        goto out;
>> +    put_unaligned_le32(chap->s1, buf);
>> +    ret = crypto_shash_update(shash, buf, 4);
>> +    if (ret)
>> +        goto out;
>> +    put_unaligned_le16(chap->transaction, buf);
>> +    ret = crypto_shash_update(shash, buf, 2);
>> +    if (ret)
>> +        goto out;
>> +    memset(buf, 0, sizeof(buf));
>> +    ret = crypto_shash_update(shash, buf, 1);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, "HostHost", 8);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
>> +                  strlen(ctrl->opts->host->nqn));
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, buf, 1);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
>> +                strlen(ctrl->opts->subsysnqn));
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_final(shash, chap->response);
>> +out:
>> +    if (challenge != chap->c1)
>> +        kfree(challenge);
>> +    return ret;
>> +}
>> +
>> +static int nvme_auth_dhchap_ctrl_response(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
>> +{
>> +    SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
>> +    u8 *ctrl_response;
>> +    u8 buf[4], *challenge = chap->c2;
>> +    int ret;
>> +
>> +    ctrl_response = nvme_auth_transform_key(ctrl->dhchap_ctrl_key,
>> +                ctrl->dhchap_ctrl_key_len,
>> +                ctrl->dhchap_ctrl_key_hash,
>> +                ctrl->opts->subsysnqn);
>> +    if (IS_ERR(ctrl_response)) {
>> +        ret = PTR_ERR(ctrl_response);
>> +        return ret;
>> +    }
>> +    ret = crypto_shash_setkey(chap->shash_tfm,
>> +            ctrl_response, ctrl->dhchap_ctrl_key_len);
>> +    if (ret) {
>> +        dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
>> +             chap->qid, ret);
>> +        goto out;
>> +    }
>> +
>> +    dev_dbg(ctrl->device, "%s: qid %d host response seq %d
>> transaction %d\n",
>> +        __func__, chap->qid, chap->s2, chap->transaction);
>> +    dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
>> +        __func__, chap->qid, chap->hash_len, challenge);
>> +    dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
>> +        __func__, chap->qid, ctrl->opts->subsysnqn);
>> +    dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
>> +        __func__, chap->qid, ctrl->opts->host->nqn);
>> +    shash->tfm = chap->shash_tfm;
>> +    ret = crypto_shash_init(shash);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, challenge, chap->hash_len);
>> +    if (ret)
>> +        goto out;
>> +    put_unaligned_le32(chap->s2, buf);
>> +    ret = crypto_shash_update(shash, buf, 4);
>> +    if (ret)
>> +        goto out;
>> +    put_unaligned_le16(chap->transaction, buf);
>> +    ret = crypto_shash_update(shash, buf, 2);
>> +    if (ret)
>> +        goto out;
>> +    memset(buf, 0, 4);
>> +    ret = crypto_shash_update(shash, buf, 1);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, "Controller", 10);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
>> +                  strlen(ctrl->opts->subsysnqn));
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, buf, 1);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
>> +                  strlen(ctrl->opts->host->nqn));
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_final(shash, chap->response);
>> +out:
>> +    if (challenge != chap->c2)
>> +        kfree(challenge);
>> +    return ret;
>> +}
>> +
>> +int nvme_auth_generate_key(struct nvme_ctrl *ctrl)
>> +{
>> +    u8 *secret = ctrl->opts->dhchap_secret;
>> +    u8 *key;
>> +    size_t key_len;
>> +    u8 key_hash;
>> +
>> +    if (!secret)
>> +        return 0;
>> +
>> +    if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
>> +        return -EINVAL;
>> +
>> +    /* Pass in the secret without the 'DHHC-1:XX:' prefix */
>> +    key = nvme_auth_extract_secret(secret + 10, key_hash,
>> +                       &key_len);
>> +    if (IS_ERR(key)) {
>> +        dev_dbg(ctrl->device, "failed to extract key, error %ld\n",
>> +            PTR_ERR(key));
>> +        return PTR_ERR(key);
>> +    }
>> +
>> +    ctrl->dhchap_key = key;
>> +    key = NULL;
>> +    ctrl->dhchap_key_len = key_len;
>> +    ctrl->dhchap_key_hash = key_hash;
>> +
>> +    return 0;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
>> +
>> +int nvme_auth_generate_ctrl_key(struct nvme_ctrl *ctrl)
>> +{
>> +    u8 *secret = ctrl->opts->dhchap_ctrl_secret;
>> +    u8 *key;
>> +    size_t key_len;
>> +    u8 key_hash;
>> +
>> +    if (!secret)
>> +        return 0;
>> +
>> +    if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
>> +        return -EINVAL;
>> +
>> +    /* Pass in the secret without the 'DHHC-1:XX:' prefix */
>> +    key = nvme_auth_extract_secret(secret + 10, key_hash,
>> +                       &key_len);
>> +    if (IS_ERR(key))
>> +        return PTR_ERR(key);
>> +
>> +    ctrl->dhchap_ctrl_key = key;
>> +    key = NULL;
>> +    ctrl->dhchap_ctrl_key_len = key_len;
>> +    ctrl->dhchap_ctrl_key_hash = key_hash;
>> +    return 0;
>> +}
>> +EXPORT_SYMBOL_GPL(nvme_auth_generate_ctrl_key);
> 
> This and the other look identical just operate on
> a different key, perhaps merge them into one?
> 
Yeah; was too lazy here.
I could easily add a flag to differentiate between host and controller key.

> Overall this looks sane to me.
> Just nitpicking on the logging to use err when you
> hit a errors.
Will be fixing stuff up and send out a new version.

Cheers,

Hannes
Hannes Reinecke Nov. 16, 2021, 10:41 a.m. UTC | #4
On 11/16/21 11:35 AM, Sagi Grimberg wrote:
> 
>> +static int nvme_auth_dhchap_host_response(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
> 
> Maybe better to call it nvme_auth_dhchap_setup_host_response()?
> 
Ok.

>> +{
>> +    SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
>> +    u8 buf[4], *challenge = chap->c1;
>> +    int ret;
>> +
>> +    dev_dbg(ctrl->device, "%s: qid %d host response seq %d
>> transaction %d\n",
>> +        __func__, chap->qid, chap->s1, chap->transaction);
>> +
>> +    if (!chap->host_response) {
>> +        chap->host_response = nvme_auth_transform_key(ctrl->dhchap_key,
>> +                    ctrl->dhchap_key_len,
>> +                    ctrl->dhchap_key_hash,
>> +                    ctrl->opts->host->nqn);
>> +        if (IS_ERR(chap->host_response)) {
>> +            ret = PTR_ERR(chap->host_response);
>> +            chap->host_response = NULL;
>> +            return ret;
>> +        }
>> +    } else {
>> +        dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
>> +            __func__, chap->qid);
>> +    }
>> +
>> +    ret = crypto_shash_setkey(chap->shash_tfm,
>> +            chap->host_response, ctrl->dhchap_key_len);
>> +    if (ret) {
>> +        dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
>> +             chap->qid, ret);
>> +        goto out;
>> +    }
>> +
>> +    shash->tfm = chap->shash_tfm;
>> +    ret = crypto_shash_init(shash);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, challenge, chap->hash_len);
>> +    if (ret)
>> +        goto out;
>> +    put_unaligned_le32(chap->s1, buf);
>> +    ret = crypto_shash_update(shash, buf, 4);
>> +    if (ret)
>> +        goto out;
>> +    put_unaligned_le16(chap->transaction, buf);
>> +    ret = crypto_shash_update(shash, buf, 2);
>> +    if (ret)
>> +        goto out;
>> +    memset(buf, 0, sizeof(buf));
>> +    ret = crypto_shash_update(shash, buf, 1);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, "HostHost", 8);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
>> +                  strlen(ctrl->opts->host->nqn));
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, buf, 1);
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
>> +                strlen(ctrl->opts->subsysnqn));
>> +    if (ret)
>> +        goto out;
>> +    ret = crypto_shash_final(shash, chap->response);
>> +out:
>> +    if (challenge != chap->c1)
>> +        kfree(challenge);
>> +    return ret;
>> +}
>> +
>> +static int nvme_auth_dhchap_ctrl_response(struct nvme_ctrl *ctrl,
>> +        struct nvme_dhchap_queue_context *chap)
> 
> Maybe better to call it nvme_auth_dhchap_validate_ctrl_response()?

Will be doing so for the next round.

Thanks for the review.

Cheers,

Hannes
diff mbox series

Patch

diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index dc0450ca23a3..49269c581ec4 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -83,3 +83,14 @@  config NVME_TCP
 	  from https://github.com/linux-nvme/nvme-cli.
 
 	  If unsure, say N.
+
+config NVME_AUTH
+	bool "NVM Express over Fabrics In-Band Authentication"
+	depends on NVME_CORE
+	select CRYPTO_HMAC
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	help
+	  This provides support for NVMe over Fabrics In-Band Authentication.
+
+	  If unsure, say N.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index dfaacd472e5d..4bae2a4a8d8c 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -15,6 +15,7 @@  nvme-core-$(CONFIG_NVME_MULTIPATH)	+= multipath.o
 nvme-core-$(CONFIG_BLK_DEV_ZONED)	+= zns.o
 nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS)	+= fault_inject.o
 nvme-core-$(CONFIG_NVME_HWMON)		+= hwmon.o
+nvme-core-$(CONFIG_NVME_AUTH)		+= auth.o
 
 nvme-y					+= pci.o
 
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
new file mode 100644
index 000000000000..6ab95a178213
--- /dev/null
+++ b/drivers/nvme/host/auth.c
@@ -0,0 +1,1164 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include <crypto/ffdhe.h>
+#include "nvme.h"
+#include "fabrics.h"
+#include "auth.h"
+
+static atomic_t nvme_dhchap_seqnum = ATOMIC_INIT(0);
+
+struct nvme_dhchap_queue_context {
+	struct list_head entry;
+	struct work_struct auth_work;
+	struct nvme_ctrl *ctrl;
+	struct crypto_shash *shash_tfm;
+	void *buf;
+	size_t buf_size;
+	int qid;
+	int error;
+	u32 s1;
+	u32 s2;
+	u16 transaction;
+	u8 status;
+	u8 hash_id;
+	u8 hash_len;
+	u8 dhgroup_id;
+	u8 c1[64];
+	u8 c2[64];
+	u8 response[64];
+	u8 *host_response;
+};
+
+static struct nvme_auth_dhgroup_map {
+	int id;
+	const char name[16];
+	const char kpp[16];
+	int privkey_size;
+	int pubkey_size;
+} dhgroup_map[] = {
+	{ .id = NVME_AUTH_DHCHAP_DHGROUP_NULL,
+	  .name = "null", .kpp = "null",
+	  .privkey_size = 0, .pubkey_size = 0 },
+	{ .id = NVME_AUTH_DHCHAP_DHGROUP_2048,
+	  .name = "ffdhe2048", .kpp = "dh",
+	  .privkey_size = 256, .pubkey_size = 256 },
+	{ .id = NVME_AUTH_DHCHAP_DHGROUP_3072,
+	  .name = "ffdhe3072", .kpp = "dh",
+	  .privkey_size = 384, .pubkey_size = 384 },
+	{ .id = NVME_AUTH_DHCHAP_DHGROUP_4096,
+	  .name = "ffdhe4096", .kpp = "dh",
+	  .privkey_size = 512, .pubkey_size = 512 },
+	{ .id = NVME_AUTH_DHCHAP_DHGROUP_6144,
+	  .name = "ffdhe6144", .kpp = "dh",
+	  .privkey_size = 768, .pubkey_size = 768 },
+	{ .id = NVME_AUTH_DHCHAP_DHGROUP_8192,
+	  .name = "ffdhe8192", .kpp = "dh",
+	  .privkey_size = 1024, .pubkey_size = 1024 },
+};
+
+const char *nvme_auth_dhgroup_name(int dhgroup_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+		if (dhgroup_map[i].id == dhgroup_id)
+			return dhgroup_map[i].name;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
+
+int nvme_auth_dhgroup_pubkey_size(int dhgroup_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+		if (dhgroup_map[i].id == dhgroup_id)
+			return dhgroup_map[i].pubkey_size;
+	}
+	return -1;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_pubkey_size);
+
+int nvme_auth_dhgroup_privkey_size(int dhgroup_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+		if (dhgroup_map[i].id == dhgroup_id)
+			return dhgroup_map[i].privkey_size;
+	}
+	return -1;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_privkey_size);
+
+const char *nvme_auth_dhgroup_kpp(int dhgroup_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+		if (dhgroup_map[i].id == dhgroup_id)
+			return dhgroup_map[i].kpp;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
+
+int nvme_auth_dhgroup_id(const char *dhgroup_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+		if (!strncmp(dhgroup_map[i].name, dhgroup_name,
+			     strlen(dhgroup_map[i].name)))
+			return dhgroup_map[i].id;
+	}
+	return -1;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
+
+static struct nvme_dhchap_hash_map {
+	int id;
+	int len;
+	const char hmac[15];
+	const char digest[15];
+} hash_map[] = {
+	{.id = NVME_AUTH_DHCHAP_SHA256, .len = 32,
+	 .hmac = "hmac(sha256)", .digest = "sha256" },
+	{.id = NVME_AUTH_DHCHAP_SHA384, .len = 48,
+	 .hmac = "hmac(sha384)", .digest = "sha384" },
+	{.id = NVME_AUTH_DHCHAP_SHA512, .len = 64,
+	 .hmac = "hmac(sha512)", .digest = "sha512" },
+};
+
+const char *nvme_auth_hmac_name(int hmac_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+		if (hash_map[i].id == hmac_id)
+			return hash_map[i].hmac;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
+
+const char *nvme_auth_digest_name(int hmac_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+		if (hash_map[i].id == hmac_id)
+			return hash_map[i].digest;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
+
+int nvme_auth_hmac_id(const char *hmac_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+		if (!strncmp(hash_map[i].hmac, hmac_name,
+			     strlen(hash_map[i].hmac)))
+			return hash_map[i].id;
+	}
+	return -1;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
+
+int nvme_auth_hmac_hash_len(int hmac_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+		if (hash_map[i].id == hmac_id)
+			return hash_map[i].len;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
+
+unsigned char *nvme_auth_extract_secret(unsigned char *secret, u8 key_hash,
+					size_t *out_len)
+{
+	unsigned char *key, *p;
+	u32 crc;
+	int key_len;
+	size_t allocated_len = strlen(secret);
+
+	/* Secret might be suffixed with a ':' */
+	p = strrchr(secret, ':');
+	if (p)
+		allocated_len = p - secret;
+	key = kzalloc(allocated_len, GFP_KERNEL);
+	if (!key)
+		return ERR_PTR(-ENOMEM);
+
+	key_len = base64_decode(secret, allocated_len, key);
+	if (key_len < 0) {
+		pr_debug("base64 key decoding error %d\n",
+			 key_len);
+		return ERR_PTR(key_len);
+	}
+	if (key_len != 36 && key_len != 52 &&
+	    key_len != 68) {
+		pr_debug("Invalid key len %d\n",
+			 key_len);
+		kfree_sensitive(key);
+		return ERR_PTR(-EINVAL);
+	}
+	if (key_hash > 0 &&
+	    (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
+		pr_debug("Invalid key len %d for %s\n", key_len,
+			 nvme_auth_hmac_name(key_hash));
+		kfree_sensitive(key);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* The last four bytes are the CRC in little-endian format */
+	key_len -= 4;
+	/*
+	 * The linux crc32 implementation doesn't apply the pre- and
+	 * post-inversion (bitwise complement), so we have to do it
+	 * manually.
+	 */
+	crc = ~crc32(~0, key, key_len);
+
+	if (get_unaligned_le32(key + key_len) != crc) {
+		pr_debug("DH-HMAC-CHAP key crc mismatch (key %08x, crc %08x)\n",
+		       get_unaligned_le32(key + key_len), crc);
+		kfree_sensitive(key);
+		return ERR_PTR(-EKEYREJECTED);
+	}
+	*out_len = key_len;
+	return key;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_extract_secret);
+
+u8 *nvme_auth_transform_key(u8 *key, size_t key_len, u8 key_hash, char *nqn)
+{
+	const char *hmac_name = nvme_auth_hmac_name(key_hash);
+	struct crypto_shash *key_tfm;
+	struct shash_desc *shash;
+	u8 *transformed_key;
+	int ret;
+
+	if (key_hash == 0) {
+		transformed_key = kmemdup(key, key_len, GFP_KERNEL);
+		return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
+	}
+
+	if (!key || !key_len) {
+		pr_warn("No key specified\n");
+		return ERR_PTR(-ENOKEY);
+	}
+	if (!hmac_name) {
+		pr_warn("Invalid key hash id %d\n", key_hash);
+		return ERR_PTR(-EINVAL);
+	}
+
+	key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
+	if (IS_ERR(key_tfm))
+		return (u8 *)key_tfm;
+
+	shash = kmalloc(sizeof(struct shash_desc) +
+			crypto_shash_descsize(key_tfm),
+			GFP_KERNEL);
+	if (!shash) {
+		ret = -ENOMEM;
+		goto out_free_key;
+	}
+
+	transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
+	if (!transformed_key) {
+		ret = -ENOMEM;
+		goto out_free_shash;
+	}
+
+	shash->tfm = key_tfm;
+	ret = crypto_shash_setkey(key_tfm, key, key_len);
+	if (ret < 0)
+		goto out_free_shash;
+	ret = crypto_shash_init(shash);
+	if (ret < 0)
+		goto out_free_shash;
+	ret = crypto_shash_update(shash, nqn, strlen(nqn));
+	if (ret < 0)
+		goto out_free_shash;
+	ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
+	if (ret < 0)
+		goto out_free_shash;
+	ret = crypto_shash_final(shash, transformed_key);
+out_free_shash:
+	kfree(shash);
+out_free_key:
+	crypto_free_shash(key_tfm);
+	if (ret < 0) {
+		kfree_sensitive(transformed_key);
+		return ERR_PTR(ret);
+	}
+	return transformed_key;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
+
+static int nvme_auth_send(struct nvme_ctrl *ctrl, int qid,
+		void *data, size_t tl)
+{
+	struct nvme_command cmd = {};
+	blk_mq_req_flags_t flags = qid == NVME_QID_ANY ?
+		0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED;
+	struct request_queue *q = qid == NVME_QID_ANY ?
+		ctrl->fabrics_q : ctrl->connect_q;
+	int ret;
+
+	cmd.auth_send.opcode = nvme_fabrics_command;
+	cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
+	cmd.auth_send.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
+	cmd.auth_send.spsp0 = 0x01;
+	cmd.auth_send.spsp1 = 0x01;
+	cmd.auth_send.tl = cpu_to_le32(tl);
+
+	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, tl, 0, qid,
+				     0, flags);
+	if (ret > 0)
+		dev_dbg(ctrl->device,
+			"%s: qid %d nvme status %d\n", __func__, qid, ret);
+	else if (ret < 0)
+		dev_dbg(ctrl->device,
+			"%s: qid %d error %d\n", __func__, qid, ret);
+	return ret;
+}
+
+static int nvme_auth_receive(struct nvme_ctrl *ctrl, int qid,
+		void *buf, size_t al)
+{
+	struct nvme_command cmd = {};
+	blk_mq_req_flags_t flags = qid == NVME_QID_ANY ?
+		0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED;
+	struct request_queue *q = qid == NVME_QID_ANY ?
+		ctrl->fabrics_q : ctrl->connect_q;
+	int ret;
+
+	cmd.auth_receive.opcode = nvme_fabrics_command;
+	cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
+	cmd.auth_receive.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
+	cmd.auth_receive.spsp0 = 0x01;
+	cmd.auth_receive.spsp1 = 0x01;
+	cmd.auth_receive.al = cpu_to_le32(al);
+
+	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, buf, al, 0, qid,
+				     0, flags);
+	if (ret > 0) {
+		dev_dbg(ctrl->device, "%s: qid %d nvme status %x\n",
+			__func__, qid, ret);
+		ret = -EIO;
+	}
+	if (ret < 0) {
+		dev_dbg(ctrl->device, "%s: qid %d error %d\n",
+			__func__, qid, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
+		struct nvmf_auth_dhchap_failure_data *data,
+		u16 transaction, u8 expected_msg)
+{
+	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
+		__func__, qid, data->auth_type, data->auth_id);
+
+	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
+	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+		return data->rescode_exp;
+	}
+	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
+	    data->auth_id != expected_msg) {
+		dev_warn(ctrl->device,
+			 "qid %d invalid message %02x/%02x\n",
+			 qid, data->auth_type, data->auth_id);
+		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+	}
+	if (le16_to_cpu(data->t_id) != transaction) {
+		dev_warn(ctrl->device,
+			 "qid %d invalid transaction ID %d\n",
+			 qid, le16_to_cpu(data->t_id));
+		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+	}
+	return 0;
+}
+
+static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
+		struct nvme_dhchap_queue_context *chap)
+{
+	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
+	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
+
+	if (chap->buf_size < size) {
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+		return -EINVAL;
+	}
+	memset((u8 *)chap->buf, 0, size);
+	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+	data->t_id = cpu_to_le16(chap->transaction);
+	data->sc_c = 0; /* No secure channel concatenation */
+	data->napd = 1;
+	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
+	data->auth_protocol[0].dhchap.halen = 3;
+	data->auth_protocol[0].dhchap.dhlen = 6;
+	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_DHCHAP_SHA256;
+	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_DHCHAP_SHA384;
+	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_DHCHAP_SHA512;
+	data->auth_protocol[0].dhchap.idlist[3] = NVME_AUTH_DHCHAP_DHGROUP_NULL;
+	data->auth_protocol[0].dhchap.idlist[4] = NVME_AUTH_DHCHAP_DHGROUP_2048;
+	data->auth_protocol[0].dhchap.idlist[5] = NVME_AUTH_DHCHAP_DHGROUP_3072;
+	data->auth_protocol[0].dhchap.idlist[6] = NVME_AUTH_DHCHAP_DHGROUP_4096;
+	data->auth_protocol[0].dhchap.idlist[7] = NVME_AUTH_DHCHAP_DHGROUP_6144;
+	data->auth_protocol[0].dhchap.idlist[8] = NVME_AUTH_DHCHAP_DHGROUP_8192;
+
+	return size;
+}
+
+static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
+		struct nvme_dhchap_queue_context *chap)
+{
+	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
+	u16 dhvlen = le16_to_cpu(data->dhvlen);
+	size_t size = sizeof(*data) + data->hl + dhvlen;
+	const char *hmac_name, *kpp_name;
+
+	if (chap->buf_size < size) {
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+		return NVME_SC_INVALID_FIELD;
+	}
+
+	hmac_name = nvme_auth_hmac_name(data->hashid);
+	if (!hmac_name) {
+		dev_warn(ctrl->device,
+			 "qid %d: invalid HASH ID %d\n",
+			 chap->qid, data->hashid);
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+		return NVME_SC_INVALID_FIELD;
+	}
+
+	if (chap->hash_id == data->hashid && chap->shash_tfm &&
+	    !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
+	    crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
+		dev_dbg(ctrl->device,
+			"qid %d: reuse existing hash %s\n",
+			chap->qid, hmac_name);
+		goto select_kpp;
+	}
+
+	/* Reset if hash cannot be reused */
+	if (chap->shash_tfm) {
+		crypto_free_shash(chap->shash_tfm);
+		chap->hash_id = 0;
+		chap->hash_len = 0;
+	}
+	chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
+					     CRYPTO_ALG_ALLOCATES_MEMORY);
+	if (IS_ERR(chap->shash_tfm)) {
+		dev_warn(ctrl->device,
+			 "qid %d: failed to allocate hash %s, error %ld\n",
+			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
+		chap->shash_tfm = NULL;
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+		return NVME_SC_AUTH_REQUIRED;
+	}
+
+	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
+		dev_warn(ctrl->device,
+			 "qid %d: invalid hash length %d\n",
+			 chap->qid, data->hl);
+		crypto_free_shash(chap->shash_tfm);
+		chap->shash_tfm = NULL;
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+		return NVME_SC_AUTH_REQUIRED;
+	}
+
+	/* Reset host response if the hash had been changed */
+	if (chap->hash_id != data->hashid) {
+		kfree(chap->host_response);
+		chap->host_response = NULL;
+	}
+
+	chap->hash_id = data->hashid;
+	chap->hash_len = data->hl;
+	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
+		chap->qid, hmac_name);
+
+select_kpp:
+	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
+	if (!kpp_name) {
+		dev_warn(ctrl->device,
+			 "qid %d: invalid DH group id %d\n",
+			 chap->qid, data->dhgid);
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+		return NVME_SC_AUTH_REQUIRED;
+	}
+
+	if (data->dhgid != NVME_AUTH_DHCHAP_DHGROUP_NULL) {
+		dev_warn(ctrl->device,
+			 "qid %d: unsupported DH group %s\n",
+			 chap->qid, kpp_name);
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+		return NVME_SC_AUTH_REQUIRED;
+	} else if (dhvlen != 0) {
+		dev_warn(ctrl->device,
+			 "qid %d: invalid DH value for NULL DH\n",
+			 chap->qid);
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+		return NVME_SC_INVALID_FIELD;
+	}
+	chap->dhgroup_id = data->dhgid;
+
+	chap->s1 = le32_to_cpu(data->seqnum);
+	memcpy(chap->c1, data->cval, chap->hash_len);
+
+	return 0;
+}
+
+static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
+		struct nvme_dhchap_queue_context *chap)
+{
+	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
+	size_t size = sizeof(*data);
+
+	size += 2 * chap->hash_len;
+
+	if (chap->buf_size < size) {
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+		return -EINVAL;
+	}
+
+	memset(chap->buf, 0, size);
+	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+	data->t_id = cpu_to_le16(chap->transaction);
+	data->hl = chap->hash_len;
+	data->dhvlen = 0;
+	memcpy(data->rval, chap->response, chap->hash_len);
+	if (ctrl->opts->dhchap_ctrl_secret) {
+		get_random_bytes(chap->c2, chap->hash_len);
+		data->cvalid = 1;
+		chap->s2 = atomic_inc_return(&nvme_dhchap_seqnum);
+		memcpy(data->rval + chap->hash_len, chap->c2,
+		       chap->hash_len);
+		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
+			__func__, chap->qid,
+			chap->hash_len, chap->c2);
+	} else {
+		memset(chap->c2, 0, chap->hash_len);
+		chap->s2 = 0;
+	}
+	data->seqnum = cpu_to_le32(chap->s2);
+	return size;
+}
+
+static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
+		struct nvme_dhchap_queue_context *chap)
+{
+	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
+	size_t size = sizeof(*data);
+
+	if (ctrl->opts->dhchap_ctrl_secret)
+		size += chap->hash_len;
+
+	if (chap->buf_size < size) {
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+		return NVME_SC_INVALID_FIELD;
+	}
+
+	if (data->hl != chap->hash_len) {
+		dev_warn(ctrl->device,
+			 "qid %d: invalid hash length %d\n",
+			 chap->qid, data->hl);
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+		return NVME_SC_INVALID_FIELD;
+	}
+
+	/* Just print out information for the admin queue */
+	if (chap->qid == -1)
+		dev_info(ctrl->device,
+			 "qid 0: authenticated with hash %s dhgroup %s\n",
+			 nvme_auth_hmac_name(chap->hash_id),
+			 nvme_auth_dhgroup_name(chap->dhgroup_id));
+
+	if (!data->rvalid)
+		return 0;
+
+	/* Validate controller response */
+	if (memcmp(chap->response, data->rval, data->hl)) {
+		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
+			__func__, chap->qid, chap->hash_len, data->rval);
+		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
+			__func__, chap->qid, chap->hash_len, chap->response);
+		dev_warn(ctrl->device,
+			 "qid %d: controller authentication failed\n",
+			 chap->qid);
+		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+		return NVME_SC_AUTH_REQUIRED;
+	}
+
+	/* Just print out information for the admin queue */
+	if (chap->qid == -1)
+		dev_info(ctrl->device,
+			 "qid 0: controller authenticated\n");
+	return 0;
+}
+
+static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
+		struct nvme_dhchap_queue_context *chap)
+{
+	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
+	size_t size = sizeof(*data);
+
+	memset(chap->buf, 0, size);
+	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+	data->t_id = cpu_to_le16(chap->transaction);
+
+	return size;
+}
+
+static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
+		struct nvme_dhchap_queue_context *chap)
+{
+	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
+	size_t size = sizeof(*data);
+
+	memset(chap->buf, 0, size);
+	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+	data->t_id = cpu_to_le16(chap->transaction);
+	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+	data->rescode_exp = chap->status;
+
+	return size;
+}
+
+static int nvme_auth_dhchap_host_response(struct nvme_ctrl *ctrl,
+		struct nvme_dhchap_queue_context *chap)
+{
+	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+	u8 buf[4], *challenge = chap->c1;
+	int ret;
+
+	dev_dbg(ctrl->device, "%s: qid %d host response seq %d transaction %d\n",
+		__func__, chap->qid, chap->s1, chap->transaction);
+
+	if (!chap->host_response) {
+		chap->host_response = nvme_auth_transform_key(ctrl->dhchap_key,
+					ctrl->dhchap_key_len,
+					ctrl->dhchap_key_hash,
+					ctrl->opts->host->nqn);
+		if (IS_ERR(chap->host_response)) {
+			ret = PTR_ERR(chap->host_response);
+			chap->host_response = NULL;
+			return ret;
+		}
+	} else {
+		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
+			__func__, chap->qid);
+	}
+
+	ret = crypto_shash_setkey(chap->shash_tfm,
+			chap->host_response, ctrl->dhchap_key_len);
+	if (ret) {
+		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+			 chap->qid, ret);
+		goto out;
+	}
+
+	shash->tfm = chap->shash_tfm;
+	ret = crypto_shash_init(shash);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, challenge, chap->hash_len);
+	if (ret)
+		goto out;
+	put_unaligned_le32(chap->s1, buf);
+	ret = crypto_shash_update(shash, buf, 4);
+	if (ret)
+		goto out;
+	put_unaligned_le16(chap->transaction, buf);
+	ret = crypto_shash_update(shash, buf, 2);
+	if (ret)
+		goto out;
+	memset(buf, 0, sizeof(buf));
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, "HostHost", 8);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+				  strlen(ctrl->opts->host->nqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+			    strlen(ctrl->opts->subsysnqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_final(shash, chap->response);
+out:
+	if (challenge != chap->c1)
+		kfree(challenge);
+	return ret;
+}
+
+static int nvme_auth_dhchap_ctrl_response(struct nvme_ctrl *ctrl,
+		struct nvme_dhchap_queue_context *chap)
+{
+	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+	u8 *ctrl_response;
+	u8 buf[4], *challenge = chap->c2;
+	int ret;
+
+	ctrl_response = nvme_auth_transform_key(ctrl->dhchap_ctrl_key,
+				ctrl->dhchap_ctrl_key_len,
+				ctrl->dhchap_ctrl_key_hash,
+				ctrl->opts->subsysnqn);
+	if (IS_ERR(ctrl_response)) {
+		ret = PTR_ERR(ctrl_response);
+		return ret;
+	}
+	ret = crypto_shash_setkey(chap->shash_tfm,
+			ctrl_response, ctrl->dhchap_ctrl_key_len);
+	if (ret) {
+		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+			 chap->qid, ret);
+		goto out;
+	}
+
+	dev_dbg(ctrl->device, "%s: qid %d host response seq %d transaction %d\n",
+		__func__, chap->qid, chap->s2, chap->transaction);
+	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
+		__func__, chap->qid, chap->hash_len, challenge);
+	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
+		__func__, chap->qid, ctrl->opts->subsysnqn);
+	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
+		__func__, chap->qid, ctrl->opts->host->nqn);
+	shash->tfm = chap->shash_tfm;
+	ret = crypto_shash_init(shash);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, challenge, chap->hash_len);
+	if (ret)
+		goto out;
+	put_unaligned_le32(chap->s2, buf);
+	ret = crypto_shash_update(shash, buf, 4);
+	if (ret)
+		goto out;
+	put_unaligned_le16(chap->transaction, buf);
+	ret = crypto_shash_update(shash, buf, 2);
+	if (ret)
+		goto out;
+	memset(buf, 0, 4);
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, "Controller", 10);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+				  strlen(ctrl->opts->subsysnqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+				  strlen(ctrl->opts->host->nqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_final(shash, chap->response);
+out:
+	if (challenge != chap->c2)
+		kfree(challenge);
+	return ret;
+}
+
+int nvme_auth_generate_key(struct nvme_ctrl *ctrl)
+{
+	u8 *secret = ctrl->opts->dhchap_secret;
+	u8 *key;
+	size_t key_len;
+	u8 key_hash;
+
+	if (!secret)
+		return 0;
+
+	if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
+		return -EINVAL;
+
+	/* Pass in the secret without the 'DHHC-1:XX:' prefix */
+	key = nvme_auth_extract_secret(secret + 10, key_hash,
+				       &key_len);
+	if (IS_ERR(key)) {
+		dev_dbg(ctrl->device, "failed to extract key, error %ld\n",
+			PTR_ERR(key));
+		return PTR_ERR(key);
+	}
+
+	ctrl->dhchap_key = key;
+	key = NULL;
+	ctrl->dhchap_key_len = key_len;
+	ctrl->dhchap_key_hash = key_hash;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+
+int nvme_auth_generate_ctrl_key(struct nvme_ctrl *ctrl)
+{
+	u8 *secret = ctrl->opts->dhchap_ctrl_secret;
+	u8 *key;
+	size_t key_len;
+	u8 key_hash;
+
+	if (!secret)
+		return 0;
+
+	if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
+		return -EINVAL;
+
+	/* Pass in the secret without the 'DHHC-1:XX:' prefix */
+	key = nvme_auth_extract_secret(secret + 10, key_hash,
+				       &key_len);
+	if (IS_ERR(key))
+		return PTR_ERR(key);
+
+	ctrl->dhchap_ctrl_key = key;
+	key = NULL;
+	ctrl->dhchap_ctrl_key_len = key_len;
+	ctrl->dhchap_ctrl_key_hash = key_hash;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_ctrl_key);
+
+static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
+{
+	chap->status = 0;
+	chap->error = 0;
+	chap->s1 = 0;
+	chap->s2 = 0;
+	chap->transaction = 0;
+	memset(chap->c1, 0, sizeof(chap->c1));
+	memset(chap->c2, 0, sizeof(chap->c2));
+}
+
+static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
+{
+	if (chap->shash_tfm)
+		crypto_free_shash(chap->shash_tfm);
+	kfree_sensitive(chap->host_response);
+	kfree(chap->buf);
+	kfree(chap);
+}
+
+/*
+ * Run one DH-HMAC-CHAP transaction for a single queue.
+ *
+ * Host side of the protocol from NVMe TPAR 8006:
+ *   negotiate -> challenge -> reply -> success1 -> success2
+ * Failures before a valid challenge was received simply record the
+ * error and return; failures after that point branch to the fail2
+ * label, which sends a failure2 message to the controller.  The
+ * outcome lands in chap->error (0 on success) and is collected by
+ * nvme_auth_wait(); chap->status carries the DH-CHAP status code used
+ * in the failure2 message.
+ */
+static void __nvme_auth_work(struct work_struct *work)
+{
+	struct nvme_dhchap_queue_context *chap =
+		container_of(work, struct nvme_dhchap_queue_context, auth_work);
+	struct nvme_ctrl *ctrl = chap->ctrl;
+	size_t tl;
+	int ret = 0;
+
+	/*
+	 * NOTE(review): ctrl->transaction is incremented without locking;
+	 * confirm that concurrent per-queue negotiations cannot race and
+	 * pick the same transaction id.
+	 */
+	chap->transaction = ctrl->transaction++;
+
+	/* DH-HMAC-CHAP Step 1: send negotiate */
+	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
+		__func__, chap->qid);
+	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
+	if (ret < 0) {
+		chap->error = ret;
+		return;
+	}
+	tl = ret;
+	ret = nvme_auth_send(ctrl, chap->qid, chap->buf, tl);
+	if (ret) {
+		chap->error = ret;
+		return;
+	}
+
+	/* DH-HMAC-CHAP Step 2: receive challenge */
+	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
+		__func__, chap->qid);
+
+	memset(chap->buf, 0, chap->buf_size);
+	ret = nvme_auth_receive(ctrl, chap->qid, chap->buf, chap->buf_size);
+	if (ret) {
+		dev_warn(ctrl->device,
+			 "qid %d failed to receive challenge, %s %d\n",
+			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
+		chap->error = ret;
+		return;
+	}
+	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
+					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
+	if (ret) {
+		/* ret here is a DH-CHAP status code, not an errno */
+		chap->status = ret;
+		chap->error = NVME_SC_AUTH_REQUIRED;
+		return;
+	}
+
+	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
+	if (ret) {
+		/* Invalid challenge parameters */
+		goto fail2;
+	}
+
+	dev_dbg(ctrl->device, "%s: qid %d host response\n",
+		__func__, chap->qid);
+	ret = nvme_auth_dhchap_host_response(ctrl, chap);
+	if (ret)
+		goto fail2;
+
+	/* DH-HMAC-CHAP Step 3: send reply */
+	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
+		__func__, chap->qid);
+	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
+	if (ret < 0)
+		goto fail2;
+
+	tl = ret;
+	ret = nvme_auth_send(ctrl, chap->qid, chap->buf, tl);
+	if (ret)
+		goto fail2;
+
+	/* DH-HMAC-CHAP Step 4: receive success1 */
+	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
+		__func__, chap->qid);
+
+	memset(chap->buf, 0, chap->buf_size);
+	ret = nvme_auth_receive(ctrl, chap->qid, chap->buf, chap->buf_size);
+	if (ret) {
+		dev_warn(ctrl->device,
+			 "qid %d failed to receive success1, %s %d\n",
+			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
+		chap->error = ret;
+		return;
+	}
+	ret = nvme_auth_receive_validate(ctrl, chap->qid,
+					 chap->buf, chap->transaction,
+					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
+	if (ret) {
+		chap->status = ret;
+		chap->error = NVME_SC_AUTH_REQUIRED;
+		return;
+	}
+
+	/* Bi-directional auth: verify the controller's response as well */
+	if (ctrl->opts->dhchap_ctrl_secret) {
+		dev_dbg(ctrl->device,
+			"%s: qid %d controller response\n",
+			__func__, chap->qid);
+		ret = nvme_auth_dhchap_ctrl_response(ctrl, chap);
+		if (ret)
+			goto fail2;
+	}
+
+	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
+	if (ret < 0) {
+		/* Controller authentication failed */
+		goto fail2;
+	}
+
+	/* DH-HMAC-CHAP Step 5: send success2 */
+	dev_dbg(ctrl->device, "%s: qid %d send success2\n",
+		__func__, chap->qid);
+	tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
+	ret = nvme_auth_send(ctrl, chap->qid, chap->buf, tl);
+	if (!ret) {
+		chap->error = 0;
+		return;
+	}
+
+fail2:
+	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
+		__func__, chap->qid, chap->status);
+	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+	ret = nvme_auth_send(ctrl, chap->qid, chap->buf, tl);
+	if (!ret)
+		ret = -EPROTO;
+	chap->error = ret;
+}
+
+/*
+ * Queue a DH-HMAC-CHAP negotiation for @qid.
+ *
+ * If a context for this queue already exists it is flushed, reset and
+ * re-queued; otherwise a new context is allocated and added to
+ * ctrl->dhchap_auth_list.  The protocol itself runs asynchronously in
+ * __nvme_auth_work(); use nvme_auth_wait() to collect the result.
+ *
+ * Returns 0 if the negotiation was queued, -ENOKEY if no host key has
+ * been generated, or -ENOMEM on allocation failure.
+ */
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+	struct nvme_dhchap_queue_context *chap;
+
+	if (!ctrl->dhchap_key || !ctrl->dhchap_key_len) {
+		dev_warn(ctrl->device, "qid %d: no key\n", qid);
+		return -ENOKEY;
+	}
+
+	mutex_lock(&ctrl->dhchap_auth_mutex);
+	/* Check if the context is already queued */
+	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+		WARN_ON(!chap->buf);
+		if (chap->qid == qid) {
+			dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
+			mutex_unlock(&ctrl->dhchap_auth_mutex);
+			flush_work(&chap->auth_work);
+			__nvme_auth_reset(chap);
+			queue_work(nvme_wq, &chap->auth_work);
+			return 0;
+		}
+	}
+	chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+	if (!chap) {
+		mutex_unlock(&ctrl->dhchap_auth_mutex);
+		return -ENOMEM;
+	}
+	chap->qid = qid;
+	chap->ctrl = ctrl;
+
+	/*
+	 * Allocate a large enough buffer for the entire negotiation:
+	 * 4k is sufficient even for the largest DH group (ffdhe8192).
+	 */
+	chap->buf_size = 4096;
+	chap->buf = kzalloc(chap->buf_size, GFP_KERNEL);
+	if (!chap->buf) {
+		mutex_unlock(&ctrl->dhchap_auth_mutex);
+		kfree(chap);
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&chap->auth_work, __nvme_auth_work);
+	list_add(&chap->entry, &ctrl->dhchap_auth_list);
+	mutex_unlock(&ctrl->dhchap_auth_mutex);
+	queue_work(nvme_wq, &chap->auth_work);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
+
+/*
+ * Wait for the outstanding negotiation on @qid and return its result.
+ *
+ * Returns chap->error from the finished work item (0 on success, a
+ * negative errno or NVMe status code otherwise), or -ENXIO if no
+ * context exists for this queue.
+ *
+ * NOTE(review): the mutex is dropped before flush_work(), so this
+ * relies on the context not being freed concurrently (nvme_auth_free()
+ * runs only at controller teardown) — confirm that assumption holds.
+ */
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+	struct nvme_dhchap_queue_context *chap;
+	int ret;
+
+	mutex_lock(&ctrl->dhchap_auth_mutex);
+	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+		if (chap->qid != qid)
+			continue;
+		mutex_unlock(&ctrl->dhchap_auth_mutex);
+		flush_work(&chap->auth_work);
+		ret = chap->error;
+		/* Reset the context so it can be re-used for the next run */
+		__nvme_auth_reset(chap);
+		return ret;
+	}
+	mutex_unlock(&ctrl->dhchap_auth_mutex);
+	return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_wait);
+
+/*
+ * Flush and reset every queued per-queue authentication context.
+ *
+ * Hold dhchap_auth_mutex across the whole list walk: contexts are only
+ * removed from the list under this mutex (nvme_auth_free()), and
+ * __nvme_auth_work() never takes it, so flushing while holding the
+ * lock cannot deadlock.  The previous code dropped the mutex inside
+ * the loop and unlocked it again after the loop, which both walked the
+ * list unprotected and double-unlocked the mutex for a non-empty list.
+ */
+void nvme_auth_reset(struct nvme_ctrl *ctrl)
+{
+	struct nvme_dhchap_queue_context *chap;
+
+	mutex_lock(&ctrl->dhchap_auth_mutex);
+	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+		flush_work(&chap->auth_work);
+		__nvme_auth_reset(chap);
+	}
+	mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_reset);
+
+/*
+ * Controller-wide (re-)authentication work.
+ *
+ * Authenticates the admin queue synchronously first; only on success
+ * are negotiations queued for the I/O queues.
+ *
+ * NOTE(review): the I/O queue negotiations are fire-and-forget here —
+ * nvme_auth_wait() is never called for qid > 0, so their results are
+ * only observable via the per-queue contexts.  Confirm this is the
+ * intended behaviour.
+ */
+static void nvme_dhchap_auth_work(struct work_struct *work)
+{
+	struct nvme_ctrl *ctrl =
+		container_of(work, struct nvme_ctrl, dhchap_auth_work);
+	int ret, q;
+
+	/* Authenticate admin queue first */
+	ret = nvme_auth_negotiate(ctrl, NVME_QID_ANY);
+	if (ret) {
+		dev_warn(ctrl->device,
+			 "qid 0: error %d setting up authentication\n", ret);
+		return;
+	}
+	ret = nvme_auth_wait(ctrl, NVME_QID_ANY);
+	if (ret) {
+		dev_warn(ctrl->device,
+			 "qid 0: authentication failed\n");
+		return;
+	}
+
+	for (q = 1; q < ctrl->queue_count; q++) {
+		ret = nvme_auth_negotiate(ctrl, q);
+		if (ret) {
+			dev_warn(ctrl->device,
+				 "qid %d: error %d setting up authentication\n",
+				 q, ret);
+			break;
+		}
+	}
+
+	/*
+	 * Failure is a soft-state; credentials remain valid until
+	 * the controller terminates the connection.
+	 */
+}
+
+/*
+ * Initialize the per-controller DH-HMAC-CHAP state and derive the
+ * host and controller keys from the fabrics options.
+ *
+ * Key generation failures are not fatal at this point — a missing key
+ * is caught again by nvme_auth_negotiate() (-ENOKEY) — but the return
+ * values must not be ignored silently.
+ * NOTE(review): assumes nvme_auth_generate_key() returns 0 when no
+ * dhchap_secret is configured; confirm so the warning does not fire
+ * for controllers that do not use authentication.
+ */
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+	INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
+	mutex_init(&ctrl->dhchap_auth_mutex);
+	ret = nvme_auth_generate_key(ctrl);
+	if (ret)
+		dev_warn(ctrl->device,
+			 "failed to generate host key, error %d\n", ret);
+	ret = nvme_auth_generate_ctrl_key(ctrl);
+	if (ret)
+		dev_warn(ctrl->device,
+			 "failed to generate ctrl key, error %d\n", ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
+
+/*
+ * Stop all in-flight authentication work for @ctrl.
+ *
+ * Cancel the controller-wide work first so no new per-queue
+ * negotiations get queued, then cancel every queued per-queue work
+ * item.  Contexts stay on the list (and keep their state) so a later
+ * re-authentication can re-use them; nvme_auth_free() releases them.
+ * __nvme_auth_work() does not take dhchap_auth_mutex, so cancelling
+ * under the mutex is safe.
+ */
+void nvme_auth_stop(struct nvme_ctrl *ctrl)
+{
+	struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+	cancel_work_sync(&ctrl->dhchap_auth_work);
+	mutex_lock(&ctrl->dhchap_auth_mutex);
+	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+		cancel_work_sync(&chap->auth_work);
+	mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_stop);
+
+/*
+ * Release all authentication state for @ctrl at teardown.
+ *
+ * Frees every per-queue context and the derived host/controller keys.
+ */
+void nvme_auth_free(struct nvme_ctrl *ctrl)
+{
+	struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+	mutex_lock(&ctrl->dhchap_auth_mutex);
+	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
+		list_del_init(&chap->entry);
+		flush_work(&chap->auth_work);
+		__nvme_auth_free(chap);
+	}
+	mutex_unlock(&ctrl->dhchap_auth_mutex);
+	/*
+	 * The transformed DH-CHAP keys are secret material: wipe them
+	 * before freeing, matching the kfree_sensitive() use for
+	 * host_response in __nvme_auth_free().
+	 */
+	kfree_sensitive(ctrl->dhchap_key);
+	ctrl->dhchap_key = NULL;
+	ctrl->dhchap_key_len = 0;
+	ctrl->dhchap_key_hash = 0;
+	kfree_sensitive(ctrl->dhchap_ctrl_key);
+	ctrl->dhchap_ctrl_key = NULL;
+	ctrl->dhchap_ctrl_key_len = 0;
+	ctrl->dhchap_ctrl_key_hash = 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free);
diff --git a/drivers/nvme/host/auth.h b/drivers/nvme/host/auth.h
new file mode 100644
index 000000000000..216957848398
--- /dev/null
+++ b/drivers/nvme/host/auth.h
@@ -0,0 +1,25 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions
+ */
+
+#ifndef _NVME_AUTH_H
+#define _NVME_AUTH_H
+
+#include <crypto/kpp.h>
+
+const char *nvme_auth_dhgroup_name(int dhgroup_id);
+int nvme_auth_dhgroup_pubkey_size(int dhgroup_id);
+int nvme_auth_dhgroup_privkey_size(int dhgroup_id);
+const char *nvme_auth_dhgroup_kpp(int dhgroup_id);
+int nvme_auth_dhgroup_id(const char *dhgroup_name);
+
+const char *nvme_auth_hmac_name(int hmac_id);
+const char *nvme_auth_digest_name(int hmac_id);
+int nvme_auth_hmac_id(const char *hmac_name);
+
+unsigned char *nvme_auth_extract_secret(unsigned char *secret,
+					u8 key_hash, size_t *key_len);
+u8 *nvme_auth_transform_key(u8 *key, size_t key_len, u8 key_hash, char *nqn);
+
+#endif /* _NVME_AUTH_H */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4b5de8f5435a..42eea7391066 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -24,6 +24,7 @@ 
 
 #include "nvme.h"
 #include "fabrics.h"
+#include "auth.h"
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -303,6 +304,7 @@  enum nvme_disposition {
 	COMPLETE,
 	RETRY,
 	FAILOVER,
+	AUTHENTICATE,
 };
 
 static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
@@ -310,6 +312,9 @@  static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
 	if (likely(nvme_req(req)->status == 0))
 		return COMPLETE;
 
+	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+		return AUTHENTICATE;
+
 	if (blk_noretry_request(req) ||
 	    (nvme_req(req)->status & NVME_SC_DNR) ||
 	    nvme_req(req)->retries >= nvme_max_retries)
@@ -346,11 +351,13 @@  static inline void nvme_end_req(struct request *req)
 
 void nvme_complete_rq(struct request *req)
 {
+	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
 	trace_nvme_complete_rq(req);
 	nvme_cleanup_cmd(req);
 
-	if (nvme_req(req)->ctrl->kas)
-		nvme_req(req)->ctrl->comp_seen = true;
+	if (ctrl->kas)
+		ctrl->comp_seen = true;
 
 	switch (nvme_decide_disposition(req)) {
 	case COMPLETE:
@@ -362,6 +369,14 @@  void nvme_complete_rq(struct request *req)
 	case FAILOVER:
 		nvme_failover_req(req);
 		return;
+	case AUTHENTICATE:
+#ifdef CONFIG_NVME_AUTH
+		queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+		nvme_retry_req(req);
+#else
+		nvme_end_req(req);
+#endif
+		return;
 	}
 }
 EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -699,7 +714,9 @@  bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		switch (ctrl->state) {
 		case NVME_CTRL_CONNECTING:
 			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
-			    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+			    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
+			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
+			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
 				return true;
 			break;
 		default:
@@ -3494,6 +3511,100 @@  static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
 static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
 	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
 
+#ifdef CONFIG_NVME_AUTH
+/* Show the configured host DH-CHAP secret, or "none" if unset. */
+static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+	const char *secret = ctrl->opts->dhchap_secret;
+
+	return sysfs_emit(buf, "%s\n", secret ? secret : "none");
+}
+
+/*
+ * Store a new host DH-CHAP secret and trigger re-authentication.
+ *
+ * Only controllers that were created with a dhchap_secret fabrics
+ * option may be re-keyed.  The secret must be in the "DHHC-1:"
+ * representation (NVMe 2.0 section 8.13.5.8).
+ */
+static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+	struct nvmf_ctrl_options *opts = ctrl->opts;
+	char *dhchap_secret;
+
+	if (!opts->dhchap_secret)
+		return -EINVAL;
+	if (count < 7)
+		return -EINVAL;
+	if (memcmp(buf, "DHHC-1:", 7))
+		return -EINVAL;
+
+	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+	if (!dhchap_secret)
+		return -ENOMEM;
+	memcpy(dhchap_secret, buf, count);
+	/*
+	 * sysfs writes usually carry a trailing newline; strip it so the
+	 * comparison against the (newline-free) fabrics option works and
+	 * the stored secret parses cleanly.
+	 */
+	if (dhchap_secret[count - 1] == '\n')
+		dhchap_secret[count - 1] = '\0';
+	nvme_auth_stop(ctrl);
+	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
+		kfree(opts->dhchap_secret);
+		opts->dhchap_secret = dhchap_secret;
+		/* Key has changed; re-authenticate with the new key */
+		nvme_auth_reset(ctrl);
+		nvme_auth_generate_key(ctrl);
+	} else {
+		/* Same key as before: don't leak the duplicate copy */
+		kfree(dhchap_secret);
+	}
+	/* Start re-authentication */
+	dev_info(ctrl->device, "re-authenticating controller\n");
+	queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+	return count;
+}
+static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
+	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
+
+/* Show the configured controller DH-CHAP secret, or "none" if unset. */
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+	const char *secret = ctrl->opts->dhchap_ctrl_secret;
+
+	return sysfs_emit(buf, "%s\n", secret ? secret : "none");
+}
+
+/*
+ * Store a new controller DH-CHAP secret (for bi-directional
+ * authentication) and trigger re-authentication.
+ *
+ * Only controllers created with a dhchap_ctrl_secret fabrics option
+ * may be re-keyed; the secret must use the "DHHC-1:" representation.
+ */
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+	struct nvmf_ctrl_options *opts = ctrl->opts;
+	char *dhchap_secret;
+
+	if (!opts->dhchap_ctrl_secret)
+		return -EINVAL;
+	if (count < 7)
+		return -EINVAL;
+	if (memcmp(buf, "DHHC-1:", 7))
+		return -EINVAL;
+
+	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+	if (!dhchap_secret)
+		return -ENOMEM;
+	memcpy(dhchap_secret, buf, count);
+	/*
+	 * sysfs writes usually carry a trailing newline; strip it so the
+	 * comparison against the (newline-free) fabrics option works and
+	 * the stored secret parses cleanly.
+	 */
+	if (dhchap_secret[count - 1] == '\n')
+		dhchap_secret[count - 1] = '\0';
+	nvme_auth_stop(ctrl);
+	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
+		kfree(opts->dhchap_ctrl_secret);
+		opts->dhchap_ctrl_secret = dhchap_secret;
+		/* Key has changed; re-authenticate with the new key */
+		nvme_auth_reset(ctrl);
+		nvme_auth_generate_ctrl_key(ctrl);
+	} else {
+		/* Same key as before: don't leak the duplicate copy */
+		kfree(dhchap_secret);
+	}
+	/* Start re-authentication */
+	dev_info(ctrl->device, "re-authenticating controller\n");
+	queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+	return count;
+}
+static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
+	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
+#endif
+
 static struct attribute *nvme_dev_attrs[] = {
 	&dev_attr_reset_controller.attr,
 	&dev_attr_rescan_controller.attr,
@@ -3515,6 +3626,10 @@  static struct attribute *nvme_dev_attrs[] = {
 	&dev_attr_reconnect_delay.attr,
 	&dev_attr_fast_io_fail_tmo.attr,
 	&dev_attr_kato.attr,
+#ifdef CONFIG_NVME_AUTH
+	&dev_attr_dhchap_secret.attr,
+	&dev_attr_dhchap_ctrl_secret.attr,
+#endif
 	NULL
 };
 
@@ -3538,6 +3653,13 @@  static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
 		return 0;
 	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
 		return 0;
+#ifdef CONFIG_NVME_AUTH
+	/* Both DH-CHAP attributes dereference ctrl->opts in their show path */
+	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
+		return 0;
+	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
+		return 0;
+#endif
 
 	return a->mode;
 }
@@ -4302,8 +4421,10 @@  static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 		 * recovery actions from interfering with the controller's
 		 * firmware activation.
 		 */
-		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+			nvme_auth_stop(ctrl);
 			queue_work(nvme_wq, &ctrl->fw_act_work);
+		}
 		break;
 #ifdef CONFIG_NVME_MULTIPATH
 	case NVME_AER_NOTICE_ANA:
@@ -4350,6 +4471,7 @@  EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
 	nvme_mpath_stop(ctrl);
+	nvme_auth_stop(ctrl);
 	nvme_stop_keep_alive(ctrl);
 	nvme_stop_failfast_work(ctrl);
 	flush_work(&ctrl->async_event_work);
@@ -4404,6 +4526,8 @@  static void nvme_free_ctrl(struct device *dev)
 
 	nvme_free_cels(ctrl);
 	nvme_mpath_uninit(ctrl);
+	nvme_auth_stop(ctrl);
+	nvme_auth_free(ctrl);
 	__free_page(ctrl->discard_page);
 
 	if (subsys) {
@@ -4494,6 +4618,7 @@  int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
 	nvme_mpath_init_ctrl(ctrl);
+	nvme_auth_init_ctrl(ctrl);
 
 	return 0;
 out_free_name:
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index a1343a0790f6..0ac054f80a82 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -370,6 +370,7 @@  int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	union nvme_result res;
 	struct nvmf_connect_data *data;
 	int ret;
+	u32 result;
 
 	cmd.connect.opcode = nvme_fabrics_command;
 	cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -402,8 +403,25 @@  int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 		goto out_free_data;
 	}
 
-	ctrl->cntlid = le16_to_cpu(res.u16);
-
+	result = le32_to_cpu(res.u32);
+	ctrl->cntlid = result & 0xFFFF;
+	if ((result >> 16) & 2) {
+		/* Authentication required */
+		ret = nvme_auth_negotiate(ctrl, NVME_QID_ANY);
+		if (ret) {
+			dev_warn(ctrl->device,
+				 "qid 0: authentication setup failed\n");
+			ret = NVME_SC_AUTH_REQUIRED;
+			goto out_free_data;
+		}
+		ret = nvme_auth_wait(ctrl, NVME_QID_ANY);
+		if (ret)
+			dev_warn(ctrl->device,
+				 "qid 0: authentication failed\n");
+		else
+			dev_info(ctrl->device,
+				 "qid 0: authenticated\n");
+	}
 out_free_data:
 	kfree(data);
 	return ret;
@@ -436,6 +454,7 @@  int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	struct nvmf_connect_data *data;
 	union nvme_result res;
 	int ret;
+	u32 result;
 
 	cmd.connect.opcode = nvme_fabrics_command;
 	cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -461,6 +480,21 @@  int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
 	}
+	result = le32_to_cpu(res.u32);
+	if ((result >> 16) & 2) {
+		/* Authentication required */
+		ret = nvme_auth_negotiate(ctrl, qid);
+		if (ret) {
+			dev_warn(ctrl->device,
+				 "qid %d: authentication setup failed\n", qid);
+			ret = NVME_SC_AUTH_REQUIRED;
+		} else {
+			ret = nvme_auth_wait(ctrl, qid);
+			if (ret)
+				dev_warn(ctrl->device,
+					 "qid %u: authentication failed\n", qid);
+		}
+	}
 	kfree(data);
 	return ret;
 }
@@ -553,6 +587,8 @@  static const match_table_t opt_tokens = {
 	{ NVMF_OPT_TOS,			"tos=%d"		},
 	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"	},
 	{ NVMF_OPT_DISCOVERY,		"discovery"		},
+	{ NVMF_OPT_DHCHAP_SECRET,	"dhchap_secret=%s"	},
+	{ NVMF_OPT_DHCHAP_CTRL_SECRET,	"dhchap_ctrl_secret=%s"	},
 	{ NVMF_OPT_ERR,			NULL			}
 };
 
@@ -831,6 +867,34 @@  static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 		case NVMF_OPT_DISCOVERY:
 			opts->discovery_nqn = true;
 			break;
+		case NVMF_OPT_DHCHAP_SECRET:
+			p = match_strdup(args);
+			if (!p) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+				pr_err("Invalid DH-CHAP secret %s\n", p);
+				ret = -EINVAL;
+				goto out;
+			}
+			kfree(opts->dhchap_secret);
+			opts->dhchap_secret = p;
+			break;
+		case NVMF_OPT_DHCHAP_CTRL_SECRET:
+			p = match_strdup(args);
+			if (!p) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+				pr_err("Invalid DH-CHAP secret %s\n", p);
+				ret = -EINVAL;
+				goto out;
+			}
+			kfree(opts->dhchap_ctrl_secret);
+			opts->dhchap_ctrl_secret = p;
+			break;
 		default:
 			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
 				p);
@@ -949,6 +1013,8 @@  void nvmf_free_options(struct nvmf_ctrl_options *opts)
 	kfree(opts->subsysnqn);
 	kfree(opts->host_traddr);
 	kfree(opts->host_iface);
+	kfree(opts->dhchap_secret);
+	kfree(opts->dhchap_ctrl_secret);
 	kfree(opts);
 }
 EXPORT_SYMBOL_GPL(nvmf_free_options);
@@ -958,7 +1023,8 @@  EXPORT_SYMBOL_GPL(nvmf_free_options);
 				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
 				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
 				 NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
-				 NVMF_OPT_FAIL_FAST_TMO)
+				 NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
+				 NVMF_OPT_DHCHAP_CTRL_SECRET)
 
 static struct nvme_ctrl *
 nvmf_create_ctrl(struct device *dev, const char *buf)
@@ -1175,7 +1241,14 @@  static void __exit nvmf_exit(void)
 	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
 	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
 	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+	BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
+	BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
 	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
+	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
+	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
+	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
+	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
 }
 
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index c3203ff1c654..c2a03d99ac26 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,8 @@  enum {
 	NVMF_OPT_FAIL_FAST_TMO	= 1 << 20,
 	NVMF_OPT_HOST_IFACE	= 1 << 21,
 	NVMF_OPT_DISCOVERY	= 1 << 22,
+	NVMF_OPT_DHCHAP_SECRET	= 1 << 23,
+	NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
 };
 
 /**
@@ -97,6 +99,9 @@  enum {
  * @max_reconnects: maximum number of allowed reconnect attempts before removing
  *              the controller, (-1) means reconnect forever, zero means remove
  *              immediately;
+ * @dhchap_secret: DH-HMAC-CHAP secret
+ * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
+ *              authentication
  * @disable_sqflow: disable controller sq flow control
  * @hdr_digest: generate/verify header digest (TCP)
  * @data_digest: generate/verify data digest (TCP)
@@ -121,6 +126,8 @@  struct nvmf_ctrl_options {
 	unsigned int		kato;
 	struct nvmf_host	*host;
 	int			max_reconnects;
+	char			*dhchap_secret;
+	char			*dhchap_ctrl_secret;
 	bool			disable_sqflow;
 	bool			hdr_digest;
 	bool			data_digest;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b334af8aa264..4d3e33cc6e27 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -324,6 +324,19 @@  struct nvme_ctrl {
 	struct work_struct ana_work;
 #endif
 
+#ifdef CONFIG_NVME_AUTH
+	struct work_struct dhchap_auth_work;
+	struct list_head dhchap_auth_list;
+	struct mutex dhchap_auth_mutex;
+	unsigned char *dhchap_key;
+	unsigned char *dhchap_ctrl_key;
+	size_t dhchap_key_len;
+	size_t dhchap_ctrl_key_len;
+	u8 dhchap_key_hash;
+	u8 dhchap_ctrl_key_hash;
+	u16 transaction;
+#endif
+
 	/* Power saving configuration */
 	u64 ps_max_latency_us;
 	bool apst_enabled;
@@ -910,6 +923,29 @@  static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
 	return ctrl->sgls & ((1 << 0) | (1 << 1));
 }
 
+#ifdef CONFIG_NVME_AUTH
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_auth_stop(struct nvme_ctrl *ctrl);
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
+void nvme_auth_reset(struct nvme_ctrl *ctrl);
+void nvme_auth_free(struct nvme_ctrl *ctrl);
+int nvme_auth_generate_key(struct nvme_ctrl *ctrl);
+int nvme_auth_generate_ctrl_key(struct nvme_ctrl *ctrl);
+#else
+static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {}
+static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
+static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+	return -EPROTONOSUPPORT;
+}
+static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+	return NVME_SC_AUTH_REQUIRED;
+}
+static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
+#endif
+
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			 u8 opcode);
 int nvme_execute_passthru_rq(struct request *rq);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 33bc83d8d992..bd8c724b3d13 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2096,6 +2096,7 @@  static void nvme_tcp_error_recovery_work(struct work_struct *work)
 				struct nvme_tcp_ctrl, err_work);
 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
 
+	nvme_auth_stop(ctrl);
 	nvme_stop_keep_alive(ctrl);
 	nvme_tcp_teardown_io_queues(ctrl, false);
 	/* unquiesce to fail fast pending requests */
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 2a89c5aa0790..1c36fcedea20 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -287,6 +287,34 @@  static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
 	return ret;
 }
 
+/*
+ * Decode an Authentication Send fabrics command for the trace log.
+ *
+ * @spc points at the command-specific bytes past the fabrics opcode;
+ * the offsets used here (spsp0/spsp1/secp at bytes 1-3, transfer
+ * length at bytes 4-7, little endian) are assumed to mirror
+ * struct nvmf_auth_send_command — confirm if that layout changes.
+ */
+static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 spsp0 = spc[1];
+	u8 spsp1 = spc[2];
+	u8 secp = spc[3];
+	u32 tl = get_unaligned_le32(spc + 4);
+
+	trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+			 spsp0, spsp1, secp, tl);
+	trace_seq_putc(p, 0);
+	return ret;
+}
+
+/*
+ * Decode an Authentication Receive fabrics command for the trace log.
+ *
+ * Same layout assumptions as nvme_trace_fabrics_auth_send(), with the
+ * allocation length (al) at bytes 4-7 — assumed to mirror
+ * struct nvmf_auth_receive_command; confirm if that layout changes.
+ */
+static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	u8 spsp0 = spc[1];
+	u8 spsp1 = spc[2];
+	u8 secp = spc[3];
+	u32 al = get_unaligned_le32(spc + 4);
+
+	trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+			 spsp0, spsp1, secp, al);
+	trace_seq_putc(p, 0);
+	return ret;
+}
+
 static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
 {
 	const char *ret = trace_seq_buffer_ptr(p);
@@ -306,6 +334,10 @@  const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
 		return nvme_trace_fabrics_connect(p, spc);
 	case nvme_fabrics_type_property_get:
 		return nvme_trace_fabrics_property_get(p, spc);
+	case nvme_fabrics_type_auth_send:
+		return nvme_trace_fabrics_auth_send(p, spc);
+	case nvme_fabrics_type_auth_receive:
+		return nvme_trace_fabrics_auth_receive(p, spc);
 	default:
 		return nvme_trace_fabrics_common(p, spc);
 	}