
[10/12] nvmet: Implement basic In-Band Authentication

Message ID 20210910064322.67705-11-hare@suse.de (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Series nvme: In-band authentication support

Commit Message

Hannes Reinecke Sept. 10, 2021, 6:43 a.m. UTC
Implement NVMe-oF In-Band authentication according to NVMe TPAR 8006.
This patch adds two additional configfs entries, 'dhchap_key' and 'dhchap_hash',
to the 'host' configfs directory. The 'dhchap_key' must be given in the
ASCII format specified in NVMe 2.0 section 8.13.5.8 'Secret representation'.
'dhchap_hash' is taken from the hash specified in the ASCII
representation of the key, and defaults to 'hmac(sha256)' if no
key transformation has been specified.
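
As an illustration (hypothetical host NQN and a placeholder secret value;
assuming nvmet configfs is mounted at the usual /sys/kernel/config), the
new attributes could be exercised like this, where the '01' hash id in the
secret prefix should map to hmac(sha256):

  # echo "DHHC-1:01:<base64-encoded secret>:" > \
      /sys/kernel/config/nvmet/hosts/nqn.2014-08.org.example:host1/dhchap_key
  # cat /sys/kernel/config/nvmet/hosts/nqn.2014-08.org.example:host1/dhchap_hash
  hmac(sha256)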

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/nvme/target/Kconfig            |  11 +
 drivers/nvme/target/Makefile           |   1 +
 drivers/nvme/target/admin-cmd.c        |   4 +
 drivers/nvme/target/auth.c             | 301 ++++++++++++++++
 drivers/nvme/target/configfs.c         |  71 +++-
 drivers/nvme/target/core.c             |   8 +
 drivers/nvme/target/fabrics-cmd-auth.c | 464 +++++++++++++++++++++++++
 drivers/nvme/target/fabrics-cmd.c      |  30 +-
 drivers/nvme/target/nvmet.h            |  63 ++++
 9 files changed, 950 insertions(+), 3 deletions(-)
 create mode 100644 drivers/nvme/target/auth.c
 create mode 100644 drivers/nvme/target/fabrics-cmd-auth.c

Comments

Sagi Grimberg Sept. 26, 2021, 2:30 p.m. UTC | #1
> +int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
> +{
> +	int ret = 0;
> +	struct nvmet_host_link *p;
> +	struct nvmet_host *host = NULL;
> +	const char *hash_name;
> +
> +	down_read(&nvmet_config_sem);
> +	if (ctrl->subsys->type == NVME_NQN_DISC)
> +		goto out_unlock;

+       if (ctrl->subsys->allow_any_host)
+               goto out_unlock;
Sagi Grimberg Sept. 26, 2021, 10:51 p.m. UTC | #2
> +void nvmet_execute_auth_send(struct nvmet_req *req)
> +{
> +	struct nvmet_ctrl *ctrl = req->sq->ctrl;
> +	struct nvmf_auth_dhchap_success2_data *data;
> +	void *d;
> +	u32 tl;
> +	u16 status = 0;
> +
> +	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
> +		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
> +		req->error_loc =
> +			offsetof(struct nvmf_auth_send_command, secp);
> +		goto done;
> +	}
> +	if (req->cmd->auth_send.spsp0 != 0x01) {
> +		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
> +		req->error_loc =
> +			offsetof(struct nvmf_auth_send_command, spsp0);
> +		goto done;
> +	}
> +	if (req->cmd->auth_send.spsp1 != 0x01) {
> +		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
> +		req->error_loc =
> +			offsetof(struct nvmf_auth_send_command, spsp1);
> +		goto done;
> +	}
> +	tl = le32_to_cpu(req->cmd->auth_send.tl);
> +	if (!tl) {
> +		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
> +		req->error_loc =
> +			offsetof(struct nvmf_auth_send_command, tl);
> +		goto done;
> +	}
> +	if (!nvmet_check_transfer_len(req, tl)) {
> +		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
> +		return;
> +	}
> +
> +	d = kmalloc(tl, GFP_KERNEL);
> +	if (!d) {
> +		status = NVME_SC_INTERNAL;
> +		goto done;
> +	}
> +
> +	status = nvmet_copy_from_sgl(req, 0, d, tl);
> +	if (status) {
> +		kfree(d);
> +		goto done;
> +	}
> +
> +	data = d;
> +	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
> +		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
> +		 req->sq->dhchap_step);
> +	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
> +	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
> +		goto done_failure1;
> +	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
> +		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
> +			/* Restart negotiation */
> +			pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
> +				 ctrl->cntlid, req->sq->qid);

This is the point where you need to reset also auth config as this may
have changed and the host will not create a new controller but rather
re-authenticate on the existing controller.

i.e.

+                       if (!req->sq->qid) {
+                               nvmet_destroy_auth(ctrl);
+                               if (nvmet_setup_auth(ctrl) < 0) {
+                                       pr_err("Failed to setup re-authentication\n");
+                                       goto done_failure1;
+                               }
+                       }



> +			req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
> +		} else if (data->auth_id != req->sq->dhchap_step)
> +			goto done_failure1;
> +		/* Validate negotiation parameters */
> +		status = nvmet_auth_negotiate(req, d);
Hannes Reinecke Sept. 27, 2021, 6:40 a.m. UTC | #3
On 9/27/21 12:51 AM, Sagi Grimberg wrote:
> 
>> +void nvmet_execute_auth_send(struct nvmet_req *req)
>> +{
>> +    struct nvmet_ctrl *ctrl = req->sq->ctrl;
>> +    struct nvmf_auth_dhchap_success2_data *data;
>> +    void *d;
>> +    u32 tl;
>> +    u16 status = 0;
>> +
>> +    if (req->cmd->auth_send.secp != 
>> NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>> +        req->error_loc =
>> +            offsetof(struct nvmf_auth_send_command, secp);
>> +        goto done;
>> +    }
>> +    if (req->cmd->auth_send.spsp0 != 0x01) {
>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>> +        req->error_loc =
>> +            offsetof(struct nvmf_auth_send_command, spsp0);
>> +        goto done;
>> +    }
>> +    if (req->cmd->auth_send.spsp1 != 0x01) {
>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>> +        req->error_loc =
>> +            offsetof(struct nvmf_auth_send_command, spsp1);
>> +        goto done;
>> +    }
>> +    tl = le32_to_cpu(req->cmd->auth_send.tl);
>> +    if (!tl) {
>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>> +        req->error_loc =
>> +            offsetof(struct nvmf_auth_send_command, tl);
>> +        goto done;
>> +    }
>> +    if (!nvmet_check_transfer_len(req, tl)) {
>> +        pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
>> +        return;
>> +    }
>> +
>> +    d = kmalloc(tl, GFP_KERNEL);
>> +    if (!d) {
>> +        status = NVME_SC_INTERNAL;
>> +        goto done;
>> +    }
>> +
>> +    status = nvmet_copy_from_sgl(req, 0, d, tl);
>> +    if (status) {
>> +        kfree(d);
>> +        goto done;
>> +    }
>> +
>> +    data = d;
>> +    pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
>> +         ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
>> +         req->sq->dhchap_step);
>> +    if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
>> +        data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
>> +        goto done_failure1;
>> +    if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
>> +        if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
>> +            /* Restart negotiation */
>> +            pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
>> +                 ctrl->cntlid, req->sq->qid);
> 
> This is the point where you need to reset also auth config as this may
> have changed and the host will not create a new controller but rather
> re-authenticate on the existing controller.
> 
> i.e.
> 
> +                       if (!req->sq->qid) {
> +                               nvmet_destroy_auth(ctrl);
> +                               if (nvmet_setup_auth(ctrl) < 0) {
> +                                       pr_err("Failed to setup re-authentication\n");
> +                                       goto done_failure1;
> +                               }
> +                       }
> 
> 
> 

Not sure. We have two paths by which re-authentication can be triggered.
The first is from the host, which sends a 'negotiate' command to the 
controller (i.e. this path).  Then nothing on the controller has changed, 
and we just need to ensure that we restart negotiation.
That is, we should _not_ reset the authentication (as that would also 
remove the controller keys, which haven't changed). We should just ensure 
that all ephemeral data is regenerated. But that should be handled 
in-line, and I _think_ I have covered all of that.
The other path to trigger re-authentication is when values on the 
controller are changed via configfs. Then, sure, we need to reset the 
controller data and trigger re-authentication.
And there I do agree, that path isn't fully implemented / tested.
But it should be started whenever the configfs values change.

Cheers,

Hannes
Hannes Reinecke Sept. 27, 2021, 7:17 a.m. UTC | #4
On 9/27/21 8:40 AM, Hannes Reinecke wrote:
> On 9/27/21 12:51 AM, Sagi Grimberg wrote:
>>
>>> +void nvmet_execute_auth_send(struct nvmet_req *req)
>>> +{
>>> +    struct nvmet_ctrl *ctrl = req->sq->ctrl;
>>> +    struct nvmf_auth_dhchap_success2_data *data;
>>> +    void *d;
>>> +    u32 tl;
>>> +    u16 status = 0;
>>> +
>>> +    if (req->cmd->auth_send.secp != 
>>> NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>> +        req->error_loc =
>>> +            offsetof(struct nvmf_auth_send_command, secp);
>>> +        goto done;
>>> +    }
>>> +    if (req->cmd->auth_send.spsp0 != 0x01) {
>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>> +        req->error_loc =
>>> +            offsetof(struct nvmf_auth_send_command, spsp0);
>>> +        goto done;
>>> +    }
>>> +    if (req->cmd->auth_send.spsp1 != 0x01) {
>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>> +        req->error_loc =
>>> +            offsetof(struct nvmf_auth_send_command, spsp1);
>>> +        goto done;
>>> +    }
>>> +    tl = le32_to_cpu(req->cmd->auth_send.tl);
>>> +    if (!tl) {
>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>> +        req->error_loc =
>>> +            offsetof(struct nvmf_auth_send_command, tl);
>>> +        goto done;
>>> +    }
>>> +    if (!nvmet_check_transfer_len(req, tl)) {
>>> +        pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
>>> +        return;
>>> +    }
>>> +
>>> +    d = kmalloc(tl, GFP_KERNEL);
>>> +    if (!d) {
>>> +        status = NVME_SC_INTERNAL;
>>> +        goto done;
>>> +    }
>>> +
>>> +    status = nvmet_copy_from_sgl(req, 0, d, tl);
>>> +    if (status) {
>>> +        kfree(d);
>>> +        goto done;
>>> +    }
>>> +
>>> +    data = d;
>>> +    pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
>>> +         ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
>>> +         req->sq->dhchap_step);
>>> +    if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
>>> +        data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
>>> +        goto done_failure1;
>>> +    if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
>>> +        if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
>>> +            /* Restart negotiation */
>>> +            pr_debug("%s: ctrl %d qid %d reset negotiation\n", 
>>> __func__,
>>> +                 ctrl->cntlid, req->sq->qid);
>>
>> This is the point where you need to reset also auth config as this may
>> have changed and the host will not create a new controller but rather
>> re-authenticate on the existing controller.
>>
>> i.e.
>>
>> +                       if (!req->sq->qid) {
>> +                               nvmet_destroy_auth(ctrl);
>> +                               if (nvmet_setup_auth(ctrl) < 0) {
>> +                                       pr_err("Failed to setup re-authentication\n");
>> +                                       goto done_failure1;
>> +                               }
>> +                       }
>>
>>
>>
> 
> Not sure. We have two paths how re-authentication can be triggered.
> The one is from the host, which sends a 'negotiate' command to the 
> controller (ie this path).  Then nothing on the controller has changed, 
> and we just need to ensure that we restart negotiation.
> IE we should _not_ reset the authentication (as that would also remove 
> the controller keys, which haven't changed). We should just ensure that 
> all ephemeral data is regenerated. But that should be handled in-line, 
> and I _think_ I have covered all of that.
> The other path to trigger re-authentication is when changing values on 
> the controller via configfs. Then sure we need to reset the controller 
> data, and trigger reauthentication.
> And there I do agree, that path isn't fully implemented / tested.
> But should be started whenever the configfs values change.
> 
Actually, having re-read the spec I'm not sure if the second path is 
correct.
As per the spec, only the _host_ can trigger re-authentication. There is 
no provision for the controller to trigger re-authentication, and given 
that re-auth is a soft state anyway (i.e. the current authentication stays 
valid until re-auth enters a final state) I _think_ we should be good 
with the current implementation, where we can change the controller keys
via configfs, but they will only become active once the host triggers
re-authentication.

And indeed, that's the only way it could work; otherwise it would be 
tricky to change keys in a running connection.
If we were to force renegotiation when changing controller keys we would 
immediately fail the connection, as we cannot guarantee that controller 
_and_ host keys are changed at the same time.

Cheers,

Hannes
Sagi Grimberg Sept. 27, 2021, 7:55 a.m. UTC | #5
On 9/27/21 10:17 AM, Hannes Reinecke wrote:
> On 9/27/21 8:40 AM, Hannes Reinecke wrote:
>> On 9/27/21 12:51 AM, Sagi Grimberg wrote:
>>>
>>>> +void nvmet_execute_auth_send(struct nvmet_req *req)
>>>> +{
>>>> +    struct nvmet_ctrl *ctrl = req->sq->ctrl;
>>>> +    struct nvmf_auth_dhchap_success2_data *data;
>>>> +    void *d;
>>>> +    u32 tl;
>>>> +    u16 status = 0;
>>>> +
>>>> +    if (req->cmd->auth_send.secp != 
>>>> NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
>>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>>> +        req->error_loc =
>>>> +            offsetof(struct nvmf_auth_send_command, secp);
>>>> +        goto done;
>>>> +    }
>>>> +    if (req->cmd->auth_send.spsp0 != 0x01) {
>>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>>> +        req->error_loc =
>>>> +            offsetof(struct nvmf_auth_send_command, spsp0);
>>>> +        goto done;
>>>> +    }
>>>> +    if (req->cmd->auth_send.spsp1 != 0x01) {
>>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>>> +        req->error_loc =
>>>> +            offsetof(struct nvmf_auth_send_command, spsp1);
>>>> +        goto done;
>>>> +    }
>>>> +    tl = le32_to_cpu(req->cmd->auth_send.tl);
>>>> +    if (!tl) {
>>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>>> +        req->error_loc =
>>>> +            offsetof(struct nvmf_auth_send_command, tl);
>>>> +        goto done;
>>>> +    }
>>>> +    if (!nvmet_check_transfer_len(req, tl)) {
>>>> +        pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    d = kmalloc(tl, GFP_KERNEL);
>>>> +    if (!d) {
>>>> +        status = NVME_SC_INTERNAL;
>>>> +        goto done;
>>>> +    }
>>>> +
>>>> +    status = nvmet_copy_from_sgl(req, 0, d, tl);
>>>> +    if (status) {
>>>> +        kfree(d);
>>>> +        goto done;
>>>> +    }
>>>> +
>>>> +    data = d;
>>>> +    pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
>>>> +         ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
>>>> +         req->sq->dhchap_step);
>>>> +    if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
>>>> +        data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
>>>> +        goto done_failure1;
>>>> +    if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
>>>> +        if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
>>>> +            /* Restart negotiation */
>>>> +            pr_debug("%s: ctrl %d qid %d reset negotiation\n", 
>>>> __func__,
>>>> +                 ctrl->cntlid, req->sq->qid);
>>>
>>> This is the point where you need to reset also auth config as this may
>>> have changed and the host will not create a new controller but rather
>>> re-authenticate on the existing controller.
>>>
>>> i.e.
>>>
>>> +                       if (!req->sq->qid) {
>>> +                               nvmet_destroy_auth(ctrl);
>>> +                               if (nvmet_setup_auth(ctrl) < 0) {
>>> +                                       pr_err("Failed to setup re-authentication\n");
>>> +                                       goto done_failure1;
>>> +                               }
>>> +                       }
>>>
>>>
>>>
>>
>> Not sure. We have two paths how re-authentication can be triggered.
>> The one is from the host, which sends a 'negotiate' command to the 
>> controller (ie this path).  Then nothing on the controller has 
>> changed, and we just need to ensure that we restart negotiation.
>> IE we should _not_ reset the authentication (as that would also remove 
>> the controller keys, which haven't changed). We should just ensure 
>> that all ephemeral data is regenerated. But that should be handled 
>> in-line, and I _think_ I have covered all of that.
>> The other path to trigger re-authentication is when changing values on 
>> the controller via configfs. Then sure we need to reset the controller 
>> data, and trigger reauthentication.
>> And there I do agree, that path isn't fully implemented / tested.
>> But should be started whenever the configfs values change.
>>
> Actually, having re-read the spec I'm not sure if the second path is 
> correct.
> As per spec only the _host_ can trigger re-authentication. There is no 
> provision for the controller to trigger re-authentication, and given 
> that re-auth is a soft-state anyway (ie the current authentication stays 
> valid until re-auth enters a final state) I _think_ we should be good 
> with the current implementation, where we can change the controller keys
> via configfs, but they will only become active once the host triggers
> re-authentication.

Agree, so the proposed addition is good with you?

> And indeed, that's the only way how it could work, otherwise it'll be 
> tricky to change keys in a running connection.
> If we were to force renegotiation when changing controller keys we would 
> immediately fail the connection, as we cannot guarantee that controller 
> _and_ host keys are changed at the same time.

Exactly, changing the host key in the controller must not trigger
re-auth; the host will remain connected and operational as it
authenticated before. When the host re-authenticates or reconnects,
it needs to authenticate against the new key.
Hannes Reinecke Sept. 27, 2021, 8:28 a.m. UTC | #6
On 9/27/21 9:55 AM, Sagi Grimberg wrote:
> 
> 
> On 9/27/21 10:17 AM, Hannes Reinecke wrote:
>> On 9/27/21 8:40 AM, Hannes Reinecke wrote:
>>> On 9/27/21 12:51 AM, Sagi Grimberg wrote:
>>>>
>>>>> +void nvmet_execute_auth_send(struct nvmet_req *req)
>>>>> +{
>>>>> +    struct nvmet_ctrl *ctrl = req->sq->ctrl;
>>>>> +    struct nvmf_auth_dhchap_success2_data *data;
>>>>> +    void *d;
>>>>> +    u32 tl;
>>>>> +    u16 status = 0;
>>>>> +
>>>>> +    if (req->cmd->auth_send.secp !=
>>>>> NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
>>>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>>>> +        req->error_loc =
>>>>> +            offsetof(struct nvmf_auth_send_command, secp);
>>>>> +        goto done;
>>>>> +    }
>>>>> +    if (req->cmd->auth_send.spsp0 != 0x01) {
>>>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>>>> +        req->error_loc =
>>>>> +            offsetof(struct nvmf_auth_send_command, spsp0);
>>>>> +        goto done;
>>>>> +    }
>>>>> +    if (req->cmd->auth_send.spsp1 != 0x01) {
>>>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>>>> +        req->error_loc =
>>>>> +            offsetof(struct nvmf_auth_send_command, spsp1);
>>>>> +        goto done;
>>>>> +    }
>>>>> +    tl = le32_to_cpu(req->cmd->auth_send.tl);
>>>>> +    if (!tl) {
>>>>> +        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
>>>>> +        req->error_loc =
>>>>> +            offsetof(struct nvmf_auth_send_command, tl);
>>>>> +        goto done;
>>>>> +    }
>>>>> +    if (!nvmet_check_transfer_len(req, tl)) {
>>>>> +        pr_debug("%s: transfer length mismatch (%u)\n", __func__,
>>>>> tl);
>>>>> +        return;
>>>>> +    }
>>>>> +
>>>>> +    d = kmalloc(tl, GFP_KERNEL);
>>>>> +    if (!d) {
>>>>> +        status = NVME_SC_INTERNAL;
>>>>> +        goto done;
>>>>> +    }
>>>>> +
>>>>> +    status = nvmet_copy_from_sgl(req, 0, d, tl);
>>>>> +    if (status) {
>>>>> +        kfree(d);
>>>>> +        goto done;
>>>>> +    }
>>>>> +
>>>>> +    data = d;
>>>>> +    pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
>>>>> +         ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
>>>>> +         req->sq->dhchap_step);
>>>>> +    if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
>>>>> +        data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
>>>>> +        goto done_failure1;
>>>>> +    if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
>>>>> +        if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
>>>>> +            /* Restart negotiation */
>>>>> +            pr_debug("%s: ctrl %d qid %d reset negotiation\n",
>>>>> __func__,
>>>>> +                 ctrl->cntlid, req->sq->qid);
>>>>
>>>> This is the point where you need to reset also auth config as this may
>>>> have changed and the host will not create a new controller but rather
>>>> re-authenticate on the existing controller.
>>>>
>>>> i.e.
>>>>
>>>> +                       if (!req->sq->qid) {
>>>> +                               nvmet_destroy_auth(ctrl);
>>>> +                               if (nvmet_setup_auth(ctrl) < 0) {
>>>> +                                       pr_err("Failed to setup re-authentication\n");
>>>> +                                       goto done_failure1;
>>>> +                               }
>>>> +                       }
>>>>
>>>>
>>>>
>>>
>>> Not sure. We have two paths how re-authentication can be triggered.
>>> The one is from the host, which sends a 'negotiate' command to the
>>> controller (ie this path).  Then nothing on the controller has
>>> changed, and we just need to ensure that we restart negotiation.
>>> IE we should _not_ reset the authentication (as that would also
>>> remove the controller keys, which haven't changed). We should just
>>> ensure that all ephemeral data is regenerated. But that should be
>>> handled in-line, and I _think_ I have covered all of that.
>>> The other path to trigger re-authentication is when changing values
>>> on the controller via configfs. Then sure we need to reset the
>>> controller data, and trigger reauthentication.
>>> And there I do agree, that path isn't fully implemented / tested.
>>> But should be started whenever the configfs values change.
>>>
>> Actually, having re-read the spec I'm not sure if the second path is
>> correct.
>> As per spec only the _host_ can trigger re-authentication. There is no
>> provision for the controller to trigger re-authentication, and given
>> that re-auth is a soft-state anyway (ie the current authentication
>> stays valid until re-auth enters a final state) I _think_ we should be
>> good with the current implementation, where we can change the
>> controller keys
>> via configfs, but they will only become active once the host triggers
>> re-authentication.
> 
> Agree, so the proposed addition is good with you?
> 
Why would we need it?
I do agree there's a bit missing for removing the old shash_tfm if there
is a hash-id mismatch, but why would we need to reset the entire
authentication?
The important (ie cryptographically relevant) bits are cleared in
nvmet_auth_sq_free(), and they are cleared after authentication is
completed.
So why would we need to reset keys and TFMs?

>> And indeed, that's the only way how it could work, otherwise it'll be
>> tricky to change keys in a running connection.
>> If we were to force renegotiation when changing controller keys we
>> would immediately fail the connection, as we cannot guarantee that
>> controller _and_ host keys are changed at the same time.
> 
> Exactly, changing the hostkey in the controller must not trigger
> re-auth, the host will remain connected and operational as it
> authenticated before. As the host re-authenticates or reconnect
> it needs to authenticate against the new key.

Right. I'll be adding a comment to the configfs functions to that effect.

Cheers,

Hannes
Sagi Grimberg Sept. 28, 2021, 10:36 p.m. UTC | #7
>>> Actually, having re-read the spec I'm not sure if the second path is
>>> correct.
>>> As per spec only the _host_ can trigger re-authentication. There is no
>>> provision for the controller to trigger re-authentication, and given
>>> that re-auth is a soft-state anyway (ie the current authentication
>>> stays valid until re-auth enters a final state) I _think_ we should be
>>> good with the current implementation, where we can change the
>>> controller keys
>>> via configfs, but they will only become active once the host triggers
>>> re-authentication.
>>
>> Agree, so the proposed addition is good with you?
>>
> Why would we need it?
> I do agree there's a bit missing for removing the old shash_tfm if there
> is a hash-id mismatch, but why would we need to reset the entire
> authentication?

Just need to update to the new host dhchap_key at this point, as the host
is doing a re-authentication. I agree we don't need a big hammer, but we
do need the re-authentication to not access the old host dhchap_key.
Hannes Reinecke Sept. 29, 2021, 6:01 a.m. UTC | #8
On 9/29/21 12:36 AM, Sagi Grimberg wrote:
> 
>>>> Actually, having re-read the spec I'm not sure if the second path is
>>>> correct.
>>>> As per spec only the _host_ can trigger re-authentication. There is no
>>>> provision for the controller to trigger re-authentication, and given
>>>> that re-auth is a soft-state anyway (ie the current authentication
>>>> stays valid until re-auth enters a final state) I _think_ we should be
>>>> good with the current implementation, where we can change the
>>>> controller keys
>>>> via configfs, but they will only become active once the host triggers
>>>> re-authentication.
>>>
>>> Agree, so the proposed addition is good with you?
>>>
>> Why would we need it?
>> I do agree there's a bit missing for removing the old shash_tfm if there
>> is a hash-id mismatch, but why would we need to reset the entire
>> authentication?
> 
> Just need to update the new host dhchap_key from the host at this point
> as the host is doing a re-authentication. I agree we don't need a big
> hammer but we do need the re-authentication to not access old host
> dhchap_key.

Sure. And, upon reviewing, I guess you are right; will be including your 
snippet.
For the next round :-)

Cheers,

Hannes

Patch

diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 973561c93888..70f3c385fc9f 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -83,3 +83,14 @@  config NVME_TARGET_TCP
 	  devices over TCP.
 
 	  If unsure, say N.
+
+config NVME_TARGET_AUTH
+	bool "NVMe over Fabrics In-band Authentication support"
+	depends on NVME_TARGET
+	select CRYPTO_HMAC
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	help
+	  This enables support for NVMe over Fabrics In-band Authentication
+
+	  If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 9837e580fa7e..c66820102493 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -13,6 +13,7 @@  nvmet-y		+= core.o configfs.o admin-cmd.o fabrics-cmd.o \
 			discovery.o io-cmd-file.o io-cmd-bdev.o
 nvmet-$(CONFIG_NVME_TARGET_PASSTHRU)	+= passthru.o
 nvmet-$(CONFIG_BLK_DEV_ZONED)		+= zns.o
+nvmet-$(CONFIG_NVME_TARGET_AUTH)	+= fabrics-cmd-auth.o auth.o
 nvme-loop-y	+= loop.o
 nvmet-rdma-y	+= rdma.o
 nvmet-fc-y	+= fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index aa6d84d8848e..868d65c869cd 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1008,6 +1008,10 @@  u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 
 	if (nvme_is_fabrics(cmd))
 		return nvmet_parse_fabrics_cmd(req);
+
+	if (unlikely(!nvmet_check_auth_status(req)))
+		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+
 	if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
 		return nvmet_parse_discovery_cmd(req);
 
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
new file mode 100644
index 000000000000..5b5f3cd4f914
--- /dev/null
+++ b/drivers/nvme/target/auth.c
@@ -0,0 +1,301 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <crypto/hash.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <asm/unaligned.h>
+
+#include "nvmet.h"
+#include "../host/auth.h"
+
+int nvmet_auth_set_host_key(struct nvmet_host *host, const char *secret)
+{
+	if (sscanf(secret, "DHHC-1:%hhd:%*s", &host->dhchap_key_hash) != 1)
+		return -EINVAL;
+	if (host->dhchap_key_hash > 3) {
+		pr_warn("Invalid DH-HMAC-CHAP hash id %d\n",
+			 host->dhchap_key_hash);
+		return -EINVAL;
+	}
+	if (host->dhchap_key_hash > 0) {
+		/* Validate selected hash algorithm */
+		const char *hmac = nvme_auth_hmac_name(host->dhchap_key_hash);
+
+		if (!crypto_has_shash(hmac, 0, 0)) {
+			pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac);
+			host->dhchap_key_hash = -1;
+			return -ENOTSUPP;
+		}
+		/* Use this hash as default */
+		if (!host->dhchap_hash_id)
+			host->dhchap_hash_id = host->dhchap_key_hash;
+	}
+	host->dhchap_secret = kstrdup(secret, GFP_KERNEL);
+	if (!host->dhchap_secret)
+		return -ENOMEM;
+	/* Default to SHA256 */
+	if (!host->dhchap_hash_id)
+		host->dhchap_hash_id = NVME_AUTH_DHCHAP_SHA256;
+
+	pr_debug("Using hash %s\n",
+		 nvme_auth_hmac_name(host->dhchap_hash_id));
+	return 0;
+}
+
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+	int ret = 0;
+	struct nvmet_host_link *p;
+	struct nvmet_host *host = NULL;
+	const char *hash_name;
+
+	down_read(&nvmet_config_sem);
+	if (ctrl->subsys->type == NVME_NQN_DISC)
+		goto out_unlock;
+
+	list_for_each_entry(p, &ctrl->subsys->hosts, entry) {
+		pr_debug("check %s\n", nvmet_host_name(p->host));
+		if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn))
+			continue;
+		host = p->host;
+		break;
+	}
+	if (!host) {
+		pr_debug("host %s not found\n", ctrl->hostnqn);
+		ret = -EPERM;
+		goto out_unlock;
+	}
+	if (!host->dhchap_secret) {
+		pr_debug("No authentication provided\n");
+		goto out_unlock;
+	}
+	if (ctrl->shash_tfm &&
+	    host->dhchap_hash_id == ctrl->shash_id) {
+		pr_debug("Re-use existing hash ID %d\n",
+			 ctrl->shash_id);
+		ret = 0;
+		goto out_unlock;
+	}
+	hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+	if (!hash_name) {
+		pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	ctrl->shash_tfm = crypto_alloc_shash(hash_name, 0,
+					     CRYPTO_ALG_ALLOCATES_MEMORY);
+	if (IS_ERR(ctrl->shash_tfm)) {
+		pr_err("failed to allocate shash %s\n", hash_name);
+		ret = PTR_ERR(ctrl->shash_tfm);
+		ctrl->shash_tfm = NULL;
+		goto out_unlock;
+	}
+	ctrl->shash_id = host->dhchap_hash_id;
+
+	/* Skip the 'DHHC-1:XX:' prefix */
+	ctrl->dhchap_key = nvme_auth_extract_secret(host->dhchap_secret + 10,
+						    &ctrl->dhchap_key_len);
+	if (IS_ERR(ctrl->dhchap_key)) {
+		pr_debug("failed to extract host key, error %d\n", ret);
+		ret = PTR_ERR(ctrl->dhchap_key);
+		ctrl->dhchap_key = NULL;
+		goto out_free_hash;
+	}
+	pr_debug("%s: using key %*ph\n", __func__,
+		 (int)ctrl->dhchap_key_len, ctrl->dhchap_key);
+out_free_hash:
+	if (ret) {
+		if (ctrl->dhchap_key) {
+			kfree_sensitive(ctrl->dhchap_key);
+			ctrl->dhchap_key = NULL;
+		}
+		crypto_free_shash(ctrl->shash_tfm);
+		ctrl->shash_tfm = NULL;
+		ctrl->shash_id = 0;
+	}
+out_unlock:
+	up_read(&nvmet_config_sem);
+
+	return ret;
+}
+
+void nvmet_auth_sq_free(struct nvmet_sq *sq)
+{
+	kfree(sq->dhchap_c1);
+	sq->dhchap_c1 = NULL;
+	kfree(sq->dhchap_c2);
+	sq->dhchap_c2 = NULL;
+	kfree(sq->dhchap_skey);
+	sq->dhchap_skey = NULL;
+}
+
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
+{
+	if (ctrl->shash_tfm) {
+		crypto_free_shash(ctrl->shash_tfm);
+		ctrl->shash_tfm = NULL;
+		ctrl->shash_id = 0;
+	}
+	if (ctrl->dhchap_key) {
+		kfree(ctrl->dhchap_key);
+		ctrl->dhchap_key = NULL;
+	}
+}
+
+bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+	if (req->sq->ctrl->shash_tfm &&
+	    !req->sq->authenticated)
+		return false;
+	return true;
+}
+
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+			 unsigned int shash_len)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	SHASH_DESC_ON_STACK(shash, ctrl->shash_tfm);
+	u8 *challenge = req->sq->dhchap_c1, *host_response;
+	u8 buf[4];
+	int ret;
+
+	host_response = nvme_auth_transform_key(ctrl->dhchap_key,
+				shash_len, ctrl->shash_id,
+				ctrl->hostnqn);
+	if (IS_ERR(host_response))
+		return PTR_ERR(host_response);
+
+	ret = crypto_shash_setkey(ctrl->shash_tfm, host_response, shash_len);
+	if (ret) {
+		kfree_sensitive(host_response);
+		return ret;
+	}
+	if (ctrl->dh_gid != NVME_AUTH_DHCHAP_DHGROUP_NULL) {
+		ret = -ENOTSUPP;
+		goto out;
+	}
+
+	shash->tfm = ctrl->shash_tfm;
+	ret = crypto_shash_init(shash);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, challenge, shash_len);
+	if (ret)
+		goto out;
+	put_unaligned_le32(req->sq->dhchap_s1, buf);
+	ret = crypto_shash_update(shash, buf, 4);
+	if (ret)
+		goto out;
+	put_unaligned_le16(req->sq->dhchap_tid, buf);
+	ret = crypto_shash_update(shash, buf, 2);
+	if (ret)
+		goto out;
+	memset(buf, 0, 4);
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, "HostHost", 8);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->subsysnqn,
+				  strlen(ctrl->subsysnqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_final(shash, response);
+out:
+	if (challenge != req->sq->dhchap_c1)
+		kfree(challenge);
+	kfree_sensitive(host_response);
+	return 0;
+}
+
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+			 unsigned int shash_len)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	SHASH_DESC_ON_STACK(shash, ctrl->shash_tfm);
+	u8 *challenge = req->sq->dhchap_c2, *ctrl_response;
+	u8 buf[4];
+	int ret;
+
+	pr_debug("%s: ctrl %d hash seq %d transaction %u\n", __func__,
+		 ctrl->cntlid, req->sq->dhchap_s2, req->sq->dhchap_tid);
+	pr_debug("%s: ctrl %d challenge %*ph\n", __func__,
+		 ctrl->cntlid, shash_len, req->sq->dhchap_c2);
+	pr_debug("%s: ctrl %d subsysnqn %s\n", __func__,
+		 ctrl->cntlid, ctrl->subsysnqn);
+	pr_debug("%s: ctrl %d hostnqn %s\n", __func__,
+		 ctrl->cntlid, ctrl->hostnqn);
+
+	ctrl_response = nvme_auth_transform_key(ctrl->dhchap_key,
+				shash_len, ctrl->shash_id,
+				ctrl->subsysnqn);
+	if (IS_ERR(ctrl_response))
+		return PTR_ERR(ctrl_response);
+
+	ret = crypto_shash_setkey(ctrl->shash_tfm, ctrl_response, shash_len);
+	if (ret) {
+		kfree_sensitive(ctrl_response);
+		return ret;
+	}
+	if (ctrl->dh_gid != NVME_AUTH_DHCHAP_DHGROUP_NULL) {
+		ret = -ENOTSUPP;
+		goto out;
+	}
+
+	shash->tfm = ctrl->shash_tfm;
+	ret = crypto_shash_init(shash);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, challenge, shash_len);
+	if (ret)
+		goto out;
+	put_unaligned_le32(req->sq->dhchap_s2, buf);
+	ret = crypto_shash_update(shash, buf, 4);
+	if (ret)
+		goto out;
+	put_unaligned_le16(req->sq->dhchap_tid, buf);
+	ret = crypto_shash_update(shash, buf, 2);
+	if (ret)
+		goto out;
+	memset(buf, 0, 4);
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, "Controller", 10);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->subsysnqn,
+			    strlen(ctrl->subsysnqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, buf, 1);
+	if (ret)
+		goto out;
+	ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+	if (ret)
+		goto out;
+	ret = crypto_shash_final(shash, response);
+out:
+	if (challenge != req->sq->dhchap_c2)
+		kfree(challenge);
+	kfree_sensitive(ctrl_response);
+	return 0;
+}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d784f3c200b4..7c13810a637f 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -11,8 +11,13 @@ 
 #include <linux/ctype.h>
 #include <linux/pci.h>
 #include <linux/pci-p2pdma.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
 
 #include "nvmet.h"
+#ifdef CONFIG_NVME_TARGET_AUTH
+#include "../host/auth.h"
+#endif
 
 static const struct config_item_type nvmet_host_type;
 static const struct config_item_type nvmet_subsys_type;
@@ -1657,10 +1662,71 @@  static const struct config_item_type nvmet_ports_type = {
 static struct config_group nvmet_subsystems_group;
 static struct config_group nvmet_ports_group;
 
-static void nvmet_host_release(struct config_item *item)
+#ifdef CONFIG_NVME_TARGET_AUTH
+static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
+		char *page)
+{
+	u8 *dhchap_secret = to_host(item)->dhchap_secret;
+
+	if (!dhchap_secret)
+		return sprintf(page, "\n");
+	return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
+		const char *page, size_t count)
 {
 	struct nvmet_host *host = to_host(item);
+	int ret;
 
+	ret = nvmet_auth_set_host_key(host, page);
+	if (ret < 0)
+		return ret;
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_key);
+
+static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_host *host = to_host(item);
+	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+
+	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
+}
+
+static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_host *host = to_host(item);
+	int hmac_id;
+
+	hmac_id = nvme_auth_hmac_id(page);
+	if (hmac_id < 0)
+		return -EINVAL;
+	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
+		return -ENOTSUPP;
+	host->dhchap_hash_id = hmac_id;
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
+
+static struct configfs_attribute *nvmet_host_attrs[] = {
+	&nvmet_host_attr_dhchap_key,
+	&nvmet_host_attr_dhchap_hash,
+	NULL,
+};
+#endif /* CONFIG_NVME_TARGET_AUTH */
+
+static void nvmet_host_release(struct config_item *item)
+{
+	struct nvmet_host *host = to_host(item);
+#ifdef CONFIG_NVME_TARGET_AUTH
+	if (host->dhchap_secret)
+		kfree(host->dhchap_secret);
+#endif
 	kfree(host);
 }
 
@@ -1670,6 +1736,9 @@  static struct configfs_item_operations nvmet_host_item_ops = {
 
 static const struct config_item_type nvmet_host_type = {
 	.ct_item_ops		= &nvmet_host_item_ops,
+#ifdef CONFIG_NVME_TARGET_AUTH
+	.ct_attrs		= nvmet_host_attrs,
+#endif
 	.ct_owner		= THIS_MODULE,
 };
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6e253c3c5e0f..afe7ca1f9175 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -793,6 +793,7 @@  void nvmet_sq_destroy(struct nvmet_sq *sq)
 	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
+	nvmet_auth_sq_free(sq);
 
 	if (ctrl) {
 		/*
@@ -1268,6 +1269,11 @@  u16 nvmet_check_ctrl_status(struct nvmet_req *req)
 		       req->cmd->common.opcode, req->sq->qid);
 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
 	}
+
+	if (unlikely(!nvmet_check_auth_status(req))) {
+		pr_warn("qid %d not authenticated\n", req->sq->qid);
+		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+	}
 	return 0;
 }
 
@@ -1459,6 +1465,8 @@  static void nvmet_ctrl_free(struct kref *ref)
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fatal_err_work);
 
+	nvmet_destroy_auth(ctrl);
+
 	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
 
 	nvmet_async_events_free(ctrl);
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
new file mode 100644
index 000000000000..ab9dfc06bac0
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -0,0 +1,464 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include "nvmet.h"
+#include "../host/auth.h"
+
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+	/* Initialize in-band authentication */
+	req->sq->authenticated = false;
+	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+	req->cqe->result.u32 |= 0x2 << 16;
+}
+
+static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmf_auth_dhchap_negotiate_data *data = d;
+	int i, hash_id, null_dh = -1;
+
+	pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
+		 __func__, ctrl->cntlid, req->sq->qid,
+		 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
+		 data->auth_protocol[0].dhchap.halen,
+		 data->auth_protocol[0].dhchap.dhlen);
+	req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+	if (data->sc_c)
+		return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+
+	if (data->napd != 1)
+		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+
+	if (data->auth_protocol[0].dhchap.authid !=
+	    NVME_AUTH_DHCHAP_AUTH_ID)
+		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+
+	hash_id = 0;
+	for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
+		if (ctrl->shash_id != data->auth_protocol[0].dhchap.idlist[i])
+			continue;
+		hash_id = ctrl->shash_id;
+		break;
+	}
+	if (hash_id == 0) {
+		pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
+			 __func__, ctrl->cntlid, req->sq->qid);
+		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+	}
+
+	for (i = data->auth_protocol[0].dhchap.halen;
+	     i < data->auth_protocol[0].dhchap.halen +
+		     data->auth_protocol[0].dhchap.dhlen; i++) {
+		int dhgid = data->auth_protocol[0].dhchap.idlist[i];
+
+		if (dhgid == NVME_AUTH_DHCHAP_DHGROUP_NULL) {
+			null_dh = dhgid;
+			continue;
+		}
+	}
+	if (null_dh < 0) {
+		pr_debug("%s: ctrl %d qid %d: no DH group selected\n",
+			 __func__, ctrl->cntlid, req->sq->qid);
+		return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+	}
+	ctrl->dh_gid = null_dh;
+	pr_debug("%s: ctrl %d qid %d: DH group %s (%d)\n",
+		 __func__, ctrl->cntlid, req->sq->qid,
+		 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
+	return 0;
+}
+
+static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmf_auth_dhchap_reply_data *data = d;
+	int hash_len = crypto_shash_digestsize(ctrl->shash_tfm);
+	u8 *response;
+
+	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %d\n",
+		 __func__, ctrl->cntlid, req->sq->qid,
+		 data->hl, data->cvalid, data->dhvlen);
+	if (data->hl != hash_len)
+		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+
+	if (data->dhvlen) {
+		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+	}
+
+	response = kmalloc(data->hl, GFP_KERNEL);
+	if (!response)
+		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
+		pr_debug("ctrl %d qid %d DH-HMAC-CHAP hash failed\n",
+			 ctrl->cntlid, req->sq->qid);
+		kfree(response);
+		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+	}
+
+	if (memcmp(data->rval, response, data->hl)) {
+		pr_info("ctrl %d qid %d DH-HMAC-CHAP response mismatch\n",
+			ctrl->cntlid, req->sq->qid);
+		kfree(response);
+		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+	}
+	kfree(response);
+	pr_info("ctrl %d qid %d DH-HMAC-CHAP host authenticated\n",
+		ctrl->cntlid, req->sq->qid);
+	if (data->cvalid) {
+		req->sq->dhchap_c2 = kmalloc(data->hl, GFP_KERNEL);
+		if (!req->sq->dhchap_c2)
+			return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+		memcpy(req->sq->dhchap_c2, data->rval + data->hl, data->hl);
+
+		pr_debug("ctrl %d qid %d challenge %*ph\n",
+			 ctrl->cntlid, req->sq->qid, data->hl,
+			 req->sq->dhchap_c2);
+		req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+	} else
+		req->sq->dhchap_c2 = NULL;
+
+	return 0;
+}
+
+static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
+{
+	struct nvmf_auth_dhchap_failure_data *data = d;
+
+	return data->rescode_exp;
+}
+
+void nvmet_execute_auth_send(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmf_auth_dhchap_success2_data *data;
+	void *d;
+	u32 tl;
+	u16 status = 0;
+
+	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc =
+			offsetof(struct nvmf_auth_send_command, secp);
+		goto done;
+	}
+	if (req->cmd->auth_send.spsp0 != 0x01) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc =
+			offsetof(struct nvmf_auth_send_command, spsp0);
+		goto done;
+	}
+	if (req->cmd->auth_send.spsp1 != 0x01) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc =
+			offsetof(struct nvmf_auth_send_command, spsp1);
+		goto done;
+	}
+	tl = le32_to_cpu(req->cmd->auth_send.tl);
+	if (!tl) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc =
+			offsetof(struct nvmf_auth_send_command, tl);
+		goto done;
+	}
+	if (!nvmet_check_transfer_len(req, tl)) {
+		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
+		return;
+	}
+
+	d = kmalloc(tl, GFP_KERNEL);
+	if (!d) {
+		status = NVME_SC_INTERNAL;
+		goto done;
+	}
+
+	status = nvmet_copy_from_sgl(req, 0, d, tl);
+	if (status) {
+		kfree(d);
+		goto done;
+	}
+
+	data = d;
+	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
+		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
+		 req->sq->dhchap_step);
+	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
+	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
+		goto done_failure1;
+	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
+		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
+			/* Restart negotiation */
+			pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
+				 ctrl->cntlid, req->sq->qid);
+			req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+		} else if (data->auth_id != req->sq->dhchap_step)
+			goto done_failure1;
+		/* Validate negotiation parameters */
+		status = nvmet_auth_negotiate(req, d);
+		if (status == 0)
+			req->sq->dhchap_step =
+				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+		else {
+			req->sq->dhchap_step =
+				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+			req->sq->dhchap_status = status;
+			status = 0;
+		}
+		goto done_kfree;
+	}
+	if (data->auth_id != req->sq->dhchap_step) {
+		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
+			 __func__, ctrl->cntlid, req->sq->qid,
+			 data->auth_id, req->sq->dhchap_step);
+		goto done_failure1;
+	}
+	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
+		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
+			 __func__, ctrl->cntlid, req->sq->qid,
+			 le16_to_cpu(data->t_id),
+			 req->sq->dhchap_tid);
+		req->sq->dhchap_step =
+			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+		req->sq->dhchap_status =
+			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+		goto done_kfree;
+	}
+	switch (data->auth_id) {
+	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
+		status = nvmet_auth_reply(req, d);
+		if (status == 0)
+			req->sq->dhchap_step =
+				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+		else {
+			req->sq->dhchap_step =
+				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+			req->sq->dhchap_status = status;
+			status = 0;
+		}
+		goto done_kfree;
+		break;
+	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+		req->sq->authenticated = true;
+		pr_debug("%s: ctrl %d qid %d authenticated\n",
+			 __func__, ctrl->cntlid, req->sq->qid);
+		goto done_kfree;
+		break;
+	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
+		status = nvmet_auth_failure2(req, d);
+		if (status) {
+			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
+				ctrl->cntlid, req->sq->qid, status);
+			req->sq->dhchap_status = status;
+			status = 0;
+		}
+		goto done_kfree;
+		break;
+	default:
+		req->sq->dhchap_status =
+			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+		req->sq->dhchap_step =
+			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+		goto done_kfree;
+		break;
+	}
+done_failure1:
+	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+
+done_kfree:
+	kfree(d);
+done:
+	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
+		 ctrl->cntlid, req->sq->qid,
+		 req->sq->dhchap_status, req->sq->dhchap_step);
+	if (status)
+		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+			 __func__, ctrl->cntlid, req->sq->qid,
+			 status, req->error_loc);
+	req->cqe->result.u64 = 0;
+	nvmet_req_complete(req, status);
+	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+		return;
+	/* Final states, clear up variables */
+	nvmet_auth_sq_free(req->sq);
+	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+		nvmet_ctrl_fatal_error(ctrl);
+}
+
+static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
+{
+	struct nvmf_auth_dhchap_challenge_data *data = d;
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	int ret = 0;
+	int hash_len = crypto_shash_digestsize(ctrl->shash_tfm);
+	int data_size = sizeof(*d) + hash_len;
+
+	if (al < data_size) {
+		pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
+			 al, data_size);
+		return -EINVAL;
+	}
+	memset(data, 0, data_size);
+	req->sq->dhchap_s1 = ctrl->dhchap_seqnum++;
+	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+	data->hashid = ctrl->shash_id;
+	data->hl = hash_len;
+	data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
+	req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
+	if (!req->sq->dhchap_c1)
+		return -ENOMEM;
+	get_random_bytes(req->sq->dhchap_c1, data->hl);
+	memcpy(data->cval, req->sq->dhchap_c1, data->hl);
+	pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %d\n",
+		 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+		 req->sq->dhchap_tid, data->hl, data->dhvlen);
+	return ret;
+}
+
+static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
+{
+	struct nvmf_auth_dhchap_success1_data *data = d;
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	int hash_len = crypto_shash_digestsize(ctrl->shash_tfm);
+
+	WARN_ON(al < sizeof(*data));
+	memset(data, 0, sizeof(*data));
+	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+	data->hl = hash_len;
+	if (req->sq->dhchap_c2) {
+		if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
+			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+		data->rvalid = 1;
+		pr_debug("ctrl %d qid %d response %*ph\n",
+			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+	}
+	return 0;
+}
+
+static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
+{
+	struct nvmf_auth_dhchap_failure_data *data = d;
+
+	WARN_ON(al < sizeof(*data));
+	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+	data->t_id = cpu_to_le32(req->sq->dhchap_tid);
+	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+	data->rescode_exp = req->sq->dhchap_status;
+}
+
+void nvmet_execute_auth_receive(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	void *d;
+	u32 al;
+	u16 status = 0;
+
+	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc =
+			offsetof(struct nvmf_auth_receive_command, secp);
+		goto done;
+	}
+	if (req->cmd->auth_receive.spsp0 != 0x01) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc =
+			offsetof(struct nvmf_auth_receive_command, spsp0);
+		goto done;
+	}
+	if (req->cmd->auth_receive.spsp1 != 0x01) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc =
+			offsetof(struct nvmf_auth_receive_command, spsp1);
+		goto done;
+	}
+	al = le32_to_cpu(req->cmd->auth_receive.al);
+	if (!al) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		req->error_loc =
+			offsetof(struct nvmf_auth_receive_command, al);
+		goto done;
+	}
+	if (!nvmet_check_transfer_len(req, al)) {
+		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
+		return;
+	}
+
+	d = kmalloc(al, GFP_KERNEL);
+	if (!d) {
+		status = NVME_SC_INTERNAL;
+		goto done;
+	}
+	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
+		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+	switch (req->sq->dhchap_step) {
+	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
+		status = nvmet_auth_challenge(req, d, al);
+		if (status < 0) {
+			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
+				ctrl->cntlid, req->sq->qid, status);
+			status = NVME_SC_INTERNAL;
+			break;
+		}
+		if (status) {
+			req->sq->dhchap_status = status;
+			nvmet_auth_failure1(req, d, al);
+			pr_warn("ctrl %d qid %d: challenge status (%x)\n",
+				ctrl->cntlid, req->sq->qid,
+				req->sq->dhchap_status);
+			status = 0;
+			break;
+		}
+		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+		break;
+	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
+		status = nvmet_auth_success1(req, d, al);
+		if (status) {
+			req->sq->dhchap_status = status;
+			nvmet_auth_failure1(req, d, al);
+			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
+				ctrl->cntlid, req->sq->qid,
+				req->sq->dhchap_status);
+			break;
+		}
+		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+		break;
+	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
+		nvmet_auth_failure1(req, d, al);
+		pr_warn("ctrl %d qid %d failure1 (%x)\n",
+			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
+		break;
+	default:
+		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
+			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+		nvmet_auth_failure1(req, d, al);
+		status = 0;
+		break;
+	}
+
+	status = nvmet_copy_to_sgl(req, 0, d, al);
+	kfree(d);
+done:
+	req->cqe->result.u64 = 0;
+	nvmet_req_complete(req, status);
+	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+		nvmet_auth_sq_free(req->sq);
+		nvmet_ctrl_fatal_error(ctrl);
+	}
+}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 7d0454cee920..d5a4a9a68ee1 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -93,6 +93,14 @@  u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
 	case nvme_fabrics_type_property_get:
 		req->execute = nvmet_execute_prop_get;
 		break;
+#ifdef CONFIG_NVME_TARGET_AUTH
+	case nvme_fabrics_type_auth_send:
+		req->execute = nvmet_execute_auth_send;
+		break;
+	case nvme_fabrics_type_auth_receive:
+		req->execute = nvmet_execute_auth_receive;
+		break;
+#endif
 	default:
 		pr_debug("received unknown capsule type 0x%x\n",
 			cmd->fabrics.fctype);
@@ -173,6 +181,7 @@  static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	struct nvmf_connect_data *d;
 	struct nvmet_ctrl *ctrl = NULL;
 	u16 status = 0;
+	int ret;
 
 	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
 		return;
@@ -215,17 +224,31 @@  static void nvmet_execute_admin_connect(struct nvmet_req *req)
 
 	uuid_copy(&ctrl->hostid, &d->hostid);
 
+	ret = nvmet_setup_auth(ctrl);
+	if (ret < 0) {
+		pr_err("Failed to setup authentication, error %d\n", ret);
+		nvmet_ctrl_put(ctrl);
+		if (ret == -EPERM)
+			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+		else
+			status = NVME_SC_INTERNAL;
+		goto out;
+	}
+
 	status = nvmet_install_queue(ctrl, req);
 	if (status) {
 		nvmet_ctrl_put(ctrl);
 		goto out;
 	}
 
-	pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
+	pr_info("creating controller %d for subsystem %s for NQN %s%s%s.\n",
 		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
-		ctrl->pi_support ? " T10-PI is enabled" : "");
+		ctrl->pi_support ? " T10-PI is enabled" : "",
+		nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
 	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
 
+	if (nvmet_has_auth(ctrl))
+		nvmet_init_auth(ctrl, req);
 out:
 	kfree(d);
 complete:
@@ -285,6 +308,9 @@  static void nvmet_execute_io_connect(struct nvmet_req *req)
 	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
 
 	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+	if (nvmet_has_auth(ctrl))
+		nvmet_init_auth(ctrl, req);
 
 out:
 	kfree(d);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7143c7fa7464..ab25f9e18027 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -108,6 +108,18 @@  struct nvmet_sq {
 	u16			size;
 	u32			sqhd;
 	bool			sqhd_disabled;
+#ifdef CONFIG_NVME_TARGET_AUTH
+	bool			authenticated;
+	u16			dhchap_tid;
+	u16			dhchap_status;
+	int			dhchap_step;
+	u8			*dhchap_c1;
+	u8			*dhchap_c2;
+	u32			dhchap_s1;
+	u32			dhchap_s2;
+	u8			*dhchap_skey;
+	int			dhchap_skey_len;
+#endif
 	struct completion	free_done;
 	struct completion	confirm_done;
 };
@@ -209,6 +221,15 @@  struct nvmet_ctrl {
 	u64			err_counter;
 	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
 	bool			pi_support;
+#ifdef CONFIG_NVME_TARGET_AUTH
+	u32			dhchap_seqnum;
+	u8			*dhchap_key;
+	size_t			dhchap_key_len;
+	struct crypto_shash	*shash_tfm;
+	u8			shash_id;
+	u32			dh_gid;
+	u32			dh_keysize;
+#endif
 };
 
 struct nvmet_subsys {
@@ -270,6 +291,10 @@  static inline struct nvmet_subsys *namespaces_to_subsys(
 
 struct nvmet_host {
 	struct config_group	group;
+	u8			*dhchap_secret;
+	u8			dhchap_key_hash;
+	u8			dhchap_hash_id;
+	u8			dhchap_dhgroup_id;
 };
 
 static inline struct nvmet_host *to_host(struct config_item *item)
@@ -660,4 +685,42 @@  static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
 		bio_put(bio);
 }
 
+#ifdef CONFIG_NVME_TARGET_AUTH
+void nvmet_execute_auth_send(struct nvmet_req *req);
+void nvmet_execute_auth_receive(struct nvmet_req *req);
+int nvmet_auth_set_host_key(struct nvmet_host *host, const char *secret);
+int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
+void nvmet_auth_sq_free(struct nvmet_sq *sq);
+bool nvmet_check_auth_status(struct nvmet_req *req);
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+			 unsigned int hash_len);
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+			 unsigned int hash_len);
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+	return ctrl->shash_tfm != NULL;
+}
+#else
+static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+	return 0;
+}
+static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
+				   struct nvmet_req *req) {};
+static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
+static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
+static inline bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+	return true;
+}
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+	return false;
+}
+static inline const char *nvmet_dhchap_dhgroup_name(int dhgid) { return NULL; }
+#endif
+
 #endif /* _NVMET_H */