diff mbox series

[v4,3/7] soc: qcom: add pd-mapper implementation

Message ID 20240311-qcom-pd-mapper-v4-3-24679cca5c24@linaro.org (mailing list archive)
State Superseded
Headers show
Series soc: qcom: add in-kernel pd-mapper implementation | expand

Commit Message

Dmitry Baryshkov March 11, 2024, 3:34 p.m. UTC
Existing userspace protection domain mapper implementation has several
issues. It doesn't play well with CONFIG_EXTRA_FIRMWARE, it doesn't
reread JSON files if firmware location is changed (or if firmware was
not available at the time pd-mapper was started but the corresponding
directory is mounted later), etc.

Provide in-kernel service implementing protection domain mapping
required to work with several services, which are provided by the DSP
firmware.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
---
 drivers/soc/qcom/Kconfig           |  10 ++
 drivers/soc/qcom/Makefile          |   2 +
 drivers/soc/qcom/qcom_pdm.c        | 346 +++++++++++++++++++++++++++++++++++++
 drivers/soc/qcom/qcom_pdm_msg.c    | 188 ++++++++++++++++++++
 drivers/soc/qcom/qcom_pdm_msg.h    |  66 +++++++
 include/linux/soc/qcom/pd_mapper.h |  39 +++++
 6 files changed, 651 insertions(+)

Comments

Konrad Dybcio March 12, 2024, 12:55 a.m. UTC | #1
On 3/11/24 16:34, Dmitry Baryshkov wrote:
> Existing userspace protection domain mapper implementation has several
> issue. It doesn't play well with CONFIG_EXTRA_FIRMWARE, it doesn't
> reread JSON files if firmware location is changed (or if firmware was
> not available at the time pd-mapper was started but the corresponding
> directory is mounted later), etc.
> 
> Provide in-kernel service implementing protection domain mapping
> required to work with several services, which are provided by the DSP
> firmware.
> 
> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> ---

I'm far from an expert on how this works, but to a non-expert eye,
there is nothing outrageously wrong.

One suggestion I have is to use cleanup.h and scoped guards to
save on some LoC

Konrad
Johan Hovold March 12, 2024, 9:36 a.m. UTC | #2
On Mon, Mar 11, 2024 at 05:34:03PM +0200, Dmitry Baryshkov wrote:
> Existing userspace protection domain mapper implementation has several
> issue. It doesn't play well with CONFIG_EXTRA_FIRMWARE, it doesn't
> reread JSON files if firmware location is changed (or if firmware was
> not available at the time pd-mapper was started but the corresponding
> directory is mounted later), etc.
> 
> Provide in-kernel service implementing protection domain mapping
> required to work with several services, which are provided by the DSP
> firmware.
> 
> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>

Just a couple of drive-by comments below.

> +int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
> +{
> +	int ret;
> +	int i;
> +
> +	mutex_lock(&qcom_pdm_mutex);
> +
> +	if (qcom_pdm_server_added) {
> +		ret = qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +				     SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +		if (ret)
> +			goto err_out;

Name error labels after what they do, that is, 'err_unlock' in this
case.

> +	}
> +
> +	for (i = 0; i < num_data; i++) {
> +		ret = qcom_pdm_add_domain_locked(data[i]);
> +		if (ret)
> +			goto err;

And err_del_domains here.

> +	}
> +
> +	ret = qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +			     SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +	if (ret) {
> +		pr_err("PDM: error adding server %d\n", ret);
> +		goto err;
> +	}
> +
> +	qcom_pdm_server_added = true;
> +
> +	mutex_unlock(&qcom_pdm_mutex);
> +
> +	return 0;
> +
> +err:
> +	while (--i >= 0)
> +		qcom_pdm_del_domain_locked(data[i]);
> +
> +	qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +		       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);

Should the server really be added unconditionally here? And if so,
shouldn't you update that flag?

> +
> +err_out:
> +	mutex_unlock(&qcom_pdm_mutex);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(qcom_pdm_add_domains);

> +static void qcom_pdm_get_domain_list(struct qmi_handle *qmi,
> +				     struct sockaddr_qrtr *sq,
> +				     struct qmi_txn *txn,
> +				     const void *decoded)
> +{
> +	const struct servreg_loc_get_domain_list_req *req = decoded;
> +	struct servreg_loc_get_domain_list_resp *rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
> +	struct qcom_pdm_service *service;
> +	u32 offset;
> +	int ret;
> +
> +	offset = req->offset_valid ? req->offset : 0;
> +
> +	rsp->rsp.result = QMI_RESULT_SUCCESS_V01;
> +	rsp->rsp.error = QMI_ERR_NONE_V01;
> +
> +	rsp->db_revision_valid = true;
> +	rsp->db_revision = 1;
> +
> +	rsp->total_domains_valid = true;
> +	rsp->total_domains = 0;
> +
> +	mutex_lock(&qcom_pdm_mutex);
> +
> +	service = qcom_pdm_find_locked(req->name);
> +	if (service) {
> +		struct qcom_pdm_domain *domain;
> +
> +		rsp->domain_list_valid = true;
> +		rsp->domain_list_len = 0;
> +
> +		list_for_each_entry(domain, &service->domains, list) {
> +			u32 i = rsp->total_domains++;
> +
> +			if (i >= offset && i < SERVREG_LOC_MAX_DOMAINS) {
> +				u32 j = rsp->domain_list_len++;
> +
> +				strscpy(rsp->domain_list[j].name, domain->name,
> +					sizeof(rsp->domain_list[i].name));
> +				rsp->domain_list[j].instance_id = domain->instance_id;
> +
> +				pr_debug("PDM: found %s / %d\n", domain->name,
> +					 domain->instance_id);
> +			}
> +		}
> +

Stray newline.

> +	}
> +
> +	mutex_unlock(&qcom_pdm_mutex);
> +
> +	pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->name,
> +		 req->offset_valid ? req->offset : -1, rsp->domain_list_len, rsp->total_domains);
> +
> +	ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_GET_DOMAIN_LIST,
> +				2658,
> +				servreg_loc_get_domain_list_resp_ei, rsp);
> +	if (ret)
> +		pr_err("Error sending servreg response: %d\n", ret);
> +
> +	kfree(rsp);
> +}

Johan
Johan Hovold March 12, 2024, 9:43 a.m. UTC | #3
On Tue, Mar 12, 2024 at 01:55:27AM +0100, Konrad Dybcio wrote:

> One suggestion I have is to use cleanup.h and scoped guards to
> save on some LoC

LoC is not the best metric for code quality; it really depends on whether it
makes the code more readable or not. Often being explicit, so that it's
obvious what is going on, is preferred even if it adds a few more lines
(cf. all the ways that people manage to use devres wrong).

After just skimming this patch, I don't see that there would be any
benefit from using scoped guards.

Johan
Chris Lew March 14, 2024, 7:44 p.m. UTC | #4
On 3/11/2024 8:34 AM, Dmitry Baryshkov wrote:
> +int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
> +{
> +	int ret;
> +	int i;
> +
> +	mutex_lock(&qcom_pdm_mutex);
> +
> +	if (qcom_pdm_server_added) {
> +		ret = qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +				     SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +		if (ret)
> +			goto err_out;
> +	}
> +
> +	for (i = 0; i < num_data; i++) {
> +		ret = qcom_pdm_add_domain_locked(data[i]);
> +		if (ret)
> +			goto err;
> +	}
> +
> +	ret = qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +			     SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +	if (ret) {
> +		pr_err("PDM: error adding server %d\n", ret);
> +		goto err;
> +	}
> +
> +	qcom_pdm_server_added = true;
> +
> +	mutex_unlock(&qcom_pdm_mutex);
> +
> +	return 0;
> +
> +err:
> +	while (--i >= 0)
> +		qcom_pdm_del_domain_locked(data[i]);
> +
> +	qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +		       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +
> +err_out:
> +	mutex_unlock(&qcom_pdm_mutex);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(qcom_pdm_add_domains);
> +
> +void qcom_pdm_del_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
> +{
> +	int i;
> +
> +	mutex_lock(&qcom_pdm_mutex);
> +
> +	if (qcom_pdm_server_added) {
> +		qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +			       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +	}

I am confused as to why we need to reset the qmi handle anytime
qcom_pdm_del_domains or qcom_pdm_add_domains is called. Is this to
trigger some kind of re-broadcast type notification to clients because
the service list has been updated?

My concern would be that this causes some kind of unintended side-effect
on the rprocs that are not restarting.

> +
> +	for (i = 0; i < num_data; i++)
> +		qcom_pdm_del_domain_locked(data[i]);
> +
> +	qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +		       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +	qcom_pdm_server_added = true;
> +
> +	mutex_unlock(&qcom_pdm_mutex);
> +}
> +EXPORT_SYMBOL_GPL(qcom_pdm_del_domains);
> +
> +static void qcom_pdm_get_domain_list(struct qmi_handle *qmi,
> +				     struct sockaddr_qrtr *sq,
> +				     struct qmi_txn *txn,
> +				     const void *decoded)
> +{
> +	const struct servreg_loc_get_domain_list_req *req = decoded;
> +	struct servreg_loc_get_domain_list_resp *rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
> +	struct qcom_pdm_service *service;
> +	u32 offset;
> +	int ret;
> +
> +	offset = req->offset_valid ? req->offset : 0;
> +
> +	rsp->rsp.result = QMI_RESULT_SUCCESS_V01;
> +	rsp->rsp.error = QMI_ERR_NONE_V01;
> +
> +	rsp->db_revision_valid = true;
> +	rsp->db_revision = 1;
> +
> +	rsp->total_domains_valid = true;
> +	rsp->total_domains = 0;
> +
> +	mutex_lock(&qcom_pdm_mutex);
> +
> +	service = qcom_pdm_find_locked(req->name);
> +	if (service) {
> +		struct qcom_pdm_domain *domain;
> +
> +		rsp->domain_list_valid = true;
> +		rsp->domain_list_len = 0;
> +
> +		list_for_each_entry(domain, &service->domains, list) {
> +			u32 i = rsp->total_domains++;
> +
> +			if (i >= offset && i < SERVREG_LOC_MAX_DOMAINS) {
> +				u32 j = rsp->domain_list_len++;
> +
> +				strscpy(rsp->domain_list[j].name, domain->name,
> +					sizeof(rsp->domain_list[i].name));
> +				rsp->domain_list[j].instance_id = domain->instance_id;
> +
> +				pr_debug("PDM: found %s / %d\n", domain->name,
> +					 domain->instance_id);
> +			}
> +		}
> +
> +	}
> +
> +	mutex_unlock(&qcom_pdm_mutex);
> +
> +	pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->name,
> +		 req->offset_valid ? req->offset : -1, rsp->domain_list_len, rsp->total_domains);
> +
> +	ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_GET_DOMAIN_LIST,
> +				2658,
> +				servreg_loc_get_domain_list_resp_ei, rsp);

Other QMI clients like pdr_interface have macros for the message size.
Can we considering adding one instead of using 2658 directly?

> +	if (ret)
> +		pr_err("Error sending servreg response: %d\n", ret);
> +
> +	kfree(rsp);
> +}
> +
> +static void qcom_pdm_pfr(struct qmi_handle *qmi,
> +			 struct sockaddr_qrtr *sq,
> +			 struct qmi_txn *txn,
> +			 const void *decoded)
> +{
> +	const struct servreg_loc_pfr_req *req = decoded;
> +	struct servreg_loc_pfr_resp rsp = {};
> +	int ret;
> +
> +	pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason);
> +
> +	rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
> +	rsp.rsp.error = QMI_ERR_NONE_V01;
> +
> +	ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR,
> +				SERVREG_LOC_PFR_RESP_MSG_SIZE,
> +				servreg_loc_pfr_resp_ei, &rsp);
> +	if (ret)
> +		pr_err("Error sending servreg response: %d\n", ret);
> +}
> +
> diff --git a/drivers/soc/qcom/qcom_pdm_msg.h b/drivers/soc/qcom/qcom_pdm_msg.h
> new file mode 100644
> index 000000000000..e576b87c67c0
> --- /dev/null
> +++ b/drivers/soc/qcom/qcom_pdm_msg.h
> @@ -0,0 +1,66 @@
> +// SPDX-License-Identifier: BSD-3-Clause
> +/*
> + * Copyright (c) 2018, Linaro Ltd.
> + * Copyright (c) 2016, Bjorn Andersson
> + */
> +
> +#ifndef __QMI_SERVREG_LOC_H__
> +#define __QMI_SERVREG_LOC_H__
> +

Should we update the header guards to reflect the new file name?

> +#include <linux/types.h>
> +#include <linux/soc/qcom/qmi.h>
> +
> +#define SERVREG_QMI_SERVICE 64
> +#define SERVREG_QMI_VERSION 257
> +#define SERVREG_QMI_INSTANCE 0
> +#define QMI_RESULT_SUCCESS 0
> +#define QMI_RESULT_FAILURE 1
> +#define QMI_ERR_NONE 0
> +#define QMI_ERR_INTERNAL 1
> +#define QMI_ERR_MALFORMED_MSG 2

I think these common QMI macro definitions are wrong. They should match
what is defined in <soc/qcom/qmi.h>. This is a bug in the userspace
pd-mapper header as well.

> +#endif
> diff --git a/include/linux/soc/qcom/pd_mapper.h b/include/linux/soc/qcom/pd_mapper.h
> new file mode 100644
> index 000000000000..86438b7ca6fe
> --- /dev/null
> +++ b/include/linux/soc/qcom/pd_mapper.h
> @@ -0,0 +1,39 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Qualcomm Protection Domain mapper
> + *
> + * Copyright (c) 2023 Linaro Ltd.
> + */
> +#ifndef __QCOM_PD_MAPPER__
> +#define __QCOM_PD_MAPPER__
> +
> +struct qcom_pdm_domain_data {
> +	const char *domain;
> +	u32 instance_id;
> +	/* NULL-terminated array */
> +	const char * services[];

s/char * services[]/char *services[]/
Dmitry Baryshkov March 14, 2024, 9:30 p.m. UTC | #5
On Thu, 14 Mar 2024 at 21:44, Chris Lew <quic_clew@quicinc.com> wrote:
>
>
>
> On 3/11/2024 8:34 AM, Dmitry Baryshkov wrote:
> > +int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
> > +{
> > +     int ret;
> > +     int i;
> > +
> > +     mutex_lock(&qcom_pdm_mutex);
> > +
> > +     if (qcom_pdm_server_added) {
> > +             ret = qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> > +                                  SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> > +             if (ret)
> > +                     goto err_out;
> > +     }
> > +
> > +     for (i = 0; i < num_data; i++) {
> > +             ret = qcom_pdm_add_domain_locked(data[i]);
> > +             if (ret)
> > +                     goto err;
> > +     }
> > +
> > +     ret = qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> > +                          SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> > +     if (ret) {
> > +             pr_err("PDM: error adding server %d\n", ret);
> > +             goto err;
> > +     }
> > +
> > +     qcom_pdm_server_added = true;
> > +
> > +     mutex_unlock(&qcom_pdm_mutex);
> > +
> > +     return 0;
> > +
> > +err:
> > +     while (--i >= 0)
> > +             qcom_pdm_del_domain_locked(data[i]);
> > +
> > +     qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> > +                    SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> > +
> > +err_out:
> > +     mutex_unlock(&qcom_pdm_mutex);
> > +
> > +     return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(qcom_pdm_add_domains);
> > +
> > +void qcom_pdm_del_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
> > +{
> > +     int i;
> > +
> > +     mutex_lock(&qcom_pdm_mutex);
> > +
> > +     if (qcom_pdm_server_added) {
> > +             qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> > +                            SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> > +     }
>
> I am confused as to why we need to reset the qmi handle anytime
> qcom_pdm_del_domains or qcom_pdm_add_domains is called. Is this to
> trigger some kind of re-broadcast type notification to clients because
> the service list has been updated?

Yes. Otherwise clients like pmic-glink will miss new domains.

>
> My concern would be that this causes some kind of unintended side-effect
> on the rprocs that are not restarting.

Well, an alternative is to match machine compatible strings and to
build a full list of domains right from the beginning.

>
> > +
> > +     for (i = 0; i < num_data; i++)
> > +             qcom_pdm_del_domain_locked(data[i]);
> > +
> > +     qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> > +                    SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> > +     qcom_pdm_server_added = true;
> > +
> > +     mutex_unlock(&qcom_pdm_mutex);
> > +}
> > +EXPORT_SYMBOL_GPL(qcom_pdm_del_domains);
> > +
> > +static void qcom_pdm_get_domain_list(struct qmi_handle *qmi,
> > +                                  struct sockaddr_qrtr *sq,
> > +                                  struct qmi_txn *txn,
> > +                                  const void *decoded)
> > +{
> > +     const struct servreg_loc_get_domain_list_req *req = decoded;
> > +     struct servreg_loc_get_domain_list_resp *rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
> > +     struct qcom_pdm_service *service;
> > +     u32 offset;
> > +     int ret;
> > +
> > +     offset = req->offset_valid ? req->offset : 0;
> > +
> > +     rsp->rsp.result = QMI_RESULT_SUCCESS_V01;
> > +     rsp->rsp.error = QMI_ERR_NONE_V01;
> > +
> > +     rsp->db_revision_valid = true;
> > +     rsp->db_revision = 1;
> > +
> > +     rsp->total_domains_valid = true;
> > +     rsp->total_domains = 0;
> > +
> > +     mutex_lock(&qcom_pdm_mutex);
> > +
> > +     service = qcom_pdm_find_locked(req->name);
> > +     if (service) {
> > +             struct qcom_pdm_domain *domain;
> > +
> > +             rsp->domain_list_valid = true;
> > +             rsp->domain_list_len = 0;
> > +
> > +             list_for_each_entry(domain, &service->domains, list) {
> > +                     u32 i = rsp->total_domains++;
> > +
> > +                     if (i >= offset && i < SERVREG_LOC_MAX_DOMAINS) {
> > +                             u32 j = rsp->domain_list_len++;
> > +
> > +                             strscpy(rsp->domain_list[j].name, domain->name,
> > +                                     sizeof(rsp->domain_list[i].name));
> > +                             rsp->domain_list[j].instance_id = domain->instance_id;
> > +
> > +                             pr_debug("PDM: found %s / %d\n", domain->name,
> > +                                      domain->instance_id);
> > +                     }
> > +             }
> > +
> > +     }
> > +
> > +     mutex_unlock(&qcom_pdm_mutex);
> > +
> > +     pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->name,
> > +              req->offset_valid ? req->offset : -1, rsp->domain_list_len, rsp->total_domains);
> > +
> > +     ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_GET_DOMAIN_LIST,
> > +                             2658,
> > +                             servreg_loc_get_domain_list_resp_ei, rsp);
>
> Other QMI clients like pdr_interface have macros for the message size.
> Can we considering adding one instead of using 2658 directly?


Ack

>
> > +     if (ret)
> > +             pr_err("Error sending servreg response: %d\n", ret);
> > +
> > +     kfree(rsp);
> > +}
> > +
> > +static void qcom_pdm_pfr(struct qmi_handle *qmi,
> > +                      struct sockaddr_qrtr *sq,
> > +                      struct qmi_txn *txn,
> > +                      const void *decoded)
> > +{
> > +     const struct servreg_loc_pfr_req *req = decoded;
> > +     struct servreg_loc_pfr_resp rsp = {};
> > +     int ret;
> > +
> > +     pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason);
> > +
> > +     rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
> > +     rsp.rsp.error = QMI_ERR_NONE_V01;
> > +
> > +     ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR,
> > +                             SERVREG_LOC_PFR_RESP_MSG_SIZE,
> > +                             servreg_loc_pfr_resp_ei, &rsp);
> > +     if (ret)
> > +             pr_err("Error sending servreg response: %d\n", ret);
> > +}
> > +
> > diff --git a/drivers/soc/qcom/qcom_pdm_msg.h b/drivers/soc/qcom/qcom_pdm_msg.h
> > new file mode 100644
> > index 000000000000..e576b87c67c0
> > --- /dev/null
> > +++ b/drivers/soc/qcom/qcom_pdm_msg.h
> > @@ -0,0 +1,66 @@
> > +// SPDX-License-Identifier: BSD-3-Clause
> > +/*
> > + * Copyright (c) 2018, Linaro Ltd.
> > + * Copyright (c) 2016, Bjorn Andersson
> > + */
> > +
> > +#ifndef __QMI_SERVREG_LOC_H__
> > +#define __QMI_SERVREG_LOC_H__
> > +
>
> Should we update the header guards to reflect the new file name?

Ack

>
> > +#include <linux/types.h>
> > +#include <linux/soc/qcom/qmi.h>
> > +
> > +#define SERVREG_QMI_SERVICE 64
> > +#define SERVREG_QMI_VERSION 257
> > +#define SERVREG_QMI_INSTANCE 0
> > +#define QMI_RESULT_SUCCESS 0
> > +#define QMI_RESULT_FAILURE 1
> > +#define QMI_ERR_NONE 0
> > +#define QMI_ERR_INTERNAL 1
> > +#define QMI_ERR_MALFORMED_MSG 2
>
> I think these common QMI macro definitions are wrong. They should match
> what is defined in <soc/qcom/qmi.h>. This is a bug in the userspace
> pd-mapper header as well.

Ack

>
> > +#endif
> > diff --git a/include/linux/soc/qcom/pd_mapper.h b/include/linux/soc/qcom/pd_mapper.h
> > new file mode 100644
> > index 000000000000..86438b7ca6fe
> > --- /dev/null
> > +++ b/include/linux/soc/qcom/pd_mapper.h
> > @@ -0,0 +1,39 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +/*
> > + * Qualcomm Protection Domain mapper
> > + *
> > + * Copyright (c) 2023 Linaro Ltd.
> > + */
> > +#ifndef __QCOM_PD_MAPPER__
> > +#define __QCOM_PD_MAPPER__
> > +
> > +struct qcom_pdm_domain_data {
> > +     const char *domain;
> > +     u32 instance_id;
> > +     /* NULL-terminated array */
> > +     const char * services[];
>
> s/char * services[]/char *services[]/
Chris Lew March 15, 2024, 12:57 a.m. UTC | #6
On 3/14/2024 2:30 PM, Dmitry Baryshkov wrote:
> On Thu, 14 Mar 2024 at 21:44, Chris Lew <quic_clew@quicinc.com> wrote:
>>
>>
>>
>> On 3/11/2024 8:34 AM, Dmitry Baryshkov wrote:
>>> +int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
>>> +{
>>> +     int ret;
>>> +     int i;
>>> +
>>> +     mutex_lock(&qcom_pdm_mutex);
>>> +
>>> +     if (qcom_pdm_server_added) {
>>> +             ret = qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
>>> +                                  SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
>>> +             if (ret)
>>> +                     goto err_out;
>>> +     }
>>> +
>>> +     for (i = 0; i < num_data; i++) {
>>> +             ret = qcom_pdm_add_domain_locked(data[i]);
>>> +             if (ret)
>>> +                     goto err;
>>> +     }
>>> +
>>> +     ret = qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
>>> +                          SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
>>> +     if (ret) {
>>> +             pr_err("PDM: error adding server %d\n", ret);
>>> +             goto err;
>>> +     }
>>> +
>>> +     qcom_pdm_server_added = true;
>>> +
>>> +     mutex_unlock(&qcom_pdm_mutex);
>>> +
>>> +     return 0;
>>> +
>>> +err:
>>> +     while (--i >= 0)
>>> +             qcom_pdm_del_domain_locked(data[i]);
>>> +
>>> +     qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
>>> +                    SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
>>> +
>>> +err_out:
>>> +     mutex_unlock(&qcom_pdm_mutex);
>>> +
>>> +     return ret;
>>> +}
>>> +EXPORT_SYMBOL_GPL(qcom_pdm_add_domains);
>>> +
>>> +void qcom_pdm_del_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
>>> +{
>>> +     int i;
>>> +
>>> +     mutex_lock(&qcom_pdm_mutex);
>>> +
>>> +     if (qcom_pdm_server_added) {
>>> +             qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
>>> +                            SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
>>> +     }
>>
>> I am confused as to why we need to reset the qmi handle anytime
>> qcom_pdm_del_domains or qcom_pdm_add_domains is called. Is this to
>> trigger some kind of re-broadcast type notification to clients because
>> the service list has been updated?
> 
> Yes. Otherwise clients like pmic-glink will miss new domains.
> 
>>
>> My concern would be that this causes some kind of unintended side-effect
>> on the rprocs that are not restarting.
> 
> Well, an alternative is to match machine compatible strings and to
> build a full list of domains right from the beginning.
> 

Ok, thanks for clarifying. I spoke to some of the firmware developers
and a quick scan seems to indicate their handling is robust enough to
handle this. It is a change in expected behavior but I think the current
approach is reasonable.

>>
>>> +
>>> +     for (i = 0; i < num_data; i++)
>>> +             qcom_pdm_del_domain_locked(data[i]);
>>> +
>>> +     qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
>>> +                    SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
>>> +     qcom_pdm_server_added = true;
>>> +
>>> +     mutex_unlock(&qcom_pdm_mutex);
>>> +}
>>> +EXPORT_SYMBOL_GPL(qcom_pdm_del_domains);
>>> +
>>> +static void qcom_pdm_get_domain_list(struct qmi_handle *qmi,
>>> +                                  struct sockaddr_qrtr *sq,
>>> +                                  struct qmi_txn *txn,
>>> +                                  const void *decoded)
>>> +{
>>> +     const struct servreg_loc_get_domain_list_req *req = decoded;
>>> +     struct servreg_loc_get_domain_list_resp *rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
>>> +     struct qcom_pdm_service *service;
>>> +     u32 offset;
>>> +     int ret;
>>> +
>>> +     offset = req->offset_valid ? req->offset : 0;
>>> +
>>> +     rsp->rsp.result = QMI_RESULT_SUCCESS_V01;
>>> +     rsp->rsp.error = QMI_ERR_NONE_V01;
>>> +
>>> +     rsp->db_revision_valid = true;
>>> +     rsp->db_revision = 1;
>>> +
>>> +     rsp->total_domains_valid = true;
>>> +     rsp->total_domains = 0;
>>> +
>>> +     mutex_lock(&qcom_pdm_mutex);
>>> +
>>> +     service = qcom_pdm_find_locked(req->name);
>>> +     if (service) {
>>> +             struct qcom_pdm_domain *domain;
>>> +
>>> +             rsp->domain_list_valid = true;
>>> +             rsp->domain_list_len = 0;
>>> +
>>> +             list_for_each_entry(domain, &service->domains, list) {
>>> +                     u32 i = rsp->total_domains++;
>>> +
>>> +                     if (i >= offset && i < SERVREG_LOC_MAX_DOMAINS) {
>>> +                             u32 j = rsp->domain_list_len++;
>>> +
>>> +                             strscpy(rsp->domain_list[j].name, domain->name,
>>> +                                     sizeof(rsp->domain_list[i].name));
>>> +                             rsp->domain_list[j].instance_id = domain->instance_id;
>>> +
>>> +                             pr_debug("PDM: found %s / %d\n", domain->name,
>>> +                                      domain->instance_id);
>>> +                     }
>>> +             }
>>> +
>>> +     }
>>> +
>>> +     mutex_unlock(&qcom_pdm_mutex);
>>> +
>>> +     pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->name,
>>> +              req->offset_valid ? req->offset : -1, rsp->domain_list_len, rsp->total_domains);
>>> +
>>> +     ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_GET_DOMAIN_LIST,
>>> +                             2658,
>>> +                             servreg_loc_get_domain_list_resp_ei, rsp);
>>
>> Other QMI clients like pdr_interface have macros for the message size.
>> Can we considering adding one instead of using 2658 directly?
> 
> 
> Ack
> 
>>
>>> +     if (ret)
>>> +             pr_err("Error sending servreg response: %d\n", ret);
>>> +
>>> +     kfree(rsp);
>>> +}
>>> +
>>> +static void qcom_pdm_pfr(struct qmi_handle *qmi,
>>> +                      struct sockaddr_qrtr *sq,
>>> +                      struct qmi_txn *txn,
>>> +                      const void *decoded)
>>> +{
>>> +     const struct servreg_loc_pfr_req *req = decoded;
>>> +     struct servreg_loc_pfr_resp rsp = {};
>>> +     int ret;
>>> +
>>> +     pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason);
>>> +
>>> +     rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
>>> +     rsp.rsp.error = QMI_ERR_NONE_V01;
>>> +
>>> +     ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR,
>>> +                             SERVREG_LOC_PFR_RESP_MSG_SIZE,
>>> +                             servreg_loc_pfr_resp_ei, &rsp);
>>> +     if (ret)
>>> +             pr_err("Error sending servreg response: %d\n", ret);
>>> +}
>>> +
>>> diff --git a/drivers/soc/qcom/qcom_pdm_msg.h b/drivers/soc/qcom/qcom_pdm_msg.h
>>> new file mode 100644
>>> index 000000000000..e576b87c67c0
>>> --- /dev/null
>>> +++ b/drivers/soc/qcom/qcom_pdm_msg.h
>>> @@ -0,0 +1,66 @@
>>> +// SPDX-License-Identifier: BSD-3-Clause
>>> +/*
>>> + * Copyright (c) 2018, Linaro Ltd.
>>> + * Copyright (c) 2016, Bjorn Andersson
>>> + */
>>> +
>>> +#ifndef __QMI_SERVREG_LOC_H__
>>> +#define __QMI_SERVREG_LOC_H__
>>> +
>>
>> Should we update the header guards to reflect the new file name?
> 
> Ack
> 
>>
>>> +#include <linux/types.h>
>>> +#include <linux/soc/qcom/qmi.h>
>>> +
>>> +#define SERVREG_QMI_SERVICE 64
>>> +#define SERVREG_QMI_VERSION 257
>>> +#define SERVREG_QMI_INSTANCE 0
>>> +#define QMI_RESULT_SUCCESS 0
>>> +#define QMI_RESULT_FAILURE 1
>>> +#define QMI_ERR_NONE 0
>>> +#define QMI_ERR_INTERNAL 1
>>> +#define QMI_ERR_MALFORMED_MSG 2
>>
>> I think these common QMI macro definitions are wrong. They should match
>> what is defined in <soc/qcom/qmi.h>. This is a bug in the userspace
>> pd-mapper header as well.
> 
> Ack
> 
>>
>>> +#endif
>>> diff --git a/include/linux/soc/qcom/pd_mapper.h b/include/linux/soc/qcom/pd_mapper.h
>>> new file mode 100644
>>> index 000000000000..86438b7ca6fe
>>> --- /dev/null
>>> +++ b/include/linux/soc/qcom/pd_mapper.h
>>> @@ -0,0 +1,39 @@
>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>> +/*
>>> + * Qualcomm Protection Domain mapper
>>> + *
>>> + * Copyright (c) 2023 Linaro Ltd.
>>> + */
>>> +#ifndef __QCOM_PD_MAPPER__
>>> +#define __QCOM_PD_MAPPER__
>>> +
>>> +struct qcom_pdm_domain_data {
>>> +     const char *domain;
>>> +     u32 instance_id;
>>> +     /* NULL-terminated array */
>>> +     const char * services[];
>>
>> s/char * services[]/char *services[]/
> 
> 
>
Bjorn Andersson April 7, 2024, 11:14 p.m. UTC | #7
On Mon, Mar 11, 2024 at 05:34:03PM +0200, Dmitry Baryshkov wrote:
> diff --git a/drivers/soc/qcom/qcom_pdm.c b/drivers/soc/qcom/qcom_pdm.c
[..]
> +int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
> +{
> +	int ret;
> +	int i;
> +
> +	mutex_lock(&qcom_pdm_mutex);
> +
> +	if (qcom_pdm_server_added) {
> +		ret = qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +				     SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);

Sorry for the late reply.

I met with the owners of the firmware side of this and we concluded that
this will unfortunately not work.

The typical case is that when the firmware comes up, it queries the
information from the pd-mapper about itself, so this would obviously
work just fine.

Further more, if another core causes the server to be deleted and then
re-added the firmware will wait for pd-mapper to come up. So this will
work as well - as reported by Chris.

There is however a not too uncommon case where the firmware on one
remoteproc will inquiry about information of another remoteproc. One
example is modem coming up, inquiring about where to find audio
services. This inquiry will be performed once at firmware boot and
decisions will be made based on the responses, no retries or updates.

As such, starting pd-mapper with an incomplete "database" is
unfortunately not supported.

Regards,
Bjorn
Dmitry Baryshkov April 7, 2024, 11:20 p.m. UTC | #8
On Mon, 8 Apr 2024 at 02:14, Bjorn Andersson <andersson@kernel.org> wrote:
>
> On Mon, Mar 11, 2024 at 05:34:03PM +0200, Dmitry Baryshkov wrote:
> > diff --git a/drivers/soc/qcom/qcom_pdm.c b/drivers/soc/qcom/qcom_pdm.c
> [..]
> > +int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
> > +{
> > +     int ret;
> > +     int i;
> > +
> > +     mutex_lock(&qcom_pdm_mutex);
> > +
> > +     if (qcom_pdm_server_added) {
> > +             ret = qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> > +                                  SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
>
> Sorry for the late reply.
>
> I met with the owners of the firmware side of this and we concluded that
> this will unfortunately not work.
>
> The typical case is that when the firmware comes up, it queries the
> information from the pd-mapper about itself, so this would obviously
> work just fine.
>
> Further more, if another core causes the server to be deleted and then
> re-added the firmware will wait for pd-mapper to come up. So this will
> work as well - as reported by Chris.
>
> There is however a not too uncommon case where the firmware on one
> remoteproc will inquiry about information of another remoteproc. One
> example is modem coming up, inquiring about where to find audio
> services. This inquiry will be performed once at firmware boot and
> decisions will be made based on the responses, no retries or updates.
>
> As such, starting pd-mapper with an incomplete "database" is
> unfortunately not supported.

Ack. Thanks for the info.

Xilin Wu has also reported a rare race condition seemingly between
pmic-glink and the pd-mapper.

I think I will rework the code to bring up the full database based on
the machine compatible.

>
> Regards,
> Bjorn





--
With best wishes
Dmitry
diff mbox series

Patch

diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5af33b0e3470..f236ce376c1b 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -72,6 +72,16 @@  config QCOM_OCMEM
 	  requirements. This is typically used by the GPU, camera/video, and
 	  audio components on some Snapdragon SoCs.
 
+config QCOM_PD_MAPPER
+	tristate "Qualcomm Protection Domain Mapper"
+	select QCOM_QMI_HELPERS
+	depends on NET && QRTR
+	help
+	  The Protection Domain Mapper maps registered services to the domains
+	  and instances handled by the remote DSPs. This is a kernel-space
+	  implementation of the service. It is a simpler alternative to the
+	  userspace daemon.
+
 config QCOM_PDR_HELPERS
 	tristate
 	select QCOM_QMI_HELPERS
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ca0bece0dfff..65e33b5a2231 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -7,6 +7,8 @@  obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
 obj-$(CONFIG_QCOM_MDT_LOADER)	+= mdt_loader.o
 obj-$(CONFIG_QCOM_OCMEM)	+= ocmem.o
+obj-$(CONFIG_QCOM_PD_MAPPER)	+= qcom_pd_mapper.o
+qcom_pd_mapper-y += qcom_pdm.o qcom_pdm_msg.o
 obj-$(CONFIG_QCOM_PDR_HELPERS)	+= pdr_interface.o
 obj-$(CONFIG_QCOM_PMIC_GLINK)	+= pmic_glink.o
 obj-$(CONFIG_QCOM_PMIC_GLINK)	+= pmic_glink_altmode.o
diff --git a/drivers/soc/qcom/qcom_pdm.c b/drivers/soc/qcom/qcom_pdm.c
new file mode 100644
index 000000000000..9d14b18b8590
--- /dev/null
+++ b/drivers/soc/qcom/qcom_pdm.c
@@ -0,0 +1,346 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm Protection Domain mapper
+ *
+ * Copyright (c) 2023 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/soc/qcom/pd_mapper.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "qcom_pdm_msg.h"
+
+#define TMS_SERVREG_SERVICE "tms/servreg"
+
+/* A single protection domain: its name and QMI instance id. */
+struct qcom_pdm_domain {
+	struct list_head list;
+	const char *name;
+	u32 instance_id;
+};
+
+/* A servreg service name plus the list of domains that provide it. */
+struct qcom_pdm_service {
+	struct list_head list;
+	struct list_head domains;
+	const char *name;
+};
+
+/* Database of registered services; protected by qcom_pdm_mutex. */
+static LIST_HEAD(qcom_pdm_services);
+static DEFINE_MUTEX(qcom_pdm_mutex);
+/* True once the servreg locator has been announced via qmi_add_server(). */
+static bool qcom_pdm_server_added;
+static struct qmi_handle qcom_pdm_handle;
+
+/* Look up a registered service by name.  Caller must hold qcom_pdm_mutex. */
+static struct qcom_pdm_service *qcom_pdm_find_locked(const char *name)
+{
+	struct qcom_pdm_service *it;
+
+	list_for_each_entry(it, &qcom_pdm_services, list) {
+		if (strcmp(it->name, name) == 0)
+			return it;
+	}
+
+	return NULL;
+}
+
+/*
+ * Remove @domain_name from the domain list of @service_name.  The service
+ * entry itself is freed once its last domain is gone.  Caller must hold
+ * qcom_pdm_mutex.
+ */
+static void qcom_pdm_del_service_domain_locked(const char *service_name, const char *domain_name)
+{
+	struct qcom_pdm_service *service;
+	struct qcom_pdm_domain *domain, *temp;
+
+	service = qcom_pdm_find_locked(service_name);
+	if (WARN_ON(!service))
+		return;
+
+	list_for_each_entry_safe(domain, temp, &service->domains, list) {
+		if (!strcmp(domain->name, domain_name)) {
+			list_del(&domain->list);
+			kfree(domain);
+
+			/* Drop the service once no domain references it. */
+			if (list_empty(&service->domains)) {
+				list_del(&service->list);
+				kfree(service->name);
+				kfree(service);
+			}
+
+			return;
+		}
+	}
+
+	/* Deleting a domain that was never added is a caller bug. */
+	WARN(1, "domain not found");
+}
+
+/*
+ * Register @domain_name / @instance_id under @service_name, creating the
+ * service entry on first use.  Caller must hold qcom_pdm_mutex.
+ *
+ * Returns 0 on success, -EBUSY if the domain is already registered for this
+ * service, -ENOMEM on allocation failure.
+ */
+static int qcom_pdm_add_service_domain_locked(const char *service_name,
+					      const char *domain_name,
+					      u32 instance_id)
+{
+	struct qcom_pdm_service *service;
+	struct qcom_pdm_domain *domain;
+
+	service = qcom_pdm_find_locked(service_name);
+	if (service) {
+		list_for_each_entry(domain, &service->domains, list) {
+			if (!strcmp(domain->name, domain_name))
+				return -EBUSY;
+		}
+	} else {
+		service = kzalloc(sizeof(*service), GFP_KERNEL);
+		if (!service)
+			return -ENOMEM;
+
+		INIT_LIST_HEAD(&service->domains);
+		service->name = kstrdup(service_name, GFP_KERNEL);
+		if (!service->name) {
+			/* Unchecked kstrdup() would later crash in strcmp(). */
+			kfree(service);
+			return -ENOMEM;
+		}
+
+		list_add_tail(&service->list, &qcom_pdm_services);
+	}
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain) {
+		/* Undo the service allocation if this was its first domain. */
+		if (list_empty(&service->domains)) {
+			list_del(&service->list);
+			kfree(service->name);
+			kfree(service);
+		}
+
+		return -ENOMEM;
+	}
+
+	/*
+	 * service name can outlive calling module and so it should be strdup'ed.
+	 * domain name can not outlive the module, so there is no need to strdup it.
+	 */
+	domain->name = domain_name;
+	domain->instance_id = instance_id;
+	list_add_tail(&domain->list, &service->domains);
+
+	return 0;
+}
+
+/*
+ * Register one domain: add it to the common "tms/servreg" service and to
+ * every service listed in @data->services.  On failure all entries added so
+ * far are rolled back.  Caller must hold qcom_pdm_mutex.
+ */
+static int qcom_pdm_add_domain_locked(const struct qcom_pdm_domain_data *data)
+{
+	int ret;
+	int i;
+
+	ret = qcom_pdm_add_service_domain_locked(TMS_SERVREG_SERVICE,
+						 data->domain,
+						 data->instance_id);
+	if (ret)
+		return ret;
+
+	for (i = 0; data->services[i]; i++) {
+		ret = qcom_pdm_add_service_domain_locked(data->services[i],
+							 data->domain,
+							 data->instance_id);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	/* Unwind the services added so far, then the tms/servreg entry. */
+	while (--i >= 0)
+		qcom_pdm_del_service_domain_locked(data->services[i], data->domain);
+
+	qcom_pdm_del_service_domain_locked(TMS_SERVREG_SERVICE, data->domain);
+
+	return ret;
+}
+
+/* Undo qcom_pdm_add_domain_locked().  Caller must hold qcom_pdm_mutex. */
+static void qcom_pdm_del_domain_locked(const struct qcom_pdm_domain_data *data)
+{
+	int i;
+
+	for (i = 0; data->services[i]; i++)
+		qcom_pdm_del_service_domain_locked(data->services[i], data->domain);
+
+	qcom_pdm_del_service_domain_locked(TMS_SERVREG_SERVICE, data->domain);
+}
+
+/**
+ * qcom_pdm_add_domains - register protection domains with the mapper
+ * @data: array of domain descriptions
+ * @num_data: number of entries in @data
+ *
+ * Adds the domains to the database, cycling the servreg locator QMI server
+ * so that clients only ever see a complete database.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
+{
+	int ret;
+	int i;
+
+	mutex_lock(&qcom_pdm_mutex);
+
+	/* Take the server down while the database is being modified. */
+	if (qcom_pdm_server_added) {
+		ret = qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
+				     SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
+		if (ret)
+			goto err_out;
+	}
+
+	for (i = 0; i < num_data; i++) {
+		ret = qcom_pdm_add_domain_locked(data[i]);
+		if (ret)
+			goto err;
+	}
+
+	ret = qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
+			     SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
+	if (ret) {
+		pr_err("PDM: error adding server %d\n", ret);
+		goto err;
+	}
+
+	qcom_pdm_server_added = true;
+
+	mutex_unlock(&qcom_pdm_mutex);
+
+	return 0;
+
+err:
+	while (--i >= 0)
+		qcom_pdm_del_domain_locked(data[i]);
+
+	/*
+	 * Re-announce the server only if it had been registered before;
+	 * adding it here otherwise would leave it registered while
+	 * qcom_pdm_server_added stays false, double-registering on the
+	 * next successful call.
+	 */
+	if (qcom_pdm_server_added)
+		qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
+			       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
+
+err_out:
+	mutex_unlock(&qcom_pdm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_pdm_add_domains);
+
+/**
+ * qcom_pdm_del_domains - remove protection domains from the mapper
+ * @data: array of domain descriptions previously registered with
+ *        qcom_pdm_add_domains()
+ * @num_data: number of entries in @data
+ *
+ * Removes the domains from the database, cycling the servreg locator QMI
+ * server so that clients observe the updated database.
+ */
+void qcom_pdm_del_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
+{
+	int i;
+
+	mutex_lock(&qcom_pdm_mutex);
+
+	if (qcom_pdm_server_added) {
+		qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
+			       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
+	}
+
+	for (i = 0; i < num_data; i++)
+		qcom_pdm_del_domain_locked(data[i]);
+
+	/*
+	 * Only re-announce the server if it had previously been published:
+	 * announcing it for the first time from the delete path would expose
+	 * a database no caller ever completed.
+	 */
+	if (qcom_pdm_server_added)
+		qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
+			       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
+
+	mutex_unlock(&qcom_pdm_mutex);
+}
+EXPORT_SYMBOL_GPL(qcom_pdm_del_domains);
+
+/* QMI handler: list the domains providing the requested service. */
+static void qcom_pdm_get_domain_list(struct qmi_handle *qmi,
+				     struct sockaddr_qrtr *sq,
+				     struct qmi_txn *txn,
+				     const void *decoded)
+{
+	const struct servreg_loc_get_domain_list_req *req = decoded;
+	struct servreg_loc_get_domain_list_resp *rsp;
+	struct qcom_pdm_service *service;
+	u32 offset;
+	int ret;
+
+	/* The response is too large for the stack; allocate it. */
+	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+	if (!rsp)
+		return;
+
+	offset = req->offset_valid ? req->offset : 0;
+
+	rsp->rsp.result = QMI_RESULT_SUCCESS_V01;
+	rsp->rsp.error = QMI_ERR_NONE_V01;
+
+	rsp->db_revision_valid = true;
+	rsp->db_revision = 1;
+
+	rsp->total_domains_valid = true;
+	rsp->total_domains = 0;
+
+	mutex_lock(&qcom_pdm_mutex);
+
+	service = qcom_pdm_find_locked(req->name);
+	if (service) {
+		struct qcom_pdm_domain *domain;
+
+		rsp->domain_list_valid = true;
+		rsp->domain_list_len = 0;
+
+		list_for_each_entry(domain, &service->domains, list) {
+			u32 i = rsp->total_domains++;
+
+			/*
+			 * Copy out up to SERVREG_LOC_MAX_DOMAINS entries
+			 * starting at @offset (the bound must be relative to
+			 * the offset, otherwise any offset >= the array size
+			 * would return an empty page).
+			 */
+			if (i >= offset && i - offset < SERVREG_LOC_MAX_DOMAINS) {
+				u32 j = rsp->domain_list_len++;
+
+				strscpy(rsp->domain_list[j].name, domain->name,
+					sizeof(rsp->domain_list[j].name));
+				rsp->domain_list[j].instance_id = domain->instance_id;
+
+				pr_debug("PDM: found %s / %d\n", domain->name,
+					 domain->instance_id);
+			}
+		}
+
+	}
+
+	mutex_unlock(&qcom_pdm_mutex);
+
+	pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->name,
+		 req->offset_valid ? req->offset : -1, rsp->domain_list_len, rsp->total_domains);
+
+	/* 2658: presumably the max encoded size of this message -- TODO: name it. */
+	ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_GET_DOMAIN_LIST,
+				2658,
+				servreg_loc_get_domain_list_resp_ei, rsp);
+	if (ret)
+		pr_err("Error sending servreg response: %d\n", ret);
+
+	kfree(rsp);
+}
+
+/* QMI handler: log a protection domain failure report and acknowledge it. */
+static void qcom_pdm_pfr(struct qmi_handle *qmi,
+			 struct sockaddr_qrtr *sq,
+			 struct qmi_txn *txn,
+			 const void *decoded)
+{
+	const struct servreg_loc_pfr_req *req = decoded;
+	struct servreg_loc_pfr_resp rsp = {};
+	int ret;
+
+	pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason);
+
+	rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+	rsp.rsp.error = QMI_ERR_NONE_V01;
+
+	ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR,
+				SERVREG_LOC_PFR_RESP_MSG_SIZE,
+				servreg_loc_pfr_resp_ei, &rsp);
+	if (ret)
+		pr_err("Error sending servreg response: %d\n", ret);
+}
+
+/* Requests served by the in-kernel servreg locator. */
+static const struct qmi_msg_handler qcom_pdm_msg_handlers[] = {
+	{
+		.type = QMI_REQUEST,
+		.msg_id = SERVREG_LOC_GET_DOMAIN_LIST,
+		.ei = servreg_loc_get_domain_list_req_ei,
+		.decoded_size = sizeof(struct servreg_loc_get_domain_list_req),
+		.fn = qcom_pdm_get_domain_list,
+	},
+	{
+		.type = QMI_REQUEST,
+		.msg_id = SERVREG_LOC_PFR,
+		.ei = servreg_loc_pfr_req_ei,
+		.decoded_size = sizeof(struct servreg_loc_pfr_req),
+		.fn = qcom_pdm_pfr,
+	},
+	{ },
+};
+
+static int qcom_pdm_init(void)
+{
+	/* 1024-byte receive buffer; no new-server/del-server ops needed. */
+	return qmi_handle_init(&qcom_pdm_handle, 1024,
+			       NULL, qcom_pdm_msg_handlers);
+}
+
+static void qcom_pdm_exit(void)
+{
+	qmi_handle_release(&qcom_pdm_handle);
+
+	/* All users must have deleted their domains before module unload. */
+	WARN_ON(!list_empty(&qcom_pdm_services));
+}
+
+module_init(qcom_pdm_init);
+module_exit(qcom_pdm_exit);
+
+MODULE_DESCRIPTION("Qualcomm Protection Domain Mapper");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/qcom_pdm_msg.c b/drivers/soc/qcom/qcom_pdm_msg.c
new file mode 100644
index 000000000000..6f858df8f3dc
--- /dev/null
+++ b/drivers/soc/qcom/qcom_pdm_msg.c
@@ -0,0 +1,188 @@ 
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2018, Linaro Ltd.
+ * Copyright (c) 2016, Bjorn Andersson
+ */
+
+#include "qcom_pdm_msg.h"
+
+/*
+ * Encoding of one nested struct servreg_loc_domain_list_entry.  Only
+ * referenced by servreg_loc_get_domain_list_resp_ei below, hence static.
+ */
+static struct qmi_elem_info servreg_loc_domain_list_entry_ei[] = {
+	{
+		.data_type = QMI_STRING,
+		.elem_len = 65,
+		.elem_size = sizeof(char),
+		.offset = offsetof(struct servreg_loc_domain_list_entry, name)
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.offset = offsetof(struct servreg_loc_domain_list_entry, instance_id),
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(uint8_t),
+		.offset = offsetof(struct servreg_loc_domain_list_entry, service_data_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.offset = offsetof(struct servreg_loc_domain_list_entry, service_data),
+	},
+	{}
+};
+
+/* Encoding of struct servreg_loc_get_domain_list_req. */
+struct qmi_elem_info servreg_loc_get_domain_list_req_ei[] = {
+	{
+		.data_type = QMI_STRING,
+		.elem_len = 65,
+		.elem_size = sizeof(char),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 1,
+		.offset = offsetof(struct servreg_loc_get_domain_list_req, name)
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.tlv_type = 16,
+		.offset = offsetof(struct servreg_loc_get_domain_list_req, offset_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_4_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u32),
+		.tlv_type = 16,
+		.offset = offsetof(struct servreg_loc_get_domain_list_req, offset),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.elem_len	= 0,
+		.elem_size	= 0,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= 0,
+		.ei_array	= NULL,
+	}
+};
+
+/* Encoding of struct servreg_loc_get_domain_list_resp. */
+struct qmi_elem_info servreg_loc_get_domain_list_resp_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof_field(struct servreg_loc_get_domain_list_resp, rsp),
+		.tlv_type = 2,
+		.offset = offsetof(struct servreg_loc_get_domain_list_resp, rsp),
+		.ei_array = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.tlv_type = 16,
+		.offset = offsetof(struct servreg_loc_get_domain_list_resp, total_domains_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_2_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u16),
+		.tlv_type = 16,
+		.offset = offsetof(struct servreg_loc_get_domain_list_resp, total_domains),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.tlv_type = 17,
+		.offset = offsetof(struct servreg_loc_get_domain_list_resp, db_revision_valid),
+	},
+	{
+		.data_type = QMI_UNSIGNED_2_BYTE,
+		.elem_len = 1,
+		.elem_size = sizeof(u16),
+		.tlv_type = 17,
+		.offset = offsetof(struct servreg_loc_get_domain_list_resp, db_revision),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(u8),
+		.tlv_type = 18,
+		.offset = offsetof(struct servreg_loc_get_domain_list_resp, domain_list_valid),
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len = 1,
+		.elem_size = sizeof(uint8_t),
+		.tlv_type = 18,
+		.offset = offsetof(struct servreg_loc_get_domain_list_resp, domain_list_len),
+	},
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 32,
+		.elem_size = sizeof(struct servreg_loc_domain_list_entry),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 18,
+		.offset = offsetof(struct servreg_loc_get_domain_list_resp, domain_list),
+		.ei_array = servreg_loc_domain_list_entry_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.elem_len	= 0,
+		.elem_size	= 0,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= 0,
+		.ei_array	= NULL,
+	}
+};
+
+/* Encoding of struct servreg_loc_pfr_req (protection domain failure report). */
+struct qmi_elem_info servreg_loc_pfr_req_ei[] = {
+	{
+		.data_type = QMI_STRING,
+		.elem_len = 65,
+		.elem_size = sizeof(char),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 1,
+		.offset = offsetof(struct servreg_loc_pfr_req, service)
+	},
+	{
+		.data_type = QMI_STRING,
+		.elem_len = 65,
+		.elem_size = sizeof(char),
+		.array_type = VAR_LEN_ARRAY,
+		.tlv_type = 2,
+		.offset = offsetof(struct servreg_loc_pfr_req, reason)
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.elem_len	= 0,
+		.elem_size	= 0,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= 0,
+		.ei_array	= NULL,
+	}
+};
+
+/* Encoding of struct servreg_loc_pfr_resp. */
+struct qmi_elem_info servreg_loc_pfr_resp_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof_field(struct servreg_loc_pfr_resp, rsp),
+		.tlv_type = 2,
+		.offset = offsetof(struct servreg_loc_pfr_resp, rsp),
+		.ei_array = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.elem_len	= 0,
+		.elem_size	= 0,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= 0,
+		.ei_array	= NULL,
+	}
+};
diff --git a/drivers/soc/qcom/qcom_pdm_msg.h b/drivers/soc/qcom/qcom_pdm_msg.h
new file mode 100644
index 000000000000..e576b87c67c0
--- /dev/null
+++ b/drivers/soc/qcom/qcom_pdm_msg.h
@@ -0,0 +1,66 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * QMI message definitions for the servreg locator (protection domain mapper)
+ * service.
+ *
+ * Copyright (c) 2018, Linaro Ltd.
+ * Copyright (c) 2016, Bjorn Andersson
+ */
+
+#ifndef __QMI_SERVREG_LOC_H__
+#define __QMI_SERVREG_LOC_H__
+
+#include <linux/types.h>
+#include <linux/soc/qcom/qmi.h>
+
+/* QRTR identification of the servreg locator service. */
+#define SERVREG_QMI_SERVICE 64
+#define SERVREG_QMI_VERSION 257
+#define SERVREG_QMI_INSTANCE 0
+/*
+ * NOTE(review): the unsuffixed QMI result/error codes below appear unused
+ * (qcom_pdm.c uses the _V01 variants) -- confirm they are still needed.
+ */
+#define QMI_RESULT_SUCCESS 0
+#define QMI_RESULT_FAILURE 1
+#define QMI_ERR_NONE 0
+#define QMI_ERR_INTERNAL 1
+#define QMI_ERR_MALFORMED_MSG 2
+/* Message IDs handled by the locator service. */
+#define SERVREG_LOC_GET_DOMAIN_LIST 33
+#define SERVREG_LOC_PFR 36
+
+/* One domain entry carried in a GET_DOMAIN_LIST response. */
+struct servreg_loc_domain_list_entry {
+	char name[65];
+	u32 instance_id;
+	u8 service_data_valid;
+	u32 service_data;
+};
+
+/* GET_DOMAIN_LIST request: service name plus optional paging offset. */
+struct servreg_loc_get_domain_list_req {
+	char name[65];
+	u8 offset_valid;
+	u32 offset;
+};
+
+/* Maximum number of domain entries in a single response message. */
+#define SERVREG_LOC_MAX_DOMAINS 32
+
+struct servreg_loc_get_domain_list_resp {
+	struct qmi_response_type_v01 rsp;
+	u8 total_domains_valid;
+	u16 total_domains;
+	u8 db_revision_valid;
+	u16 db_revision;
+	u8 domain_list_valid;
+	u32 domain_list_len;
+	struct servreg_loc_domain_list_entry domain_list[SERVREG_LOC_MAX_DOMAINS];
+};
+
+/* Protection domain failure report: crashing service and reason string. */
+struct servreg_loc_pfr_req {
+	char service[65];
+	char reason[257];
+};
+
+struct servreg_loc_pfr_resp {
+	struct qmi_response_type_v01 rsp;
+};
+
+#define SERVREG_LOC_PFR_RESP_MSG_SIZE 10
+
+extern struct qmi_elem_info servreg_loc_get_domain_list_req_ei[];
+extern struct qmi_elem_info servreg_loc_get_domain_list_resp_ei[];
+extern struct qmi_elem_info servreg_loc_pfr_req_ei[];
+extern struct qmi_elem_info servreg_loc_pfr_resp_ei[];
+
+#endif /* __QMI_SERVREG_LOC_H__ */
diff --git a/include/linux/soc/qcom/pd_mapper.h b/include/linux/soc/qcom/pd_mapper.h
new file mode 100644
index 000000000000..86438b7ca6fe
--- /dev/null
+++ b/include/linux/soc/qcom/pd_mapper.h
@@ -0,0 +1,39 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm Protection Domain mapper
+ *
+ * Copyright (c) 2023 Linaro Ltd.
+ */
+#ifndef __QCOM_PD_MAPPER__
+#define __QCOM_PD_MAPPER__
+
+/* For u32 / size_t; keeps the header self-contained. */
+#include <linux/types.h>
+
+/**
+ * struct qcom_pdm_domain_data - protection domain description
+ * @domain: protection domain name
+ * @instance_id: QMI instance id of the domain
+ * @services: NULL-terminated array of service names provided by the domain
+ */
+struct qcom_pdm_domain_data {
+	const char *domain;
+	u32 instance_id;
+	/* NULL-terminated array */
+	const char *services[];
+};
+
+#if IS_ENABLED(CONFIG_QCOM_PD_MAPPER)
+
+int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data,
+			 size_t num_data);
+void qcom_pdm_del_domains(const struct qcom_pdm_domain_data * const *data,
+			  size_t num_data);
+
+#else
+
+/* Stubs: with the mapper disabled, registration is a successful no-op. */
+static inline int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data,
+				       size_t num_data)
+{
+	return 0;
+}
+
+static inline void qcom_pdm_del_domains(const struct qcom_pdm_domain_data * const *data,
+					size_t num_data)
+{
+}
+
+#endif
+
+#endif /* __QCOM_PD_MAPPER__ */