
[V2,5/8] soundwire: amd: add soundwire manager interrupt handling

Message ID 20230213094031.2231058-6-Vijendar.Mukunda@amd.com (mailing list archive)
State New, archived
Series [V2,1/8] soundwire: export sdw_compute_slave_ports() function

Commit Message

Vijendar Mukunda Feb. 13, 2023, 9:40 a.m. UTC
Add support for handling soundwire manager interrupts.

Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
Signed-off-by: Mastan Katragadda <Mastan.Katragadda@amd.com>
---
 drivers/soundwire/amd_manager.c   | 132 ++++++++++++++++++++++++++++++
 drivers/soundwire/amd_manager.h   |   1 +
 include/linux/soundwire/sdw_amd.h |   7 ++
 3 files changed, 140 insertions(+)

Comments

Pierre-Louis Bossart Feb. 13, 2023, 6:15 p.m. UTC | #1
On 2/13/23 03:40, Vijendar Mukunda wrote:
> Add support for handling soundwire manager interrupts.

Try using the MIPI spelling: SoundWire

> 
> Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
> Signed-off-by: Mastan Katragadda <Mastan.Katragadda@amd.com>
> ---
>  drivers/soundwire/amd_manager.c   | 132 ++++++++++++++++++++++++++++++
>  drivers/soundwire/amd_manager.h   |   1 +
>  include/linux/soundwire/sdw_amd.h |   7 ++
>  3 files changed, 140 insertions(+)
> 
> diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
> index 14c88b80ab6d..87f9a987d93a 100644
> --- a/drivers/soundwire/amd_manager.c
> +++ b/drivers/soundwire/amd_manager.c
> @@ -417,6 +417,47 @@ static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sd
>  	return SDW_CMD_OK;
>  }
>  
> +static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
> +{
> +	u64 slave_stat = 0;

useless init

> +	u32 val = 0;

useless init
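
Both variables are assigned before their first use, so the declarations could
simply become:

	u64 slave_stat;
	u32 val;
	u16 dev_index;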

> +	u16 dev_index;
> +
> +	/* slave status response*/

response */

> +	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
> +	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
> +
> +	dev_dbg(amd_manager->dev, "%s: slave_stat:0x%llx\n", __func__, slave_stat);

newline?

> +	for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
> +		val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
> +		dev_dbg(amd_manager->dev, "%s val:0x%x\n", __func__, val);

you don't need __func__ in dev_dbg() logs, they can be added e.g. with
the option dyndbg=+pmf
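
As an illustration (not part of the patch), the cleaned-up debug print could
look like this, with the dynamic-debug control command kept as a comment:

	/*
	 * __func__ dropped: dynamic debug can prepend module/function names,
	 * e.g. echo 'file amd_manager.c +pmf' > /sys/kernel/debug/dynamic_debug/control
	 */
	dev_dbg(amd_manager->dev, "slave_stat:0x%llx\n", slave_stat);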

> +		switch (val) {
> +		case SDW_SLAVE_ATTACHED:
> +			amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
> +			break;
> +		case SDW_SLAVE_UNATTACHED:
> +			amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
> +			break;
> +		case SDW_SLAVE_ALERT:
> +			amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
> +			break;
> +		default:
> +			amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
> +			break;
> +		}
> +	}
> +}
> +
> +static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
> +{
> +	u64 response = 0;

useless init

> +
> +	mutex_lock(&amd_manager->bus.msg_lock);
> +	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
> +	mutex_unlock(&amd_manager->bus.msg_lock);
> +	amd_sdw_process_ping_status(response, amd_manager);
> +}
> +
>  static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
>  {
>  	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
> @@ -817,6 +858,95 @@ static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
>  					       dais, num_dais);
>  }
>  
> +static void amd_sdw_update_slave_status_work(struct work_struct *work)
> +{
> +	struct amd_sdw_manager *amd_manager =
> +		container_of(work, struct amd_sdw_manager, amd_sdw_work);
> +	int retry_count = 0;
> +
> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
> +		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
> +		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
> +	}
> +
> +update_status:
> +	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
> +		if (retry_count++ < SDW_MAX_DEVICES) {
> +			acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
> +				       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
> +			acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
> +				       amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
> +			amd_sdw_read_and_process_ping_status(amd_manager);
> +			goto update_status;
> +		} else {
> +			dev_err_ratelimited(amd_manager->dev,
> +					    "Device0 detected after %d iterations\n",
> +					    retry_count);
> +		}
> +	}

this seems rather inspired by the Cadence code, but is there really a
case where you need to re-check for devices? In the Cadence case, this
was added because we have a logical OR and new devices would not be handled.
> +}
> +
> +static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
> +					struct amd_sdw_manager *amd_manager)
> +{
> +	u64 slave_stat = 0;

useless init

> +	u32 val = 0;

useless init

> +	int dev_index;
> +
> +	if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
> +		memset(amd_manager->status, 0, sizeof(amd_manager->status));
> +	slave_stat = status_change_0to7;
> +	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
> +	dev_dbg(amd_manager->dev, "%s: status_change_0to7:0x%x status_change_8to11:0x%x\n",
> +		__func__, status_change_0to7, status_change_8to11);
> +	if (slave_stat) {
> +		for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
> +			if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
> +				val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
> +				      AMD_SDW_MCP_SLAVE_STATUS_MASK;
> +				switch (val) {
> +				case SDW_SLAVE_ATTACHED:
> +					amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
> +					break;
> +				case SDW_SLAVE_UNATTACHED:
> +					amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
> +					break;
> +				case SDW_SLAVE_ALERT:
> +					amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
> +					break;
> +				default:
> +					amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
> +					break;
> +				}

the code seems identical to that in amd_sdw_process_ping_status(), is
there a need for a helper function?
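
A minimal sketch of such a helper (the name is hypothetical, not taken from
this series); both the ping-status path and the interrupt path could then call
it as amd_sdw_set_slave_status(amd_manager, dev_index, val):

	static void amd_sdw_set_slave_status(struct amd_sdw_manager *amd_manager,
					     u16 dev_index, u32 status)
	{
		switch (status) {
		case SDW_SLAVE_ATTACHED:
			amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
			break;
		case SDW_SLAVE_UNATTACHED:
			amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
			break;
		case SDW_SLAVE_ALERT:
			amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
			break;
		default:
			amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
			break;
		}
	}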

> +			}
> +		}
> +	}
> +}
> +
> +static void amd_sdw_irq_thread(struct work_struct *work)
> +{
> +	struct amd_sdw_manager *amd_manager =
> +			container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
> +	u32 status_change_8to11;
> +	u32 status_change_0to7;
> +
> +	status_change_8to11 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
> +	status_change_0to7 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
> +	dev_dbg(amd_manager->dev, "%s [SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
> +		__func__, amd_manager->instance, status_change_0to7, status_change_8to11);

remove __func__

> +	if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
> +		amd_sdw_read_and_process_ping_status(amd_manager);
> +	} else {
> +		/* Check for the updated status on peripheral device */
> +		amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
> +	}
> +	if (status_change_8to11 || status_change_0to7)
> +		schedule_work(&amd_manager->amd_sdw_work);
> +	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
> +	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
> +}
Vijendar Mukunda Feb. 14, 2023, 5:56 a.m. UTC | #2
On 13/02/23 23:45, Pierre-Louis Bossart wrote:
>
> On 2/13/23 03:40, Vijendar Mukunda wrote:
>> Add support for handling soundwire manager interrupts.
> Try using the MIPI spelling: SoundWire
Will fix it.
>
>> Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
>> Signed-off-by: Mastan Katragadda <Mastan.Katragadda@amd.com>
>> ---
>>  drivers/soundwire/amd_manager.c   | 132 ++++++++++++++++++++++++++++++
>>  drivers/soundwire/amd_manager.h   |   1 +
>>  include/linux/soundwire/sdw_amd.h |   7 ++
>>  3 files changed, 140 insertions(+)
>>
>> diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
>> index 14c88b80ab6d..87f9a987d93a 100644
>> --- a/drivers/soundwire/amd_manager.c
>> +++ b/drivers/soundwire/amd_manager.c
>> @@ -417,6 +417,47 @@ static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sd
>>  	return SDW_CMD_OK;
>>  }
>>  
>> +static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
>> +{
>> +	u64 slave_stat = 0;
> useless init
will fix it.
>
>> +	u32 val = 0;
> useless init
will fix it.
>
>> +	u16 dev_index;
>> +
>> +	/* slave status response*/
> response */
will fix it.
>
>> +	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
>> +	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
>> +
>> +	dev_dbg(amd_manager->dev, "%s: slave_stat:0x%llx\n", __func__, slave_stat);
> newline?
will remove it.
>
>> +	for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
>> +		val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
>> +		dev_dbg(amd_manager->dev, "%s val:0x%x\n", __func__, val);
> you don't need __func__ in dev_dbg() logs, they can be added e.g. with
> the option dyndbg=+pmf
It was overlooked; we will modify it.
>
>> +		switch (val) {
>> +		case SDW_SLAVE_ATTACHED:
>> +			amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
>> +			break;
>> +		case SDW_SLAVE_UNATTACHED:
>> +			amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
>> +			break;
>> +		case SDW_SLAVE_ALERT:
>> +			amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
>> +			break;
>> +		default:
>> +			amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
>> +			break;
>> +		}
>> +	}
>> +}
>> +
>> +static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
>> +{
>> +	u64 response = 0;
> useless init
will fix it.
>
>> +
>> +	mutex_lock(&amd_manager->bus.msg_lock);
>> +	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
>> +	mutex_unlock(&amd_manager->bus.msg_lock);
>> +	amd_sdw_process_ping_status(response, amd_manager);
>> +}
>> +
>>  static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
>>  {
>>  	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
>> @@ -817,6 +858,95 @@ static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
>>  					       dais, num_dais);
>>  }
>>  
>> +static void amd_sdw_update_slave_status_work(struct work_struct *work)
>> +{
>> +	struct amd_sdw_manager *amd_manager =
>> +		container_of(work, struct amd_sdw_manager, amd_sdw_work);
>> +	int retry_count = 0;
>> +
>> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
>> +		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
>> +		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
>> +	}
>> +
>> +update_status:
>> +	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
>> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
>> +		if (retry_count++ < SDW_MAX_DEVICES) {
>> +			acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
>> +				       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
>> +			acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
>> +				       amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
>> +			amd_sdw_read_and_process_ping_status(amd_manager);
>> +			goto update_status;
>> +		} else {
>> +			dev_err_ratelimited(amd_manager->dev,
>> +					    "Device0 detected after %d iterations\n",
>> +					    retry_count);
>> +		}
>> +	}
> this seems rather inspired by the Cadence code, but is there really a
> case where you need to re-check for devices? In the Cadence case, this
> was added because we have a logical OR and new devices would not be handled.
As mentioned in the V1 set, we have corner cases during the enumeration sequence.
We observed that device alerts are missed during the peripheral enumeration sequence
when multiple peripheral devices are connected over the same link.
This is not inspired by the Intel code.

As per the V1 review comments, we have included the retry_count logic to address
this faulty case.

We forgot to include a comment explaining this; we will fix it.
>> +}
>> +
>> +static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
>> +					struct amd_sdw_manager *amd_manager)
>> +{
>> +	u64 slave_stat = 0;
> useless init
will fix it.
>> +	u32 val = 0;
> useless init
will fix it.
>
>> +	int dev_index;
>> +
>> +	if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
>> +		memset(amd_manager->status, 0, sizeof(amd_manager->status));
>> +	slave_stat = status_change_0to7;
>> +	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
>> +	dev_dbg(amd_manager->dev, "%s: status_change_0to7:0x%x status_change_8to11:0x%x\n",
>> +		__func__, status_change_0to7, status_change_8to11);
>> +	if (slave_stat) {
>> +		for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
>> +			if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
>> +				val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
>> +				      AMD_SDW_MCP_SLAVE_STATUS_MASK;
>> +				switch (val) {
>> +				case SDW_SLAVE_ATTACHED:
>> +					amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
>> +					break;
>> +				case SDW_SLAVE_UNATTACHED:
>> +					amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
>> +					break;
>> +				case SDW_SLAVE_ALERT:
>> +					amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
>> +					break;
>> +				default:
>> +					amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
>> +					break;
>> +				}
> the code seems identical to that in amd_sdw_process_ping_status(), is
> there a need for a helper function?
will use helper function for status update.
>
>> +			}
>> +		}
>> +	}
>> +}
>> +
>> +static void amd_sdw_irq_thread(struct work_struct *work)
>> +{
>> +	struct amd_sdw_manager *amd_manager =
>> +			container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
>> +	u32 status_change_8to11;
>> +	u32 status_change_0to7;
>> +
>> +	status_change_8to11 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
>> +	status_change_0to7 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
>> +	dev_dbg(amd_manager->dev, "%s [SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
>> +		__func__, amd_manager->instance, status_change_0to7, status_change_8to11);
> remove __func__
will fix it.
>
>> +	if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
>> +		amd_sdw_read_and_process_ping_status(amd_manager);
>> +	} else {
>> +		/* Check for the updated status on peripheral device */
>> +		amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
>> +	}
>> +	if (status_change_8to11 || status_change_0to7)
>> +		schedule_work(&amd_manager->amd_sdw_work);
>> +	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
>> +	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
>> +}
Vijendar Mukunda Feb. 14, 2023, 7:54 a.m. UTC | #3
On 14/02/23 11:26, Mukunda,Vijendar wrote:
> On 13/02/23 23:45, Pierre-Louis Bossart wrote:
>> On 2/13/23 03:40, Vijendar Mukunda wrote:
>>> Add support for handling soundwire manager interrupts.
>> Try using the MIPI spelling: SoundWire
> Will fix it.
>>> Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
>>> Signed-off-by: Mastan Katragadda <Mastan.Katragadda@amd.com>
>>> ---
>>>  drivers/soundwire/amd_manager.c   | 132 ++++++++++++++++++++++++++++++
>>>  drivers/soundwire/amd_manager.h   |   1 +
>>>  include/linux/soundwire/sdw_amd.h |   7 ++
>>>  3 files changed, 140 insertions(+)
>>>
>>> diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
>>> index 14c88b80ab6d..87f9a987d93a 100644
>>> --- a/drivers/soundwire/amd_manager.c
>>> +++ b/drivers/soundwire/amd_manager.c
>>> @@ -417,6 +417,47 @@ static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sd
>>>  	return SDW_CMD_OK;
>>>  }
>>>  
>>> +static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
>>> +{
>>> +	u64 slave_stat = 0;
>> useless init
> will fix it.
>>> +	u32 val = 0;
>> useless init
> will fix it.
>>> +	u16 dev_index;
>>> +
>>> +	/* slave status response*/
>> response */
> will fix it.
>>> +	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
>>> +	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
>>> +
>>> +	dev_dbg(amd_manager->dev, "%s: slave_stat:0x%llx\n", __func__, slave_stat);
>> newline?
> will remove it.
>>> +	for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
>>> +		val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
>>> +		dev_dbg(amd_manager->dev, "%s val:0x%x\n", __func__, val);
>> you don't need __func__ in dev_dbg() logs, they can be added e.g. with
>> the option dyndbg=+pmf
> It was overlooked; we will modify it.
>>> +		switch (val) {
>>> +		case SDW_SLAVE_ATTACHED:
>>> +			amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
>>> +			break;
>>> +		case SDW_SLAVE_UNATTACHED:
>>> +			amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
>>> +			break;
>>> +		case SDW_SLAVE_ALERT:
>>> +			amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
>>> +			break;
>>> +		default:
>>> +			amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
>>> +			break;
>>> +		}
>>> +	}
>>> +}
>>> +
>>> +static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
>>> +{
>>> +	u64 response = 0;
>> useless init
> will fix it.
>>> +
>>> +	mutex_lock(&amd_manager->bus.msg_lock);
>>> +	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
>>> +	mutex_unlock(&amd_manager->bus.msg_lock);
>>> +	amd_sdw_process_ping_status(response, amd_manager);
>>> +}
>>> +
>>>  static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
>>>  {
>>>  	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
>>> @@ -817,6 +858,95 @@ static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
>>>  					       dais, num_dais);
>>>  }
>>>  
>>> +static void amd_sdw_update_slave_status_work(struct work_struct *work)
>>> +{
>>> +	struct amd_sdw_manager *amd_manager =
>>> +		container_of(work, struct amd_sdw_manager, amd_sdw_work);
>>> +	int retry_count = 0;
>>> +
>>> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
>>> +		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
>>> +		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
>>> +	}
>>> +
>>> +update_status:
>>> +	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
>>> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
>>> +		if (retry_count++ < SDW_MAX_DEVICES) {
>>> +			acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
>>> +				       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
>>> +			acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
>>> +				       amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
>>> +			amd_sdw_read_and_process_ping_status(amd_manager);
>>> +			goto update_status;
>>> +		} else {
>>> +			dev_err_ratelimited(amd_manager->dev,
>>> +					    "Device0 detected after %d iterations\n",
>>> +					    retry_count);
>>> +		}
>>> +	}
>> this seems rather inspired by the Cadence code, but is there really a
>> case where you need to re-check for devices? In the Cadence case, this
>> was added because we have a logical OR and new devices would not be handled.
> As mentioned in the V1 set, we have corner cases during the enumeration sequence.
> We observed that device alerts are missed during the peripheral enumeration sequence
> when multiple peripheral devices are connected over the same link.
> This is not inspired by the Intel code.
>
> As per the V1 review comments, we have included the retry_count logic to address
> this faulty case.
>
> We forgot to include a comment explaining this; we will fix it.
Slight correction to the explanation.

During the peripheral enumeration sequence, the SoundWire peripheral interrupts
are masked.
If the SoundWire interrupts are not masked, it will cause side effects when
multiple peripheral devices are connected over the same link.
As the interrupts are masked, the SoundWire manager driver won't receive any
interrupts during device slot programming for each peripheral.

Once the device number programming is done for all peripherals, the SoundWire
interrupts are unmasked. The peripheral device status is then read with a ping
command and the response is processed, which invokes the peripheral device
initialization sequence.
This sequence ensures that all peripheral devices are enumerated and initialized
properly.

>>> +}
>>> +
>>> +static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
>>> +					struct amd_sdw_manager *amd_manager)
>>> +{
>>> +	u64 slave_stat = 0;
>> useless init
> will fix it.
>>> +	u32 val = 0;
>> useless init
> will fix it.
>>> +	int dev_index;
>>> +
>>> +	if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
>>> +		memset(amd_manager->status, 0, sizeof(amd_manager->status));
>>> +	slave_stat = status_change_0to7;
>>> +	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
>>> +	dev_dbg(amd_manager->dev, "%s: status_change_0to7:0x%x status_change_8to11:0x%x\n",
>>> +		__func__, status_change_0to7, status_change_8to11);
>>> +	if (slave_stat) {
>>> +		for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
>>> +			if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
>>> +				val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
>>> +				      AMD_SDW_MCP_SLAVE_STATUS_MASK;
>>> +				switch (val) {
>>> +				case SDW_SLAVE_ATTACHED:
>>> +					amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
>>> +					break;
>>> +				case SDW_SLAVE_UNATTACHED:
>>> +					amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
>>> +					break;
>>> +				case SDW_SLAVE_ALERT:
>>> +					amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
>>> +					break;
>>> +				default:
>>> +					amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
>>> +					break;
>>> +				}
>> the code seems identical to that in amd_sdw_process_ping_status(), is
>> there a need for a helper function?
> will use helper function for status update.
>>> +			}
>>> +		}
>>> +	}
>>> +}
>>> +
>>> +static void amd_sdw_irq_thread(struct work_struct *work)
>>> +{
>>> +	struct amd_sdw_manager *amd_manager =
>>> +			container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
>>> +	u32 status_change_8to11;
>>> +	u32 status_change_0to7;
>>> +
>>> +	status_change_8to11 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
>>> +	status_change_0to7 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
>>> +	dev_dbg(amd_manager->dev, "%s [SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
>>> +		__func__, amd_manager->instance, status_change_0to7, status_change_8to11);
>> remove __func__
> will fix it.
>>> +	if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
>>> +		amd_sdw_read_and_process_ping_status(amd_manager);
>>> +	} else {
>>> +		/* Check for the updated status on peripheral device */
>>> +		amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
>>> +	}
>>> +	if (status_change_8to11 || status_change_0to7)
>>> +		schedule_work(&amd_manager->amd_sdw_work);
>>> +	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
>>> +	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
>>> +}
Pierre-Louis Bossart Feb. 14, 2023, 1:28 p.m. UTC | #4
>>>> +update_status:
>>>> +	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
>>>> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
>>>> +		if (retry_count++ < SDW_MAX_DEVICES) {
>>>> +			acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
>>>> +				       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
>>>> +			acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
>>>> +				       amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
>>>> +			amd_sdw_read_and_process_ping_status(amd_manager);
>>>> +			goto update_status;
>>>> +		} else {
>>>> +			dev_err_ratelimited(amd_manager->dev,
>>>> +					    "Device0 detected after %d iterations\n",
>>>> +					    retry_count);
>>>> +		}
>>>> +	}
>>> this seems rather inspired by the Cadence code, but is there really a
>>> case where you need to re-check for devices? In the Cadence case, this
>>> was added because we have a logical OR and new devices would not be handled.
>> As mentioned in the V1 set, we have corner cases during the enumeration sequence.
>> We observed that device alerts are missed during the peripheral enumeration sequence
>> when multiple peripheral devices are connected over the same link.
>> This is not inspired by the Intel code.
>>
>> As per the V1 review comments, we have included the retry_count logic to address
>> this faulty case.
>>
>> We forgot to include a comment explaining this; we will fix it.
> Slight correction to the explanation.
> 
> During the peripheral enumeration sequence, the SoundWire peripheral interrupts
> are masked.
> If the SoundWire interrupts are not masked, it will cause side effects when
> multiple peripheral devices are connected over the same link.
> As the interrupts are masked, the SoundWire manager driver won't receive any
> interrupts during device slot programming for each peripheral.
> 
> Once the device number programming is done for all peripherals, the SoundWire
> interrupts are unmasked. The peripheral device status is then read with a ping
> command and the response is processed, which invokes the peripheral device
> initialization sequence.
> This sequence ensures that all peripheral devices are enumerated and initialized
> properly.

Humm, the explanation is difficult to follow; it's not clear on which
side the interrupts are masked. Are you talking about the peripheral
being prevented from generating interrupts, or about the manager not
detecting peripheral state changes with an interrupt-based mechanism?

I am not sure what 'side effects' can happen: during enumeration all
devices show up as device0, and the SoundWire bus provides a mechanism to
enumerate without conflicts.
Vijendar Mukunda Feb. 14, 2023, 10:18 p.m. UTC | #5
On 14/02/23 18:58, Pierre-Louis Bossart wrote:
>>>>> +update_status:
>>>>> +	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
>>>>> +	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
>>>>> +		if (retry_count++ < SDW_MAX_DEVICES) {
>>>>> +			acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
>>>>> +				       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
>>>>> +			acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
>>>>> +				       amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
>>>>> +			amd_sdw_read_and_process_ping_status(amd_manager);
>>>>> +			goto update_status;
>>>>> +		} else {
>>>>> +			dev_err_ratelimited(amd_manager->dev,
>>>>> +					    "Device0 detected after %d iterations\n",
>>>>> +					    retry_count);
>>>>> +		}
>>>>> +	}
>>>> this seems rather inspired by the Cadence code, but is there really a
>>>> case where you need to re-check for devices? In the Cadence case, this
>>>> was added because we have a logical OR and new devices would not be handled.
>>> As mentioned in the V1 set, we have corner cases during the enumeration sequence.
>>> We observed that device alerts are missed during the peripheral enumeration sequence
>>> when multiple peripheral devices are connected over the same link.
>>> This is not inspired by the Intel code.
>>>
>>> As per the V1 review comments, we have included the retry_count logic to address
>>> this faulty case.
>>>
>>> We forgot to include a comment explaining this; we will fix it.
>> Slight correction to the explanation.
>>
>> During the peripheral enumeration sequence, the SoundWire peripheral interrupts
>> are masked.
>> If the SoundWire interrupts are not masked, it will cause side effects when
>> multiple peripheral devices are connected over the same link.
>> As the interrupts are masked, the SoundWire manager driver won't receive any
>> interrupts during device slot programming for each peripheral.
>>
>> Once the device number programming is done for all peripherals, the SoundWire
>> interrupts are unmasked. The peripheral device status is then read with a ping
>> command and the response is processed, which invokes the peripheral device
>> initialization sequence.
>> This sequence ensures that all peripheral devices are enumerated and initialized
>> properly.
> Humm, the explanation is difficult to follow; it's not clear on which
> side the interrupts are masked. Are you talking about the peripheral
> being prevented from generating interrupts, or about the manager not
> detecting peripheral state changes with an interrupt-based mechanism?
>
> I am not sure what 'side effects' can happen: during enumeration all
> devices show up as device0, and the SoundWire bus provides a mechanism to
> enumerate without conflicts.
I am talking about the manager side detecting peripheral state changes
with an interrupt-based mechanism.
We are referring to the scenario where multiple peripheral devices are
connected over the same link.
Until peripheral device enumeration is complete, we mask the manager-side
interrupts, and we unmask them again afterwards.
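
To summarize the sequence being described, here is a rough sketch of the
manager-side ordering, reusing the register writes already present in
amd_sdw_update_slave_status_work() above (illustrative only):

	/* 1. mask the manager state-change interrupts while the bus core
	 * programs device numbers for the attached peripherals
	 */
	acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
	acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);

	/* 2. unmask the interrupts and re-read the peripheral status with an
	 * in-band PING so that every enumerated device gets initialized
	 */
	acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7,
		       amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
	acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
		       amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
	amd_sdw_read_and_process_ping_status(amd_manager);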

Patch

diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 14c88b80ab6d..87f9a987d93a 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -417,6 +417,47 @@  static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sd
 	return SDW_CMD_OK;
 }
 
+static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
+{
+	u64 slave_stat = 0;
+	u32 val = 0;
+	u16 dev_index;
+
+	/* slave status response*/
+	slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
+	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
+
+	dev_dbg(amd_manager->dev, "%s: slave_stat:0x%llx\n", __func__, slave_stat);
+	for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
+		val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
+		dev_dbg(amd_manager->dev, "%s val:0x%x\n", __func__, val);
+		switch (val) {
+		case SDW_SLAVE_ATTACHED:
+			amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
+			break;
+		case SDW_SLAVE_UNATTACHED:
+			amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
+			break;
+		case SDW_SLAVE_ALERT:
+			amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
+			break;
+		default:
+			amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
+			break;
+		}
+	}
+}
+
+static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
+{
+	u64 response = 0;
+
+	mutex_lock(&amd_manager->bus.msg_lock);
+	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
+	mutex_unlock(&amd_manager->bus.msg_lock);
+	amd_sdw_process_ping_status(response, amd_manager);
+}
+
 static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
 {
 	struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
@@ -817,6 +858,95 @@  static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
 					       dais, num_dais);
 }
 
+static void amd_sdw_update_slave_status_work(struct work_struct *work)
+{
+	struct amd_sdw_manager *amd_manager =
+		container_of(work, struct amd_sdw_manager, amd_sdw_work);
+	int retry_count = 0;
+
+	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
+		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
+		acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+	}
+
+update_status:
+	sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
+	if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
+		if (retry_count++ < SDW_MAX_DEVICES) {
+			acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
+				       ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
+			acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
+				       amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+			amd_sdw_read_and_process_ping_status(amd_manager);
+			goto update_status;
+		} else {
+			dev_err_ratelimited(amd_manager->dev,
+					    "Device0 detected after %d iterations\n",
+					    retry_count);
+		}
+	}
+}
+
+static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
+					struct amd_sdw_manager *amd_manager)
+{
+	u64 slave_stat = 0;
+	u32 val = 0;
+	int dev_index;
+
+	if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
+		memset(amd_manager->status, 0, sizeof(amd_manager->status));
+	slave_stat = status_change_0to7;
+	slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
+	dev_dbg(amd_manager->dev, "%s: status_change_0to7:0x%x status_change_8to11:0x%x\n",
+		__func__, status_change_0to7, status_change_8to11);
+	if (slave_stat) {
+		for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
+			if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
+				val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
+				      AMD_SDW_MCP_SLAVE_STATUS_MASK;
+				switch (val) {
+				case SDW_SLAVE_ATTACHED:
+					amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
+					break;
+				case SDW_SLAVE_UNATTACHED:
+					amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
+					break;
+				case SDW_SLAVE_ALERT:
+					amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
+					break;
+				default:
+					amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
+					break;
+				}
+			}
+		}
+	}
+}
+
+static void amd_sdw_irq_thread(struct work_struct *work)
+{
+	struct amd_sdw_manager *amd_manager =
+			container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
+	u32 status_change_8to11;
+	u32 status_change_0to7;
+
+	status_change_8to11 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
+	status_change_0to7 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
+	dev_dbg(amd_manager->dev, "%s [SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
+		__func__, amd_manager->instance, status_change_0to7, status_change_8to11);
+	if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
+		amd_sdw_read_and_process_ping_status(amd_manager);
+	} else {
+		/* Check for the updated status on peripheral device */
+		amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
+	}
+	if (status_change_8to11 || status_change_0to7)
+		schedule_work(&amd_manager->amd_sdw_work);
+	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
+	acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
+}
+
 static void amd_sdw_probe_work(struct work_struct *work)
 {
 	struct amd_sdw_manager *amd_manager = container_of(work, struct amd_sdw_manager,
@@ -909,6 +1039,8 @@  static int amd_sdw_manager_probe(struct platform_device *pdev)
 		return ret;
 	}
 	dev_set_drvdata(dev, amd_manager);
+	INIT_WORK(&amd_manager->amd_sdw_irq_thread, amd_sdw_irq_thread);
+	INIT_WORK(&amd_manager->amd_sdw_work, amd_sdw_update_slave_status_work);
 	INIT_WORK(&amd_manager->probe_work, amd_sdw_probe_work);
 	schedule_work(&amd_manager->probe_work);
 	return 0;
diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
index 3e1bded1e769..5bcaf7a763bb 100644
--- a/drivers/soundwire/amd_manager.h
+++ b/drivers/soundwire/amd_manager.h
@@ -186,6 +186,7 @@ 
 #define AMD_SDW1_PAD_KEEPER_EN_MASK			0x10
 #define AMD_SDW0_PAD_KEEPER_DISABLE_MASK		0x1E
 #define AMD_SDW1_PAD_KEEPER_DISABLE_MASK		0xF
+#define AMD_SDW_PREQ_INTR_STAT				BIT(19)
 
 enum amd_sdw_cmd_type {
 	AMD_SDW_CMD_PING = 0,
diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h
index c978cfbc0207..003b8a197fc5 100644
--- a/include/linux/soundwire/sdw_amd.h
+++ b/include/linux/soundwire/sdw_amd.h
@@ -45,8 +45,11 @@  struct sdw_amd_dai_runtime {
  * @mmio: soundwire registers mmio base
  * @acp_mmio: acp registers mmio base
  * @reg_mask: register mask structure per manager instance
+ * @amd_sdw_irq_thread: soundwire manager irq workqueue
+ * @amd_sdw_work: peripheral status work queue
  * @probe_work: soundwire manager probe workqueue
  * @sdw_lock: mutex to protect acp share register access
+ * @status: peripheral devices status array
  * @num_din_ports: number of input ports
  * @num_dout_ports: number of output ports
  * @cols_index: Column index in frame shape
@@ -65,10 +68,14 @@  struct amd_sdw_manager {
 	void __iomem *acp_mmio;
 
 	struct sdw_manager_reg_mask *reg_mask;
+	struct work_struct amd_sdw_irq_thread;
+	struct work_struct amd_sdw_work;
 	struct work_struct probe_work;
 	/* mutex to protect acp common register access */
 	struct mutex *sdw_lock;
 
+	enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
+
 	int num_din_ports;
 	int num_dout_ports;