cxl: Convert poison ops rwsem usages to scope-based resource management

Message ID 169998626910.1958731.10157698499207717733.stgit@djiang5-mobl3
State New, archived
Series cxl: Convert poison ops rwsem usages to scope-based resource management

Commit Message

Dave Jiang Nov. 14, 2023, 6:25 p.m. UTC
Cleanup the rwsem usages in the poison ops to reduce complexity and reduce
code lines.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---

Hi Alison, follow on patch to yours. Can't be backported, but nice clean
up going forward.

 drivers/cxl/core/memdev.c |   32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)
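
For readers unfamiliar with the helper this patch uses: guard(rwsem_read)
comes from the kernel's scope-based resource management support in
include/linux/cleanup.h (available since v6.5). Below is a userspace
analogue that sketches the mechanism, not the kernel code itself; all
names in it (guard_read, read_guard_exit, demo_lock, do_work) are
illustrative. It builds with: cc -pthread guard_demo.c

/*
 * Userspace sketch of the idea behind guard(rwsem_read): the compiler's
 * cleanup attribute runs a function when a variable leaves scope, so
 * the unlock covers every return path -- no 'out:' label required.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Called automatically when the guard variable goes out of scope. */
static void read_guard_exit(pthread_rwlock_t **guard)
{
	if (*guard)
		pthread_rwlock_unlock(*guard);
}

/* The kernel macro generates a unique variable name per guard; a fixed
 * name keeps this sketch simple (one guard per scope). */
#define guard_read(lock)						\
	pthread_rwlock_t *__guard					\
		__attribute__((cleanup(read_guard_exit))) =		\
		(pthread_rwlock_rdlock(lock), (lock))

static int do_work(int fail)
{
	guard_read(&demo_lock);

	if (fail)
		return -1;	/* lock still dropped on this early return */
	return 0;		/* ...and on this one */
}

int main(void)
{
	printf("ok=%d fail=%d\n", do_work(0), do_work(1));
	return 0;
}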

Comments

Alison Schofield Nov. 15, 2023, 11:32 p.m. UTC | #1
On Tue, Nov 14, 2023 at 11:25:20AM -0700, Dave Jiang wrote:
> Cleanup the rwsem usages in the poison ops to reduce complexity and reduce
> code lines.
> 
> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> ---
> 
> Hi Alison, follow on patch to yours. Can't be backported, but nice clean
> up going forward.

Tell me more about your backport worry.  Are we expected to avoid using
the new guard any place where there is a backport possibility?  

I'm thinking I should just rev the patch with your updates.

Dave Jiang Nov. 15, 2023, 11:55 p.m. UTC | #2
On 11/15/23 16:32, Alison Schofield wrote:
> On Tue, Nov 14, 2023 at 11:25:20AM -0700, Dave Jiang wrote:
>> Cleanup the rwsem usages in the poison ops to reduce complexity and reduce
>> code lines.
>>
>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>> ---
>>
>> Hi Alison, follow on patch to yours. Can't be backported, but nice clean
>> up going forward.
> 
> Tell me more about your backport worry.  Are we expected to avoid using
> the new guard any place where there is a backport possibility?  

Given that there's none of the cleanup.h support in stable kernels, I don't see how we can backport the guard() code automatically. Thus your original fix with a fixes tag plus a new cleanup patch follow on w/o backport issues seems necessary. Otherwise a separate backport patch would be needed no?
Alison Schofield Nov. 16, 2023, 2:17 a.m. UTC | #3
On Wed, Nov 15, 2023 at 04:55:11PM -0700, Dave Jiang wrote:
> 
> 
> On 11/15/23 16:32, Alison Schofield wrote:
> > On Tue, Nov 14, 2023 at 11:25:20AM -0700, Dave Jiang wrote:
> >> Cleanup the rwsem usages in the poison ops to reduce complexity and reduce
> >> code lines.
> >>
> >> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> >> ---
> >>
> >> Hi Alison, follow on patch to yours. Can't be backported, but nice clean
> >> up going forward.
> > 
> > Tell me more about your backport worry.  Are we expected to avoid using
> > the new guard any place where there is a backport possibility?  
> 
> Given that there's none of the cleanup.h support in stable kernels, I don't see how we can backport the guard() code automatically. Thus your original fix with a fixes tag plus a new cleanup patch follow on w/o backport issues seems necessary. Otherwise a separate backport patch would be needed no?

Sure, it would be needed. I guess I'm looking for why this backport
issue is so special. (not being sarcastic). Is there specific guidance
not to use the cleanup stuff if we think a patch might be backported?
I don't usually consider backportability when adding a Fixes tag to a
Patch. Have 'backport folks' asked us not to use it?

I'm imagining the slippery slope of not fixing something the best way
because we are worried that backport folks can't figure out how to
merge it.

Dave Jiang Nov. 16, 2023, 4:27 p.m. UTC | #4
On 11/15/23 19:17, Alison Schofield wrote:
> On Wed, Nov 15, 2023 at 04:55:11PM -0700, Dave Jiang wrote:
>>
>>
>> On 11/15/23 16:32, Alison Schofield wrote:
>>> On Tue, Nov 14, 2023 at 11:25:20AM -0700, Dave Jiang wrote:
>>>> Cleanup the rwsem usages in the poison ops to reduce complexity and reduce
>>>> code lines.
>>>>
>>>> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
>>>> ---
>>>>
>>>> Hi Alison, follow on patch to yours. Can't be backported, but nice clean
>>>> up going forward.
>>>
>>> Tell me more about your backport worry.  Are we expected to avoid using
>>> the new guard any place where there is a backport possibility?  
>>
>> Given that there's none of the cleanup.h support in stable kernels, I don't see how we can backport the guard() code automatically. Thus your original fix with a fixes tag plus a new cleanup patch follow on w/o backport issues seems necessary. Otherwise a separate backport patch would be needed no?
> 
> Sure, it would be needed. I guess I'm looking for why this backport
> issue is so special. (not being sarcastic). Is there specific guidance
> not to use the cleanup stuff if we think a patch might be backported?
> I don't usually consider backportability when adding a Fixes tag to a
> Patch. Have 'backport folks' asked us not to use it?
> 
> I'm imagining the slippery slope of not fixing something the best way
> because we are worried that backport folks can't figure out how to
> merge it.

Just auto backport vs manual backport. And if you don't mind doing the work, then I guess use the new calls. 


Dan Williams Nov. 23, 2023, 1:12 a.m. UTC | #5
Alison Schofield wrote:
> On Wed, Nov 15, 2023 at 04:55:11PM -0700, Dave Jiang wrote:
> > 
> > 
> > On 11/15/23 16:32, Alison Schofield wrote:
> > > On Tue, Nov 14, 2023 at 11:25:20AM -0700, Dave Jiang wrote:
> > >> Cleanup the rwsem usages in the poison ops to reduce complexity and reduce
> > >> code lines.
> > >>
> > >> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> > >> ---
> > >>
> > >> Hi Alison, follow on patch to yours. Can't be backported, but nice clean
> > >> up going forward.
> > > 
> > > Tell me more about your backport worry.  Are we expected to avoid using
> > > the new guard any place where there is a backport possibility?  
> > 
> > Given that there's none of the cleanup.h support in stable kernels, I don't see how we can backport the guard() code automatically. Thus your original fix with a fixes tag plus a new cleanup patch follow on w/o backport issues seems necessary. Otherwise a separate backport patch would be needed no?
> 
> Sure, it would be needed. I guess I'm looking for why this backport
> issue is so special. (not being sarcastic). Is there specific guidance
> not to use the cleanup stuff if we think a patch might be backported?
> I don't usually consider backportability when adding a Fixes tag to a
> Patch. Have 'backport folks' asked us not to use it?
> 
> I'm imagining the slippery slope of not fixing something the best way
> because we are worried that backport folks can't figure out how to
> merge it.

Upstream should be fixing things "the best way" in the current kernel.
If the best way requires some work when backporting, so be it.

The general rule for making backports easier is for when a fix
identifies additional cleanups. In that scenario the fix should be made
first and then the cleanups layered on top.

The cleanup.h helpers are an interesting case because they allow adding
new locking calls and defining the scope of the lock at the same time. I
submit that cleanup.h helpers are as easy / difficult to backport as
open-coded lock / unlock calls.
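
To make that comparison concrete, here is the locking skeleton of the
two styles, reduced from the functions this patch touches. This is a
sketch only: poison_op_guarded(), poison_op_open_coded(), and
some_check_fails() are illustrative stand-ins, not names from the patch.

/* v6.5+ with cleanup.h: every return path drops both semaphores,
 * innermost (last-declared) guard first. */
static int poison_op_guarded(void)
{
	guard(rwsem_read)(&cxl_region_rwsem);
	guard(rwsem_read)(&cxl_dpa_rwsem);

	if (some_check_fails())
		return -EINVAL;	/* unlocks happen automatically */
	return 0;
}

/* Open-coded equivalent for a stable kernel without cleanup.h: the
 * backport is mechanical -- restore the paired calls and funnel the
 * early returns through them. */
static int poison_op_open_coded(void)
{
	int rc = 0;

	down_read(&cxl_region_rwsem);
	down_read(&cxl_dpa_rwsem);

	if (some_check_fails())
		rc = -EINVAL;

	up_read(&cxl_dpa_rwsem);
	up_read(&cxl_region_rwsem);
	return rc;
}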
Dan Williams Nov. 23, 2023, 1:33 a.m. UTC | #6
Dan Williams wrote:
> Alison Schofield wrote:
> > On Wed, Nov 15, 2023 at 04:55:11PM -0700, Dave Jiang wrote:
> > > 
> > > 
> > > On 11/15/23 16:32, Alison Schofield wrote:
> > > > On Tue, Nov 14, 2023 at 11:25:20AM -0700, Dave Jiang wrote:
> > > >> Cleanup the rwsem usages in the poison ops to reduce complexity and reduce
> > > >> code lines.
> > > >>
> > > >> Signed-off-by: Dave Jiang <dave.jiang@intel.com>
> > > >> ---
> > > >>
> > > >> Hi Alison, follow on patch to yours. Can't be backported, but nice clean
> > > >> up going forward.
> > > > 
> > > > Tell me more about your backport worry.  Are we expected to avoid using
> > > > the new guard any place where there is a backport possibility?  
> > > 
> > > Given that there's none of the cleanup.h support in stable kernels, I don't see how we can backport the guard() code automatically. Thus your original fix with a fixes tag plus a new cleanup patch follow on w/o backport issues seems necessary. Otherwise a separate backport patch would be needed no?
> > 
> > Sure, it would be needed. I guess I'm looking for why this backport
> > issue is so special. (not being sarcastic). Is there specific guidance
> > not to use the cleanup stuff if we think a patch might be backported?
> > I don't usually consider backportability when adding a Fixes tag to a
> > Patch. Have 'backport folks' asked us not to use it?
> > 
> > I'm imagining the slippery slope of not fixing something the best way
> > because we are worried that backport folks can't figure out how to
> > merge it.
> 
> Upstream should be fixing things "the best way" in the current kernel.
> If the best way requires some work when backporting, so be it.
> 
> The general rule for making backports easier is for when a fix
> identifies additional cleanups. In that scenario the fix should be made
> first and then the cleanups layered on top.
> 
> The cleanup.h helpers are an interesting case because they allow adding
> new locking calls and defining the scope of the lock at the same time. I
> submit that cleanup.h helpers are as easy / difficult to backport as
> open-coded lock / unlock calls.

The other concern here, though, is mixing a conversion to use cleanup.h
with a cleanup to use scope-based locking, and the fact that the
_interruptible version of the scope-based locking is not available until
v6.8. So while I think it is ok to introduce new locking as a fix with
the cleanup.h headers, the old style should be used when the fix overlaps
a conversion.
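
For reference on that v6.8 point: the interruptible lock has long been
available open-coded via down_read_interruptible(), while the
scope-based form arrived later as a conditional guard. A sketch of the
two styles follows; the conditional-guard names (scoped_cond_guard,
rwsem_read_intr) are recalled from the v6.8 cleanup.h work and should
be verified against the target tree.

/* Pre-v6.8 / stable: open-coded interruptible read lock. */
if (down_read_interruptible(&cxl_region_rwsem))
	return -EINTR;
/* ... critical section ... */
up_read(&cxl_region_rwsem);

/* v6.8+ sketch: conditional scope-based form; the lock is dropped
 * automatically on scope exit. */
scoped_cond_guard(rwsem_read_intr, return -EINTR, &cxl_region_rwsem) {
	/* ... critical section ... */
}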

Patch

diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 961da365b097..9ab748fadb38 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -227,8 +227,8 @@  int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
 	if (!port || !is_cxl_endpoint(port))
 		return -EINVAL;
 
-	down_read(&cxl_region_rwsem);
-	down_read(&cxl_dpa_rwsem);
+	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_dpa_rwsem);
 
 	if (cxl_num_decoders_committed(port) == 0) {
 		/* No regions mapped to this memdev */
@@ -237,8 +237,6 @@  int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
 		/* Regions mapped, collect poison by endpoint */
 		rc =  cxl_get_poison_by_endpoint(port);
 	}
-	up_read(&cxl_dpa_rwsem);
-	up_read(&cxl_region_rwsem);
 
 	return rc;
 }
@@ -324,12 +322,12 @@  int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 		return 0;
 
-	down_read(&cxl_region_rwsem);
-	down_read(&cxl_dpa_rwsem);
+	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_dpa_rwsem);
 
 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
 	if (rc)
-		goto out;
+		return rc;
 
 	inject.address = cpu_to_le64(dpa);
 	mbox_cmd = (struct cxl_mbox_cmd) {
@@ -339,7 +337,7 @@  int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 	};
 	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	if (rc)
-		goto out;
+		return rc;
 
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
@@ -352,11 +350,8 @@  int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 		.length = cpu_to_le32(1),
 	};
 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
-out:
-	up_read(&cxl_dpa_rwsem);
-	up_read(&cxl_region_rwsem);
 
-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
 
@@ -372,12 +367,12 @@  int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 		return 0;
 
-	down_read(&cxl_region_rwsem);
-	down_read(&cxl_dpa_rwsem);
+	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read)(&cxl_dpa_rwsem);
 
 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
 	if (rc)
-		goto out;
+		return rc;
 
 	/*
 	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
@@ -396,7 +391,7 @@  int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 
 	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 	if (rc)
-		goto out;
+		return rc;
 
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
@@ -409,11 +404,8 @@  int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 		.length = cpu_to_le32(1),
 	};
 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
-out:
-	up_read(&cxl_dpa_rwsem);
-	up_read(&cxl_region_rwsem);
 
-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL);