
[v2,4/5] mm/hugetlb: use PMD page lock to protect CONT-PMD entries

Message ID 88c8a8c68d87429f0fc48e81100f19b71f6e664f.1661240170.git.baolin.wang@linux.alibaba.com (mailing list archive)
State New
Series: Fix some issues when looking up hugetlb page

Commit Message

Baolin Wang Aug. 23, 2022, 7:50 a.m. UTC
Since the PMD entries of a CONT-PMD hugetlb cannot span multiple
PMD page table pages, we can switch to the PMD page lock, which is
much finer grained than the mm's page_table_lock.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 include/linux/hugetlb.h | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
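
For context, huge_pte_lockptr() picks the spinlock that serializes
updates to a hugetlb page table entry. A minimal caller sketch (the
surrounding walk is illustrative, not part of this patch):

	spinlock_t *ptl;

	/*
	 * Returns the split PMD page lock for PMD and (with this
	 * patch) CONT-PMD sizes, and falls back to the mm-wide
	 * mm->page_table_lock for larger sizes.
	 */
	ptl = huge_pte_lockptr(h, mm, ptep);
	spin_lock(ptl);
	/* ... inspect or update the hugetlb entry ... */
	spin_unlock(ptl);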

Comments

David Hildenbrand Aug. 23, 2022, 8:14 a.m. UTC | #1
On 23.08.22 09:50, Baolin Wang wrote:
> Since the PMD entries of a CONT-PMD hugetlb cannot span multiple
> PMD page table pages, we can switch to the PMD page lock, which is
> much finer grained than the mm's page_table_lock.
> 
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
>  include/linux/hugetlb.h | 12 ++++++++++--
>  1 file changed, 10 insertions(+), 2 deletions(-)
> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 3a96f67..d4803a89 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -892,9 +892,17 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
>  static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
>  					   struct mm_struct *mm, pte_t *pte)
>  {
> -	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
> +	unsigned long hp_size = huge_page_size(h);
>  
> -	if (huge_page_size(h) == PMD_SIZE) {
> +	VM_BUG_ON(hp_size == PAGE_SIZE);
> +
> +	/*
> +	 * For CONT-PMD size hugetlb, the CONT-PMD entries cannot span
> +	 * multiple PMD page table pages, so we can use the fine grained
> +	 * PMD page lock.
> +	 */
> +	if (hp_size == PMD_SIZE ||
> +	    (hp_size > PMD_SIZE && hp_size < PUD_SIZE)) {
>  		return pmd_lockptr(mm, (pmd_t *) pte);
>  	} else if (huge_page_size(h) < PMD_SIZE) {
>  		unsigned long mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);

Is there a measurable performance gain? IOW, do we really care?
Baolin Wang Aug. 23, 2022, 10:12 a.m. UTC | #2
On 8/23/2022 4:14 PM, David Hildenbrand wrote:
> On 23.08.22 09:50, Baolin Wang wrote:
>> Since the PMD entries of a CONT-PMD hugetlb cannot span multiple
>> PMD page table pages, we can switch to the PMD page lock, which is
>> much finer grained than the mm's page_table_lock.
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>>   include/linux/hugetlb.h | 12 ++++++++++--
>>   1 file changed, 10 insertions(+), 2 deletions(-)
>>
>> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
>> index 3a96f67..d4803a89 100644
>> --- a/include/linux/hugetlb.h
>> +++ b/include/linux/hugetlb.h
>> @@ -892,9 +892,17 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
>>   static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
>>   					   struct mm_struct *mm, pte_t *pte)
>>   {
>> -	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
>> +	unsigned long hp_size = huge_page_size(h);
>>   
>> -	if (huge_page_size(h) == PMD_SIZE) {
>> +	VM_BUG_ON(hp_size == PAGE_SIZE);
>> +
>> +	/*
>> +	 * For CONT-PMD size hugetlb, the CONT-PMD entries cannot span
>> +	 * multiple PMD page table pages, so we can use the fine grained
>> +	 * PMD page lock.
>> +	 */
>> +	if (hp_size == PMD_SIZE ||
>> +	    (hp_size > PMD_SIZE && hp_size < PUD_SIZE)) {
>>   		return pmd_lockptr(mm, (pmd_t *) pte);
>>   	} else if (huge_page_size(h) < PMD_SIZE) {
>>   		unsigned long mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
> 
> Is there a measurable performance gain? IOW, do we really care?

IMO, it's just a theoretical analysis for now :) Let me think about how
to measure the performance gain.
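
For what it's worth, one rough way to measure this would be to time
concurrent first-touch faults on a CONT-PMD sized hugetlb mapping, run
with and without the patch. The sketch below is only an illustration
(not from this thread): it assumes arm64 with 4K base pages, where
CONT-PMD hugetlb is 32M, with enough 32M hugetlb pages preallocated via
/sys/kernel/mm/hugepages/hugepages-32768kB/nr_hugepages. Each thread
faults its own 1G slice, since a PMD table page covers 1G of virtual
address space and the split lock is per PMD table page (the sketch
assumes the mapping ends up suitably aligned; a fixed 1G-aligned mmap
hint could pin that down). Build with -pthread.

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <time.h>

	#ifndef MAP_HUGE_SHIFT
	#define MAP_HUGE_SHIFT	26
	#endif
	#ifndef MAP_HUGE_32MB
	#define MAP_HUGE_32MB	(25 << MAP_HUGE_SHIFT)	/* log2(32M) == 25 */
	#endif

	#define NR_THREADS	8
	#define SLICE_SIZE	(1UL << 30)	/* one PMD table page covers 1G */
	#define HPAGE_SIZE	(32UL << 20)	/* arm64/4K CONT-PMD hugetlb size */

	static char *area;

	static void *toucher(void *arg)
	{
		char *base = area + (unsigned long)arg * SLICE_SIZE;
		unsigned long off;

		/* First-touch fault every 32M page in this thread's 1G slice. */
		for (off = 0; off < SLICE_SIZE; off += HPAGE_SIZE)
			base[off] = 1;
		return NULL;
	}

	int main(void)
	{
		unsigned long size = (unsigned long)NR_THREADS * SLICE_SIZE;
		pthread_t threads[NR_THREADS];
		struct timespec t0, t1;
		unsigned long i;

		area = mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_32MB,
			    -1, 0);
		if (area == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		clock_gettime(CLOCK_MONOTONIC, &t0);
		for (i = 0; i < NR_THREADS; i++)
			pthread_create(&threads[i], NULL, toucher, (void *)i);
		for (i = 0; i < NR_THREADS; i++)
			pthread_join(threads[i], NULL);
		clock_gettime(CLOCK_MONOTONIC, &t1);

		printf("fault time: %.3f s\n", (t1.tv_sec - t0.tv_sec) +
		       (t1.tv_nsec - t0.tv_nsec) / 1e9);
		return 0;
	}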

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3a96f67..d4803a89 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -892,9 +892,17 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 					   struct mm_struct *mm, pte_t *pte)
 {
-	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
+	unsigned long hp_size = huge_page_size(h);
 
-	if (huge_page_size(h) == PMD_SIZE) {
+	VM_BUG_ON(hp_size == PAGE_SIZE);
+
+	/*
+	 * For CONT-PMD size hugetlb, the CONT-PMD entries cannot span
+	 * multiple PMD page table pages, so we can use the fine grained
+	 * PMD page lock.
+	 */
+	if (hp_size == PMD_SIZE ||
+	    (hp_size > PMD_SIZE && hp_size < PUD_SIZE)) {
 		return pmd_lockptr(mm, (pmd_t *) pte);
 	} else if (huge_page_size(h) < PMD_SIZE) {
 		unsigned long mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
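
For reference, with this change the lock choice by hugetlb size looks
as follows (sizes are for arm64 with 4K base pages and are given only
as an illustration; other configurations differ):

	CONT_PTE_SIZE =  64K -> PTE page lock (hp_size < PMD_SIZE branch)
	PMD_SIZE      =   2M -> split PMD page lock (existing case)
	CONT_PMD_SIZE =  32M -> split PMD page lock (new with this patch)
	PUD_SIZE      =   1G -> falls through to mm->page_table_lock

i.e. the new condition is just the range check
PMD_SIZE <= hp_size < PUD_SIZE.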