[v4,2/7] kexec: define functions to map and unmap segments

Message ID 20240122183804.3293904-3-tusharsu@linux.microsoft.com (mailing list archive)
State New, archived
Series: ima: kexec: measure events between kexec load and execute

Commit Message

Tushar Sugandhi Jan. 22, 2024, 6:37 p.m. UTC
Implement kimage_map_segment() to map the IMA buffer source pages of a
kimage after kexec 'load'.  Given a kimage pointer, an address, and a
size, the function gathers the source pages that fall within the
specified address range, builds an array of page pointers, and maps
them to a contiguous virtual address range.  It returns the start of
that range on success, or NULL on failure.

Implement kimage_unmap_segment() to unmap a segment
using vunmap().  Relocate the 'for_each_kimage_entry()' macro from
kexec_core.c to kexec.h for broader accessibility.

Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
---
 include/linux/kexec.h              | 13 +++++++
 kernel/kexec_core.c                | 59 +++++++++++++++++++++++++++---
 security/integrity/ima/ima_kexec.c |  1 +
 3 files changed, 68 insertions(+), 5 deletions(-)
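
For context, here is a minimal sketch of how a caller such as IMA might
use the new pair of functions.  This is illustrative only: the
ima_update_kexec_buffer() name, its arguments, and the error handling
are assumptions, not part of this patch.

/*
 * Illustrative sketch -- not part of the patch.  Maps the previously
 * loaded measurement-list segment, lets the caller update it in place,
 * and drops the temporary mapping again.  'image', 'ima_buffer_addr',
 * and 'ima_buffer_size' are assumed to have been recorded at kexec
 * 'load' time.
 */
static int ima_update_kexec_buffer(struct kimage *image,
				   unsigned long ima_buffer_addr,
				   unsigned long ima_buffer_size)
{
	void *buf;

	/* Map the segment's source pages into one contiguous VA range. */
	buf = kimage_map_segment(image, ima_buffer_addr, ima_buffer_size);
	if (!buf)
		return -ENOMEM;

	/*
	 * ... copy the updated measurement list into 'buf' here,
	 * staying within ima_buffer_size ...
	 */

	/* Unmap; the segment itself stays loaded for the kexec. */
	kimage_unmap_segment(buf);
	return 0;
}

Internally, kimage_map_segment() sizes its page array as
PFN_UP(addr + size) - PFN_DOWN(addr), i.e. the number of page frames
spanned by the [addr, addr + size) range.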

Comments

Stefan Berger Jan. 23, 2024, 5:03 p.m. UTC | #1
On 1/22/24 13:37, Tushar Sugandhi wrote:
> Implement kimage_map_segment() to map the IMA buffer source pages of a
> kimage after kexec 'load'.  Given a kimage pointer, an address, and a
> size, the function gathers the source pages that fall within the
> specified address range, builds an array of page pointers, and maps
> them to a contiguous virtual address range.  It returns the start of
> that range on success, or NULL on failure.
> 
> Implement kimage_unmap_segment() to unmap a segment
> using vunmap().  Relocate the 'for_each_kimage_entry()' macro from
> kexec_core.c to kexec.h for broader accessibility.
> 
> Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
> ---
>   include/linux/kexec.h              | 13 +++++++
>   kernel/kexec_core.c                | 59 +++++++++++++++++++++++++++---
>   security/integrity/ima/ima_kexec.c |  1 +
>   3 files changed, 68 insertions(+), 5 deletions(-)
> 
> diff --git a/include/linux/kexec.h b/include/linux/kexec.h
> index 22b5cd24f581..e00b8101b53b 100644
> --- a/include/linux/kexec.h
> +++ b/include/linux/kexec.h
> @@ -490,6 +490,15 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
>   static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
>   #endif
>   
> +#define for_each_kimage_entry(image, ptr, entry) \
> +	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
> +		ptr = (entry & IND_INDIRECTION) ? \
> +			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
> +
> +extern void *kimage_map_segment(struct kimage *image,
> +				unsigned long addr, unsigned long size);
> +extern void kimage_unmap_segment(void *buffer);
> +

This series applies to v6.5. You may want to rebase to 6.7.

>   #else /* !CONFIG_KEXEC_CORE */
>   struct pt_regs;
>   struct task_struct;
> @@ -497,6 +506,10 @@ static inline void __crash_kexec(struct pt_regs *regs) { }
>   static inline void crash_kexec(struct pt_regs *regs) { }
>   static inline int kexec_should_crash(struct task_struct *p) { return 0; }
>   static inline int kexec_crash_loaded(void) { return 0; }
> +static inline void *kimage_map_segment(struct kimage *image,
> +				       unsigned long addr, unsigned long size)
> +{ return NULL; }
> +static inline void kimage_unmap_segment(void *buffer) { }
>   #define kexec_in_progress false
>   #endif /* CONFIG_KEXEC_CORE */
>   
> diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
> index 3d578c6fefee..26978ad02676 100644
> --- a/kernel/kexec_core.c
> +++ b/kernel/kexec_core.c
> @@ -594,11 +594,6 @@ void kimage_terminate(struct kimage *image)
>   	*image->entry = IND_DONE;
>   }
>   
> -#define for_each_kimage_entry(image, ptr, entry) \
> -	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
> -		ptr = (entry & IND_INDIRECTION) ? \
> -			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
> -
>   static void kimage_free_entry(kimage_entry_t entry)
>   {
>   	struct page *page;
> @@ -921,6 +916,60 @@ int kimage_load_segment(struct kimage *image,
>   	return result;
>   }
>   
> +void *kimage_map_segment(struct kimage *image,
> +			 unsigned long addr, unsigned long size)
> +{
> +	unsigned long eaddr = addr + size;
> +	unsigned long src_page_addr, dest_page_addr;
> +	unsigned int npages;
> +	struct page **src_pages;
> +	int i;
> +	kimage_entry_t *ptr, entry;
> +	void *vaddr = NULL;
> +
> +	/*
> +	 * Collect the source pages and map them in a contiguous VA range.
> +	 */
> +	npages = PFN_UP(eaddr) - PFN_DOWN(addr);
> +	src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
> +	if (!src_pages) {
> +		pr_err("%s: Could not allocate ima pages array.\n", __func__);
> +		return NULL;
> +	}
> +
> +	i = 0;
> +	for_each_kimage_entry(image, ptr, entry) {
> +		if (entry & IND_DESTINATION)
> +			dest_page_addr = entry & PAGE_MASK;
> +		else if (entry & IND_SOURCE) {
> +			if (dest_page_addr >= addr && dest_page_addr < eaddr) {
> +				src_page_addr = entry & PAGE_MASK;
> +				src_pages[i++] =
> +					virt_to_page(__va(src_page_addr));
> +				if (i == npages)
> +					break;
> +				dest_page_addr += PAGE_SIZE;
> +			}
> +		}
> +	}
> +
> +	/* Sanity check. */
> +	WARN_ON(i < npages);
> +
> +	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
> +	kfree(src_pages);
> +
> +	if (!vaddr)
> +		pr_err("%s: Could not map imap buffer.\n", __func__);

imap -> ima

> +
> +	return vaddr;
> +}
> +
> +void kimage_unmap_segment(void *segment_buffer)
> +{
> +	vunmap(segment_buffer);
> +}
> +
>   struct kexec_load_limit {
>   	/* Mutex protects the limit count. */
>   	struct mutex mutex;
> diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
> index 99daac355c70..4f944c9b4168 100644
> --- a/security/integrity/ima/ima_kexec.c
> +++ b/security/integrity/ima/ima_kexec.c
> @@ -170,6 +170,7 @@ void ima_add_kexec_buffer(struct kimage *image)
>   	pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
>   		 kbuf.mem);
>   }
> +

remove

>   #endif /* IMA_KEXEC */
>   
>   /*
Tushar Sugandhi Jan. 23, 2024, 8:39 p.m. UTC | #2
Thanks Stefan for taking a look.

On 1/23/24 09:03, Stefan Berger wrote:
> 
> 
> On 1/22/24 13:37, Tushar Sugandhi wrote:
>> Implement kimage_map_segment() to map the IMA buffer source pages of a
>> kimage after kexec 'load'.  Given a kimage pointer, an address, and a
>> size, the function gathers the source pages that fall within the
>> specified address range, builds an array of page pointers, and maps
>> them to a contiguous virtual address range.  It returns the start of
>> that range on success, or NULL on failure.
>>
>> Implement kimage_unmap_segment() to unmap a segment
>> using vunmap().  Relocate the 'for_each_kimage_entry()' macro from
>> kexec_core.c to kexec.h for broader accessibility.
>>
>> Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
>> ---
>>   include/linux/kexec.h              | 13 +++++++
>>   kernel/kexec_core.c                | 59 +++++++++++++++++++++++++++---
>>   security/integrity/ima/ima_kexec.c |  1 +
>>   3 files changed, 68 insertions(+), 5 deletions(-)
>>
>> diff --git a/include/linux/kexec.h b/include/linux/kexec.h
>> index 22b5cd24f581..e00b8101b53b 100644
>> --- a/include/linux/kexec.h
>> +++ b/include/linux/kexec.h
>> @@ -490,6 +490,15 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
>>   static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
>>   #endif
>> +#define for_each_kimage_entry(image, ptr, entry) \
>> +    for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
>> +        ptr = (entry & IND_INDIRECTION) ? \
>> +            boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
>> +
>> +extern void *kimage_map_segment(struct kimage *image,
>> +                unsigned long addr, unsigned long size);
>> +extern void kimage_unmap_segment(void *buffer);
>> +
> 
> This series applies to v6.5. You may want to rebase to 6.7.
> 
Will rebase. Thanks for catching this.
>>   #else /* !CONFIG_KEXEC_CORE */
>>   struct pt_regs;
>>   struct task_struct;
>> @@ -497,6 +506,10 @@ static inline void __crash_kexec(struct pt_regs *regs) { }
>>   static inline void crash_kexec(struct pt_regs *regs) { }
>>   static inline int kexec_should_crash(struct task_struct *p) { return 0; }
>>   static inline int kexec_crash_loaded(void) { return 0; }
>> +static inline void *kimage_map_segment(struct kimage *image,
>> +                       unsigned long addr, unsigned long size)
>> +{ return NULL; }
>> +static inline void kimage_unmap_segment(void *buffer) { }
>>   #define kexec_in_progress false
>>   #endif /* CONFIG_KEXEC_CORE */
>> diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
>> index 3d578c6fefee..26978ad02676 100644
>> --- a/kernel/kexec_core.c
>> +++ b/kernel/kexec_core.c
>> @@ -594,11 +594,6 @@ void kimage_terminate(struct kimage *image)
>>       *image->entry = IND_DONE;
>>   }
>> -#define for_each_kimage_entry(image, ptr, entry) \
>> -    for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
>> -        ptr = (entry & IND_INDIRECTION) ? \
>> -            boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
>> -
>>   static void kimage_free_entry(kimage_entry_t entry)
>>   {
>>       struct page *page;
>> @@ -921,6 +916,60 @@ int kimage_load_segment(struct kimage *image,
>>       return result;
>>   }
>> +void *kimage_map_segment(struct kimage *image,
>> +             unsigned long addr, unsigned long size)
>> +{
>> +    unsigned long eaddr = addr + size;
>> +    unsigned long src_page_addr, dest_page_addr;
>> +    unsigned int npages;
>> +    struct page **src_pages;
>> +    int i;
>> +    kimage_entry_t *ptr, entry;
>> +    void *vaddr = NULL;
>> +
>> +    /*
>> +     * Collect the source pages and map them in a contiguous VA range.
>> +     */
>> +    npages = PFN_UP(eaddr) - PFN_DOWN(addr);
>> +    src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
>> +    if (!src_pages) {
>> +        pr_err("%s: Could not allocate ima pages array.\n", __func__);
>> +        return NULL;
>> +    }
>> +
>> +    i = 0;
>> +    for_each_kimage_entry(image, ptr, entry) {
>> +        if (entry & IND_DESTINATION)
>> +            dest_page_addr = entry & PAGE_MASK;
>> +        else if (entry & IND_SOURCE) {
>> +            if (dest_page_addr >= addr && dest_page_addr < eaddr) {
>> +                src_page_addr = entry & PAGE_MASK;
>> +                src_pages[i++] =
>> +                    virt_to_page(__va(src_page_addr));
>> +                if (i == npages)
>> +                    break;
>> +                dest_page_addr += PAGE_SIZE;
>> +            }
>> +        }
>> +    }
>> +
>> +    /* Sanity check. */
>> +    WARN_ON(i < npages);
>> +
>> +    vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
>> +    kfree(src_pages);
>> +
>> +    if (!vaddr)
>> +        pr_err("%s: Could not map imap buffer.\n", __func__);
> 
> imap -> ima
> 
Good eye catching this.
Will fix.
>> +
>> +    return vaddr;
>> +}
>> +
>> +void kimage_unmap_segment(void *segment_buffer)
>> +{
>> +    vunmap(segment_buffer);
>> +}
>> +
>>   struct kexec_load_limit {
>>       /* Mutex protects the limit count. */
>>       struct mutex mutex;
>> diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
>> index 99daac355c70..4f944c9b4168 100644
>> --- a/security/integrity/ima/ima_kexec.c
>> +++ b/security/integrity/ima/ima_kexec.c
>> @@ -170,6 +170,7 @@ void ima_add_kexec_buffer(struct kimage *image)
>>       pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
>>            kbuf.mem);
>>   }
>> +
> 
> remove
>
Will do.

~Tushar

>>   #endif /* IMA_KEXEC */
>>   /*
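
Since the point of relocating for_each_kimage_entry() into kexec.h is
to let code outside kexec_core.c walk a kimage's entry list, a minimal
sketch of such a consumer follows.  The kimage_count_source_pages()
helper is hypothetical, shown purely to illustrate the iteration
pattern:

/*
 * Illustrative only.  With for_each_kimage_entry() visible via
 * <linux/kexec.h>, any kexec-aware code can walk the entry list the
 * same way kimage_map_segment() does.  This hypothetical helper just
 * counts the IND_SOURCE pages recorded in a kimage.
 */
static unsigned int kimage_count_source_pages(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	unsigned int nr = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_SOURCE)
			nr++;
	}

	return nr;
}
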
Patch

diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 22b5cd24f581..e00b8101b53b 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -490,6 +490,15 @@  static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
 static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
 #endif
 
+#define for_each_kimage_entry(image, ptr, entry) \
+	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
+		ptr = (entry & IND_INDIRECTION) ? \
+			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
+
+extern void *kimage_map_segment(struct kimage *image,
+				unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
+
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
 struct task_struct;
@@ -497,6 +506,10 @@  static inline void __crash_kexec(struct pt_regs *regs) { }
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image,
+				       unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
 #define kexec_in_progress false
 #endif /* CONFIG_KEXEC_CORE */
 
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 3d578c6fefee..26978ad02676 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -594,11 +594,6 @@  void kimage_terminate(struct kimage *image)
 	*image->entry = IND_DONE;
 }
 
-#define for_each_kimage_entry(image, ptr, entry) \
-	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
-		ptr = (entry & IND_INDIRECTION) ? \
-			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
-
 static void kimage_free_entry(kimage_entry_t entry)
 {
 	struct page *page;
@@ -921,6 +916,60 @@  int kimage_load_segment(struct kimage *image,
 	return result;
 }
 
+void *kimage_map_segment(struct kimage *image,
+			 unsigned long addr, unsigned long size)
+{
+	unsigned long eaddr = addr + size;
+	unsigned long src_page_addr, dest_page_addr;
+	unsigned int npages;
+	struct page **src_pages;
+	int i;
+	kimage_entry_t *ptr, entry;
+	void *vaddr = NULL;
+
+	/*
+	 * Collect the source pages and map them in a contiguous VA range.
+	 */
+	npages = PFN_UP(eaddr) - PFN_DOWN(addr);
+	src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
+	if (!src_pages) {
+		pr_err("%s: Could not allocate ima pages array.\n", __func__);
+		return NULL;
+	}
+
+	i = 0;
+	for_each_kimage_entry(image, ptr, entry) {
+		if (entry & IND_DESTINATION)
+			dest_page_addr = entry & PAGE_MASK;
+		else if (entry & IND_SOURCE) {
+			if (dest_page_addr >= addr && dest_page_addr < eaddr) {
+				src_page_addr = entry & PAGE_MASK;
+				src_pages[i++] =
+					virt_to_page(__va(src_page_addr));
+				if (i == npages)
+					break;
+				dest_page_addr += PAGE_SIZE;
+			}
+		}
+	}
+
+	/* Sanity check. */
+	WARN_ON(i < npages);
+
+	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
+	kfree(src_pages);
+
+	if (!vaddr)
+		pr_err("%s: Could not map imap buffer.\n", __func__);
+
+	return vaddr;
+}
+
+void kimage_unmap_segment(void *segment_buffer)
+{
+	vunmap(segment_buffer);
+}
+
 struct kexec_load_limit {
 	/* Mutex protects the limit count. */
 	struct mutex mutex;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index 99daac355c70..4f944c9b4168 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -170,6 +170,7 @@  void ima_add_kexec_buffer(struct kimage *image)
 	pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
 		 kbuf.mem);
 }
+
 #endif /* IMA_KEXEC */
 
 /*