diff mbox series

[v2,3/7] ima: kexec: map source pages containing IMA buffer to image post kexec load

Message ID 20231005182602.634615-4-tusharsu@linux.microsoft.com (mailing list archive)
State New, archived
Headers show
Series ima: kexec: measure events between kexec load and execute | expand

Commit Message

Tushar Sugandhi Oct. 5, 2023, 6:25 p.m. UTC
Currently, the mechanism to map and unmap segments to the kimage
structure is not available to the subsystems outside of kexec.  This
functionality is needed when IMA is allocating the memory segments
during kexec 'load' operation.

Implement kimage_map_segment() which takes a kimage pointer, an address,
and a size.  Ensure that the entire segment is being mapped by comparing
the given address and size to each segment in the kimage's segment array.
Collect the source pages that correspond to the given address range,
allocate an array of pointers to these pages, and map them to a
contiguous range of virtual addresses.  If the mapping operation is
successful, the function returns the start of this range.  Otherwise, it
frees the page pointer array and returns NULL.

Implement kimage_unmap_segment() that takes a pointer to a segment buffer
and unmaps it using vunmap().

Implement function ima_kexec_post_load(), to be called by IMA after kexec
loads the new Kernel image.  ima_kexec_post_load() would map the IMA
buffer allocated during kexec 'load' to a segment in the loaded image.

Finally, move for_each_kimage_entry() macro from kexec_core.c to kexec.h.

Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
---
 include/linux/ima.h                |  3 ++
 include/linux/kexec.h              | 13 ++++++
 kernel/kexec_core.c                | 73 ++++++++++++++++++++++++++++--
 security/integrity/ima/ima_kexec.c | 32 +++++++++++++
 4 files changed, 116 insertions(+), 5 deletions(-)

Comments

Stefan Berger Oct. 13, 2023, 12:29 a.m. UTC | #1
On 10/5/23 14:25, Tushar Sugandhi wrote:
> Currently, the mechanism to map and unmap segments to the kimage
> structure is not available to the subsystems outside of kexec.  This
> functionality is needed when IMA is allocating the memory segments
> during kexec 'load' operation.
>
> Implement kimage_map_segment() which takes a kimage pointer, an address,
> and a size.  Ensure that the entire segment is being mapped by comparing
> the given address and size to each segment in the kimage's segment array.
> Collect the source pages that correspond to the given address range,
> allocate an array of pointers to these pages, and map them to a
> contiguous range of virtual addresses.  If the mapping operation is
> successful, the function returns the start of this range.  Otherwise, it
> frees the page pointer array and returns NULL.
>
> Implement kimage_unmap_segment() that takes a pointer to a segment buffer
> and unmaps it using vunmap().
>
> Implement function ima_kexec_post_load(), to be called by IMA after kexec
> loads the new Kernel image.  ima_kexec_post_load() would map the IMA
> buffer allocated during kexec 'load' to a segment in the loaded image.
>
> Finally, move for_each_kimage_entry() macro from kexec_core.c to kexec.h.
>
> Signed-off-by: Tushar Sugandhi<tusharsu@linux.microsoft.com>
> ---
>   include/linux/ima.h                |  3 ++
>   include/linux/kexec.h              | 13 ++++++
>   kernel/kexec_core.c                | 73 ++++++++++++++++++++++++++++--
>   security/integrity/ima/ima_kexec.c | 32 +++++++++++++
>   4 files changed, 116 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/ima.h b/include/linux/ima.h
> index 86b57757c7b1..006db20f852d 100644
> --- a/include/linux/ima.h
> +++ b/include/linux/ima.h
> @@ -49,6 +49,9 @@ static inline void ima_appraise_parse_cmdline(void) {}
>   
>   #ifdef CONFIG_IMA_KEXEC
>   extern void ima_add_kexec_buffer(struct kimage *image);
> +extern void ima_kexec_post_load(struct kimage *image);
> +#else
> +static inline void ima_kexec_post_load(struct kimage *image) {}
>   #endif
>   
>   #else
> diff --git a/include/linux/kexec.h b/include/linux/kexec.h
> index 22b5cd24f581..e00b8101b53b 100644
> --- a/include/linux/kexec.h
> +++ b/include/linux/kexec.h
> @@ -490,6 +490,15 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
>   static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
>   #endif
>   
> +#define for_each_kimage_entry(image, ptr, entry) \
> +	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
> +		ptr = (entry & IND_INDIRECTION) ? \
> +			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
> +
> +extern void *kimage_map_segment(struct kimage *image,
> +				unsigned long addr, unsigned long size);
> +extern void kimage_unmap_segment(void *buffer);
> +
>   #else /* !CONFIG_KEXEC_CORE */
>   struct pt_regs;
>   struct task_struct;
> @@ -497,6 +506,10 @@ static inline void __crash_kexec(struct pt_regs *regs) { }
>   static inline void crash_kexec(struct pt_regs *regs) { }
>   static inline int kexec_should_crash(struct task_struct *p) { return 0; }
>   static inline int kexec_crash_loaded(void) { return 0; }
> +static inline void *kimage_map_segment(struct kimage *image,
> +				       unsigned long addr, unsigned long size)
> +{ return NULL; }
> +static inline void kimage_unmap_segment(void *buffer) { }
>   #define kexec_in_progress false
>   #endif /* CONFIG_KEXEC_CORE */
>   
> diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
> index 3d578c6fefee..e01156f3c404 100644
> --- a/kernel/kexec_core.c
> +++ b/kernel/kexec_core.c
> @@ -594,11 +594,6 @@ void kimage_terminate(struct kimage *image)
>   	*image->entry = IND_DONE;
>   }
>   
> -#define for_each_kimage_entry(image, ptr, entry) \
> -	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
> -		ptr = (entry & IND_INDIRECTION) ? \
> -			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
> -
>   static void kimage_free_entry(kimage_entry_t entry)
>   {
>   	struct page *page;
> @@ -921,6 +916,74 @@ int kimage_load_segment(struct kimage *image,
>   	return result;
>   }
>   
> +void *kimage_map_segment(struct kimage *image,
> +			 unsigned long addr, unsigned long size)
> +{
> +	unsigned long eaddr = addr + size;
> +	unsigned long src_page_addr, dest_page_addr;
> +	struct page **src_pages;
> +	int i, npages;
> +	kimage_entry_t *ptr, entry;
> +	void *vaddr = NULL;
> +
> +	/*
> +	 * Make sure that we are mapping a whole segment.
> +	 */
> +	for (i = 0; i < image->nr_segments; i++) {
> +		if (addr == image->segment[i].mem &&
> +		    size == image->segment[i].memsz) {
> +			break;
> +		}
> +	}

I wonder whether this distrust in the segments or in the passed 
pointer&size is justifiable since you call this function like this:

         ima_kexec_buffer = kimage_map_segment(image,
image->ima_buffer_addr,
image->ima_buffer_size);

and previously kexec_add_buffer() was also called:

         kbuf.buffer = kexec_buffer;
         kbuf.bufsz = kexec_buffer_size;
         kbuf.memsz = kexec_segment_size;
         ret = kexec_add_buffer(&kbuf);
         if (ret) {
                 pr_err("Error passing over kexec measurement buffer.\n");
                 vfree(kexec_buffer);
                 return;
         }

         image->ima_buffer_addr = kbuf.mem;
         image->ima_buffer_size = kexec_segment_size;

So the one segment should be matching these addresses.


> +
> +	if (i == image->nr_segments) {
> +		pr_err("%s: No segment matching [%lx, %lx)\n", __func__,
> +		       addr, eaddr);
> +		return NULL;
> +	}
> +
> +	/*
> +	 * Collect the source pages and map them in a contiguous VA range.
> +	 */
> +	npages = PFN_UP(eaddr) - PFN_DOWN(addr);
> +	src_pages = kmalloc(sizeof(*src_pages) * npages, GFP_KERNEL);
> +	if (!src_pages) {
> +		pr_err("%s: Could not allocate ima pages array.\n", __func__);
> +		return NULL;
> +	}
> +
> +	i = 0;
> +	for_each_kimage_entry(image, ptr, entry) {
> +		if (entry & IND_DESTINATION)
> +			dest_page_addr = entry & PAGE_MASK;
> +		else if (entry & IND_SOURCE) {
> +			if (dest_page_addr >= addr && dest_page_addr < eaddr) {
> +				src_page_addr = entry & PAGE_MASK;
> +				src_pages[i++] =
> +					virt_to_page(__va(src_page_addr));
> +				if (i == npages)
> +					break;
> +				dest_page_addr += PAGE_SIZE;
> +			}
> +		}
> +	}
> +
> +	/* Sanity check. */
> +	WARN_ON(i < npages);
> +
> +	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
On either success or failure kfree(src_pages); should go right here to 
not have a memory leak.
> +	if (!vaddr) {
> +		pr_err("%s: Could not map imap buffer.\n", __func__);
> +		kfree(src_pages);
> +	}
> +	return vaddr;
> +}
> +
> +void kimage_unmap_segment(void *segment_buffer)
> +{
> +	vunmap(segment_buffer);
> +}
> +
>   struct kexec_load_limit {
>   	/* Mutex protects the limit count. */
>   	struct mutex mutex;
> diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
> index 2c11bbe6efef..13fbbb90319b 100644
> --- a/security/integrity/ima/ima_kexec.c
> +++ b/security/integrity/ima/ima_kexec.c
> @@ -12,6 +12,8 @@
>   #include <linux/kexec.h>
>   #include <linux/of.h>
>   #include <linux/ima.h>
> +#include <linux/reboot.h>
> +#include <asm/page.h>
>   #include "ima.h"
>   
>   #ifdef CONFIG_IMA_KEXEC
> @@ -19,6 +21,7 @@ struct seq_file ima_kexec_file;
>   struct ima_kexec_hdr ima_khdr;
>   static void *ima_kexec_buffer;
>   static size_t kexec_segment_size;
> +static bool ima_kexec_update_registered;
>   
>   void ima_clear_kexec_file(void)
>   {
> @@ -221,6 +224,7 @@ static int ima_update_kexec_buffer(struct notifier_block *self,
>   	}
>   	memcpy(ima_kexec_buffer, buf, buf_size);
>   out:
> +	kimage_unmap_segment(ima_kexec_buffer);
>   	ima_kexec_buffer = NULL;
>   
>   	if (resume)
> @@ -232,6 +236,34 @@ struct notifier_block update_buffer_nb = {
>   	.notifier_call = ima_update_kexec_buffer,
>   };
>   
> +/*
> + * Create a mapping for the source pages that contain the IMA buffer
> + * so we can update it later.
> + */
> +void ima_kexec_post_load(struct kimage *image)
> +{
> +	if (ima_kexec_buffer) {
> +		kimage_unmap_segment(ima_kexec_buffer);
> +		ima_kexec_buffer = NULL;
> +	}
> +
> +	if (!image->ima_buffer_addr)
> +		return;
> +
> +	ima_kexec_buffer = kimage_map_segment(image,
> +					      image->ima_buffer_addr,
> +					      image->ima_buffer_size);
> +	if (!ima_kexec_buffer) {
> +		pr_err("%s: Could not map measurements buffer.\n", __func__);
> +		return;
> +	}
> +
> +	if (!ima_kexec_update_registered) {
> +		register_reboot_notifier(&update_buffer_nb);
> +		ima_kexec_update_registered = true;
> +	}
> +}
> +
>   #endif /* IMA_KEXEC */
>   
>   /*
Tushar Sugandhi Oct. 20, 2023, 8:36 p.m. UTC | #2
On 10/12/23 17:29, Stefan Berger wrote:
> 
> On 10/5/23 14:25, Tushar Sugandhi wrote:
>> Currently, the mechanism to map and unmap segments to the kimage
>> structure is not available to the subsystems outside of kexec.  This
>> functionality is needed when IMA is allocating the memory segments
>> during kexec 'load' operation.
>>
>> Implement kimage_map_segment() which takes a kimage pointer, an address,
>> and a size.  Ensure that the entire segment is being mapped by comparing
>> the given address and size to each segment in the kimage's segment array.
>> Collect the source pages that correspond to the given address range,
>> allocate an array of pointers to these pages, and map them to a
>> contiguous range of virtual addresses.  If the mapping operation is
>> successful, the function returns the start of this range.  Otherwise, it
>> frees the page pointer array and returns NULL.
>>
>> Implement kimage_unmap_segment() that takes a pointer to a segment buffer
>> and unmaps it using vunmap().
>>
>> Implement function ima_kexec_post_load(), to be called by IMA after kexec
>> loads the new Kernel image.  ima_kexec_post_load() would map the IMA
>> buffer allocated during kexec 'load' to a segment in the loaded image.
>>
>> Finally, move for_each_kimage_entry() macro from kexec_core.c to kexec.h.
>>
>> Signed-off-by: Tushar Sugandhi<tusharsu@linux.microsoft.com>
>> ---
>>   include/linux/ima.h                |  3 ++
>>   include/linux/kexec.h              | 13 ++++++
>>   kernel/kexec_core.c                | 73 ++++++++++++++++++++++++++++--
>>   security/integrity/ima/ima_kexec.c | 32 +++++++++++++
>>   4 files changed, 116 insertions(+), 5 deletions(-)
>>
>> diff --git a/include/linux/ima.h b/include/linux/ima.h
>> index 86b57757c7b1..006db20f852d 100644
>> --- a/include/linux/ima.h
>> +++ b/include/linux/ima.h
>> @@ -49,6 +49,9 @@ static inline void ima_appraise_parse_cmdline(void) {}
>>   #ifdef CONFIG_IMA_KEXEC
>>   extern void ima_add_kexec_buffer(struct kimage *image);
>> +extern void ima_kexec_post_load(struct kimage *image);
>> +#else
>> +static inline void ima_kexec_post_load(struct kimage *image) {}
>>   #endif
>>   #else
>> diff --git a/include/linux/kexec.h b/include/linux/kexec.h
>> index 22b5cd24f581..e00b8101b53b 100644
>> --- a/include/linux/kexec.h
>> +++ b/include/linux/kexec.h
>> @@ -490,6 +490,15 @@ static inline int 
>> arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
>>   static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned 
>> int pages) { }
>>   #endif
>> +#define for_each_kimage_entry(image, ptr, entry) \
>> +    for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
>> +        ptr = (entry & IND_INDIRECTION) ? \
>> +            boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
>> +
>> +extern void *kimage_map_segment(struct kimage *image,
>> +                unsigned long addr, unsigned long size);
>> +extern void kimage_unmap_segment(void *buffer);
>> +
>>   #else /* !CONFIG_KEXEC_CORE */
>>   struct pt_regs;
>>   struct task_struct;
>> @@ -497,6 +506,10 @@ static inline void __crash_kexec(struct pt_regs 
>> *regs) { }
>>   static inline void crash_kexec(struct pt_regs *regs) { }
>>   static inline int kexec_should_crash(struct task_struct *p) { return 
>> 0; }
>>   static inline int kexec_crash_loaded(void) { return 0; }
>> +static inline void *kimage_map_segment(struct kimage *image,
>> +                       unsigned long addr, unsigned long size)
>> +{ return NULL; }
>> +static inline void kimage_unmap_segment(void *buffer) { }
>>   #define kexec_in_progress false
>>   #endif /* CONFIG_KEXEC_CORE */
>> diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
>> index 3d578c6fefee..e01156f3c404 100644
>> --- a/kernel/kexec_core.c
>> +++ b/kernel/kexec_core.c
>> @@ -594,11 +594,6 @@ void kimage_terminate(struct kimage *image)
>>       *image->entry = IND_DONE;
>>   }
>> -#define for_each_kimage_entry(image, ptr, entry) \
>> -    for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
>> -        ptr = (entry & IND_INDIRECTION) ? \
>> -            boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
>> -
>>   static void kimage_free_entry(kimage_entry_t entry)
>>   {
>>       struct page *page;
>> @@ -921,6 +916,74 @@ int kimage_load_segment(struct kimage *image,
>>       return result;
>>   }
>> +void *kimage_map_segment(struct kimage *image,
>> +             unsigned long addr, unsigned long size)
>> +{
>> +    unsigned long eaddr = addr + size;
>> +    unsigned long src_page_addr, dest_page_addr;
>> +    struct page **src_pages;
>> +    int i, npages;
>> +    kimage_entry_t *ptr, entry;
>> +    void *vaddr = NULL;
>> +
>> +    /*
>> +     * Make sure that we are mapping a whole segment.
>> +     */
>> +    for (i = 0; i < image->nr_segments; i++) {
>> +        if (addr == image->segment[i].mem &&
>> +            size == image->segment[i].memsz) {
>> +            break;
>> +        }
>> +    }
> 
> I wonder whether this distrust in the segments or in the passed 
> pointer&size is justifiable since you call this function like this:
> 
>          ima_kexec_buffer = kimage_map_segment(image,
> image->ima_buffer_addr,
> image->ima_buffer_size);
> 
> and previously kexec_add_buffer() was also called:
> 
>          kbuf.buffer = kexec_buffer;
>          kbuf.bufsz = kexec_buffer_size;
>          kbuf.memsz = kexec_segment_size;
>          ret = kexec_add_buffer(&kbuf);
>          if (ret) {
>                  pr_err("Error passing over kexec measurement buffer.\n");
>                  vfree(kexec_buffer);
>                  return;
>          }
> 
>          image->ima_buffer_addr = kbuf.mem;
>          image->ima_buffer_size = kexec_segment_size;
> 
> So the one segment should be matching these addresses.
> 
> 
Yeh. This check is an overkill at this point.
I agree. I will remove this extra check.
>> +
>> +    if (i == image->nr_segments) {
>> +        pr_err("%s: No segment matching [%lx, %lx)\n", __func__,
>> +               addr, eaddr);
>> +        return NULL;
>> +    }
>> +
>> +    /*
>> +     * Collect the source pages and map them in a contiguous VA range.
>> +     */
>> +    npages = PFN_UP(eaddr) - PFN_DOWN(addr);
>> +    src_pages = kmalloc(sizeof(*src_pages) * npages, GFP_KERNEL);
>> +    if (!src_pages) {
>> +        pr_err("%s: Could not allocate ima pages array.\n", __func__);
>> +        return NULL;
>> +    }
>> +
>> +    i = 0;
>> +    for_each_kimage_entry(image, ptr, entry) {
>> +        if (entry & IND_DESTINATION)
>> +            dest_page_addr = entry & PAGE_MASK;
>> +        else if (entry & IND_SOURCE) {
>> +            if (dest_page_addr >= addr && dest_page_addr < eaddr) {
>> +                src_page_addr = entry & PAGE_MASK;
>> +                src_pages[i++] =
>> +                    virt_to_page(__va(src_page_addr));
>> +                if (i == npages)
>> +                    break;
>> +                dest_page_addr += PAGE_SIZE;
>> +            }
>> +        }
>> +    }
>> +
>> +    /* Sanity check. */
>> +    WARN_ON(i < npages);
>> +
>> +    vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
> On either success or failure kfree(src_pages); should go right here to 
> not have a memory leak.
Will do.

~Tushar
>> +    if (!vaddr) {
>> +        pr_err("%s: Could not map imap buffer.\n", __func__);
>> +        kfree(src_pages);
>> +    }
>> +    return vaddr;
>> +}
>> +
>> +void kimage_unmap_segment(void *segment_buffer)
>> +{
>> +    vunmap(segment_buffer);
>> +}
>> +
>>   struct kexec_load_limit {
>>       /* Mutex protects the limit count. */
>>       struct mutex mutex;
>> diff --git a/security/integrity/ima/ima_kexec.c 
>> b/security/integrity/ima/ima_kexec.c
>> index 2c11bbe6efef..13fbbb90319b 100644
>> --- a/security/integrity/ima/ima_kexec.c
>> +++ b/security/integrity/ima/ima_kexec.c
>> @@ -12,6 +12,8 @@
>>   #include <linux/kexec.h>
>>   #include <linux/of.h>
>>   #include <linux/ima.h>
>> +#include <linux/reboot.h>
>> +#include <asm/page.h>
>>   #include "ima.h"
>>   #ifdef CONFIG_IMA_KEXEC
>> @@ -19,6 +21,7 @@ struct seq_file ima_kexec_file;
>>   struct ima_kexec_hdr ima_khdr;
>>   static void *ima_kexec_buffer;
>>   static size_t kexec_segment_size;
>> +static bool ima_kexec_update_registered;
>>   void ima_clear_kexec_file(void)
>>   {
>> @@ -221,6 +224,7 @@ static int ima_update_kexec_buffer(struct 
>> notifier_block *self,
>>       }
>>       memcpy(ima_kexec_buffer, buf, buf_size);
>>   out:
>> +    kimage_unmap_segment(ima_kexec_buffer);
>>       ima_kexec_buffer = NULL;
>>       if (resume)
>> @@ -232,6 +236,34 @@ struct notifier_block update_buffer_nb = {
>>       .notifier_call = ima_update_kexec_buffer,
>>   };
>> +/*
>> + * Create a mapping for the source pages that contain the IMA buffer
>> + * so we can update it later.
>> + */
>> +void ima_kexec_post_load(struct kimage *image)
>> +{
>> +    if (ima_kexec_buffer) {
>> +        kimage_unmap_segment(ima_kexec_buffer);
>> +        ima_kexec_buffer = NULL;
>> +    }
>> +
>> +    if (!image->ima_buffer_addr)
>> +        return;
>> +
>> +    ima_kexec_buffer = kimage_map_segment(image,
>> +                          image->ima_buffer_addr,
>> +                          image->ima_buffer_size);
>> +    if (!ima_kexec_buffer) {
>> +        pr_err("%s: Could not map measurements buffer.\n", __func__);
>> +        return;
>> +    }
>> +
>> +    if (!ima_kexec_update_registered) {
>> +        register_reboot_notifier(&update_buffer_nb);
>> +        ima_kexec_update_registered = true;
>> +    }
>> +}
>> +
>>   #endif /* IMA_KEXEC */
>>   /*
diff mbox series

Patch

diff --git a/include/linux/ima.h b/include/linux/ima.h
index 86b57757c7b1..006db20f852d 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -49,6 +49,9 @@  static inline void ima_appraise_parse_cmdline(void) {}
 
 #ifdef CONFIG_IMA_KEXEC
 extern void ima_add_kexec_buffer(struct kimage *image);
+extern void ima_kexec_post_load(struct kimage *image);
+#else
+static inline void ima_kexec_post_load(struct kimage *image) {}
 #endif
 
 #else
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 22b5cd24f581..e00b8101b53b 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -490,6 +490,15 @@  static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
 static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
 #endif
 
+#define for_each_kimage_entry(image, ptr, entry) \
+	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
+		ptr = (entry & IND_INDIRECTION) ? \
+			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
+
+extern void *kimage_map_segment(struct kimage *image,
+				unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
+
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
 struct task_struct;
@@ -497,6 +506,10 @@  static inline void __crash_kexec(struct pt_regs *regs) { }
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image,
+				       unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
 #define kexec_in_progress false
 #endif /* CONFIG_KEXEC_CORE */
 
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 3d578c6fefee..e01156f3c404 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -594,11 +594,6 @@  void kimage_terminate(struct kimage *image)
 	*image->entry = IND_DONE;
 }
 
-#define for_each_kimage_entry(image, ptr, entry) \
-	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
-		ptr = (entry & IND_INDIRECTION) ? \
-			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
-
 static void kimage_free_entry(kimage_entry_t entry)
 {
 	struct page *page;
@@ -921,6 +916,74 @@  int kimage_load_segment(struct kimage *image,
 	return result;
 }
 
+void *kimage_map_segment(struct kimage *image,
+			 unsigned long addr, unsigned long size)
+{
+	unsigned long eaddr = addr + size;
+	unsigned long src_page_addr, dest_page_addr;
+	struct page **src_pages;
+	int i, npages;
+	kimage_entry_t *ptr, entry;
+	void *vaddr = NULL;
+
+	/*
+	 * Make sure that we are mapping a whole segment.
+	 */
+	for (i = 0; i < image->nr_segments; i++) {
+		if (addr == image->segment[i].mem &&
+		    size == image->segment[i].memsz) {
+			break;
+		}
+	}
+
+	if (i == image->nr_segments) {
+		pr_err("%s: No segment matching [%lx, %lx)\n", __func__,
+		       addr, eaddr);
+		return NULL;
+	}
+
+	/*
+	 * Collect the source pages and map them in a contiguous VA range.
+	 */
+	npages = PFN_UP(eaddr) - PFN_DOWN(addr);
+	src_pages = kmalloc(sizeof(*src_pages) * npages, GFP_KERNEL);
+	if (!src_pages) {
+		pr_err("%s: Could not allocate ima pages array.\n", __func__);
+		return NULL;
+	}
+
+	i = 0;
+	for_each_kimage_entry(image, ptr, entry) {
+		if (entry & IND_DESTINATION)
+			dest_page_addr = entry & PAGE_MASK;
+		else if (entry & IND_SOURCE) {
+			if (dest_page_addr >= addr && dest_page_addr < eaddr) {
+				src_page_addr = entry & PAGE_MASK;
+				src_pages[i++] =
+					virt_to_page(__va(src_page_addr));
+				if (i == npages)
+					break;
+				dest_page_addr += PAGE_SIZE;
+			}
+		}
+	}
+
+	/* Sanity check. */
+	WARN_ON(i < npages);
+
+	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
+	if (!vaddr) {
+		pr_err("%s: Could not map imap buffer.\n", __func__);
+		kfree(src_pages);
+	}
+	return vaddr;
+}
+
+void kimage_unmap_segment(void *segment_buffer)
+{
+	vunmap(segment_buffer);
+}
+
 struct kexec_load_limit {
 	/* Mutex protects the limit count. */
 	struct mutex mutex;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index 2c11bbe6efef..13fbbb90319b 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -12,6 +12,8 @@ 
 #include <linux/kexec.h>
 #include <linux/of.h>
 #include <linux/ima.h>
+#include <linux/reboot.h>
+#include <asm/page.h>
 #include "ima.h"
 
 #ifdef CONFIG_IMA_KEXEC
@@ -19,6 +21,7 @@  struct seq_file ima_kexec_file;
 struct ima_kexec_hdr ima_khdr;
 static void *ima_kexec_buffer;
 static size_t kexec_segment_size;
+static bool ima_kexec_update_registered;
 
 void ima_clear_kexec_file(void)
 {
@@ -221,6 +224,7 @@  static int ima_update_kexec_buffer(struct notifier_block *self,
 	}
 	memcpy(ima_kexec_buffer, buf, buf_size);
 out:
+	kimage_unmap_segment(ima_kexec_buffer);
 	ima_kexec_buffer = NULL;
 
 	if (resume)
@@ -232,6 +236,34 @@  struct notifier_block update_buffer_nb = {
 	.notifier_call = ima_update_kexec_buffer,
 };
 
+/*
+ * Create a mapping for the source pages that contain the IMA buffer
+ * so we can update it later.
+ */
+void ima_kexec_post_load(struct kimage *image)
+{
+	if (ima_kexec_buffer) {
+		kimage_unmap_segment(ima_kexec_buffer);
+		ima_kexec_buffer = NULL;
+	}
+
+	if (!image->ima_buffer_addr)
+		return;
+
+	ima_kexec_buffer = kimage_map_segment(image,
+					      image->ima_buffer_addr,
+					      image->ima_buffer_size);
+	if (!ima_kexec_buffer) {
+		pr_err("%s: Could not map measurements buffer.\n", __func__);
+		return;
+	}
+
+	if (!ima_kexec_update_registered) {
+		register_reboot_notifier(&update_buffer_nb);
+		ima_kexec_update_registered = true;
+	}
+}
+
 #endif /* IMA_KEXEC */
 
 /*