diff mbox

[v2,4/6] kexec_file: Add mechanism to update kexec segments.

Message ID 1734678.Uaq7DTajoE@hactar (mailing list archive)
State New, archived
Headers show

Commit Message

Thiago Jung Bauermann Aug. 16, 2016, 5 p.m. UTC
Hello Andrew,

Thank you for your review!

Am Montag, 15 August 2016, 15:27:56 schrieb Andrew Morton:
> On Sat, 13 Aug 2016 00:18:23 -0300 Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com> wrote:
> > +/**
> > + * kexec_update_segment - update the contents of a kimage segment
> > + * @buffer:	New contents of the segment.
> > + * @bufsz:	@buffer size.
> > + * @load_addr:	Segment's physical address in the next kernel.
> > + * @memsz:	Segment size.
> > + *
> > + * This function assumes kexec_mutex is held.
> > + *
> > + * Return: 0 on success, negative errno on error.
> > + */
> > +int kexec_update_segment(const char *buffer, unsigned long bufsz,
> > +			 unsigned long load_addr, unsigned long memsz)
> > +{
> > +	int i;
> > +	unsigned long entry;
> > +	unsigned long *ptr = NULL;
> > +	void *dest = NULL;
> > +
> > +	if (kexec_image == NULL) {
> > +		pr_err("Can't update segment: no kexec image loaded.\n");
> > +		return -EINVAL;
> > +	}
> > +
> > +	/*
> > +	 * kexec_add_buffer rounds up segment sizes to PAGE_SIZE, so
> > +	 * we have to do it here as well.
> > +	 */
> > +	memsz = ALIGN(memsz, PAGE_SIZE);
> > +
> > +	for (i = 0; i < kexec_image->nr_segments; i++)
> > +		/* We only support updating whole segments. */
> > +		if (load_addr == kexec_image->segment[i].mem &&
> > +		    memsz == kexec_image->segment[i].memsz) {
> > +			if (kexec_image->segment[i].do_checksum) {
> > +				pr_err("Trying to update non-modifiable segment.\n");
> > +				return -EINVAL;
> > +			}
> > +
> > +			break;
> > +		}
> > +	if (i == kexec_image->nr_segments) {
> > +		pr_err("Couldn't find segment to update: 0x%lx, size 0x%lx\n",
> > +		       load_addr, memsz);
> > +		return -EINVAL;
> > +	}
> > +
> > +	for (entry = kexec_image->head; !(entry & IND_DONE) && memsz;
> > +	     entry = *ptr++) {
> > +		void *addr = (void *) (entry & PAGE_MASK);
> > +
> > +		switch (entry & IND_FLAGS) {
> > +		case IND_DESTINATION:
> > +			dest = addr;
> > +			break;
> > +		case IND_INDIRECTION:
> > +			ptr = __va(addr);
> > +			break;
> > +		case IND_SOURCE:
> > +			/* Shouldn't happen, but verify just to be safe. */
> > +			if (dest == NULL) {
> > +				pr_err("Invalid kexec entries list.");
> > +				return -EINVAL;
> > +			}
> > +
> > +			if (dest == (void *) load_addr) {
> > +				struct page *page;
> > +				char *ptr;
> > +				size_t uchunk, mchunk;
> > +
> > +				page = kmap_to_page(addr);
> > +
> > +				ptr = kmap(page);
> 
> kmap_atomic() could be used here, and it is appreciably faster.

Good idea. The patch below implements your suggestion.

This has a consequence for patch 5/6 in this series, because it causes
this code to be used in the code path of both the kexec_file_load and
kexec_load syscalls.

In the latter case, there's a call to copy_from_user and thus kmap_atomic 
can't be used. I can change the patch to use kmap_atomic if
state->from_kernel is true and kmap otherwise, but perhaps this is one more 
hint that patch 5/6 is not a very good idea after all.
diff mbox

Patch

diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 37eea32fdff1..14dda81e3e01 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -259,6 +259,8 @@  extern int kexec_purgatory_get_set_symbol(struct kimage *image,
 					  unsigned int size, bool get_value);
 extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
 					     const char *name);
+int kexec_update_segment(const char *buffer, unsigned long bufsz,
+			 unsigned long load_addr, unsigned long memsz);
 extern void __crash_kexec(struct pt_regs *);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 561675589511..9782b292714e 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -721,6 +721,105 @@  static struct page *kimage_alloc_page(struct kimage *image,
 	return page;
 }
 
+/**
+ * kexec_update_segment - update the contents of a kimage segment
+ * @buffer:	New contents of the segment.
+ * @bufsz:	@buffer size.
+ * @load_addr:	Segment's physical address in the next kernel.
+ * @memsz:	Segment size.
+ *
+ * This function assumes kexec_mutex is held.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int kexec_update_segment(const char *buffer, unsigned long bufsz,
+			 unsigned long load_addr, unsigned long memsz)
+{
+	int i;
+	unsigned long entry;
+	unsigned long *ptr = NULL;
+	void *dest = NULL;
+
+	if (kexec_image == NULL) {
+		pr_err("Can't update segment: no kexec image loaded.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * kexec_add_buffer rounds up segment sizes to PAGE_SIZE, so
+	 * we have to do it here as well.
+	 */
+	memsz = ALIGN(memsz, PAGE_SIZE);
+
+	for (i = 0; i < kexec_image->nr_segments; i++)
+		/* We only support updating whole segments. */
+		if (load_addr == kexec_image->segment[i].mem &&
+		    memsz == kexec_image->segment[i].memsz) {
+			if (kexec_image->segment[i].do_checksum) {
+				pr_err("Trying to update non-modifiable segment.\n");
+				return -EINVAL;
+			}
+
+			break;
+		}
+	if (i == kexec_image->nr_segments) {
+		pr_err("Couldn't find segment to update: 0x%lx, size 0x%lx\n",
+		       load_addr, memsz);
+		return -EINVAL;
+	}
+
+	for (entry = kexec_image->head; !(entry & IND_DONE) && memsz;
+	     entry = *ptr++) {
+		void *addr = (void *) (entry & PAGE_MASK);
+
+		switch (entry & IND_FLAGS) {
+		case IND_DESTINATION:
+			dest = addr;
+			break;
+		case IND_INDIRECTION:
+			ptr = __va(addr);
+			break;
+		case IND_SOURCE:
+			/* Shouldn't happen, but verify just to be safe. */
+			if (dest == NULL) {
+				pr_err("Invalid kexec entries list.");
+				return -EINVAL;
+			}
+
+			if (dest == (void *) load_addr) {
+				struct page *page;
+				char *ptr;
+				size_t uchunk, mchunk;
+
+				page = kmap_to_page(addr);
+
+				ptr = kmap_atomic(page);
+				ptr += load_addr & ~PAGE_MASK;
+				mchunk = min_t(size_t, memsz,
+					       PAGE_SIZE - (load_addr & ~PAGE_MASK));
+				uchunk = min(bufsz, mchunk);
+				memcpy(ptr, buffer, uchunk);
+
+				kunmap_atomic(ptr);
+
+				bufsz -= uchunk;
+				load_addr += mchunk;
+				buffer += mchunk;
+				memsz -= mchunk;
+			}
+			dest += PAGE_SIZE;
+		}
+
+		/* Shouldn't happen, but verify just to be safe. */
+		if (ptr == NULL) {
+			pr_err("Invalid kexec entries list.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static int kimage_load_normal_segment(struct kimage *image,
 					 struct kexec_segment *segment)
 {