Message ID | 20200623172327.5701-10-kristen@linux.intel.com (mailing list archive)
---|---
State | New, archived
Series | Function Granular KASLR
On Tue, Jun 23, 2020 at 10:23:26AM -0700, Kristen Carlson Accardi wrote:
> This patch makes /proc/kallsyms display alphabetically by symbol
> name rather than sorted by address in order to hide the newly
> randomized address layout.
>
> Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
> Reviewed-by: Tony Luck <tony.luck@intel.com>
> Tested-by: Tony Luck <tony.luck@intel.com>
> ---
>  kernel/kallsyms.c | 128 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 128 insertions(+)
>
> diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
> index 16c8c605f4b0..df2b20e1b7f2 100644
> --- a/kernel/kallsyms.c
> +++ b/kernel/kallsyms.c
> @@ -25,6 +25,7 @@
>  #include <linux/filter.h>
>  #include <linux/ftrace.h>
>  #include <linux/compiler.h>
> +#include <linux/list_sort.h>
>
>  /*
>   * These will be re-linked against their real values
> @@ -446,6 +447,11 @@ struct kallsym_iter {
>  	int show_value;
>  };
>
> +struct kallsyms_iter_list {
> +	struct kallsym_iter iter;
> +	struct list_head next;
> +};
> +
>  int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
>  			    char *type, char *name)
>  {
> @@ -660,6 +666,127 @@ int kallsyms_show_value(void)
>  	}
>  }

The #ifdef can be moved up to here:

#ifdef CONFIG_FG_KASLR

Otherwise without CONFIG_FG_KASLR, I get:

kernel/kallsyms.c:714:12: warning: ‘kallsyms_list_cmp’ defined but not used [-Wunused-function]
  714 | static int kallsyms_list_cmp(void *priv, struct list_head *a,
      |            ^~~~~~~~~~~~~~~~~

> +static int sorted_show(struct seq_file *m, void *p)
> +{
> +	struct list_head *list = m->private;
> +	struct kallsyms_iter_list *iter;
> +	int rc;
> +
> +	if (list_empty(list))
> +		return 0;
> +
> +	iter = list_first_entry(list, struct kallsyms_iter_list, next);
> +
> +	m->private = iter;
> +	rc = s_show(m, p);
> +	m->private = list;
> +
> +	list_del(&iter->next);
> +	kfree(iter);
> +
> +	return rc;
> +}
> +
> +static void *sorted_start(struct seq_file *m, loff_t *pos)
> +{
> +	return m->private;
> +}
> +
> +static void *sorted_next(struct seq_file *m, void *p, loff_t *pos)
> +{
> +	struct list_head *list = m->private;
> +
> +	(*pos)++;
> +
> +	if (list_empty(list))
> +		return NULL;
> +
> +	return p;
> +}
> +
> +static const struct seq_operations kallsyms_sorted_op = {
> +	.start = sorted_start,
> +	.next = sorted_next,
> +	.stop = s_stop,
> +	.show = sorted_show
> +};
> +
> +static int kallsyms_list_cmp(void *priv, struct list_head *a,
> +			     struct list_head *b)
> +{
> +	struct kallsyms_iter_list *iter_a, *iter_b;
> +
> +	iter_a = list_entry(a, struct kallsyms_iter_list, next);
> +	iter_b = list_entry(b, struct kallsyms_iter_list, next);
> +
> +	return strcmp(iter_a->iter.name, iter_b->iter.name);
> +}
> +
> +int get_all_symbol_name(void *data, const char *name, struct module *mod,
> +			unsigned long addr)

This can be static too.

Otherwise, looks good!

Reviewed-by: Kees Cook <keescook@chromium.org>
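Concretely, the move Kees suggests would look something like the
following sketch (bodies elided; only the guard placement is the
point, so that a !CONFIG_FG_KASLR build never compiles the otherwise
unused helpers):

	/* Sketch of the suggested guard placement. */
	#ifdef CONFIG_FG_KASLR

	/* sorted_show(), sorted_start(), sorted_next(),
	 * kallsyms_sorted_op, kallsyms_list_cmp(),
	 * get_all_symbol_name(), and the sorting kallsyms_open()
	 * all live under this one guard ...
	 */

	#else /* !CONFIG_FG_KASLR */

	/* ... and the existing kallsyms_open() stays in this branch. */

	#endif /* CONFIG_FG_KASLR */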
On Tue, Jun 23, 2020 at 10:23:26AM -0700, Kristen Carlson Accardi wrote:
> +static int kallsyms_open(struct inode *inode, struct file *file)
> +{
> +	int ret;
> +	struct list_head *list;
> +
> +	list = __seq_open_private(file, &kallsyms_sorted_op, sizeof(*list));
> +	if (!list)
> +		return -ENOMEM;
> +
> +	INIT_LIST_HEAD(list);
> +
> +	ret = kallsyms_on_each_symbol(get_all_symbol_name, list);
> +	if (ret != 0)
> +		return ret;
> +
> +	list_sort(NULL, list, kallsyms_list_cmp);
> +
> +	return 0;
> +}

Oh, wait, one thing! I think this feedback to v2 got missed:
https://lore.kernel.org/lkml/202005211441.F63205B7@keescook/

This bug still exists, and has the same solution.
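The linked v2 feedback is not reproduced here, but judging only from
the code above, the visible bug is that the early return leaks both
the __seq_open_private() allocation and any list entries already
collected. A minimal sketch of that style of error-path cleanup,
assuming that is the issue being referenced:

	ret = kallsyms_on_each_symbol(get_all_symbol_name, list);
	if (ret != 0) {
		struct kallsyms_iter_list *pos, *tmp;

		/* Drop any entries collected before the failure ... */
		list_for_each_entry_safe(pos, tmp, list, next) {
			list_del(&pos->next);
			kfree(pos);
		}
		/* ... and release the seq_file private allocation too. */
		seq_release_private(inode, file);
		return ret;
	}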
On Tue, Jun 23, 2020 at 7:26 PM Kristen Carlson Accardi
<kristen@linux.intel.com> wrote:
> This patch makes /proc/kallsyms display alphabetically by symbol
> name rather than sorted by address in order to hide the newly
> randomized address layout.
[...]
> +static int sorted_show(struct seq_file *m, void *p)
> +{
> +	struct list_head *list = m->private;
> +	struct kallsyms_iter_list *iter;
> +	int rc;
> +
> +	if (list_empty(list))
> +		return 0;
> +
> +	iter = list_first_entry(list, struct kallsyms_iter_list, next);
> +
> +	m->private = iter;
> +	rc = s_show(m, p);
> +	m->private = list;
> +
> +	list_del(&iter->next);
> +	kfree(iter);

Does anything like this kfree() happen if someone only reads the start
of kallsyms and then closes the file? IOW, does "while true; do head
-n1 /proc/kallsyms; done" leak memory?

> +	return rc;
> +}
[...]
> +static int kallsyms_list_cmp(void *priv, struct list_head *a,
> +			     struct list_head *b)
> +{
> +	struct kallsyms_iter_list *iter_a, *iter_b;
> +
> +	iter_a = list_entry(a, struct kallsyms_iter_list, next);
> +	iter_b = list_entry(b, struct kallsyms_iter_list, next);
> +
> +	return strcmp(iter_a->iter.name, iter_b->iter.name);
> +}

This sorts only by name, but kallsyms prints more information (module
names and types). This means that if there are elements whose names
are the same, but whose module names or types are different, then some
amount of information will still be leaked by the ordering of elements
with the same name. In practice, since list_sort() is stable, this
means you can see the ordering of many modules, and you can see the
ordering of symbols with same name but different visibility (e.g.
"t user_read" from security/selinux/ss/policydb.c vs "T user_read"
from security/keys/user_defined.c, and a couple other similar cases).

[...]
> +#if defined(CONFIG_FG_KASLR)
> +/*
> + * When fine grained kaslr is enabled, we need to
> + * print out the symbols sorted by name rather than by
> + * by address, because this reveals the randomization order.
> + */
> +static int kallsyms_open(struct inode *inode, struct file *file)
> +{
> +	int ret;
> +	struct list_head *list;
> +
> +	list = __seq_open_private(file, &kallsyms_sorted_op, sizeof(*list));
> +	if (!list)
> +		return -ENOMEM;
> +
> +	INIT_LIST_HEAD(list);
> +
> +	ret = kallsyms_on_each_symbol(get_all_symbol_name, list);
> +	if (ret != 0)
> +		return ret;
> +
> +	list_sort(NULL, list, kallsyms_list_cmp);

This permits running an algorithm (essentially mergesort) with
secret-dependent branches and memory addresses on essentially secret
data, triggerable and arbitrarily repeatable (although with partly
different addresses on each run) by the attacker, and probably a
fairly low throughput (comparisons go through indirect function calls,
which are slowed down by retpolines, and linked list iteration implies
slow pointer chases). Those are fairly favorable conditions for
typical side-channel attacks.

Do you have estimates of how hard it would be to leverage such side
channels to recover function ordering (both on old hardware that only
has microcode fixes for Spectre and such, and on newer hardware with
enhanced IBRS and such)?

> +	return 0;
> +}
On Wed, Jun 24, 2020 at 12:21:16PM +0200, Jann Horn wrote:
> On Tue, Jun 23, 2020 at 7:26 PM Kristen Carlson Accardi
> <kristen@linux.intel.com> wrote:
> > This patch makes /proc/kallsyms display alphabetically by symbol
> > name rather than sorted by address in order to hide the newly
> > randomized address layout.
> [...]
> > +static int sorted_show(struct seq_file *m, void *p)
> > +{
> > +	struct list_head *list = m->private;
> > +	struct kallsyms_iter_list *iter;
> > +	int rc;
> > +
> > +	if (list_empty(list))
> > +		return 0;
> > +
> > +	iter = list_first_entry(list, struct kallsyms_iter_list, next);
> > +
> > +	m->private = iter;
> > +	rc = s_show(m, p);
> > +	m->private = list;
> > +
> > +	list_del(&iter->next);
> > +	kfree(iter);
>
> Does anything like this kfree() happen if someone only reads the start
> of kallsyms and then closes the file? IOW, does "while true; do head
> -n1 /proc/kallsyms; done" leak memory?

Oop, nice catch. It seems the list would need to be walked on s_stop.

> > +	return rc;
> > +}
> [...]
> > +static int kallsyms_list_cmp(void *priv, struct list_head *a,
> > +			     struct list_head *b)
> > +{
> > +	struct kallsyms_iter_list *iter_a, *iter_b;
> > +
> > +	iter_a = list_entry(a, struct kallsyms_iter_list, next);
> > +	iter_b = list_entry(b, struct kallsyms_iter_list, next);
> > +
> > +	return strcmp(iter_a->iter.name, iter_b->iter.name);
> > +}
>
> This sorts only by name, but kallsyms prints more information (module
> names and types). This means that if there are elements whose names
> are the same, but whose module names or types are different, then some
> amount of information will still be leaked by the ordering of elements
> with the same name. In practice, since list_sort() is stable, this
> means you can see the ordering of many modules, and you can see the
> ordering of symbols with same name but different visibility (e.g.
> "t user_read" from security/selinux/ss/policydb.c vs "T user_read"
> from security/keys/user_defined.c, and a couple other similar cases).

i.e. sub-sort by visibility?

> [...]
> > +#if defined(CONFIG_FG_KASLR)
> > +/*
> > + * When fine grained kaslr is enabled, we need to
> > + * print out the symbols sorted by name rather than by
> > + * by address, because this reveals the randomization order.
> > + */
> > +static int kallsyms_open(struct inode *inode, struct file *file)
> > +{
> > +	int ret;
> > +	struct list_head *list;
> > +
> > +	list = __seq_open_private(file, &kallsyms_sorted_op, sizeof(*list));
> > +	if (!list)
> > +		return -ENOMEM;
> > +
> > +	INIT_LIST_HEAD(list);
> > +
> > +	ret = kallsyms_on_each_symbol(get_all_symbol_name, list);
> > +	if (ret != 0)
> > +		return ret;
> > +
> > +	list_sort(NULL, list, kallsyms_list_cmp);
>
> This permits running an algorithm (essentially mergesort) with
> secret-dependent branches and memory addresses on essentially secret
> data, triggerable and arbitrarily repeatable (although with partly
> different addresses on each run) by the attacker, and probably a
> fairly low throughput (comparisons go through indirect function calls,
> which are slowed down by retpolines, and linked list iteration implies
> slow pointer chases). Those are fairly favorable conditions for
> typical side-channel attacks.
>
> Do you have estimates of how hard it would be to leverage such side
> channels to recover function ordering (both on old hardware that only
> has microcode fixes for Spectre and such, and on newer hardware with
> enhanced IBRS and such)?

I wonder, instead, if sorting should be just done once per module
load/unload? That would make the performance and memory management
easier too.
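For illustration, one way to plug the leak Jann found. Note that
->stop runs after every seq_file buffer fill, not only at close, so
freeing the list there would break multi-read sequences; a dedicated
->release is arguably the safer spot. A minimal sketch under that
assumption (kallsyms_sorted_release is a hypothetical name, not part
of the posted patch):

	/* Hypothetical ->release for the sorted /proc/kallsyms file. */
	static int kallsyms_sorted_release(struct inode *inode,
					   struct file *file)
	{
		struct seq_file *m = file->private_data;
		struct list_head *list = m->private;
		struct kallsyms_iter_list *pos, *tmp;

		/* Free whatever sorted_show() never consumed because
		 * the reader closed the file early ("head -n1").
		 */
		list_for_each_entry_safe(pos, tmp, list, next) {
			list_del(&pos->next);
			kfree(pos);
		}

		/* seq_release_private() then frees the list head itself. */
		return seq_release_private(inode, file);
	}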
On Wed, 2020-06-24 at 08:18 -0700, Kees Cook wrote:
> On Wed, Jun 24, 2020 at 12:21:16PM +0200, Jann Horn wrote:
> [...]
> > Does anything like this kfree() happen if someone only reads the
> > start of kallsyms and then closes the file? IOW, does "while true;
> > do head -n1 /proc/kallsyms; done" leak memory?
>
> Oop, nice catch. It seems the list would need to be walked on s_stop.
>
> [...]
> i.e. sub-sort by visibility?
>
> [...]
> > This permits running an algorithm (essentially mergesort) with
> > secret-dependent branches and memory addresses on essentially
> > secret data, triggerable and arbitrarily repeatable (although with
> > partly different addresses on each run) by the attacker, and
> > probably a fairly low throughput (comparisons go through indirect
> > function calls, which are slowed down by retpolines, and linked
> > list iteration implies slow pointer chases). Those are fairly
> > favorable conditions for typical side-channel attacks.
> >
> > Do you have estimates of how hard it would be to leverage such side
> > channels to recover function ordering (both on old hardware that
> > only has microcode fixes for Spectre and such, and on newer
> > hardware with enhanced IBRS and such)?
>
> I wonder, instead, if sorting should be just done once per module
> load/unload? That would make the performance and memory management
> easier too.

My first solution (just don't show kallsyms at all for non-root) is
looking better and better :). But seriously, I will rewrite this one
with something like this, but I think we are going to need another
close look to see if side-channel issues still exist with the new
implementation. Hopefully Jann can take another look then.
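For what the suggested sub-sort might look like: a sketch only, which
assumes the module_name and type fields that struct kallsym_iter
already carries; it was never part of the posted patch. (The thread
below ends up shuffling instead, which sidesteps the comparator
entirely.)

	static int kallsyms_list_cmp(void *priv, struct list_head *a,
				     struct list_head *b)
	{
		struct kallsyms_iter_list *iter_a, *iter_b;
		int ret;

		iter_a = list_entry(a, struct kallsyms_iter_list, next);
		iter_b = list_entry(b, struct kallsyms_iter_list, next);

		/* Primary key: symbol name, as before. */
		ret = strcmp(iter_a->iter.name, iter_b->iter.name);
		if (ret)
			return ret;

		/* Tie-break on module name, then on symbol type, so
		 * equally named symbols cannot leak their relative
		 * (randomized) order through the stable sort.
		 */
		ret = strcmp(iter_a->iter.module_name,
			     iter_b->iter.module_name);
		if (ret)
			return ret;

		return (int)iter_a->iter.type - (int)iter_b->iter.type;
	}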
On Wed, 2020-06-24 at 08:18 -0700, Kees Cook wrote:
> On Wed, Jun 24, 2020 at 12:21:16PM +0200, Jann Horn wrote:
> [...]
> > Do you have estimates of how hard it would be to leverage such side
> > channels to recover function ordering (both on old hardware that
> > only has microcode fixes for Spectre and such, and on newer
> > hardware with enhanced IBRS and such)?
>
> I wonder, instead, if sorting should be just done once per module
> load/unload? That would make the performance and memory management
> easier too.

I've been thinking about something like the below instead - I was
thinking that there was no reason to sort kallsyms at all, so instead I
just randomly shuffle an index list. Do you see any issues with this
approach? (I guess I need to rename my functions at least...)

From 2e8eb63c957f469af047dd4b056e54aa93f6fe35 Mon Sep 17 00:00:00 2001
From: Kristen Carlson Accardi <kristen@linux.intel.com>
Date: Mon, 3 Feb 2020 12:06:44 -0800
Subject: [PATCH 1/2] kallsyms: Hide layout

This patch makes /proc/kallsyms display in a random order, rather than
sorted by address in order to hide the newly randomized address layout.

Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Tested-by: Tony Luck <tony.luck@intel.com>
---
 kernel/kallsyms.c | 117 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 117 insertions(+)

diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 16c8c605f4b0..7e4939b9ed61 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -25,6 +25,7 @@
 #include <linux/filter.h>
 #include <linux/ftrace.h>
 #include <linux/compiler.h>
+#include <linux/list_sort.h>
 
 /*
  * These will be re-linked against their real values
@@ -446,6 +447,17 @@ struct kallsym_iter {
 	int show_value;
 };
 
+struct kallsyms_iter_list {
+	struct kallsym_iter iter;
+	struct list_head next;
+};
+
+struct kallsyms_sorted_iter {
+	struct kallsym_iter iter;
+	loff_t total_syms;
+	loff_t shuffled_index[];
+};
+
 int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
 			    char *type, char *name)
 {
@@ -660,6 +672,110 @@ int kallsyms_show_value(void)
 	}
 }
 
+static void *sorted_start(struct seq_file *m, loff_t *pos)
+{
+	struct kallsyms_sorted_iter *s_iter = m->private;
+
+	if (*pos >= s_iter->total_syms)
+		return NULL;
+
+	return s_start(m, &s_iter->shuffled_index[*pos]);
+}
+
+static void *sorted_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct kallsyms_sorted_iter *s_iter = m->private;
+
+	(*pos)++;
+
+	if (*pos >= s_iter->total_syms)
+		return NULL;
+
+	if (!update_iter(m->private, s_iter->shuffled_index[*pos]))
+		return NULL;
+
+	return p;
+}
+
+static const struct seq_operations kallsyms_sorted_op = {
+	.start = sorted_start,
+	.next = sorted_next,
+	.stop = s_stop,
+	.show = s_show
+};
+
+/*
+ * shuffle_text_list()
+ * Use a Fisher Yates algorithm to shuffle a list of text sections.
+ */
+static void shuffle_index_list(loff_t *indexes, int size)
+{
+	int i;
+	unsigned int j;
+	loff_t temp;
+
+	for (i = size - 1; i > 0; i--) {
+		/* pick a random index from 0 to i */
+		get_random_bytes(&j, sizeof(j));
+		j = j % (i + 1);
+
+		temp = indexes[i];
+		indexes[i] = indexes[j];
+		indexes[j] = temp;
+	}
+}
+
+#if defined(CONFIG_FG_KASLR)
+/*
+ * When fine grained kaslr is enabled, we need to
+ * print out the symbols sorted by name rather than by
+ * by address, because this reveals the randomization order.
+ */
+static int kallsyms_open(struct inode *inode, struct file *file)
+{
+	loff_t pos;
+	struct kallsyms_sorted_iter *sorted_iter;
+	struct kallsym_iter iter;
+
+	/*
+	 * we need to figure out how many extra symbols there are
+	 * to print out past kallsyms_num_syms
+	 */
+	pos = kallsyms_num_syms;
+	reset_iter(&iter, 0);
+	do {
+		if (!update_iter(&iter, pos))
+			break;
+		pos++;
+	} while (1);
+
+	/*
+	 * add storage space for an array of loff_t equal to the size
+	 * of the total number of symbols we need to print
+	 */
+	sorted_iter = __seq_open_private(file, &kallsyms_sorted_op,
+					 sizeof(*sorted_iter) +
+					 (sizeof(pos) * pos));
+	if (!sorted_iter)
+		return -ENOMEM;
+
+	reset_iter(&sorted_iter->iter, 0);
+	sorted_iter->iter.show_value = kallsyms_show_value();
+	sorted_iter->total_syms = pos;
+
+	/*
+	 * initialize the array with all possible pos values, then
+	 * shuffle the array so that the values will display in a random
+	 * order.
+	 */
+	for (pos = 0; pos < sorted_iter->total_syms; pos++)
+		sorted_iter->shuffled_index[pos] = pos;
+
+	shuffle_index_list(sorted_iter->shuffled_index,
+			   sorted_iter->total_syms);
+
+	return 0;
+}
+#else
 static int kallsyms_open(struct inode *inode, struct file *file)
 {
 	/*
@@ -676,6 +792,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
 	iter->show_value = kallsyms_show_value();
 	return 0;
 }
+#endif /* CONFIG_FG_KASLR */
 
 #ifdef CONFIG_KGDB_KDB
 const char *kdb_walk_kallsyms(loff_t *pos)
> Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
> Reviewed-by: Tony Luck <tony.luck@intel.com>
> Tested-by: Tony Luck <tony.luck@intel.com>

I'll happily review and test again ... but since you've made
substantive changes, you should drop these tags until I do.

FWIW I think random order is a good idea. Do you shuffle once?
Or every time somebody opens /proc/kallsyms?

-Tony
On Tue, 2020-07-07 at 23:16 +0000, Luck, Tony wrote:
> > Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
> > Reviewed-by: Tony Luck <tony.luck@intel.com>
> > Tested-by: Tony Luck <tony.luck@intel.com>
>
> I'll happily review and test again ... but since you've made
> substantive changes, you should drop these tags until I do.

Will do - thanks! If nobody thinks this is a horrible direction, I'll
clean up this patch and submit it with the rest as part of v4.

> FWIW I think random order is a good idea. Do you shuffle once?
> Or every time somebody opens /proc/kallsyms?

I am shuffling every single time that somebody opens /proc/kallsyms -
this is because it's possible that somebody has loaded modules or bpf
stuff and there may be new/different symbols to display.
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 16c8c605f4b0..df2b20e1b7f2 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -25,6 +25,7 @@
 #include <linux/filter.h>
 #include <linux/ftrace.h>
 #include <linux/compiler.h>
+#include <linux/list_sort.h>
 
 /*
  * These will be re-linked against their real values
@@ -446,6 +447,11 @@ struct kallsym_iter {
 	int show_value;
 };
 
+struct kallsyms_iter_list {
+	struct kallsym_iter iter;
+	struct list_head next;
+};
+
 int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
 			    char *type, char *name)
 {
@@ -660,6 +666,127 @@ int kallsyms_show_value(void)
 	}
 }
 
+static int sorted_show(struct seq_file *m, void *p)
+{
+	struct list_head *list = m->private;
+	struct kallsyms_iter_list *iter;
+	int rc;
+
+	if (list_empty(list))
+		return 0;
+
+	iter = list_first_entry(list, struct kallsyms_iter_list, next);
+
+	m->private = iter;
+	rc = s_show(m, p);
+	m->private = list;
+
+	list_del(&iter->next);
+	kfree(iter);
+
+	return rc;
+}
+
+static void *sorted_start(struct seq_file *m, loff_t *pos)
+{
+	return m->private;
+}
+
+static void *sorted_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct list_head *list = m->private;
+
+	(*pos)++;
+
+	if (list_empty(list))
+		return NULL;
+
+	return p;
+}
+
+static const struct seq_operations kallsyms_sorted_op = {
+	.start = sorted_start,
+	.next = sorted_next,
+	.stop = s_stop,
+	.show = sorted_show
+};
+
+static int kallsyms_list_cmp(void *priv, struct list_head *a,
+			     struct list_head *b)
+{
+	struct kallsyms_iter_list *iter_a, *iter_b;
+
+	iter_a = list_entry(a, struct kallsyms_iter_list, next);
+	iter_b = list_entry(b, struct kallsyms_iter_list, next);
+
+	return strcmp(iter_a->iter.name, iter_b->iter.name);
+}
+
+int get_all_symbol_name(void *data, const char *name, struct module *mod,
+			unsigned long addr)
+{
+	unsigned long sym_pos;
+	struct kallsyms_iter_list *node, *last;
+	struct list_head *head = (struct list_head *)data;
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	if (list_empty(head)) {
+		sym_pos = 0;
+		memset(node, 0, sizeof(*node));
+		reset_iter(&node->iter, 0);
+		node->iter.show_value = kallsyms_show_value();
+	} else {
+		last = list_first_entry(head, struct kallsyms_iter_list, next);
+		memcpy(node, last, sizeof(*node));
+		sym_pos = last->iter.pos;
+	}
+
+	INIT_LIST_HEAD(&node->next);
+	list_add(&node->next, head);
+
+	/*
+	 * update_iter returns false when at end of file
+	 * which in this case we don't care about and can
+	 * safely ignore. update_iter() will increment
+	 * the value of iter->pos, for ksymbol_core.
+	 */
+	if (sym_pos >= kallsyms_num_syms)
+		sym_pos++;
+
+	(void)update_iter(&node->iter, sym_pos);
+
+	return 0;
+}
+
+#if defined(CONFIG_FG_KASLR)
+/*
+ * When fine grained kaslr is enabled, we need to
+ * print out the symbols sorted by name rather than by
+ * by address, because this reveals the randomization order.
+ */
+static int kallsyms_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	struct list_head *list;
+
+	list = __seq_open_private(file, &kallsyms_sorted_op, sizeof(*list));
+	if (!list)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(list);
+
+	ret = kallsyms_on_each_symbol(get_all_symbol_name, list);
+	if (ret != 0)
+		return ret;
+
+	list_sort(NULL, list, kallsyms_list_cmp);
+
+	return 0;
+}
+#else
 static int kallsyms_open(struct inode *inode, struct file *file)
 {
 	/*
@@ -676,6 +803,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
 	iter->show_value = kallsyms_show_value();
 	return 0;
 }
+#endif /* CONFIG_FG_KASLR */
 
 #ifdef CONFIG_KGDB_KDB
 const char *kdb_walk_kallsyms(loff_t *pos)