
mark most percpu globals as __ro_after_init

Message ID 20170510173637.25116-1-danielmicay@gmail.com (mailing list archive)
State: New, archived

Commit Message

Daniel Micay May 10, 2017, 5:36 p.m. UTC
Moving pcpu_base_addr to this section comes from PaX where it's part of
KERNEXEC. This extends it to the rest of the globals only written by the
init code.

Signed-off-by: Daniel Micay <danielmicay@gmail.com>
---
 mm/percpu.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)
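
For context, the two annotations differ in what they actually guarantee: __read_mostly only groups a variable with other rarely written data for cache locality and gives no write protection, while __ro_after_init places it in a section that the kernel remaps read-only once boot-time initialization finishes (via mark_rodata_ro() on architectures that support it), so any later write faults. A rough sketch of the definitions, paraphrasing include/linux/cache.h and the arch cache headers — the exact spelling varies by kernel version and architecture:

/*
 * Paraphrased definitions, not copied verbatim from any one tree.
 * __read_mostly: cache-locality hint only, no write protection.
 * __ro_after_init: writable during boot, then covered by
 * mark_rodata_ro() after init, so post-init writes fault.
 */
#define __read_mostly	__attribute__((__section__(".data..read_mostly")))
#define __ro_after_init	__attribute__((__section__(".data..ro_after_init")))

That is why the change is limited to globals whose last write happens in __init code.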

Comments

Tejun Heo May 10, 2017, 5:52 p.m. UTC | #1
Hello, Daniel.

On Wed, May 10, 2017 at 01:36:37PM -0400, Daniel Micay wrote:
> Moving pcpu_base_addr to this section comes from PaX where it's part of
> KERNEXEC. This extends it to the rest of the globals only written by the
> init code.

How did you test the patch?

> -static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
> +static struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */

At least this one isn't read only.

Thanks.
Daniel Micay May 10, 2017, 5:59 p.m. UTC | #2
On Wed, 2017-05-10 at 13:52 -0400, Tejun Heo wrote:
> Hello, Daniel.
> 
> On Wed, May 10, 2017 at 01:36:37PM -0400, Daniel Micay wrote:
> > Moving pcpu_base_addr to this section comes from PaX where it's part
> > of KERNEXEC. This extends it to the rest of the globals only written
> > by the init code.
> 
> How did you test the patch?

Booted / did some stuff on x86 (it's running right now), and currently
building it for a 3.18 arm64 kernel to test there.

> 
> > -static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
> > +static struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
> 
> At least this one isn't read only.

It's the array it points to that gets modified after init, not the
pointer variable itself; the pointer is only assigned once during init
with pcpu_slot = memblock_virt_alloc(...). The references after init are
all pcpu_slot[...], including taking references to slots in the array,
so there's always a dereference happening first.
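Concretely, the pattern is roughly the following — a simplified sketch of
the relevant mm/percpu.c code, not verbatim; the names marked _sketch are
illustrative:

/* Simplified sketch; names marked _sketch are illustrative, not real symbols. */
static struct list_head *pcpu_slot __ro_after_init;	/* chunk list slots */

void __init pcpu_slot_setup_sketch(void)
{
	int i;

	/* the only write to the pointer itself, done once during init */
	pcpu_slot = memblock_virt_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);
}

static void pcpu_chunk_relocate_sketch(struct pcpu_chunk *chunk, int nslot)
{
	/*
	 * Post-init code only dereferences the pointer: the list heads it
	 * points to are modified, but pcpu_slot itself never is, so marking
	 * it __ro_after_init is safe.
	 */
	list_move(&chunk->list, &pcpu_slot[nslot]);
}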
Kees Cook May 10, 2017, 6:16 p.m. UTC | #3
On Wed, May 10, 2017 at 10:36 AM, Daniel Micay <danielmicay@gmail.com> wrote:
> Moving pcpu_base_addr to this section comes from PaX where it's part of
> KERNEXEC. This extends it to the rest of the globals only written by the
> init code.
>
> Signed-off-by: Daniel Micay <danielmicay@gmail.com>

Excellent, thanks!

Acked-by: Kees Cook <keescook@chromium.org>

-Kees

Daniel Micay May 10, 2017, 6:31 p.m. UTC | #4
> Booted / did some stuff on x86 (it's running right now), and currently
> building it for a 3.18 arm64 kernel to test there.

Yeah, confirmed to work on an Android 3.18 arm64 kernel (Pixel phone)
too, in addition to 4.11 and master for x86_64.
Tejun Heo May 10, 2017, 7:20 p.m. UTC | #5
On Wed, May 10, 2017 at 01:59:32PM -0400, Daniel Micay wrote:
> On Wed, 2017-05-10 at 13:52 -0400, Tejun Heo wrote:
> > Hello, Daniel.
> > 
> > On Wed, May 10, 2017 at 01:36:37PM -0400, Daniel Micay wrote:
> > > Moving pcpu_base_addr to this section comes from PaX where it's part
> > > of KERNEXEC. This extends it to the rest of the globals only written
> > > by the init code.
> > 
> > How did you test the patch?
> 
> Booted / did some stuff on x86 (it's running right now), and currently
> building it for a 3.18 arm64 kernel to test there.
> 
> > 
> > > -static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
> > > +static struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
> > 
> > At least this one isn't read only.
> 
> It's the array it points to that gets modified after init, not the
> pointer variable itself; the pointer is only assigned once during init
> with pcpu_slot = memblock_virt_alloc(...). The references after init are
> all pcpu_slot[...], including taking references to slots in the array,
> so there's always a dereference happening first.

Ah, right.  Cool, applying to percpu/for-4.13.

Thanks.

Patch

diff --git a/mm/percpu.c b/mm/percpu.c
index e0aa8ae7bde7..c03753054099 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -121,35 +121,35 @@  struct pcpu_chunk {
 	unsigned long		populated[];	/* populated bitmap */
 };
 
-static int pcpu_unit_pages __read_mostly;
-static int pcpu_unit_size __read_mostly;
-static int pcpu_nr_units __read_mostly;
-static int pcpu_atom_size __read_mostly;
-static int pcpu_nr_slots __read_mostly;
-static size_t pcpu_chunk_struct_size __read_mostly;
+static int pcpu_unit_pages __ro_after_init;
+static int pcpu_unit_size __ro_after_init;
+static int pcpu_nr_units __ro_after_init;
+static int pcpu_atom_size __ro_after_init;
+static int pcpu_nr_slots __ro_after_init;
+static size_t pcpu_chunk_struct_size __ro_after_init;
 
 /* cpus with the lowest and highest unit addresses */
-static unsigned int pcpu_low_unit_cpu __read_mostly;
-static unsigned int pcpu_high_unit_cpu __read_mostly;
+static unsigned int pcpu_low_unit_cpu __ro_after_init;
+static unsigned int pcpu_high_unit_cpu __ro_after_init;
 
 /* the address of the first chunk which starts with the kernel static area */
-void *pcpu_base_addr __read_mostly;
+void *pcpu_base_addr __ro_after_init;
 EXPORT_SYMBOL_GPL(pcpu_base_addr);
 
-static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
-const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
+static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
+const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */
 
 /* group information, used for vm allocation */
-static int pcpu_nr_groups __read_mostly;
-static const unsigned long *pcpu_group_offsets __read_mostly;
-static const size_t *pcpu_group_sizes __read_mostly;
+static int pcpu_nr_groups __ro_after_init;
+static const unsigned long *pcpu_group_offsets __ro_after_init;
+static const size_t *pcpu_group_sizes __ro_after_init;
 
 /*
  * The first chunk which always exists.  Note that unlike other
  * chunks, this one can be allocated and mapped in several different
  * ways and thus often doesn't live in the vmalloc area.
  */
-static struct pcpu_chunk *pcpu_first_chunk;
+static struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
 
 /*
  * Optional reserved chunk.  This chunk reserves part of the first
@@ -158,13 +158,13 @@  static struct pcpu_chunk *pcpu_first_chunk;
  * area doesn't exist, the following variables contain NULL and 0
  * respectively.
  */
-static struct pcpu_chunk *pcpu_reserved_chunk;
-static int pcpu_reserved_chunk_limit;
+static struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
+static int pcpu_reserved_chunk_limit __ro_after_init;
 
 static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
 static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 
-static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+static struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
 
 /* chunks which need their map areas extended, protected by pcpu_lock */
 static LIST_HEAD(pcpu_map_extend_chunks);