--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -260,14 +260,14 @@ extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
- size_t align);
+ size_t align, ulong flags);
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
- size_t align)
+ size_t align, ulong flags)
{
return NULL;
}
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -153,8 +153,12 @@ static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
struct page **pages, int page_start, int page_end)
{
+ struct vm_struct **vms = (struct vm_struct **)chunk->data;
unsigned int cpu;
int i;
+ ulong addr, nr_pages;
+
+ nr_pages = page_end - page_start;
for_each_possible_cpu(cpu) {
for (i = page_start; i < page_end; i++) {
@@ -164,8 +168,14 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
WARN_ON(!page);
pages[pcpu_page_idx(cpu, i)] = page;
}
- __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
- page_end - page_start);
+ addr = pcpu_chunk_addr(chunk, cpu, page_start);
+
+ /* TODO: We should batch the TLB flushes */
+ if (vms[0]->flags & VM_GLOBAL_NONSENSITIVE)
+ asi_unmap(ASI_GLOBAL_NONSENSITIVE, (void *)addr,
+ nr_pages * PAGE_SIZE, true);
+
+ __pcpu_unmap_pages(addr, nr_pages);
}
}
@@ -212,18 +222,30 @@ static int __pcpu_map_pages(unsigned long addr, struct page **pages,
* reverse lookup (addr -> chunk).
*/
static int pcpu_map_pages(struct pcpu_chunk *chunk,
- struct page **pages, int page_start, int page_end)
+ struct page **pages, int page_start, int page_end,
+ gfp_t gfp)
{
unsigned int cpu, tcpu;
int i, err;
+ ulong addr, nr_pages;
+
+ nr_pages = page_end - page_start;
for_each_possible_cpu(cpu) {
- err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
+ addr = pcpu_chunk_addr(chunk, cpu, page_start);
+ err = __pcpu_map_pages(addr,
&pages[pcpu_page_idx(cpu, page_start)],
- page_end - page_start);
+ nr_pages);
if (err < 0)
goto err;
+ if (gfp & __GFP_GLOBAL_NONSENSITIVE) {
+ err = asi_map(ASI_GLOBAL_NONSENSITIVE, (void *)addr,
+ nr_pages * PAGE_SIZE);
+ if (err)
+ goto err;
+ }
+
for (i = page_start; i < page_end; i++)
pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
chunk);
@@ -231,10 +253,15 @@ static int pcpu_map_pages(struct pcpu_chunk *chunk,
return 0;
err:
for_each_possible_cpu(tcpu) {
+ addr = pcpu_chunk_addr(chunk, tcpu, page_start);
+
+ if (gfp & __GFP_GLOBAL_NONSENSITIVE)
+ asi_unmap(ASI_GLOBAL_NONSENSITIVE, (void *)addr,
+ nr_pages * PAGE_SIZE, false);
+
+ __pcpu_unmap_pages(addr, nr_pages);
if (tcpu == cpu)
break;
- __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
- page_end - page_start);
}
pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
return err;
@@ -285,7 +312,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
return -ENOMEM;
- if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
+ if (pcpu_map_pages(chunk, pages, page_start, page_end, gfp)) {
pcpu_free_pages(chunk, pages, page_start, page_end);
return -ENOMEM;
}
@@ -334,13 +361,19 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
struct pcpu_chunk *chunk;
struct vm_struct **vms;
+ ulong vm_flags = 0;
+
+ if (static_asi_enabled() && (gfp & __GFP_GLOBAL_NONSENSITIVE))
+ vm_flags = VM_GLOBAL_NONSENSITIVE;
+
+ gfp &= ~__GFP_GLOBAL_NONSENSITIVE;
chunk = pcpu_alloc_chunk(gfp);
if (!chunk)
return NULL;
vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
- pcpu_nr_groups, pcpu_atom_size);
+ pcpu_nr_groups, pcpu_atom_size, vm_flags);
if (!vms) {
pcpu_free_chunk(chunk);
return NULL;
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3664,10 +3664,10 @@ pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
*/
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
- size_t align)
+ size_t align, ulong flags)
{
- const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
- const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
+ unsigned long vmalloc_start = VMALLOC_START;
+ unsigned long vmalloc_end = VMALLOC_END;
struct vmap_area **vas, *va;
struct vm_struct **vms;
int area, area2, last_area, term_area;
@@ -3677,6 +3677,15 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
/* verify parameters and allocate data structures */
BUG_ON(offset_in_page(align) || !is_power_of_2(align));
+
+ if (static_asi_enabled() && (flags & VM_GLOBAL_NONSENSITIVE)) {
+ vmalloc_start = VMALLOC_GLOBAL_NONSENSITIVE_START;
+ vmalloc_end = VMALLOC_GLOBAL_NONSENSITIVE_END;
+ }
+
+ vmalloc_start = ALIGN(vmalloc_start, align);
+ vmalloc_end = vmalloc_end & ~(align - 1);
+
for (last_area = 0, area = 0; area < nr_vms; area++) {
start = offsets[area];
end = start + sizes[area];
@@ -3815,7 +3824,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
for (area = 0; area < nr_vms; area++) {
insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
- setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
+ setup_vmalloc_vm_locked(vms[area], vas[area], flags | VM_ALLOC,
pcpu_get_vm_areas);
}
spin_unlock(&vmap_area_lock);
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -68,7 +68,7 @@ config PAGE_TABLE_ISOLATION
config ADDRESS_SPACE_ISOLATION
bool "Allow code to run with a reduced kernel address space"
default n
- depends on X86_64 && !UML && SLAB
+ depends on X86_64 && !UML && SLAB && !NEED_PER_CPU_KM
depends on !PARAVIRT
help
This feature provides the ability to run some kernel code
This adds support for mapping and unmapping dynamic percpu chunks as
globally non-sensitive. A later patch will modify the percpu allocator
to use this for dynamically allocating non-sensitive percpu memory.

Signed-off-by: Junaid Shahid <junaids@google.com>
---
 include/linux/vmalloc.h |  4 ++--
 mm/percpu-vm.c          | 51 +++++++++++++++++++++++++++++++++--------
 mm/vmalloc.c            | 17 ++++++++++----
 security/Kconfig        |  2 +-
 4 files changed, 58 insertions(+), 16 deletions(-)
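
For context, a rough usage sketch (not part of this patch): it assumes
the follow-up allocator patch mentioned above exposes
__GFP_GLOBAL_NONSENSITIVE through the regular percpu allocation entry
points, and the example_* names below are invented for illustration
only.

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

struct example_stats {
	u64 hits;
	u64 misses;
};

static struct example_stats __percpu *stats;

static int example_init(void)
{
	/*
	 * With the new gfp flag, pcpu_create_chunk() passes
	 * VM_GLOBAL_NONSENSITIVE to pcpu_get_vm_areas(), so the chunk's
	 * address range comes from VMALLOC_GLOBAL_NONSENSITIVE_* and
	 * pcpu_map_pages() additionally asi_map()s the backing pages.
	 */
	stats = alloc_percpu_gfp(struct example_stats,
				 GFP_KERNEL | __GFP_GLOBAL_NONSENSITIVE);
	if (!stats)
		return -ENOMEM;

	this_cpu_inc(stats->hits);
	return 0;
}

static void example_exit(void)
{
	/* The free path reaches pcpu_unmap_pages(), which asi_unmap()s. */
	free_percpu(stats);
}

Note the asymmetry in the patch: the map path learns the sensitivity
from the gfp mask, while the unmap path recovers it from
vms[0]->flags on the chunk, so no gfp argument is needed when freeing.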