@@ -397,92 +397,73 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
  * Mark portion of the address space read only.
  */
extern struct shared_info shared_info;
-static void set_readonly(void *text, void *etext)
-{
-    unsigned long start_address =
-        ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
-    unsigned long end_address = (unsigned long) etext;
-    pgentry_t *tab = pt_base, page;
-    unsigned long mfn = pfn_to_mfn(virt_to_pfn(pt_base));
-    unsigned long offset;
-    unsigned long page_size = PAGE_SIZE;
+
+struct set_readonly_par {
+    unsigned long etext;
 #ifdef CONFIG_PARAVIRT
-    int count = 0;
-    int rc;
+    unsigned int count;
 #endif
+};
-    printk("setting %p-%p readonly\n", text, etext);
+static int set_readonly_func(unsigned long va, unsigned int lvl, bool is_leaf,
+                             pgentry_t *pte, void *par)
+{
+    struct set_readonly_par *ro = par;
-    while ( start_address + page_size <= end_address )
-    {
-        tab = pt_base;
-        mfn = pfn_to_mfn(virt_to_pfn(pt_base));
+    if ( !is_leaf )
+        return 0;
-#if defined(__x86_64__)
-        offset = l4_table_offset(start_address);
-        page = tab[offset];
-        mfn = pte_to_mfn(page);
-        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
-        offset = l3_table_offset(start_address);
-        page = tab[offset];
-        mfn = pte_to_mfn(page);
-        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-        offset = l2_table_offset(start_address);
-        if ( !(tab[offset] & _PAGE_PSE) )
-        {
-            page = tab[offset];
-            mfn = pte_to_mfn(page);
-            tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+    if ( va + (1UL << ptdata[lvl].shift) > ro->etext )
+        return 1;
-            offset = l1_table_offset(start_address);
-        }
+    if ( va == (unsigned long)&shared_info )
+    {
+        printk("skipped %lx\n", va);
+        return 0;
+    }
-        if ( start_address != (unsigned long)&shared_info )
-        {
 #ifdef CONFIG_PARAVIRT
-            mmu_updates[count].ptr =
-                ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
-            mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
-            count++;
+    mmu_updates[ro->count].ptr = virt_to_mach(pte);
+    mmu_updates[ro->count].val = *pte & ~_PAGE_RW;
+    ro->count++;
+
+    if ( ro->count == L1_PAGETABLE_ENTRIES ||
+         va + 2 * PAGE_SIZE > ro->etext )
+    {
+        if ( HYPERVISOR_mmu_update(mmu_updates, ro->count, NULL,
+                                   DOMID_SELF) < 0 )
+        {
+            printk("ERROR: set_readonly(): PTE could not be updated\n");
+            do_exit();
+        }
+        ro->count = 0;
+    }
 #else
-            tab[offset] &= ~_PAGE_RW;
+    *pte &= ~_PAGE_RW;
 #endif
-        }
-        else
-            printk("skipped %lx\n", start_address);
-        start_address += page_size;
+    return 0;
+}
 #ifdef CONFIG_PARAVIRT
-        if ( count == L1_PAGETABLE_ENTRIES ||
-             start_address + page_size > end_address )
-        {
-            rc = HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF);
-            if ( rc < 0 )
-            {
-                printk("ERROR: set_readonly(): PTE could not be updated\n");
-                do_exit();
-            }
-            count = 0;
-        }
-#else
-        if ( start_address == (1UL << L2_PAGETABLE_SHIFT) )
-            page_size = 1UL << L2_PAGETABLE_SHIFT;
-#endif
-    }
+static void tlb_flush(void)
+{
+    mmuext_op_t op = { .cmd = MMUEXT_TLB_FLUSH_ALL };
+    int count;
-#ifdef CONFIG_PARAVIRT
-    {
-        mmuext_op_t op = {
-            .cmd = MMUEXT_TLB_FLUSH_ALL,
-        };
-        int count;
-        HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
-    }
+    HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
+}
 #else
+static void tlb_flush(void)
+{
     write_cr3((unsigned long)pt_base);
+}
 #endif
+
+static void set_readonly(void *text, void *etext)
+{
+    struct set_readonly_par setro = { .etext = (unsigned long)etext };
+    unsigned long start_address = PAGE_ALIGN((unsigned long)text);
+
+    printk("setting %p-%p readonly\n", text, etext);
+    walk_pt(start_address, setro.etext, set_readonly_func, &setro);
+    tlb_flush();
}
/*
Instead of having another copy of a page table walk in set_readonly(), just
use walk_pt(). As it will be needed later anyway, split out the TLB flushing
into a dedicated function.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 arch/x86/mm.c | 119 +++++++++++++++++++++-----------------------------
 1 file changed, 50 insertions(+), 69 deletions(-)
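Not part of the patch, just an illustration for review: the sketch below shows the
callback-driven walk pattern that set_readonly_func() plugs into, reduced to a toy
two-level table of integers so it compiles standalone with a plain C compiler. The
names toy_walk(), toy_cb_t, clear_rw() and RO_FLAG are hypothetical stand-ins for
walk_pt(), its callback type, set_readonly_func() and _PAGE_RW; the real walk_pt()
descends the live x86 page tables rather than a toy array.

#include <stdbool.h>
#include <stdio.h>

#define ENTRIES 4
#define RO_FLAG 0x1u                 /* stand-in for _PAGE_RW */

/* Callback shape mirroring the one set_readonly_func() implements. */
typedef int (*toy_cb_t)(unsigned long va, unsigned int lvl, bool is_leaf,
                        unsigned int *pte, void *par);

struct toy_ctx {
    unsigned long limit;             /* analogous to set_readonly_par.etext */
};

/* Walk a toy two-level table, calling cb on every entry; non-zero aborts. */
static int toy_walk(unsigned int tbl[ENTRIES][ENTRIES], toy_cb_t cb, void *par)
{
    for ( unsigned int i = 0; i < ENTRIES; i++ )
    {
        if ( cb(i * ENTRIES, 1, false, &tbl[i][0], par) )
            return 1;
        for ( unsigned int j = 0; j < ENTRIES; j++ )
            if ( cb(i * ENTRIES + j, 0, true, &tbl[i][j], par) )
                return 1;
    }
    return 0;
}

/* Mirrors the shape of set_readonly_func(): leaves only, stop past the limit. */
static int clear_rw(unsigned long va, unsigned int lvl, bool is_leaf,
                    unsigned int *pte, void *par)
{
    struct toy_ctx *ctx = par;

    if ( !is_leaf )
        return 0;                    /* nothing to do on non-leaf entries */
    if ( va >= ctx->limit )
        return 1;                    /* past the range: abort the walk */
    *pte &= ~RO_FLAG;                /* "clear the writable bit" */
    return 0;
}

int main(void)
{
    unsigned int table[ENTRIES][ENTRIES];
    struct toy_ctx ctx = { .limit = 6 };

    for ( unsigned int i = 0; i < ENTRIES; i++ )
        for ( unsigned int j = 0; j < ENTRIES; j++ )
            table[i][j] = RO_FLAG;

    toy_walk(table, clear_rw, &ctx);

    for ( unsigned int i = 0; i < ENTRIES; i++ )
        for ( unsigned int j = 0; j < ENTRIES; j++ )
            printf("%u%c", table[i][j], j == ENTRIES - 1 ? '\n' : ' ');

    return 0;
}

The property the toy preserves is the return-value contract: a non-zero return from
the callback aborts the walk, which is how set_readonly_func() stops as soon as a
leaf mapping would extend past etext.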