Skip to content

Commit 1de14c3

Browse files
hansendc authored and torvalds committed
x86-32: Fix possible incomplete TLB invalidate with PAE pagetables
This patch attempts to fix: https://bugzilla.kernel.org/show_bug.cgi?id=56461 The symptom is a crash and messages like this: chrome: Corrupted page table at address 34a03000 *pdpt = 0000000000000000 *pde = 0000000000000000 Bad pagetable: 000f [#1] PREEMPT SMP Ingo guesses this got introduced by commit 611ae8e ("x86/tlb: enable tlb flush range support for x86") since that code started to free unused pagetables. On x86-32 PAE kernels, that new code has the potential to free an entire PMD page and will clear one of the four page-directory-pointer-table (aka pgd_t entries). The hardware aggressively "caches" these top-level entries and invlpg does not actually affect the CPU's copy. If we clear one we *HAVE* to do a full TLB flush, otherwise we might continue using a freed pmd page. (note, we do this properly on the population side in pud_populate()). This patch tracks whenever we clear one of these entries in the 'struct mmu_gather', and ensures that we follow up with a full tlb flush. BTW, I disassembled and checked that: if (tlb->fullmm == 0) and if (!tlb->fullmm && !tlb->need_flush_all) generate essentially the same code, so there should be zero impact there to the !PAE case. Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Cc: Peter Anvin <hpa@zytor.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Artem S Tashkinov <t.artem@mailcity.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent bf81710 commit 1de14c3

4 files changed

Lines changed: 15 additions & 2 deletions

File tree

arch/x86/include/asm/tlb.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
 #define tlb_flush(tlb) \
 { \
-	if (tlb->fullmm == 0) \
+	if (!tlb->fullmm && !tlb->need_flush_all) \
 		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
 	else \
 		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \

arch/x86/mm/pgtable.c

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
+	/*
+	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
+	 * entries need a full cr3 reload to flush.
+	 */
+#ifdef CONFIG_X86_PAE
+	tlb->need_flush_all = 1;
+#endif
 	tlb_remove_page(tlb, virt_to_page(pmd));
 }
6370

include/asm-generic/tlb.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,12 @@ struct mmu_gather {
 	unsigned int		need_flush : 1,	/* Did free PTEs */
 				fast_mode  : 1; /* No batching */

-	unsigned int		fullmm;
+	/* we are in the middle of an operation to clear
+	 * a full mm and can make some optimizations */
+	unsigned int		fullmm : 1,
+	/* we have performed an operation which
+	 * requires a complete flush of the tlb */
+				need_flush_all : 1;

 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;

mm/memory.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->mm = mm;

 	tlb->fullmm = fullmm;
+	tlb->need_flush_all = 0;
 	tlb->start = -1UL;
 	tlb->end = 0;
 	tlb->need_flush = 0;

0 commit comments

Comments (0)