Commit c78e710c authored by John David Anglin, committed by Helge Deller

parisc: Purge TLB before setting PTE


The attached change interchanges the order of purging the TLB and
setting the corresponding page table entry.  TLB purges are strongly
ordered.  It occurred to me one night that setting the PTE first might
have subtle ordering issues on SMP machines and cause random memory
corruption.

A TLB lock guards the insertion of user TLB entries.  So after the TLB
is purged, a new entry can't be inserted until the lock is released.
This ensures that the new PTE value is used when the lock is released.
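
For illustration, here is a minimal sketch of the corrected purge-then-set ordering under pa_tlb_lock. The wrapper function name is hypothetical; the lock, purge_tlb_entries() and set_pte() are the helpers touched by this patch, and the body mirrors the patched set_pte_at-style sequence in the diff below.

/*
 * Sketch only: hypothetical wrapper showing the purge-before-set
 * ordering applied throughout this patch.
 */
static inline void sketch_update_pte(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pteval)
{
	unsigned long flags;
	pte_t old_pte;

	spin_lock_irqsave(&pa_tlb_lock, flags);
	old_pte = *ptep;
	/*
	 * Purge first: purges are strongly ordered, and no new user TLB
	 * entry can be inserted while pa_tlb_lock is held.
	 */
	if (pte_inserted(old_pte))
		purge_tlb_entries(mm, addr);
	/*
	 * Only then publish the new PTE, so any TLB fill after the lock
	 * is released uses the new value.
	 */
	set_pte(ptep, pteval);
	spin_unlock_irqrestore(&pa_tlb_lock, flags);
}

The same purge-before-set pattern is applied below to the set_pte_at sequence, ptep_test_and_clear_young(), ptep_get_and_clear() and ptep_set_wrprotect().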

Since making this change, no random segmentation faults have been
observed on the Debian hppa buildd servers.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: <stable@vger.kernel.org> # v3.16+
Signed-off-by: Helge Deller <deller@gmx.de>
parent bc3913a5
@@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 		unsigned long flags;				\
 		spin_lock_irqsave(&pa_tlb_lock, flags);		\
 		old_pte = *ptep;				\
-		set_pte(ptep, pteval);				\
 		if (pte_inserted(old_pte))			\
 			purge_tlb_entries(mm, addr);		\
+		set_pte(ptep, pteval);				\
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
 	} while (0)
@@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);
 		return 0;
 	}
-	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
+	set_pte(ptep, pte_mkold(pte));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return 1;
 }
@@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	spin_lock_irqsave(&pa_tlb_lock, flags);
 	old_pte = *ptep;
-	set_pte(ptep, __pte(0));
 	if (pte_inserted(old_pte))
 		purge_tlb_entries(mm, addr);
+	set_pte(ptep, __pte(0));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return old_pte;
@@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 	spin_lock_irqsave(&pa_tlb_lock, flags);
-	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
+	set_pte(ptep, pte_wrprotect(*ptep));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }