diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 4a5a914b34321bcc69b714ca17223b08bc898edb..90089c14c23d8612673fed024e1a59ae23c98090 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -493,6 +493,7 @@ int __init set_kernel_exec(unsigned long vaddr, int enable)
 		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
 	else
 		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
+	pte_update_defer(&init_mm, vaddr, pte);
 	__flush_tlb_all();
 out:
 	return ret;
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 8cb708a6bed057e764daf33f4eceae5f46bb3b94..7d398f493ddeedfc2d5e826d595028767ae64b76 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -246,6 +246,23 @@ static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return p
 # include <asm/pgtable-2level.h>
 #endif
 
+/*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces.  It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush.  The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
+
+
 /*
  * We only update the dirty/accessed state if we set
  * the dirty bit by hand in the kernel, since the hardware
@@ -258,6 +275,7 @@ static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return p
 do {									\
 	if (dirty) {							\
 		(ptep)->pte_low = (entry).pte_low;			\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
 		flush_tlb_page(vma, address);				\
 	}								\
 } while (0)
@@ -287,6 +305,7 @@ do {									\
 	__dirty = pte_dirty(*(ptep));					\
 	if (__dirty) {							\
 		clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);		\
+		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
 		flush_tlb_page(vma, addr);				\
 	}								\
 	__dirty;							\
@@ -299,6 +318,7 @@ do {									\
 	__young = pte_young(*(ptep));					\
 	if (__young) {							\
 		clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);	\
+		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
 		flush_tlb_page(vma, addr);				\
 	}								\
 	__young;							\
@@ -321,6 +341,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+	pte_update(mm, addr, ptep);
 }
 
 /*
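
Usage note (illustrative only, not part of the patch): the rules in the new
pte_update comment boil down to "direct PTE bit twiddling must notify the
hypervisor, and the deferred form must be followed by a TLB flush while the
same page table lock is still held".  Below is a minimal sketch of a
conforming call site; example_wrprotect_one is a made-up helper name, and the
caller is assumed to already hold the page table lock for ptep.  Everything
it calls (clear_bit, pte_update_defer, flush_tlb_page) is what the hunks
above use.

/*
 * Sketch only: mirrors what the ptep_test_and_clear_* macros above do.
 * The PTE word is changed directly (not via set_pte / pte_clear), so a
 * shadow-mode hypervisor needs to resynchronize; with pte_update_defer
 * the resync piggybacks on the TLB flush, which therefore must happen
 * before the page table lock protecting this PTE is dropped.
 */
static void example_wrprotect_one(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);   /* direct P->P protection change   */
	pte_update_defer(vma->vm_mm, addr, ptep);  /* defer hypervisor notification    */
	flush_tlb_page(vma, addr);                 /* flush while the lock is held     */
}

Where no flush follows immediately (as in ptep_set_wrprotect above), the
non-deferred pte_update form is the one to use.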