diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b49d4ddaab93a45f71f559631e2fed66a98c70f6..0166a984709536238d8f97641992038ce17cd27a 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -231,13 +231,16 @@ smp_flush_tlb_all (void)
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+	preempt_disable();
 	/* this happens for the common case of a single-threaded fork():  */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
 	{
 		local_finish_flush_tlb_mm(mm);
+		preempt_enable();
 		return;
 	}
 
+	preempt_enable();
 	/*
 	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
 	 * have been running in the address space.  It's not clear that this is worth the
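The hunk above closes a preemption race: the active_mm test and the "local" flush must run on the same CPU. Below is a sketch of the pre-patch fast path, reconstructed from the context lines of the hunk (the cross-CPU slow path is elided), annotated with where a preemption-driven migration breaks it:

	/*
	 * Pre-patch shape of the fast path, annotated to show the window
	 * the new preempt_disable()/preempt_enable() pair closes.
	 */
	void
	smp_flush_tlb_mm (struct mm_struct *mm)
	{
		/* suppose this check passes on CPU 0 ... */
		if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
		{
			/*
			 * ... a preemption here can migrate the task to CPU 1:
			 * the "local" flush below then purges CPU 1's TLB,
			 * while CPU 0, which actually ran mm, keeps its stale
			 * translations.
			 */
			local_finish_flush_tlb_mm(mm);
			return;
		}
		/* slow path: cross-CPU flush (unchanged by this patch) */
	}

With the pair in place, the task cannot change CPUs between the check and the flush, and the slow path is reached with preemption re-enabled.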
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 0096e7e05012705baecbeaf4cbe654b76bbed767..e3e5fededb04c4c63301afd1e309c1f022d749b0 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -132,6 +132,9 @@ reload_context (mm_context_t context)
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
 }
 
+/*
+ * Must be called with preemption off: reload_context() programs this CPU's region registers.
+ */
 static inline void
 activate_context (struct mm_struct *mm)
 {
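The comment added above documents a real requirement: activate_context() calls reload_context(), which programs the current CPU's region registers, so a migration mid-call would install the context on one CPU while the task continues on another. A hypothetical wrapper showing a caller honoring the contract (the helper name is invented for illustration; in-tree callers such as the context-switch path already run with preemption disabled):

	/*
	 * Hypothetical helper, for illustration only: real callers of
	 * activate_context() (e.g. the context-switch path) are already
	 * non-preemptible when they get here.
	 */
	static inline void
	install_context (struct mm_struct *mm)
	{
		preempt_disable();	/* pin to this CPU ... */
		activate_context(mm);	/* ... so reload_context() hits our region registers */
		preempt_enable();
	}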