diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index 4c6dd8da1e5daf2f44e90441e03826194d5aee8f..241c34518465d9404f56b786a86554a1e89824a3 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -17,13 +17,8 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
 	unsigned int kernel_stack_usage;
-#ifdef CONFIG_IRQSTACKS
 	unsigned int irq_stack_usage;
-	unsigned int irq_stack_counter;
-#endif
-#endif
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
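
After this hunk the two stack high-water counters are plain members of the per-CPU statistics structure instead of being hidden behind CONFIG_DEBUG_STACKOVERFLOW/CONFIG_IRQSTACKS, and only the separate irq_stack_counter statistic is dropped. A sketch of the resulting type, assuming the fields not visible in the hunk and the usual irq_cpustat_t typedef tail are unchanged:

typedef struct {
	unsigned int __softirq_pending;
	unsigned int kernel_stack_usage;	/* kernel stack high-water mark */
	unsigned int irq_stack_usage;		/* IRQ stack high-water mark */
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
	/* ... further counters as in the existing header ... */
#endif
} ____cacheline_aligned irq_cpustat_t;
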
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index cfbc43929cf6d20e2e53df0be823b8166ff576c0..cc2290a3cace1e0d65f8e69be0138c0c09cc8b2a 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -17,7 +17,6 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
-
 #endif /* __ASSEMBLY__ */
 
 /*
@@ -58,26 +57,6 @@
 
 #ifndef __ASSEMBLY__
 
-/*
- * IRQ STACK - used for irq handler
- */
-#ifdef __KERNEL__
-
-#include <linux/spinlock_types.h>
-
-#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
-
-union irq_stack_union {
-	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
-	raw_spinlock_t lock;
-};
-
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
-
-void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
-
-#endif /* __KERNEL__ */
-
 /*
  * Data detected about CPUs at boot time which is the same for all CPU's.
  * HP boxes are SMP - ie identical processors.
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 9c2d953f3de5b3f380fce7568fb85a4ea81b05e3..2e6443b1e9228426ba94d8602c8956c5daec93c2 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -27,11 +27,11 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
 #include <asm/io.h>
 
 #include <asm/smp.h>
+#include <asm/ldcw.h>
 
 #undef PARISC_IRQ_CR16_COUNTS
 
@@ -172,10 +172,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
 	seq_puts(p, "  Interrupt stack usage\n");
-	seq_printf(p, "%*s: ", prec, "ISC");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
-	seq_puts(p, "  Interrupt stack usage counter\n");
 # endif
 #endif
 #ifdef CONFIG_SMP
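
The hunks below rely on two primitives from the newly included <asm/ldcw.h>: __ldcw(), a wrapper around the PA-RISC ldcw instruction, which atomically loads a word and clears it (by convention 1 means "free", 0 means "taken"), and __ldcw_align(), which returns a suitably aligned lock word inside an over-sized array, since ldcw traditionally requires its operand to be 16-byte aligned. A simplified sketch of what the header is assumed to provide (not a verbatim copy):

#define __PA_LDCW_ALIGNMENT	16	/* classic ldcw needs a 16-byte aligned word;
					   PA 2.0's ldcw,co relaxes this */

#define __ldcw_align(a) ({						\
	unsigned long __ret = (unsigned long) &(a)->lock[0];		\
	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)			\
		& ~(__PA_LDCW_ALIGNMENT - 1);				\
	(volatile unsigned int *) __ret;				\
})

#define __ldcw(a) ({							\
	unsigned int __ret;						\
	__asm__ __volatile__("ldcw 0(%1),%0"				\
		: "=r" (__ret) : "r" (a) : "memory");			\
	__ret;								\
})
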
@@ -384,6 +380,24 @@ static inline int eirr_to_irq(unsigned long eirr)
 	return (BITS_PER_LONG - bit) + TIMER_IRQ;
 }
 
+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+	volatile unsigned int slock[4];
+	volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+		.slock = { 1,1,1,1 },
+	};
+#endif
+
+
 int sysctl_panic_on_stackoverflow = 1;
 
 static inline void stack_overflow_check(struct pt_regs *regs)
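
The union makes the same per-CPU memory serve two purposes: the 16 KiB stack itself and the ldcw lock word guarding it. slock[4] reserves 16 bytes so that __ldcw_align() can always find a 16-byte aligned word (exposed through the lock[] alias), and all four words are initialised to 1 because 1 is the "free" value for ldcw and it is not known in advance which of them the alignment will pick. A minimal usage sketch with hypothetical helper names (irq_stack_trylock()/irq_stack_unlock() are illustrations only, not part of the patch):

/* Hypothetical helpers, for illustration only. */
static int irq_stack_trylock(union irq_stack_union *u,
			     volatile unsigned int **lockp)
{
	/* pick the 16-byte aligned word inside slock[]/lock[] */
	*lockp = __ldcw_align(u);

	/* ldcw returns the previous value: non-zero means we just took it */
	return __ldcw(*lockp) != 0;
}

static void irq_stack_unlock(volatile unsigned int *lockp)
{
	/* storing the "free" value 1 releases the word again */
	*lockp = 1;
}
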
@@ -450,27 +464,26 @@ panic_check:
 }
 
 #ifdef CONFIG_IRQSTACKS
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
-		.lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
-	};
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
 
 static void execute_on_irq_stack(void *func, unsigned long param1)
 {
 	union irq_stack_union *union_ptr;
 	unsigned long irq_stack;
-	raw_spinlock_t *irq_stack_in_use;
+	volatile unsigned int *irq_stack_in_use;
 
 	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
 	irq_stack = (unsigned long) &union_ptr->stack;
-	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
+	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
 			 64); /* align for stack frame usage */
 
 	/* We may be called recursive. If we are already using the irq stack,
 	 * just continue to use it. Use spinlocks to serialize
 	 * the irq stack usage.
 	 */
-	irq_stack_in_use = &union_ptr->lock;
-	if (!raw_spin_trylock(irq_stack_in_use)) {
+	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+	if (!__ldcw(irq_stack_in_use)) {
 		void (*direct_call)(unsigned long p1) = func;
 
 		/* We are using the IRQ stack already.
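
The trylock here only serves as a recursion marker, and on !CONFIG_SMP kernels (without spinlock debugging) the generic UP implementation of raw_spin_trylock() unconditionally reports success, so nested interrupts on the IRQ stack were never detected there; presumably that is why the patch switches to a bare ldcw, which behaves the same on UP and SMP. Roughly what the UP fallback boils down to (a sketch, not copied from <linux/spinlock_api_up.h>):

/* Sketch: with CONFIG_SMP=n and no spinlock debugging, a trylock
 * reduces to "disable preemption and report success", so it can never
 * report "IRQ stack already in use".  Hypothetical name for illustration.
 */
#define up_raw_spin_trylock_sketch(lock)	\
({						\
	preempt_disable();			\
	1;	/* always "acquired" */		\
})
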
@@ -482,10 +495,8 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
 	/* This is where we switch to the IRQ stack. */
 	call_on_stack(param1, func, irq_stack);
 
-	__inc_irq_stat(irq_stack_counter);
-
 	/* free up irq stack usage. */
-	do_raw_spin_unlock(irq_stack_in_use);
+	*irq_stack_in_use = 1;
 }
 
 asmlinkage void do_softirq(void)
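
Putting the hunks together, the post-patch flow of execute_on_irq_stack() looks roughly as follows (a simplified sketch of the result; the unchanged nested-interrupt fallback between the hunks is paraphrased, not quoted):

static void execute_on_irq_stack(void *func, unsigned long param1)
{
	union irq_stack_union *union_ptr;
	unsigned long irq_stack;
	volatile unsigned int *irq_stack_in_use;

	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
	irq_stack = (unsigned long) &union_ptr->stack;
	/* keep the ldcw words out of the region handed out as stack */
	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock), 64);

	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
	if (!__ldcw(irq_stack_in_use)) {
		/* IRQ stack already in use on this CPU (nested interrupt):
		 * run the handler on the current stack instead.
		 */
		void (*direct_call)(unsigned long p1) = func;

		direct_call(param1);
		return;
	}

	/* switch stacks; call_on_stack() (entry.S) runs func(param1) there */
	call_on_stack(param1, func, irq_stack);

	/* release: writing the "free" value 1 makes the IRQ stack available */
	*irq_stack_in_use = 1;
}
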