diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 5724683cef16b0193877d482bc17c6c0a7024c8f..6dbf7d856f80f015793021272f3a4b497339bbb3 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -230,6 +230,64 @@ void xhci_print_registers(struct xhci_hcd *xhci)
 	xhci_print_op_regs(xhci);
 }
 
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+	int i;
+	for (i = 0; i < 4; ++i)
+		xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
+				i*4, trb->generic.field[i]);
+}
+
+/**
+ * Debug a transfer request block (TRB).
+ */
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+	u64	address;
+	u32	type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+
+	switch (type) {
+	case TRB_TYPE(TRB_LINK):
+		xhci_dbg(xhci, "Link TRB:\n");
+		xhci_print_trb_offsets(xhci, trb);
+
+		address = trb->link.segment_ptr[0] +
+			(((u64) trb->link.segment_ptr[1]) << 32);
+		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
+
+		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
+				GET_INTR_TARGET(trb->link.intr_target));
+		xhci_dbg(xhci, "Cycle bit = %u\n",
+				(unsigned int) (trb->link.control & TRB_CYCLE));
+		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
+				(unsigned int) (trb->link.control & LINK_TOGGLE));
+		xhci_dbg(xhci, "No Snoop bit = %u\n",
+				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
+		break;
+	case TRB_TYPE(TRB_TRANSFER):
+		address = trb->trans_event.buffer[0] +
+			(((u64) trb->trans_event.buffer[1]) << 32);
+		/*
+		 * FIXME: look at flags to figure out if it's an address or if
+		 * the data is directly in the buffer field.
+		 */
+		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
+		break;
+	case TRB_TYPE(TRB_COMPLETION):
+		address = trb->event_cmd.cmd_trb[0] +
+			(((u64) trb->event_cmd.cmd_trb[1]) << 32);
+		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
+		xhci_dbg(xhci, "Completion status = %u\n",
+				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
+		xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+		break;
+	default:
+		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
+				(unsigned int) (type >> 10));
+		xhci_print_trb_offsets(xhci, trb);
+		break;
+	}
+}
 
 /**
  * Debug a segment with an xHCI ring.
@@ -261,6 +319,20 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 	}
 }
 
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	xhci_dbg(xhci, "Ring deq = 0x%x (virt), 0x%x (dma)\n",
+			(unsigned int) ring->dequeue,
+			trb_virt_to_dma(ring->deq_seg, ring->dequeue));
+	xhci_dbg(xhci, "Ring deq updated %u times\n",
+			ring->deq_updates);
+	xhci_dbg(xhci, "Ring enq = 0x%x (virt), 0x%x (dma)\n",
+			(unsigned int) ring->enqueue,
+			trb_virt_to_dma(ring->enq_seg, ring->enqueue));
+	xhci_dbg(xhci, "Ring enq updated %u times\n",
+			ring->enq_updates);
+}
+
 /**
  * Debugging for an xHCI ring, which is a queue broken into multiple segments.
  *
@@ -277,6 +349,10 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	struct xhci_segment *first_seg = ring->first_seg;
 	xhci_debug_segment(xhci, first_seg);
 
+	if (!ring->enq_updates && !ring->deq_updates) {
+		xhci_dbg(xhci, "  Ring has not been updated\n");
+		return;
+	}
 	for (seg = first_seg->next; seg != first_seg; seg = seg->next)
 		xhci_debug_segment(xhci, seg);
 }
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 011f478106652990389545cabe1be6e70984f58c..a99c119e9fd91851d3a98ec9bca374b8eed20cae 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -217,6 +217,120 @@ int xhci_init(struct usb_hcd *hcd)
 	return retval;
 }
 
+/*
+ * Called in interrupt context when there might be work
+ * queued on the event ring
+ *
+ * xhci->lock must be held by caller.
+ */
+static void xhci_work(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	/*
+	 * Clear the op reg interrupt status first,
+	 * so we can receive interrupts from other MSI-X interrupters.
+	 * Write 1 to clear the interrupt status.
+	 */
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	temp |= STS_EINT;
+	xhci_writel(xhci, temp, &xhci->op_regs->status);
+	/* FIXME when MSI-X is supported and there are multiple vectors */
+	/* Clear the MSI-X event interrupt status */
+
+	/* Acknowledge the interrupt */
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	temp |= 0x3;
+	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
+	/* Flush posted writes */
+	xhci_readl(xhci, &xhci->ir_set->irq_pending);
+
+	/* FIXME this should be a delayed service routine that clears the EHB */
+	handle_event(xhci);
+
+	/* Clear the event handler busy flag; the event ring should be empty. */
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+	/* Flush posted writes -- FIXME is this necessary? */
+	xhci_readl(xhci, &xhci->ir_set->irq_pending);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	u32 temp, temp2;
+
+	spin_lock(&xhci->lock);
+	/* Check if the xHC generated the interrupt, or the irq is shared */
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
+		spin_unlock(&xhci->lock);
+		return IRQ_NONE;
+	}
+
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	if (temp & STS_FATAL) {
+		xhci_warn(xhci, "WARNING: Host System Error\n");
+		xhci_halt(xhci);
+		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+		return -ESHUTDOWN;
+	}
+
+	xhci_work(xhci);
+	spin_unlock(&xhci->lock);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+void event_ring_work(unsigned long arg)
+{
+	unsigned long flags;
+	u32 temp;
+	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
+
+	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
+	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
+	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
+	xhci->error_bitmask = 0;
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+	xhci_dbg(xhci, "Command ring:\n");
+	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
+	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+
+	if (xhci->noops_submitted != NUM_TEST_NOOPS)
+		if (setup_one_noop(xhci))
+			ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	if (!xhci->zombie)
+		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
+	else
+		xhci_dbg(xhci, "Quit polling the event ring.\n");
+}
+#endif
+
 /*
  * Start the HC after it was halted.
  *
@@ -233,8 +347,9 @@ int xhci_run(struct usb_hcd *hcd)
 {
 	u32 temp;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	xhci_dbg(xhci, "xhci_run\n");
+	void (*doorbell)(struct xhci_hcd *) = NULL;
 
+	xhci_dbg(xhci, "xhci_run\n");
 #if 0	/* FIXME: MSI not setup yet */
 	/* Do this at the very last minute */
 	ret = xhci_setup_msix(xhci);
@@ -243,6 +358,17 @@ int xhci_run(struct usb_hcd *hcd)
 
 	return -ENOSYS;
 #endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+	init_timer(&xhci->event_ring_timer);
+	xhci->event_ring_timer.data = (unsigned long) xhci;
+	xhci->event_ring_timer.function = event_ring_work;
+	/* Poll the event ring */
+	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
+	xhci->zombie = 0;
+	xhci_dbg(xhci, "Setting event ring polling timer\n");
+	add_timer(&xhci->event_ring_timer);
+#endif
+
 	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
 	temp &= 0xffff;
@@ -266,10 +392,24 @@ int xhci_run(struct usb_hcd *hcd)
 			&xhci->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
 
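+	/* Queue a test no-op command now; the doorbell for it is rung
+	 * further down, after CMD_RUN is set. */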
+	if (NUM_TEST_NOOPS > 0)
+		doorbell = setup_one_noop(xhci);
+
 	xhci_dbg(xhci, "Command ring memory map follows:\n");
 	xhci_debug_ring(xhci, xhci->cmd_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+
 	xhci_dbg(xhci, "ERST memory map follows:\n");
 	xhci_dbg_erst(xhci, &xhci->erst);
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_ring(xhci, xhci->event_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
+	xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
 
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
@@ -280,6 +420,8 @@ int xhci_run(struct usb_hcd *hcd)
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	xhci_dbg(xhci, "// @%x = 0x%x\n",
 			(unsigned int) &xhci->op_regs->command, temp);
+	if (doorbell)
+		(*doorbell)(xhci);
 
 	xhci_dbg(xhci, "Finished xhci_run\n");
 	return 0;
@@ -309,6 +451,12 @@ void xhci_stop(struct usb_hcd *hcd)
 #if 0	/* No MSI yet */
 	xhci_cleanup_msix(xhci);
 #endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+	/* Tell the event ring poll function not to reschedule */
+	xhci->zombie = 1;
+	del_timer_sync(&xhci->event_ring_timer);
+#endif
+
 	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@ -346,6 +494,8 @@ void xhci_shutdown(struct usb_hcd *hcd)
 		    xhci_readl(xhci, &xhci->op_regs->status));
 }
 
+/*-------------------------------------------------------------------------*/
+
 int xhci_get_frame(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index be5a05b2021c1c58b1aaedde7e6c993104a66bd0..005d44641d81040ed7a961990f4d4a0a1bdaf8cc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -172,7 +172,9 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	}
 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 	ring->enqueue = ring->first_seg->trbs;
+	ring->enq_seg = ring->first_seg;
 	ring->dequeue = ring->enqueue;
+	ring->deq_seg = ring->first_seg;
 	/* The ring is initialized to 0. The producer must write 1 to the cycle
 	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
 	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
@@ -374,14 +376,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
 
 	/* Set the event ring dequeue address */
-	xhci_dbg(xhci, "// Set ERST dequeue address for ir_set 0 = 0x%x%x\n",
-			xhci->erst.entries[0].seg_addr[1], xhci->erst.entries[0].seg_addr[0]);
-	val = xhci_readl(xhci, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
-	val &= ERST_PTR_MASK;
-	val |= (xhci->erst.entries[0].seg_addr[0] & ~ERST_PTR_MASK);
-	xhci_writel(xhci, val, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
-	xhci_writel(xhci, xhci->erst.entries[0].seg_addr[1],
-			&xhci->run_regs->ir_set[0].erst_dequeue[1]);
+	set_hc_event_deq(xhci);
 	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4015082adf60037e221cff5625ffdc5115b06687..89614af80d20b3511acbe73edaf5dd0aedea50db 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -96,6 +96,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	/*
 	 * generic hardware linkage
 	 */
+	.irq =			xhci_irq,
 	.flags =		HCD_MEMORY | HCD_USB3,
 
 	/*
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644
index 0000000000000000000000000000000000000000..c7e3c7142b9d6cf6ff1afce0541002f34184118e
--- /dev/null
+++ b/drivers/usb/host/xhci-ring.c
@@ -0,0 +1,367 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
+ *    Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
+ *    least one free TRB in the ring.  This is useful if you want to turn that
+ *    into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ *    link TRB, then load the pointer with the address in the link TRB.  If the
+ *    link TRB had its toggle bit set, you may need to update the ring cycle
+ *    state (see cycle bit rules).  You may have to do this multiple times
+ *    until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ *    equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ *    Update enqueue pointer between each write (which may update the ring
+ *    cycle state).
+ * 3. Notify the consumer.  If SW is the producer, it rings the doorbell for
+ *    command and endpoint rings.  If the HC is the producer for the event
+ *    ring, it generates an interrupt according to interrupt modulation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
+ *    the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ *    continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer.  SW is the consumer for the event ring, and it
+ *    updates the event ring dequeue pointer.  HC is the consumer for the
+ *    command and endpoint rings; it generates events on the event ring for
+ *    these.
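+ *
+ * Worked example (illustrative): consider a two-segment ring where each
+ * segment ends in a link TRB and the second link TRB has its toggle bit
+ * set.  The producer starts with PCS = 1, writing cycle = 1 as it hands
+ * over TRBs.  On passing the toggled link TRB back to segment one, PCS
+ * flips to 0, so the second lap writes cycle = 0.  The consumer's CCS
+ * flips at the same link TRB, so the ownership test (cycle bit == CCS)
+ * stays consistent on every lap.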
+ */
+
+#include "xhci.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
+		union xhci_trb *trb)
+{
+	unsigned long offset;
+
+	if (!seg || !trb || (void *) trb < (void *) seg->trbs)
+		return 0;
+	/* offset in bytes, since these are byte-addressable */
+	offset = (unsigned long) trb - (unsigned long) seg->trbs;
+	/* SEGMENT_SIZE in bytes, trbs are 16-byte aligned */
+	if (offset > SEGMENT_SIZE || (offset % sizeof(*trb)) != 0)
+		return 0;
+	return seg->dma + offset;
+}
+
+/* Does this link TRB point to the first segment in a ring,
+ * or was the previous TRB the last TRB on the last segment in the ERST?
+ */
+static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		struct xhci_segment *seg, union xhci_trb *trb)
+{
+	if (ring == xhci->event_ring)
+		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
+			(seg->next == xhci->event_ring->first_seg);
+	else
+		return trb->link.control & LINK_TOGGLE;
+}
+
+/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
+ * segment?  I.e. would the updated event TRB pointer step off the end of the
+ * event seg?
+ */
+static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		struct xhci_segment *seg, union xhci_trb *trb)
+{
+	if (ring == xhci->event_ring)
+		return trb == &seg->trbs[TRBS_PER_SEGMENT];
+	else
+		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ */
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+	union xhci_trb *next = ++(ring->dequeue);
+
+	ring->deq_updates++;
+	/* Update the dequeue pointer further if that was a link TRB or we're at
+	 * the end of an event ring segment (which doesn't have link TRBs)
+	 */
+	while (last_trb(xhci, ring, ring->deq_seg, next)) {
+		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+			ring->cycle_state = (ring->cycle_state ? 0 : 1);
+			if (!in_interrupt())
+				xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
+						(unsigned int) ring,
+						(unsigned int) ring->cycle_state);
+		}
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+		next = ring->dequeue;
+	}
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set.
+ * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
+ */
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+	u32 chain;
+	union xhci_trb *next;
+
+	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+	next = ++(ring->enqueue);
+
+	ring->enq_updates++;
+	/* Update the enqueue pointer further if that was a link TRB or we're at
+	 * the end of an event ring segment (which doesn't have link TRBs)
+	 */
+	while (last_trb(xhci, ring, ring->enq_seg, next)) {
+		if (!consumer) {
+			if (ring != xhci->event_ring) {
+				/* Give this link TRB to the hardware */
+				if (next->link.control & TRB_CYCLE)
+					next->link.control &= (u32) ~TRB_CYCLE;
+				else
+					next->link.control |= (u32) TRB_CYCLE;
+				next->link.control &= TRB_CHAIN;
+				next->link.control |= chain;
+			}
+			/* Toggle the cycle bit after the last ring segment. */
+			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+				ring->cycle_state = (ring->cycle_state ? 0 : 1);
+				if (!in_interrupt())
+					xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
+							(unsigned int) ring,
+							(unsigned int) ring->cycle_state);
+			}
+		}
+		ring->enq_seg = ring->enq_seg->next;
+		ring->enqueue = ring->enq_seg->trbs;
+		next = ring->enqueue;
+	}
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring.  See rules
+ * above.
+ * FIXME: this would be simpler and faster if we just kept track of the number
+ * of free TRBs in a ring.
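+ *
+ * Example: a request for num_trbs = 3 fails when only 3 TRBs sit between
+ * enqueue and dequeue, because the loop checks num_trbs + 1 slots -- one
+ * spare TRB must stay free so that a full ring (enqueue just behind
+ * dequeue) is never mistaken for an empty one (enqueue == dequeue).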
+ */
+static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		unsigned int num_trbs)
+{
+	int i;
+	union xhci_trb *enq = ring->enqueue;
+	struct xhci_segment *enq_seg = ring->enq_seg;
+
+	/* Check if ring is empty */
+	if (enq == ring->dequeue)
+		return 1;
+	/* Make sure there's an extra empty TRB available */
+	for (i = 0; i <= num_trbs; ++i) {
+		if (enq == ring->dequeue)
+			return 0;
+		enq++;
+		while (last_trb(xhci, ring, enq_seg, enq)) {
+			enq_seg = enq_seg->next;
+			enq = enq_seg->trbs;
+		}
+	}
+	return 1;
+}
+
+void set_hc_event_deq(struct xhci_hcd *xhci)
+{
+	u32 temp;
+	dma_addr_t deq;
+
+	deq = trb_virt_to_dma(xhci->event_ring->deq_seg,
+			xhci->event_ring->dequeue);
+	if (deq == 0 && !in_interrupt())
+		xhci_warn(xhci, "WARN something wrong with SW event ring "
+				"dequeue ptr.\n");
+	/* Update HC event ring dequeue pointer */
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	if (!in_interrupt())
+		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
+			&xhci->ir_set->erst_dequeue[0]);
+}
+
+/* Ring the host controller doorbell after placing a command on the ring */
+void ring_cmd_db(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	xhci_dbg(xhci, "// Ding dong!\n");
+	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
+	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+	/* Flush PCI posted writes */
+	xhci_readl(xhci, &xhci->dba->doorbell[0]);
+}
+
+static void handle_cmd_completion(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event)
+{
+	u64 cmd_dma;
+	dma_addr_t cmd_dequeue_dma;
+
+	/* Check completion code */
+	if (GET_COMP_CODE(event->status) != COMP_SUCCESS)
+		xhci_dbg(xhci, "WARN: unsuccessful no-op command\n");
+
+	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+			xhci->cmd_ring->dequeue);
+	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
+	if (cmd_dequeue_dma == 0) {
+		xhci->error_bitmask |= 1 << 4;
+		return;
+	}
+	/* Does the DMA address match our internal dequeue pointer address? */
+	if (cmd_dma != (u64) cmd_dequeue_dma) {
+		xhci->error_bitmask |= 1 << 5;
+		return;
+	}
+	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
+	case TRB_TYPE(TRB_CMD_NOOP):
+		++xhci->noops_handled;
+		break;
+	default:
+		/* Skip over unknown commands on the event ring */
+		xhci->error_bitmask |= 1 << 6;
+		break;
+	}
+	inc_deq(xhci, xhci->cmd_ring, false);
+}
+
+void handle_event(struct xhci_hcd *xhci)
+{
+	union xhci_trb *event;
+
+	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
+		xhci->error_bitmask |= 1 << 1;
+		return;
+	}
+
+	event = xhci->event_ring->dequeue;
+	/* Does the HC or OS own the TRB? */
+	if ((event->event_cmd.flags & TRB_CYCLE) !=
+			xhci->event_ring->cycle_state) {
+		xhci->error_bitmask |= 1 << 2;
+		return;
+	}
+
+	/* FIXME: Only handles command completion events. */
+	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
+	case TRB_TYPE(TRB_COMPLETION):
+		handle_cmd_completion(xhci, &event->event_cmd);
+		break;
+	default:
+		xhci->error_bitmask |= 1 << 3;
+	}
+
+	/* Update SW and HC event ring dequeue pointer */
+	inc_deq(xhci, xhci->event_ring, true);
+	set_hc_event_deq(xhci);
+	/* Are there more items on the event ring? */
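+	/* Tail call: recursion ends when the cycle bit of the next TRB
+	 * no longer matches the ring cycle state. */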
+	handle_event(xhci);
+}
+
+/*
+ * Generic function for queueing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ */
+static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		bool consumer,
+		u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	struct xhci_generic_trb *trb;
+
+	trb = &ring->enqueue->generic;
+	trb->field[0] = field1;
+	trb->field[1] = field2;
+	trb->field[2] = field3;
+	trb->field[3] = field4;
+	inc_enq(xhci, ring, consumer);
+}
+
+/* Generic function for queueing a command TRB on the command ring */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
+		if (!in_interrupt())
+			xhci_err(xhci, "ERR: No room for command on command ring\n");
+		return -ENOMEM;
+	}
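+	/* OR in the ring's cycle state to hand this TRB to the HC */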
+	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+			field4 | xhci->cmd_ring->cycle_state);
+	return 0;
+}
+
+/* Queue a no-op command on the command ring */
+static int queue_cmd_noop(struct xhci_hcd *xhci)
+{
+	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
+}
+
+/*
+ * Place a no-op command on the command ring to test the command and
+ * event ring.
+ */
+void *setup_one_noop(struct xhci_hcd *xhci)
+{
+	if (queue_cmd_noop(xhci) < 0)
+		return NULL;
+	xhci->noops_submitted++;
+	return ring_cmd_db;
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f168fcac59990bb725771a494911685148c14725..66be134b8921f0f1bc9cc0bafa593496bce35b77 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -24,6 +24,7 @@
 #define __LINUX_XHCI_HCD_H
 
 #include <linux/usb.h>
+#include <linux/timer.h>
 
 #include "../core/hcd.h"
 /* Code sharing between pci-quirks and xhci hcd */
@@ -377,6 +378,7 @@ struct intr_reg {
 /* irq_pending bitmasks */
 #define	ER_IRQ_PENDING(p)	((p) & 0x1)
 /* bits 2:31 need to be preserved */
+/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
 #define	ER_IRQ_CLEAR(p)		((p) & 0xfffffffe)
 #define	ER_IRQ_ENABLE(p)	((ER_IRQ_CLEAR(p)) | 0x2)
 #define	ER_IRQ_DISABLE(p)	((ER_IRQ_CLEAR(p)) & ~(0x2))
@@ -699,11 +701,14 @@ struct xhci_link_trb {
 /* control bitfields */
 #define LINK_TOGGLE	(0x1<<1)
 
+/* Command completion event TRB */
+struct xhci_event_cmd {
+	/* Pointer to command TRB, or the value passed by the event data trb */
+	u32 cmd_trb[2];
+	u32 status;
+	u32 flags;
+} __attribute__ ((packed));
 
-union xhci_trb {
-	struct xhci_link_trb		link;
-	struct xhci_transfer_event	trans_event;
-};
 
 /* Normal TRB fields */
 /* transfer_len bitmasks - bits 0:16 */
@@ -737,6 +742,17 @@ union xhci_trb {
 /* Control transfer TRB specific fields */
 #define TRB_DIR_IN		(1<<16)
 
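+/* A TRB viewed as four raw 32-bit words, for type-independent access */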
+struct xhci_generic_trb {
+	u32 field[4];
+} __attribute__ ((packed));
+
+union xhci_trb {
+	struct xhci_link_trb		link;
+	struct xhci_transfer_event	trans_event;
+	struct xhci_event_cmd		event_cmd;
+	struct xhci_generic_trb		generic;
+};
+
 /* TRB bit mask */
 #define	TRB_TYPE_BITMASK	(0xfc00)
 #define TRB_TYPE(p)		((p) << 10)
@@ -825,7 +841,11 @@ struct xhci_segment {
 struct xhci_ring {
 	struct xhci_segment	*first_seg;
 	union  xhci_trb		*enqueue;
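+	/* Segment holding the enqueue pointer; enq_updates is a debug
+	 * counter of how many times enqueue has advanced. */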
+	struct xhci_segment	*enq_seg;
+	unsigned int		enq_updates;
 	union  xhci_trb		*dequeue;
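+	/* Segment holding the dequeue pointer; deq_updates is a debug
+	 * counter of how many times dequeue has advanced. */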
+	struct xhci_segment	*deq_seg;
+	unsigned int		deq_updates;
 	/*
 	 * Write the cycle state into the TRB cycle field to give ownership of
 	 * the TRB to the host controller (if we are the producer), or to check
@@ -861,6 +881,8 @@ struct xhci_erst {
 #define	ERST_SIZE	64
 /* Initial number of event segment rings allocated */
 #define	ERST_ENTRIES	1
+/* Poll every 60 seconds */
+#define	POLL_TIMEOUT	60
 /* XXX: Make these module parameters */
 
 
@@ -907,8 +929,21 @@ struct xhci_hcd {
 	/* DMA pools */
 	struct dma_pool	*device_pool;
 	struct dma_pool	*segment_pool;
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+	/* Poll the rings - for debugging */
+	struct timer_list	event_ring_timer;
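+	/* Set to stop the poll routine from re-arming the timer */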
+	int			zombie;
+#endif
+	/* Statistics */
+	int			noops_submitted;
+	int			noops_handled;
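+	/* Each event-handling error path sets a distinct bit here;
+	 * the debug poll routine prints and then clears it. */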
+	int			error_bitmask;
 };
 
+/* For testing purposes: total number of test no-op commands to submit */
+#define NUM_TEST_NOOPS	0
+
 /* convert between an HCD pointer and the corresponding xhci_hcd */
 static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
 {
@@ -956,9 +991,11 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct intr_reg *ir_set, int set_n
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
 void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
 
 /* xHCI memory management */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -978,5 +1015,13 @@ int xhci_run(struct usb_hcd *hcd);
 void xhci_stop(struct usb_hcd *hcd);
 void xhci_shutdown(struct usb_hcd *hcd);
 int xhci_get_frame(struct usb_hcd *hcd);
+irqreturn_t xhci_irq(struct usb_hcd *hcd);
+
+/* xHCI ring, segment, TRB, and TD functions */
+dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+void ring_cmd_db(struct xhci_hcd *xhci);
+void *setup_one_noop(struct xhci_hcd *xhci);
+void handle_event(struct xhci_hcd *xhci);
+void set_hc_event_deq(struct xhci_hcd *xhci);
 
 #endif /* __LINUX_XHCI_HCD_H */