Log when an interrupt handler takes more than 3ms to complete.

This doesn't appear to add much overhead, and could help narrow down future
packet loss problems.  At the moment, on our platform, the only IRQ that
seems to have this problem is SATA.  Someone should probably fix that at
some point, since there's no obvious reason the SATA IRQ needs to spend
milliseconds with interrupts disabled.
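
With this change, a slow handler shows up in the kernel log like this
(the IRQ number and timing below are made up for illustration):

  slowirq 22 (4187us)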

Change-Id: I1c36f72957bbcac59bdcc0082a09b995daee553f
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 87c8be5..89e772b 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -34,6 +34,7 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
+#include <linux/ratelimit.h>
 
 #include <asm/exception.h>
 #include <asm/system.h>
@@ -71,6 +72,8 @@
 void handle_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
+	ktime_t kstart, kend, kdiff;
+	unsigned int usdiff;
 
 	irq_enter();
 
@@ -78,6 +81,7 @@
 	 * Some hardware gives randomly wrong interrupts.  Rather
 	 * than crashing, do something sensible.
 	 */
+	kstart = ktime_get();
 	if (unlikely(irq >= nr_irqs)) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING "Bad IRQ%u\n", irq);
@@ -85,6 +89,11 @@
 	} else {
 		generic_handle_irq(irq);
 	}
+	kend = ktime_get();
+	kdiff = ktime_sub(kend, kstart);
+	usdiff = ktime_to_us(kdiff);
+	if (usdiff >= 3000)
+		pr_notice_ratelimited("slowirq %u (%uus)\n", irq, usdiff);
 
 	/* AT91 specific workaround */
 	irq_finish(irq);
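
For reference, here is a minimal standalone sketch of the same timing
pattern using the ktime helpers this patch relies on (ktime_get(),
ktime_sub(), ktime_to_us()); the function name and callback are
illustrative only, not part of this change:

	#include <linux/ktime.h>
	#include <linux/printk.h>

	/* Run fn() and report when it takes 3ms (3000us) or longer. */
	static void timed_call_example(void (*fn)(void))
	{
		ktime_t start;
		s64 us;

		start = ktime_get();
		fn();
		us = ktime_to_us(ktime_sub(ktime_get(), start));

		if (us >= 3000)
			pr_notice("slow call (%lldus)\n", us);
	}

ktime_to_us() performs an exact nanosecond-to-microsecond conversion, so
the value printed matches the unit in the message.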