[linux-yocto] [PATCH 26/28] LSI AXM55xx: Fix PMU handler issue

Charlie Paul cpaul.windriver at gmail.com
Fri May 2 12:17:03 PDT 2014


From: David Mercado <david.mercado at windriver.com>

On the AXM55xx, the PMU IRQ lines from all cores in a cluster are
OR'ed together, so a PMU interrupt cannot be attributed to a specific
core. As a workaround, a PMU handler extension in the BSP dynamically
called irq_set_affinity() to rotate the PMU IRQ assignment across the
cluster, keeping perf sampling as accurate as possible. However, with
the introduction of the Axxia GIC driver "slow" bus implementation,
irq_set_affinity() can no longer be called directly from interrupt
context.

Instead, remove the PMU IRQ handler extension from the BSP and perform
the IRQ affinity rotation directly within the Axxia GIC driver, which
can update the distributor's target register with a simple locked
write.
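
For clarity, the rotation is a simple round-robin walk over the
cluster's cores that skips offline ones. The following is a minimal
user-space sketch of that arithmetic, not driver code;
CORES_PER_CLUSTER is fixed at 4 and cpu_online() is stubbed, both
stand-ins for their kernel counterparts:

#include <stdio.h>

#define CORES_PER_CLUSTER 4

/* Stand-in for the kernel's cpu_online(); pretend core 2 is offline. */
static int cpu_online(unsigned int cpu)
{
	return cpu != 2;
}

/*
 * Same arithmetic the driver uses: advance to the next core, wrap
 * around within the cluster, and keep going past offline cores.
 */
static unsigned int next_pmu_target(unsigned int pcpu)
{
	unsigned int cluster = pcpu / CORES_PER_CLUSTER;
	unsigned int next;

	do {
		next = (++pcpu % CORES_PER_CLUSTER) +
			(cluster * CORES_PER_CLUSTER);
	} while (!cpu_online(next));

	return next;
}

int main(void)
{
	unsigned int cpu = 0;
	int i;

	/* From core 0 the rotation visits 1, 3, 0, ... (2 is skipped). */
	for (i = 0; i < 6; i++) {
		cpu = next_pmu_target(cpu);
		printf("PMU IRQ now targets core %u\n", cpu);
	}
	return 0;
}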

Signed-off-by: David Mercado <david.mercado at windriver.com>
---
 arch/arm/mach-axxia/axxia-gic.c |   25 +++++++++++++++++++++++++
 arch/arm/mach-axxia/axxia.c     |   37 -------------------------------------
 2 files changed, 25 insertions(+), 37 deletions(-)
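
Reviewer note: the retarget itself is a one-hot CPU mask written into
the per-interrupt byte of the distributor's target register
(GIC_DIST_TARGET, i.e. GICD_ITARGETSRn, one byte per interrupt). A
rough user-space model of that write, with the register file mocked as
a byte array and the IRQ_PMU number chosen arbitrarily for
illustration:

#include <stdint.h>
#include <stdio.h>

#define CORES_PER_CLUSTER 4
#define IRQ_PMU 40	/* hypothetical SPI number, illustration only */

/*
 * Mock of the distributor's target registers: one byte per interrupt,
 * each holding a bitmask of the CPU interfaces it is routed to.
 */
static uint8_t gic_dist_target[1020];

/* Route the PMU SPI to a single core by writing its one-hot mask. */
static void retarget_pmu(unsigned int next)
{
	uint8_t mask = 0x01 << (next % CORES_PER_CLUSTER);

	gic_dist_target[IRQ_PMU] = mask;
}

int main(void)
{
	unsigned int core;

	for (core = 0; core < CORES_PER_CLUSTER; core++) {
		retarget_pmu(core);
		printf("core %u -> target mask 0x%02x\n",
		       core, (unsigned int)gic_dist_target[IRQ_PMU]);
	}
	return 0;
}

In the driver the equivalent writeb_relaxed() is done under
irq_controller_lock, since the target bytes are shared state touched
from multiple CPUs.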

diff --git a/arch/arm/mach-axxia/axxia-gic.c b/arch/arm/mach-axxia/axxia-gic.c
index 701d65f..c716dc9 100644
--- a/arch/arm/mach-axxia/axxia-gic.c
+++ b/arch/arm/mach-axxia/axxia-gic.c
@@ -907,6 +907,10 @@ asmlinkage void __exception_irq_entry axxia_gic_handle_irq(struct pt_regs *regs)
 	u32 irqstat, irqnr;
 	struct gic_chip_data *gic = &gic_data;
 	void __iomem *cpu_base = gic_data_cpu_base(gic);
+	void __iomem *dist_base = gic_data_dist_base(gic);
+	u32 pcpu = cpu_logical_map(smp_processor_id());
+	u32 cluster = pcpu / CORES_PER_CLUSTER;
+	u32 next, mask;
 
 	do {
 		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
@@ -967,6 +971,27 @@ asmlinkage void __exception_irq_entry axxia_gic_handle_irq(struct pt_regs *regs)
 				writel_relaxed(irqnr, cpu_base + GIC_CPU_EOI);
 				break;
 
+			case IRQ_PMU:
+				/*
+				 * The PMU IRQ line is OR'ed among all cores
+				 * within a cluster, so no way to tell which
+				 * core actually generated this interrupt.
+				 * Therefore, rotate PMU IRQ affinity to allow
+				 * perf to work as accurately as possible. Skip
+				 * over offline cpus.
+				 */
+				do {
+					next = (++pcpu % CORES_PER_CLUSTER) +
+						(cluster * CORES_PER_CLUSTER);
+				} while (!cpu_online(next));
+
+				mask = 0x01 << (next % CORES_PER_CLUSTER);
+				raw_spin_lock(&irq_controller_lock);
+				writeb_relaxed(mask, dist_base +
+						GIC_DIST_TARGET + IRQ_PMU);
+				raw_spin_unlock(&irq_controller_lock);
+				/* Fall through ... */
+
 			default:
 				/* External interrupt */
 				handle_IRQ(irqnr, regs);
diff --git a/arch/arm/mach-axxia/axxia.c b/arch/arm/mach-axxia/axxia.c
index af85305..81d39d1 100644
--- a/arch/arm/mach-axxia/axxia.c
+++ b/arch/arm/mach-axxia/axxia.c
@@ -192,48 +192,11 @@ static struct resource axxia_pmu_resources[] = {
 	},
 };
 
-/*
- * The PMU IRQ lines of four cores are wired together into a single interrupt.
- * Bounce the interrupt to other cores if it's not ours.
- */
-#define CORES_PER_CLUSTER  4
-static irqreturn_t axxia_pmu_handler(int irq, void *dev, irq_handler_t handler)
-{
-	irqreturn_t ret = handler(irq, dev);
-	int cpu = smp_processor_id();
-	int cluster = cpu / CORES_PER_CLUSTER;
-	int other;
-
-	if (ret == IRQ_NONE) {
-
-		/* Look until we find another cpu that's online. */
-		do {
-			other = (++cpu % CORES_PER_CLUSTER) +
-				(cluster * CORES_PER_CLUSTER);
-		} while (!cpu_online(other));
-
-		irq_set_affinity(irq, cpumask_of(other));
-	}
-
-	/*
-	 * We should be able to get away with the amount of IRQ_NONEs we give,
-	 * while still having the spurious IRQ detection code kick in if the
-	 * interrupt really starts hitting spuriously.
-	 */
-	return ret;
-}
-
-static struct arm_pmu_platdata axxia_pmu_platdata = {
-	.handle_irq		= axxia_pmu_handler,
-};
-
-
 static struct platform_device pmu_device = {
 	.name			= "arm-pmu",
 	.id			= ARM_PMU_DEVICE_CPU,
 	.num_resources		= ARRAY_SIZE(axxia_pmu_resources),
 	.resource		= axxia_pmu_resources,
-	.dev.platform_data	= &axxia_pmu_platdata,
 };
 
 static inline void
-- 
1.7.9.5


