[linux-yocto] [PATCH 08/15] arch/powerpc: Address IBM PPC476 Erratum 48

Daniel Dragomir <daniel.dragomir@windriver.com>
Tue Jun 27 08:41:08 PDT 2017


From: John Jacques <john.jacques@intel.com>
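
Work around IBM PPC476 erratum 48 by splitting the common
finish_tlb_load_47x path into separate data (finish_tlb_load_data_47x)
and instruction (finish_tlb_load_instruction_47x) TLB load paths, and
by touching the faulting data address (dcbt on DEAR) once the new data
TLB entry has been written.  The labels bracketing the instruction TLB
miss handler are exported so that machine_check_exception() can treat
a machine check taken inside that handler, with the expected MCSR and
platform (DCR) status bits set, as recoverable: the latched status and
MCSR are cleared and execution resumes.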

Signed-off-by: John Jacques <john.jacques@intel.com>
---
 arch/powerpc/kernel/head_44x.S | 41 ++++++++++++++++++++++++++++-----
 arch/powerpc/kernel/traps.c    | 51 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 86 insertions(+), 6 deletions(-)
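
The recovery test in traps.c depends on taking the addresses of the
two assembly labels; reading the extern variables themselves would
fetch the instruction words stored at those labels.  A minimal sketch
of the range check (pc_in_itlb_miss_47x is an illustrative helper, not
part of the patch):

	extern unsigned int InstructionTLBError47x;
	extern unsigned int finish_tlb_load_instruction_47x;

	/* True if the machine check was taken inside the 47x
	 * instruction TLB miss handler.  Compare the labels'
	 * addresses, not the words stored at them. */
	static inline int pc_in_itlb_miss_47x(unsigned long mcsrr0)
	{
		return (unsigned long)&InstructionTLBError47x < mcsrr0 &&
		       mcsrr0 < (unsigned long)&finish_tlb_load_instruction_47x;
	}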

diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index bb36595..4d737e7 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -632,8 +632,8 @@ finish_tlb_load_44x:
 
 	andc.	r13,r13,r12		/* Check permission */
 
-	 /* Jump to common tlb load */
-	beq	finish_tlb_load_47x
+	 /* Jump to tlb data load */
+	beq	finish_tlb_load_data_47x
 
 2:	/* The bailout.  Restore registers to pre-exception conditions
 	 * and call the heavyweights to help us out.
@@ -652,6 +652,7 @@ finish_tlb_load_44x:
 	 * information from different registers and bailout
 	 * to a different point.
 	 */
+	.globl InstructionTLBError47x
 	START_EXCEPTION(InstructionTLBError47x)
 	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
 	mtspr	SPRN_SPRG_WSCRATCH1,r11
@@ -715,8 +716,8 @@ finish_tlb_load_44x:
 
 	andc.	r13,r13,r12		/* Check permission */
 
-	/* Jump to common TLB load point */
-	beq	finish_tlb_load_47x
+	/* Jump to TLB instruction load point */
+	beq	finish_tlb_load_instruction_47x
 
 2:	/* The bailout.  Restore registers to pre-exception conditions
 	 * and call the heavyweights to help us out.
@@ -730,7 +731,8 @@ finish_tlb_load_44x:
 	b	InstructionStorage
 
 /*
- * Both the instruction and data TLB miss get to this
+ * An instruction TLB miss gets to the finish_tlb_load_instruction_47x
+ * and a data TLB miss gets to the finish_tlb_load_data_47x
  * point to load the TLB.
  * 	r10 - free to use
  * 	r11 - PTE high word value
@@ -739,7 +741,34 @@ finish_tlb_load_44x:
  *	MMUCR - loaded with proper value when we get here
  *	Upon exit, we reload everything and RFI.
  */
-finish_tlb_load_47x:
+finish_tlb_load_data_47x:
+	/* Combine RPN & ERPN and write WS 1 */
+	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
+	tlbwe	r11,r13,1
+
+	/* And make up word 2 */
+	li	r10,0xf85			/* Mask to apply from PTE */
+	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
+	and	r11,r12,r10			/* Mask PTE bits to keep */
+	andi.	r10,r12,_PAGE_USER		/* User page ? */
+	beq	1f				/* nope, leave U bits empty */
+	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
+1:	tlbwe	r11,r13,2
+
+	mfspr   r10,SPRN_DEAR		/* faulting data address */
+	dcbt    0,r10			/* touch it so the new TLB entry is exercised */
+
+	/* Done...restore registers and get out of here.
+	 */
+	mfspr	r11, SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG_RSCRATCH3
+	mfspr	r12, SPRN_SPRG_RSCRATCH2
+	mfspr	r11, SPRN_SPRG_RSCRATCH1
+	mfspr	r10, SPRN_SPRG_RSCRATCH0
+	rfi
+	.globl finish_tlb_load_instruction_47x
+finish_tlb_load_instruction_47x:
 	/* Combine RPN & ERPN and write WS 1 */
 	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 	tlbwe	r11,r13,1
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 885d154..c282647 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -37,6 +37,10 @@
 #include <linux/ratelimit.h>
 #include <linux/context_tracking.h>
 
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/dcr-native.h>
+
 #include <asm/emulated_ops.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -695,13 +699,60 @@ int machine_check_generic(struct pt_regs *regs)
 }
 #endif /* everything else */
 
+extern unsigned int finish_tlb_load_instruction_47x;	/* head_44x.S labels; */
+extern unsigned int InstructionTLBError47x;		/* only their addresses are used */
+
 void machine_check_exception(struct pt_regs *regs)
 {
 	enum ctx_state prev_state = exception_enter();
 	int recover = 0;
+	unsigned int cpu;
+	unsigned int p2a_status;
+	unsigned int p2a_address;
+	unsigned int l2plbstats1;
+	unsigned int mcsr;
+	unsigned int mcsrr0;
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
+	mcsr = mfspr(SPRN_MCSR);
+	mcsrr0 = mfspr(SPRN_MCSRR0);	/* address the machine check was taken at */
+	p2a_status = mfdcr(0x100C) & 0x00000010;
+	p2a_address = mfdcr(0x1045);
+	p2a_address = (p2a_address << 4) & 0x000FFFF0;
+	cpu = smp_processor_id();
+
+	if (cpu < 4) {
+		mtdcr((cpu + 3) * 256, 0x304);	/* select this core's L2PLBSTATS1 via the indirect DCR pair */
+		l2plbstats1 = mfdcr((cpu + 3) * 256 + 4);
+	} else {
+		mtdcr((cpu + 15) * 256, 0x304);
+		l2plbstats1 = mfdcr((cpu + 15) * 256 + 4);
+	}
+
+	if (((mcsr & 0x80200000) == 0x80200000) &&
+		((unsigned long)&InstructionTLBError47x < mcsrr0) &&
+		(mcsrr0 < (unsigned long)&finish_tlb_load_instruction_47x) &&
+		(p2a_status == 0x00000010) &&
+		(l2plbstats1 == 0x000C0000)) {
+
+		if (cpu < 4) {
+			mtdcr((cpu + 3) * 256 + 4,
+			      0x000C0000);	/* clear the latched L2PLBSTATS1 bits */
+		} else {
+			mtdcr((cpu + 15) * 256 + 4,
+			      0x000C0000);
+		}
+
+		printk(KERN_INFO
+		       "machine_check_exception: Core %d: MCSR=0x%x l2plbstats1=0x%x\n",
+		       cpu, mcsr, l2plbstats1);
+		mtdcr(0x100C, 0xFFFFFFFF);	/* clear p2a status */
+		mtspr(SPRN_MCSR, 0);
+		exception_exit(prev_state);
+		return;
+	}
+
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
 	 * one returns a positive number. However there is existing code
-- 
2.7.4


