From 618e9ed98aed924a1fc664eb6522db4a5e927043 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Thu, 9 Feb 2006 17:21:53 -0800
Subject: [PATCH] [SPARC64]: Hypervisor TSB context switching.

Instead of relying on boot-time instruction patching, have
__tsb_context_switch() test tlb_type at run time.  On sun4v the new
TSB base is written into the scratchpad registers and then handed to
the hypervisor via the mmu_tsb_ctxnon0 fast trap, using a hv_tsb_descr
that setup_tsb_params() now fills in inside the mm context and whose
physical address tsb_context_switch() passes down as a new fifth
argument.

As on cheetah_plus, the hypervisor TSB is accessed via its physical
address, so no locked TLB entry is needed for it.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/kernel/tsb.S         | 42 +++++++++++++++++----------
 arch/sparc64/mm/tsb.c             | 48 ++++++++++++++++++++++++++++++-
 include/asm-sparc64/mmu.h         | 16 ++++++-----
 include/asm-sparc64/mmu_context.h | 10 +++++--
 4 files changed, 90 insertions(+), 26 deletions(-)
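
For reference, a rough sketch of the TSB descriptor that
setup_tsb_params() fills in below.  The exact field layout is assumed
here (it is not part of this patch) to match the hv_tsb_descr
definition in asm-sparc64/hypervisor.h and the sun4v TSB descriptor
format; each TSB entry is a 16-byte tag/TTE pair, which is why
num_ttes is computed as tsb_bytes / 16.

	/* Sketch only -- the authoritative definition lives in
	 * asm-sparc64/hypervisor.h.
	 */
	struct hv_tsb_descr {
		unsigned short	pgsz_idx;	/* page size used to index the TSB */
		unsigned short	assoc;		/* associativity, 1 == direct      */
		unsigned int	num_ttes;	/* entries (16 bytes each)         */
		unsigned int	ctx_idx;	/* context index (0 here)          */
		unsigned int	pgsz_mask;	/* page sizes stored in this TSB   */
		unsigned long	tsb_base;	/* physical address of the TSB     */
		unsigned long	resv;		/* reserved, must be zero          */
	};

With the default 8K base page size, for example, an 8K (one page) TSB
would end up with num_ttes = 8192 / 16 = 512, pgsz_idx = HV_PGSZ_IDX_8K
and pgsz_mask = HV_PGSZ_MASK_8K.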

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index c848c8847cdcbb..a53ec6fb769766 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -4,6 +4,7 @@
  */
 
 #include <asm/tsb.h>
+#include <asm/hypervisor.h>
 
 	.text
 	.align	32
@@ -233,6 +234,7 @@ tsb_flush:
 	 * %o1:	TSB register value
 	 * %o2:	TSB virtual address
 	 * %o3:	TSB mapping locked PTE
+	 * %o4:	Hypervisor TSB descriptor physical address
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -251,30 +253,40 @@ __tsb_context_switch:
 	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-661:	mov	TSB_REG, %g1
-	stxa	%o1, [%g1] ASI_DMMU
-	.section .sun4v_2insn_patch, "ax"
-	.word	661b
+	sethi	%hi(tlb_type), %g1
+	lduw	[%g1 + %lo(tlb_type)], %g1
+	cmp	%g1, 3
+	bne,pt	%icc, 1f
+	 nop
+
+	/* Hypervisor TSB switch. */
 	mov	SCRATCHPAD_UTSBREG1, %g1
 	stxa	%o1, [%g1] ASI_SCRATCHPAD
-	.previous
+	mov	-1, %g2
+	mov	SCRATCHPAD_UTSBREG2, %g1
+	stxa	%g2, [%g1] ASI_SCRATCHPAD
 
-	membar	#Sync
+	mov	HV_FAST_MMU_TSB_CTXNON0, %o0
+	mov	1, %o1
+	mov	%o4, %o2
+	ta	HV_FAST_TRAP
+
+	ba,pt	%xcc, 9f
+	 nop
 
-661:	stxa	%o1, [%g1] ASI_IMMU
+	/* SUN4U TSB switch.  */
+1:	mov	TSB_REG, %g1
+	stxa	%o1, [%g1] ASI_DMMU
+	membar	#Sync
+	stxa	%o1, [%g1] ASI_IMMU
 	membar	#Sync
-	.section .sun4v_2insn_patch, "ax"
-	.word	661b
-	nop
-	nop
-	.previous
 
-	brz	%o2, 9f
+2:	brz	%o2, 9f
 	 nop
 
-	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %o4
+	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
 	mov	TLB_TAG_ACCESS, %g1
-	lduw	[%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
+	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
 	stxa	%o2, [%g1] ASI_DMMU
 	membar	#Sync
 	sllx	%g2, 3, %g2
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 2cc8e6528c6332..6ae2a5a702cb89 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -149,7 +149,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		BUG();
 	};
 
-	if (tlb_type == cheetah_plus) {
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
 		/* Physical mapping, no locked TLB entry for TSB.  */
 		tsb_reg |= tsb_paddr;
 
@@ -166,6 +166,52 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		mm->context.tsb_map_pte = tte;
 	}
 
+	/* Setup the Hypervisor TSB descriptor.  */
+	if (tlb_type == hypervisor) {
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+
+		switch (PAGE_SIZE) {
+		case 8192:
+		default:
+			hp->pgsz_idx = HV_PGSZ_IDX_8K;
+			break;
+
+		case 64 * 1024:
+			hp->pgsz_idx = HV_PGSZ_IDX_64K;
+			break;
+
+		case 512 * 1024:
+			hp->pgsz_idx = HV_PGSZ_IDX_512K;
+			break;
+
+		case 4 * 1024 * 1024:
+			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+			break;
+		};
+		hp->assoc = 1;
+		hp->num_ttes = tsb_bytes / 16;
+		hp->ctx_idx = 0;
+		switch (PAGE_SIZE) {
+		case 8192:
+		default:
+			hp->pgsz_mask = HV_PGSZ_MASK_8K;
+			break;
+
+		case 64 * 1024:
+			hp->pgsz_mask = HV_PGSZ_MASK_64K;
+			break;
+
+		case 512 * 1024:
+			hp->pgsz_mask = HV_PGSZ_MASK_512K;
+			break;
+
+		case 4 * 1024 * 1024:
+			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+			break;
+		};
+		hp->tsb_base = tsb_paddr;
+		hp->resv = 0;
+	}
 }
 
 /* The page tables are locked against modifications while this
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 55e622711b9681..473d990848ee91 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -4,6 +4,7 @@
 #include <linux/config.h>
 #include <asm/page.h>
 #include <asm/const.h>
+#include <asm/hypervisor.h>
 
 /*
  * For the 8k pagesize kernel, use only 10 hw context bits to optimize some
@@ -101,13 +102,14 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte
 extern void tsb_flush(unsigned long ent, unsigned long tag);
 
 typedef struct {
-	unsigned long	sparc64_ctx_val;
-	struct tsb	*tsb;
-	unsigned long	tsb_rss_limit;
-	unsigned long	tsb_nentries;
-	unsigned long	tsb_reg_val;
-	unsigned long	tsb_map_vaddr;
-	unsigned long	tsb_map_pte;
+	unsigned long		sparc64_ctx_val;
+	struct tsb		*tsb;
+	unsigned long		tsb_rss_limit;
+	unsigned long		tsb_nentries;
+	unsigned long		tsb_reg_val;
+	unsigned long		tsb_map_vaddr;
+	unsigned long		tsb_map_pte;
+	struct hv_tsb_descr	tsb_descr;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 2760353591ab10..eb660b1609c4d7 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -22,14 +22,18 @@ extern void get_new_mmu_context(struct mm_struct *mm);
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
-extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg,
-				 unsigned long tsb_vaddr, unsigned long tsb_pte);
+extern void __tsb_context_switch(unsigned long pgd_pa,
+				 unsigned long tsb_reg,
+				 unsigned long tsb_vaddr,
+				 unsigned long tsb_pte,
+				 unsigned long tsb_descr_pa);
 
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
 			     mm->context.tsb_map_vaddr,
-			     mm->context.tsb_map_pte);
+			     mm->context.tsb_map_pte,
+			     __pa(&mm->context.tsb_descr));
 }
 
 extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags);
-- 
GitLab
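
The fast-trap convention the new sun4v path in tsb.S relies on is: API
function number in %o0, arguments in %o1 and %o2, then "ta HV_FAST_TRAP"
to enter the hypervisor (the status the hypervisor leaves in %o0 is
ignored by the patch).  A hypothetical C-level wrapper spelling this
out (the name hv_mmu_tsb_ctxnon0 and the clobber handling are
assumptions made for illustration; the patch performs the call directly
from assembler):

	#include <asm/hypervisor.h>

	/* Hypothetical wrapper: point the non-nucleus-context MMU at the
	 * ndescr TSB descriptors located at physical address descr_pa.
	 */
	static inline unsigned long hv_mmu_tsb_ctxnon0(unsigned long ndescr,
						       unsigned long descr_pa)
	{
		register unsigned long func asm("%o0") = HV_FAST_MMU_TSB_CTXNON0;
		register unsigned long arg0 asm("%o1") = ndescr;
		register unsigned long arg1 asm("%o2") = descr_pa;

		__asm__ __volatile__("ta	%3"
				     : "+r" (func), "+r" (arg0), "+r" (arg1)
				     : "i" (HV_FAST_TRAP)
				     : "memory");

		return func;
	}

Passing 1 in %o1 in tsb.S matches the single descriptor built by
setup_tsb_params() and handed down as %o4 by tsb_context_switch().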