ppc/85xx: Move code around to prep for NAND_SPL

If we move some of the functions in tlb.c around we need fewer
#ifdefs.  The first-stage loader just needs invalidate_tlb() and
init_tlbs().

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
diff --git a/cpu/mpc85xx/tlb.c b/cpu/mpc85xx/tlb.c
index f87a10d..44e80b1 100644
--- a/cpu/mpc85xx/tlb.c
+++ b/cpu/mpc85xx/tlb.c
@@ -32,6 +32,29 @@
 
 DECLARE_GLOBAL_DATA_PTR;
 
+void invalidate_tlb(u8 tlb)
+{
+	if (tlb == 0)
+		mtspr(MMUCSR0, 0x4);
+	if (tlb == 1)
+		mtspr(MMUCSR0, 0x2);
+}
+
+void init_tlbs(void)
+{
+	int i;
+
+	for (i = 0; i < num_tlb_entries; i++) {
+		write_tlb(tlb_table[i].mas0,
+			  tlb_table[i].mas1,
+			  tlb_table[i].mas2,
+			  tlb_table[i].mas3,
+			  tlb_table[i].mas7);
+	}
+
+	return ;
+}
+
 void set_tlb(u8 tlb, u32 epn, u64 rpn,
 	     u8 perms, u8 wimge,
 	     u8 ts, u8 esel, u8 tsize, u8 iprot)
@@ -77,29 +100,6 @@
 #endif
 }
 
-void invalidate_tlb(u8 tlb)
-{
-	if (tlb == 0)
-		mtspr(MMUCSR0, 0x4);
-	if (tlb == 1)
-		mtspr(MMUCSR0, 0x2);
-}
-
-void init_tlbs(void)
-{
-	int i;
-
-	for (i = 0; i < num_tlb_entries; i++) {
-		write_tlb(tlb_table[i].mas0,
-			  tlb_table[i].mas1,
-			  tlb_table[i].mas2,
-			  tlb_table[i].mas3,
-			  tlb_table[i].mas7);
-	}
-
-	return ;
-}
-
 static void tlbsx (const volatile unsigned *addr)
 {
 	__asm__ __volatile__ ("tlbsx 0,%0" : : "r" (addr), "m" (*addr));