Replace "extern inline" with "static inline"

A number of headers define functions as "extern inline", which
causes problems with gcc 5.  The reason is that starting with
version 5.1, gcc defaults to the standard C99 semantics for the
inline keyword.

Under the traditional GNU inline semantics, an "extern inline"
function never creates an external definition, the same as
inline *without* extern in C99.  In C99, an "extern inline"
definition is simply an external definition with an inline hint.
In short, the meanings of inline with and without extern are
swapped between GNU C and C99.
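
As a minimal illustration (a hypothetical header, not taken from
the kernel sources):

	/* foo.h */
	extern inline int foo(void) { return 42; }

Under the GNU89 rules (gcc -fgnu89-inline), this body is used only
for inlining and no out-of-line definition is emitted, so including
foo.h from several .c files links fine.  Under the C99 rules that
gcc 5 defaults to, the same line emits an external definition of
foo() in every translation unit that includes the header.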

The upshot is that all these definitions in header files create
an external definition wherever those headers are included,
resulting in multiple definition errors at link time.

Changing all these functions to "static inline" fixes the problem,
since "static inline" means the same thing in all gcc versions.
Although the semantics are slightly different (a static inline
definition may result in an actual function being emitted in each
translation unit that uses it), it works as intended in practice.
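
For example (again a sketch, not actual kernel code), the fixed
form:

	/* foo.h */
	static inline int foo(void) { return 42; }

compiles and links cleanly under both the old and the new defaults;
at worst, each translation unit where gcc declines to inline a call
(or where the function's address is taken) gets its own local copy.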

This patch also removes extern prototype declarations for the
changed functions where they existed.

Signed-off-by: Mans Rullgard <mans@mansr.com>
diff --git a/arch/sparc/include/asm/srmmu.h b/arch/sparc/include/asm/srmmu.h
index 74b1554..8da2f67 100644
--- a/arch/sparc/include/asm/srmmu.h
+++ b/arch/sparc/include/asm/srmmu.h
@@ -148,7 +148,7 @@
 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
 
 /* Accessing the MMU control register. */
-extern __inline__ unsigned int srmmu_get_mmureg(void)
+static __inline__ unsigned int srmmu_get_mmureg(void)
 {
 	unsigned int retval;
 	__asm__ __volatile__("lda [%%g0] %1, %0\n\t":
@@ -156,14 +156,14 @@
 	return retval;
 }
 
-extern __inline__ void srmmu_set_mmureg(unsigned long regval)
+static __inline__ void srmmu_set_mmureg(unsigned long regval)
 {
 	__asm__ __volatile__("sta %0, [%%g0] %1\n\t"::"r"(regval),
 			     "i"(ASI_M_MMUREGS):"memory");
 
 }
 
-extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
+static __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
 {
 	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
 	__asm__ __volatile__("sta %0, [%1] %2\n\t"::"r"(paddr),
@@ -171,7 +171,7 @@
 			     "i"(ASI_M_MMUREGS):"memory");
 }
 
-extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
+static __inline__ unsigned long srmmu_get_ctable_ptr(void)
 {
 	unsigned int retval;
 
@@ -181,13 +181,13 @@
 	return (retval & SRMMU_CTX_PMASK) << 4;
 }
 
-extern __inline__ void srmmu_set_context(int context)
+static __inline__ void srmmu_set_context(int context)
 {
 	__asm__ __volatile__("sta %0, [%1] %2\n\t"::"r"(context),
 			     "r"(SRMMU_CTX_REG), "i"(ASI_M_MMUREGS):"memory");
 }
 
-extern __inline__ int srmmu_get_context(void)
+static __inline__ int srmmu_get_context(void)
 {
 	register int retval;
 	__asm__ __volatile__("lda [%1] %2, %0\n\t":
@@ -196,7 +196,7 @@
 	return retval;
 }
 
-extern __inline__ unsigned int srmmu_get_fstatus(void)
+static __inline__ unsigned int srmmu_get_fstatus(void)
 {
 	unsigned int retval;
 
@@ -206,7 +206,7 @@
 	return retval;
 }
 
-extern __inline__ unsigned int srmmu_get_faddr(void)
+static __inline__ unsigned int srmmu_get_faddr(void)
 {
 	unsigned int retval;
 
@@ -217,7 +217,7 @@
 }
 
 /* This is guaranteed on all SRMMU's. */
-extern __inline__ void srmmu_flush_whole_tlb(void)
+static __inline__ void srmmu_flush_whole_tlb(void)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(0x400),	/* Flush entire TLB!! */
 			     "i"(ASI_M_FLUSH_PROBE):"memory");
@@ -225,14 +225,14 @@
 }
 
 /* These flush types are not available on all chips... */
-extern __inline__ void srmmu_flush_tlb_ctx(void)
+static __inline__ void srmmu_flush_tlb_ctx(void)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(0x300),	/* Flush TLB ctx.. */
 			     "i"(ASI_M_FLUSH_PROBE):"memory");
 
 }
 
-extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
+static __inline__ void srmmu_flush_tlb_region(unsigned long addr)
 {
 	addr &= SRMMU_PGDIR_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(addr | 0x200),	/* Flush TLB region.. */
@@ -240,7 +240,7 @@
 
 }
 
-extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
+static __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
 {
 	addr &= SRMMU_REAL_PMD_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(addr | 0x100),	/* Flush TLB segment.. */
@@ -248,7 +248,7 @@
 
 }
 
-extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
+static __inline__ void srmmu_flush_tlb_page(unsigned long page)
 {
 	page &= PAGE_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(page),	/* Flush TLB page.. */
@@ -256,7 +256,7 @@
 
 }
 
-extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
+static __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
 {
 	unsigned long retval;
 
@@ -268,7 +268,7 @@
 	return retval;
 }
 
-extern __inline__ int srmmu_get_pte(unsigned long addr)
+static __inline__ int srmmu_get_pte(unsigned long addr)
 {
 	register unsigned long entry;