/*
 * Cache-handling routines for MIPS CPUs
 *
 * Copyright (c) 2003	Wolfgang Denk <wd@denx.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/cacheops.h>

#define RA		t9

/*
 * 16kB is the maximum size of the instruction and data caches on the
 * MIPS 4K; 64kB on the 4KE, 24K, 5K, etc. Use the bigger size for
 * convenience.
 *
 * Note that the above is the maximum size of the primary caches only;
 * U-Boot has no L2 cache support for now.
 */
#define MIPS_MAX_CACHE_SIZE	0x10000

#define INDEX_BASE	CKSEG0

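/*
 * cache_op: issue a single MIPS "cache" instruction of type \op on the
 * line containing \addr. ".set mips3" lets the assembler accept the
 * cache opcode even when building for an older ISA level.
 */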
	.macro	cache_op op addr
	.set	push
	.set	noreorder
	.set	mips3
	cache	\op, 0(\addr)
	.set	pop
	.endm

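/*
 * f_fill64: store \val into the 64 bytes starting at \dst + \offset,
 * using eight native-word stores (sixteen when LONGSIZE == 4, i.e. on
 * 32-bit builds).
 */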
	.macro	f_fill64 dst, offset, val
	LONG_S	\val, (\offset + 0 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 1 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 2 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 3 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 4 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 5 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 6 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 7 * LONGSIZE)(\dst)
#if LONGSIZE == 4
	LONG_S	\val, (\offset + 8 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 9 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 10 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 11 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 12 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 13 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 14 * LONGSIZE)(\dst)
	LONG_S	\val, (\offset + 15 * LONGSIZE)(\dst)
#endif
	.endm

/*
 * mips_init_icache(uint PRId, ulong icache_size, unchar icache_linesz)
 */
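/*
 * Arguments arrive in a0 (PRId, currently unused), a1 (I-cache size in
 * bytes) and a2 (I-cache line size in bytes). A size of zero or less
 * skips the initialisation entirely.
 */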
LEAF(mips_init_icache)
	blez	a1, 9f
	mtc0	zero, CP0_TAGLO
	/* clear tag to invalidate */
	PTR_LI	t0, INDEX_BASE
	PTR_ADDU t1, t0, a1
1:	cache_op INDEX_STORE_TAG_I t0
	PTR_ADDU t0, a2
	bne	t0, t1, 1b
	/* fill once, so data field parity is correct */
	PTR_LI	t0, INDEX_BASE
2:	cache_op FILL t0
	PTR_ADDU t0, a2
	bne	t0, t1, 2b
	/* invalidate again - prudent but not strictly necessary */
	PTR_LI	t0, INDEX_BASE
1:	cache_op INDEX_STORE_TAG_I t0
	PTR_ADDU t0, a2
	bne	t0, t1, 1b
9:	jr	ra
	END(mips_init_icache)

/*
 * mips_init_dcache(uint PRId, ulong dcache_size, unchar dcache_linesz)
 */
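/*
 * Same argument convention as mips_init_icache above. The D-cache gets
 * good parity from a store-tag / cached-load / store-tag pass over each
 * line, using ordinary loads instead of the I-cache FILL operation.
 */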
LEAF(mips_init_dcache)
	blez	a1, 9f
	mtc0	zero, CP0_TAGLO
	/* clear all tags */
	PTR_LI	t0, INDEX_BASE
	PTR_ADDU t1, t0, a1
1:	cache_op INDEX_STORE_TAG_D t0
	PTR_ADDU t0, a2
	bne	t0, t1, 1b
	/* load from each line (in cached space) */
	PTR_LI	t0, INDEX_BASE
2:	LONG_L	zero, 0(t0)
	PTR_ADDU t0, a2
	bne	t0, t1, 2b
	/* clear all tags */
	PTR_LI	t0, INDEX_BASE
1:	cache_op INDEX_STORE_TAG_D t0
	PTR_ADDU t0, a2
	bne	t0, t1, 1b
9:	jr	ra
	END(mips_init_dcache)

/*
 * mips_cache_reset - low level initialisation of the primary caches
 *
 * This routine initialises the primary caches to ensure that they have good
 * parity. It must be called by the ROM before any cached locations are used
 * to prevent the possibility of data with bad parity being written to memory.
 *
 * To initialise the instruction cache it is essential that a source of data
 * with good parity is available. This routine will initialise an area of
 * memory starting at location zero to be used as a source of parity.
 *
 * RETURNS: N/A
 *
 */
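/*
 * Note: the return address is preserved in RA (t9, see the define at the
 * top of this file) because the nested jalr calls below clobber ra.
 */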
NESTED(mips_cache_reset, 0, ra)
	move	RA, ra
	li	t2, CONFIG_SYS_ICACHE_SIZE
	li	t3, CONFIG_SYS_DCACHE_SIZE
	li	t8, CONFIG_SYS_CACHELINE_SIZE

	li	v0, MIPS_MAX_CACHE_SIZE

	/*
	 * Now clear that much memory starting from zero. The fill is
	 * done through CKSEG1, the uncached window onto physical
	 * address zero, so the stores bypass the not-yet-initialised
	 * caches and go straight to RAM.
	 */
	PTR_LI	a0, CKSEG1
	PTR_ADDU a1, a0, v0
2:	PTR_ADDIU a0, 64
	f_fill64 a0, -64, zero
	bne	a0, a1, 2b

	/*
	 * The caches are probably in an indeterminate state,
	 * so we force good parity into them by doing an
	 * invalidate, load/fill, invalidate for each line.
	 */

	/*
	 * Assume bottom of RAM will generate good parity for the cache.
	 */

	/*
	 * Initialize the I-cache first,
	 */
	move	a1, t2
	move	a2, t8
	PTR_LA	v1, mips_init_icache
	jalr	v1

	/*
	 * then initialize D-cache.
	 */
	move	a1, t3
	move	a2, t8
	PTR_LA	v1, mips_init_dcache
	jalr	v1

	jr	RA
	END(mips_cache_reset)

/*
 * dcache_status - get cache status
 *
 * RETURNS: 0 - cache disabled; 1 - cache enabled
 *
 */
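/*
 * The cache is reported as enabled unless the K0 (kseg0 cacheability)
 * field of CP0 Config is set to "uncached".
 */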
LEAF(dcache_status)
	mfc0	t0, CP0_CONFIG
	li	t1, CONF_CM_UNCACHED
	andi	t0, t0, CONF_CM_CMASK
	move	v0, zero
	beq	t0, t1, 2f
	li	v0, 1
2:	jr	ra
	END(dcache_status)

/*
 * dcache_disable - disable cache
 *
 * RETURNS: N/A
 *
 */
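/*
 * Clear the three-bit K0 cacheability field of CP0 Config (the -8 mask
 * is ~CONF_CM_CMASK) and select the uncached mode for kseg0.
 */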
LEAF(dcache_disable)
	mfc0	t0, CP0_CONFIG
	li	t1, -8
	and	t0, t0, t1
	ori	t0, t0, CONF_CM_UNCACHED
	mtc0	t0, CP0_CONFIG
	jr	ra
	END(dcache_disable)

/*
 * dcache_enable - enable cache
 *
 * RETURNS: N/A
 *
 */
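/*
 * The ori/xori pair with CONF_CM_CMASK clears the K0 cacheability field
 * without needing a temporary register; "cacheable, noncoherent" is
 * then selected for kseg0.
 */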
LEAF(dcache_enable)
	mfc0	t0, CP0_CONFIG
	ori	t0, CONF_CM_CMASK
	xori	t0, CONF_CM_CMASK
	ori	t0, CONF_CM_CACHABLE_NONCOHERENT
	mtc0	t0, CP0_CONFIG
	jr	ra
	END(dcache_enable)