/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF
void set_pgtable_section(u64 *page_table, u64 index, u64 section,
                         u64 memory_type)
{
        u64 value;

        value = section | PMD_TYPE_SECT | PMD_SECT_AF;
        value |= PMD_ATTRINDX(memory_type);
        page_table[index] = value;
}
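
/*
 * Illustrative use (address hypothetical): to map the section containing
 * 0x40000000 as normal, cacheable memory one would write
 *
 *        set_pgtable_section(page_table, 0x40000000 >> SECTION_SHIFT,
 *                            0x40000000, MT_NORMAL);
 *
 * The descriptor written is the section base ORed with PMD_TYPE_SECT
 * (block entry), PMD_SECT_AF (access flag pre-set, so no access faults)
 * and the MAIR attribute index for the given memory type.
 */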

/* To activate the MMU we need to set up virtual memory */
static void mmu_setup(void)
{
        bd_t *bd = gd->bd;
        u64 *page_table = (u64 *)gd->arch.tlb_addr;
        u64 i, j;
        int el;

        /* Set up an identity mapping for the whole address space */
        for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
                set_pgtable_section(page_table, i, i << SECTION_SHIFT,
                                    MT_DEVICE_NGNRNE);
        }

        /* Re-map all RAM banks as normal memory */
        for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
                ulong start = bd->bi_dram[i].start;
                ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;

                for (j = start >> SECTION_SHIFT;
                     j < end >> SECTION_SHIFT; j++) {
                        set_pgtable_section(page_table, j,
                                            j << SECTION_SHIFT,
                                            MT_NORMAL);
                }
        }

        /* Load TTBR0, TCR and MAIR for the current exception level */
        el = current_el();
        if (el == 1) {
                set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
                                  TCR_FLAGS | TCR_EL1_IPS_BITS,
                                  MEMORY_ATTRIBUTES);
        } else if (el == 2) {
                set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
                                  TCR_FLAGS | TCR_EL2_IPS_BITS,
                                  MEMORY_ATTRIBUTES);
        } else {
                set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
                                  TCR_FLAGS | TCR_EL3_IPS_BITS,
                                  MEMORY_ATTRIBUTES);
        }

        /* Enable the MMU */
        set_sctlr(get_sctlr() | CR_M);
}
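
/*
 * Note: the first loop above marks the entire address space as device
 * memory, and the second loop then re-marks just the DRAM banks as
 * normal memory. TTBR0 always points at the same table; only the TCR
 * IPS field differs between exception levels.
 */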

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
        __asm_invalidate_dcache_all();
}

/*
 * Performs a clean & invalidate of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_cache() returns the timeout status of the L3 flush.
 */
inline void flush_dcache_all(void)
{
        int ret;

        __asm_flush_dcache_all();
        ret = __asm_flush_l3_cache();
        if (ret)
                debug("flushing dcache returns 0x%x\n", ret);
        else
                debug("flushing dcache successfully.\n");
}
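
/*
 * A non-zero return from __asm_flush_l3_cache() indicates the L3 flush
 * timed out; the debug() above only reports it, and the flush of the
 * architected cache levels has already completed either way.
 */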

/*
 * Invalidates the given range in all levels of the D-cache/unified
 * cache (implemented as a clean & invalidate, which is a safe superset)
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_flush_dcache_range(start, stop);
}

/*
 * Flushes (cleans & invalidates) the given range from all levels of the
 * D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_flush_dcache_range(start, stop);
}
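
/*
 * Typical use (buffer address and length hypothetical): clean a DMA
 * buffer before handing it to a device, so the device sees the CPU's
 * writes:
 *
 *        flush_dcache_range((ulong)buf, (ulong)buf + len);
 *
 * Both range functions above map to the same clean & invalidate
 * primitive; cleaning before invalidating is harmless for the
 * invalidate case as long as the buffer holds no device-written data
 * that a write-back would clobber.
 */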

void dcache_enable(void)
{
        /* The data cache is not active unless the MMU is enabled */
        if (!(get_sctlr() & CR_M)) {
                invalidate_dcache_all();
                __asm_invalidate_tlb_all();
                mmu_setup();
        }

        set_sctlr(get_sctlr() | CR_C);
}
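
/*
 * On ARMv8 the data cache can only do useful work with the MMU on,
 * since cacheability is a property of the page-table attributes; hence
 * mmu_setup() is invoked on the first enable.
 */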

void dcache_disable(void)
{
        uint32_t sctlr;

        sctlr = get_sctlr();

        /* If the cache isn't enabled there is nothing to disable */
        if (!(sctlr & CR_C))
                return;

        set_sctlr(sctlr & ~(CR_C | CR_M));

        flush_dcache_all();
        __asm_invalidate_tlb_all();
}
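
/*
 * The cache (and MMU) are switched off before the final flush so that
 * no new dirty lines can be allocated while flush_dcache_all() is
 * pushing the remaining ones out to memory.
 */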

int dcache_status(void)
{
        return (get_sctlr() & CR_C) != 0;
}
141
Siva Durga Prasad Paladugudad17fd2015-06-26 18:05:07 +0530142u64 *__weak arch_get_page_table(void) {
143 puts("No page table offset defined\n");
144
145 return NULL;
146}
147
148void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
149 enum dcache_option option)
150{
151 u64 *page_table = arch_get_page_table();
152 u64 upto, end;
153
154 if (page_table == NULL)
155 return;
156
157 end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
158 MMU_SECTION_SHIFT;
159 start = start >> MMU_SECTION_SHIFT;
160 for (upto = start; upto < end; upto++) {
161 page_table[upto] &= ~PMD_ATTRINDX_MASK;
162 page_table[upto] |= PMD_ATTRINDX(option);
163 }
164 asm volatile("dsb sy");
165 __asm_invalidate_tlb_all();
166 asm volatile("dsb sy");
167 asm volatile("isb");
168 start = start << MMU_SECTION_SHIFT;
169 end = end << MMU_SECTION_SHIFT;
170 flush_dcache_range(start, end);
171 asm volatile("dsb sy");
172}
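
/*
 * Example (addresses hypothetical): a board could mark a 16 MiB frame
 * buffer region uncacheable with
 *
 *        mmu_set_region_dcache_behaviour(0x8f000000, 0x01000000,
 *                                        DCACHE_OFF);
 *
 * provided its arch_get_page_table() returns the live table. Since the
 * code rounds start down and end up to MMU_SECTION_SHIFT boundaries,
 * an unaligned region affects the whole enclosing sections.
 */
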
#else /* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
        return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
{
}

#endif /* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

void icache_enable(void)
{
        __asm_invalidate_icache_all();
        set_sctlr(get_sctlr() | CR_I);
}
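
/*
 * The i-cache is invalidated before CR_I is set so that no stale
 * instructions fetched while the cache was off (or left by a previous
 * boot stage) can be executed from it.
 */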

void icache_disable(void)
{
        set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
        return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
        __asm_invalidate_icache_all();
}

#else /* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
        return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable the d-cache and i-cache. Whether each cache actually gets
 * enabled depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF.
 */
void __weak enable_caches(void)
{
        icache_enable();
        dcache_enable();
}
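
/*
 * enable_caches() is weak so that a board needing different ordering or
 * extra setup can provide its own version, e.g. one that leaves the
 * d-cache off and only calls icache_enable().
 */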

/*
 * Flush a range from all levels of the D-cache/unified cache
 */
void flush_cache(unsigned long start, unsigned long size)
{
        flush_dcache_range(start, start + size);
}