blob: b4ca8b6934171f69122c02376deac106426b026c [file] [log] [blame]
/*
 * Copyright (C) 2012-2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <linux/io.h>
#include <asm/armv7.h>

#include "ssc-regs.h"

#ifdef CONFIG_UNIPHIER_L2CACHE_ON
/*
 * Run a maintenance operation on the whole UniPhier outer cache
 * (system cache controller, "SSC").
 *
 * @operation: operation code OR'd into the SSCOQM command register
 *             (e.g. SSCOQM_CM_WB_INV, SSCOQM_CM_INV)
 */
static void uniphier_cache_maint_all(u32 operation)
{
	/*
	 * Post an "all entries" command; retry while SSCOPPQSEF reports
	 * FE or OE, i.e. the command was not accepted by the queue.
	 */
	do {
		writel(SSCOQM_S_ALL | SSCOQM_CE | operation, SSCOQM);
	} while (readl(SSCOPPQSEF) & (SSCOPPQSEF_FE | SSCOPPQSEF_OE));

	/* wait until the operation is completed */
	while (readl(SSCOLPQS) != SSCOLPQS_EF)
		;

	/* clear the complete notification flag */
	writel(SSCOLPQS_EF, SSCOLPQS);

	writel(SSCOPE_CM_SYNC, SSCOPE);	/* drain internal buffers */
	readl(SSCOPE);			/* need a read back to confirm */
}
31
/* Write back and invalidate the entire outer cache. */
void v7_outer_cache_flush_all(void)
{
	uniphier_cache_maint_all(SSCOQM_CM_WB_INV);
}
36
/* Invalidate (without write back) the entire outer cache. */
void v7_outer_cache_inval_all(void)
{
	uniphier_cache_maint_all(SSCOQM_CM_INV);
}
41
/*
 * Issue a single address-range maintenance command to the outer cache.
 *
 * @start:     start address of the range (caller is expected to align
 *             it to SSC_LINE_SIZE)
 * @size:      length in bytes (caller caps it at SSC_RANGE_OP_MAX_SIZE)
 * @operation: SSCOQM operation code
 *
 * Unlike uniphier_cache_maint_all(), this helper does not drain the
 * internal buffers; the caller issues one SSCOPE_CM_SYNC after all
 * chunks have been submitted.
 */
static void __uniphier_cache_maint_range(u32 start, u32 size, u32 operation)
{
	/* try until the command is successfully set */
	do {
		writel(SSCOQM_S_ADDRESS | SSCOQM_CE | operation, SSCOQM);
		writel(start, SSCOQAD);
		writel(size, SSCOQSZ);

	} while (readl(SSCOPPQSEF) & (SSCOPPQSEF_FE | SSCOPPQSEF_OE));

	/* wait until the operation is completed */
	while (readl(SSCOLPQS) != SSCOLPQS_EF)
		;

	/* clear the complete notification flag */
	writel(SSCOLPQS_EF, SSCOLPQS);
}
59
/*
 * Run a maintenance operation on the address range [start, end).
 *
 * The range is widened to SSC_LINE_SIZE alignment on both ends, split
 * into chunks of at most SSC_RANGE_OP_MAX_SIZE, and finished with a
 * SYNC command to drain the controller's internal buffers.
 */
static void uniphier_cache_maint_range(u32 start, u32 end, u32 operation)
{
	u32 size;

	/*
	 * If start address is not aligned to cache-line,
	 * do cache operation for the first cache-line
	 */
	start = start & ~(SSC_LINE_SIZE - 1);

	/*
	 * An aligned range covering (nearly) the whole 32-bit space:
	 * fall back to the all-entries operation instead of walking it.
	 */
	if (start == 0 && end >= (u32)(-SSC_LINE_SIZE)) {
		/* this means cache operation for all range */
		uniphier_cache_maint_all(operation);
		return;
	}

	/*
	 * If end address is not aligned to cache-line,
	 * do cache operation for the last cache-line
	 */
	size = (end - start + SSC_LINE_SIZE - 1) & ~(SSC_LINE_SIZE - 1);

	/* one range command is limited to SSC_RANGE_OP_MAX_SIZE bytes */
	while (size) {
		u32 chunk_size = size > SSC_RANGE_OP_MAX_SIZE ?
						SSC_RANGE_OP_MAX_SIZE : size;
		__uniphier_cache_maint_range(start, chunk_size, operation);

		start += chunk_size;
		size -= chunk_size;
	}

	writel(SSCOPE_CM_SYNC, SSCOPE);	/* drain internal buffers */
	readl(SSCOPE);			/* need a read back to confirm */
}
94
/* Write back and invalidate the outer cache for [start, end). */
void v7_outer_cache_flush_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, SSCOQM_CM_WB_INV);
}
99
/* Invalidate (without write back) the outer cache for [start, end). */
void v7_outer_cache_inval_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, SSCOQM_CM_INV);
}
104
105void v7_outer_cache_enable(void)
106{
107 u32 tmp;
108 tmp = readl(SSCC);
109 tmp |= SSCC_ON;
110 writel(tmp, SSCC);
111}
#endif

114void v7_outer_cache_disable(void)
115{
116 u32 tmp;
117 tmp = readl(SSCC);
118 tmp &= ~SSCC_ON;
119 writel(tmp, SSCC);
120}
121
/* Board hook: enable the data cache. */
void enable_caches(void)
{
	dcache_enable();
}