// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2014 Google, Inc
 *
 * Memory Type Range Registers - these are used to tell the CPU whether
 * memory is cacheable and if so the cache write mode to use.
 *
 * These can speed up booting. See the mtrr command.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3:
 * System Programming
 */

/*
 * Note that any console output (e.g. debug()) in this file will likely fail
 * since the MTRR registers are sometimes in flux.
 */

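/*
 * Typical usage (an illustrative sketch, not a fixed contract): early x86
 * init code queues the regions it wants cached with mtrr_add_request() and
 * then programs them all at once with mtrr_commit(), e.g.:
 *
 *	mtrr_add_request(MTRR_TYPE_WRBACK, 0, ram_size);
 *	mtrr_commit(true);
 *
 * where ram_size stands in for whatever the caller detected.
 * mtrr_set_next_var() is the immediate, single-register alternative.
 */
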
#include <common.h>
#include <cpu_func.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/mtrr.h>

DECLARE_GLOBAL_DATA_PTR;

/* Prepare to adjust MTRRs */
void mtrr_open(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	if (do_caches) {
		state->enable_cache = dcache_status();

		if (state->enable_cache)
			disable_caches();
	}
	state->deftype = native_read_msr(MTRR_DEF_TYPE_MSR);
	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype & ~MTRR_DEF_TYPE_EN);
}

/* Clean up after adjusting MTRRs, and enable them */
void mtrr_close(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype | MTRR_DEF_TYPE_EN);
	if (do_caches && state->enable_cache)
		enable_caches();
}

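/*
 * Program one variable-MTRR base/mask pair. The base MSR holds the start
 * address OR'd with the memory type; the mask MSR marks the region valid
 * and covers an address when (addr & mask) == (start & mask), so size must
 * be a power of two and start aligned to it.
 *
 * Worked example (illustrative only, assuming CONFIG_CPU_ADDR_BITS is 36):
 * start = 0, size = 2GiB, type = MTRR_TYPE_WRBACK (6):
 *	base = 0 | 6 = 0x6
 *	mask = ~(2GiB - 1) & ((1ULL << 36) - 1) = 0xf80000000
 *	mask | MTRR_PHYS_MASK_VALID = 0xf80000800
 */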
static void set_var_mtrr(uint reg, uint type, uint64_t start, uint64_t size)
{
	u64 mask;

	wrmsrl(MTRR_PHYS_BASE_MSR(reg), start | type);
	mask = ~(size - 1);
	mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
	wrmsrl(MTRR_PHYS_MASK_MSR(reg), mask | MTRR_PHYS_MASK_VALID);
}

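/*
 * Write every queued request into a variable MTRR and invalidate the
 * remaining registers. MTRRs are disabled (and, when do_caches is set, the
 * data cache too) around the update and re-enabled afterwards.
 */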
int mtrr_commit(bool do_caches)
{
	struct mtrr_request *req = gd->arch.mtrr_req;
	struct mtrr_state state;
	int i;

	debug("%s: enabled=%d, count=%d\n", __func__, gd->arch.has_mtrr,
	      gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	debug("open\n");
	mtrr_open(&state, do_caches);
	debug("open done\n");
	for (i = 0; i < gd->arch.mtrr_req_count; i++, req++)
		set_var_mtrr(i, req->type, req->start, req->size);

	/* Clear the ones that are unused */
	debug("clear\n");
	for (; i < MTRR_COUNT; i++)
		wrmsrl(MTRR_PHYS_MASK_MSR(i), 0);
	debug("close\n");
	mtrr_close(&state, do_caches);
	debug("mtrr done\n");

	return 0;
}

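/*
 * Queue a request to be applied later by mtrr_commit(); nothing is written
 * to the hardware here. As with set_var_mtrr(), the mask arithmetic assumes
 * size is a power of two and start is aligned to it; the debug() output
 * below only shows the values that would eventually be programmed.
 */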
int mtrr_add_request(int type, uint64_t start, uint64_t size)
{
	struct mtrr_request *req;
	uint64_t mask;

	debug("%s: count=%d\n", __func__, gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
		return -ENOSPC;
	req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
	req->type = type;
	req->start = start;
	req->size = size;
	debug("%d: type=%d, %08llx %08llx\n", gd->arch.mtrr_req_count - 1,
	      req->type, req->start, req->size);
	mask = ~(req->size - 1);
	mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
	mask |= MTRR_PHYS_MASK_VALID;
	debug(" %016llx %016llx\n", req->start | req->type, mask);

	return 0;
}

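/* Number of variable MTRRs supported, from the MTRR capability MSR */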
static int get_var_mtrr_count(void)
{
	return msr_read(MSR_MTRR_CAP_MSR).lo & MSR_MTRR_CAP_VCNT;
}

static int get_free_var_mtrr(void)
{
	struct msr_t maskm;
	int vcnt;
	int i;

	vcnt = get_var_mtrr_count();

	/* Identify the first var mtrr which is not valid */
	for (i = 0; i < vcnt; i++) {
		maskm = msr_read(MTRR_PHYS_MASK_MSR(i));
		if ((maskm.lo & MTRR_PHYS_MASK_VALID) == 0)
			return i;
	}

	/* No free var mtrr */
	return -ENOSPC;
}

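/*
 * Program the next free variable MTRR straight away instead of queueing a
 * request for mtrr_commit(). Unlike mtrr_commit(), this does not touch the
 * cache-enable state. Illustrative call (rom_base/rom_size are placeholder
 * names, not defined in this file):
 *
 *	mtrr_set_next_var(MTRR_TYPE_WRBACK, rom_base, rom_size);
 */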
int mtrr_set_next_var(uint type, uint64_t start, uint64_t size)
{
	int mtrr;

	mtrr = get_free_var_mtrr();
	if (mtrr < 0)
		return mtrr;

	set_var_mtrr(mtrr, type, start, size);
	debug("MTRR %x: start=%x, size=%x\n", mtrr, (uint)start, (uint)size);

	return 0;
}