/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <lmb.h>

#define LMB_ALLOC_ANYWHERE	0

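/*
 * Rough usage sketch (illustrative only, not a definitive API description):
 * a caller first initialises the tables, then registers the RAM banks it
 * knows about, and finally carves out reserved ranges and allocations:
 *
 *	struct lmb lmb;
 *
 *	lmb_init(&lmb);
 *	lmb_add(&lmb, ram_base, ram_size);
 *	board_lmb_reserve(&lmb);
 *	arch_lmb_reserve(&lmb);
 *	lmb_reserve(&lmb, fdt_addr, fdt_size);
 *	addr = lmb_alloc(&lmb, image_size, 4096);
 *
 * The names ram_base, ram_size, fdt_addr, fdt_size, image_size and addr
 * above are placeholders, not symbols defined in this file.
 */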
void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	unsigned long i;

	debug("lmb_dump_all:\n");
	debug("    memory.cnt  = 0x%lx\n", lmb->memory.cnt);
	debug("    memory.size = 0x%llx\n",
	      (unsigned long long)lmb->memory.size);
	for (i = 0; i < lmb->memory.cnt; i++) {
		debug("    memory.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->memory.region[i].base);
		debug("                    .size = 0x%llx\n",
		      (unsigned long long)lmb->memory.region[i].size);
	}

	debug("\n    reserved.cnt  = 0x%lx\n", lmb->reserved.cnt);
	debug("    reserved.size = 0x%llx\n",
	      (unsigned long long)lmb->reserved.size);
	for (i = 0; i < lmb->reserved.cnt; i++) {
		debug("    reserved.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->reserved.region[i].base);
		debug("                      .size = 0x%llx\n",
		      (unsigned long long)lmb->reserved.region[i].size);
	}
#endif /* DEBUG */
}

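/*
 * Return non-zero when the half-open ranges [base1, base1 + size1) and
 * [base2, base2 + size2) intersect.
 */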
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

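/*
 * Return 1 when the second range starts exactly where the first one ends,
 * -1 when the first range starts exactly where the second one ends, and
 * 0 when the ranges are not adjacent.
 */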
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long lmb_regions_adjacent(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

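/* Drop entry r from the table by shifting all later entries down by one. */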
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

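/*
 * Reset both tables to a single zero-size entry.  This must run before any
 * other lmb_*() call on the structure.
 */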
void lmb_init(struct lmb *lmb)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb->memory.region[0].base = 0;
	lmb->memory.region[0].size = 0;
	lmb->memory.cnt = 1;
	lmb->memory.size = 0;

	/* Ditto. */
	lmb->reserved.region[0].base = 0;
	lmb->reserved.region[0].size = 0;
	lmb->reserved.cnt = 1;
	lmb->reserved.size = 0;
}

/* This routine is called with relocation disabled. */
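/*
 * Insert [base, base + size) into the table, merging it with an existing
 * entry when the new range duplicates or directly adjoins one.  Returns 0
 * when the range was stored as-is, a positive count when it was coalesced
 * with a neighbour, and -1 when the table is full (MAX_LMB_REGIONS).
 */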
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
			   phys_size_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;

	return 0;
}

/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

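/*
 * Remove [base, base + size) from the reserved table.  The range must lie
 * entirely within a single reserved entry; depending on how it overlaps,
 * that entry is deleted, trimmed at either end, or split in two.  Returns 0
 * on success and -1 when no containing entry is found (or when a split
 * cannot be recorded because the table is full).
 */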
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end, rgnend - end);
}

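/*
 * Mark [base, base + size) as reserved so that subsequent allocations
 * will avoid it.
 */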
long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}

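/*
 * Return the index of the first entry in rgn that overlaps
 * [base, base + size), or -1 when nothing overlaps.
 */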
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

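/*
 * Allocate size bytes with the given alignment from anywhere in the
 * registered memory.  Returns the physical base address, or 0 on failure.
 */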
phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			   phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

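/*
 * Both alignment helpers assume the alignment is a power of two; the mask
 * arithmetic below is not meaningful for other values.
 */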
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

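/*
 * Top-down allocator: walk the memory regions from the highest one down,
 * try to place the block as high as possible below max_addr, and step
 * downwards past any reserved region that is in the way.  On success the
 * block is recorded in the reserved table and its base is returned; 0
 * means the allocation failed.
 */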
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
			     phys_addr_t max_addr)
{
	long i, j;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb->reserved, base, size);
			if (j < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   lmb_align_up(size, align)) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

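/* Return 1 when addr falls inside any reserved region, 0 otherwise. */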
int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

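/*
 * Weak no-op defaults: boards and architectures typically override these
 * to reserve platform-specific memory (for example the running U-Boot
 * image or a framebuffer) before images are loaded.
 */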
__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* Please define a platform-specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* Please define a platform-specific arch_lmb_reserve() */
}