blob: 7507801ee72feccc017a04582010ad5732e00b7e [file] [log] [blame]
wdenk42d1f032003-10-15 23:53:47 +00001/*
wdenk97d80fc2004-06-09 00:34:46 +00002 * Copyright 2004 Freescale Semiconductor.
wdenk42d1f032003-10-15 23:53:47 +00003 * (C) Copyright 2003 Motorola Inc.
4 * Xianghua Xiao (X.Xiao@motorola.com)
5 *
6 * See file CREDITS for list of people who contributed to this
7 * project.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
22 * MA 02111-1307 USA
23 */
24
25#include <common.h>
26#include <asm/processor.h>
27#include <i2c.h>
28#include <spd.h>
29#include <asm/mmu.h>
30
Jon Loeligerd9b94f22005-07-25 14:05:07 -050031
32#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
33extern void dma_init(void);
wdenk9aea9532004-08-01 23:02:45 +000034extern uint dma_check(void);
Jon Loeligerd9b94f22005-07-25 14:05:07 -050035extern int dma_xfer(void *dest, uint count, void *src);
wdenk42d1f032003-10-15 23:53:47 +000036#endif
37
wdenk384cc682005-04-03 22:35:21 +000038#ifdef CONFIG_SPD_EEPROM
wdenk42d1f032003-10-15 23:53:47 +000039
wdenk9aea9532004-08-01 23:02:45 +000040#ifndef CFG_READ_SPD
41#define CFG_READ_SPD i2c_read
42#endif
43
Jon Loeligerd9b94f22005-07-25 14:05:07 -050044static unsigned int setup_laws_and_tlbs(unsigned int memsize);
45
46
/*
 * Convert picoseconds into clock cycles (rounding up if needed).
 */

int
picos_to_clk(int picos)
{
	/* Picoseconds per controller clock; 2000000000 = 2 * 10^9
	 * scaled against the bus frequency in kHz.  (Exact clock
	 * semantics inherited from the original -- do not change.) */
	int ps_per_clk = 2000000000 / (get_bus_freq(0) / 1000);
	int clks = picos / ps_per_clk;

	/* Round any remainder up to a whole clock. */
	if (picos % ps_per_clk) {
		clks++;
	}

	return clks;
}
63
Jon Loeligerd9b94f22005-07-25 14:05:07 -050064
65/*
66 * Calculate the Density of each Physical Rank.
67 * Returned size is in bytes.
68 *
69 * Study these table from Byte 31 of JEDEC SPD Spec.
70 *
71 * DDR I DDR II
72 * Bit Size Size
73 * --- ----- ------
74 * 7 high 512MB 512MB
75 * 6 256MB 256MB
76 * 5 128MB 128MB
77 * 4 64MB 16GB
78 * 3 32MB 8GB
79 * 2 16MB 4GB
80 * 1 2GB 2GB
81 * 0 low 1GB 1GB
82 *
83 * Reorder Table to be linear by stripping the bottom
84 * 2 or 5 bits off and shifting them up to the top.
85 */
86
wdenk9aea9532004-08-01 23:02:45 +000087unsigned int
Jon Loeligerd9b94f22005-07-25 14:05:07 -050088compute_banksize(unsigned int mem_type, unsigned char row_dens)
wdenk9aea9532004-08-01 23:02:45 +000089{
Jon Loeligerd9b94f22005-07-25 14:05:07 -050090 unsigned int bsize;
91
92 if (mem_type == SPD_MEMTYPE_DDR) {
93 /* Bottom 2 bits up to the top. */
94 bsize = ((row_dens >> 2) | ((row_dens & 3) << 6)) << 24;
95 debug("DDR: DDR I rank density = 0x%08x\n", bsize);
96 } else {
97 /* Bottom 5 bits up to the top. */
98 bsize = ((row_dens >> 5) | ((row_dens & 31) << 3)) << 27;
99 debug("DDR: DDR II rank density = 0x%08x\n", bsize);
100 }
101 return bsize;
wdenk9aea9532004-08-01 23:02:45 +0000102}
103
Jon Loeligerd9b94f22005-07-25 14:05:07 -0500104
/*
 * Convert a two-nibble BCD value into a cycle time.
 * While the spec calls for nano-seconds, picos are returned.
 *
 * This implements the tables for bytes 9, 23 and 25 for both
 * DDR I and II. No allowance for distinguishing the invalid
 * fields absent for DDR I yet present in DDR II is made.
 * (That is, cycle times of .25, .33, .66 and .75 ns are
 * allowed for both DDR II and I.)
 */

unsigned int
convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
{
	/*
	 * Table look up the lower nibble, allow DDR I & II.
	 * static const: built once at link time rather than
	 * re-initialized on the stack at every call.
	 */
	static const unsigned int tenths_ps[16] = {
		0,
		100,
		200,
		300,
		400,
		500,
		600,
		700,
		800,
		900,
		250,	/* 0xA: .25 ns */
		330,	/* 0xB: .33 ns */
		660,	/* 0xC: .66 ns */
		750,	/* 0xD: .75 ns */
		0,	/* undefined */
		0	/* undefined */
	};

	/* Upper nibble is whole nanoseconds, lower nibble is tenths. */
	unsigned int whole_ns = (spd_val & 0xF0) >> 4;
	unsigned int tenth_ns = spd_val & 0x0F;
	unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];

	return ps;
}
147
148
/*
 * Determine Refresh Rate.  Ignore self refresh bit on DDR I.
 * Table from SPD Spec, Byte 12, converted to picoseconds and
 * filled in with "default" normal values.
 *
 * Returns the refresh period expressed in controller clocks
 * (via picos_to_clk).
 */
unsigned int determine_refresh_rate(unsigned int spd_refresh)
{
	/*
	 * Refresh period in picoseconds, indexed by SPD byte 12
	 * bits 0-2.  (Renamed from refresh_time_ns: the values are
	 * picoseconds, matching the picos_to_clk() call below.)
	 * static const: built once, not re-initialized per call.
	 */
	static const unsigned int refresh_period_ps[8] = {
		15625000,	/* 0 Normal    1.00x */
		3900000,	/* 1 Reduced    .25x */
		7800000,	/* 2 Extended   .50x */
		31300000,	/* 3 Extended  2.00x */
		62500000,	/* 4 Extended  4.00x */
		125000000,	/* 5 Extended  8.00x */
		15625000,	/* 6 Normal    1.00x  filler */
		15625000,	/* 7 Normal    1.00x  filler */
	};

	return picos_to_clk(refresh_period_ps[spd_refresh & 0x7]);
}
169
170
/*
 * Read the DIMM's SPD EEPROM, derive DDR controller timing and
 * configuration register values from it, program the controller,
 * and finally establish LAW/TLB mappings for the memory.
 *
 * Returns the usable memory size in bytes, or 0 if the module is
 * unsupported, the bus frequency is out of range, or the mapping
 * fails.  The register-write ordering (including the sync/udelay
 * sequences) is required by the controller -- do not reorder.
 */
long int
spd_sdram(void)
{
	volatile immap_t *immap = (immap_t *)CFG_IMMR;
	volatile ccsr_ddr_t *ddr = &immap->im_ddr;
	volatile ccsr_gur_t *gur = &immap->im_gur;
	spd_eeprom_t spd;
	unsigned int n_ranks;
	unsigned int rank_density;
	unsigned int odt_rd_cfg, odt_wr_cfg;
	unsigned int odt_cfg, mode_odt_enable;
	unsigned int refresh_clk;
#ifdef MPC85xx_DDR_SDRAM_CLK_CNTL
	unsigned char clk_adjust;
#endif
	unsigned int dqs_cfg;
	unsigned char twr_clk, twtr_clk, twr_auto_clk;
	unsigned int tCKmin_ps, tCKmax_ps;
	unsigned int max_data_rate, effective_data_rate;
	unsigned int busfreq;
	unsigned sdram_cfg;
	unsigned int memsize;
	unsigned char caslat, caslat_ctrl;
	unsigned int trfc, trfc_clk, trfc_low, trfc_high;
	unsigned int trcd_clk;
	unsigned int trtp_clk;
	unsigned char cke_min_clk;
	unsigned char add_lat;
	unsigned char wr_lat;
	unsigned char wr_data_delay;
	unsigned char four_act;
	unsigned char cpo;
	unsigned char burst_len;
	unsigned int mode_caslat;
	unsigned char sdram_type;
	unsigned char d_init;

	/*
	 * Read SPD information.
	 */
	CFG_READ_SPD(SPD_EEPROM_ADDRESS, 0, 1, (uchar *) &spd, sizeof(spd));

	/*
	 * Check for supported memory module types.
	 */
	if (spd.mem_type != SPD_MEMTYPE_DDR &&
	    spd.mem_type != SPD_MEMTYPE_DDR2) {
		printf("Unable to locate DDR I or DDR II module.\n"
		       " Fundamental memory type is 0x%0x\n",
		       spd.mem_type);
		return 0;
	}

	/*
	 * These test gloss over DDR I and II differences in interpretation
	 * of bytes 3 and 4, but irrelevantly.  Multiple asymmetric banks
	 * are not supported on DDR I; and not encoded on DDR II.
	 *
	 * Also note that the 8548 controller can support:
	 *    12 <= nrow <= 16
	 * and
	 *     8 <= ncol <= 11 (still, for DDR)
	 *     6 <= ncol <=  9 (for FCRAM)
	 */
	if (spd.nrow_addr < 12 || spd.nrow_addr > 14) {
		printf("DDR: Unsupported number of Row Addr lines: %d.\n",
		       spd.nrow_addr);
		return 0;
	}
	if (spd.ncol_addr < 8 || spd.ncol_addr > 11) {
		printf("DDR: Unsupported number of Column Addr lines: %d.\n",
		       spd.ncol_addr);
		return 0;
	}

	/*
	 * Determine the number of physical banks controlled by
	 * different Chip Select signals.  This is not quite the
	 * same as the number of DIMM modules on the board.  Feh.
	 * (DDR II encodes rank count minus one in the low 3 bits.)
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR) {
		n_ranks = spd.nrows;
	} else {
		n_ranks = (spd.nrows & 0x7) + 1;
	}

	debug("DDR: number of ranks = %d\n", n_ranks);

	if (n_ranks > 2) {
		printf("DDR: Only 2 chip selects are supported: %d\n",
		       n_ranks);
		return 0;
	}

	/*
	 * Adjust DDR II IO voltage biasing.  It just makes it work.
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR2) {
		gur->ddrioovcr = (0
				  | 0x80000000		/* Enable */
				  | 0x10000000		/* VSEL to 1.8V */
				  );
	}

	/*
	 * Determine the size of each Rank in bytes.
	 */
	rank_density = compute_banksize(spd.mem_type, spd.row_dens);


	/*
	 * Eg: Bounds: 0x0000_0000 to 0x0f000_0000	first 256 Meg
	 */
	ddr->cs0_bnds = (rank_density >> 24) - 1;

	/*
	 * ODT configuration recommendation from DDR Controller Chapter.
	 */
	odt_rd_cfg = 0;			/* Never assert ODT */
	odt_wr_cfg = 0;			/* Never assert ODT */
	if (spd.mem_type == SPD_MEMTYPE_DDR2) {
		odt_wr_cfg = 1;		/* Assert ODT on writes to CS0 */
#if 0
		/* FIXME: How to determine the number of dimm modules? */
		if (n_dimm_modules == 2) {
			odt_rd_cfg = 1;	/* Assert ODT on reads to CS0 */
		}
#endif
	}

	ddr->cs0_config = ( 1 << 31
			    | (odt_rd_cfg << 20)
			    | (odt_wr_cfg << 16)
			    | (spd.nrow_addr - 12) << 8
			    | (spd.ncol_addr - 8) );
	debug("\n");
	debug("DDR: cs0_bnds = 0x%08x\n", ddr->cs0_bnds);
	debug("DDR: cs0_config = 0x%08x\n", ddr->cs0_config);

	if (n_ranks == 2) {
		/*
		 * Eg: Bounds: 0x0f00_0000 to 0x1e0000_0000, second 256 Meg
		 */
		ddr->cs1_bnds = ( (rank_density >> 8)
				  | ((rank_density >> (24 - 1)) - 1) );
		ddr->cs1_config = ( 1<<31
				    | (odt_rd_cfg << 20)
				    | (odt_wr_cfg << 16)
				    | (spd.nrow_addr - 12) << 8
				    | (spd.ncol_addr - 8) );
		debug("DDR: cs1_bnds = 0x%08x\n", ddr->cs1_bnds);
		debug("DDR: cs1_config = 0x%08x\n", ddr->cs1_config);
	}


	/*
	 * Find the largest CAS by locating the highest 1 bit
	 * in the spd.cas_lat field.  Translate it to a DDR
	 * controller field value:
	 *
	 *	CAS Lat	DDR I	DDR II	Ctrl
	 *	Clocks	SPD Bit	SPD Bit	Value
	 *	-------	-------	-------	-----
	 *	1.0	0		0001
	 *	1.5	1		0010
	 *	2.0	2	2	0011
	 *	2.5	3		0100
	 *	3.0	4	3	0101
	 *	3.5	5		0110
	 *	4.0		4	0111
	 *	4.5			1000
	 *	5.0		5	1001
	 */
	caslat = __ilog2(spd.cas_lat);
	if ((spd.mem_type == SPD_MEMTYPE_DDR)
	    && (caslat > 5)) {
		printf("DDR I: Invalid SPD CAS Latency: 0x%x.\n", spd.cas_lat);
		return 0;

	} else if (spd.mem_type == SPD_MEMTYPE_DDR2
		   && (caslat < 2 || caslat > 5)) {
		printf("DDR II: Invalid SPD CAS Latency: 0x%x.\n",
		       spd.cas_lat);
		return 0;
	}
	debug("DDR: caslat SPD bit is %d\n", caslat);

	/*
	 * Calculate the Maximum Data Rate based on the Minimum Cycle time.
	 * The SPD clk_cycle field (tCKmin) is measured in tenths of
	 * nanoseconds and represented as BCD.
	 */
	tCKmin_ps = convert_bcd_tenths_to_cycle_time_ps(spd.clk_cycle);
	debug("DDR: tCKmin = %d ps\n", tCKmin_ps);

	/*
	 * Double-data rate, scaled 1000 to picoseconds, and back down to MHz.
	 */
	max_data_rate = 2 * 1000 * 1000 / tCKmin_ps;
	debug("DDR: Module max data rate = %d Mhz\n", max_data_rate);


	/*
	 * Adjust the CAS Latency to allow for bus speeds that
	 * are slower than the DDR module.  Each speed bin derates
	 * CAS by 1 or 2 clocks when the module's derated cycle
	 * times (SPD bytes 23/25) permit it.
	 */
	busfreq = get_bus_freq(0) / 1000000;	/* MHz */

	effective_data_rate = max_data_rate;
	if (busfreq < 90) {
		/* DDR rate out-of-range */
		puts("DDR: platform frequency is not fit for DDR rate\n");
		return 0;

	} else if (90 <= busfreq && busfreq < 230 && max_data_rate >= 230) {
		/*
		 * busfreq 90~230 range, treated as DDR 200.
		 */
		effective_data_rate = 200;
		if (spd.clk_cycle3 == 0xa0)	/* 10 ns */
			caslat -= 2;
		else if (spd.clk_cycle2 == 0xa0)
			caslat--;

	} else if (230 <= busfreq && busfreq < 280 && max_data_rate >= 280) {
		/*
		 * busfreq 230~280 range, treated as DDR 266.
		 */
		effective_data_rate = 266;
		if (spd.clk_cycle3 == 0x75)	/* 7.5 ns */
			caslat -= 2;
		else if (spd.clk_cycle2 == 0x75)
			caslat--;

	} else if (280 <= busfreq && busfreq < 350 && max_data_rate >= 350) {
		/*
		 * busfreq 280~350 range, treated as DDR 333.
		 */
		effective_data_rate = 333;
		if (spd.clk_cycle3 == 0x60)	/* 6.0 ns */
			caslat -= 2;
		else if (spd.clk_cycle2 == 0x60)
			caslat--;

	} else if (350 <= busfreq && busfreq < 460 && max_data_rate >= 460) {
		/*
		 * busfreq 350~460 range, treated as DDR 400.
		 */
		effective_data_rate = 400;
		if (spd.clk_cycle3 == 0x50)	/* 5.0 ns */
			caslat -= 2;
		else if (spd.clk_cycle2 == 0x50)
			caslat--;

	} else if (460 <= busfreq && busfreq < 560 && max_data_rate >= 560) {
		/*
		 * busfreq 460~560 range, treated as DDR 533.
		 */
		effective_data_rate = 533;
		if (spd.clk_cycle3 == 0x3D)	/* 3.75 ns */
			caslat -= 2;
		else if (spd.clk_cycle2 == 0x3D)
			caslat--;

	} else if (560 <= busfreq && busfreq < 700 && max_data_rate >= 700) {
		/*
		 * busfreq 560~700 range, treated as DDR 667.
		 */
		effective_data_rate = 667;
		if (spd.clk_cycle3 == 0x30)	/* 3.0 ns */
			caslat -= 2;
		else if (spd.clk_cycle2 == 0x30)
			caslat--;

	} else if (700 <= busfreq) {
		/*
		 * DDR rate out-of-range
		 */
		printf("DDR: Bus freq %d MHz is not fit for DDR rate %d MHz\n",
		       busfreq, max_data_rate);
		return 0;
	}


	/*
	 * Convert caslat clocks to DDR controller value.
	 * Force caslat_ctrl to be DDR Controller field-sized.
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR) {
		caslat_ctrl = (caslat + 1) & 0x07;
	} else {
		caslat_ctrl = (2 * caslat - 1) & 0x0f;
	}

	debug("DDR: effective data rate is %d MHz\n", effective_data_rate);
	debug("DDR: caslat SPD bit is %d, controller field is 0x%x\n",
	      caslat, caslat_ctrl);

	/*
	 * Timing Config 0.
	 * Avoid writing for DDR I.  The new PQ38 DDR controller
	 * dreams up non-zero default values to be backwards compatible.
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR2) {
		unsigned char taxpd_clk = 8;		/* By the book. */
		unsigned char tmrd_clk = 2;		/* By the book. */
		unsigned char act_pd_exit = 2;		/* Empirical? */
		unsigned char pre_pd_exit = 6;		/* Empirical? */

		ddr->timing_cfg_0 = (0
			| ((act_pd_exit & 0x7) << 20)	/* ACT_PD_EXIT */
			| ((pre_pd_exit & 0x7) << 16)	/* PRE_PD_EXIT */
			| ((taxpd_clk & 0xf) << 8)	/* ODT_PD_EXIT */
			| ((tmrd_clk & 0xf) << 0)	/* MRS_CYC */
			);
#if 0
		ddr->timing_cfg_0 |= 0xaa000000;	/* extra cycles */
#endif
		debug("DDR: timing_cfg_0 = 0x%08x\n", ddr->timing_cfg_0);

	} else {
#if 0
		/*
		 * Force extra cycles with 0xaa bits.
		 * Incidentally supply the dreamt-up backwards compat value!
		 */
		ddr->timing_cfg_0 = 0x00110105;	/* backwards compat value */
		ddr->timing_cfg_0 |= 0xaa000000;	/* extra cycles */
		debug("DDR: HACK timing_cfg_0 = 0x%08x\n", ddr->timing_cfg_0);
#endif
	}


	/*
	 * Some Timing Config 1 values now.
	 * Sneak Extended Refresh Recovery in here too.
	 */

	/*
	 * For DDR I, WRREC(Twr) and WRTORD(Twtr) are not in SPD,
	 * use conservative value.
	 * For DDR II, they are bytes 36 and 37, in quarter nanos.
	 */

	if (spd.mem_type == SPD_MEMTYPE_DDR) {
		twr_clk = 3;	/* Clocks */
		twtr_clk = 1;	/* Clocks */
	} else {
		twr_clk = picos_to_clk(spd.twr * 250);
		twtr_clk = picos_to_clk(spd.twtr * 250);
	}

	/*
	 * Calculate Trfc, in picos.
	 * DDR I:  Byte 42 straight up in ns.
	 * DDR II: Byte 40 and 42 swizzled some, in ns.
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR) {
		trfc = spd.trfc * 1000;		/* up to ps */
	} else {
		/* Fractional-ns part of tRFC from SPD byte 40 bits 1-3. */
		unsigned int byte40_table_ps[8] = {
			0,
			250,
			330,
			500,
			660,
			750,
			0,
			0
		};

		trfc = (((spd.trctrfc_ext & 0x1) * 256) + spd.trfc) * 1000
			+ byte40_table_ps[(spd.trctrfc_ext >> 1) & 0x7];
	}
	trfc_clk = picos_to_clk(trfc);

	/*
	 * Trcd, Byte 29, from quarter nanos to ps and clocks.
	 */
	trcd_clk = picos_to_clk(spd.trcd * 250) & 0x7;

	/*
	 * Convert trfc_clk to DDR controller fields.  DDR I should
	 * fit in the REFREC field (16-19) of TIMING_CFG_1, but the
	 * 8548 controller has an extended REFREC field of three bits.
	 * The controller automatically adds 8 clocks to this value,
	 * so preadjust it down 8 first before splitting it up.
	 */
	trfc_low = (trfc_clk - 8) & 0xf;
	trfc_high = ((trfc_clk - 8) >> 4) & 0x3;

	/*
	 * Sneak in some Extended Refresh Recovery.
	 */
	ddr->ext_refrec = (trfc_high << 16);
	debug("DDR: ext_refrec = 0x%08x\n", ddr->ext_refrec);

	ddr->timing_cfg_1 =
	    (0
	     | ((picos_to_clk(spd.trp * 250) & 0x07) << 28)	/* PRETOACT */
	     | ((picos_to_clk(spd.tras * 1000) & 0x0f ) << 24)	/* ACTTOPRE */
	     | (trcd_clk << 20)					/* ACTTORW */
	     | (caslat_ctrl << 16)				/* CASLAT */
	     | (trfc_low << 12)					/* REFEC */
	     | ((twr_clk & 0x07) << 8)				/* WRRREC */
	     | ((picos_to_clk(spd.trrd * 250) & 0x07) << 4)	/* ACTTOACT */
	     | ((twtr_clk & 0x07) << 0)				/* WRTORD */
	     );

	debug("DDR: timing_cfg_1 = 0x%08x\n", ddr->timing_cfg_1);


	/*
	 * Timing_Config_2
	 * Was: 0x00000800;
	 */

	/*
	 * Additive Latency
	 * For DDR I, 0.
	 * For DDR II, with ODT enabled, use "a value" less than ACTTORW,
	 *	which comes from Trcd, and also note that:
	 *	add_lat + caslat must be >= 4
	 */
	add_lat = 0;
	if (spd.mem_type == SPD_MEMTYPE_DDR2
	    && (odt_wr_cfg || odt_rd_cfg)
	    && (caslat < 4)) {
		add_lat = 4 - caslat;
		if (add_lat > trcd_clk) {
			add_lat = trcd_clk - 1;
		}
	}

	/*
	 * Write Data Delay
	 * Historically 0x2 == 4/8 clock delay.
	 * Empirically, 0x3 == 6/8 clock delay is suggested for DDR I 266.
	 */
	wr_data_delay = 3;

	/*
	 * Write Latency
	 * Read to Precharge
	 * Minimum CKE Pulse Width.
	 * Four Activate Window
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR) {
		/*
		 * This is a lie.  It should really be 1, but if it is
		 * set to 1, bits overlap into the old controller's
		 * otherwise unused ACSM field.  If we leave it 0, then
		 * the HW will magically treat it as 1 for DDR 1.  Oh Yea.
		 */
		wr_lat = 0;

		trtp_clk = 2;		/* By the book. */
		cke_min_clk = 1;	/* By the book. */
		four_act = 1;		/* By the book. */

	} else {
		wr_lat = caslat - 1;

		/* Convert SPD value from quarter nanos to picos. */
		trtp_clk = picos_to_clk(spd.trtp * 250);

		cke_min_clk = 3;	/* By the book. */
		four_act = picos_to_clk(37500);	/* By the book. 1k pages? */
	}

	/*
	 * Empirically set ~MCAS-to-preamble override for DDR 2.
	 * Your milage will vary.
	 */
	cpo = 0;
	if (spd.mem_type == SPD_MEMTYPE_DDR2) {
		if (effective_data_rate == 266 || effective_data_rate == 333) {
			cpo = 0x7;	/* READ_LAT + 5/4 */
		} else if (effective_data_rate == 400) {
			cpo = 0x9;	/* READ_LAT + 7/4 */
		} else {
			/* Pure speculation */
			cpo = 0xb;
		}
	}

	ddr->timing_cfg_2 = (0
		| ((add_lat & 0x7) << 28)	/* ADD_LAT */
		| ((cpo & 0x1f) << 23)		/* CPO */
		| ((wr_lat & 0x7) << 19)	/* WR_LAT */
		| ((trtp_clk & 0x7) << 13)	/* RD_TO_PRE */
		| ((wr_data_delay & 0x7) << 10)	/* WR_DATA_DELAY */
		| ((cke_min_clk & 0x7) << 6)	/* CKE_PLS */
		| ((four_act & 0x1f) << 0)	/* FOUR_ACT */
		);

	debug("DDR: timing_cfg_2 = 0x%08x\n", ddr->timing_cfg_2);


	/*
	 * Determine the Mode Register Set.
	 *
	 * This is nominally part specific, but it appears to be
	 * consistent for all DDR I devices, and for all DDR II devices.
	 *
	 *     caslat must be programmed
	 *     burst length is always 4
	 *     burst type is sequential
	 *
	 * For DDR I:
	 *     operating mode is "normal"
	 *
	 * For DDR II:
	 *     other stuff
	 */

	mode_caslat = 0;

	/*
	 * Table lookup from DDR I or II Device Operation Specs.
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR) {
		if (1 <= caslat && caslat <= 4) {
			unsigned char mode_caslat_table[4] = {
				0x5,	/* 1.5 clocks */
				0x2,	/* 2.0 clocks */
				0x6,	/* 2.5 clocks */
				0x3	/* 3.0 clocks */
			};
			mode_caslat = mode_caslat_table[caslat - 1];
		} else {
			puts("DDR I: Only CAS Latencies of 1.5, 2.0, "
			     "2.5 and 3.0 clocks are supported.\n");
			return 0;
		}

	} else {
		if (2 <= caslat && caslat <= 5) {
			mode_caslat = caslat;
		} else {
			puts("DDR II: Only CAS Latencies of 2.0, 3.0, "
			     "4.0 and 5.0 clocks are supported.\n");
			return 0;
		}
	}

	/*
	 * Encoded Burst Lenght of 4.
	 */
	burst_len = 2;	/* Fiat. */

	if (spd.mem_type == SPD_MEMTYPE_DDR) {
		twr_auto_clk = 0;	/* Historical */
	} else {
		/*
		 * Determine tCK max in picos.  Grab tWR and convert to picos.
		 * Auto-precharge write recovery is:
		 *	WR = roundup(tWR_ns/tCKmax_ns).
		 *
		 * Ponder: Is twr_auto_clk different than twr_clk?
		 */
		tCKmax_ps = convert_bcd_tenths_to_cycle_time_ps(spd.tckmax);
		twr_auto_clk = (spd.twr * 250 + tCKmax_ps - 1) / tCKmax_ps;
	}


	/*
	 * Mode Reg in bits 16 ~ 31,
	 * Extended Mode Reg 1 in bits 0 ~ 15.
	 */
	mode_odt_enable = 0x0;			/* Default disabled */
	if (odt_wr_cfg || odt_rd_cfg) {
		/*
		 * Bits 6 and 2 in Extended MRS(1)
		 * Bit 2 == 0x04 ==  75 Ohm, with 2 DIMM modules.
		 * Bit 6 == 0x40 == 150 Ohm, with 1 DIMM module.
		 */
		mode_odt_enable = 0x40;		/* 150 Ohm */
	}

	ddr->sdram_mode =
		(0
		 | (add_lat << (16 + 3))	/* Additive Latency in EMRS1 */
		 | (mode_odt_enable << 16)	/* ODT Enable in EMRS1 */
		 | (twr_auto_clk << 9)		/* Write Recovery Autopre */
		 | (mode_caslat << 4)		/* caslat */
		 | (burst_len << 0)		/* Burst length */
		 );

	debug("DDR: sdram_mode = 0x%08x\n", ddr->sdram_mode);


	/*
	 * Clear EMRS2 and EMRS3.
	 */
	ddr->sdram_mode_2 = 0;
	debug("DDR: sdram_mode_2 = 0x%08x\n", ddr->sdram_mode_2);

	/*
	 * Determine Refresh Rate.
	 */
	refresh_clk = determine_refresh_rate(spd.refresh & 0x7);

	/*
	 * Set BSTOPRE to 0x100 for page mode
	 * If auto-charge is used, set BSTOPRE = 0
	 */
	ddr->sdram_interval =
		(0
		 | (refresh_clk & 0x3fff) << 16
		 | 0x100
		 );
	debug("DDR: sdram_interval = 0x%08x\n", ddr->sdram_interval);

	/*
	 * Is this an ECC DDR chip?
	 * But don't mess with it if the DDR controller will init mem.
	 */
#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
	if (spd.config == 0x02) {
		ddr->err_disable = 0x0000000d;
		ddr->err_sbe = 0x00ff0000;
	}
	debug("DDR: err_disable = 0x%08x\n", ddr->err_disable);
	debug("DDR: err_sbe = 0x%08x\n", ddr->err_sbe);
#endif

	asm("sync;isync;msync");
	udelay(500);

	/*
	 * SDRAM Cfg 2
	 */

	/*
	 * When ODT is enabled, Chap 9 suggests asserting ODT to
	 * internal IOs only during reads.
	 */
	odt_cfg = 0;
	if (odt_rd_cfg | odt_wr_cfg) {
		odt_cfg = 0x2;		/* ODT to IOs during reads */
	}

	/*
	 * Try to use differential DQS with DDR II.
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR) {
		dqs_cfg = 0;		/* No Differential DQS for DDR I */
	} else {
		dqs_cfg = 0x1;		/* Differential DQS for DDR II */
	}

#if defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
	/*
	 * Use the DDR controller to auto initialize memory.
	 */
	d_init = 1;
	ddr->sdram_data_init = CONFIG_MEM_INIT_VALUE;
	debug("DDR: ddr_data_init = 0x%08x\n", ddr->sdram_data_init);
#else
	/*
	 * Memory will be initialized via DMA, or not at all.
	 */
	d_init = 0;
#endif

	ddr->sdram_cfg_2 = (0
			    | (dqs_cfg << 26)	/* Differential DQS */
			    | (odt_cfg << 21)	/* ODT */
			    | (d_init << 4)	/* D_INIT auto init DDR */
			    );

	debug("DDR: sdram_cfg_2 = 0x%08x\n", ddr->sdram_cfg_2);


#ifdef MPC85xx_DDR_SDRAM_CLK_CNTL
	/*
	 * Setup the clock control.
	 * SDRAM_CLK_CNTL[0] = Source synchronous enable == 1
	 * SDRAM_CLK_CNTL[5-7] = Clock Adjust
	 *	0110	3/4 cycle late
	 *	0111	7/8 cycle late
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR)
		clk_adjust = 0x6;
	else
		clk_adjust = 0x7;

	ddr->sdram_clk_cntl = (0
			       | 0x80000000
			       | (clk_adjust << 23)
			       );
	debug("DDR: sdram_clk_cntl = 0x%08x\n", ddr->sdram_clk_cntl);
#endif

	/*
	 * Figure out the settings for the sdram_cfg register.
	 * Build up the entire register in 'sdram_cfg' before writing
	 * since the write into the register will actually enable the
	 * memory controller; all settings must be done before enabling.
	 *
	 * sdram_cfg[0]   = 1 (ddr sdram logic enable)
	 * sdram_cfg[1]   = 1 (self-refresh-enable)
	 * sdram_cfg[5:7] = (SDRAM type = DDR SDRAM)
	 *			010 DDR 1 SDRAM
	 *			011 DDR 2 SDRAM
	 */
	sdram_type = (spd.mem_type == SPD_MEMTYPE_DDR) ? 2 : 3;
	sdram_cfg = (0
		     | (1 << 31)		/* Enable */
		     | (1 << 30)		/* Self refresh */
		     | (sdram_type << 24)	/* SDRAM type */
		     );

	/*
	 * sdram_cfg[3] = RD_EN - registered DIMM enable
	 *   A value of 0x26 indicates micron registered DIMMS (micron.com)
	 */
	if (spd.mem_type == SPD_MEMTYPE_DDR && spd.mod_attr == 0x26) {
		sdram_cfg |= 0x10000000;	/* RD_EN */
	}

#if defined(CONFIG_DDR_ECC)
	/*
	 * If the user wanted ECC (enabled via sdram_cfg[2])
	 */
	if (spd.config == 0x02) {
		sdram_cfg |= 0x20000000;	/* ECC_EN */
	}
#endif

	/*
	 * REV1 uses 1T timing.
	 * REV2 may use 1T or 2T as configured by the user.
	 */
	{
		uint pvr = get_pvr();

		if (pvr != PVR_85xx_REV1) {
#if defined(CONFIG_DDR_2T_TIMING)
			/*
			 * Enable 2T timing by setting sdram_cfg[16].
			 */
			sdram_cfg |= 0x8000;	/* 2T_EN */
#endif
		}
	}

	/*
	 * 200 painful micro-seconds must elapse between
	 * the DDR clock setup and the DDR config enable.
	 */
	udelay(200);

	/*
	 * Go!
	 */
	ddr->sdram_cfg = sdram_cfg;

	asm("sync;isync;msync");
	udelay(500);

	debug("DDR: sdram_cfg = 0x%08x\n", ddr->sdram_cfg);


#if defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
	/*
	 * Poll until memory is initialized.
	 * 512 Meg at 400 might hit this 200 times or so.
	 */
	while ((ddr->sdram_cfg_2 & (d_init << 4)) != 0) {
		udelay(1000);
	}
#endif


	/*
	 * Figure out memory size in Megabytes.
	 */
	memsize = n_ranks * rank_density / 0x100000;

	/*
	 * Establish Local Access Window and TLB mappings for DDR memory.
	 */
	memsize = setup_laws_and_tlbs(memsize);
	if (memsize == 0) {
		return 0;
	}

	return memsize * 1024 * 1024;
}
962
963
964/*
965 * Setup Local Access Window and TLB1 mappings for the requested
966 * amount of memory. Returns the amount of memory actually mapped
967 * (usually the original request size), or 0 on error.
968 */
969
970static unsigned int
971setup_laws_and_tlbs(unsigned int memsize)
972{
973 volatile immap_t *immap = (immap_t *)CFG_IMMR;
974 volatile ccsr_local_ecm_t *ecm = &immap->im_local_ecm;
975 unsigned int tlb_size;
976 unsigned int law_size;
977 unsigned int ram_tlb_index;
978 unsigned int ram_tlb_address;
wdenk9aea9532004-08-01 23:02:45 +0000979
980 /*
981 * Determine size of each TLB1 entry.
982 */
983 switch (memsize) {
984 case 16:
985 case 32:
986 tlb_size = BOOKE_PAGESZ_16M;
987 break;
988 case 64:
989 case 128:
990 tlb_size = BOOKE_PAGESZ_64M;
991 break;
992 case 256:
993 case 512:
994 case 1024:
995 case 2048:
996 tlb_size = BOOKE_PAGESZ_256M;
997 break;
998 default:
Jon Loeligerd9b94f22005-07-25 14:05:07 -0500999 puts("DDR: only 16M,32M,64M,128M,256M,512M,1G and 2G are supported.\n");
1000
1001 /*
1002 * The memory was not able to be mapped.
1003 */
wdenk9aea9532004-08-01 23:02:45 +00001004 return 0;
1005 break;
1006 }
1007
1008 /*
1009 * Configure DDR TLB1 entries.
1010 * Starting at TLB1 8, use no more than 8 TLB1 entries.
1011 */
1012 ram_tlb_index = 8;
1013 ram_tlb_address = (unsigned int)CFG_DDR_SDRAM_BASE;
1014 while (ram_tlb_address < (memsize * 1024 * 1024)
1015 && ram_tlb_index < 16) {
1016 mtspr(MAS0, TLB1_MAS0(1, ram_tlb_index, 0));
1017 mtspr(MAS1, TLB1_MAS1(1, 1, 0, 0, tlb_size));
1018 mtspr(MAS2, TLB1_MAS2(E500_TLB_EPN(ram_tlb_address),
1019 0, 0, 0, 0, 0, 0, 0, 0));
1020 mtspr(MAS3, TLB1_MAS3(E500_TLB_RPN(ram_tlb_address),
1021 0, 0, 0, 0, 0, 1, 0, 1, 0, 1));
1022 asm volatile("isync;msync;tlbwe;isync");
1023
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001024 debug("DDR: MAS0=0x%08x\n", TLB1_MAS0(1, ram_tlb_index, 0));
1025 debug("DDR: MAS1=0x%08x\n", TLB1_MAS1(1, 1, 0, 0, tlb_size));
1026 debug("DDR: MAS2=0x%08x\n",
wdenk9aea9532004-08-01 23:02:45 +00001027 TLB1_MAS2(E500_TLB_EPN(ram_tlb_address),
1028 0, 0, 0, 0, 0, 0, 0, 0));
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001029 debug("DDR: MAS3=0x%08x\n",
wdenk9aea9532004-08-01 23:02:45 +00001030 TLB1_MAS3(E500_TLB_RPN(ram_tlb_address),
1031 0, 0, 0, 0, 0, 1, 0, 1, 0, 1));
1032
1033 ram_tlb_address += (0x1000 << ((tlb_size - 1) * 2));
1034 ram_tlb_index++;
1035 }
1036
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001037
1038 /*
1039 * First supported LAW size is 16M, at LAWAR_SIZE_16M == 23. Fnord.
1040 */
1041 law_size = 19 + __ilog2(memsize);
1042
wdenk9aea9532004-08-01 23:02:45 +00001043 /*
1044 * Set up LAWBAR for all of DDR.
1045 */
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001046 ecm->lawbar1 = ((CFG_DDR_SDRAM_BASE >> 12) & 0xfffff);
1047 ecm->lawar1 = (LAWAR_EN
1048 | LAWAR_TRGT_IF_DDR
1049 | (LAWAR_SIZE & law_size));
1050 debug("DDR: LAWBAR1=0x%08x\n", ecm->lawbar1);
1051 debug("DDR: LARAR1=0x%08x\n", ecm->lawar1);
wdenk9aea9532004-08-01 23:02:45 +00001052
1053 /*
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001054 * Confirm that the requested amount of memory was mapped.
wdenk9aea9532004-08-01 23:02:45 +00001055 */
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001056 return memsize;
wdenk42d1f032003-10-15 23:53:47 +00001057}
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001058
wdenk42d1f032003-10-15 23:53:47 +00001059#endif /* CONFIG_SPD_EEPROM */
wdenk9aea9532004-08-01 23:02:45 +00001060
1061
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001062#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
1063
wdenk9aea9532004-08-01 23:02:45 +00001064/*
1065 * Initialize all of memory for ECC, then enable errors.
1066 */
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001067
wdenk9aea9532004-08-01 23:02:45 +00001068void
1069ddr_enable_ecc(unsigned int dram_size)
1070{
1071 uint *p = 0;
1072 uint i = 0;
1073 volatile immap_t *immap = (immap_t *)CFG_IMMR;
1074 volatile ccsr_ddr_t *ddr= &immap->im_ddr;
1075
1076 dma_init();
1077
1078 for (*p = 0; p < (uint *)(8 * 1024); p++) {
1079 if (((unsigned int)p & 0x1f) == 0) {
1080 ppcDcbz((unsigned long) p);
1081 }
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001082 *p = (unsigned int)CONFIG_MEM_INIT_VALUE;
wdenk9aea9532004-08-01 23:02:45 +00001083 if (((unsigned int)p & 0x1c) == 0x1c) {
1084 ppcDcbf((unsigned long) p);
1085 }
1086 }
1087
Jon Loeliger1fd56992006-10-10 17:19:03 -05001088 dma_xfer((uint *)0x002000, 0x002000, (uint *)0); /* 8K */
1089 dma_xfer((uint *)0x004000, 0x004000, (uint *)0); /* 16K */
1090 dma_xfer((uint *)0x008000, 0x008000, (uint *)0); /* 32K */
1091 dma_xfer((uint *)0x010000, 0x010000, (uint *)0); /* 64K */
1092 dma_xfer((uint *)0x020000, 0x020000, (uint *)0); /* 128k */
1093 dma_xfer((uint *)0x040000, 0x040000, (uint *)0); /* 256k */
1094 dma_xfer((uint *)0x080000, 0x080000, (uint *)0); /* 512k */
1095 dma_xfer((uint *)0x100000, 0x100000, (uint *)0); /* 1M */
1096 dma_xfer((uint *)0x200000, 0x200000, (uint *)0); /* 2M */
1097 dma_xfer((uint *)0x400000, 0x400000, (uint *)0); /* 4M */
wdenk9aea9532004-08-01 23:02:45 +00001098
1099 for (i = 1; i < dram_size / 0x800000; i++) {
1100 dma_xfer((uint *)(0x800000*i), 0x800000, (uint *)0);
1101 }
1102
1103 /*
1104 * Enable errors for ECC.
1105 */
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001106 debug("DMA DDR: err_disable = 0x%08x\n", ddr->err_disable);
wdenk9aea9532004-08-01 23:02:45 +00001107 ddr->err_disable = 0x00000000;
1108 asm("sync;isync;msync");
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001109 debug("DMA DDR: err_disable = 0x%08x\n", ddr->err_disable);
wdenk9aea9532004-08-01 23:02:45 +00001110}
Jon Loeligerd9b94f22005-07-25 14:05:07 -05001111
1112#endif /* CONFIG_DDR_ECC && ! CONFIG_ECC_INIT_VIA_DDRCONTROLLER */