/*
 * Copyright (C) 2013 Altera Corporation <www.altera.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/clock_manager.h>

DECLARE_GLOBAL_DATA_PTR;

static const struct socfpga_clock_manager *clock_manager_base =
		(struct socfpga_clock_manager *)SOCFPGA_CLKMGR_ADDRESS;

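/*
 * Wait until all lock bits in @mask read back as set for ten consecutive
 * polls of the "inter" register, so a momentary lock indication is not
 * mistaken for a stable PLL lock.
 */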
static void cm_wait_for_lock(uint32_t mask)
{
	register uint32_t inter_val;
	uint32_t retry = 0;
	do {
		inter_val = readl(&clock_manager_base->inter) & mask;
		if (inter_val == mask)
			retry++;
		else
			retry = 0;
		if (retry >= 10)
			break;
	} while (1);
}

/* Poll the clock manager FSM until its busy bit clears. */
static void cm_wait_for_fsm(void)
{
	while (readl(&clock_manager_base->stat) & CLKMGR_STAT_BUSY)
		;
}

/*
 * Write the bypass register, which requires a poll of the
 * busy bit afterwards.
 */
static void cm_write_bypass(uint32_t val)
{
	writel(val, &clock_manager_base->bypass);
	cm_wait_for_fsm();
}

/* Write the ctrl register, which requires a poll of the busy bit afterwards. */
static void cm_write_ctrl(uint32_t val)
{
	writel(val, &clock_manager_base->ctrl);
	cm_wait_for_fsm();
}

/* Write a clock register that has phase information. */
static void cm_write_with_phase(uint32_t value,
				uint32_t reg_address, uint32_t mask)
{
	/* poll until phase is zero */
	while (readl(reg_address) & mask)
		;

	writel(value, reg_address);

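	/* poll again until the requested phase adjustment has completed */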
	while (readl(reg_address) & mask)
		;
}

/*
 * Setup clocks while making no assumptions about previous state of the clocks.
 *
 * Start by being paranoid and gate all sw managed clocks.
 * Put all PLLs in bypass.
 * Put all PLL VCO registers back to reset value (bandgap power down).
 * Put peripheral and main PLL source to reset value to avoid a glitch.
 * Delay 5 us.
 * Deassert bandgap power down and set numerator and denominator.
 * Start 7 us timer.
 * Set internal dividers.
 * Wait for 7 us timer.
 * Enable PLLs.
 * Set external dividers while PLLs are locking.
 * Wait for PLL lock.
 * Assert/deassert outreset all.
 * Take all PLLs out of bypass.
 * Clear safe mode.
 * Set source for main and peripheral clocks.
 * Ungate clocks.
 */

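/*
 * A typical call site looks like the sketch below; the helper that supplies
 * the board-specific cm_config is an assumption here and is not defined in
 * this file:
 *
 *	const struct cm_config * const cfg = cm_get_default_config();
 *
 *	cm_basic_init(cfg);
 *
 * The cm_config values normally come from the board's Quartus-generated
 * handoff headers.
 */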
void cm_basic_init(const struct cm_config * const cfg)
{
	unsigned long end;

	/* Start by being paranoid and gate all sw managed clocks */

	/*
	 * We need to disable the NAND clock first, and then perform another
	 * APB access before gating off the rest of the peripheral clocks.
	 */
	writel(~CLKMGR_PERPLLGRP_EN_NANDCLK_MASK &
	       readl(&clock_manager_base->per_pll.en),
	       &clock_manager_base->per_pll.en);

	/* DO NOT GATE OFF DEBUG CLOCKS & BRIDGE CLOCKS */
	writel(CLKMGR_MAINPLLGRP_EN_DBGTIMERCLK_MASK |
	       CLKMGR_MAINPLLGRP_EN_DBGTRACECLK_MASK |
	       CLKMGR_MAINPLLGRP_EN_DBGCLK_MASK |
	       CLKMGR_MAINPLLGRP_EN_DBGATCLK_MASK |
	       CLKMGR_MAINPLLGRP_EN_S2FUSER0CLK_MASK |
	       CLKMGR_MAINPLLGRP_EN_L4MPCLK_MASK,
	       &clock_manager_base->main_pll.en);

	writel(0, &clock_manager_base->sdr_pll.en);

	/* now we can gate off the rest of the peripheral clocks */
	writel(0, &clock_manager_base->per_pll.en);

	/* Put all PLLs in bypass */
	cm_write_bypass(CLKMGR_BYPASS_PERPLL | CLKMGR_BYPASS_SDRPLL |
			CLKMGR_BYPASS_MAINPLL);

	/* Put all PLL VCO registers back to their reset value. */
	writel(CLKMGR_MAINPLLGRP_VCO_RESET_VALUE &
	       ~CLKMGR_MAINPLLGRP_VCO_REGEXTSEL_MASK,
	       &clock_manager_base->main_pll.vco);
	writel(CLKMGR_PERPLLGRP_VCO_RESET_VALUE &
	       ~CLKMGR_PERPLLGRP_VCO_REGEXTSEL_MASK,
	       &clock_manager_base->per_pll.vco);
	writel(CLKMGR_SDRPLLGRP_VCO_RESET_VALUE &
	       ~CLKMGR_SDRPLLGRP_VCO_REGEXTSEL_MASK,
	       &clock_manager_base->sdr_pll.vco);

	/*
	 * The clocks to the flash devices and the L4_MAIN clocks can
	 * glitch when coming out of safe mode if their source values
	 * are different from their reset value. So the trick is to
	 * put them back to their reset state, and change the input
	 * after exiting safe mode but before ungating the clocks.
	 */
	writel(CLKMGR_PERPLLGRP_SRC_RESET_VALUE,
	       &clock_manager_base->per_pll.src);
	writel(CLKMGR_MAINPLLGRP_L4SRC_RESET_VALUE,
	       &clock_manager_base->main_pll.l4src);

	/* read back for the required 5 us delay. */
	readl(&clock_manager_base->main_pll.vco);
	readl(&clock_manager_base->per_pll.vco);
	readl(&clock_manager_base->sdr_pll.vco);

	/*
	 * We made sure bandgap power down was asserted for 5 us. Now deassert
	 * BG PWR DN while writing the numerator and denominator.
	 */
	writel(cfg->main_vco_base, &clock_manager_base->main_pll.vco);
	writel(cfg->peri_vco_base, &clock_manager_base->per_pll.vco);
	writel(cfg->sdram_vco_base, &clock_manager_base->sdr_pll.vco);

	/*
	 * Time starts here. Must wait 7 us from
	 * BGPWRDN_SET(0) to VCO_ENABLE_SET(1).
	 */
	end = timer_get_us() + 7;

	/* main mpu */
	writel(cfg->mpuclk, &clock_manager_base->main_pll.mpuclk);

	/* main main clock */
	writel(cfg->mainclk, &clock_manager_base->main_pll.mainclk);

	/* main for dbg */
	writel(cfg->dbgatclk, &clock_manager_base->main_pll.dbgatclk);

	/* main for cfgs2fuser0clk */
	writel(cfg->cfg2fuser0clk,
	       &clock_manager_base->main_pll.cfgs2fuser0clk);

	/* Peri emac0 50 MHz default to RMII */
	writel(cfg->emac0clk, &clock_manager_base->per_pll.emac0clk);

	/* Peri emac1 50 MHz default to RMII */
	writel(cfg->emac1clk, &clock_manager_base->per_pll.emac1clk);

	/* Peri QSPI */
	writel(cfg->mainqspiclk, &clock_manager_base->main_pll.mainqspiclk);

	writel(cfg->perqspiclk, &clock_manager_base->per_pll.perqspiclk);

	/* Peri pernandsdmmcclk */
	writel(cfg->mainnandsdmmcclk,
	       &clock_manager_base->main_pll.mainnandsdmmcclk);

	writel(cfg->pernandsdmmcclk,
	       &clock_manager_base->per_pll.pernandsdmmcclk);

	/* Peri perbaseclk */
	writel(cfg->perbaseclk, &clock_manager_base->per_pll.perbaseclk);

	/* Peri s2fuser1clk */
	writel(cfg->s2fuser1clk, &clock_manager_base->per_pll.s2fuser1clk);

	/* 7 us must have elapsed before we can enable the VCO */
	while (timer_get_us() < end)
		;

	/* Enable VCOs */
	/* main PLL VCO */
	writel(cfg->main_vco_base | CLKMGR_MAINPLLGRP_VCO_EN,
	       &clock_manager_base->main_pll.vco);

	/* peripheral PLL VCO */
	writel(cfg->peri_vco_base | CLKMGR_MAINPLLGRP_VCO_EN,
	       &clock_manager_base->per_pll.vco);

	/* sdram PLL VCO */
	writel(cfg->sdram_vco_base | CLKMGR_MAINPLLGRP_VCO_EN,
	       &clock_manager_base->sdr_pll.vco);

	/* L3 MP and L3 SP */
	writel(cfg->maindiv, &clock_manager_base->main_pll.maindiv);

	writel(cfg->dbgdiv, &clock_manager_base->main_pll.dbgdiv);

	writel(cfg->tracediv, &clock_manager_base->main_pll.tracediv);

	/* L4 MP, L4 SP, can0, and can1 */
	writel(cfg->perdiv, &clock_manager_base->per_pll.div);

	writel(cfg->gpiodiv, &clock_manager_base->per_pll.gpiodiv);

#define LOCKED_MASK \
	(CLKMGR_INTER_SDRPLLLOCKED_MASK | \
	CLKMGR_INTER_PERPLLLOCKED_MASK | \
	CLKMGR_INTER_MAINPLLLOCKED_MASK)

	cm_wait_for_lock(LOCKED_MASK);

	/* write the sdram clock counters before toggling outreset all */
	writel(cfg->ddrdqsclk & CLKMGR_SDRPLLGRP_DDRDQSCLK_CNT_MASK,
	       &clock_manager_base->sdr_pll.ddrdqsclk);

	writel(cfg->ddr2xdqsclk & CLKMGR_SDRPLLGRP_DDR2XDQSCLK_CNT_MASK,
	       &clock_manager_base->sdr_pll.ddr2xdqsclk);

	writel(cfg->ddrdqclk & CLKMGR_SDRPLLGRP_DDRDQCLK_CNT_MASK,
	       &clock_manager_base->sdr_pll.ddrdqclk);

	writel(cfg->s2fuser2clk & CLKMGR_SDRPLLGRP_S2FUSER2CLK_CNT_MASK,
	       &clock_manager_base->sdr_pll.s2fuser2clk);

	/*
	 * after locking, but before taking out of bypass,
	 * assert/deassert outresetall
	 */
	uint32_t mainvco = readl(&clock_manager_base->main_pll.vco);

	/* assert main outresetall */
	writel(mainvco | CLKMGR_MAINPLLGRP_VCO_OUTRESETALL_MASK,
	       &clock_manager_base->main_pll.vco);

	uint32_t periphvco = readl(&clock_manager_base->per_pll.vco);

	/* assert peripheral outresetall */
	writel(periphvco | CLKMGR_PERPLLGRP_VCO_OUTRESETALL_MASK,
	       &clock_manager_base->per_pll.vco);

	/* assert sdram outresetall */
	writel(cfg->sdram_vco_base | CLKMGR_MAINPLLGRP_VCO_EN |
	       CLKMGR_SDRPLLGRP_VCO_OUTRESETALL,
	       &clock_manager_base->sdr_pll.vco);

	/* deassert main outresetall */
	writel(mainvco & ~CLKMGR_MAINPLLGRP_VCO_OUTRESETALL_MASK,
	       &clock_manager_base->main_pll.vco);

	/* deassert peripheral outresetall */
	writel(periphvco & ~CLKMGR_PERPLLGRP_VCO_OUTRESETALL_MASK,
	       &clock_manager_base->per_pll.vco);

	/* deassert sdram outresetall */
	writel(cfg->sdram_vco_base | CLKMGR_MAINPLLGRP_VCO_EN,
	       &clock_manager_base->sdr_pll.vco);

	/*
	 * now that we've toggled outreset all, all the clocks
	 * are aligned nicely, so we can change any phase.
	 */
	cm_write_with_phase(cfg->ddrdqsclk,
			    (uint32_t)&clock_manager_base->sdr_pll.ddrdqsclk,
			    CLKMGR_SDRPLLGRP_DDRDQSCLK_PHASE_MASK);

	/* SDRAM DDR2XDQSCLK */
	cm_write_with_phase(cfg->ddr2xdqsclk,
			    (uint32_t)&clock_manager_base->sdr_pll.ddr2xdqsclk,
			    CLKMGR_SDRPLLGRP_DDR2XDQSCLK_PHASE_MASK);

	cm_write_with_phase(cfg->ddrdqclk,
			    (uint32_t)&clock_manager_base->sdr_pll.ddrdqclk,
			    CLKMGR_SDRPLLGRP_DDRDQCLK_PHASE_MASK);

	cm_write_with_phase(cfg->s2fuser2clk,
			    (uint32_t)&clock_manager_base->sdr_pll.s2fuser2clk,
			    CLKMGR_SDRPLLGRP_S2FUSER2CLK_PHASE_MASK);

	/* Take all three PLLs out of bypass when safe mode is cleared. */
	cm_write_bypass(0);

	/* clear safe mode (the SAFEMODE bit is write-one-to-clear) */
	cm_write_ctrl(readl(&clock_manager_base->ctrl) | CLKMGR_CTRL_SAFEMODE);

	/*
	 * now that safe mode is clear and the clocks are gated, it is safe
	 * to change the source mux for the flashes and the L4_MAIN
	 */
	writel(cfg->persrc, &clock_manager_base->per_pll.src);
	writel(cfg->l4src, &clock_manager_base->main_pll.l4src);

	/* Now ungate non-hw-managed clocks */
	writel(~0, &clock_manager_base->main_pll.en);
	writel(~0, &clock_manager_base->per_pll.en);
	writel(~0, &clock_manager_base->sdr_pll.en);

	/* Clear the loss of lock bits (write 1 to clear) */
	writel(CLKMGR_INTER_SDRPLLLOST_MASK | CLKMGR_INTER_PERPLLLOST_MASK |
	       CLKMGR_INTER_MAINPLLLOST_MASK,
	       &clock_manager_base->inter);
}

static unsigned int cm_get_main_vco_clk_hz(void)
{
	uint32_t reg, clock;

	/* get the main VCO clock */
	reg = readl(&clock_manager_base->main_pll.vco);
	clock = cm_get_osc_clk_hz(1);
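	/* f_vco = f_eosc1 * (numer + 1) / (denom + 1) */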
	clock /= ((reg & CLKMGR_MAINPLLGRP_VCO_DENOM_MASK) >>
		  CLKMGR_MAINPLLGRP_VCO_DENOM_OFFSET) + 1;
	clock *= ((reg & CLKMGR_MAINPLLGRP_VCO_NUMER_MASK) >>
		  CLKMGR_MAINPLLGRP_VCO_NUMER_OFFSET) + 1;

	return clock;
}

static unsigned int cm_get_per_vco_clk_hz(void)
{
	uint32_t reg, clock = 0;

	/* identify PER PLL clock source */
	reg = readl(&clock_manager_base->per_pll.vco);
	reg = (reg & CLKMGR_PERPLLGRP_VCO_SSRC_MASK) >>
	      CLKMGR_PERPLLGRP_VCO_SSRC_OFFSET;
	if (reg == CLKMGR_VCO_SSRC_EOSC1)
		clock = cm_get_osc_clk_hz(1);
	else if (reg == CLKMGR_VCO_SSRC_EOSC2)
		clock = cm_get_osc_clk_hz(2);
	else if (reg == CLKMGR_VCO_SSRC_F2S)
		clock = cm_get_f2s_per_ref_clk_hz();

	/* get the PER VCO clock */
	reg = readl(&clock_manager_base->per_pll.vco);
	clock /= ((reg & CLKMGR_PERPLLGRP_VCO_DENOM_MASK) >>
		  CLKMGR_PERPLLGRP_VCO_DENOM_OFFSET) + 1;
	clock *= ((reg & CLKMGR_PERPLLGRP_VCO_NUMER_MASK) >>
		  CLKMGR_PERPLLGRP_VCO_NUMER_OFFSET) + 1;

	return clock;
}

unsigned long cm_get_mpu_clk_hz(void)
{
	uint32_t reg, clock;

	clock = cm_get_main_vco_clk_hz();

	/* get the MPU clock */
	reg = readl(&clock_manager_base->altera.mpuclk);
	clock /= (reg + 1);
	reg = readl(&clock_manager_base->main_pll.mpuclk);
	clock /= (reg + 1);
	return clock;
}

unsigned long cm_get_sdram_clk_hz(void)
{
	uint32_t reg, clock = 0;

	/* identify SDRAM PLL clock source */
	reg = readl(&clock_manager_base->sdr_pll.vco);
	reg = (reg & CLKMGR_SDRPLLGRP_VCO_SSRC_MASK) >>
	      CLKMGR_SDRPLLGRP_VCO_SSRC_OFFSET;
	if (reg == CLKMGR_VCO_SSRC_EOSC1)
		clock = cm_get_osc_clk_hz(1);
	else if (reg == CLKMGR_VCO_SSRC_EOSC2)
		clock = cm_get_osc_clk_hz(2);
	else if (reg == CLKMGR_VCO_SSRC_F2S)
		clock = cm_get_f2s_sdr_ref_clk_hz();

	/* get the SDRAM VCO clock */
	reg = readl(&clock_manager_base->sdr_pll.vco);
	clock /= ((reg & CLKMGR_SDRPLLGRP_VCO_DENOM_MASK) >>
		  CLKMGR_SDRPLLGRP_VCO_DENOM_OFFSET) + 1;
	clock *= ((reg & CLKMGR_SDRPLLGRP_VCO_NUMER_MASK) >>
		  CLKMGR_SDRPLLGRP_VCO_NUMER_OFFSET) + 1;

	/* get the SDRAM (DDR_DQS) clock */
	reg = readl(&clock_manager_base->sdr_pll.ddrdqsclk);
	reg = (reg & CLKMGR_SDRPLLGRP_DDRDQSCLK_CNT_MASK) >>
	      CLKMGR_SDRPLLGRP_DDRDQSCLK_CNT_OFFSET;
	clock /= (reg + 1);

	return clock;
}

unsigned int cm_get_l4_sp_clk_hz(void)
{
	uint32_t reg, clock = 0;

	/* identify the source of the L4 SP clock */
	reg = readl(&clock_manager_base->main_pll.l4src);
	reg = (reg & CLKMGR_MAINPLLGRP_L4SRC_L4SP) >>
	      CLKMGR_MAINPLLGRP_L4SRC_L4SP_OFFSET;

	if (reg == CLKMGR_L4_SP_CLK_SRC_MAINPLL) {
		clock = cm_get_main_vco_clk_hz();

		/* get the clock prior to the L4 SP divider (main clk) */
		reg = readl(&clock_manager_base->altera.mainclk);
		clock /= (reg + 1);
		reg = readl(&clock_manager_base->main_pll.mainclk);
		clock /= (reg + 1);
	} else if (reg == CLKMGR_L4_SP_CLK_SRC_PERPLL) {
		clock = cm_get_per_vco_clk_hz();

		/* get the clock prior to the L4 SP divider (periph_base_clk) */
		reg = readl(&clock_manager_base->per_pll.perbaseclk);
		clock /= (reg + 1);
	}

	/* get the L4 SP clock, which is supplied to the UART */
	reg = readl(&clock_manager_base->main_pll.maindiv);
	reg = (reg & CLKMGR_MAINPLLGRP_MAINDIV_L4SPCLK_MASK) >>
	      CLKMGR_MAINPLLGRP_MAINDIV_L4SPCLK_OFFSET;
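	/* the L4 SP divider field holds log2 of the divisor */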
	clock = clock / (1 << reg);

	return clock;
}

unsigned int cm_get_mmc_controller_clk_hz(void)
{
	uint32_t reg, clock = 0;

	/* identify the source of the MMC clock */
	reg = readl(&clock_manager_base->per_pll.src);
	reg = (reg & CLKMGR_PERPLLGRP_SRC_SDMMC_MASK) >>
	      CLKMGR_PERPLLGRP_SRC_SDMMC_OFFSET;

	if (reg == CLKMGR_SDMMC_CLK_SRC_F2S) {
		clock = cm_get_f2s_per_ref_clk_hz();
	} else if (reg == CLKMGR_SDMMC_CLK_SRC_MAIN) {
		clock = cm_get_main_vco_clk_hz();

		/* get the SDMMC clock */
		reg = readl(&clock_manager_base->main_pll.mainnandsdmmcclk);
		clock /= (reg + 1);
	} else if (reg == CLKMGR_SDMMC_CLK_SRC_PER) {
		clock = cm_get_per_vco_clk_hz();

		/* get the SDMMC clock */
		reg = readl(&clock_manager_base->per_pll.pernandsdmmcclk);
		clock /= (reg + 1);
	}

	/* further divide by 4, as there is a fixed divider in the wrapper */
	clock /= 4;
	return clock;
}

unsigned int cm_get_qspi_controller_clk_hz(void)
{
	uint32_t reg, clock = 0;

	/* identify the source of the QSPI clock */
	reg = readl(&clock_manager_base->per_pll.src);
	reg = (reg & CLKMGR_PERPLLGRP_SRC_QSPI_MASK) >>
	      CLKMGR_PERPLLGRP_SRC_QSPI_OFFSET;

	if (reg == CLKMGR_QSPI_CLK_SRC_F2S) {
		clock = cm_get_f2s_per_ref_clk_hz();
	} else if (reg == CLKMGR_QSPI_CLK_SRC_MAIN) {
		clock = cm_get_main_vco_clk_hz();

		/* get the qspi clock */
		reg = readl(&clock_manager_base->main_pll.mainqspiclk);
		clock /= (reg + 1);
	} else if (reg == CLKMGR_QSPI_CLK_SRC_PER) {
		clock = cm_get_per_vco_clk_hz();

		/* get the qspi clock */
		reg = readl(&clock_manager_base->per_pll.perqspiclk);
		clock /= (reg + 1);
	}

	return clock;
}

unsigned int cm_get_spi_controller_clk_hz(void)
{
	uint32_t reg, clock = 0;

	clock = cm_get_per_vco_clk_hz();

	/* get the clock prior to the L4 SP divider (periph_base_clk) */
	reg = readl(&clock_manager_base->per_pll.perbaseclk);
	clock /= (reg + 1);

	return clock;
}

static void cm_print_clock_quick_summary(void)
{
	printf("MPU       %10ld kHz\n", cm_get_mpu_clk_hz() / 1000);
	printf("DDR       %10ld kHz\n", cm_get_sdram_clk_hz() / 1000);
	printf("EOSC1       %8d kHz\n", cm_get_osc_clk_hz(1) / 1000);
	printf("EOSC2       %8d kHz\n", cm_get_osc_clk_hz(2) / 1000);
	printf("F2S_SDR_REF %8d kHz\n", cm_get_f2s_sdr_ref_clk_hz() / 1000);
	printf("F2S_PER_REF %8d kHz\n", cm_get_f2s_per_ref_clk_hz() / 1000);
	printf("MMC         %8d kHz\n", cm_get_mmc_controller_clk_hz() / 1000);
	printf("QSPI        %8d kHz\n", cm_get_qspi_controller_clk_hz() / 1000);
	printf("UART        %8d kHz\n", cm_get_l4_sp_clk_hz() / 1000);
	printf("SPI         %8d kHz\n", cm_get_spi_controller_clk_hz() / 1000);
}

int set_cpu_clk_info(void)
{
	/* Calculate the clock frequencies required for drivers */
	cm_get_l4_sp_clk_hz();
	cm_get_mmc_controller_clk_hz();

	gd->bd->bi_arm_freq = cm_get_mpu_clk_hz() / 1000000;
	gd->bd->bi_dsp_freq = 0;
	gd->bd->bi_ddr_freq = cm_get_sdram_clk_hz() / 1000000;

	return 0;
}

int do_showclocks(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	cm_print_clock_quick_summary();
	return 0;
}

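/*
 * Console usage (illustrative):
 *	=> clocks
 * prints the quick clock summary above.
 */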
U_BOOT_CMD(
	clocks, CONFIG_SYS_MAXARGS, 1, do_showclocks,
	"display clocks",
	""
);