/*
 * Copyright (c) 2010-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/flow.h>
#include <asm/arch/pinmux.h>
#include <asm/arch/tegra.h>
#include <asm/arch-tegra/clk_rst.h>
#include <asm/arch-tegra/pmc.h>
#include "../tegra-common/cpu.h"

/* Tegra114-specific CPU init code */
static void enable_cpu_power_rail(void)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 reg;

	debug("enable_cpu_power_rail entry\n");

	/* un-tristate PWR_I2C SCL/SDA, rest of the defaults are correct */
	pinmux_tristate_disable(PINGRP_PWR_I2C_SCL);
	pinmux_tristate_disable(PINGRP_PWR_I2C_SDA);

	/*
	 * Set CPUPWRGOOD_TIMER - APB clock is 1/2 of SCLK (102MHz),
	 * set it for 25ms (102MHz * 0.025 = 2,550,000 = 0x26E8F0).
	 */
	reg = 0x26E8F0;
	writel(reg, &pmc->pmc_cpupwrgood_timer);

	/* Set polarity to 0 (normal) and enable CPUPWRREQ_OE */
	clrbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_POL);
	setbits_le32(&pmc->pmc_cntrl, CPUPWRREQ_OE);

	/*
	 * Set CLK_RST_CONTROLLER_CPU_SOFTRST_CTRL2_0_CAR2PMC_CPU_ACK_WIDTH
	 * to 408 to satisfy the requirement of having at least 16 CPU clock
	 * cycles before clamp removal.
	 */

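	/* CAR2PMC_CPU_ACK_WIDTH occupies the low 12 bits (hence the 0xFFF mask) */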
	clrbits_le32(&clkrst->crc_cpu_softrst_ctrl2, 0xFFF);
	setbits_le32(&clkrst->crc_cpu_softrst_ctrl2, 408);
}

static void enable_cpu_clocks(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 reg;

	debug("enable_cpu_clocks entry\n");

	/* Wait for PLL-X to lock */
	do {
		reg = readl(&clkrst->crc_pll_simple[SIMPLE_PLLX].pll_base);
	} while ((reg & (1 << 27)) == 0);	/* PLL_BASE bit 27 = LOCK */

	/* Wait until all clocks are stable */
	udelay(PLL_STABILIZATION_DELAY);

	writel(CCLK_BURST_POLICY, &clkrst->crc_cclk_brst_pol);
	writel(SUPER_CCLK_DIVIDER, &clkrst->crc_super_cclk_div);

	/* Always enable the main CPU complex clocks */
	clock_enable(PERIPH_ID_CPU);
	clock_enable(PERIPH_ID_CPULP);
	clock_enable(PERIPH_ID_CPUG);
}

static void remove_cpu_resets(void)
{
	struct clk_rst_ctlr *clkrst = (struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	u32 reg;

	debug("remove_cpu_resets entry\n");
	/* Take the slow non-CPU partition out of reset */
	reg = readl(&clkrst->crc_rst_cpulp_cmplx_clr);
	writel((reg | CLR_NONCPURESET), &clkrst->crc_rst_cpulp_cmplx_clr);

	/* Take the fast non-CPU partition out of reset */
	reg = readl(&clkrst->crc_rst_cpug_cmplx_clr);
	writel((reg | CLR_NONCPURESET), &clkrst->crc_rst_cpug_cmplx_clr);

	/* Clear the SW-controlled reset of the slow cluster */
	reg = readl(&clkrst->crc_rst_cpulp_cmplx_clr);
	reg |= (CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0);
	writel(reg, &clkrst->crc_rst_cpulp_cmplx_clr);

	/* Clear the SW-controlled reset of the fast cluster */
	reg = readl(&clkrst->crc_rst_cpug_cmplx_clr);
	reg |= (CLR_CPURESET0 | CLR_DBGRESET0 | CLR_CORERESET0 | CLR_CXRESET0);
	reg |= (CLR_CPURESET1 | CLR_DBGRESET1 | CLR_CORERESET1 | CLR_CXRESET1);
	reg |= (CLR_CPURESET2 | CLR_DBGRESET2 | CLR_CORERESET2 | CLR_CXRESET2);
	reg |= (CLR_CPURESET3 | CLR_DBGRESET3 | CLR_CORERESET3 | CLR_CXRESET3);
	writel(reg, &clkrst->crc_rst_cpug_cmplx_clr);
}

/**
 * The T114 requires some special clock initialization, including setting up
 * the DVC I2C, turning on MSELECT and selecting the G CPU cluster.
 */
void t114_init_clocks(void)
{
	struct clk_rst_ctlr *clkrst =
		(struct clk_rst_ctlr *)NV_PA_CLK_RST_BASE;
	struct flow_ctlr *flow = (struct flow_ctlr *)NV_PA_FLOW_BASE;
	u32 val;

	debug("t114_init_clocks entry\n");

	/* Set active CPU cluster to G */
	clrbits_le32(&flow->cluster_control, 1);

	/*
	 * Switch the system clock to PLLP_OUT4 (108 MHz); the AVP will now
	 * run at 108 MHz. This is glitch-free, since only the source is
	 * changed; no special precautions are needed.
	 */
	val = (SCLK_SOURCE_PLLP_OUT4 << SCLK_SWAKEUP_FIQ_SOURCE_SHIFT) |
		(SCLK_SOURCE_PLLP_OUT4 << SCLK_SWAKEUP_IRQ_SOURCE_SHIFT) |
		(SCLK_SOURCE_PLLP_OUT4 << SCLK_SWAKEUP_RUN_SOURCE_SHIFT) |
		(SCLK_SOURCE_PLLP_OUT4 << SCLK_SWAKEUP_IDLE_SOURCE_SHIFT) |
		(SCLK_SYS_STATE_RUN << SCLK_SYS_STATE_SHIFT);
	writel(val, &clkrst->crc_sclk_brst_pol);

	writel(SUPER_SCLK_ENB_MASK, &clkrst->crc_super_sclk_div);

	debug("Setting up PLLX\n");
	init_pllx();

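	/* AHB_RATE = 1 (HCLK = SCLK/2), APB_RATE = 0 (PCLK = HCLK) */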
	val = (1 << CLK_SYS_RATE_AHB_RATE_SHIFT);
	writel(val, &clkrst->crc_clk_sys_rate);

	/* Enable clocks to required peripherals. TBD - minimize this list */
	debug("Enabling clocks\n");

	clock_set_enable(PERIPH_ID_CACHE2, 1);
	clock_set_enable(PERIPH_ID_GPIO, 1);
	clock_set_enable(PERIPH_ID_TMR, 1);
	clock_set_enable(PERIPH_ID_RTC, 1);
	clock_set_enable(PERIPH_ID_CPU, 1);
	clock_set_enable(PERIPH_ID_EMC, 1);
	clock_set_enable(PERIPH_ID_I2C5, 1);
	clock_set_enable(PERIPH_ID_FUSE, 1);
	clock_set_enable(PERIPH_ID_PMC, 1);
	clock_set_enable(PERIPH_ID_APBDMA, 1);
	clock_set_enable(PERIPH_ID_MEM, 1);
	clock_set_enable(PERIPH_ID_IRAMA, 1);
	clock_set_enable(PERIPH_ID_IRAMB, 1);
	clock_set_enable(PERIPH_ID_IRAMC, 1);
	clock_set_enable(PERIPH_ID_IRAMD, 1);
	clock_set_enable(PERIPH_ID_CORESIGHT, 1);
	clock_set_enable(PERIPH_ID_MSELECT, 1);
	clock_set_enable(PERIPH_ID_EMC1, 1);
	clock_set_enable(PERIPH_ID_MC1, 1);
	clock_set_enable(PERIPH_ID_DVFS, 1);

	/* Switch MSELECT clock to PLLP (00) */
	clock_ll_set_source(PERIPH_ID_MSELECT, 0);

	/*
	 * Request a clock divider for 102MHz: with the 408MHz PLLP base this
	 * sets up the MSELECT clock as 102MHz.
	 */
	clock_ll_set_source_divisor(PERIPH_ID_MSELECT, 0,
		(NVBL_PLLP_KHZ / 102000));

	/* I2C5 (DVC) gets CLK_M and a divisor of 17 */
	clock_ll_set_source_divisor(PERIPH_ID_I2C5, 3, 16);

	/* Give clocks time to stabilize */
	udelay(1000);

	/* Take required peripherals out of reset */
	debug("Taking periphs out of reset\n");
	reset_set_enable(PERIPH_ID_CACHE2, 0);
	reset_set_enable(PERIPH_ID_GPIO, 0);
	reset_set_enable(PERIPH_ID_TMR, 0);
	reset_set_enable(PERIPH_ID_COP, 0);
	reset_set_enable(PERIPH_ID_EMC, 0);
	reset_set_enable(PERIPH_ID_I2C5, 0);
	reset_set_enable(PERIPH_ID_FUSE, 0);
	reset_set_enable(PERIPH_ID_APBDMA, 0);
	reset_set_enable(PERIPH_ID_MEM, 0);
	reset_set_enable(PERIPH_ID_CORESIGHT, 0);
	reset_set_enable(PERIPH_ID_MSELECT, 0);
	reset_set_enable(PERIPH_ID_EMC1, 0);
	reset_set_enable(PERIPH_ID_MC1, 0);
	reset_set_enable(PERIPH_ID_DVFS, 0);

	debug("t114_init_clocks exit\n");
}

static int is_partition_powered(u32 mask)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	u32 reg;

	/* Get power gate status */
	reg = readl(&pmc->pmc_pwrgate_status);
	return (reg & mask) == mask;
}

static int is_clamp_enabled(u32 mask)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	u32 reg;

	/* Get clamp status. TODO: Add pmc_clamp_status alias to pmc.h */
	reg = readl(&pmc->pmc_pwrgate_timer_on);
	return (reg & mask) == mask;
}

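/*
 * Power up a PMC partition if it is not already powered.
 * @status: bit mask to poll in the power gate/clamp status registers
 * @partid: partition ID to program into the PWRGATE_TOGGLE register
 */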
static void power_partition(u32 status, u32 partid)
{
	struct pmc_ctlr *pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;

	debug("%s: status = %08X, part ID = %08X\n", __func__, status, partid);
	/* Is the partition already on? */
	if (!is_partition_powered(status)) {
		/* No, toggle the partition power state (OFF -> ON) */
		debug("power_partition, toggling state\n");
		clrbits_le32(&pmc->pmc_pwrgate_toggle, 0x1F);
		setbits_le32(&pmc->pmc_pwrgate_toggle, partid);
		setbits_le32(&pmc->pmc_pwrgate_toggle, START_CP);

		/* Wait for the power to come up */
		while (!is_partition_powered(status))
			;

		/* Wait for the clamp status to be cleared */
		while (is_clamp_enabled(status))
			;

		/* Give I/O signals time to stabilize */
		udelay(IO_STABILIZATION_DELAY);
	}
}

void powerup_cpus(void)
{
	debug("powerup_cpus entry\n");

	/* We boot to the fast cluster */
	debug("powerup_cpus entry: G cluster\n");
	/* Power up the fast cluster rail partition */
	power_partition(CRAIL, CRAILID);

	/* Power up the fast cluster non-CPU partition */
	power_partition(C0NC, C0NCID);

	/* Power up the fast cluster CPU0 partition */
	power_partition(CE0, CE0ID);
}

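/*
 * start_cpu - clock, power and un-reset the main (G cluster) CPU complex,
 * then point it at the given reset vector (if non-zero).
 */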
void start_cpu(u32 reset_vector)
{
	debug("start_cpu entry, reset_vector = %x\n", reset_vector);

	t114_init_clocks();

	/* Enable VDD_CPU */
	enable_cpu_power_rail();

	/* Get the CPU(s) running */
	enable_cpu_clocks();

	/* Enable CoreSight */
	clock_enable_coresight(1);

	/* Take CPU(s) out of reset */
	remove_cpu_resets();

	/*
	 * Set the entry point for CPU execution from reset,
	 * if it's a non-zero value.
	 */
	if (reset_vector)
		writel(reset_vector, EXCEP_VECTOR_CPU_RESET_VECTOR);

	/* If the CPU(s) don't already have power, power 'em up */
	powerup_cpus();
}