// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments K3 clock driver
 *
 * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
 *	Tero Kristo <t-kristo@ti.com>
 */
8
Tero Kristob4a72a92021-06-11 11:45:14 +03009#include <dm.h>
10#include <errno.h>
11#include <soc.h>
12#include <clk-uclass.h>
Udit Kumar89118092023-09-21 22:30:38 +053013#include <k3-avs.h>
Tero Kristob4a72a92021-06-11 11:45:14 +030014#include "k3-clk.h"
15
/* Allowed K3 PLL VCO output range (Hz) and maximum post-divider value */
#define PLL_MIN_FREQ	800000000
#define PLL_MAX_FREQ	3200000000UL
#define PLL_MAX_DIV	127
19
/**
 * struct clk_map - mapping from dev/clk id tuples towards physical clocks
 * @dev_id: device ID for the clock
 * @clk_id: clock ID for the clock
 * @clk: pointer to the registered clock entry for the mapping
 */
struct clk_map {
	/* NOTE(review): clk_add_map() takes a u32 dev_id; values are
	 * silently truncated to 16 bits on store - confirm IDs fit.
	 */
	u16 dev_id;
	u32 clk_id;
	struct clk *clk;
};
31
/**
 * struct ti_clk_data - clock controller information structure
 * @map: mapping from dev/clk id tuples to physical clock entries
 *       (array allocated and populated during probe)
 * @size: number of entries in the map
 */
struct ti_clk_data {
	struct clk_map *map;
	int size;
};
41
42static ulong osc_freq;
43
44static void clk_add_map(struct ti_clk_data *data, struct clk *clk,
45 u32 dev_id, u32 clk_id)
46{
47 struct clk_map *map;
48
49 debug("%s: added clk=%p, data=%p, dev=%d, clk=%d\n", __func__,
50 clk, data, dev_id, clk_id);
51 if (!clk)
52 return;
53
54 map = data->map + data->size++;
55
56 map->dev_id = dev_id;
57 map->clk_id = clk_id;
58 map->clk = clk;
59}
60
61static const struct soc_attr ti_k3_soc_clk_data[] = {
62#if IS_ENABLED(CONFIG_SOC_K3_J721E)
63 {
64 .family = "J721E",
65 .data = &j721e_clk_platdata,
66 },
67 {
68 .family = "J7200",
69 .data = &j7200_clk_platdata,
70 },
David Huang55bdc202022-01-25 20:56:33 +053071#elif CONFIG_SOC_K3_J721S2
72 {
73 .family = "J721S2",
74 .data = &j721s2_clk_platdata,
75 },
Tero Kristob4a72a92021-06-11 11:45:14 +030076#endif
Suman Anna4b8903a2022-05-25 13:38:43 +053077#ifdef CONFIG_SOC_K3_AM625
78 {
79 .family = "AM62X",
80 .data = &am62x_clk_platdata,
81 },
82#endif
Bryan Brattlofb6cbcd62022-11-03 19:13:56 -050083#ifdef CONFIG_SOC_K3_AM62A7
84 {
85 .family = "AM62AX",
86 .data = &am62ax_clk_platdata,
87 },
88#endif
Apurva Nandan95209992024-02-24 01:51:44 +053089#ifdef CONFIG_SOC_K3_J784S4
90 {
91 .family = "J784S4",
92 .data = &j784s4_clk_platdata,
93 },
94#endif
Bryan Brattlof1bcc7a42024-03-12 15:20:21 -050095#ifdef CONFIG_SOC_K3_AM62P5
96 {
97 .family = "AM62PX",
98 .data = &am62px_clk_platdata,
99 },
100#endif
Tero Kristob4a72a92021-06-11 11:45:14 +0300101 { /* sentinel */ }
102};
103
104static int ti_clk_probe(struct udevice *dev)
105{
106 struct ti_clk_data *data = dev_get_priv(dev);
107 struct clk *clk;
108 const char *name;
109 const struct clk_data *ti_clk_data;
110 int i, j;
111 const struct soc_attr *soc_match_data;
112 const struct ti_k3_clk_platdata *pdata;
113
114 debug("%s(dev=%p)\n", __func__, dev);
115
116 soc_match_data = soc_device_match(ti_k3_soc_clk_data);
117 if (!soc_match_data)
118 return -ENODEV;
119
120 pdata = (const struct ti_k3_clk_platdata *)soc_match_data->data;
121
122 data->map = kcalloc(pdata->soc_dev_clk_data_cnt, sizeof(*data->map),
123 GFP_KERNEL);
124 data->size = 0;
125
126 for (i = 0; i < pdata->clk_list_cnt; i++) {
127 ti_clk_data = &pdata->clk_list[i];
128
129 switch (ti_clk_data->type) {
130 case CLK_TYPE_FIXED_RATE:
131 name = ti_clk_data->clk.fixed_rate.name;
132 clk = clk_register_fixed_rate(NULL,
133 name,
134 ti_clk_data->clk.fixed_rate.rate);
135 break;
136 case CLK_TYPE_DIV:
137 name = ti_clk_data->clk.div.name;
138 clk = clk_register_divider(NULL, name,
139 ti_clk_data->clk.div.parent,
140 ti_clk_data->clk.div.flags,
141 map_physmem(ti_clk_data->clk.div.reg, 0, MAP_NOCACHE),
142 ti_clk_data->clk.div.shift,
143 ti_clk_data->clk.div.width,
Suman Annacfd50df2021-09-07 17:16:58 -0500144 ti_clk_data->clk.div.div_flags);
Tero Kristob4a72a92021-06-11 11:45:14 +0300145 break;
146 case CLK_TYPE_MUX:
147 name = ti_clk_data->clk.mux.name;
148 clk = clk_register_mux(NULL, name,
149 ti_clk_data->clk.mux.parents,
150 ti_clk_data->clk.mux.num_parents,
151 ti_clk_data->clk.mux.flags,
152 map_physmem(ti_clk_data->clk.mux.reg, 0, MAP_NOCACHE),
153 ti_clk_data->clk.mux.shift,
154 ti_clk_data->clk.mux.width,
155 0);
156 break;
157 case CLK_TYPE_PLL:
158 name = ti_clk_data->clk.pll.name;
159 clk = clk_register_ti_pll(name,
160 ti_clk_data->clk.pll.parent,
161 map_physmem(ti_clk_data->clk.pll.reg, 0, MAP_NOCACHE));
162
163 if (!osc_freq)
164 osc_freq = clk_get_rate(clk_get_parent(clk));
165 break;
166 default:
167 name = NULL;
168 clk = NULL;
169 printf("WARNING: %s has encountered unknown clk type %d\n",
170 __func__, ti_clk_data->type);
171 }
172
173 if (clk && ti_clk_data->default_freq)
174 clk_set_rate(clk, ti_clk_data->default_freq);
175
176 if (clk && name) {
177 for (j = 0; j < pdata->soc_dev_clk_data_cnt; j++) {
178 if (!strcmp(name, pdata->soc_dev_clk_data[j].clk_name)) {
179 clk_add_map(data, clk, pdata->soc_dev_clk_data[j].dev_id,
180 pdata->soc_dev_clk_data[j].clk_id);
181 }
182 }
183 }
184 }
185
186 return 0;
187}
188
189static int _clk_cmp(u32 dev_id, u32 clk_id, const struct clk_map *map)
190{
191 if (map->dev_id == dev_id && map->clk_id == clk_id)
192 return 0;
193 if (map->dev_id > dev_id ||
194 (map->dev_id == dev_id && map->clk_id > clk_id))
195 return -1;
196 return 1;
197}
198
199static int bsearch(u32 dev_id, u32 clk_id, struct clk_map *map, int num)
200{
201 int result;
202 int idx;
203
204 for (idx = 0; idx < num; idx++) {
205 result = _clk_cmp(dev_id, clk_id, &map[idx]);
206
207 if (result == 0)
208 return idx;
209 }
210
211 return -ENOENT;
212}
213
214static int ti_clk_of_xlate(struct clk *clk,
215 struct ofnode_phandle_args *args)
216{
217 struct ti_clk_data *data = dev_get_priv(clk->dev);
218 int idx;
219
220 debug("%s(clk=%p, args_count=%d [0]=%d [1]=%d)\n", __func__, clk,
221 args->args_count, args->args[0], args->args[1]);
222
223 if (args->args_count != 2) {
224 debug("Invalid args_count: %d\n", args->args_count);
225 return -EINVAL;
226 }
227
228 if (!data->size)
229 return -EPROBE_DEFER;
230
231 idx = bsearch(args->args[0], args->args[1], data->map, data->size);
232 if (idx < 0)
233 return idx;
234
235 clk->id = idx;
236
237 return 0;
238}
239
240static ulong ti_clk_get_rate(struct clk *clk)
241{
242 struct ti_clk_data *data = dev_get_priv(clk->dev);
243 struct clk *clkp = data->map[clk->id].clk;
244
245 return clk_get_rate(clkp);
246}
247
/**
 * ti_clk_set_rate() - set_rate hook for the K3 clock controller
 * @clk: clock handle; clk->id indexes the dev/clk map
 * @rate: requested rate in Hz
 *
 * Sets the rate of the physical clock backing @clk. If that clock has
 * no set_rate op, the request is propagated up the parent chain while
 * accumulating the parent/child ratio in @div. If the achieved rate is
 * still far off target, the parent PLL is re-tuned.
 *
 * Return: the rate actually set (scaled back by @div on the PLL path),
 * or -ENOSYS if no clock in the chain can set a rate.
 */
static ulong ti_clk_set_rate(struct clk *clk, ulong rate)
{
	struct ti_clk_data *data = dev_get_priv(clk->dev);
	struct clk *clkp = data->map[clk->id].clk;
	int div = 1;
	ulong child_rate;
	const struct clk_ops *ops;
	ulong new_rate, rem;
	ulong diff, new_diff;
	int freq_scale_up = rate >= ti_clk_get_rate(clk) ? 1 : 0;

	/* When scaling up, notify the AVS driver before raising the rate */
	if (IS_ENABLED(CONFIG_K3_AVS0) && freq_scale_up)
		k3_avs_notify_freq(data->map[clk->id].dev_id,
				   data->map[clk->id].clk_id, rate);
	/*
	 * We must propagate rate change to parent if current clock type
	 * does not allow setting it.
	 */
	while (clkp) {
		ops = clkp->dev->driver->ops;
		if (ops->set_rate)
			break;

		/*
		 * Store child rate so we can calculate the clock rate
		 * that must be passed to parent
		 */
		child_rate = clk_get_rate(clkp);
		clkp = clk_get_parent(clkp);
		if (clkp) {
			debug("%s: propagating rate change to parent %s, rate=%u.\n",
			      __func__, clkp->dev->name, (u32)rate / div);
			div *= clk_get_rate(clkp) / child_rate;
		}
	}

	if (!clkp)
		return -ENOSYS;

	/* NOTE(review): this value is never read below - dead store? */
	child_rate = clk_get_rate(clkp);

	new_rate = clk_set_rate(clkp, rate / div);

	diff = abs(new_rate - rate / div);

	debug("%s: clk=%s, div=%d, rate=%u, new_rate=%u, diff=%u\n", __func__,
	      clkp->dev->name, div, (u32)rate, (u32)new_rate, (u32)diff);

	/*
	 * If the new rate differs by 50% of the target,
	 * modify parent. This handles typical cases where we have a hsdiv
	 * following directly a PLL
	 */

	if (diff > rate / div / 2) {
		ulong pll_tgt;
		int pll_div = 0;

		clk = clkp;

		debug("%s: propagating rate change to parent, rate=%u.\n",
		      __func__, (u32)rate / div);

		clkp = clk_get_parent(clkp);

		if (rate > osc_freq) {
			/* Rate in the upper half of the PLL range: no divider needed */
			if (rate > PLL_MAX_FREQ / 2 && rate < PLL_MAX_FREQ) {
				pll_tgt = rate;
				pll_div = 1;
			} else {
				/* Find a post-divider putting the PLL in range */
				for (pll_div = 2; pll_div < PLL_MAX_DIV; pll_div++) {
					pll_tgt = rate / div * pll_div;
					if (pll_tgt >= PLL_MIN_FREQ && pll_tgt <= PLL_MAX_FREQ)
						break;
				}
			}
		} else {
			/* Slow target: run the PLL at the oscillator rate and divide down */
			pll_tgt = osc_freq;
			pll_div = rate / div / osc_freq;
		}

		debug("%s: pll_tgt=%u, rate=%u, div=%u\n", __func__,
		      (u32)pll_tgt, (u32)rate, pll_div);

		clk_set_rate(clkp, pll_tgt);

		return clk_set_rate(clk, rate / div) * div;
	}

	/*
	 * If the new rate differs by at least 5% of the target,
	 * we must check for rounding error in a divider, so try
	 * set rate with rate + (parent_freq % rate).
	 */

	if (diff > rate / div / 20) {
		u64 parent_freq = clk_get_parent_rate(clkp);

		/* NOTE(review): modulo uses 'rate', not 'rate / div' - confirm intended */
		rem = parent_freq % rate;
		new_rate = clk_set_rate(clkp, (rate / div) + rem);
		new_diff = abs(new_rate - rate / div);

		if (new_diff > diff) {
			/* The adjusted request was worse; restore the plain one */
			new_rate = clk_set_rate(clkp, rate / div);
		} else {
			debug("%s: Using better rate %lu that gives diff %lu\n",
			      __func__, new_rate, new_diff);
		}
	}

	/* When scaling down, notify the AVS driver after lowering the rate */
	if (IS_ENABLED(CONFIG_K3_AVS0) && !freq_scale_up)
		k3_avs_notify_freq(data->map[clk->id].dev_id,
				   data->map[clk->id].clk_id, rate);

	return new_rate;
}
364
365static int ti_clk_set_parent(struct clk *clk, struct clk *parent)
366{
367 struct ti_clk_data *data = dev_get_priv(clk->dev);
368 struct clk *clkp = data->map[clk->id].clk;
369 struct clk *parentp = data->map[parent->id].clk;
370
371 return clk_set_parent(clkp, parentp);
372}
373
374static int ti_clk_enable(struct clk *clk)
375{
376 struct ti_clk_data *data = dev_get_priv(clk->dev);
377 struct clk *clkp = data->map[clk->id].clk;
378
379 return clk_enable(clkp);
380}
381
382static int ti_clk_disable(struct clk *clk)
383{
384 struct ti_clk_data *data = dev_get_priv(clk->dev);
385 struct clk *clkp = data->map[clk->id].clk;
386
387 return clk_disable(clkp);
388}
389
/* Device tree compatible strings handled by this driver */
static const struct udevice_id ti_clk_of_match[] = {
	{ .compatible = "ti,k2g-sci-clk" },
	{ /* sentinel */ },
};
394
/* Clock uclass operations, all dispatching through the dev/clk id map */
static const struct clk_ops ti_clk_ops = {
	.of_xlate = ti_clk_of_xlate,
	.set_rate = ti_clk_set_rate,
	.get_rate = ti_clk_get_rate,
	.enable = ti_clk_enable,
	.disable = ti_clk_disable,
	.set_parent = ti_clk_set_parent,
};
403
/* Driver model registration for the K3 clock controller */
U_BOOT_DRIVER(ti_clk) = {
	.name = "ti-clk",
	.id = UCLASS_CLK,
	.of_match = ti_clk_of_match,
	.probe = ti_clk_probe,
	.priv_auto = sizeof(struct ti_clk_data),
	.ops = &ti_clk_ops,
};