// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments' K3 R5 Remoteproc driver
 *
 * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <dm.h>
#include <remoteproc.h>
#include <errno.h>
#include <clk.h>
#include <reset.h>
#include <asm/io.h>
#include <linux/kernel.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include "ti_sci_proc.h"

/*
 * R5F's view of this address can either be for ATCM or BTCM with the other
 * at address 0x0 based on loczrama signal.
 */
#define K3_R5_TCM_DEV_ADDR	0x41010000

/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN			0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN			0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP			0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT			0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN			0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE		0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN			0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN			0x00002000
#define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR		0x10000000

/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT		0x00000001

/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE			0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI			0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED		0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED	0x00000100

#define NR_CORES	2

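/*
 * The two R5F cores in a cluster can run either independently of each other
 * (split mode) or as a single logical core (lockstep mode). Most paths in
 * this driver branch on the mode selected below.
 */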
enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,
	CLUSTER_MODE_LOCKSTEP,
};

/**
 * struct k3_r5f_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5f_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

/**
 * struct k3_r5f_core - K3 R5 core structure
 * @dev: cached device pointer
 * @cluster: pointer to the parent cluster
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @mem: Array of available internal memories
 * @num_mems: Number of available memories
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 * @in_use: flag to tell if the core is already in use
 */
struct k3_r5f_core {
	struct udevice *dev;
	struct k3_r5f_cluster *cluster;
	struct reset_ctl reset;
	struct ti_sci_proc tsp;
	struct k3_r5f_mem *mem;
	int num_mems;
	u32 atcm_enable;
	u32 btcm_enable;
	u32 loczrama;
	bool in_use;
};

/**
 * struct k3_r5f_cluster - K3 R5F Cluster structure
 * @mode: Mode to configure the cluster - split or lockstep
 * @cores: Array of pointers to R5 cores within the cluster
 */
struct k3_r5f_cluster {
	enum cluster_mode mode;
	struct k3_r5f_core *cores[NR_CORES];
};

static bool is_primary_core(struct k3_r5f_core *core)
{
	return core == core->cluster->cores[0];
}

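/*
 * In lockstep mode the TI-SCI processor handles of both cores are requested
 * (and released) together, since the cluster is controlled as one unit; in
 * split mode only the handle of the core being operated on is taken.
 */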
static int k3_r5f_proc_request(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;
	int i, ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		for (i = 0; i < NR_CORES; i++) {
			ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
			if (ret)
				goto proc_release;
		}
	} else {
		ret = ti_sci_proc_request(&core->tsp);
		if (ret)
			return ret;
	}

	return 0;

proc_release:
	/* Release only the cores that were successfully requested */
	while (--i >= 0)
		ti_sci_proc_release(&cluster->cores[i]->tsp);

	return ret;
}

static void k3_r5f_proc_release(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;
	int i;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
		for (i = 0; i < NR_CORES; i++)
			ti_sci_proc_release(&cluster->cores[i]->tsp);
	else
		ti_sci_proc_release(&core->tsp);
}

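/*
 * Power up both cores and then deassert their local resets, walking the
 * cores in descending order so that core1 is released before core0. Any
 * failure unrolls the steps already taken.
 */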
static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
{
	int ret, c;

	dev_dbg(cluster->cores[0]->dev, "%s\n", __func__);

	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
		if (ret)
			goto unroll_module_reset;
	}

	/* deassert local reset on all applicable cores */
	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = reset_deassert(&cluster->cores[c]->reset);
		if (ret)
			goto unroll_local_reset;
	}

	return 0;

unroll_local_reset:
	while (c < NR_CORES) {
		reset_assert(&cluster->cores[c]->reset);
		c++;
	}
	c = 0;
unroll_module_reset:
	while (c < NR_CORES) {
		ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
		c++;
	}

	return ret;
}

static int k3_r5f_split_release(struct k3_r5f_core *core)
{
	int ret;

	dev_dbg(core->dev, "%s\n", __func__);

	ret = ti_sci_proc_power_domain_on(&core->tsp);
	if (ret) {
		dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = reset_deassert(&core->reset);
	if (ret) {
		dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
			ret);
		if (ti_sci_proc_power_domain_off(&core->tsp))
			dev_warn(core->dev, "module-reset assert back failed\n");
	}

	return ret;
}

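/*
 * Power up the core(s) and release their resets so that the TCMs become
 * accessible for image loading. Execution is still gated by the
 * R5_CORE_HALT control flag, which is only cleared in k3_r5f_start().
 */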
static int k3_r5f_prepare(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
		ret = k3_r5f_lockstep_release(cluster);
	else
		ret = k3_r5f_split_release(core);

	if (ret)
		dev_err(dev, "Unable to enable cores for TCM loading %d\n",
			ret);

	return ret;
}

static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;

	if (core->in_use) {
		dev_err(core->dev, "Invalid op: Trying to load/start on already running core %d\n",
			core->tsp.proc_id);
		return -EINVAL;
	}

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
		printf("Secondary core is not probed in this cluster\n");
		return -EAGAIN;
	}

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
		dev_err(core->dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
			core->tsp.proc_id);
		return -EINVAL;
	}

	if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
		if (!core->cluster->cores[0]->in_use) {
			dev_err(core->dev, "Invalid seq: Enable primary core before loading secondary core\n");
			return -EINVAL;
		}
	}

	return 0;
}

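/*
 * Typical usage from the U-Boot shell looks something like the following
 * (addresses and id are illustrative only):
 *
 *   => rproc init
 *   => rproc load <id> 0x82000000 ${filesize}
 *   => rproc start <id>
 *
 * which ends up in k3_r5f_load() and k3_r5f_start() below.
 */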
/**
 * k3_r5f_load() - Load up the Remote processor image
 * @dev:	rproc device pointer
 * @addr:	Address at which image is available
 * @size:	size of the image
 *
 * Return: 0 if all goes good, else appropriate error code.
 */
static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	u32 boot_vector;
	int ret;

	dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);

	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	ret = k3_r5f_prepare(dev);
	if (ret) {
		dev_err(dev, "R5f prepare failed for core %d\n",
			core->tsp.proc_id);
		goto proc_release;
	}

	/* Zero out TCMs so that ECC can be effective on all TCM addresses */
	if (core->atcm_enable)
		memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
	if (core->btcm_enable)
		memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);

	ret = rproc_elf_load_image(dev, addr, size);
	if (ret < 0) {
		dev_err(dev, "Loading elf failed %d\n", ret);
		goto proc_release;
	}

	boot_vector = rproc_elf_get_boot_addr(dev, addr);

	dev_dbg(dev, "%s: Boot vector = 0x%x\n", __func__, boot_vector);

	ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);

proc_release:
	k3_r5f_proc_release(core);

	return ret;
}

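/*
 * Core halt/run are implemented purely through the R5_CORE_HALT processor
 * control flag over TI-SCI; no resets are toggled here.
 */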
static int k3_r5f_core_halt(struct k3_r5f_core *core)
{
	int ret;

	ret = ti_sci_proc_set_control(&core->tsp,
				      PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
	if (ret)
		dev_err(core->dev, "Core %d failed to stop\n",
			core->tsp.proc_id);

	return ret;
}

static int k3_r5f_core_run(struct k3_r5f_core *core)
{
	int ret;

	ret = ti_sci_proc_set_control(&core->tsp,
				      0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
	if (ret) {
		dev_err(core->dev, "Core %d failed to start\n",
			core->tsp.proc_id);
		return ret;
	}

	return 0;
}

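/*
 * Note: in lockstep mode the cores are released from halt in descending
 * order (core1 first, core0 last), the reverse of the order in which
 * k3_r5f_stop() halts them.
 */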
/**
 * k3_r5f_start() - Start the remote processor
 * @dev:	rproc device pointer
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int k3_r5f_start(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret, c;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			for (c = NR_CORES - 1; c >= 0; c--) {
				ret = k3_r5f_core_run(cluster->cores[c]);
				if (ret)
					goto unroll_core_run;
			}
		} else {
			dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
				core->tsp.proc_id);
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		ret = k3_r5f_core_run(core);
		if (ret)
			goto proc_release;
	}

	core->in_use = true;

	k3_r5f_proc_release(core);
	return 0;

unroll_core_run:
	while (c < NR_CORES) {
		k3_r5f_core_halt(cluster->cores[c]);
		c++;
	}
proc_release:
	k3_r5f_proc_release(core);

	return ret;
}

static int k3_r5f_split_reset(struct k3_r5f_core *core)
{
	int ret = 0;

	dev_dbg(core->dev, "%s\n", __func__);

	if (reset_assert(&core->reset))
		ret = -EINVAL;

	if (ti_sci_proc_power_domain_off(&core->tsp))
		ret = -EINVAL;

	return ret;
}

static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
{
	int ret = 0, c;

	dev_dbg(cluster->cores[0]->dev, "%s\n", __func__);

	for (c = 0; c < NR_CORES; c++)
		if (reset_assert(&cluster->cores[c]->reset))
			ret = -EINVAL;

	/* disable PSC modules on all applicable cores */
	for (c = 0; c < NR_CORES; c++)
		if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
			ret = -EINVAL;

	return ret;
}

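/*
 * Undo k3_r5f_prepare(): assert the local resets and turn the cores' power
 * domains back off. Called from k3_r5f_stop().
 */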
static int k3_r5f_unprepare(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core))
			ret = k3_r5f_lockstep_reset(cluster);
	} else {
		ret = k3_r5f_split_reset(core);
	}

	if (ret)
		dev_warn(dev, "Unable to disable cores %d\n", ret);

	return 0;
}

static int k3_r5f_stop(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int c, ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	core->in_use = false;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			for (c = 0; c < NR_CORES; c++)
				k3_r5f_core_halt(cluster->cores[c]);
		} else {
			dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		k3_r5f_core_halt(core);
	}

	ret = k3_r5f_unprepare(dev);
proc_release:
	k3_r5f_proc_release(core);
	return ret;
}

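/*
 * Translate a remote-processor device address into a CPU-accessible pointer.
 * Addresses inside the ATCM/BTCM windows (either the bus address or the R5
 * view of the TCM) resolve to the previously ioremapped TCM regions;
 * anything else is assumed to be DDR and is mapped 1:1.
 */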
static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	ulong mem_size;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	if (!size)
		return NULL;

	for (i = 0; i < core->num_mems; i++) {
		bus_addr = core->mem[i].bus_addr;
		dev_addr = core->mem[i].dev_addr;
		mem_size = core->mem[i].size;

		if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
			offset = da - bus_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
			offset = da - dev_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* Assume it is DDR region and return da */
	return map_physmem(da, size, MAP_NOCACHE);
}

static int k3_r5f_init(struct udevice *dev)
{
	return 0;
}

static int k3_r5f_reset(struct udevice *dev)
{
	return 0;
}

static const struct dm_rproc_ops k3_r5f_rproc_ops = {
	.init = k3_r5f_init,
	.reset = k3_r5f_reset,
	.start = k3_r5f_start,
	.stop = k3_r5f_stop,
	.load = k3_r5f_load,
	.device_to_virt = k3_r5f_da_to_va,
};

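/*
 * One-time processor configuration done at probe: halt the core, select
 * lockstep or split operation, program the ATCM/BTCM enables and the TCM
 * that appears at address 0x0 (loczrama), and force ARM state by clearing
 * the TEINIT flag. The boot vector itself is programmed later at load time.
 */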
static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;
	u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
	u64 boot_vec = 0;
	int ret;

	dev_dbg(core->dev, "%s\n", __func__);

	ret = ti_sci_proc_request(&core->tsp);
	if (ret < 0)
		return ret;

	/* Do not touch boot vector now. Load will take care of it. */
	clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;

	ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
	if (ret)
		goto out;

	/* Sanity check for Lockstep mode */
	if (cluster->mode == CLUSTER_MODE_LOCKSTEP && is_primary_core(core) &&
	    !(sts & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED)) {
		dev_err(core->dev, "LockStep mode not permitted on this device\n");
		ret = -EINVAL;
		goto out;
	}

	/* Primary core only configuration */
	if (is_primary_core(core)) {
		/* always enable ARM mode */
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
		if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
			set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
		else
			clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
	}

	if (core->atcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;

	if (core->btcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;

	if (core->loczrama)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;

	ret = k3_r5f_core_halt(core);
	if (ret)
		goto out;

	ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
out:
	ti_sci_proc_release(&core->tsp);
	return ret;
}

static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
{
	u32 ids[2];
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(tsp->sci)) {
		dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
		return PTR_ERR(tsp->sci);
	}

	ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
	if (ret) {
		dev_err(dev, "Proc IDs not populated %d\n", ret);
		return ret;
	}

	tsp->ops = &tsp->sci->ops.proc_ops;
	tsp->proc_id = ids[0];
	tsp->host_id = ids[1];
	tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
					   TI_SCI_RESOURCE_NULL);
	if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
		dev_err(dev, "Device ID not populated\n");
		return -ENODEV;
	}

	return 0;
}

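/*
 * The per-core properties consumed below and in ti_sci_proc_of_to_priv()
 * come from a device tree node along these lines (property names match the
 * code; addresses and numeric values are illustrative only):
 *
 *	r5f@41000000 {
 *		compatible = "ti,am654-r5f";
 *		reg = <...>, <...>;
 *		reg-names = "atcm", "btcm";
 *		ti,sci = <&dmsc>;
 *		ti,sci-dev-id = <...>;
 *		ti,sci-proc-ids = <0x01 0xff>;
 *		resets = <...>;
 *		atcm-enable = <1>;
 *		btcm-enable = <1>;
 *		loczrama = <1>;
 *	};
 */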
static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
{
	int ret;

	dev_dbg(core->dev, "%s\n", __func__);

	core->atcm_enable = dev_read_u32_default(core->dev, "atcm-enable", 0);
	core->btcm_enable = dev_read_u32_default(core->dev, "btcm-enable", 1);
	core->loczrama = dev_read_u32_default(core->dev, "loczrama", 1);

	ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
	if (ret)
		return ret;

	ret = reset_get_by_index(core->dev, 0, &core->reset);
	if (ret) {
		dev_err(core->dev, "Reset lines not available: %d\n", ret);
		return ret;
	}

	return 0;
}

static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
{
	static const char * const mem_names[] = {"atcm", "btcm"};
	struct udevice *dev = core->dev;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	core->num_mems = ARRAY_SIZE(mem_names);
	core->mem = calloc(core->num_mems, sizeof(*core->mem));
	if (!core->mem)
		return -ENOMEM;

	for (i = 0; i < core->num_mems; i++) {
		core->mem[i].bus_addr = dev_read_addr_size_name(dev,
						mem_names[i],
						(fdt_addr_t *)&core->mem[i].size);
		if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
			dev_err(dev, "%s bus address not found\n",
				mem_names[i]);
			return -EINVAL;
		}
		core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
						    core->mem[i].size,
						    MAP_NOCACHE);

		if (!strcmp(mem_names[i], "atcm")) {
			core->mem[i].dev_addr = core->loczrama ?
						0 : K3_R5_TCM_DEV_ADDR;
		} else {
			core->mem[i].dev_addr = core->loczrama ?
						K3_R5_TCM_DEV_ADDR : 0;
		}

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
			mem_names[i], &core->mem[i].bus_addr,
			core->mem[i].size, core->mem[i].cpu_addr,
			core->mem[i].dev_addr);
	}

	return 0;
}

/**
 * k3_r5f_probe() - Basic probe
 * @dev:	corresponding k3 remote processor device
 *
 * Return: 0 if all goes good, else appropriate error code.
 */
static int k3_r5f_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
	struct k3_r5f_core *core = dev_get_priv(dev);
	bool r_state;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	core->dev = dev;
	ret = k3_r5f_of_to_priv(core);
	if (ret)
		return ret;

	core->cluster = cluster;
	/* Assume Primary core gets probed first */
	if (!cluster->cores[0])
		cluster->cores[0] = core;
	else
		cluster->cores[1] = core;

	ret = k3_r5f_core_of_get_memories(core);
	if (ret) {
		dev_err(dev, "Rproc getting internal memories failed\n");
		return ret;
	}

	ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci, core->tsp.dev_id,
					       &r_state, &core->in_use);
	if (ret)
		return ret;

	if (core->in_use) {
		dev_info(dev, "Core %d is already in use. No rproc commands work\n",
			 core->tsp.proc_id);
		return 0;
	}

	/* Make sure Local reset is asserted. Redundant? */
	reset_assert(&core->reset);

	ret = k3_r5f_rproc_configure(core);
	if (ret) {
		dev_err(dev, "rproc configure failed %d\n", ret);
		return ret;
	}

	dev_dbg(dev, "Remoteproc successfully probed\n");

	return 0;
}

static int k3_r5f_remove(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);

	free(core->mem);

	ti_sci_proc_release(&core->tsp);

	return 0;
}

static const struct udevice_id k3_r5f_rproc_ids[] = {
	{ .compatible = "ti,am654-r5f"},
	{ .compatible = "ti,j721e-r5f"},
	{}
};

U_BOOT_DRIVER(k3_r5f_rproc) = {
	.name = "k3_r5f_rproc",
	.of_match = k3_r5f_rproc_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_r5f_rproc_ops,
	.probe = k3_r5f_probe,
	.remove = k3_r5f_remove,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_core),
};

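/*
 * The r5fss cluster node is bound as a UCLASS_MISC parent device; it only
 * records the requested lockstep/split mode and checks that exactly two
 * R5F core child nodes are present.
 */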
static int k3_r5f_cluster_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev);

	dev_dbg(dev, "%s\n", __func__);

	cluster->mode = dev_read_u32_default(dev, "lockstep-mode",
					     CLUSTER_MODE_LOCKSTEP);

	if (device_get_child_count(dev) != 2) {
		dev_err(dev, "Invalid number of R5 cores\n");
		return -EINVAL;
	}

	dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
		__func__, cluster->mode ? "lockstep" : "split");

	return 0;
}

static const struct udevice_id k3_r5fss_ids[] = {
	{ .compatible = "ti,am654-r5fss"},
	{ .compatible = "ti,j721e-r5fss"},
	{}
};

U_BOOT_DRIVER(k3_r5fss) = {
	.name = "k3_r5fss",
	.of_match = k3_r5fss_ids,
	.id = UCLASS_MISC,
	.probe = k3_r5f_cluster_probe,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_cluster),
};