blob: 1196ce07123d7189c78c189df86602509fa5f2a1 [file] [log] [blame]
Lokesh Vutla32cd2512018-08-27 15:57:32 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments System Control Interface Protocol Driver
4 * Based on drivers/firmware/ti_sci.c from Linux.
5 *
6 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
7 * Lokesh Vutla <lokeshvutla@ti.com>
8 */
9
10#include <common.h>
11#include <dm.h>
12#include <errno.h>
13#include <mailbox.h>
14#include <dm/device.h>
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053015#include <linux/compat.h>
Lokesh Vutla32cd2512018-08-27 15:57:32 +053016#include <linux/err.h>
17#include <linux/soc/ti/k3-sec-proxy.h>
18#include <linux/soc/ti/ti_sci_protocol.h>
19
20#include "ti_sci.h"
21
/*
 * List of all TI SCI instances active in the system; entries are
 * struct ti_sci_info nodes linked through their ->list member.
 */
static LIST_HEAD(ti_sci_list);
24
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message; its buffer is reused by the mailbox
 *		driver to hold the received response
 * @rx_len:	Expected receive message length in bytes
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	u8 rx_len;
};
34
/**
 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
 *				management representation of dev_ids.
 * @dev_id:	TISCI device ID
 * @type:	Corresponding id as identified by TISCI RM.
 *
 * Note: This is used only as a work around for using RM range apis
 * for AM654 SoC. For future SoCs dev_id will be used as type
 * for RM range APIs. In order to maintain ABI backward compatibility
 * type is not being changed for AM654 SoC.
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};
50
/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 * @rm_type_map:	RM resource type mapping structure (see the AM654
 *			workaround note on struct ti_sci_rm_type_map).
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
	struct ti_sci_rm_type_map *rm_type_map;
};
67
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	Instance of TI SCI handle to send to clients.
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify: Notification mailbox channel
 *		 (NOTE(review): not referenced in the code visible here)
 * @xfer:	xfer info (single transfer slot; one message at a time)
 * @list:	list head for linking into the global ti_sci_list
 * @is_secure:	Determines if the communication is through secure threads.
 * @host_id:	Host identifier representing the compute entity
 * @seq:	Seq id used for verification for tx and rx message.
 */
struct ti_sci_info {
	struct udevice *dev;
	const struct ti_sci_desc *desc;
	struct ti_sci_handle handle;
	struct mbox_chan chan_tx;
	struct mbox_chan chan_rx;
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;
	struct list_head list;
	bool is_secure;
	u8 host_id;
	u8 seq;
};
94
/* Convert a client-facing ti_sci_handle back to its containing ti_sci_info */
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
96
97/**
98 * ti_sci_setup_one_xfer() - Setup one message type
99 * @info: Pointer to SCI entity information
100 * @msg_type: Message type
101 * @msg_flags: Flag to set for the message
102 * @buf: Buffer to be send to mailbox channel
103 * @tx_message_size: transmit message size
104 * @rx_message_size: receive message size
105 *
106 * Helper function which is used by various command functions that are
107 * exposed to clients of this driver for allocating a message traffic event.
108 *
109 * Return: Corresponding ti_sci_xfer pointer if all went fine,
110 * else appropriate error pointer.
111 */
112static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
113 u16 msg_type, u32 msg_flags,
114 u32 *buf,
115 size_t tx_message_size,
116 size_t rx_message_size)
117{
118 struct ti_sci_xfer *xfer = &info->xfer;
119 struct ti_sci_msg_hdr *hdr;
120
121 /* Ensure we have sane transfer sizes */
122 if (rx_message_size > info->desc->max_msg_size ||
123 tx_message_size > info->desc->max_msg_size ||
124 rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
125 return ERR_PTR(-ERANGE);
126
127 info->seq = ~info->seq;
128 xfer->tx_message.buf = buf;
129 xfer->tx_message.len = tx_message_size;
130 xfer->rx_len = (u8)rx_message_size;
131
132 hdr = (struct ti_sci_msg_hdr *)buf;
133 hdr->seq = info->seq;
134 hdr->type = msg_type;
135 hdr->host = info->host_id;
136 hdr->flags = msg_flags;
137
138 return xfer;
139}
140
141/**
142 * ti_sci_get_response() - Receive response from mailbox channel
143 * @info: Pointer to SCI entity information
144 * @xfer: Transfer to initiate and wait for response
145 * @chan: Channel to receive the response
146 *
147 * Return: -ETIMEDOUT in case of no response, if transmit error,
148 * return corresponding error, else if all goes well,
149 * return 0.
150 */
151static inline int ti_sci_get_response(struct ti_sci_info *info,
152 struct ti_sci_xfer *xfer,
153 struct mbox_chan *chan)
154{
155 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
156 struct ti_sci_secure_msg_hdr *secure_hdr;
157 struct ti_sci_msg_hdr *hdr;
158 int ret;
159
160 /* Receive the response */
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +0530161 ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms);
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530162 if (ret) {
163 dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
164 __func__, ret);
165 return ret;
166 }
167
168 /* ToDo: Verify checksum */
169 if (info->is_secure) {
170 secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
171 msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
172 }
173
174 /* msg is updated by mailbox driver */
175 hdr = (struct ti_sci_msg_hdr *)msg->buf;
176
177 /* Sanity check for message response */
178 if (hdr->seq != info->seq) {
179 dev_dbg(info->dev, "%s: Message for %d is not expected\n",
180 __func__, hdr->seq);
181 return ret;
182 }
183
184 if (msg->len > info->desc->max_msg_size) {
185 dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
186 __func__, msg->len, info->desc->max_msg_size);
187 return -EINVAL;
188 }
189
190 if (msg->len < xfer->rx_len) {
191 dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
192 __func__, msg->len, xfer->rx_len);
193 }
194
195 return ret;
196}
197
198/**
199 * ti_sci_do_xfer() - Do one transfer
200 * @info: Pointer to SCI entity information
201 * @xfer: Transfer to initiate and wait for response
202 *
203 * Return: 0 if all went fine, else return appropriate error.
204 */
205static inline int ti_sci_do_xfer(struct ti_sci_info *info,
206 struct ti_sci_xfer *xfer)
207{
208 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
209 u8 secure_buf[info->desc->max_msg_size];
210 struct ti_sci_secure_msg_hdr secure_hdr;
211 int ret;
212
213 if (info->is_secure) {
214 /* ToDo: get checksum of the entire message */
215 secure_hdr.checksum = 0;
216 secure_hdr.reserved = 0;
217 memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
218 xfer->tx_message.len);
219
220 xfer->tx_message.buf = (u32 *)secure_buf;
221 xfer->tx_message.len += sizeof(secure_hdr);
222 xfer->rx_len += sizeof(secure_hdr);
223 }
224
225 /* Send the message */
226 ret = mbox_send(&info->chan_tx, msg);
227 if (ret) {
228 dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
229 __func__, ret);
230 return ret;
231 }
232
233 return ti_sci_get_response(info, xfer, &info->chan_rx);
234}
235
236/**
237 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
238 * @handle: pointer to TI SCI handle
239 *
240 * Updates the SCI information in the internal data structure.
241 *
242 * Return: 0 if all went fine, else return appropriate error.
243 */
244static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
245{
246 struct ti_sci_msg_resp_version *rev_info;
247 struct ti_sci_version_info *ver;
248 struct ti_sci_msg_hdr hdr;
249 struct ti_sci_info *info;
250 struct ti_sci_xfer *xfer;
251 int ret;
252
253 if (IS_ERR(handle))
254 return PTR_ERR(handle);
255 if (!handle)
256 return -EINVAL;
257
258 info = handle_to_ti_sci_info(handle);
259
260 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION, 0x0,
261 (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
262 sizeof(*rev_info));
263 if (IS_ERR(xfer)) {
264 ret = PTR_ERR(xfer);
265 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
266 return ret;
267 }
268
269 ret = ti_sci_do_xfer(info, xfer);
270 if (ret) {
271 dev_err(info->dev, "Mbox communication fail %d\n", ret);
272 return ret;
273 }
274
275 rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
276
277 ver = &handle->version;
278 ver->abi_major = rev_info->abi_major;
279 ver->abi_minor = rev_info->abi_minor;
280 ver->firmware_revision = rev_info->firmware_revision;
281 strncpy(ver->firmware_description, rev_info->firmware_description,
282 sizeof(ver->firmware_description));
283
284 return 0;
285}
286
287/**
288 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
289 * @r: pointer to response buffer
290 *
291 * Return: true if the response was an ACK, else returns false.
292 */
293static inline bool ti_sci_is_response_ack(void *r)
294{
295 struct ti_sci_msg_hdr *hdr = r;
296
297 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
298}
299
300/**
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +0530301 * cmd_set_board_config_using_msg() - Common command to send board configuration
302 * message
303 * @handle: pointer to TI SCI handle
304 * @msg_type: One of the TISCI message types to set board configuration
305 * @addr: Address where the board config structure is located
306 * @size: Size of the board config structure
307 *
308 * Return: 0 if all went well, else returns appropriate error value.
309 */
310static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
311 u16 msg_type, u64 addr, u32 size)
312{
313 struct ti_sci_msg_board_config req;
314 struct ti_sci_msg_hdr *resp;
315 struct ti_sci_info *info;
316 struct ti_sci_xfer *xfer;
317 int ret = 0;
318
319 if (IS_ERR(handle))
320 return PTR_ERR(handle);
321 if (!handle)
322 return -EINVAL;
323
324 info = handle_to_ti_sci_info(handle);
325
326 xfer = ti_sci_setup_one_xfer(info, msg_type,
327 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
328 (u32 *)&req, sizeof(req), sizeof(*resp));
329 if (IS_ERR(xfer)) {
330 ret = PTR_ERR(xfer);
331 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
332 return ret;
333 }
334 req.boardcfgp_high = (addr >> 32) & 0xffffffff;
335 req.boardcfgp_low = addr & 0xffffffff;
336 req.boardcfg_size = size;
337
338 ret = ti_sci_do_xfer(info, xfer);
339 if (ret) {
340 dev_err(info->dev, "Mbox send fail %d\n", ret);
341 return ret;
342 }
343
344 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
345
346 if (!ti_sci_is_response_ack(resp))
347 return -ENODEV;
348
349 return ret;
350}
351
352/**
353 * ti_sci_cmd_set_board_config() - Command to send board configuration message
354 * @handle: pointer to TI SCI handle
355 * @addr: Address where the board config structure is located
356 * @size: Size of the board config structure
357 *
358 * Return: 0 if all went well, else returns appropriate error value.
359 */
360static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
361 u64 addr, u32 size)
362{
363 return cmd_set_board_config_using_msg(handle,
364 TI_SCI_MSG_BOARD_CONFIG,
365 addr, size);
366}
367
368/**
369 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
370 * management configuration
371 * @handle: pointer to TI SCI handle
372 * @addr: Address where the board RM config structure is located
373 * @size: Size of the RM config structure
374 *
375 * Return: 0 if all went well, else returns appropriate error value.
376 */
377static
378int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
379 u64 addr, u32 size)
380{
381 return cmd_set_board_config_using_msg(handle,
382 TI_SCI_MSG_BOARD_CONFIG_RM,
383 addr, size);
384}
385
386/**
387 * ti_sci_cmd_set_board_config_security() - Command to send board security
388 * configuration message
389 * @handle: pointer to TI SCI handle
390 * @addr: Address where the board security config structure is located
391 * @size: Size of the security config structure
392 *
393 * Return: 0 if all went well, else returns appropriate error value.
394 */
395static
396int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
397 u64 addr, u32 size)
398{
399 return cmd_set_board_config_using_msg(handle,
400 TI_SCI_MSG_BOARD_CONFIG_SECURITY,
401 addr, size);
402}
403
404/**
405 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
406 * configuration message
407 * @handle: pointer to TI SCI handle
408 * @addr: Address where the board PM config structure is located
409 * @size: Size of the PM config structure
410 *
411 * Return: 0 if all went well, else returns appropriate error value.
412 */
413static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
414 u64 addr, u32 size)
415{
416 return cmd_set_board_config_using_msg(handle,
417 TI_SCI_MSG_BOARD_CONFIG_PM,
418 addr, size);
419}
420
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530421/**
422 * ti_sci_set_device_state() - Set device state helper
423 * @handle: pointer to TI SCI handle
424 * @id: Device identifier
425 * @flags: flags to setup for the device
426 * @state: State to move the device to
427 *
428 * Return: 0 if all went well, else returns appropriate error value.
429 */
430static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
431 u32 id, u32 flags, u8 state)
432{
433 struct ti_sci_msg_req_set_device_state req;
434 struct ti_sci_msg_hdr *resp;
435 struct ti_sci_info *info;
436 struct ti_sci_xfer *xfer;
437 int ret = 0;
438
439 if (IS_ERR(handle))
440 return PTR_ERR(handle);
441 if (!handle)
442 return -EINVAL;
443
444 info = handle_to_ti_sci_info(handle);
445
446 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
447 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
448 (u32 *)&req, sizeof(req), sizeof(*resp));
449 if (IS_ERR(xfer)) {
450 ret = PTR_ERR(xfer);
451 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
452 return ret;
453 }
454 req.id = id;
455 req.state = state;
456
457 ret = ti_sci_do_xfer(info, xfer);
458 if (ret) {
459 dev_err(info->dev, "Mbox send fail %d\n", ret);
460 return ret;
461 }
462
463 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
464
465 if (!ti_sci_is_response_ack(resp))
466 return -ENODEV;
467
468 return ret;
469}
470
471/**
472 * ti_sci_get_device_state() - Get device state helper
473 * @handle: Handle to the device
474 * @id: Device Identifier
475 * @clcnt: Pointer to Context Loss Count
476 * @resets: pointer to resets
477 * @p_state: pointer to p_state
478 * @c_state: pointer to c_state
479 *
480 * Return: 0 if all went fine, else return appropriate error.
481 */
482static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
483 u32 id, u32 *clcnt, u32 *resets,
484 u8 *p_state, u8 *c_state)
485{
486 struct ti_sci_msg_resp_get_device_state *resp;
487 struct ti_sci_msg_req_get_device_state req;
488 struct ti_sci_info *info;
489 struct ti_sci_xfer *xfer;
490 int ret = 0;
491
492 if (IS_ERR(handle))
493 return PTR_ERR(handle);
494 if (!handle)
495 return -EINVAL;
496
497 if (!clcnt && !resets && !p_state && !c_state)
498 return -EINVAL;
499
500 info = handle_to_ti_sci_info(handle);
501
502 /* Response is expected, so need of any flags */
503 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE, 0,
504 (u32 *)&req, sizeof(req), sizeof(*resp));
505 if (IS_ERR(xfer)) {
506 ret = PTR_ERR(xfer);
507 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
508 return ret;
509 }
510 req.id = id;
511
512 ret = ti_sci_do_xfer(info, xfer);
513 if (ret) {
514 dev_err(dev, "Mbox send fail %d\n", ret);
515 return ret;
516 }
517
518 resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
519 if (!ti_sci_is_response_ack(resp))
520 return -ENODEV;
521
522 if (clcnt)
523 *clcnt = resp->context_loss_count;
524 if (resets)
525 *resets = resp->resets;
526 if (p_state)
527 *p_state = resp->programmed_state;
528 if (c_state)
529 *c_state = resp->current_state;
530
531 return ret;
532}
533
534/**
535 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
536 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
537 * @id: Device Identifier
538 *
539 * Request for the device - NOTE: the client MUST maintain integrity of
540 * usage count by balancing get_device with put_device. No refcounting is
541 * managed by driver for that purpose.
542 *
543 * NOTE: The request is for exclusive access for the processor.
544 *
545 * Return: 0 if all went fine, else return appropriate error.
546 */
547static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
548{
549 return ti_sci_set_device_state(handle, id,
550 MSG_FLAG_DEVICE_EXCLUSIVE,
551 MSG_DEVICE_SW_STATE_ON);
552}
553
554/**
555 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
556 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
557 * @id: Device Identifier
558 *
559 * Request for the device - NOTE: the client MUST maintain integrity of
560 * usage count by balancing get_device with put_device. No refcounting is
561 * managed by driver for that purpose.
562 *
563 * Return: 0 if all went fine, else return appropriate error.
564 */
565static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
566{
567 return ti_sci_set_device_state(handle, id,
568 MSG_FLAG_DEVICE_EXCLUSIVE,
569 MSG_DEVICE_SW_STATE_RETENTION);
570}
571
572/**
573 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
574 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
575 * @id: Device Identifier
576 *
577 * Request for the device - NOTE: the client MUST maintain integrity of
578 * usage count by balancing get_device with put_device. No refcounting is
579 * managed by driver for that purpose.
580 *
581 * Return: 0 if all went fine, else return appropriate error.
582 */
583static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
584{
585 return ti_sci_set_device_state(handle, id,
586 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
587}
588
589/**
590 * ti_sci_cmd_dev_is_valid() - Is the device valid
591 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
592 * @id: Device Identifier
593 *
594 * Return: 0 if all went fine and the device ID is valid, else return
595 * appropriate error.
596 */
597static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
598{
599 u8 unused;
600
601 /* check the device state which will also tell us if the ID is valid */
602 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
603}
604
605/**
606 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
607 * @handle: Pointer to TISCI handle
608 * @id: Device Identifier
609 * @count: Pointer to Context Loss counter to populate
610 *
611 * Return: 0 if all went fine, else return appropriate error.
612 */
613static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
614 u32 *count)
615{
616 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
617}
618
619/**
620 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
621 * @handle: Pointer to TISCI handle
622 * @id: Device Identifier
623 * @r_state: true if requested to be idle
624 *
625 * Return: 0 if all went fine, else return appropriate error.
626 */
627static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
628 bool *r_state)
629{
630 int ret;
631 u8 state;
632
633 if (!r_state)
634 return -EINVAL;
635
636 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
637 if (ret)
638 return ret;
639
640 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
641
642 return 0;
643}
644
645/**
646 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
647 * @handle: Pointer to TISCI handle
648 * @id: Device Identifier
649 * @r_state: true if requested to be stopped
650 * @curr_state: true if currently stopped.
651 *
652 * Return: 0 if all went fine, else return appropriate error.
653 */
654static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
655 bool *r_state, bool *curr_state)
656{
657 int ret;
658 u8 p_state, c_state;
659
660 if (!r_state && !curr_state)
661 return -EINVAL;
662
663 ret =
664 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
665 if (ret)
666 return ret;
667
668 if (r_state)
669 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
670 if (curr_state)
671 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
672
673 return 0;
674}
675
676/**
677 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
678 * @handle: Pointer to TISCI handle
679 * @id: Device Identifier
680 * @r_state: true if requested to be ON
681 * @curr_state: true if currently ON and active
682 *
683 * Return: 0 if all went fine, else return appropriate error.
684 */
685static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
686 bool *r_state, bool *curr_state)
687{
688 int ret;
689 u8 p_state, c_state;
690
691 if (!r_state && !curr_state)
692 return -EINVAL;
693
694 ret =
695 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
696 if (ret)
697 return ret;
698
699 if (r_state)
700 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
701 if (curr_state)
702 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
703
704 return 0;
705}
706
707/**
708 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
709 * @handle: Pointer to TISCI handle
710 * @id: Device Identifier
711 * @curr_state: true if currently transitioning.
712 *
713 * Return: 0 if all went fine, else return appropriate error.
714 */
715static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
716 bool *curr_state)
717{
718 int ret;
719 u8 state;
720
721 if (!curr_state)
722 return -EINVAL;
723
724 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
725 if (ret)
726 return ret;
727
728 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
729
730 return 0;
731}
732
733/**
734 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
735 * by TISCI
736 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
737 * @id: Device Identifier
738 * @reset_state: Device specific reset bit field
739 *
740 * Return: 0 if all went fine, else return appropriate error.
741 */
742static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
743 u32 id, u32 reset_state)
744{
745 struct ti_sci_msg_req_set_device_resets req;
746 struct ti_sci_msg_hdr *resp;
747 struct ti_sci_info *info;
748 struct ti_sci_xfer *xfer;
749 int ret = 0;
750
751 if (IS_ERR(handle))
752 return PTR_ERR(handle);
753 if (!handle)
754 return -EINVAL;
755
756 info = handle_to_ti_sci_info(handle);
757
758 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
759 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
760 (u32 *)&req, sizeof(req), sizeof(*resp));
761 if (IS_ERR(xfer)) {
762 ret = PTR_ERR(xfer);
763 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
764 return ret;
765 }
766 req.id = id;
767 req.resets = reset_state;
768
769 ret = ti_sci_do_xfer(info, xfer);
770 if (ret) {
771 dev_err(info->dev, "Mbox send fail %d\n", ret);
772 return ret;
773 }
774
775 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
776
777 if (!ti_sci_is_response_ack(resp))
778 return -ENODEV;
779
780 return ret;
781}
782
783/**
784 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
785 * by TISCI
786 * @handle: Pointer to TISCI handle
787 * @id: Device Identifier
788 * @reset_state: Pointer to reset state to populate
789 *
790 * Return: 0 if all went fine, else return appropriate error.
791 */
792static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
793 u32 id, u32 *reset_state)
794{
795 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
796 NULL);
797}
798
Lokesh Vutla9b871812018-08-27 15:57:35 +0530799/**
800 * ti_sci_set_clock_state() - Set clock state helper
801 * @handle: pointer to TI SCI handle
802 * @dev_id: Device identifier this request is for
803 * @clk_id: Clock identifier for the device for this request.
804 * Each device has it's own set of clock inputs. This indexes
805 * which clock input to modify.
806 * @flags: Header flags as needed
807 * @state: State to request for the clock.
808 *
809 * Return: 0 if all went well, else returns appropriate error value.
810 */
811static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
812 u32 dev_id, u8 clk_id,
813 u32 flags, u8 state)
814{
815 struct ti_sci_msg_req_set_clock_state req;
816 struct ti_sci_msg_hdr *resp;
817 struct ti_sci_info *info;
818 struct ti_sci_xfer *xfer;
819 int ret = 0;
820
821 if (IS_ERR(handle))
822 return PTR_ERR(handle);
823 if (!handle)
824 return -EINVAL;
825
826 info = handle_to_ti_sci_info(handle);
827
828 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
829 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
830 (u32 *)&req, sizeof(req), sizeof(*resp));
831 if (IS_ERR(xfer)) {
832 ret = PTR_ERR(xfer);
833 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
834 return ret;
835 }
836 req.dev_id = dev_id;
837 req.clk_id = clk_id;
838 req.request_state = state;
839
840 ret = ti_sci_do_xfer(info, xfer);
841 if (ret) {
842 dev_err(info->dev, "Mbox send fail %d\n", ret);
843 return ret;
844 }
845
846 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
847
848 if (!ti_sci_is_response_ack(resp))
849 return -ENODEV;
850
851 return ret;
852}
853
854/**
855 * ti_sci_cmd_get_clock_state() - Get clock state helper
856 * @handle: pointer to TI SCI handle
857 * @dev_id: Device identifier this request is for
858 * @clk_id: Clock identifier for the device for this request.
859 * Each device has it's own set of clock inputs. This indexes
860 * which clock input to modify.
861 * @programmed_state: State requested for clock to move to
862 * @current_state: State that the clock is currently in
863 *
864 * Return: 0 if all went well, else returns appropriate error value.
865 */
866static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
867 u32 dev_id, u8 clk_id,
868 u8 *programmed_state, u8 *current_state)
869{
870 struct ti_sci_msg_resp_get_clock_state *resp;
871 struct ti_sci_msg_req_get_clock_state req;
872 struct ti_sci_info *info;
873 struct ti_sci_xfer *xfer;
874 int ret = 0;
875
876 if (IS_ERR(handle))
877 return PTR_ERR(handle);
878 if (!handle)
879 return -EINVAL;
880
881 if (!programmed_state && !current_state)
882 return -EINVAL;
883
884 info = handle_to_ti_sci_info(handle);
885
886 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
887 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
888 (u32 *)&req, sizeof(req), sizeof(*resp));
889 if (IS_ERR(xfer)) {
890 ret = PTR_ERR(xfer);
891 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
892 return ret;
893 }
894 req.dev_id = dev_id;
895 req.clk_id = clk_id;
896
897 ret = ti_sci_do_xfer(info, xfer);
898 if (ret) {
899 dev_err(info->dev, "Mbox send fail %d\n", ret);
900 return ret;
901 }
902
903 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
904
905 if (!ti_sci_is_response_ack(resp))
906 return -ENODEV;
907
908 if (programmed_state)
909 *programmed_state = resp->programmed_state;
910 if (current_state)
911 *current_state = resp->current_state;
912
913 return ret;
914}
915
916/**
917 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
918 * @handle: pointer to TI SCI handle
919 * @dev_id: Device identifier this request is for
920 * @clk_id: Clock identifier for the device for this request.
921 * Each device has it's own set of clock inputs. This indexes
922 * which clock input to modify.
923 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
924 * @can_change_freq: 'true' if frequency change is desired, else 'false'
925 * @enable_input_term: 'true' if input termination is desired, else 'false'
926 *
927 * Return: 0 if all went well, else returns appropriate error value.
928 */
929static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
930 u8 clk_id, bool needs_ssc, bool can_change_freq,
931 bool enable_input_term)
932{
933 u32 flags = 0;
934
935 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
936 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
937 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
938
939 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
940 MSG_CLOCK_SW_STATE_REQ);
941}
942
943/**
944 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
945 * @handle: pointer to TI SCI handle
946 * @dev_id: Device identifier this request is for
947 * @clk_id: Clock identifier for the device for this request.
948 * Each device has it's own set of clock inputs. This indexes
949 * which clock input to modify.
950 *
951 * NOTE: This clock must have been requested by get_clock previously.
952 *
953 * Return: 0 if all went well, else returns appropriate error value.
954 */
955static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
956 u32 dev_id, u8 clk_id)
957{
958 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
959 MSG_CLOCK_SW_STATE_UNREQ);
960}
961
962/**
963 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
964 * @handle: pointer to TI SCI handle
965 * @dev_id: Device identifier this request is for
966 * @clk_id: Clock identifier for the device for this request.
967 * Each device has it's own set of clock inputs. This indexes
968 * which clock input to modify.
969 *
970 * NOTE: This clock must have been requested by get_clock previously.
971 *
972 * Return: 0 if all went well, else returns appropriate error value.
973 */
974static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
975 u32 dev_id, u8 clk_id)
976{
977 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
978 MSG_CLOCK_SW_STATE_AUTO);
979}
980
981/**
982 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
983 * @handle: pointer to TI SCI handle
984 * @dev_id: Device identifier this request is for
985 * @clk_id: Clock identifier for the device for this request.
986 * Each device has it's own set of clock inputs. This indexes
987 * which clock input to modify.
988 * @req_state: state indicating if the clock is auto managed
989 *
990 * Return: 0 if all went well, else returns appropriate error value.
991 */
992static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
993 u32 dev_id, u8 clk_id, bool *req_state)
994{
995 u8 state = 0;
996 int ret;
997
998 if (!req_state)
999 return -EINVAL;
1000
1001 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1002 if (ret)
1003 return ret;
1004
1005 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1006 return 0;
1007}
1008
1009/**
1010 * ti_sci_cmd_clk_is_on() - Is the clock ON
1011 * @handle: pointer to TI SCI handle
1012 * @dev_id: Device identifier this request is for
1013 * @clk_id: Clock identifier for the device for this request.
1014 * Each device has it's own set of clock inputs. This indexes
1015 * which clock input to modify.
1016 * @req_state: state indicating if the clock is managed by us and enabled
1017 * @curr_state: state indicating if the clock is ready for operation
1018 *
1019 * Return: 0 if all went well, else returns appropriate error value.
1020 */
1021static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1022 u8 clk_id, bool *req_state, bool *curr_state)
1023{
1024 u8 c_state = 0, r_state = 0;
1025 int ret;
1026
1027 if (!req_state && !curr_state)
1028 return -EINVAL;
1029
1030 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1031 &r_state, &c_state);
1032 if (ret)
1033 return ret;
1034
1035 if (req_state)
1036 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1037 if (curr_state)
1038 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1039 return 0;
1040}
1041
1042/**
1043 * ti_sci_cmd_clk_is_off() - Is the clock OFF
1044 * @handle: pointer to TI SCI handle
1045 * @dev_id: Device identifier this request is for
1046 * @clk_id: Clock identifier for the device for this request.
1047 * Each device has it's own set of clock inputs. This indexes
1048 * which clock input to modify.
1049 * @req_state: state indicating if the clock is managed by us and disabled
1050 * @curr_state: state indicating if the clock is NOT ready for operation
1051 *
1052 * Return: 0 if all went well, else returns appropriate error value.
1053 */
1054static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1055 u8 clk_id, bool *req_state, bool *curr_state)
1056{
1057 u8 c_state = 0, r_state = 0;
1058 int ret;
1059
1060 if (!req_state && !curr_state)
1061 return -EINVAL;
1062
1063 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1064 &r_state, &c_state);
1065 if (ret)
1066 return ret;
1067
1068 if (req_state)
1069 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1070 if (curr_state)
1071 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1072 return 0;
1073}
1074
1075/**
1076 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1077 * @handle: pointer to TI SCI handle
1078 * @dev_id: Device identifier this request is for
1079 * @clk_id: Clock identifier for the device for this request.
1080 * Each device has it's own set of clock inputs. This indexes
1081 * which clock input to modify.
1082 * @parent_id: Parent clock identifier to set
1083 *
1084 * Return: 0 if all went well, else returns appropriate error value.
1085 */
1086static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1087 u32 dev_id, u8 clk_id, u8 parent_id)
1088{
1089 struct ti_sci_msg_req_set_clock_parent req;
1090 struct ti_sci_msg_hdr *resp;
1091 struct ti_sci_info *info;
1092 struct ti_sci_xfer *xfer;
1093 int ret = 0;
1094
1095 if (IS_ERR(handle))
1096 return PTR_ERR(handle);
1097 if (!handle)
1098 return -EINVAL;
1099
1100 info = handle_to_ti_sci_info(handle);
1101
1102 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1103 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1104 (u32 *)&req, sizeof(req), sizeof(*resp));
1105 if (IS_ERR(xfer)) {
1106 ret = PTR_ERR(xfer);
1107 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1108 return ret;
1109 }
1110 req.dev_id = dev_id;
1111 req.clk_id = clk_id;
1112 req.parent_id = parent_id;
1113
1114 ret = ti_sci_do_xfer(info, xfer);
1115 if (ret) {
1116 dev_err(info->dev, "Mbox send fail %d\n", ret);
1117 return ret;
1118 }
1119
1120 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1121
1122 if (!ti_sci_is_response_ack(resp))
1123 return -ENODEV;
1124
1125 return ret;
1126}
1127
1128/**
1129 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1130 * @handle: pointer to TI SCI handle
1131 * @dev_id: Device identifier this request is for
1132 * @clk_id: Clock identifier for the device for this request.
1133 * Each device has it's own set of clock inputs. This indexes
1134 * which clock input to modify.
1135 * @parent_id: Current clock parent
1136 *
1137 * Return: 0 if all went well, else returns appropriate error value.
1138 */
1139static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1140 u32 dev_id, u8 clk_id, u8 *parent_id)
1141{
1142 struct ti_sci_msg_resp_get_clock_parent *resp;
1143 struct ti_sci_msg_req_get_clock_parent req;
1144 struct ti_sci_info *info;
1145 struct ti_sci_xfer *xfer;
1146 int ret = 0;
1147
1148 if (IS_ERR(handle))
1149 return PTR_ERR(handle);
1150 if (!handle || !parent_id)
1151 return -EINVAL;
1152
1153 info = handle_to_ti_sci_info(handle);
1154
1155 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1156 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1157 (u32 *)&req, sizeof(req), sizeof(*resp));
1158 if (IS_ERR(xfer)) {
1159 ret = PTR_ERR(xfer);
1160 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1161 return ret;
1162 }
1163 req.dev_id = dev_id;
1164 req.clk_id = clk_id;
1165
1166 ret = ti_sci_do_xfer(info, xfer);
1167 if (ret) {
1168 dev_err(info->dev, "Mbox send fail %d\n", ret);
1169 return ret;
1170 }
1171
1172 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;
1173
1174 if (!ti_sci_is_response_ack(resp))
1175 ret = -ENODEV;
1176 else
1177 *parent_id = resp->parent_id;
1178
1179 return ret;
1180}
1181
1182/**
1183 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1184 * @handle: pointer to TI SCI handle
1185 * @dev_id: Device identifier this request is for
1186 * @clk_id: Clock identifier for the device for this request.
1187 * Each device has it's own set of clock inputs. This indexes
1188 * which clock input to modify.
1189 * @num_parents: Returns he number of parents to the current clock.
1190 *
1191 * Return: 0 if all went well, else returns appropriate error value.
1192 */
1193static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1194 u32 dev_id, u8 clk_id,
1195 u8 *num_parents)
1196{
1197 struct ti_sci_msg_resp_get_clock_num_parents *resp;
1198 struct ti_sci_msg_req_get_clock_num_parents req;
1199 struct ti_sci_info *info;
1200 struct ti_sci_xfer *xfer;
1201 int ret = 0;
1202
1203 if (IS_ERR(handle))
1204 return PTR_ERR(handle);
1205 if (!handle || !num_parents)
1206 return -EINVAL;
1207
1208 info = handle_to_ti_sci_info(handle);
1209
1210 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1211 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1212 (u32 *)&req, sizeof(req), sizeof(*resp));
1213 if (IS_ERR(xfer)) {
1214 ret = PTR_ERR(xfer);
1215 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1216 return ret;
1217 }
1218 req.dev_id = dev_id;
1219 req.clk_id = clk_id;
1220
1221 ret = ti_sci_do_xfer(info, xfer);
1222 if (ret) {
1223 dev_err(info->dev, "Mbox send fail %d\n", ret);
1224 return ret;
1225 }
1226
1227 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1228 xfer->tx_message.buf;
1229
1230 if (!ti_sci_is_response_ack(resp))
1231 ret = -ENODEV;
1232 else
1233 *num_parents = resp->num_parents;
1234
1235 return ret;
1236}
1237
1238/**
1239 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1240 * @handle: pointer to TI SCI handle
1241 * @dev_id: Device identifier this request is for
1242 * @clk_id: Clock identifier for the device for this request.
1243 * Each device has it's own set of clock inputs. This indexes
1244 * which clock input to modify.
1245 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1246 * allowable programmed frequency and does not account for clock
1247 * tolerances and jitter.
1248 * @target_freq: The target clock frequency in Hz. A frequency will be
1249 * processed as close to this target frequency as possible.
1250 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1251 * allowable programmed frequency and does not account for clock
1252 * tolerances and jitter.
1253 * @match_freq: Frequency match in Hz response.
1254 *
1255 * Return: 0 if all went well, else returns appropriate error value.
1256 */
1257static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1258 u32 dev_id, u8 clk_id, u64 min_freq,
1259 u64 target_freq, u64 max_freq,
1260 u64 *match_freq)
1261{
1262 struct ti_sci_msg_resp_query_clock_freq *resp;
1263 struct ti_sci_msg_req_query_clock_freq req;
1264 struct ti_sci_info *info;
1265 struct ti_sci_xfer *xfer;
1266 int ret = 0;
1267
1268 if (IS_ERR(handle))
1269 return PTR_ERR(handle);
1270 if (!handle || !match_freq)
1271 return -EINVAL;
1272
1273 info = handle_to_ti_sci_info(handle);
1274
1275 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1276 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1277 (u32 *)&req, sizeof(req), sizeof(*resp));
1278 if (IS_ERR(xfer)) {
1279 ret = PTR_ERR(xfer);
1280 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1281 return ret;
1282 }
1283 req.dev_id = dev_id;
1284 req.clk_id = clk_id;
1285 req.min_freq_hz = min_freq;
1286 req.target_freq_hz = target_freq;
1287 req.max_freq_hz = max_freq;
1288
1289 ret = ti_sci_do_xfer(info, xfer);
1290 if (ret) {
1291 dev_err(info->dev, "Mbox send fail %d\n", ret);
1292 return ret;
1293 }
1294
1295 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1296
1297 if (!ti_sci_is_response_ack(resp))
1298 ret = -ENODEV;
1299 else
1300 *match_freq = resp->freq_hz;
1301
1302 return ret;
1303}
1304
1305/**
1306 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1307 * @handle: pointer to TI SCI handle
1308 * @dev_id: Device identifier this request is for
1309 * @clk_id: Clock identifier for the device for this request.
1310 * Each device has it's own set of clock inputs. This indexes
1311 * which clock input to modify.
1312 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1313 * allowable programmed frequency and does not account for clock
1314 * tolerances and jitter.
1315 * @target_freq: The target clock frequency in Hz. A frequency will be
1316 * processed as close to this target frequency as possible.
1317 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1318 * allowable programmed frequency and does not account for clock
1319 * tolerances and jitter.
1320 *
1321 * Return: 0 if all went well, else returns appropriate error value.
1322 */
1323static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1324 u32 dev_id, u8 clk_id, u64 min_freq,
1325 u64 target_freq, u64 max_freq)
1326{
1327 struct ti_sci_msg_req_set_clock_freq req;
1328 struct ti_sci_msg_hdr *resp;
1329 struct ti_sci_info *info;
1330 struct ti_sci_xfer *xfer;
1331 int ret = 0;
1332
1333 if (IS_ERR(handle))
1334 return PTR_ERR(handle);
1335 if (!handle)
1336 return -EINVAL;
1337
1338 info = handle_to_ti_sci_info(handle);
1339
1340 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1341 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1342 (u32 *)&req, sizeof(req), sizeof(*resp));
1343 if (IS_ERR(xfer)) {
1344 ret = PTR_ERR(xfer);
1345 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1346 return ret;
1347 }
1348 req.dev_id = dev_id;
1349 req.clk_id = clk_id;
1350 req.min_freq_hz = min_freq;
1351 req.target_freq_hz = target_freq;
1352 req.max_freq_hz = max_freq;
1353
1354 ret = ti_sci_do_xfer(info, xfer);
1355 if (ret) {
1356 dev_err(info->dev, "Mbox send fail %d\n", ret);
1357 return ret;
1358 }
1359
1360 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1361
1362 if (!ti_sci_is_response_ack(resp))
1363 return -ENODEV;
1364
1365 return ret;
1366}
1367
1368/**
1369 * ti_sci_cmd_clk_get_freq() - Get current frequency
1370 * @handle: pointer to TI SCI handle
1371 * @dev_id: Device identifier this request is for
1372 * @clk_id: Clock identifier for the device for this request.
1373 * Each device has it's own set of clock inputs. This indexes
1374 * which clock input to modify.
1375 * @freq: Currently frequency in Hz
1376 *
1377 * Return: 0 if all went well, else returns appropriate error value.
1378 */
1379static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1380 u32 dev_id, u8 clk_id, u64 *freq)
1381{
1382 struct ti_sci_msg_resp_get_clock_freq *resp;
1383 struct ti_sci_msg_req_get_clock_freq req;
1384 struct ti_sci_info *info;
1385 struct ti_sci_xfer *xfer;
1386 int ret = 0;
1387
1388 if (IS_ERR(handle))
1389 return PTR_ERR(handle);
1390 if (!handle || !freq)
1391 return -EINVAL;
1392
1393 info = handle_to_ti_sci_info(handle);
1394
1395 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1396 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1397 (u32 *)&req, sizeof(req), sizeof(*resp));
1398 if (IS_ERR(xfer)) {
1399 ret = PTR_ERR(xfer);
1400 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1401 return ret;
1402 }
1403 req.dev_id = dev_id;
1404 req.clk_id = clk_id;
1405
1406 ret = ti_sci_do_xfer(info, xfer);
1407 if (ret) {
1408 dev_err(info->dev, "Mbox send fail %d\n", ret);
1409 return ret;
1410 }
1411
1412 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1413
1414 if (!ti_sci_is_response_ack(resp))
1415 ret = -ENODEV;
1416 else
1417 *freq = resp->freq_hz;
1418
1419 return ret;
1420}
1421
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05301422/**
1423 * ti_sci_cmd_core_reboot() - Command to request system reset
1424 * @handle: pointer to TI SCI handle
1425 *
1426 * Return: 0 if all went well, else returns appropriate error value.
1427 */
1428static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1429{
1430 struct ti_sci_msg_req_reboot req;
1431 struct ti_sci_msg_hdr *resp;
1432 struct ti_sci_info *info;
1433 struct ti_sci_xfer *xfer;
1434 int ret = 0;
1435
1436 if (IS_ERR(handle))
1437 return PTR_ERR(handle);
1438 if (!handle)
1439 return -EINVAL;
1440
1441 info = handle_to_ti_sci_info(handle);
1442
1443 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1444 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1445 (u32 *)&req, sizeof(req), sizeof(*resp));
1446 if (IS_ERR(xfer)) {
1447 ret = PTR_ERR(xfer);
1448 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1449 return ret;
1450 }
1451
1452 ret = ti_sci_do_xfer(info, xfer);
1453 if (ret) {
1454 dev_err(dev, "Mbox send fail %d\n", ret);
1455 return ret;
1456 }
1457
1458 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1459
1460 if (!ti_sci_is_response_ack(resp))
1461 return -ENODEV;
1462
1463 return ret;
1464}
1465
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301466static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
1467 u16 *type)
1468{
1469 struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
1470 bool found = false;
1471 int i;
1472
1473 /* If map is not provided then assume dev_id is used as type */
1474 if (!rm_type_map) {
1475 *type = dev_id;
1476 return 0;
1477 }
1478
1479 for (i = 0; rm_type_map[i].dev_id; i++) {
1480 if (rm_type_map[i].dev_id == dev_id) {
1481 *type = rm_type_map[i].type;
1482 found = true;
1483 break;
1484 }
1485 }
1486
1487 if (!found)
1488 return -EINVAL;
1489
1490 return 0;
1491}
1492
1493/**
1494 * ti_sci_get_resource_range - Helper to get a range of resources assigned
1495 * to a host. Resource is uniquely identified by
1496 * type and subtype.
1497 * @handle: Pointer to TISCI handle.
1498 * @dev_id: TISCI device ID.
1499 * @subtype: Resource assignment subtype that is being requested
1500 * from the given device.
1501 * @s_host: Host processor ID to which the resources are allocated
1502 * @range_start: Start index of the resource range
1503 * @range_num: Number of resources in the range
1504 *
1505 * Return: 0 if all went fine, else return appropriate error.
1506 */
1507static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1508 u32 dev_id, u8 subtype, u8 s_host,
1509 u16 *range_start, u16 *range_num)
1510{
1511 struct ti_sci_msg_resp_get_resource_range *resp;
1512 struct ti_sci_msg_req_get_resource_range req;
1513 struct ti_sci_xfer *xfer;
1514 struct ti_sci_info *info;
1515 u16 type;
1516 int ret = 0;
1517
1518 if (IS_ERR(handle))
1519 return PTR_ERR(handle);
1520 if (!handle)
1521 return -EINVAL;
1522
1523 info = handle_to_ti_sci_info(handle);
1524
1525 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1526 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1527 (u32 *)&req, sizeof(req), sizeof(*resp));
1528 if (IS_ERR(xfer)) {
1529 ret = PTR_ERR(xfer);
1530 dev_err(dev, "Message alloc failed(%d)\n", ret);
1531 return ret;
1532 }
1533
1534 ret = ti_sci_get_resource_type(info, dev_id, &type);
1535 if (ret) {
1536 dev_err(dev, "rm type lookup failed for %u\n", dev_id);
1537 goto fail;
1538 }
1539
1540 req.secondary_host = s_host;
1541 req.type = type & MSG_RM_RESOURCE_TYPE_MASK;
1542 req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1543
1544 ret = ti_sci_do_xfer(info, xfer);
1545 if (ret) {
1546 dev_err(dev, "Mbox send fail %d\n", ret);
1547 goto fail;
1548 }
1549
1550 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
1551 if (!ti_sci_is_response_ack(resp)) {
1552 ret = -ENODEV;
1553 } else if (!resp->range_start && !resp->range_num) {
1554 ret = -ENODEV;
1555 } else {
1556 *range_start = resp->range_start;
1557 *range_num = resp->range_num;
1558 };
1559
1560fail:
1561 return ret;
1562}
1563
1564/**
1565 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1566 * that is same as ti sci interface host.
1567 * @handle: Pointer to TISCI handle.
1568 * @dev_id: TISCI device ID.
1569 * @subtype: Resource assignment subtype that is being requested
1570 * from the given device.
1571 * @range_start: Start index of the resource range
1572 * @range_num: Number of resources in the range
1573 *
1574 * Return: 0 if all went fine, else return appropriate error.
1575 */
1576static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1577 u32 dev_id, u8 subtype,
1578 u16 *range_start, u16 *range_num)
1579{
1580 return ti_sci_get_resource_range(handle, dev_id, subtype,
1581 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1582 range_start, range_num);
1583}
1584
1585/**
1586 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1587 * assigned to a specified host.
1588 * @handle: Pointer to TISCI handle.
1589 * @dev_id: TISCI device ID.
1590 * @subtype: Resource assignment subtype that is being requested
1591 * from the given device.
1592 * @s_host: Host processor ID to which the resources are allocated
1593 * @range_start: Start index of the resource range
1594 * @range_num: Number of resources in the range
1595 *
1596 * Return: 0 if all went fine, else return appropriate error.
1597 */
1598static
1599int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1600 u32 dev_id, u8 subtype, u8 s_host,
1601 u16 *range_start, u16 *range_num)
1602{
1603 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1604 range_start, range_num);
1605}
1606
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301607/**
Lokesh Vutla826eb742019-03-08 11:47:32 +05301608 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1609 * @handle: pointer to TI SCI handle
 * @msmc_start:	MSMC start as returned by tisci
1611 * @msmc_end: MSMC end as returned by tisci
1612 *
1613 * Return: 0 if all went well, else returns appropriate error value.
1614 */
1615static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1616 u64 *msmc_start, u64 *msmc_end)
1617{
1618 struct ti_sci_msg_resp_query_msmc *resp;
1619 struct ti_sci_msg_hdr req;
1620 struct ti_sci_info *info;
1621 struct ti_sci_xfer *xfer;
1622 int ret = 0;
1623
1624 if (IS_ERR(handle))
1625 return PTR_ERR(handle);
1626 if (!handle)
1627 return -EINVAL;
1628
1629 info = handle_to_ti_sci_info(handle);
1630
1631 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1632 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1633 (u32 *)&req, sizeof(req), sizeof(*resp));
1634 if (IS_ERR(xfer)) {
1635 ret = PTR_ERR(xfer);
1636 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1637 return ret;
1638 }
1639
1640 ret = ti_sci_do_xfer(info, xfer);
1641 if (ret) {
1642 dev_err(dev, "Mbox send fail %d\n", ret);
1643 return ret;
1644 }
1645
1646 resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1647
1648 if (!ti_sci_is_response_ack(resp))
1649 return -ENODEV;
1650
1651 *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1652 resp->msmc_start_low;
1653 *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1654 resp->msmc_end_low;
1655
1656 return ret;
1657}
1658
1659/**
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301660 * ti_sci_cmd_proc_request() - Command to request a physical processor control
1661 * @handle: Pointer to TI SCI handle
1662 * @proc_id: Processor ID this request is for
1663 *
1664 * Return: 0 if all went well, else returns appropriate error value.
1665 */
1666static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1667 u8 proc_id)
1668{
1669 struct ti_sci_msg_req_proc_request req;
1670 struct ti_sci_msg_hdr *resp;
1671 struct ti_sci_info *info;
1672 struct ti_sci_xfer *xfer;
1673 int ret = 0;
1674
1675 if (IS_ERR(handle))
1676 return PTR_ERR(handle);
1677 if (!handle)
1678 return -EINVAL;
1679
1680 info = handle_to_ti_sci_info(handle);
1681
1682 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1683 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1684 (u32 *)&req, sizeof(req), sizeof(*resp));
1685 if (IS_ERR(xfer)) {
1686 ret = PTR_ERR(xfer);
1687 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1688 return ret;
1689 }
1690 req.processor_id = proc_id;
1691
1692 ret = ti_sci_do_xfer(info, xfer);
1693 if (ret) {
1694 dev_err(info->dev, "Mbox send fail %d\n", ret);
1695 return ret;
1696 }
1697
1698 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1699
1700 if (!ti_sci_is_response_ack(resp))
1701 ret = -ENODEV;
1702
1703 return ret;
1704}
1705
1706/**
1707 * ti_sci_cmd_proc_release() - Command to release a physical processor control
1708 * @handle: Pointer to TI SCI handle
1709 * @proc_id: Processor ID this request is for
1710 *
1711 * Return: 0 if all went well, else returns appropriate error value.
1712 */
1713static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1714 u8 proc_id)
1715{
1716 struct ti_sci_msg_req_proc_release req;
1717 struct ti_sci_msg_hdr *resp;
1718 struct ti_sci_info *info;
1719 struct ti_sci_xfer *xfer;
1720 int ret = 0;
1721
1722 if (IS_ERR(handle))
1723 return PTR_ERR(handle);
1724 if (!handle)
1725 return -EINVAL;
1726
1727 info = handle_to_ti_sci_info(handle);
1728
1729 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1730 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1731 (u32 *)&req, sizeof(req), sizeof(*resp));
1732 if (IS_ERR(xfer)) {
1733 ret = PTR_ERR(xfer);
1734 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1735 return ret;
1736 }
1737 req.processor_id = proc_id;
1738
1739 ret = ti_sci_do_xfer(info, xfer);
1740 if (ret) {
1741 dev_err(info->dev, "Mbox send fail %d\n", ret);
1742 return ret;
1743 }
1744
1745 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1746
1747 if (!ti_sci_is_response_ack(resp))
1748 ret = -ENODEV;
1749
1750 return ret;
1751}
1752
1753/**
1754 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1755 * control to a host in the processor's access
1756 * control list.
1757 * @handle: Pointer to TI SCI handle
1758 * @proc_id: Processor ID this request is for
1759 * @host_id: Host ID to get the control of the processor
1760 *
1761 * Return: 0 if all went well, else returns appropriate error value.
1762 */
1763static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1764 u8 proc_id, u8 host_id)
1765{
1766 struct ti_sci_msg_req_proc_handover req;
1767 struct ti_sci_msg_hdr *resp;
1768 struct ti_sci_info *info;
1769 struct ti_sci_xfer *xfer;
1770 int ret = 0;
1771
1772 if (IS_ERR(handle))
1773 return PTR_ERR(handle);
1774 if (!handle)
1775 return -EINVAL;
1776
1777 info = handle_to_ti_sci_info(handle);
1778
1779 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1780 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1781 (u32 *)&req, sizeof(req), sizeof(*resp));
1782 if (IS_ERR(xfer)) {
1783 ret = PTR_ERR(xfer);
1784 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1785 return ret;
1786 }
1787 req.processor_id = proc_id;
1788 req.host_id = host_id;
1789
1790 ret = ti_sci_do_xfer(info, xfer);
1791 if (ret) {
1792 dev_err(info->dev, "Mbox send fail %d\n", ret);
1793 return ret;
1794 }
1795
1796 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1797
1798 if (!ti_sci_is_response_ack(resp))
1799 ret = -ENODEV;
1800
1801 return ret;
1802}
1803
1804/**
1805 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1806 * configuration flags
1807 * @handle: Pointer to TI SCI handle
1808 * @proc_id: Processor ID this request is for
1809 * @config_flags_set: Configuration flags to be set
1810 * @config_flags_clear: Configuration flags to be cleared.
1811 *
1812 * Return: 0 if all went well, else returns appropriate error value.
1813 */
1814static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1815 u8 proc_id, u64 bootvector,
1816 u32 config_flags_set,
1817 u32 config_flags_clear)
1818{
1819 struct ti_sci_msg_req_set_proc_boot_config req;
1820 struct ti_sci_msg_hdr *resp;
1821 struct ti_sci_info *info;
1822 struct ti_sci_xfer *xfer;
1823 int ret = 0;
1824
1825 if (IS_ERR(handle))
1826 return PTR_ERR(handle);
1827 if (!handle)
1828 return -EINVAL;
1829
1830 info = handle_to_ti_sci_info(handle);
1831
1832 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1833 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1834 (u32 *)&req, sizeof(req), sizeof(*resp));
1835 if (IS_ERR(xfer)) {
1836 ret = PTR_ERR(xfer);
1837 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1838 return ret;
1839 }
1840 req.processor_id = proc_id;
1841 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1842 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1843 TISCI_ADDR_HIGH_SHIFT;
1844 req.config_flags_set = config_flags_set;
1845 req.config_flags_clear = config_flags_clear;
1846
1847 ret = ti_sci_do_xfer(info, xfer);
1848 if (ret) {
1849 dev_err(info->dev, "Mbox send fail %d\n", ret);
1850 return ret;
1851 }
1852
1853 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1854
1855 if (!ti_sci_is_response_ack(resp))
1856 ret = -ENODEV;
1857
1858 return ret;
1859}
1860
1861/**
1862 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1863 * control flags
1864 * @handle: Pointer to TI SCI handle
1865 * @proc_id: Processor ID this request is for
1866 * @control_flags_set: Control flags to be set
1867 * @control_flags_clear: Control flags to be cleared
1868 *
1869 * Return: 0 if all went well, else returns appropriate error value.
1870 */
1871static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1872 u8 proc_id, u32 control_flags_set,
1873 u32 control_flags_clear)
1874{
1875 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1876 struct ti_sci_msg_hdr *resp;
1877 struct ti_sci_info *info;
1878 struct ti_sci_xfer *xfer;
1879 int ret = 0;
1880
1881 if (IS_ERR(handle))
1882 return PTR_ERR(handle);
1883 if (!handle)
1884 return -EINVAL;
1885
1886 info = handle_to_ti_sci_info(handle);
1887
1888 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1889 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1890 (u32 *)&req, sizeof(req), sizeof(*resp));
1891 if (IS_ERR(xfer)) {
1892 ret = PTR_ERR(xfer);
1893 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1894 return ret;
1895 }
1896 req.processor_id = proc_id;
1897 req.control_flags_set = control_flags_set;
1898 req.control_flags_clear = control_flags_clear;
1899
1900 ret = ti_sci_do_xfer(info, xfer);
1901 if (ret) {
1902 dev_err(info->dev, "Mbox send fail %d\n", ret);
1903 return ret;
1904 }
1905
1906 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1907
1908 if (!ti_sci_is_response_ack(resp))
1909 ret = -ENODEV;
1910
1911 return ret;
1912}
1913
1914/**
1915 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
1916 * image and then set the processor configuration flags.
1917 * @handle: Pointer to TI SCI handle
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001918 * @image_addr: Memory address at which payload image and certificate is
1919 * located in memory, this is updated if the image data is
1920 * moved during authentication.
1921 * @image_size: This is updated with the final size of the image after
1922 * authentication.
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301923 *
1924 * Return: 0 if all went well, else returns appropriate error value.
1925 */
1926static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001927 u64 *image_addr, u32 *image_size)
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301928{
1929 struct ti_sci_msg_req_proc_auth_boot_image req;
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001930 struct ti_sci_msg_resp_proc_auth_boot_image *resp;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301931 struct ti_sci_info *info;
1932 struct ti_sci_xfer *xfer;
1933 int ret = 0;
1934
1935 if (IS_ERR(handle))
1936 return PTR_ERR(handle);
1937 if (!handle)
1938 return -EINVAL;
1939
1940 info = handle_to_ti_sci_info(handle);
1941
1942 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
1943 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1944 (u32 *)&req, sizeof(req), sizeof(*resp));
1945 if (IS_ERR(xfer)) {
1946 ret = PTR_ERR(xfer);
1947 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1948 return ret;
1949 }
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001950 req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
1951 req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301952 TISCI_ADDR_HIGH_SHIFT;
1953
1954 ret = ti_sci_do_xfer(info, xfer);
1955 if (ret) {
1956 dev_err(info->dev, "Mbox send fail %d\n", ret);
1957 return ret;
1958 }
1959
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001960 resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301961
1962 if (!ti_sci_is_response_ack(resp))
Andrew F. Davisff6043a2019-04-12 12:54:44 -04001963 return -ENODEV;
1964
1965 *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
1966 (((u64)resp->image_addr_high <<
1967 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
1968 *image_size = resp->image_size;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301969
1970 return ret;
1971}
1972
1973/**
1974 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
1975 * @handle: Pointer to TI SCI handle
1976 * @proc_id: Processor ID this request is for
1977 *
1978 * Return: 0 if all went well, else returns appropriate error value.
1979 */
1980static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
1981 u8 proc_id, u64 *bv, u32 *cfg_flags,
1982 u32 *ctrl_flags, u32 *sts_flags)
1983{
1984 struct ti_sci_msg_resp_get_proc_boot_status *resp;
1985 struct ti_sci_msg_req_get_proc_boot_status req;
1986 struct ti_sci_info *info;
1987 struct ti_sci_xfer *xfer;
1988 int ret = 0;
1989
1990 if (IS_ERR(handle))
1991 return PTR_ERR(handle);
1992 if (!handle)
1993 return -EINVAL;
1994
1995 info = handle_to_ti_sci_info(handle);
1996
1997 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
1998 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1999 (u32 *)&req, sizeof(req), sizeof(*resp));
2000 if (IS_ERR(xfer)) {
2001 ret = PTR_ERR(xfer);
2002 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2003 return ret;
2004 }
2005 req.processor_id = proc_id;
2006
2007 ret = ti_sci_do_xfer(info, xfer);
2008 if (ret) {
2009 dev_err(info->dev, "Mbox send fail %d\n", ret);
2010 return ret;
2011 }
2012
2013 resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2014 xfer->tx_message.buf;
2015
2016 if (!ti_sci_is_response_ack(resp))
2017 return -ENODEV;
2018 *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2019 (((u64)resp->bootvector_high <<
2020 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2021 *cfg_flags = resp->config_flags;
2022 *ctrl_flags = resp->control_flags;
2023 *sts_flags = resp->status_flags;
2024
2025 return ret;
2026}
2027
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302028/**
2029 * ti_sci_cmd_ring_config() - configure RA ring
2030 * @handle: pointer to TI SCI handle
2031 * @valid_params: Bitfield defining validity of ring configuration parameters.
2032 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2033 * @index: Ring index.
2034 * @addr_lo: The ring base address lo 32 bits
2035 * @addr_hi: The ring base address hi 32 bits
2036 * @count: Number of ring elements.
2037 * @mode: The mode of the ring
2038 * @size: The ring element size.
2039 * @order_id: Specifies the ring's bus order ID.
2040 *
2041 * Return: 0 if all went well, else returns appropriate error value.
2042 *
2043 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2044 */
2045static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2046 u32 valid_params, u16 nav_id, u16 index,
2047 u32 addr_lo, u32 addr_hi, u32 count,
2048 u8 mode, u8 size, u8 order_id)
2049{
2050 struct ti_sci_msg_rm_ring_cfg_resp *resp;
2051 struct ti_sci_msg_rm_ring_cfg_req req;
2052 struct ti_sci_xfer *xfer;
2053 struct ti_sci_info *info;
2054 int ret = 0;
2055
2056 if (IS_ERR(handle))
2057 return PTR_ERR(handle);
2058 if (!handle)
2059 return -EINVAL;
2060
2061 info = handle_to_ti_sci_info(handle);
2062
2063 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2064 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2065 (u32 *)&req, sizeof(req), sizeof(*resp));
2066 if (IS_ERR(xfer)) {
2067 ret = PTR_ERR(xfer);
2068 dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
2069 return ret;
2070 }
2071 req.valid_params = valid_params;
2072 req.nav_id = nav_id;
2073 req.index = index;
2074 req.addr_lo = addr_lo;
2075 req.addr_hi = addr_hi;
2076 req.count = count;
2077 req.mode = mode;
2078 req.size = size;
2079 req.order_id = order_id;
2080
2081 ret = ti_sci_do_xfer(info, xfer);
2082 if (ret) {
2083 dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
2084 goto fail;
2085 }
2086
2087 resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;
2088
2089 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2090
2091fail:
2092 dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2093 return ret;
2094}
2095
2096/**
2097 * ti_sci_cmd_ring_get_config() - get RA ring configuration
2098 * @handle: pointer to TI SCI handle
2099 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2100 * @index: Ring index.
2101 * @addr_lo: returns ring's base address lo 32 bits
2102 * @addr_hi: returns ring's base address hi 32 bits
2103 * @count: returns number of ring elements.
2104 * @mode: returns mode of the ring
2105 * @size: returns ring element size.
2106 * @order_id: returns ring's bus order ID.
2107 *
2108 * Return: 0 if all went well, else returns appropriate error value.
2109 *
2110 * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
2111 */
2112static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2113 u32 nav_id, u32 index, u8 *mode,
2114 u32 *addr_lo, u32 *addr_hi,
2115 u32 *count, u8 *size, u8 *order_id)
2116{
2117 struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2118 struct ti_sci_msg_rm_ring_get_cfg_req req;
2119 struct ti_sci_xfer *xfer;
2120 struct ti_sci_info *info;
2121 int ret = 0;
2122
2123 if (IS_ERR(handle))
2124 return PTR_ERR(handle);
2125 if (!handle)
2126 return -EINVAL;
2127
2128 info = handle_to_ti_sci_info(handle);
2129
2130 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2131 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2132 (u32 *)&req, sizeof(req), sizeof(*resp));
2133 if (IS_ERR(xfer)) {
2134 ret = PTR_ERR(xfer);
2135 dev_err(info->dev,
2136 "RM_RA:Message get config failed(%d)\n", ret);
2137 return ret;
2138 }
2139 req.nav_id = nav_id;
2140 req.index = index;
2141
2142 ret = ti_sci_do_xfer(info, xfer);
2143 if (ret) {
2144 dev_err(info->dev, "RM_RA:Mbox get config send fail %d\n", ret);
2145 goto fail;
2146 }
2147
2148 resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->tx_message.buf;
2149
2150 if (!ti_sci_is_response_ack(resp)) {
2151 ret = -ENODEV;
2152 } else {
2153 if (mode)
2154 *mode = resp->mode;
2155 if (addr_lo)
2156 *addr_lo = resp->addr_lo;
2157 if (addr_hi)
2158 *addr_hi = resp->addr_hi;
2159 if (count)
2160 *count = resp->count;
2161 if (size)
2162 *size = resp->size;
2163 if (order_id)
2164 *order_id = resp->order_id;
2165 };
2166
2167fail:
2168 dev_dbg(info->dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2169 return ret;
2170}
2171
2172static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2173 u32 nav_id, u32 src_thread, u32 dst_thread)
2174{
2175 struct ti_sci_msg_hdr *resp;
2176 struct ti_sci_msg_psil_pair req;
2177 struct ti_sci_xfer *xfer;
2178 struct ti_sci_info *info;
2179 int ret = 0;
2180
2181 if (IS_ERR(handle))
2182 return PTR_ERR(handle);
2183 if (!handle)
2184 return -EINVAL;
2185
2186 info = handle_to_ti_sci_info(handle);
2187
2188 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2189 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2190 (u32 *)&req, sizeof(req), sizeof(*resp));
2191 if (IS_ERR(xfer)) {
2192 ret = PTR_ERR(xfer);
2193 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2194 return ret;
2195 }
2196 req.nav_id = nav_id;
2197 req.src_thread = src_thread;
2198 req.dst_thread = dst_thread;
2199
2200 ret = ti_sci_do_xfer(info, xfer);
2201 if (ret) {
2202 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2203 goto fail;
2204 }
2205
2206 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2207 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2208
2209fail:
2210 dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%u\n",
2211 nav_id, src_thread, dst_thread, ret);
2212 return ret;
2213}
2214
2215static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2216 u32 nav_id, u32 src_thread, u32 dst_thread)
2217{
2218 struct ti_sci_msg_hdr *resp;
2219 struct ti_sci_msg_psil_unpair req;
2220 struct ti_sci_xfer *xfer;
2221 struct ti_sci_info *info;
2222 int ret = 0;
2223
2224 if (IS_ERR(handle))
2225 return PTR_ERR(handle);
2226 if (!handle)
2227 return -EINVAL;
2228
2229 info = handle_to_ti_sci_info(handle);
2230
2231 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2232 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2233 (u32 *)&req, sizeof(req), sizeof(*resp));
2234 if (IS_ERR(xfer)) {
2235 ret = PTR_ERR(xfer);
2236 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2237 return ret;
2238 }
2239 req.nav_id = nav_id;
2240 req.src_thread = src_thread;
2241 req.dst_thread = dst_thread;
2242
2243 ret = ti_sci_do_xfer(info, xfer);
2244 if (ret) {
2245 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2246 goto fail;
2247 }
2248
2249 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2250 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2251
2252fail:
2253 dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%u\n",
2254 src_thread, dst_thread, ret);
2255 return ret;
2256}
2257
2258static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2259 const struct ti_sci_handle *handle,
2260 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2261{
2262 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2263 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2264 struct ti_sci_xfer *xfer;
2265 struct ti_sci_info *info;
2266 int ret = 0;
2267
2268 if (IS_ERR(handle))
2269 return PTR_ERR(handle);
2270 if (!handle)
2271 return -EINVAL;
2272
2273 info = handle_to_ti_sci_info(handle);
2274
2275 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2276 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2277 (u32 *)&req, sizeof(req), sizeof(*resp));
2278 if (IS_ERR(xfer)) {
2279 ret = PTR_ERR(xfer);
2280 dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2281 return ret;
2282 }
2283 req.valid_params = params->valid_params;
2284 req.nav_id = params->nav_id;
2285 req.index = params->index;
2286 req.tx_pause_on_err = params->tx_pause_on_err;
2287 req.tx_filt_einfo = params->tx_filt_einfo;
2288 req.tx_filt_pswords = params->tx_filt_pswords;
2289 req.tx_atype = params->tx_atype;
2290 req.tx_chan_type = params->tx_chan_type;
2291 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2292 req.tx_fetch_size = params->tx_fetch_size;
2293 req.tx_credit_count = params->tx_credit_count;
2294 req.txcq_qnum = params->txcq_qnum;
2295 req.tx_priority = params->tx_priority;
2296 req.tx_qos = params->tx_qos;
2297 req.tx_orderid = params->tx_orderid;
2298 req.fdepth = params->fdepth;
2299 req.tx_sched_priority = params->tx_sched_priority;
2300
2301 ret = ti_sci_do_xfer(info, xfer);
2302 if (ret) {
2303 dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2304 goto fail;
2305 }
2306
2307 resp =
2308 (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
2309 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2310
2311fail:
2312 dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2313 return ret;
2314}
2315
2316static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2317 const struct ti_sci_handle *handle,
2318 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2319{
2320 struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2321 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2322 struct ti_sci_xfer *xfer;
2323 struct ti_sci_info *info;
2324 int ret = 0;
2325
2326 if (IS_ERR(handle))
2327 return PTR_ERR(handle);
2328 if (!handle)
2329 return -EINVAL;
2330
2331 info = handle_to_ti_sci_info(handle);
2332
2333 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2334 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2335 (u32 *)&req, sizeof(req), sizeof(*resp));
2336 if (IS_ERR(xfer)) {
2337 ret = PTR_ERR(xfer);
2338 dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2339 return ret;
2340 }
2341
2342 req.valid_params = params->valid_params;
2343 req.nav_id = params->nav_id;
2344 req.index = params->index;
2345 req.rx_fetch_size = params->rx_fetch_size;
2346 req.rxcq_qnum = params->rxcq_qnum;
2347 req.rx_priority = params->rx_priority;
2348 req.rx_qos = params->rx_qos;
2349 req.rx_orderid = params->rx_orderid;
2350 req.rx_sched_priority = params->rx_sched_priority;
2351 req.flowid_start = params->flowid_start;
2352 req.flowid_cnt = params->flowid_cnt;
2353 req.rx_pause_on_err = params->rx_pause_on_err;
2354 req.rx_atype = params->rx_atype;
2355 req.rx_chan_type = params->rx_chan_type;
2356 req.rx_ignore_short = params->rx_ignore_short;
2357 req.rx_ignore_long = params->rx_ignore_long;
2358
2359 ret = ti_sci_do_xfer(info, xfer);
2360 if (ret) {
2361 dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2362 goto fail;
2363 }
2364
2365 resp =
2366 (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
2367 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2368
2369fail:
2370 dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2371 return ret;
2372}
2373
2374static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2375 const struct ti_sci_handle *handle,
2376 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2377{
2378 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2379 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2380 struct ti_sci_xfer *xfer;
2381 struct ti_sci_info *info;
2382 int ret = 0;
2383
2384 if (IS_ERR(handle))
2385 return PTR_ERR(handle);
2386 if (!handle)
2387 return -EINVAL;
2388
2389 info = handle_to_ti_sci_info(handle);
2390
2391 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2392 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2393 (u32 *)&req, sizeof(req), sizeof(*resp));
2394 if (IS_ERR(xfer)) {
2395 ret = PTR_ERR(xfer);
2396 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2397 return ret;
2398 }
2399
2400 req.valid_params = params->valid_params;
2401 req.nav_id = params->nav_id;
2402 req.flow_index = params->flow_index;
2403 req.rx_einfo_present = params->rx_einfo_present;
2404 req.rx_psinfo_present = params->rx_psinfo_present;
2405 req.rx_error_handling = params->rx_error_handling;
2406 req.rx_desc_type = params->rx_desc_type;
2407 req.rx_sop_offset = params->rx_sop_offset;
2408 req.rx_dest_qnum = params->rx_dest_qnum;
2409 req.rx_src_tag_hi = params->rx_src_tag_hi;
2410 req.rx_src_tag_lo = params->rx_src_tag_lo;
2411 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2412 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2413 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2414 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2415 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2416 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2417 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2418 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2419 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2420 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2421 req.rx_ps_location = params->rx_ps_location;
2422
2423 ret = ti_sci_do_xfer(info, xfer);
2424 if (ret) {
2425 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2426 goto fail;
2427 }
2428
2429 resp =
2430 (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
2431 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2432
2433fail:
2434 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2435 return ret;
2436}
2437
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002438/**
2439 * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
2440 * @handle: pointer to TI SCI handle
2441 * @region: region configuration parameters
2442 *
2443 * Return: 0 if all went well, else returns appropriate error value.
2444 */
2445static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
2446 const struct ti_sci_msg_fwl_region *region)
2447{
2448 struct ti_sci_msg_fwl_set_firewall_region_req req;
2449 struct ti_sci_msg_hdr *resp;
2450 struct ti_sci_info *info;
2451 struct ti_sci_xfer *xfer;
2452 int ret = 0;
2453
2454 if (IS_ERR(handle))
2455 return PTR_ERR(handle);
2456 if (!handle)
2457 return -EINVAL;
2458
2459 info = handle_to_ti_sci_info(handle);
2460
2461 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
2462 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2463 (u32 *)&req, sizeof(req), sizeof(*resp));
2464 if (IS_ERR(xfer)) {
2465 ret = PTR_ERR(xfer);
2466 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2467 return ret;
2468 }
2469
2470 req.fwl_id = region->fwl_id;
2471 req.region = region->region;
2472 req.n_permission_regs = region->n_permission_regs;
2473 req.control = region->control;
2474 req.permissions[0] = region->permissions[0];
2475 req.permissions[1] = region->permissions[1];
2476 req.permissions[2] = region->permissions[2];
2477 req.start_address = region->start_address;
2478 req.end_address = region->end_address;
2479
2480 ret = ti_sci_do_xfer(info, xfer);
2481 if (ret) {
2482 dev_err(info->dev, "Mbox send fail %d\n", ret);
2483 return ret;
2484 }
2485
2486 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2487
2488 if (!ti_sci_is_response_ack(resp))
2489 return -ENODEV;
2490
2491 return 0;
2492}
2493
2494/**
2495 * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
2496 * @handle: pointer to TI SCI handle
2497 * @region: region configuration parameters
2498 *
2499 * Return: 0 if all went well, else returns appropriate error value.
2500 */
2501static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
2502 struct ti_sci_msg_fwl_region *region)
2503{
2504 struct ti_sci_msg_fwl_get_firewall_region_req req;
2505 struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
2506 struct ti_sci_info *info;
2507 struct ti_sci_xfer *xfer;
2508 int ret = 0;
2509
2510 if (IS_ERR(handle))
2511 return PTR_ERR(handle);
2512 if (!handle)
2513 return -EINVAL;
2514
2515 info = handle_to_ti_sci_info(handle);
2516
2517 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2518 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2519 (u32 *)&req, sizeof(req), sizeof(*resp));
2520 if (IS_ERR(xfer)) {
2521 ret = PTR_ERR(xfer);
2522 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2523 return ret;
2524 }
2525
2526 req.fwl_id = region->fwl_id;
2527 req.region = region->region;
2528 req.n_permission_regs = region->n_permission_regs;
2529
2530 ret = ti_sci_do_xfer(info, xfer);
2531 if (ret) {
2532 dev_err(info->dev, "Mbox send fail %d\n", ret);
2533 return ret;
2534 }
2535
2536 resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
2537
2538 if (!ti_sci_is_response_ack(resp))
2539 return -ENODEV;
2540
2541 region->fwl_id = resp->fwl_id;
2542 region->region = resp->region;
2543 region->n_permission_regs = resp->n_permission_regs;
2544 region->control = resp->control;
2545 region->permissions[0] = resp->permissions[0];
2546 region->permissions[1] = resp->permissions[1];
2547 region->permissions[2] = resp->permissions[2];
2548 region->start_address = resp->start_address;
2549 region->end_address = resp->end_address;
2550
2551 return 0;
2552}
2553
2554/**
2555 * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
2556 * @handle: pointer to TI SCI handle
2557 * @region: region configuration parameters
2558 *
2559 * Return: 0 if all went well, else returns appropriate error value.
2560 */
2561static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
2562 struct ti_sci_msg_fwl_owner *owner)
2563{
2564 struct ti_sci_msg_fwl_change_owner_info_req req;
2565 struct ti_sci_msg_fwl_change_owner_info_resp *resp;
2566 struct ti_sci_info *info;
2567 struct ti_sci_xfer *xfer;
2568 int ret = 0;
2569
2570 if (IS_ERR(handle))
2571 return PTR_ERR(handle);
2572 if (!handle)
2573 return -EINVAL;
2574
2575 info = handle_to_ti_sci_info(handle);
2576
2577 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2578 TISCI_MSG_FWL_CHANGE_OWNER,
2579 (u32 *)&req, sizeof(req), sizeof(*resp));
2580 if (IS_ERR(xfer)) {
2581 ret = PTR_ERR(xfer);
2582 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2583 return ret;
2584 }
2585
2586 req.fwl_id = owner->fwl_id;
2587 req.region = owner->region;
2588 req.owner_index = owner->owner_index;
2589
2590 ret = ti_sci_do_xfer(info, xfer);
2591 if (ret) {
2592 dev_err(info->dev, "Mbox send fail %d\n", ret);
2593 return ret;
2594 }
2595
2596 resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
2597
2598 if (!ti_sci_is_response_ack(resp))
2599 return -ENODEV;
2600
2601 owner->fwl_id = resp->fwl_id;
2602 owner->region = resp->region;
2603 owner->owner_index = resp->owner_index;
2604 owner->owner_privid = resp->owner_privid;
2605 owner->owner_permission_bits = resp->owner_permission_bits;
2606
2607 return ret;
2608}
2609
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302610/*
2611 * ti_sci_setup_ops() - Setup the operations structures
2612 * @info: pointer to TISCI pointer
2613 */
2614static void ti_sci_setup_ops(struct ti_sci_info *info)
2615{
2616 struct ti_sci_ops *ops = &info->handle.ops;
2617 struct ti_sci_board_ops *bops = &ops->board_ops;
Andreas Dannenberg7bc33042018-08-27 15:57:34 +05302618 struct ti_sci_dev_ops *dops = &ops->dev_ops;
Lokesh Vutla9b871812018-08-27 15:57:35 +05302619 struct ti_sci_clk_ops *cops = &ops->clk_ops;
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05302620 struct ti_sci_core_ops *core_ops = &ops->core_ops;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302621 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302622 struct ti_sci_proc_ops *pops = &ops->proc_ops;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302623 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2624 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2625 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002626 struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302627
2628 bops->board_config = ti_sci_cmd_set_board_config;
2629 bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
2630 bops->board_config_security = ti_sci_cmd_set_board_config_security;
2631 bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
Andreas Dannenberg7bc33042018-08-27 15:57:34 +05302632
2633 dops->get_device = ti_sci_cmd_get_device;
2634 dops->idle_device = ti_sci_cmd_idle_device;
2635 dops->put_device = ti_sci_cmd_put_device;
2636 dops->is_valid = ti_sci_cmd_dev_is_valid;
2637 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2638 dops->is_idle = ti_sci_cmd_dev_is_idle;
2639 dops->is_stop = ti_sci_cmd_dev_is_stop;
2640 dops->is_on = ti_sci_cmd_dev_is_on;
2641 dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2642 dops->set_device_resets = ti_sci_cmd_set_device_resets;
2643 dops->get_device_resets = ti_sci_cmd_get_device_resets;
Lokesh Vutla9b871812018-08-27 15:57:35 +05302644
2645 cops->get_clock = ti_sci_cmd_get_clock;
2646 cops->idle_clock = ti_sci_cmd_idle_clock;
2647 cops->put_clock = ti_sci_cmd_put_clock;
2648 cops->is_auto = ti_sci_cmd_clk_is_auto;
2649 cops->is_on = ti_sci_cmd_clk_is_on;
2650 cops->is_off = ti_sci_cmd_clk_is_off;
2651
2652 cops->set_parent = ti_sci_cmd_clk_set_parent;
2653 cops->get_parent = ti_sci_cmd_clk_get_parent;
2654 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2655
2656 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2657 cops->set_freq = ti_sci_cmd_clk_set_freq;
2658 cops->get_freq = ti_sci_cmd_clk_get_freq;
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05302659
2660 core_ops->reboot_device = ti_sci_cmd_core_reboot;
Lokesh Vutla826eb742019-03-08 11:47:32 +05302661 core_ops->query_msmc = ti_sci_cmd_query_msmc;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302662
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302663 rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2664 rm_core_ops->get_range_from_shost =
2665 ti_sci_cmd_get_resource_range_from_shost;
2666
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302667 pops->proc_request = ti_sci_cmd_proc_request;
2668 pops->proc_release = ti_sci_cmd_proc_release;
2669 pops->proc_handover = ti_sci_cmd_proc_handover;
2670 pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
2671 pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
2672 pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
2673 pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302674
2675 rops->config = ti_sci_cmd_ring_config;
2676 rops->get_config = ti_sci_cmd_ring_get_config;
2677
2678 psilops->pair = ti_sci_cmd_rm_psil_pair;
2679 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2680
2681 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2682 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2683 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
Andrew F. Davis32ca8ff2019-04-12 12:54:43 -04002684
2685 fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
2686 fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
2687 fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302688}
2689
2690/**
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302691 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2692 * @dev: Pointer to the SYSFW device
2693 *
2694 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2695 * are encountered.
2696 */
2697const
2698struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2699{
2700 if (!sci_dev)
2701 return ERR_PTR(-EINVAL);
2702
2703 struct ti_sci_info *info = dev_get_priv(sci_dev);
2704
2705 if (!info)
2706 return ERR_PTR(-EINVAL);
2707
2708 struct ti_sci_handle *handle = &info->handle;
2709
2710 if (!handle)
2711 return ERR_PTR(-EINVAL);
2712
2713 return handle;
2714}
2715
2716/**
2717 * ti_sci_get_handle() - Get the TI SCI handle for a device
2718 * @dev: Pointer to device for which we want SCI handle
2719 *
2720 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2721 * are encountered.
2722 */
2723const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2724{
2725 if (!dev)
2726 return ERR_PTR(-EINVAL);
2727
2728 struct udevice *sci_dev = dev_get_parent(dev);
2729
2730 return ti_sci_get_handle_from_sysfw(sci_dev);
2731}
2732
2733/**
2734 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2735 * @dev: device node
2736 * @propname: property name containing phandle on TISCI node
2737 *
2738 * Return: pointer to handle if successful, else appropriate error value.
2739 */
2740const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2741 const char *property)
2742{
2743 struct ti_sci_info *entry, *info = NULL;
2744 u32 phandle, err;
2745 ofnode node;
2746
2747 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2748 if (err)
2749 return ERR_PTR(err);
2750
2751 node = ofnode_get_by_phandle(phandle);
2752 if (!ofnode_valid(node))
2753 return ERR_PTR(-EINVAL);
2754
2755 list_for_each_entry(entry, &ti_sci_list, list)
2756 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2757 info = entry;
2758 break;
2759 }
2760
2761 if (!info)
2762 return ERR_PTR(-ENODEV);
2763
2764 return &info->handle;
2765}
2766
2767/**
2768 * ti_sci_of_to_info() - generate private data from device tree
2769 * @dev: corresponding system controller interface device
2770 * @info: pointer to driver specific private data
2771 *
2772 * Return: 0 if all goes good, else appropriate error message.
2773 */
2774static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2775{
2776 int ret;
2777
2778 ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2779 if (ret) {
2780 dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2781 __func__, ret);
2782 return ret;
2783 }
2784
2785 ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2786 if (ret) {
2787 dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2788 __func__, ret);
2789 return ret;
2790 }
2791
2792 /* Notify channel is optional. Enable only if populated */
2793 ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
2794 if (ret) {
2795 dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
2796 __func__, ret);
2797 }
2798
2799 info->host_id = dev_read_u32_default(dev, "ti,host-id",
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302800 info->desc->default_host_id);
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302801
2802 info->is_secure = dev_read_bool(dev, "ti,secure-host");
2803
2804 return 0;
2805}
2806
2807/**
2808 * ti_sci_probe() - Basic probe
2809 * @dev: corresponding system controller interface device
2810 *
2811 * Return: 0 if all goes good, else appropriate error message.
2812 */
2813static int ti_sci_probe(struct udevice *dev)
2814{
2815 struct ti_sci_info *info;
2816 int ret;
2817
2818 debug("%s(dev=%p)\n", __func__, dev);
2819
2820 info = dev_get_priv(dev);
2821 info->desc = (void *)dev_get_driver_data(dev);
2822
2823 ret = ti_sci_of_to_info(dev, info);
2824 if (ret) {
2825 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2826 return ret;
2827 }
2828
2829 info->dev = dev;
2830 info->seq = 0xA;
2831
2832 list_add_tail(&info->list, &ti_sci_list);
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302833 ti_sci_setup_ops(info);
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302834
2835 ret = ti_sci_cmd_get_revision(&info->handle);
2836
2837 return ret;
2838}
2839
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302840/*
2841 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2842 * @res: Pointer to the TISCI resource
2843 *
2844 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2845 */
2846u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2847{
2848 u16 set, free_bit;
2849
2850 for (set = 0; set < res->sets; set++) {
2851 free_bit = find_first_zero_bit(res->desc[set].res_map,
2852 res->desc[set].num);
2853 if (free_bit != res->desc[set].num) {
2854 set_bit(free_bit, res->desc[set].res_map);
2855 return res->desc[set].start + free_bit;
2856 }
2857 }
2858
2859 return TI_SCI_RESOURCE_NULL;
2860}
2861
/**
 * ti_sci_release_resource() - Release a resource from TISCI resource.
 * @res: Pointer to the TISCI resource
 * @id: Resource id (absolute, i.e. including the range start) to release
 */
void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
{
	u16 set;

	/* Find the range set containing @id and clear its bit there */
	for (set = 0; set < res->sets; set++) {
		if (res->desc[set].start <= id &&
		    (res->desc[set].num + res->desc[set].start) > id)
			clear_bit(id - res->desc[set].start,
				  res->desc[set].res_map);
	}
}
2877
2878/**
2879 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
2880 * @handle: TISCI handle
2881 * @dev: Device pointer to which the resource is assigned
2882 * @of_prop: property name by which the resource are represented
2883 *
2884 * Note: This function expects of_prop to be in the form of tuples
2885 * <type, subtype>. Allocates and initializes ti_sci_resource structure
2886 * for each of_prop. Client driver can directly call
2887 * ti_sci_(get_free, release)_resource apis for handling the resource.
2888 *
2889 * Return: Pointer to ti_sci_resource if all went well else appropriate
2890 * error pointer.
2891 */
2892struct ti_sci_resource *
2893devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2894 struct udevice *dev, u32 dev_id, char *of_prop)
2895{
2896 u32 resource_subtype;
2897 u16 resource_type;
2898 struct ti_sci_resource *res;
2899 int sets, i, ret;
2900 u32 *temp;
2901
2902 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
2903 if (!res)
2904 return ERR_PTR(-ENOMEM);
2905
2906 sets = dev_read_size(dev, of_prop);
2907 if (sets < 0) {
2908 dev_err(dev, "%s resource type ids not available\n", of_prop);
2909 return ERR_PTR(sets);
2910 }
2911 temp = malloc(sets);
2912 sets /= sizeof(u32);
2913 res->sets = sets;
2914
2915 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
2916 GFP_KERNEL);
2917 if (!res->desc)
2918 return ERR_PTR(-ENOMEM);
2919
2920 ret = ti_sci_get_resource_type(handle_to_ti_sci_info(handle), dev_id,
2921 &resource_type);
2922 if (ret) {
2923 dev_err(dev, "No valid resource type for %u\n", dev_id);
2924 return ERR_PTR(-EINVAL);
2925 }
2926
2927 ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
2928 if (ret)
2929 return ERR_PTR(-EINVAL);
2930
2931 for (i = 0; i < res->sets; i++) {
2932 resource_subtype = temp[i];
2933 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
2934 resource_subtype,
2935 &res->desc[i].start,
2936 &res->desc[i].num);
2937 if (ret) {
2938 dev_err(dev, "type %d subtype %d not allocated for host %d\n",
2939 resource_type, resource_subtype,
2940 handle_to_ti_sci_info(handle)->host_id);
2941 return ERR_PTR(ret);
2942 }
2943
2944 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
2945 resource_type, resource_subtype, res->desc[i].start,
2946 res->desc[i].num);
2947
2948 res->desc[i].res_map =
2949 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
2950 sizeof(*res->desc[i].res_map), GFP_KERNEL);
2951 if (!res->desc[i].res_map)
2952 return ERR_PTR(-ENOMEM);
2953 }
2954
2955 return res;
2956}
2957
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
	/* No dev_id -> RM type translation table needed on K2G */
	.rm_type_map = NULL,
};

/*
 * TISCI dev_id -> RM type translation table for AM654 (see
 * struct ti_sci_rm_type_map); the all-zero entry terminates the table.
 */
static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
	{.dev_id = 0, .type = 0x000}, /* end of table */
};

/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 60,
	.rm_type_map = ti_sci_am654_rm_type_map,
};
2989
/* Devicetree match table: per-compatible pointer to the SoC description */
static const struct udevice_id ti_sci_ids[] = {
	{
		.compatible = "ti,k2g-sci",
		.data = (ulong)&ti_sci_pmmc_k2g_desc
	},
	{
		.compatible = "ti,am654-sci",
		.data = (ulong)&ti_sci_pmmc_am654_desc
	},
	{ /* Sentinel */ },
};

/* UCLASS_FIRMWARE driver binding for the TI SCI system controller */
U_BOOT_DRIVER(ti_sci) = {
	.name = "ti_sci",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_ids,
	.probe = ti_sci_probe,
	.priv_auto_alloc_size = sizeof(struct ti_sci_info),
};