blob: ec78a520e702cc711677d72d82953e31c1ecb38f [file] [log] [blame]
Lokesh Vutla32cd2512018-08-27 15:57:32 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments System Control Interface Protocol Driver
4 * Based on drivers/firmware/ti_sci.c from Linux.
5 *
6 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
7 * Lokesh Vutla <lokeshvutla@ti.com>
8 */
9
10#include <common.h>
11#include <dm.h>
12#include <errno.h>
13#include <mailbox.h>
14#include <dm/device.h>
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +053015#include <linux/compat.h>
Lokesh Vutla32cd2512018-08-27 15:57:32 +053016#include <linux/err.h>
17#include <linux/soc/ti/k3-sec-proxy.h>
18#include <linux/soc/ti/ti_sci_protocol.h>
19
20#include "ti_sci.h"
21
/* List of all TI SCI devices active in system (entries: ti_sci_info.list) */
static LIST_HEAD(ti_sci_list);
24
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message buffer and length handed to the mailbox.
 *		The mailbox driver updates this to point at the received
 *		response after a transfer completes.
 * @rx_len:	Expected receive message length in bytes
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	u8 rx_len;
};
34
/**
 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
 *			       management representation of dev_ids.
 * @dev_id:	TISCI device ID
 * @type:	Corresponding id as identified by TISCI RM.
 *
 * Note: This is used only as a work around for using RM range apis
 * for AM654 SoC. For future SoCs dev_id will be used as type
 * for RM range APIs. In order to maintain ABI backward compatibility
 * type is not being changed for AM654 SoC.
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};
50
/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 * @rm_type_map:	RM resource type mapping structure.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
	struct ti_sci_rm_type_map *rm_type_map;
};
67
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	Instance of TI SCI handle to send to clients.
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify: Notification mailbox channel
 * @xfer:	xfer info
 * @list:	list head (linked into the global ti_sci_list)
 * @is_secure:	Determines if the communication is through secure threads.
 * @host_id:	Host identifier representing the compute entity
 * @seq:	Seq id used for verification for tx and rx message.
 */
struct ti_sci_info {
	struct udevice *dev;
	const struct ti_sci_desc *desc;
	struct ti_sci_handle handle;
	struct mbox_chan chan_tx;
	struct mbox_chan chan_rx;
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;
	struct list_head list;
	bool is_secure;
	u8 host_id;
	u8 seq;
};
94
/* Map a client-facing ti_sci_handle back to its containing ti_sci_info */
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
96
97/**
98 * ti_sci_setup_one_xfer() - Setup one message type
99 * @info: Pointer to SCI entity information
100 * @msg_type: Message type
101 * @msg_flags: Flag to set for the message
102 * @buf: Buffer to be send to mailbox channel
103 * @tx_message_size: transmit message size
104 * @rx_message_size: receive message size
105 *
106 * Helper function which is used by various command functions that are
107 * exposed to clients of this driver for allocating a message traffic event.
108 *
109 * Return: Corresponding ti_sci_xfer pointer if all went fine,
110 * else appropriate error pointer.
111 */
112static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
113 u16 msg_type, u32 msg_flags,
114 u32 *buf,
115 size_t tx_message_size,
116 size_t rx_message_size)
117{
118 struct ti_sci_xfer *xfer = &info->xfer;
119 struct ti_sci_msg_hdr *hdr;
120
121 /* Ensure we have sane transfer sizes */
122 if (rx_message_size > info->desc->max_msg_size ||
123 tx_message_size > info->desc->max_msg_size ||
124 rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
125 return ERR_PTR(-ERANGE);
126
127 info->seq = ~info->seq;
128 xfer->tx_message.buf = buf;
129 xfer->tx_message.len = tx_message_size;
130 xfer->rx_len = (u8)rx_message_size;
131
132 hdr = (struct ti_sci_msg_hdr *)buf;
133 hdr->seq = info->seq;
134 hdr->type = msg_type;
135 hdr->host = info->host_id;
136 hdr->flags = msg_flags;
137
138 return xfer;
139}
140
141/**
142 * ti_sci_get_response() - Receive response from mailbox channel
143 * @info: Pointer to SCI entity information
144 * @xfer: Transfer to initiate and wait for response
145 * @chan: Channel to receive the response
146 *
147 * Return: -ETIMEDOUT in case of no response, if transmit error,
148 * return corresponding error, else if all goes well,
149 * return 0.
150 */
151static inline int ti_sci_get_response(struct ti_sci_info *info,
152 struct ti_sci_xfer *xfer,
153 struct mbox_chan *chan)
154{
155 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
156 struct ti_sci_secure_msg_hdr *secure_hdr;
157 struct ti_sci_msg_hdr *hdr;
158 int ret;
159
160 /* Receive the response */
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +0530161 ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms);
Lokesh Vutla32cd2512018-08-27 15:57:32 +0530162 if (ret) {
163 dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
164 __func__, ret);
165 return ret;
166 }
167
168 /* ToDo: Verify checksum */
169 if (info->is_secure) {
170 secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
171 msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
172 }
173
174 /* msg is updated by mailbox driver */
175 hdr = (struct ti_sci_msg_hdr *)msg->buf;
176
177 /* Sanity check for message response */
178 if (hdr->seq != info->seq) {
179 dev_dbg(info->dev, "%s: Message for %d is not expected\n",
180 __func__, hdr->seq);
181 return ret;
182 }
183
184 if (msg->len > info->desc->max_msg_size) {
185 dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
186 __func__, msg->len, info->desc->max_msg_size);
187 return -EINVAL;
188 }
189
190 if (msg->len < xfer->rx_len) {
191 dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
192 __func__, msg->len, xfer->rx_len);
193 }
194
195 return ret;
196}
197
198/**
199 * ti_sci_do_xfer() - Do one transfer
200 * @info: Pointer to SCI entity information
201 * @xfer: Transfer to initiate and wait for response
202 *
203 * Return: 0 if all went fine, else return appropriate error.
204 */
205static inline int ti_sci_do_xfer(struct ti_sci_info *info,
206 struct ti_sci_xfer *xfer)
207{
208 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
209 u8 secure_buf[info->desc->max_msg_size];
210 struct ti_sci_secure_msg_hdr secure_hdr;
211 int ret;
212
213 if (info->is_secure) {
214 /* ToDo: get checksum of the entire message */
215 secure_hdr.checksum = 0;
216 secure_hdr.reserved = 0;
217 memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
218 xfer->tx_message.len);
219
220 xfer->tx_message.buf = (u32 *)secure_buf;
221 xfer->tx_message.len += sizeof(secure_hdr);
222 xfer->rx_len += sizeof(secure_hdr);
223 }
224
225 /* Send the message */
226 ret = mbox_send(&info->chan_tx, msg);
227 if (ret) {
228 dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
229 __func__, ret);
230 return ret;
231 }
232
233 return ti_sci_get_response(info, xfer, &info->chan_rx);
234}
235
236/**
237 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
238 * @handle: pointer to TI SCI handle
239 *
240 * Updates the SCI information in the internal data structure.
241 *
242 * Return: 0 if all went fine, else return appropriate error.
243 */
244static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
245{
246 struct ti_sci_msg_resp_version *rev_info;
247 struct ti_sci_version_info *ver;
248 struct ti_sci_msg_hdr hdr;
249 struct ti_sci_info *info;
250 struct ti_sci_xfer *xfer;
251 int ret;
252
253 if (IS_ERR(handle))
254 return PTR_ERR(handle);
255 if (!handle)
256 return -EINVAL;
257
258 info = handle_to_ti_sci_info(handle);
259
260 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION, 0x0,
261 (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
262 sizeof(*rev_info));
263 if (IS_ERR(xfer)) {
264 ret = PTR_ERR(xfer);
265 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
266 return ret;
267 }
268
269 ret = ti_sci_do_xfer(info, xfer);
270 if (ret) {
271 dev_err(info->dev, "Mbox communication fail %d\n", ret);
272 return ret;
273 }
274
275 rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
276
277 ver = &handle->version;
278 ver->abi_major = rev_info->abi_major;
279 ver->abi_minor = rev_info->abi_minor;
280 ver->firmware_revision = rev_info->firmware_revision;
281 strncpy(ver->firmware_description, rev_info->firmware_description,
282 sizeof(ver->firmware_description));
283
284 return 0;
285}
286
287/**
288 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
289 * @r: pointer to response buffer
290 *
291 * Return: true if the response was an ACK, else returns false.
292 */
293static inline bool ti_sci_is_response_ack(void *r)
294{
295 struct ti_sci_msg_hdr *hdr = r;
296
297 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
298}
299
300/**
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +0530301 * cmd_set_board_config_using_msg() - Common command to send board configuration
302 * message
303 * @handle: pointer to TI SCI handle
304 * @msg_type: One of the TISCI message types to set board configuration
305 * @addr: Address where the board config structure is located
306 * @size: Size of the board config structure
307 *
308 * Return: 0 if all went well, else returns appropriate error value.
309 */
310static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
311 u16 msg_type, u64 addr, u32 size)
312{
313 struct ti_sci_msg_board_config req;
314 struct ti_sci_msg_hdr *resp;
315 struct ti_sci_info *info;
316 struct ti_sci_xfer *xfer;
317 int ret = 0;
318
319 if (IS_ERR(handle))
320 return PTR_ERR(handle);
321 if (!handle)
322 return -EINVAL;
323
324 info = handle_to_ti_sci_info(handle);
325
326 xfer = ti_sci_setup_one_xfer(info, msg_type,
327 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
328 (u32 *)&req, sizeof(req), sizeof(*resp));
329 if (IS_ERR(xfer)) {
330 ret = PTR_ERR(xfer);
331 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
332 return ret;
333 }
334 req.boardcfgp_high = (addr >> 32) & 0xffffffff;
335 req.boardcfgp_low = addr & 0xffffffff;
336 req.boardcfg_size = size;
337
338 ret = ti_sci_do_xfer(info, xfer);
339 if (ret) {
340 dev_err(info->dev, "Mbox send fail %d\n", ret);
341 return ret;
342 }
343
344 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
345
346 if (!ti_sci_is_response_ack(resp))
347 return -ENODEV;
348
349 return ret;
350}
351
352/**
353 * ti_sci_cmd_set_board_config() - Command to send board configuration message
354 * @handle: pointer to TI SCI handle
355 * @addr: Address where the board config structure is located
356 * @size: Size of the board config structure
357 *
358 * Return: 0 if all went well, else returns appropriate error value.
359 */
360static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
361 u64 addr, u32 size)
362{
363 return cmd_set_board_config_using_msg(handle,
364 TI_SCI_MSG_BOARD_CONFIG,
365 addr, size);
366}
367
368/**
369 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
370 * management configuration
371 * @handle: pointer to TI SCI handle
372 * @addr: Address where the board RM config structure is located
373 * @size: Size of the RM config structure
374 *
375 * Return: 0 if all went well, else returns appropriate error value.
376 */
377static
378int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
379 u64 addr, u32 size)
380{
381 return cmd_set_board_config_using_msg(handle,
382 TI_SCI_MSG_BOARD_CONFIG_RM,
383 addr, size);
384}
385
386/**
387 * ti_sci_cmd_set_board_config_security() - Command to send board security
388 * configuration message
389 * @handle: pointer to TI SCI handle
390 * @addr: Address where the board security config structure is located
391 * @size: Size of the security config structure
392 *
393 * Return: 0 if all went well, else returns appropriate error value.
394 */
395static
396int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
397 u64 addr, u32 size)
398{
399 return cmd_set_board_config_using_msg(handle,
400 TI_SCI_MSG_BOARD_CONFIG_SECURITY,
401 addr, size);
402}
403
404/**
405 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
406 * configuration message
407 * @handle: pointer to TI SCI handle
408 * @addr: Address where the board PM config structure is located
409 * @size: Size of the PM config structure
410 *
411 * Return: 0 if all went well, else returns appropriate error value.
412 */
413static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
414 u64 addr, u32 size)
415{
416 return cmd_set_board_config_using_msg(handle,
417 TI_SCI_MSG_BOARD_CONFIG_PM,
418 addr, size);
419}
420
Andreas Dannenberg7bc33042018-08-27 15:57:34 +0530421/**
422 * ti_sci_set_device_state() - Set device state helper
423 * @handle: pointer to TI SCI handle
424 * @id: Device identifier
425 * @flags: flags to setup for the device
426 * @state: State to move the device to
427 *
428 * Return: 0 if all went well, else returns appropriate error value.
429 */
430static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
431 u32 id, u32 flags, u8 state)
432{
433 struct ti_sci_msg_req_set_device_state req;
434 struct ti_sci_msg_hdr *resp;
435 struct ti_sci_info *info;
436 struct ti_sci_xfer *xfer;
437 int ret = 0;
438
439 if (IS_ERR(handle))
440 return PTR_ERR(handle);
441 if (!handle)
442 return -EINVAL;
443
444 info = handle_to_ti_sci_info(handle);
445
446 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
447 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
448 (u32 *)&req, sizeof(req), sizeof(*resp));
449 if (IS_ERR(xfer)) {
450 ret = PTR_ERR(xfer);
451 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
452 return ret;
453 }
454 req.id = id;
455 req.state = state;
456
457 ret = ti_sci_do_xfer(info, xfer);
458 if (ret) {
459 dev_err(info->dev, "Mbox send fail %d\n", ret);
460 return ret;
461 }
462
463 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
464
465 if (!ti_sci_is_response_ack(resp))
466 return -ENODEV;
467
468 return ret;
469}
470
471/**
472 * ti_sci_get_device_state() - Get device state helper
473 * @handle: Handle to the device
474 * @id: Device Identifier
475 * @clcnt: Pointer to Context Loss Count
476 * @resets: pointer to resets
477 * @p_state: pointer to p_state
478 * @c_state: pointer to c_state
479 *
480 * Return: 0 if all went fine, else return appropriate error.
481 */
482static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
483 u32 id, u32 *clcnt, u32 *resets,
484 u8 *p_state, u8 *c_state)
485{
486 struct ti_sci_msg_resp_get_device_state *resp;
487 struct ti_sci_msg_req_get_device_state req;
488 struct ti_sci_info *info;
489 struct ti_sci_xfer *xfer;
490 int ret = 0;
491
492 if (IS_ERR(handle))
493 return PTR_ERR(handle);
494 if (!handle)
495 return -EINVAL;
496
497 if (!clcnt && !resets && !p_state && !c_state)
498 return -EINVAL;
499
500 info = handle_to_ti_sci_info(handle);
501
502 /* Response is expected, so need of any flags */
503 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE, 0,
504 (u32 *)&req, sizeof(req), sizeof(*resp));
505 if (IS_ERR(xfer)) {
506 ret = PTR_ERR(xfer);
507 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
508 return ret;
509 }
510 req.id = id;
511
512 ret = ti_sci_do_xfer(info, xfer);
513 if (ret) {
514 dev_err(dev, "Mbox send fail %d\n", ret);
515 return ret;
516 }
517
518 resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
519 if (!ti_sci_is_response_ack(resp))
520 return -ENODEV;
521
522 if (clcnt)
523 *clcnt = resp->context_loss_count;
524 if (resets)
525 *resets = resp->resets;
526 if (p_state)
527 *p_state = resp->programmed_state;
528 if (c_state)
529 *c_state = resp->current_state;
530
531 return ret;
532}
533
534/**
535 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
536 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
537 * @id: Device Identifier
538 *
539 * Request for the device - NOTE: the client MUST maintain integrity of
540 * usage count by balancing get_device with put_device. No refcounting is
541 * managed by driver for that purpose.
542 *
543 * NOTE: The request is for exclusive access for the processor.
544 *
545 * Return: 0 if all went fine, else return appropriate error.
546 */
547static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
548{
549 return ti_sci_set_device_state(handle, id,
550 MSG_FLAG_DEVICE_EXCLUSIVE,
551 MSG_DEVICE_SW_STATE_ON);
552}
553
554/**
555 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
556 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
557 * @id: Device Identifier
558 *
559 * Request for the device - NOTE: the client MUST maintain integrity of
560 * usage count by balancing get_device with put_device. No refcounting is
561 * managed by driver for that purpose.
562 *
563 * Return: 0 if all went fine, else return appropriate error.
564 */
565static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
566{
567 return ti_sci_set_device_state(handle, id,
568 MSG_FLAG_DEVICE_EXCLUSIVE,
569 MSG_DEVICE_SW_STATE_RETENTION);
570}
571
572/**
573 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
574 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
575 * @id: Device Identifier
576 *
577 * Request for the device - NOTE: the client MUST maintain integrity of
578 * usage count by balancing get_device with put_device. No refcounting is
579 * managed by driver for that purpose.
580 *
581 * Return: 0 if all went fine, else return appropriate error.
582 */
583static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
584{
585 return ti_sci_set_device_state(handle, id,
586 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
587}
588
589/**
590 * ti_sci_cmd_dev_is_valid() - Is the device valid
591 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
592 * @id: Device Identifier
593 *
594 * Return: 0 if all went fine and the device ID is valid, else return
595 * appropriate error.
596 */
597static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
598{
599 u8 unused;
600
601 /* check the device state which will also tell us if the ID is valid */
602 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
603}
604
605/**
606 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
607 * @handle: Pointer to TISCI handle
608 * @id: Device Identifier
609 * @count: Pointer to Context Loss counter to populate
610 *
611 * Return: 0 if all went fine, else return appropriate error.
612 */
613static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
614 u32 *count)
615{
616 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
617}
618
619/**
620 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
621 * @handle: Pointer to TISCI handle
622 * @id: Device Identifier
623 * @r_state: true if requested to be idle
624 *
625 * Return: 0 if all went fine, else return appropriate error.
626 */
627static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
628 bool *r_state)
629{
630 int ret;
631 u8 state;
632
633 if (!r_state)
634 return -EINVAL;
635
636 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
637 if (ret)
638 return ret;
639
640 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
641
642 return 0;
643}
644
645/**
646 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
647 * @handle: Pointer to TISCI handle
648 * @id: Device Identifier
649 * @r_state: true if requested to be stopped
650 * @curr_state: true if currently stopped.
651 *
652 * Return: 0 if all went fine, else return appropriate error.
653 */
654static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
655 bool *r_state, bool *curr_state)
656{
657 int ret;
658 u8 p_state, c_state;
659
660 if (!r_state && !curr_state)
661 return -EINVAL;
662
663 ret =
664 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
665 if (ret)
666 return ret;
667
668 if (r_state)
669 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
670 if (curr_state)
671 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
672
673 return 0;
674}
675
676/**
677 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
678 * @handle: Pointer to TISCI handle
679 * @id: Device Identifier
680 * @r_state: true if requested to be ON
681 * @curr_state: true if currently ON and active
682 *
683 * Return: 0 if all went fine, else return appropriate error.
684 */
685static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
686 bool *r_state, bool *curr_state)
687{
688 int ret;
689 u8 p_state, c_state;
690
691 if (!r_state && !curr_state)
692 return -EINVAL;
693
694 ret =
695 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
696 if (ret)
697 return ret;
698
699 if (r_state)
700 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
701 if (curr_state)
702 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
703
704 return 0;
705}
706
707/**
708 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
709 * @handle: Pointer to TISCI handle
710 * @id: Device Identifier
711 * @curr_state: true if currently transitioning.
712 *
713 * Return: 0 if all went fine, else return appropriate error.
714 */
715static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
716 bool *curr_state)
717{
718 int ret;
719 u8 state;
720
721 if (!curr_state)
722 return -EINVAL;
723
724 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
725 if (ret)
726 return ret;
727
728 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
729
730 return 0;
731}
732
733/**
734 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
735 * by TISCI
736 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
737 * @id: Device Identifier
738 * @reset_state: Device specific reset bit field
739 *
740 * Return: 0 if all went fine, else return appropriate error.
741 */
742static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
743 u32 id, u32 reset_state)
744{
745 struct ti_sci_msg_req_set_device_resets req;
746 struct ti_sci_msg_hdr *resp;
747 struct ti_sci_info *info;
748 struct ti_sci_xfer *xfer;
749 int ret = 0;
750
751 if (IS_ERR(handle))
752 return PTR_ERR(handle);
753 if (!handle)
754 return -EINVAL;
755
756 info = handle_to_ti_sci_info(handle);
757
758 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
759 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
760 (u32 *)&req, sizeof(req), sizeof(*resp));
761 if (IS_ERR(xfer)) {
762 ret = PTR_ERR(xfer);
763 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
764 return ret;
765 }
766 req.id = id;
767 req.resets = reset_state;
768
769 ret = ti_sci_do_xfer(info, xfer);
770 if (ret) {
771 dev_err(info->dev, "Mbox send fail %d\n", ret);
772 return ret;
773 }
774
775 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
776
777 if (!ti_sci_is_response_ack(resp))
778 return -ENODEV;
779
780 return ret;
781}
782
783/**
784 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
785 * by TISCI
786 * @handle: Pointer to TISCI handle
787 * @id: Device Identifier
788 * @reset_state: Pointer to reset state to populate
789 *
790 * Return: 0 if all went fine, else return appropriate error.
791 */
792static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
793 u32 id, u32 *reset_state)
794{
795 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
796 NULL);
797}
798
Lokesh Vutla9b871812018-08-27 15:57:35 +0530799/**
800 * ti_sci_set_clock_state() - Set clock state helper
801 * @handle: pointer to TI SCI handle
802 * @dev_id: Device identifier this request is for
803 * @clk_id: Clock identifier for the device for this request.
804 * Each device has it's own set of clock inputs. This indexes
805 * which clock input to modify.
806 * @flags: Header flags as needed
807 * @state: State to request for the clock.
808 *
809 * Return: 0 if all went well, else returns appropriate error value.
810 */
811static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
812 u32 dev_id, u8 clk_id,
813 u32 flags, u8 state)
814{
815 struct ti_sci_msg_req_set_clock_state req;
816 struct ti_sci_msg_hdr *resp;
817 struct ti_sci_info *info;
818 struct ti_sci_xfer *xfer;
819 int ret = 0;
820
821 if (IS_ERR(handle))
822 return PTR_ERR(handle);
823 if (!handle)
824 return -EINVAL;
825
826 info = handle_to_ti_sci_info(handle);
827
828 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
829 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
830 (u32 *)&req, sizeof(req), sizeof(*resp));
831 if (IS_ERR(xfer)) {
832 ret = PTR_ERR(xfer);
833 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
834 return ret;
835 }
836 req.dev_id = dev_id;
837 req.clk_id = clk_id;
838 req.request_state = state;
839
840 ret = ti_sci_do_xfer(info, xfer);
841 if (ret) {
842 dev_err(info->dev, "Mbox send fail %d\n", ret);
843 return ret;
844 }
845
846 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
847
848 if (!ti_sci_is_response_ack(resp))
849 return -ENODEV;
850
851 return ret;
852}
853
854/**
855 * ti_sci_cmd_get_clock_state() - Get clock state helper
856 * @handle: pointer to TI SCI handle
857 * @dev_id: Device identifier this request is for
858 * @clk_id: Clock identifier for the device for this request.
859 * Each device has it's own set of clock inputs. This indexes
860 * which clock input to modify.
861 * @programmed_state: State requested for clock to move to
862 * @current_state: State that the clock is currently in
863 *
864 * Return: 0 if all went well, else returns appropriate error value.
865 */
866static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
867 u32 dev_id, u8 clk_id,
868 u8 *programmed_state, u8 *current_state)
869{
870 struct ti_sci_msg_resp_get_clock_state *resp;
871 struct ti_sci_msg_req_get_clock_state req;
872 struct ti_sci_info *info;
873 struct ti_sci_xfer *xfer;
874 int ret = 0;
875
876 if (IS_ERR(handle))
877 return PTR_ERR(handle);
878 if (!handle)
879 return -EINVAL;
880
881 if (!programmed_state && !current_state)
882 return -EINVAL;
883
884 info = handle_to_ti_sci_info(handle);
885
886 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
887 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
888 (u32 *)&req, sizeof(req), sizeof(*resp));
889 if (IS_ERR(xfer)) {
890 ret = PTR_ERR(xfer);
891 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
892 return ret;
893 }
894 req.dev_id = dev_id;
895 req.clk_id = clk_id;
896
897 ret = ti_sci_do_xfer(info, xfer);
898 if (ret) {
899 dev_err(info->dev, "Mbox send fail %d\n", ret);
900 return ret;
901 }
902
903 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
904
905 if (!ti_sci_is_response_ack(resp))
906 return -ENODEV;
907
908 if (programmed_state)
909 *programmed_state = resp->programmed_state;
910 if (current_state)
911 *current_state = resp->current_state;
912
913 return ret;
914}
915
916/**
917 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
918 * @handle: pointer to TI SCI handle
919 * @dev_id: Device identifier this request is for
920 * @clk_id: Clock identifier for the device for this request.
921 * Each device has it's own set of clock inputs. This indexes
922 * which clock input to modify.
923 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
924 * @can_change_freq: 'true' if frequency change is desired, else 'false'
925 * @enable_input_term: 'true' if input termination is desired, else 'false'
926 *
927 * Return: 0 if all went well, else returns appropriate error value.
928 */
929static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
930 u8 clk_id, bool needs_ssc, bool can_change_freq,
931 bool enable_input_term)
932{
933 u32 flags = 0;
934
935 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
936 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
937 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
938
939 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
940 MSG_CLOCK_SW_STATE_REQ);
941}
942
943/**
944 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
945 * @handle: pointer to TI SCI handle
946 * @dev_id: Device identifier this request is for
947 * @clk_id: Clock identifier for the device for this request.
948 * Each device has it's own set of clock inputs. This indexes
949 * which clock input to modify.
950 *
951 * NOTE: This clock must have been requested by get_clock previously.
952 *
953 * Return: 0 if all went well, else returns appropriate error value.
954 */
955static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
956 u32 dev_id, u8 clk_id)
957{
958 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
959 MSG_CLOCK_SW_STATE_UNREQ);
960}
961
962/**
963 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
964 * @handle: pointer to TI SCI handle
965 * @dev_id: Device identifier this request is for
966 * @clk_id: Clock identifier for the device for this request.
967 * Each device has it's own set of clock inputs. This indexes
968 * which clock input to modify.
969 *
970 * NOTE: This clock must have been requested by get_clock previously.
971 *
972 * Return: 0 if all went well, else returns appropriate error value.
973 */
974static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
975 u32 dev_id, u8 clk_id)
976{
977 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
978 MSG_CLOCK_SW_STATE_AUTO);
979}
980
981/**
982 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
983 * @handle: pointer to TI SCI handle
984 * @dev_id: Device identifier this request is for
985 * @clk_id: Clock identifier for the device for this request.
986 * Each device has it's own set of clock inputs. This indexes
987 * which clock input to modify.
988 * @req_state: state indicating if the clock is auto managed
989 *
990 * Return: 0 if all went well, else returns appropriate error value.
991 */
992static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
993 u32 dev_id, u8 clk_id, bool *req_state)
994{
995 u8 state = 0;
996 int ret;
997
998 if (!req_state)
999 return -EINVAL;
1000
1001 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1002 if (ret)
1003 return ret;
1004
1005 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1006 return 0;
1007}
1008
1009/**
1010 * ti_sci_cmd_clk_is_on() - Is the clock ON
1011 * @handle: pointer to TI SCI handle
1012 * @dev_id: Device identifier this request is for
1013 * @clk_id: Clock identifier for the device for this request.
1014 * Each device has it's own set of clock inputs. This indexes
1015 * which clock input to modify.
1016 * @req_state: state indicating if the clock is managed by us and enabled
1017 * @curr_state: state indicating if the clock is ready for operation
1018 *
1019 * Return: 0 if all went well, else returns appropriate error value.
1020 */
1021static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1022 u8 clk_id, bool *req_state, bool *curr_state)
1023{
1024 u8 c_state = 0, r_state = 0;
1025 int ret;
1026
1027 if (!req_state && !curr_state)
1028 return -EINVAL;
1029
1030 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1031 &r_state, &c_state);
1032 if (ret)
1033 return ret;
1034
1035 if (req_state)
1036 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1037 if (curr_state)
1038 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1039 return 0;
1040}
1041
1042/**
1043 * ti_sci_cmd_clk_is_off() - Is the clock OFF
1044 * @handle: pointer to TI SCI handle
1045 * @dev_id: Device identifier this request is for
1046 * @clk_id: Clock identifier for the device for this request.
1047 * Each device has it's own set of clock inputs. This indexes
1048 * which clock input to modify.
1049 * @req_state: state indicating if the clock is managed by us and disabled
1050 * @curr_state: state indicating if the clock is NOT ready for operation
1051 *
1052 * Return: 0 if all went well, else returns appropriate error value.
1053 */
1054static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1055 u8 clk_id, bool *req_state, bool *curr_state)
1056{
1057 u8 c_state = 0, r_state = 0;
1058 int ret;
1059
1060 if (!req_state && !curr_state)
1061 return -EINVAL;
1062
1063 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1064 &r_state, &c_state);
1065 if (ret)
1066 return ret;
1067
1068 if (req_state)
1069 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1070 if (curr_state)
1071 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1072 return 0;
1073}
1074
1075/**
1076 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1077 * @handle: pointer to TI SCI handle
1078 * @dev_id: Device identifier this request is for
1079 * @clk_id: Clock identifier for the device for this request.
1080 * Each device has it's own set of clock inputs. This indexes
1081 * which clock input to modify.
1082 * @parent_id: Parent clock identifier to set
1083 *
1084 * Return: 0 if all went well, else returns appropriate error value.
1085 */
1086static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1087 u32 dev_id, u8 clk_id, u8 parent_id)
1088{
1089 struct ti_sci_msg_req_set_clock_parent req;
1090 struct ti_sci_msg_hdr *resp;
1091 struct ti_sci_info *info;
1092 struct ti_sci_xfer *xfer;
1093 int ret = 0;
1094
1095 if (IS_ERR(handle))
1096 return PTR_ERR(handle);
1097 if (!handle)
1098 return -EINVAL;
1099
1100 info = handle_to_ti_sci_info(handle);
1101
1102 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1103 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1104 (u32 *)&req, sizeof(req), sizeof(*resp));
1105 if (IS_ERR(xfer)) {
1106 ret = PTR_ERR(xfer);
1107 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1108 return ret;
1109 }
1110 req.dev_id = dev_id;
1111 req.clk_id = clk_id;
1112 req.parent_id = parent_id;
1113
1114 ret = ti_sci_do_xfer(info, xfer);
1115 if (ret) {
1116 dev_err(info->dev, "Mbox send fail %d\n", ret);
1117 return ret;
1118 }
1119
1120 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1121
1122 if (!ti_sci_is_response_ack(resp))
1123 return -ENODEV;
1124
1125 return ret;
1126}
1127
1128/**
1129 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1130 * @handle: pointer to TI SCI handle
1131 * @dev_id: Device identifier this request is for
1132 * @clk_id: Clock identifier for the device for this request.
1133 * Each device has it's own set of clock inputs. This indexes
1134 * which clock input to modify.
1135 * @parent_id: Current clock parent
1136 *
1137 * Return: 0 if all went well, else returns appropriate error value.
1138 */
1139static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1140 u32 dev_id, u8 clk_id, u8 *parent_id)
1141{
1142 struct ti_sci_msg_resp_get_clock_parent *resp;
1143 struct ti_sci_msg_req_get_clock_parent req;
1144 struct ti_sci_info *info;
1145 struct ti_sci_xfer *xfer;
1146 int ret = 0;
1147
1148 if (IS_ERR(handle))
1149 return PTR_ERR(handle);
1150 if (!handle || !parent_id)
1151 return -EINVAL;
1152
1153 info = handle_to_ti_sci_info(handle);
1154
1155 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1156 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1157 (u32 *)&req, sizeof(req), sizeof(*resp));
1158 if (IS_ERR(xfer)) {
1159 ret = PTR_ERR(xfer);
1160 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1161 return ret;
1162 }
1163 req.dev_id = dev_id;
1164 req.clk_id = clk_id;
1165
1166 ret = ti_sci_do_xfer(info, xfer);
1167 if (ret) {
1168 dev_err(info->dev, "Mbox send fail %d\n", ret);
1169 return ret;
1170 }
1171
1172 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;
1173
1174 if (!ti_sci_is_response_ack(resp))
1175 ret = -ENODEV;
1176 else
1177 *parent_id = resp->parent_id;
1178
1179 return ret;
1180}
1181
1182/**
1183 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1184 * @handle: pointer to TI SCI handle
1185 * @dev_id: Device identifier this request is for
1186 * @clk_id: Clock identifier for the device for this request.
1187 * Each device has it's own set of clock inputs. This indexes
1188 * which clock input to modify.
1189 * @num_parents: Returns he number of parents to the current clock.
1190 *
1191 * Return: 0 if all went well, else returns appropriate error value.
1192 */
1193static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1194 u32 dev_id, u8 clk_id,
1195 u8 *num_parents)
1196{
1197 struct ti_sci_msg_resp_get_clock_num_parents *resp;
1198 struct ti_sci_msg_req_get_clock_num_parents req;
1199 struct ti_sci_info *info;
1200 struct ti_sci_xfer *xfer;
1201 int ret = 0;
1202
1203 if (IS_ERR(handle))
1204 return PTR_ERR(handle);
1205 if (!handle || !num_parents)
1206 return -EINVAL;
1207
1208 info = handle_to_ti_sci_info(handle);
1209
1210 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1211 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1212 (u32 *)&req, sizeof(req), sizeof(*resp));
1213 if (IS_ERR(xfer)) {
1214 ret = PTR_ERR(xfer);
1215 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1216 return ret;
1217 }
1218 req.dev_id = dev_id;
1219 req.clk_id = clk_id;
1220
1221 ret = ti_sci_do_xfer(info, xfer);
1222 if (ret) {
1223 dev_err(info->dev, "Mbox send fail %d\n", ret);
1224 return ret;
1225 }
1226
1227 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1228 xfer->tx_message.buf;
1229
1230 if (!ti_sci_is_response_ack(resp))
1231 ret = -ENODEV;
1232 else
1233 *num_parents = resp->num_parents;
1234
1235 return ret;
1236}
1237
1238/**
1239 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1240 * @handle: pointer to TI SCI handle
1241 * @dev_id: Device identifier this request is for
1242 * @clk_id: Clock identifier for the device for this request.
1243 * Each device has it's own set of clock inputs. This indexes
1244 * which clock input to modify.
1245 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1246 * allowable programmed frequency and does not account for clock
1247 * tolerances and jitter.
1248 * @target_freq: The target clock frequency in Hz. A frequency will be
1249 * processed as close to this target frequency as possible.
1250 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1251 * allowable programmed frequency and does not account for clock
1252 * tolerances and jitter.
1253 * @match_freq: Frequency match in Hz response.
1254 *
1255 * Return: 0 if all went well, else returns appropriate error value.
1256 */
1257static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1258 u32 dev_id, u8 clk_id, u64 min_freq,
1259 u64 target_freq, u64 max_freq,
1260 u64 *match_freq)
1261{
1262 struct ti_sci_msg_resp_query_clock_freq *resp;
1263 struct ti_sci_msg_req_query_clock_freq req;
1264 struct ti_sci_info *info;
1265 struct ti_sci_xfer *xfer;
1266 int ret = 0;
1267
1268 if (IS_ERR(handle))
1269 return PTR_ERR(handle);
1270 if (!handle || !match_freq)
1271 return -EINVAL;
1272
1273 info = handle_to_ti_sci_info(handle);
1274
1275 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1276 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1277 (u32 *)&req, sizeof(req), sizeof(*resp));
1278 if (IS_ERR(xfer)) {
1279 ret = PTR_ERR(xfer);
1280 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1281 return ret;
1282 }
1283 req.dev_id = dev_id;
1284 req.clk_id = clk_id;
1285 req.min_freq_hz = min_freq;
1286 req.target_freq_hz = target_freq;
1287 req.max_freq_hz = max_freq;
1288
1289 ret = ti_sci_do_xfer(info, xfer);
1290 if (ret) {
1291 dev_err(info->dev, "Mbox send fail %d\n", ret);
1292 return ret;
1293 }
1294
1295 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1296
1297 if (!ti_sci_is_response_ack(resp))
1298 ret = -ENODEV;
1299 else
1300 *match_freq = resp->freq_hz;
1301
1302 return ret;
1303}
1304
1305/**
1306 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1307 * @handle: pointer to TI SCI handle
1308 * @dev_id: Device identifier this request is for
1309 * @clk_id: Clock identifier for the device for this request.
1310 * Each device has it's own set of clock inputs. This indexes
1311 * which clock input to modify.
1312 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1313 * allowable programmed frequency and does not account for clock
1314 * tolerances and jitter.
1315 * @target_freq: The target clock frequency in Hz. A frequency will be
1316 * processed as close to this target frequency as possible.
1317 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1318 * allowable programmed frequency and does not account for clock
1319 * tolerances and jitter.
1320 *
1321 * Return: 0 if all went well, else returns appropriate error value.
1322 */
1323static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1324 u32 dev_id, u8 clk_id, u64 min_freq,
1325 u64 target_freq, u64 max_freq)
1326{
1327 struct ti_sci_msg_req_set_clock_freq req;
1328 struct ti_sci_msg_hdr *resp;
1329 struct ti_sci_info *info;
1330 struct ti_sci_xfer *xfer;
1331 int ret = 0;
1332
1333 if (IS_ERR(handle))
1334 return PTR_ERR(handle);
1335 if (!handle)
1336 return -EINVAL;
1337
1338 info = handle_to_ti_sci_info(handle);
1339
1340 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1341 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1342 (u32 *)&req, sizeof(req), sizeof(*resp));
1343 if (IS_ERR(xfer)) {
1344 ret = PTR_ERR(xfer);
1345 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1346 return ret;
1347 }
1348 req.dev_id = dev_id;
1349 req.clk_id = clk_id;
1350 req.min_freq_hz = min_freq;
1351 req.target_freq_hz = target_freq;
1352 req.max_freq_hz = max_freq;
1353
1354 ret = ti_sci_do_xfer(info, xfer);
1355 if (ret) {
1356 dev_err(info->dev, "Mbox send fail %d\n", ret);
1357 return ret;
1358 }
1359
1360 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1361
1362 if (!ti_sci_is_response_ack(resp))
1363 return -ENODEV;
1364
1365 return ret;
1366}
1367
1368/**
1369 * ti_sci_cmd_clk_get_freq() - Get current frequency
1370 * @handle: pointer to TI SCI handle
1371 * @dev_id: Device identifier this request is for
1372 * @clk_id: Clock identifier for the device for this request.
1373 * Each device has it's own set of clock inputs. This indexes
1374 * which clock input to modify.
1375 * @freq: Currently frequency in Hz
1376 *
1377 * Return: 0 if all went well, else returns appropriate error value.
1378 */
1379static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1380 u32 dev_id, u8 clk_id, u64 *freq)
1381{
1382 struct ti_sci_msg_resp_get_clock_freq *resp;
1383 struct ti_sci_msg_req_get_clock_freq req;
1384 struct ti_sci_info *info;
1385 struct ti_sci_xfer *xfer;
1386 int ret = 0;
1387
1388 if (IS_ERR(handle))
1389 return PTR_ERR(handle);
1390 if (!handle || !freq)
1391 return -EINVAL;
1392
1393 info = handle_to_ti_sci_info(handle);
1394
1395 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1396 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1397 (u32 *)&req, sizeof(req), sizeof(*resp));
1398 if (IS_ERR(xfer)) {
1399 ret = PTR_ERR(xfer);
1400 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1401 return ret;
1402 }
1403 req.dev_id = dev_id;
1404 req.clk_id = clk_id;
1405
1406 ret = ti_sci_do_xfer(info, xfer);
1407 if (ret) {
1408 dev_err(info->dev, "Mbox send fail %d\n", ret);
1409 return ret;
1410 }
1411
1412 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1413
1414 if (!ti_sci_is_response_ack(resp))
1415 ret = -ENODEV;
1416 else
1417 *freq = resp->freq_hz;
1418
1419 return ret;
1420}
1421
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05301422/**
1423 * ti_sci_cmd_core_reboot() - Command to request system reset
1424 * @handle: pointer to TI SCI handle
1425 *
1426 * Return: 0 if all went well, else returns appropriate error value.
1427 */
1428static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1429{
1430 struct ti_sci_msg_req_reboot req;
1431 struct ti_sci_msg_hdr *resp;
1432 struct ti_sci_info *info;
1433 struct ti_sci_xfer *xfer;
1434 int ret = 0;
1435
1436 if (IS_ERR(handle))
1437 return PTR_ERR(handle);
1438 if (!handle)
1439 return -EINVAL;
1440
1441 info = handle_to_ti_sci_info(handle);
1442
1443 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1444 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1445 (u32 *)&req, sizeof(req), sizeof(*resp));
1446 if (IS_ERR(xfer)) {
1447 ret = PTR_ERR(xfer);
1448 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1449 return ret;
1450 }
1451
1452 ret = ti_sci_do_xfer(info, xfer);
1453 if (ret) {
1454 dev_err(dev, "Mbox send fail %d\n", ret);
1455 return ret;
1456 }
1457
1458 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1459
1460 if (!ti_sci_is_response_ack(resp))
1461 return -ENODEV;
1462
1463 return ret;
1464}
1465
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301466static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
1467 u16 *type)
1468{
1469 struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
1470 bool found = false;
1471 int i;
1472
1473 /* If map is not provided then assume dev_id is used as type */
1474 if (!rm_type_map) {
1475 *type = dev_id;
1476 return 0;
1477 }
1478
1479 for (i = 0; rm_type_map[i].dev_id; i++) {
1480 if (rm_type_map[i].dev_id == dev_id) {
1481 *type = rm_type_map[i].type;
1482 found = true;
1483 break;
1484 }
1485 }
1486
1487 if (!found)
1488 return -EINVAL;
1489
1490 return 0;
1491}
1492
1493/**
1494 * ti_sci_get_resource_range - Helper to get a range of resources assigned
1495 * to a host. Resource is uniquely identified by
1496 * type and subtype.
1497 * @handle: Pointer to TISCI handle.
1498 * @dev_id: TISCI device ID.
1499 * @subtype: Resource assignment subtype that is being requested
1500 * from the given device.
1501 * @s_host: Host processor ID to which the resources are allocated
1502 * @range_start: Start index of the resource range
1503 * @range_num: Number of resources in the range
1504 *
1505 * Return: 0 if all went fine, else return appropriate error.
1506 */
1507static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1508 u32 dev_id, u8 subtype, u8 s_host,
1509 u16 *range_start, u16 *range_num)
1510{
1511 struct ti_sci_msg_resp_get_resource_range *resp;
1512 struct ti_sci_msg_req_get_resource_range req;
1513 struct ti_sci_xfer *xfer;
1514 struct ti_sci_info *info;
1515 u16 type;
1516 int ret = 0;
1517
1518 if (IS_ERR(handle))
1519 return PTR_ERR(handle);
1520 if (!handle)
1521 return -EINVAL;
1522
1523 info = handle_to_ti_sci_info(handle);
1524
1525 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1526 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1527 (u32 *)&req, sizeof(req), sizeof(*resp));
1528 if (IS_ERR(xfer)) {
1529 ret = PTR_ERR(xfer);
1530 dev_err(dev, "Message alloc failed(%d)\n", ret);
1531 return ret;
1532 }
1533
1534 ret = ti_sci_get_resource_type(info, dev_id, &type);
1535 if (ret) {
1536 dev_err(dev, "rm type lookup failed for %u\n", dev_id);
1537 goto fail;
1538 }
1539
1540 req.secondary_host = s_host;
1541 req.type = type & MSG_RM_RESOURCE_TYPE_MASK;
1542 req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1543
1544 ret = ti_sci_do_xfer(info, xfer);
1545 if (ret) {
1546 dev_err(dev, "Mbox send fail %d\n", ret);
1547 goto fail;
1548 }
1549
1550 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
1551 if (!ti_sci_is_response_ack(resp)) {
1552 ret = -ENODEV;
1553 } else if (!resp->range_start && !resp->range_num) {
1554 ret = -ENODEV;
1555 } else {
1556 *range_start = resp->range_start;
1557 *range_num = resp->range_num;
1558 };
1559
1560fail:
1561 return ret;
1562}
1563
1564/**
1565 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1566 * that is same as ti sci interface host.
1567 * @handle: Pointer to TISCI handle.
1568 * @dev_id: TISCI device ID.
1569 * @subtype: Resource assignment subtype that is being requested
1570 * from the given device.
1571 * @range_start: Start index of the resource range
1572 * @range_num: Number of resources in the range
1573 *
1574 * Return: 0 if all went fine, else return appropriate error.
1575 */
1576static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1577 u32 dev_id, u8 subtype,
1578 u16 *range_start, u16 *range_num)
1579{
1580 return ti_sci_get_resource_range(handle, dev_id, subtype,
1581 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1582 range_start, range_num);
1583}
1584
1585/**
1586 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1587 * assigned to a specified host.
1588 * @handle: Pointer to TISCI handle.
1589 * @dev_id: TISCI device ID.
1590 * @subtype: Resource assignment subtype that is being requested
1591 * from the given device.
1592 * @s_host: Host processor ID to which the resources are allocated
1593 * @range_start: Start index of the resource range
1594 * @range_num: Number of resources in the range
1595 *
1596 * Return: 0 if all went fine, else return appropriate error.
1597 */
1598static
1599int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1600 u32 dev_id, u8 subtype, u8 s_host,
1601 u16 *range_start, u16 *range_num)
1602{
1603 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1604 range_start, range_num);
1605}
1606
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05301607/**
1608 * ti_sci_cmd_proc_request() - Command to request a physical processor control
1609 * @handle: Pointer to TI SCI handle
1610 * @proc_id: Processor ID this request is for
1611 *
1612 * Return: 0 if all went well, else returns appropriate error value.
1613 */
1614static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1615 u8 proc_id)
1616{
1617 struct ti_sci_msg_req_proc_request req;
1618 struct ti_sci_msg_hdr *resp;
1619 struct ti_sci_info *info;
1620 struct ti_sci_xfer *xfer;
1621 int ret = 0;
1622
1623 if (IS_ERR(handle))
1624 return PTR_ERR(handle);
1625 if (!handle)
1626 return -EINVAL;
1627
1628 info = handle_to_ti_sci_info(handle);
1629
1630 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1631 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1632 (u32 *)&req, sizeof(req), sizeof(*resp));
1633 if (IS_ERR(xfer)) {
1634 ret = PTR_ERR(xfer);
1635 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1636 return ret;
1637 }
1638 req.processor_id = proc_id;
1639
1640 ret = ti_sci_do_xfer(info, xfer);
1641 if (ret) {
1642 dev_err(info->dev, "Mbox send fail %d\n", ret);
1643 return ret;
1644 }
1645
1646 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1647
1648 if (!ti_sci_is_response_ack(resp))
1649 ret = -ENODEV;
1650
1651 return ret;
1652}
1653
1654/**
1655 * ti_sci_cmd_proc_release() - Command to release a physical processor control
1656 * @handle: Pointer to TI SCI handle
1657 * @proc_id: Processor ID this request is for
1658 *
1659 * Return: 0 if all went well, else returns appropriate error value.
1660 */
1661static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1662 u8 proc_id)
1663{
1664 struct ti_sci_msg_req_proc_release req;
1665 struct ti_sci_msg_hdr *resp;
1666 struct ti_sci_info *info;
1667 struct ti_sci_xfer *xfer;
1668 int ret = 0;
1669
1670 if (IS_ERR(handle))
1671 return PTR_ERR(handle);
1672 if (!handle)
1673 return -EINVAL;
1674
1675 info = handle_to_ti_sci_info(handle);
1676
1677 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1678 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1679 (u32 *)&req, sizeof(req), sizeof(*resp));
1680 if (IS_ERR(xfer)) {
1681 ret = PTR_ERR(xfer);
1682 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1683 return ret;
1684 }
1685 req.processor_id = proc_id;
1686
1687 ret = ti_sci_do_xfer(info, xfer);
1688 if (ret) {
1689 dev_err(info->dev, "Mbox send fail %d\n", ret);
1690 return ret;
1691 }
1692
1693 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1694
1695 if (!ti_sci_is_response_ack(resp))
1696 ret = -ENODEV;
1697
1698 return ret;
1699}
1700
1701/**
1702 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1703 * control to a host in the processor's access
1704 * control list.
1705 * @handle: Pointer to TI SCI handle
1706 * @proc_id: Processor ID this request is for
1707 * @host_id: Host ID to get the control of the processor
1708 *
1709 * Return: 0 if all went well, else returns appropriate error value.
1710 */
1711static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1712 u8 proc_id, u8 host_id)
1713{
1714 struct ti_sci_msg_req_proc_handover req;
1715 struct ti_sci_msg_hdr *resp;
1716 struct ti_sci_info *info;
1717 struct ti_sci_xfer *xfer;
1718 int ret = 0;
1719
1720 if (IS_ERR(handle))
1721 return PTR_ERR(handle);
1722 if (!handle)
1723 return -EINVAL;
1724
1725 info = handle_to_ti_sci_info(handle);
1726
1727 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1728 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1729 (u32 *)&req, sizeof(req), sizeof(*resp));
1730 if (IS_ERR(xfer)) {
1731 ret = PTR_ERR(xfer);
1732 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1733 return ret;
1734 }
1735 req.processor_id = proc_id;
1736 req.host_id = host_id;
1737
1738 ret = ti_sci_do_xfer(info, xfer);
1739 if (ret) {
1740 dev_err(info->dev, "Mbox send fail %d\n", ret);
1741 return ret;
1742 }
1743
1744 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1745
1746 if (!ti_sci_is_response_ack(resp))
1747 ret = -ENODEV;
1748
1749 return ret;
1750}
1751
1752/**
1753 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1754 * configuration flags
1755 * @handle: Pointer to TI SCI handle
1756 * @proc_id: Processor ID this request is for
1757 * @config_flags_set: Configuration flags to be set
1758 * @config_flags_clear: Configuration flags to be cleared.
1759 *
1760 * Return: 0 if all went well, else returns appropriate error value.
1761 */
1762static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1763 u8 proc_id, u64 bootvector,
1764 u32 config_flags_set,
1765 u32 config_flags_clear)
1766{
1767 struct ti_sci_msg_req_set_proc_boot_config req;
1768 struct ti_sci_msg_hdr *resp;
1769 struct ti_sci_info *info;
1770 struct ti_sci_xfer *xfer;
1771 int ret = 0;
1772
1773 if (IS_ERR(handle))
1774 return PTR_ERR(handle);
1775 if (!handle)
1776 return -EINVAL;
1777
1778 info = handle_to_ti_sci_info(handle);
1779
1780 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1781 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1782 (u32 *)&req, sizeof(req), sizeof(*resp));
1783 if (IS_ERR(xfer)) {
1784 ret = PTR_ERR(xfer);
1785 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1786 return ret;
1787 }
1788 req.processor_id = proc_id;
1789 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1790 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1791 TISCI_ADDR_HIGH_SHIFT;
1792 req.config_flags_set = config_flags_set;
1793 req.config_flags_clear = config_flags_clear;
1794
1795 ret = ti_sci_do_xfer(info, xfer);
1796 if (ret) {
1797 dev_err(info->dev, "Mbox send fail %d\n", ret);
1798 return ret;
1799 }
1800
1801 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1802
1803 if (!ti_sci_is_response_ack(resp))
1804 ret = -ENODEV;
1805
1806 return ret;
1807}
1808
1809/**
1810 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1811 * control flags
1812 * @handle: Pointer to TI SCI handle
1813 * @proc_id: Processor ID this request is for
1814 * @control_flags_set: Control flags to be set
1815 * @control_flags_clear: Control flags to be cleared
1816 *
1817 * Return: 0 if all went well, else returns appropriate error value.
1818 */
1819static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1820 u8 proc_id, u32 control_flags_set,
1821 u32 control_flags_clear)
1822{
1823 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1824 struct ti_sci_msg_hdr *resp;
1825 struct ti_sci_info *info;
1826 struct ti_sci_xfer *xfer;
1827 int ret = 0;
1828
1829 if (IS_ERR(handle))
1830 return PTR_ERR(handle);
1831 if (!handle)
1832 return -EINVAL;
1833
1834 info = handle_to_ti_sci_info(handle);
1835
1836 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1837 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1838 (u32 *)&req, sizeof(req), sizeof(*resp));
1839 if (IS_ERR(xfer)) {
1840 ret = PTR_ERR(xfer);
1841 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1842 return ret;
1843 }
1844 req.processor_id = proc_id;
1845 req.control_flags_set = control_flags_set;
1846 req.control_flags_clear = control_flags_clear;
1847
1848 ret = ti_sci_do_xfer(info, xfer);
1849 if (ret) {
1850 dev_err(info->dev, "Mbox send fail %d\n", ret);
1851 return ret;
1852 }
1853
1854 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1855
1856 if (!ti_sci_is_response_ack(resp))
1857 ret = -ENODEV;
1858
1859 return ret;
1860}
1861
1862/**
1863 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
1864 * image and then set the processor configuration flags.
1865 * @handle: Pointer to TI SCI handle
1866 * @proc_id: Processor ID this request is for
1867 * @cert_addr: Memory address at which payload image certificate is located.
1868 *
1869 * Return: 0 if all went well, else returns appropriate error value.
1870 */
1871static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
1872 u8 proc_id, u64 cert_addr)
1873{
1874 struct ti_sci_msg_req_proc_auth_boot_image req;
1875 struct ti_sci_msg_hdr *resp;
1876 struct ti_sci_info *info;
1877 struct ti_sci_xfer *xfer;
1878 int ret = 0;
1879
1880 if (IS_ERR(handle))
1881 return PTR_ERR(handle);
1882 if (!handle)
1883 return -EINVAL;
1884
1885 info = handle_to_ti_sci_info(handle);
1886
1887 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
1888 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1889 (u32 *)&req, sizeof(req), sizeof(*resp));
1890 if (IS_ERR(xfer)) {
1891 ret = PTR_ERR(xfer);
1892 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1893 return ret;
1894 }
1895 req.processor_id = proc_id;
1896 req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK;
1897 req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >>
1898 TISCI_ADDR_HIGH_SHIFT;
1899
1900 ret = ti_sci_do_xfer(info, xfer);
1901 if (ret) {
1902 dev_err(info->dev, "Mbox send fail %d\n", ret);
1903 return ret;
1904 }
1905
1906 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1907
1908 if (!ti_sci_is_response_ack(resp))
1909 ret = -ENODEV;
1910
1911 return ret;
1912}
1913
1914/**
1915 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
1916 * @handle: Pointer to TI SCI handle
1917 * @proc_id: Processor ID this request is for
1918 *
1919 * Return: 0 if all went well, else returns appropriate error value.
1920 */
1921static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
1922 u8 proc_id, u64 *bv, u32 *cfg_flags,
1923 u32 *ctrl_flags, u32 *sts_flags)
1924{
1925 struct ti_sci_msg_resp_get_proc_boot_status *resp;
1926 struct ti_sci_msg_req_get_proc_boot_status req;
1927 struct ti_sci_info *info;
1928 struct ti_sci_xfer *xfer;
1929 int ret = 0;
1930
1931 if (IS_ERR(handle))
1932 return PTR_ERR(handle);
1933 if (!handle)
1934 return -EINVAL;
1935
1936 info = handle_to_ti_sci_info(handle);
1937
1938 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
1939 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1940 (u32 *)&req, sizeof(req), sizeof(*resp));
1941 if (IS_ERR(xfer)) {
1942 ret = PTR_ERR(xfer);
1943 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1944 return ret;
1945 }
1946 req.processor_id = proc_id;
1947
1948 ret = ti_sci_do_xfer(info, xfer);
1949 if (ret) {
1950 dev_err(info->dev, "Mbox send fail %d\n", ret);
1951 return ret;
1952 }
1953
1954 resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
1955 xfer->tx_message.buf;
1956
1957 if (!ti_sci_is_response_ack(resp))
1958 return -ENODEV;
1959 *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
1960 (((u64)resp->bootvector_high <<
1961 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
1962 *cfg_flags = resp->config_flags;
1963 *ctrl_flags = resp->control_flags;
1964 *sts_flags = resp->status_flags;
1965
1966 return ret;
1967}
1968
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05301969/**
1970 * ti_sci_cmd_ring_config() - configure RA ring
1971 * @handle: pointer to TI SCI handle
1972 * @valid_params: Bitfield defining validity of ring configuration parameters.
1973 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
1974 * @index: Ring index.
1975 * @addr_lo: The ring base address lo 32 bits
1976 * @addr_hi: The ring base address hi 32 bits
1977 * @count: Number of ring elements.
1978 * @mode: The mode of the ring
1979 * @size: The ring element size.
1980 * @order_id: Specifies the ring's bus order ID.
1981 *
1982 * Return: 0 if all went well, else returns appropriate error value.
1983 *
1984 * See @ti_sci_msg_rm_ring_cfg_req for more info.
1985 */
1986static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
1987 u32 valid_params, u16 nav_id, u16 index,
1988 u32 addr_lo, u32 addr_hi, u32 count,
1989 u8 mode, u8 size, u8 order_id)
1990{
1991 struct ti_sci_msg_rm_ring_cfg_resp *resp;
1992 struct ti_sci_msg_rm_ring_cfg_req req;
1993 struct ti_sci_xfer *xfer;
1994 struct ti_sci_info *info;
1995 int ret = 0;
1996
1997 if (IS_ERR(handle))
1998 return PTR_ERR(handle);
1999 if (!handle)
2000 return -EINVAL;
2001
2002 info = handle_to_ti_sci_info(handle);
2003
2004 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2005 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2006 (u32 *)&req, sizeof(req), sizeof(*resp));
2007 if (IS_ERR(xfer)) {
2008 ret = PTR_ERR(xfer);
2009 dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
2010 return ret;
2011 }
2012 req.valid_params = valid_params;
2013 req.nav_id = nav_id;
2014 req.index = index;
2015 req.addr_lo = addr_lo;
2016 req.addr_hi = addr_hi;
2017 req.count = count;
2018 req.mode = mode;
2019 req.size = size;
2020 req.order_id = order_id;
2021
2022 ret = ti_sci_do_xfer(info, xfer);
2023 if (ret) {
2024 dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
2025 goto fail;
2026 }
2027
2028 resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;
2029
2030 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2031
2032fail:
2033 dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2034 return ret;
2035}
2036
/**
 * ti_sci_cmd_ring_get_config() - get RA ring configuration
 * @handle: pointer to TI SCI handle
 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
 * @index: Ring index.
 * @mode: returns mode of the ring
 * @addr_lo: returns ring's base address lo 32 bits
 * @addr_hi: returns ring's base address hi 32 bits
 * @count: returns number of ring elements.
 * @size: returns ring element size.
 * @order_id: returns ring's bus order ID.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
 */
2053static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2054 u32 nav_id, u32 index, u8 *mode,
2055 u32 *addr_lo, u32 *addr_hi,
2056 u32 *count, u8 *size, u8 *order_id)
2057{
2058 struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2059 struct ti_sci_msg_rm_ring_get_cfg_req req;
2060 struct ti_sci_xfer *xfer;
2061 struct ti_sci_info *info;
2062 int ret = 0;
2063
2064 if (IS_ERR(handle))
2065 return PTR_ERR(handle);
2066 if (!handle)
2067 return -EINVAL;
2068
2069 info = handle_to_ti_sci_info(handle);
2070
2071 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2072 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2073 (u32 *)&req, sizeof(req), sizeof(*resp));
2074 if (IS_ERR(xfer)) {
2075 ret = PTR_ERR(xfer);
2076 dev_err(info->dev,
2077 "RM_RA:Message get config failed(%d)\n", ret);
2078 return ret;
2079 }
2080 req.nav_id = nav_id;
2081 req.index = index;
2082
2083 ret = ti_sci_do_xfer(info, xfer);
2084 if (ret) {
2085 dev_err(info->dev, "RM_RA:Mbox get config send fail %d\n", ret);
2086 goto fail;
2087 }
2088
2089 resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->tx_message.buf;
2090
2091 if (!ti_sci_is_response_ack(resp)) {
2092 ret = -ENODEV;
2093 } else {
2094 if (mode)
2095 *mode = resp->mode;
2096 if (addr_lo)
2097 *addr_lo = resp->addr_lo;
2098 if (addr_hi)
2099 *addr_hi = resp->addr_hi;
2100 if (count)
2101 *count = resp->count;
2102 if (size)
2103 *size = resp->size;
2104 if (order_id)
2105 *order_id = resp->order_id;
2106 };
2107
2108fail:
2109 dev_dbg(info->dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2110 return ret;
2111}
2112
2113static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2114 u32 nav_id, u32 src_thread, u32 dst_thread)
2115{
2116 struct ti_sci_msg_hdr *resp;
2117 struct ti_sci_msg_psil_pair req;
2118 struct ti_sci_xfer *xfer;
2119 struct ti_sci_info *info;
2120 int ret = 0;
2121
2122 if (IS_ERR(handle))
2123 return PTR_ERR(handle);
2124 if (!handle)
2125 return -EINVAL;
2126
2127 info = handle_to_ti_sci_info(handle);
2128
2129 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2130 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2131 (u32 *)&req, sizeof(req), sizeof(*resp));
2132 if (IS_ERR(xfer)) {
2133 ret = PTR_ERR(xfer);
2134 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2135 return ret;
2136 }
2137 req.nav_id = nav_id;
2138 req.src_thread = src_thread;
2139 req.dst_thread = dst_thread;
2140
2141 ret = ti_sci_do_xfer(info, xfer);
2142 if (ret) {
2143 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2144 goto fail;
2145 }
2146
2147 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2148 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2149
2150fail:
2151 dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%u\n",
2152 nav_id, src_thread, dst_thread, ret);
2153 return ret;
2154}
2155
2156static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2157 u32 nav_id, u32 src_thread, u32 dst_thread)
2158{
2159 struct ti_sci_msg_hdr *resp;
2160 struct ti_sci_msg_psil_unpair req;
2161 struct ti_sci_xfer *xfer;
2162 struct ti_sci_info *info;
2163 int ret = 0;
2164
2165 if (IS_ERR(handle))
2166 return PTR_ERR(handle);
2167 if (!handle)
2168 return -EINVAL;
2169
2170 info = handle_to_ti_sci_info(handle);
2171
2172 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2173 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2174 (u32 *)&req, sizeof(req), sizeof(*resp));
2175 if (IS_ERR(xfer)) {
2176 ret = PTR_ERR(xfer);
2177 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2178 return ret;
2179 }
2180 req.nav_id = nav_id;
2181 req.src_thread = src_thread;
2182 req.dst_thread = dst_thread;
2183
2184 ret = ti_sci_do_xfer(info, xfer);
2185 if (ret) {
2186 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2187 goto fail;
2188 }
2189
2190 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2191 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2192
2193fail:
2194 dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%u\n",
2195 src_thread, dst_thread, ret);
2196 return ret;
2197}
2198
2199static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2200 const struct ti_sci_handle *handle,
2201 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2202{
2203 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2204 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2205 struct ti_sci_xfer *xfer;
2206 struct ti_sci_info *info;
2207 int ret = 0;
2208
2209 if (IS_ERR(handle))
2210 return PTR_ERR(handle);
2211 if (!handle)
2212 return -EINVAL;
2213
2214 info = handle_to_ti_sci_info(handle);
2215
2216 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2217 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2218 (u32 *)&req, sizeof(req), sizeof(*resp));
2219 if (IS_ERR(xfer)) {
2220 ret = PTR_ERR(xfer);
2221 dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2222 return ret;
2223 }
2224 req.valid_params = params->valid_params;
2225 req.nav_id = params->nav_id;
2226 req.index = params->index;
2227 req.tx_pause_on_err = params->tx_pause_on_err;
2228 req.tx_filt_einfo = params->tx_filt_einfo;
2229 req.tx_filt_pswords = params->tx_filt_pswords;
2230 req.tx_atype = params->tx_atype;
2231 req.tx_chan_type = params->tx_chan_type;
2232 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2233 req.tx_fetch_size = params->tx_fetch_size;
2234 req.tx_credit_count = params->tx_credit_count;
2235 req.txcq_qnum = params->txcq_qnum;
2236 req.tx_priority = params->tx_priority;
2237 req.tx_qos = params->tx_qos;
2238 req.tx_orderid = params->tx_orderid;
2239 req.fdepth = params->fdepth;
2240 req.tx_sched_priority = params->tx_sched_priority;
2241
2242 ret = ti_sci_do_xfer(info, xfer);
2243 if (ret) {
2244 dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2245 goto fail;
2246 }
2247
2248 resp =
2249 (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
2250 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2251
2252fail:
2253 dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2254 return ret;
2255}
2256
2257static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2258 const struct ti_sci_handle *handle,
2259 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2260{
2261 struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2262 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2263 struct ti_sci_xfer *xfer;
2264 struct ti_sci_info *info;
2265 int ret = 0;
2266
2267 if (IS_ERR(handle))
2268 return PTR_ERR(handle);
2269 if (!handle)
2270 return -EINVAL;
2271
2272 info = handle_to_ti_sci_info(handle);
2273
2274 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2275 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2276 (u32 *)&req, sizeof(req), sizeof(*resp));
2277 if (IS_ERR(xfer)) {
2278 ret = PTR_ERR(xfer);
2279 dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2280 return ret;
2281 }
2282
2283 req.valid_params = params->valid_params;
2284 req.nav_id = params->nav_id;
2285 req.index = params->index;
2286 req.rx_fetch_size = params->rx_fetch_size;
2287 req.rxcq_qnum = params->rxcq_qnum;
2288 req.rx_priority = params->rx_priority;
2289 req.rx_qos = params->rx_qos;
2290 req.rx_orderid = params->rx_orderid;
2291 req.rx_sched_priority = params->rx_sched_priority;
2292 req.flowid_start = params->flowid_start;
2293 req.flowid_cnt = params->flowid_cnt;
2294 req.rx_pause_on_err = params->rx_pause_on_err;
2295 req.rx_atype = params->rx_atype;
2296 req.rx_chan_type = params->rx_chan_type;
2297 req.rx_ignore_short = params->rx_ignore_short;
2298 req.rx_ignore_long = params->rx_ignore_long;
2299
2300 ret = ti_sci_do_xfer(info, xfer);
2301 if (ret) {
2302 dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2303 goto fail;
2304 }
2305
2306 resp =
2307 (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
2308 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2309
2310fail:
2311 dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2312 return ret;
2313}
2314
2315static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2316 const struct ti_sci_handle *handle,
2317 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2318{
2319 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2320 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2321 struct ti_sci_xfer *xfer;
2322 struct ti_sci_info *info;
2323 int ret = 0;
2324
2325 if (IS_ERR(handle))
2326 return PTR_ERR(handle);
2327 if (!handle)
2328 return -EINVAL;
2329
2330 info = handle_to_ti_sci_info(handle);
2331
2332 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2333 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2334 (u32 *)&req, sizeof(req), sizeof(*resp));
2335 if (IS_ERR(xfer)) {
2336 ret = PTR_ERR(xfer);
2337 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2338 return ret;
2339 }
2340
2341 req.valid_params = params->valid_params;
2342 req.nav_id = params->nav_id;
2343 req.flow_index = params->flow_index;
2344 req.rx_einfo_present = params->rx_einfo_present;
2345 req.rx_psinfo_present = params->rx_psinfo_present;
2346 req.rx_error_handling = params->rx_error_handling;
2347 req.rx_desc_type = params->rx_desc_type;
2348 req.rx_sop_offset = params->rx_sop_offset;
2349 req.rx_dest_qnum = params->rx_dest_qnum;
2350 req.rx_src_tag_hi = params->rx_src_tag_hi;
2351 req.rx_src_tag_lo = params->rx_src_tag_lo;
2352 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2353 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2354 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2355 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2356 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2357 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2358 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2359 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2360 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2361 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2362 req.rx_ps_location = params->rx_ps_location;
2363
2364 ret = ti_sci_do_xfer(info, xfer);
2365 if (ret) {
2366 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2367 goto fail;
2368 }
2369
2370 resp =
2371 (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
2372 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2373
2374fail:
2375 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2376 return ret;
2377}
2378
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302379/*
2380 * ti_sci_setup_ops() - Setup the operations structures
2381 * @info: pointer to TISCI pointer
2382 */
2383static void ti_sci_setup_ops(struct ti_sci_info *info)
2384{
2385 struct ti_sci_ops *ops = &info->handle.ops;
2386 struct ti_sci_board_ops *bops = &ops->board_ops;
Andreas Dannenberg7bc33042018-08-27 15:57:34 +05302387 struct ti_sci_dev_ops *dops = &ops->dev_ops;
Lokesh Vutla9b871812018-08-27 15:57:35 +05302388 struct ti_sci_clk_ops *cops = &ops->clk_ops;
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05302389 struct ti_sci_core_ops *core_ops = &ops->core_ops;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302390 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302391 struct ti_sci_proc_ops *pops = &ops->proc_ops;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302392 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2393 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2394 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302395
2396 bops->board_config = ti_sci_cmd_set_board_config;
2397 bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
2398 bops->board_config_security = ti_sci_cmd_set_board_config_security;
2399 bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
Andreas Dannenberg7bc33042018-08-27 15:57:34 +05302400
2401 dops->get_device = ti_sci_cmd_get_device;
2402 dops->idle_device = ti_sci_cmd_idle_device;
2403 dops->put_device = ti_sci_cmd_put_device;
2404 dops->is_valid = ti_sci_cmd_dev_is_valid;
2405 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2406 dops->is_idle = ti_sci_cmd_dev_is_idle;
2407 dops->is_stop = ti_sci_cmd_dev_is_stop;
2408 dops->is_on = ti_sci_cmd_dev_is_on;
2409 dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2410 dops->set_device_resets = ti_sci_cmd_set_device_resets;
2411 dops->get_device_resets = ti_sci_cmd_get_device_resets;
Lokesh Vutla9b871812018-08-27 15:57:35 +05302412
2413 cops->get_clock = ti_sci_cmd_get_clock;
2414 cops->idle_clock = ti_sci_cmd_idle_clock;
2415 cops->put_clock = ti_sci_cmd_put_clock;
2416 cops->is_auto = ti_sci_cmd_clk_is_auto;
2417 cops->is_on = ti_sci_cmd_clk_is_on;
2418 cops->is_off = ti_sci_cmd_clk_is_off;
2419
2420 cops->set_parent = ti_sci_cmd_clk_set_parent;
2421 cops->get_parent = ti_sci_cmd_clk_get_parent;
2422 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2423
2424 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2425 cops->set_freq = ti_sci_cmd_clk_set_freq;
2426 cops->get_freq = ti_sci_cmd_clk_get_freq;
Andreas Dannenbergf369b0f2018-08-27 15:57:36 +05302427
2428 core_ops->reboot_device = ti_sci_cmd_core_reboot;
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302429
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302430 rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2431 rm_core_ops->get_range_from_shost =
2432 ti_sci_cmd_get_resource_range_from_shost;
2433
Lokesh Vutlaccbc8b22018-08-27 15:57:37 +05302434 pops->proc_request = ti_sci_cmd_proc_request;
2435 pops->proc_release = ti_sci_cmd_proc_release;
2436 pops->proc_handover = ti_sci_cmd_proc_handover;
2437 pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
2438 pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
2439 pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
2440 pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302441
2442 rops->config = ti_sci_cmd_ring_config;
2443 rops->get_config = ti_sci_cmd_ring_get_config;
2444
2445 psilops->pair = ti_sci_cmd_rm_psil_pair;
2446 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2447
2448 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2449 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2450 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302451}
2452
2453/**
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302454 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2455 * @dev: Pointer to the SYSFW device
2456 *
2457 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2458 * are encountered.
2459 */
2460const
2461struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2462{
2463 if (!sci_dev)
2464 return ERR_PTR(-EINVAL);
2465
2466 struct ti_sci_info *info = dev_get_priv(sci_dev);
2467
2468 if (!info)
2469 return ERR_PTR(-EINVAL);
2470
2471 struct ti_sci_handle *handle = &info->handle;
2472
2473 if (!handle)
2474 return ERR_PTR(-EINVAL);
2475
2476 return handle;
2477}
2478
2479/**
2480 * ti_sci_get_handle() - Get the TI SCI handle for a device
2481 * @dev: Pointer to device for which we want SCI handle
2482 *
2483 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2484 * are encountered.
2485 */
2486const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2487{
2488 if (!dev)
2489 return ERR_PTR(-EINVAL);
2490
2491 struct udevice *sci_dev = dev_get_parent(dev);
2492
2493 return ti_sci_get_handle_from_sysfw(sci_dev);
2494}
2495
2496/**
2497 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2498 * @dev: device node
2499 * @propname: property name containing phandle on TISCI node
2500 *
2501 * Return: pointer to handle if successful, else appropriate error value.
2502 */
2503const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2504 const char *property)
2505{
2506 struct ti_sci_info *entry, *info = NULL;
2507 u32 phandle, err;
2508 ofnode node;
2509
2510 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2511 if (err)
2512 return ERR_PTR(err);
2513
2514 node = ofnode_get_by_phandle(phandle);
2515 if (!ofnode_valid(node))
2516 return ERR_PTR(-EINVAL);
2517
2518 list_for_each_entry(entry, &ti_sci_list, list)
2519 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2520 info = entry;
2521 break;
2522 }
2523
2524 if (!info)
2525 return ERR_PTR(-ENODEV);
2526
2527 return &info->handle;
2528}
2529
2530/**
2531 * ti_sci_of_to_info() - generate private data from device tree
2532 * @dev: corresponding system controller interface device
2533 * @info: pointer to driver specific private data
2534 *
2535 * Return: 0 if all goes good, else appropriate error message.
2536 */
2537static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2538{
2539 int ret;
2540
2541 ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2542 if (ret) {
2543 dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2544 __func__, ret);
2545 return ret;
2546 }
2547
2548 ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2549 if (ret) {
2550 dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2551 __func__, ret);
2552 return ret;
2553 }
2554
2555 /* Notify channel is optional. Enable only if populated */
2556 ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
2557 if (ret) {
2558 dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
2559 __func__, ret);
2560 }
2561
2562 info->host_id = dev_read_u32_default(dev, "ti,host-id",
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302563 info->desc->default_host_id);
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302564
2565 info->is_secure = dev_read_bool(dev, "ti,secure-host");
2566
2567 return 0;
2568}
2569
2570/**
2571 * ti_sci_probe() - Basic probe
2572 * @dev: corresponding system controller interface device
2573 *
2574 * Return: 0 if all goes good, else appropriate error message.
2575 */
2576static int ti_sci_probe(struct udevice *dev)
2577{
2578 struct ti_sci_info *info;
2579 int ret;
2580
2581 debug("%s(dev=%p)\n", __func__, dev);
2582
2583 info = dev_get_priv(dev);
2584 info->desc = (void *)dev_get_driver_data(dev);
2585
2586 ret = ti_sci_of_to_info(dev, info);
2587 if (ret) {
2588 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2589 return ret;
2590 }
2591
2592 info->dev = dev;
2593 info->seq = 0xA;
2594
2595 list_add_tail(&info->list, &ti_sci_list);
Andreas Dannenbergdcfc52a2018-08-27 15:57:33 +05302596 ti_sci_setup_ops(info);
Lokesh Vutla32cd2512018-08-27 15:57:32 +05302597
2598 ret = ti_sci_cmd_get_revision(&info->handle);
2599
2600 return ret;
2601}
2602
Grygorii Strashkofd6b40b2019-02-05 17:31:21 +05302603/*
2604 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2605 * @res: Pointer to the TISCI resource
2606 *
2607 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2608 */
2609u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2610{
2611 u16 set, free_bit;
2612
2613 for (set = 0; set < res->sets; set++) {
2614 free_bit = find_first_zero_bit(res->desc[set].res_map,
2615 res->desc[set].num);
2616 if (free_bit != res->desc[set].num) {
2617 set_bit(free_bit, res->desc[set].res_map);
2618 return res->desc[set].start + free_bit;
2619 }
2620 }
2621
2622 return TI_SCI_RESOURCE_NULL;
2623}
2624
2625/**
2626 * ti_sci_release_resource() - Release a resource from TISCI resource.
2627 * @res: Pointer to the TISCI resource
2628 */
2629void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
2630{
2631 u16 set;
2632
2633 for (set = 0; set < res->sets; set++) {
2634 if (res->desc[set].start <= id &&
2635 (res->desc[set].num + res->desc[set].start) > id)
2636 clear_bit(id - res->desc[set].start,
2637 res->desc[set].res_map);
2638 }
2639}
2640
2641/**
2642 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
2643 * @handle: TISCI handle
2644 * @dev: Device pointer to which the resource is assigned
2645 * @of_prop: property name by which the resource are represented
2646 *
2647 * Note: This function expects of_prop to be in the form of tuples
2648 * <type, subtype>. Allocates and initializes ti_sci_resource structure
2649 * for each of_prop. Client driver can directly call
2650 * ti_sci_(get_free, release)_resource apis for handling the resource.
2651 *
2652 * Return: Pointer to ti_sci_resource if all went well else appropriate
2653 * error pointer.
2654 */
2655struct ti_sci_resource *
2656devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2657 struct udevice *dev, u32 dev_id, char *of_prop)
2658{
2659 u32 resource_subtype;
2660 u16 resource_type;
2661 struct ti_sci_resource *res;
2662 int sets, i, ret;
2663 u32 *temp;
2664
2665 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
2666 if (!res)
2667 return ERR_PTR(-ENOMEM);
2668
2669 sets = dev_read_size(dev, of_prop);
2670 if (sets < 0) {
2671 dev_err(dev, "%s resource type ids not available\n", of_prop);
2672 return ERR_PTR(sets);
2673 }
2674 temp = malloc(sets);
2675 sets /= sizeof(u32);
2676 res->sets = sets;
2677
2678 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
2679 GFP_KERNEL);
2680 if (!res->desc)
2681 return ERR_PTR(-ENOMEM);
2682
2683 ret = ti_sci_get_resource_type(handle_to_ti_sci_info(handle), dev_id,
2684 &resource_type);
2685 if (ret) {
2686 dev_err(dev, "No valid resource type for %u\n", dev_id);
2687 return ERR_PTR(-EINVAL);
2688 }
2689
2690 ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
2691 if (ret)
2692 return ERR_PTR(-EINVAL);
2693
2694 for (i = 0; i < res->sets; i++) {
2695 resource_subtype = temp[i];
2696 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
2697 resource_subtype,
2698 &res->desc[i].start,
2699 &res->desc[i].num);
2700 if (ret) {
2701 dev_err(dev, "type %d subtype %d not allocated for host %d\n",
2702 resource_type, resource_subtype,
2703 handle_to_ti_sci_info(handle)->host_id);
2704 return ERR_PTR(ret);
2705 }
2706
2707 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
2708 resource_type, resource_subtype, res->desc[i].start,
2709 res->desc[i].num);
2710
2711 res->desc[i].res_map =
2712 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
2713 sizeof(*res->desc[i].res_map), GFP_KERNEL);
2714 if (!res->desc[i].res_map)
2715 return ERR_PTR(-ENOMEM);
2716 }
2717
2718 return res;
2719}
2720
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	/* Host ID used when the DT does not provide "ti,host-id" */
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
	/* No dev_id -> RM type mapping table is needed on K2G */
	.rm_type_map = NULL,
};
2731
/*
 * dev_id -> TISCI RM "type" mapping for AM654 (see struct
 * ti_sci_rm_type_map). The table is terminated by the dev_id == 0 entry.
 */
static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
	{.dev_id = 0, .type = 0x000}, /* end of table */
};
2741
/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	/* Host ID used when the DT does not provide "ti,host-id" */
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/*
	 * Limited by MBOX_TX_QUEUE_LEN.
	 * NOTE(review): the "K2G can handle up to 128 messages" remark was
	 * copied from the K2G descriptor; confirm the AM654 firmware limit.
	 */
	.max_msgs = 20,
	.max_msg_size = 60,
	.rm_type_map = ti_sci_am654_rm_type_map,
};
2752
/* DT match table: each compatible carries its per-SoC descriptor as data */
static const struct udevice_id ti_sci_ids[] = {
	{
		.compatible = "ti,k2g-sci",
		.data = (ulong)&ti_sci_pmmc_k2g_desc
	},
	{
		.compatible = "ti,am654-sci",
		.data = (ulong)&ti_sci_pmmc_am654_desc
	},
	{ /* Sentinel */ },
};
2764
/* Register the TI SCI protocol driver with the U-Boot driver model */
U_BOOT_DRIVER(ti_sci) = {
	.name = "ti_sci",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_ids,
	.probe = ti_sci_probe,
	/* DM auto-allocates struct ti_sci_info as per-device private data */
	.priv_auto_alloc_size = sizeof(struct ti_sci_info),
};